| Column | Dtype | Range |
| --- | --- | --- |
| repo | stringclasses | 358 values |
| pull_number | int64 | 6 – 67.9k |
| instance_id | stringlengths | 12 – 49 |
| issue_numbers | listlengths | 1 – 7 |
| base_commit | stringlengths | 40 – 40 |
| patch | stringlengths | 87 – 101M |
| test_patch | stringlengths | 72 – 22.3M |
| problem_statement | stringlengths | 3 – 256k |
| hints_text | stringlengths | 0 – 545k |
| created_at | stringlengths | 20 – 20 |
| PASS_TO_PASS | listlengths | 0 – 0 |
| FAIL_TO_PASS | listlengths | 0 – 0 |
jupyterhub/jupyterhub
4,249
jupyterhub__jupyterhub-4249
[ "4228" ]
5b7b9b5677fc4721397c65fb4da592dfc5ddaaa8
diff --git a/docs/generate-metrics.py b/docs/generate-metrics.py --- a/docs/generate-metrics.py +++ b/docs/generate-metrics.py @@ -1,7 +1,6 @@ import os -from pytablewriter import RstSimpleTableWriter -from pytablewriter.style import Style +from pytablewriter import MarkdownTableWriter import jupyterhub.metrics @@ -11,12 +10,11 @@ class Generator: @classmethod def create_writer(cls, table_name, headers, values): - writer = RstSimpleTableWriter() + writer = MarkdownTableWriter() writer.table_name = table_name writer.headers = headers writer.value_matrix = values writer.margin = 1 - [writer.set_style(header, Style(align="center")) for header in headers] return writer def _parse_metrics(self): @@ -33,18 +31,17 @@ def prometheus_metrics(self): if not os.path.exists(generated_directory): os.makedirs(generated_directory) - filename = f"{generated_directory}/metrics.rst" + filename = f"{generated_directory}/metrics.md" table_name = "" headers = ["Type", "Name", "Description"] values = self._parse_metrics() writer = self.create_writer(table_name, headers, values) - title = "List of Prometheus Metrics" - underline = "============================" - content = f"{title}\n{underline}\n{writer.dumps()}" with open(filename, 'w') as f: - f.write(content) - print(f"Generated {filename}.") + f.write("# List of Prometheus Metrics\n\n") + f.write(writer.dumps()) + f.write("\n") + print(f"Generated {filename}") def main(): diff --git a/docs/source/conf.py b/docs/source/conf.py --- a/docs/source/conf.py +++ b/docs/source/conf.py @@ -20,8 +20,6 @@ project = "JupyterHub" author = "Project Jupyter Contributors" copyright = f"{datetime.date.today().year}, {author}" -version = "%i.%i" % jupyterhub.version_info[:2] -release = jupyterhub.__version__ # -- General Sphinx configuration -------------------------------------------- @@ -39,7 +37,7 @@ "myst_parser", ] root_doc = "index" -source_suffix = [".md", ".rst"] +source_suffix = [".md"] # default_role let's use use `foo` instead of ``foo`` in rST default_role = "literal" @@ -48,11 +46,21 @@ # ref: https://myst-parser.readthedocs.io/en/latest/configuration.html # myst_heading_anchors = 2 + myst_enable_extensions = [ + # available extensions: https://myst-parser.readthedocs.io/en/latest/syntax/optional.html "colon_fence", "deflist", + "fieldlist", + "substitution", ] +myst_substitutions = { + # date example: Dev 07, 2022 + "date": datetime.date.today().strftime("%b %d, %Y").title(), + "version": jupyterhub.__version__, +} + # -- Custom directives to generate documentation ----------------------------- # ref: https://myst-parser.readthedocs.io/en/latest/syntax/roles-and-directives.html
diff --git a/docs/source/contributing/tests.md b/docs/source/contributing/tests.md new file mode 100644 --- /dev/null +++ b/docs/source/contributing/tests.md @@ -0,0 +1,130 @@ +(contributing-tests)= + +# Testing JupyterHub and linting code + +Unit testing helps to validate that JupyterHub works the way we think it does, +and continues to do so when changes occur. They also help communicate +precisely what we expect our code to do. + +JupyterHub uses [pytest](https://pytest.org) for all the tests. You +can find them under the [jupyterhub/tests](https://github.com/jupyterhub/jupyterhub/tree/main/jupyterhub/tests) directory in the git repository. + +## Running the tests + +1. Make sure you have completed {ref}`contributing/setup`. + Once you are done, you would be able to run `jupyterhub` from the command line and access it from your web browser. + This ensures that the dev environment is properly set up for tests to run. + +2. You can run all tests in JupyterHub + + ```bash + pytest -v jupyterhub/tests + ``` + + This should display progress as it runs all the tests, printing + information about any test failures as they occur. + + If you wish to confirm test coverage the run tests with the `--cov` flag: + + ```bash + pytest -v --cov=jupyterhub jupyterhub/tests + ``` + +3. You can also run tests in just a specific file: + + ```bash + pytest -v jupyterhub/tests/<test-file-name> + ``` + +4. To run a specific test only, you can do: + + ```bash + pytest -v jupyterhub/tests/<test-file-name>::<test-name> + ``` + + This runs the test with function name `<test-name>` defined in + `<test-file-name>`. This is very useful when you are iteratively + developing a single test. + + For example, to run the test `test_shutdown` in the file `test_api.py`, + you would run: + + ```bash + pytest -v jupyterhub/tests/test_api.py::test_shutdown + ``` + + For more details, refer to the [pytest usage documentation](https://pytest.readthedocs.io/en/latest/usage.html). + +## Test organisation + +The tests live in `jupyterhub/tests` and are organized roughly into: + +1. `test_api.py` tests the REST API +2. `test_pages.py` tests loading the HTML pages + +and other collections of tests for different components. +When writing a new test, there should usually be a test of +similar functionality already written and related tests should +be added nearby. + +The fixtures live in `jupyterhub/tests/conftest.py`. 
There are +fixtures that can be used for JupyterHub components, such as: + +- `app`: an instance of JupyterHub with mocked parts +- `auth_state_enabled`: enables persisting auth_state (like authentication tokens) +- `db`: a sqlite in-memory DB session +- `` io_loop` ``: a Tornado event loop +- `event_loop`: a new asyncio event loop +- `user`: creates a new temporary user +- `admin_user`: creates a new temporary admin user +- single user servers + \- `cleanup_after`: allows cleanup of single user servers between tests +- mocked service + \- `MockServiceSpawner`: a spawner that mocks services for testing with a short poll interval + \- `` mockservice` ``: mocked service with no external service url + \- `mockservice_url`: mocked service with a url to test external services + +And fixtures to add functionality or spawning behavior: + +- `admin_access`: grants admin access +- `` no_patience` ``: sets slow-spawning timeouts to zero +- `slow_spawn`: enables the SlowSpawner (a spawner that takes a few seconds to start) +- `never_spawn`: enables the NeverSpawner (a spawner that will never start) +- `bad_spawn`: enables the BadSpawner (a spawner that fails immediately) +- `slow_bad_spawn`: enables the SlowBadSpawner (a spawner that fails after a short delay) + +Refer to the [pytest fixtures documentation](https://pytest.readthedocs.io/en/latest/fixture.html) to learn how to use fixtures that exists already and to create new ones. + +## Troubleshooting Test Failures + +### All the tests are failing + +Make sure you have completed all the steps in {ref}`contributing/setup` successfully, and are able to access JupyterHub from your browser at <http://localhost:8000> after starting `jupyterhub` in your command line. + +## Code formatting and linting + +JupyterHub automatically enforces code formatting. This means that pull requests +with changes breaking this formatting will receive a commit from pre-commit.ci +automatically. + +To automatically format code locally, you can install pre-commit and register a +_git hook_ to automatically check with pre-commit before you make a commit if +the formatting is okay. + +```bash +pip install pre-commit +pre-commit install --install-hooks +``` + +To run pre-commit manually you would do: + +```bash +# check for changes to code not yet committed +pre-commit run + +# check for changes also in already committed code +pre-commit run --all-files +``` + +You may also install [black integration](https://github.com/psf/black#editor-integration) +into your text editor to format code automatically. diff --git a/docs/source/contributing/tests.rst b/docs/source/contributing/tests.rst deleted file mode 100644 --- a/docs/source/contributing/tests.rst +++ /dev/null @@ -1,138 +0,0 @@ -.. _contributing/tests: - -=================================== -Testing JupyterHub and linting code -=================================== - -Unit testing helps to validate that JupyterHub works the way we think it does, -and continues to do so when changes occur. They also help communicate -precisely what we expect our code to do. - -JupyterHub uses `pytest <https://pytest.org>`_ for all the tests. You -can find them under the `jupyterhub/tests <https://github.com/jupyterhub/jupyterhub/tree/main/jupyterhub/tests>`_ directory in the git repository. - -Running the tests -================== - -#. Make sure you have completed :ref:`contributing/setup`. - Once you are done, you would be able to run ``jupyterhub`` from the command line and access it from your web browser. 
- This ensures that the dev environment is properly set up for tests to run. - -#. You can run all tests in JupyterHub - - .. code-block:: bash - - pytest -v jupyterhub/tests - - This should display progress as it runs all the tests, printing - information about any test failures as they occur. - - If you wish to confirm test coverage the run tests with the `--cov` flag: - - .. code-block:: bash - - pytest -v --cov=jupyterhub jupyterhub/tests - -#. You can also run tests in just a specific file: - - .. code-block:: bash - - pytest -v jupyterhub/tests/<test-file-name> - -#. To run a specific test only, you can do: - - .. code-block:: bash - - pytest -v jupyterhub/tests/<test-file-name>::<test-name> - - This runs the test with function name ``<test-name>`` defined in - ``<test-file-name>``. This is very useful when you are iteratively - developing a single test. - - For example, to run the test ``test_shutdown`` in the file ``test_api.py``, - you would run: - - .. code-block:: bash - - pytest -v jupyterhub/tests/test_api.py::test_shutdown - - For more details, refer to the `pytest usage documentation <https://pytest.readthedocs.io/en/latest/usage.html>`_. - -Test organisation -================= - -The tests live in ``jupyterhub/tests`` and are organized roughly into: - -#. ``test_api.py`` tests the REST API -#. ``test_pages.py`` tests loading the HTML pages - -and other collections of tests for different components. -When writing a new test, there should usually be a test of -similar functionality already written and related tests should -be added nearby. - -The fixtures live in ``jupyterhub/tests/conftest.py``. There are -fixtures that can be used for JupyterHub components, such as: - -- ``app``: an instance of JupyterHub with mocked parts -- ``auth_state_enabled``: enables persisting auth_state (like authentication tokens) -- ``db``: a sqlite in-memory DB session -- ``io_loop```: a Tornado event loop -- ``event_loop``: a new asyncio event loop -- ``user``: creates a new temporary user -- ``admin_user``: creates a new temporary admin user -- single user servers - - ``cleanup_after``: allows cleanup of single user servers between tests -- mocked service - - ``MockServiceSpawner``: a spawner that mocks services for testing with a short poll interval - - ``mockservice```: mocked service with no external service url - - ``mockservice_url``: mocked service with a url to test external services - -And fixtures to add functionality or spawning behavior: - -- ``admin_access``: grants admin access -- ``no_patience```: sets slow-spawning timeouts to zero -- ``slow_spawn``: enables the SlowSpawner (a spawner that takes a few seconds to start) -- ``never_spawn``: enables the NeverSpawner (a spawner that will never start) -- ``bad_spawn``: enables the BadSpawner (a spawner that fails immediately) -- ``slow_bad_spawn``: enables the SlowBadSpawner (a spawner that fails after a short delay) - -Refer to the `pytest fixtures documentation <https://pytest.readthedocs.io/en/latest/fixture.html>`_ to learn how to use fixtures that exists already and to create new ones. - -Troubleshooting Test Failures -============================= - -All the tests are failing -------------------------- - -Make sure you have completed all the steps in :ref:`contributing/setup` successfully, and are able to access JupyterHub from your browser at http://localhost:8000 after starting ``jupyterhub`` in your command line. 
- - -Code formatting and linting -=========================== - -JupyterHub automatically enforces code formatting. This means that pull requests -with changes breaking this formatting will receive a commit from pre-commit.ci -automatically. - -To automatically format code locally, you can install pre-commit and register a -*git hook* to automatically check with pre-commit before you make a commit if -the formatting is okay. - -.. code:: bash - - pip install pre-commit - pre-commit install --install-hooks - -To run pre-commit manually you would do: - -.. code:: bash - - # check for changes to code not yet committed - pre-commit run - - # check for changes also in already committed code - pre-commit run --all-files - -You may also install `black integration <https://github.com/psf/black#editor-integration>`_ -into your text editor to format code automatically.
migrate remaining rst docs to myst We still have several doc pages in .rst format, while most are .md. @consideRatio proposed migrating to myst, which I agree we should do, and it may be good to get done before the docs-reorg internship gets underway in a few weeks. The challenge is the following open PRs (mostly from the outreachy application period last month) touch these rst files: - [x] #4209: - docs/source/contributing/setup.rst - [x] #4186: - docs/source/getting-started/security-basics.rst - [x] #4127: - docs/source/contributing/docs.rst - [x] #4119: - docs/source/contributing/tests.rst - [x] #4094: - docs/source/admin/upgrading.rst - docs/source/getting-started/security-basics.rst - [x] #4084: - docs/source/index.rst - [ ] #4066: - docs/source/contributing/index.rst - docs/source/contributing/layout.rst - [x] #4008: - docs/source/index-admin.rst - [x] #3053: - docs/source/reference/index.rst - [ ] #2726: - docs/source/getting-started/index.rst - docs/source/index.rst <details> <summary>PR list generated with</summary> ```python from github import Github as GitHub gh = GitHub() repo = gh.get_repo("jupyterhub/jupyterhub") for pull in repo.get_pulls(state="open"): pull_files = [f.filename for f in list(pull.get_files())] if any(f.endswith(".rst") for f in pull_files): print(f"- [ ] #{pull.number}:") for f in pull_files: if f.endswith(".rst"): print(f" - {f}") continue ``` </details> We should merge/close as many of these as we can to avoid conflicts (some that just touch an index are no big deal to resolve conflicts).
2022-12-07T08:13:39Z
[]
[]
jupyterhub/jupyterhub
4,302
jupyterhub__jupyterhub-4302
[ "4313" ]
0a84738fe9b579ef26034740b125f98c07392636
diff --git a/jupyterhub/apihandlers/groups.py b/jupyterhub/apihandlers/groups.py --- a/jupyterhub/apihandlers/groups.py +++ b/jupyterhub/apihandlers/groups.py @@ -94,8 +94,9 @@ async def post(self): # create the group self.log.info("Creating new group %s with %i users", name, len(users)) self.log.debug("Users: %s", usernames) - group = orm.Group(name=name, users=users) + group = orm.Group(name=name) self.db.add(group) + group.users = users self.db.commit() created.append(group) self.write(json.dumps([self.group_model(group) for group in created])) @@ -131,8 +132,9 @@ async def post(self, group_name): # create the group self.log.info("Creating new group %s with %i users", group_name, len(users)) self.log.debug("Users: %s", usernames) - group = orm.Group(name=group_name, users=users) + group = orm.Group(name=group_name) self.db.add(group) + group.users = users self.db.commit() self.write(json.dumps(self.group_model(group))) self.set_status(201) diff --git a/jupyterhub/app.py b/jupyterhub/app.py --- a/jupyterhub/app.py +++ b/jupyterhub/app.py @@ -1962,9 +1962,9 @@ async def init_users(self): user = orm.User.find(db, name) if user is None: user = orm.User(name=name, admin=True) + db.add(user) roles.assign_default_roles(self.db, entity=user) new_users.append(user) - db.add(user) else: user.admin = True # the admin_users config variable will never be used after this point. @@ -2376,6 +2376,7 @@ def init_services(self): if orm_service is None: # not found, create a new one orm_service = orm.Service(name=name) + self.db.add(orm_service) if spec.get('admin', False): self.log.warning( f"Service {name} sets `admin: True`, which is deprecated in JupyterHub 2.0." @@ -2384,7 +2385,6 @@ def init_services(self): "the Service admin flag will be ignored." ) roles.update_roles(self.db, entity=orm_service, roles=['admin']) - self.db.add(orm_service) orm_service.admin = spec.get('admin', False) self.db.commit() service = Service( diff --git a/jupyterhub/oauth/provider.py b/jupyterhub/oauth/provider.py --- a/jupyterhub/oauth/provider.py +++ b/jupyterhub/oauth/provider.py @@ -257,16 +257,16 @@ def save_authorization_code(self, client_id, code, request, *args, **kwargs): raise ValueError("No such client: %s" % client_id) orm_code = orm.OAuthCode( - client=orm_client, code=code['code'], # oauth has 5 minutes to complete expires_at=int(orm.OAuthCode.now() + 300), scopes=list(request.scopes), - user=request.user.orm_user, redirect_uri=orm_client.redirect_uri, session_id=request.session_id, ) self.db.add(orm_code) + orm_code.client = orm_client + orm_code.user = request.user.orm_user self.db.commit() def get_authorization_code_scopes(self, client_id, code, redirect_uri, request): diff --git a/jupyterhub/orm.py b/jupyterhub/orm.py --- a/jupyterhub/orm.py +++ b/jupyterhub/orm.py @@ -8,7 +8,9 @@ import alembic.command import alembic.config +import sqlalchemy from alembic.script import ScriptDirectory +from packaging.version import parse as parse_version from sqlalchemy import ( Boolean, Column, @@ -24,8 +26,8 @@ inspect, or_, select, + text, ) -from sqlalchemy.ext.declarative import declarative_base from sqlalchemy.orm import ( Session, backref, @@ -34,6 +36,13 @@ relationship, sessionmaker, ) + +try: + from sqlalchemy.orm import declarative_base +except ImportError: + # sqlalchemy < 1.4 + from sqlalchemy.ext.declarative import declarative_base + from sqlalchemy.pool import StaticPool from sqlalchemy.types import LargeBinary, Text, TypeDecorator from tornado.log import app_log @@ -750,6 +759,7 @@ def new( 
session_id=session_id, scopes=list(scopes), ) + db.add(orm_token) orm_token.token = token if user: assert user.id is not None @@ -760,7 +770,6 @@ def new( if expires_in is not None: orm_token.expires_at = cls.now() + timedelta(seconds=expires_in) - db.add(orm_token) db.commit() return token @@ -902,7 +911,7 @@ def register_ping_connection(engine): """ @event.listens_for(engine, "engine_connect") - def ping_connection(connection, branch): + def ping_connection(connection, branch=None): if branch: # "branch" refers to a sub-connection of a connection, # we don't want to bother pinging on these. @@ -913,11 +922,17 @@ def ping_connection(connection, branch): save_should_close_with_result = connection.should_close_with_result connection.should_close_with_result = False + if parse_version(sqlalchemy.__version__) < parse_version("1.4"): + one = [1] + else: + one = 1 + try: - # run a SELECT 1. use a core select() so that + # run a SELECT 1. use a core select() so that # the SELECT of a scalar value without a table is # appropriately formatted for the backend - connection.scalar(select([1])) + with connection.begin() as transaction: + connection.scalar(select(one)) except exc.DBAPIError as err: # catch SQLAlchemy's DBAPIError, which is a wrapper # for the DBAPI's exception. It includes a .connection_invalidated @@ -932,7 +947,8 @@ def ping_connection(connection, branch): # itself and establish a new connection. The disconnect detection # here also causes the whole connection pool to be invalidated # so that all stale connections are discarded. - connection.scalar(select([1])) + with connection.begin() as transaction: + connection.scalar(select(one)) else: raise finally: @@ -956,7 +972,13 @@ def check_db_revision(engine): from .dbutil import _temp_alembic_ini - with _temp_alembic_ini(engine.url) as ini: + if hasattr(engine.url, "render_as_string"): + # sqlalchemy >= 1.4 + engine_url = engine.url.render_as_string(hide_password=False) + else: + engine_url = str(engine.url) + + with _temp_alembic_ini(engine_url) as ini: cfg = alembic.config.Config(ini) scripts = ScriptDirectory.from_config(cfg) head = scripts.get_heads()[0] @@ -991,9 +1013,10 @@ def check_db_revision(engine): # check database schema version # it should always be defined at this point - alembic_revision = engine.execute( - 'SELECT version_num FROM alembic_version' - ).first()[0] + with engine.begin() as connection: + alembic_revision = connection.execute( + text('SELECT version_num FROM alembic_version') + ).first()[0] if alembic_revision == head: app_log.debug("database schema version found: %s", alembic_revision) else: @@ -1010,13 +1033,16 @@ def mysql_large_prefix_check(engine): """Check mysql has innodb_large_prefix set""" if not str(engine.url).startswith('mysql'): return False - variables = dict( - engine.execute( - 'show variables where variable_name like ' - '"innodb_large_prefix" or ' - 'variable_name like "innodb_file_format";' - ).fetchall() - ) + with engine.begin() as connection: + variables = dict( + connection.execute( + text( + 'show variables where variable_name like ' + '"innodb_large_prefix" or ' + 'variable_name like "innodb_file_format";' + ) + ).fetchall() + ) if ( variables.get('innodb_file_format', 'Barracuda') == 'Barracuda' and variables.get('innodb_large_prefix', 'ON') == 'ON' diff --git a/jupyterhub/user.py b/jupyterhub/user.py --- a/jupyterhub/user.py +++ b/jupyterhub/user.py @@ -416,9 +416,10 @@ def all_spawners(self, include_default=True): yield orm_spawner def _new_orm_spawner(self, server_name): - 
"""Creat the low-level orm Spawner object""" - orm_spawner = orm.Spawner(user=self.orm_user, name=server_name) + """Create the low-level orm Spawner object""" + orm_spawner = orm.Spawner(name=server_name) self.db.add(orm_spawner) + orm_spawner.user = self.orm_user self.db.commit() assert server_name in self.orm_spawners return orm_spawner
diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml --- a/.github/workflows/test.yml +++ b/.github/workflows/test.yml @@ -29,6 +29,7 @@ env: # UTF-8 content may be interpreted as ascii and causes errors without this. LANG: C.UTF-8 PYTEST_ADDOPTS: "--verbose --color=yes" + SQLALCHEMY_WARN_20: "1" permissions: contents: read @@ -140,7 +141,7 @@ jobs: - name: Install Python dependencies run: | pip install --upgrade pip - pip install ".[test]" + pip install -e ".[test]" if [ "${{ matrix.oldest_dependencies }}" != "" ]; then # take any dependencies in requirements.txt such as tornado>=5.0 @@ -152,6 +153,7 @@ jobs: if [ "${{ matrix.main_dependencies }}" != "" ]; then pip install git+https://github.com/ipython/traitlets#egg=traitlets --force + pip install --upgrade --pre sqlalchemy fi if [ "${{ matrix.legacy_notebook }}" != "" ]; then pip uninstall jupyter_server --yes diff --git a/jupyterhub/tests/test_api.py b/jupyterhub/tests/test_api.py --- a/jupyterhub/tests/test_api.py +++ b/jupyterhub/tests/test_api.py @@ -444,11 +444,12 @@ async def test_get_self(app): db.add(oauth_client) db.commit() oauth_token = orm.APIToken( - user=u.orm_user, - oauth_client=oauth_client, token=token, ) db.add(oauth_token) + oauth_token.user = u.orm_user + oauth_token.oauth_client = oauth_client + db.commit() r = await api_request( app, @@ -2131,13 +2132,13 @@ async def shutdown(): def stop(): stop.called = True - loop.call_later(1, real_stop) + loop.call_later(2, real_stop) real_cleanup = app.cleanup def cleanup(): cleanup.called = True - return real_cleanup() + loop.call_later(1, real_cleanup) app.cleanup = cleanup diff --git a/jupyterhub/tests/test_orm.py b/jupyterhub/tests/test_orm.py --- a/jupyterhub/tests/test_orm.py +++ b/jupyterhub/tests/test_orm.py @@ -323,7 +323,9 @@ def test_spawner_delete_cascade(db): db.add(user) db.commit() - spawner = orm.Spawner(user=user) + spawner = orm.Spawner() + db.add(spawner) + spawner.user = user db.commit() spawner.server = server = orm.Server() db.commit() @@ -350,16 +352,19 @@ def test_user_delete_cascade(db): # these should all be deleted automatically when the user goes away user.new_api_token() api_token = user.api_tokens[0] - spawner = orm.Spawner(user=user) + spawner = orm.Spawner() + db.add(spawner) + spawner.user = user db.commit() spawner.server = server = orm.Server() - oauth_code = orm.OAuthCode(client=oauth_client, user=user) + oauth_code = orm.OAuthCode() db.add(oauth_code) - oauth_token = orm.APIToken( - oauth_client=oauth_client, - user=user, - ) + oauth_code.client = oauth_client + oauth_code.user = user + oauth_token = orm.APIToken() db.add(oauth_token) + oauth_token.oauth_client = oauth_client + oauth_token.user = user db.commit() # record all of the ids @@ -390,13 +395,14 @@ def test_oauth_client_delete_cascade(db): # create a bunch of objects that reference the User # these should all be deleted automatically when the user goes away - oauth_code = orm.OAuthCode(client=oauth_client, user=user) + oauth_code = orm.OAuthCode() db.add(oauth_code) - oauth_token = orm.APIToken( - oauth_client=oauth_client, - user=user, - ) + oauth_code.client = oauth_client + oauth_code.user = user + oauth_token = orm.APIToken() db.add(oauth_token) + oauth_token.oauth_client = oauth_client + oauth_token.user = user db.commit() assert user.api_tokens == [oauth_token] @@ -517,11 +523,11 @@ def test_expiring_oauth_token(app, user): db.add(client) orm_token = orm.APIToken( token=token, - oauth_client=client, - user=user, expires_at=now() + timedelta(seconds=30), ) 
db.add(orm_token) + orm_token.oauth_client = client + orm_token.user = user db.commit() found = orm.APIToken.find(db, token) diff --git a/jupyterhub/tests/test_pages.py b/jupyterhub/tests/test_pages.py --- a/jupyterhub/tests/test_pages.py +++ b/jupyterhub/tests/test_pages.py @@ -1033,11 +1033,10 @@ async def test_oauth_token_page(app): user = app.users[orm.User.find(app.db, name)] client = orm.OAuthClient(identifier='token') app.db.add(client) - oauth_token = orm.APIToken( - oauth_client=client, - user=user, - ) + oauth_token = orm.APIToken() app.db.add(oauth_token) + oauth_token.oauth_client = client + oauth_token.user = user app.db.commit() r = await get_page('token', app, cookies=cookies) r.raise_for_status() diff --git a/jupyterhub/tests/test_roles.py b/jupyterhub/tests/test_roles.py --- a/jupyterhub/tests/test_roles.py +++ b/jupyterhub/tests/test_roles.py @@ -3,9 +3,11 @@ # Distributed under the terms of the Modified BSD License. import json import os +import warnings import pytest from pytest import mark +from sqlalchemy.exc import SADeprecationWarning from tornado.log import app_log from .. import orm, roles @@ -343,7 +345,13 @@ async def test_creating_roles(app, role, role_def, response_type, response): # make sure no warnings/info logged when the role exists and its definition hasn't been changed elif response_type == 'no-log': with pytest.warns(response) as record: + # don't catch already-suppressed sqlalchemy warnings + warnings.simplefilter("ignore", SADeprecationWarning) roles.create_role(db, role_def) + + for warning in record.list: + # show warnings for debugging + print("Unexpected warning", warning) assert not record.list role = orm.Role.find(db, role_def['name']) assert role is not None diff --git a/jupyterhub/tests/utils.py b/jupyterhub/tests/utils.py --- a/jupyterhub/tests/utils.py +++ b/jupyterhub/tests/utils.py @@ -6,6 +6,7 @@ import pytest import requests from certipy import Certipy +from sqlalchemy import text from tornado.httputil import url_concat from jupyterhub import metrics, orm @@ -13,6 +14,20 @@ from jupyterhub.roles import assign_default_roles, update_roles from jupyterhub.utils import url_path_join as ujoin +try: + from sqlalchemy.exc import RemovedIn20Warning +except ImportError: + + class RemovedIn20Warning(DeprecationWarning): + """ + I only exist so I can be used in warnings filters in pytest.ini + + I will never be displayed. + + sqlalchemy 1.4 introduces RemovedIn20Warning, + but we still test against older sqlalchemy. + """ + class _AsyncRequests: """Wrapper around requests to return a Future from request methods @@ -85,8 +100,8 @@ def new_func(app, *args, **kwargs): def _check(_=None): temp_session = app.session_factory() try: - temp_session.execute('CREATE TABLE dummy (foo INT)') - temp_session.execute('DROP TABLE dummy') + temp_session.execute(text('CREATE TABLE dummy (foo INT)')) + temp_session.execute(text('DROP TABLE dummy')) finally: temp_session.close()
Restrict the SQLAlchemy dependency to be <2.0.0. Fixes https://github.com/jupyterhub/jupyterhub/issues/4312
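For context, a standalone sketch of the SQLAlchemy 1.4/2.0-style idioms the patch in this row adopts: add objects to the session before wiring up relationships, and wrap raw SQL in `text()` on an explicit connection. The models and data below are hypothetical; only the ordering and `text()` usage mirror the diff (assumes SQLAlchemy >= 1.4).

```python
# Hypothetical models; only the session/relationship ordering and text() usage
# are taken from the patch above.
from sqlalchemy import Column, ForeignKey, Integer, String, create_engine, text
from sqlalchemy.orm import Session, declarative_base, relationship

Base = declarative_base()


class Group(Base):
    __tablename__ = "groups"
    id = Column(Integer, primary_key=True)
    name = Column(String(255))
    users = relationship("User", back_populates="group")


class User(Base):
    __tablename__ = "users"
    id = Column(Integer, primary_key=True)
    name = Column(String(255))
    group_id = Column(Integer, ForeignKey("groups.id"))
    group = relationship("Group", back_populates="users")


engine = create_engine("sqlite://", future=True)
Base.metadata.create_all(engine)

with Session(engine) as db:
    user = User(name="alice")
    db.add(user)
    group = Group(name="team")
    db.add(group)          # add to the session first ...
    group.users = [user]   # ... then assign relationships, as in the patch
    db.commit()

with engine.begin() as connection:
    # raw SQL must be wrapped in text() in SQLAlchemy 1.4+/2.0
    rows = connection.execute(text("SELECT name FROM groups")).fetchall()
    print(rows)  # [('team',)]
```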
2023-01-18T09:39:10Z
[]
[]
jupyterhub/jupyterhub
4,471
jupyterhub__jupyterhub-4471
[ "4282" ]
a66801c4244762b12d1c61559895d5fc4b95b62c
diff --git a/jupyterhub/app.py b/jupyterhub/app.py --- a/jupyterhub/app.py +++ b/jupyterhub/app.py @@ -93,6 +93,8 @@ maybe_future, print_ps_info, print_stacks, + subdomain_hook_idna, + subdomain_hook_legacy, url_path_join, ) @@ -739,6 +741,72 @@ def _domain_default(self): return '' return urlparse(self.subdomain_host).hostname + subdomain_hook = Union( + [Callable(), Unicode()], + default_value="idna", + config=True, + help=""" + Hook for constructing subdomains for users and services. + Only used when `JupyterHub.subdomain_host` is set. + + There are two predefined hooks, which can be selected by name: + + - 'legacy' (deprecated) + - 'idna' (default, more robust. No change for _most_ usernames) + + Otherwise, should be a function which must not be async. + A custom subdomain_hook should have the signature: + + def subdomain_hook(name, domain, kind) -> str: + ... + + and should return a unique, valid domain name for all usernames. + + - `name` is the original name, which may need escaping to be safe as a domain name label + - `domain` is the domain of the Hub itself + - `kind` will be one of 'user' or 'service' + + JupyterHub itself puts very little limit on usernames + to accommodate a wide variety of Authenticators, + but your identity provider is likely much more strict, + allowing you to make assumptions about the name. + + The default behavior is to have all services + on a single `services.{domain}` subdomain, + and each user on `{username}.{domain}`. + This is the 'legacy' scheme, + and doesn't work for all usernames. + + The 'idna' scheme is a new scheme that should produce a valid domain name for any user, + using IDNA encoding for unicode usernames, and a truncate-and-hash approach for + any usernames that can't be easily encoded into a domain component. + + .. versionadded:: 5.0 + """, + ) + + @default("subdomain_hook") + def _default_subdomain_hook(self): + return subdomain_hook_idna + + @validate("subdomain_hook") + def _subdomain_hook(self, proposal): + # shortcut `subdomain_hook = "idna"` config + hook = proposal.value + if hook == "idna": + return subdomain_hook_idna + if hook == "legacy": + if self.subdomain_host: + self.log.warning( + "Using deprecated 'legacy' subdomain hook. JupyterHub.subdomain_hook = 'idna' is the new default, added in JupyterHub 5." + ) + return subdomain_hook_legacy + if not callable(hook): + raise ValueError( + f"subdomain_hook must be 'idna', 'legacy', or a callable, got {hook!r}" + ) + return hook + logo_file = Unicode( '', help="Specify path to a logo image to override the Jupyter logo in the banner.", @@ -2401,14 +2469,16 @@ def service_from_orm( Service: the created service """ + name = orm_service.name if self.domain: - domain = 'services.' + self.domain - parsed = urlparse(self.subdomain_host) - host = f'{parsed.scheme}://services.{parsed.netloc}' + parsed_host = urlparse(self.subdomain_host) + domain = self.subdomain_hook(name, self.domain, kind="service") + host = f"{parsed_host.scheme}://{domain}" + if parsed_host.port: + host = f"{host}:{parsed_host.port}" else: domain = host = '' - name = orm_service.name service = Service( parent=self, app=self, @@ -2454,17 +2524,20 @@ def service_from_spec( Optional[Service]: The created service """ + if 'name' not in spec: + raise ValueError(f'service spec must have a name: {spec}') + + name = spec['name'] + if self.domain: - domain = 'services.' 
+ self.domain - parsed = urlparse(self.subdomain_host) - host = f'{parsed.scheme}://services.{parsed.netloc}' + parsed_host = urlparse(self.subdomain_host) + domain = self.subdomain_hook(name, self.domain, kind="service") + host = f"{parsed_host.scheme}://{domain}" + if parsed_host.port: + host = f"{host}:{parsed_host.port}" else: domain = host = '' - if 'name' not in spec: - raise ValueError('service spec must have a name: %r' % spec) - - name = spec['name'] # get/create orm orm_service = orm.Service.find(self.db, name=name) if orm_service is None: @@ -2895,6 +2968,7 @@ def init_tornado_settings(self): static_path=os.path.join(self.data_files_path, 'static'), static_url_prefix=url_path_join(self.hub.base_url, 'static/'), static_handler_class=CacheControlStaticFilesHandler, + subdomain_hook=self.subdomain_hook, template_path=self.template_paths, template_vars=self.template_vars, jinja2_env=jinja_env, diff --git a/jupyterhub/services/service.py b/jupyterhub/services/service.py --- a/jupyterhub/services/service.py +++ b/jupyterhub/services/service.py @@ -365,6 +365,14 @@ def server(self): def prefix(self): return url_path_join(self.base_url, 'services', self.name + '/') + @property + def href(self): + """Convenient 'href' to use for links to this service""" + if self.domain: + return f"//{self.domain}{self.prefix}" + else: + return self.prefix + @property def proxy_spec(self): if not self.server: diff --git a/jupyterhub/user.py b/jupyterhub/user.py --- a/jupyterhub/user.py +++ b/jupyterhub/user.py @@ -1,11 +1,9 @@ # Copyright (c) Jupyter Development Team. # Distributed under the terms of the Modified BSD License. import json -import string import warnings from collections import defaultdict from datetime import datetime, timedelta -from functools import lru_cache from urllib.parse import quote, urlparse from sqlalchemy import inspect @@ -21,8 +19,10 @@ from .spawner import LocalProcessSpawner from .utils import ( AnyTimeoutError, + _strict_dns_safe, make_ssl_context, maybe_future, + subdomain_hook_legacy, url_escape_path, url_path_join, ) @@ -55,42 +55,6 @@ to a number of seconds that is enough for servers to become responsive. """ -# set of chars that are safe in dns labels -# (allow '.' because we don't mind multiple levels of subdomains) -_dns_safe = set(string.ascii_letters + string.digits + '-.') -# don't escape % because it's the escape char and we handle it separately -_dns_needs_replace = _dns_safe | {"%"} - - -@lru_cache() -def _dns_quote(name): - """Escape a name for use in a dns label - - this is _NOT_ fully domain-safe, but works often enough for realistic usernames. - Fully safe would be full IDNA encoding, - PLUS escaping non-IDNA-legal ascii, - PLUS some encoding of boundary conditions - """ - # escape name for subdomain label - label = quote(name, safe="").lower() - # some characters are not handled by quote, - # because they are legal in URLs but not domains, - # specifically _ and ~ (starting in 3.7). - # Escape these in the same way (%{hex_codepoint}). - unique_chars = set(label) - for c in unique_chars: - if c not in _dns_needs_replace: - label = label.replace(c, f"%{ord(c):x}") - - # underscore is our escape char - - # it's not officially legal in hostnames, - # but is valid in _domain_ names (?), - # and always works in practice. - # FIXME: We should consider switching to proper IDNA encoding - # for 3.0. 
- label = label.replace("%", "_") - return label - class UserDict(dict): """Like defaultdict, but for users @@ -559,8 +523,19 @@ def proxy_spec(self): @property def domain(self): """Get the domain for my server.""" + hook = self.settings.get("subdomain_hook", subdomain_hook_legacy) + return hook(self.name, self.settings['domain'], kind='user') - return _dns_quote(self.name) + '.' + self.settings['domain'] + @property + def dns_safe_name(self): + """Get a dns-safe encoding of my name + + - always safe value for a single DNS label + - max 40 characters, leaving room for additional components + + .. versionadded:: 5.0 + """ + return _strict_dns_safe(self.name, max_length=40) @property def host(self): diff --git a/jupyterhub/utils.py b/jupyterhub/utils.py --- a/jupyterhub/utils.py +++ b/jupyterhub/utils.py @@ -8,19 +8,23 @@ import hashlib import inspect import random +import re import secrets import socket import ssl +import string import sys import threading import uuid import warnings from binascii import b2a_hex from datetime import datetime, timezone +from functools import lru_cache from hmac import compare_digest from operator import itemgetter from urllib.parse import quote +import idna from async_generator import aclosing from sqlalchemy.exc import SQLAlchemyError from tornado import gen, ioloop, web @@ -779,3 +783,153 @@ def get_browser_protocol(request): # no forwarded headers return request.protocol + + +# set of chars that are safe in dns labels +# (allow '.' because we don't mind multiple levels of subdomains) +_dns_safe = set(string.ascii_letters + string.digits + '-.') +# don't escape % because it's the escape char and we handle it separately +_dns_needs_replace = _dns_safe | {"%"} + + +@lru_cache() +def _dns_quote(name): + """Escape a name for use in a dns label + + this is _NOT_ fully domain-safe, but works often enough for realistic usernames. + Fully safe would be full IDNA encoding, + PLUS escaping non-IDNA-legal ascii, + PLUS some encoding of boundary conditions + """ + # escape name for subdomain label + label = quote(name, safe="").lower() + # some characters are not handled by quote, + # because they are legal in URLs but not domains, + # specifically _ and ~ (starting in 3.7). + # Escape these in the same way (%{hex_codepoint}). + unique_chars = set(label) + for c in unique_chars: + if c not in _dns_needs_replace: + label = label.replace(c, f"%{ord(c):x}") + + # underscore is our escape char - + # it's not officially legal in hostnames, + # but is valid in _domain_ names (?), + # and seems to always work in practice. + label = label.replace("%", "_") + return label + + +def subdomain_hook_legacy(name, domain, kind): + """Legacy (default) hook for subdomains + + Users are at '$user.$host' where $user is _mostly_ DNS-safe. + Services are all simultaneously on 'services.$host`. 
+ """ + if kind == "user": + # backward-compatibility + return f"{_dns_quote(name)}.{domain}" + elif kind == "service": + return f"services.{domain}" + else: + raise ValueError(f"kind must be 'service' or 'user', not {kind!r}") + + +# strict dns-safe characters (excludes '-') +_strict_dns_safe = set(string.ascii_lowercase) | set(string.digits) + + +def _trim_and_hash(name): + """Always-safe fallback for a DNS label + + Produces a valid and unique DNS label for any string + + - prefix with 'u-' to avoid collisions and first-character rules + - Selects the first N characters that are safe ('x' if none are safe) + - suffix with truncated hash of true name + - length is guaranteed to be < 32 characters + leaving room for additional components to build a DNS label. + Will currently be between 12-19 characters: + 4 (prefix, delimiters) + 7 (hash) + 1-8 (name stub) + """ + name_hash = hashlib.sha256(name.encode('utf8')).hexdigest()[:7] + + safe_chars = [c for c in name.lower() if c in _strict_dns_safe] + name_stub = ''.join(safe_chars[:8]) + # We MUST NOT put the `--` in the 3rd and 4th position (RFC 5891) + # which is reserved for IDNs + # It would be if name_stub were empty, so put 'x' here + # (value doesn't matter, as uniqueness is in the hash - the stub is more of a hint, anyway) + if not name_stub: + name_stub = "x" + return f"u-{name_stub}--{name_hash}" + + +# A host name (label) can start or end with a letter or a number +# this pattern doesn't need to handle the boundary conditions, +# which are handled more simply with starts/endswith +_dns_re = re.compile(r'^[a-z0-9-]{1,63}$', flags=re.IGNORECASE) + + +def _is_dns_safe(label, max_length=63): + # A host name (label) MUST NOT consist of all numeric values + if label.isnumeric(): + return False + # A host name (label) can be up to 63 characters + if not 0 < len(label) <= max_length: + return False + # A host name (label) MUST NOT start or end with a '-' (dash) + if label.startswith('-') or label.endswith('-'): + return False + return bool(_dns_re.match(label)) + + +def _strict_dns_safe_encode(name, max_length=63): + """Will encode a username to a guaranteed-safe DNS label + + - if it contains '--' at all, jump to the end and take the hash route to avoid collisions with escaped + - if safe, use it + - if not, use IDNA encoding + - if a safe encoding cannot be produced, use stripped safe characters + '--{hash}` + - allow specifying a max_length, to give room for additional components, + if used as only a _part_ of a DNS label. 
+ """ + # short-circuit: avoid accepting already-encoded results + # which all include '--' + if '--' in name: + return _trim_and_hash(name) + + # if name is already safe (and can't collide with an escaped result) use it + if _is_dns_safe(name, max_length=max_length): + return name + + # next: use IDNA encoding, if applicable + try: + idna_name = idna.encode(name).decode("ascii") + except ValueError: + idna_name = None + + if idna_name and idna_name != name and _is_dns_safe(idna_name): + return idna_name + + # fallback, always works: trim to safe characters and hash + return _trim_and_hash(name) + + +def subdomain_hook_idna(name, domain, kind): + """New, reliable subdomain hook + + More reliable than previous, should always produce valid domains + + - uses IDNA encoding for simple unicode names + - separate domain for each service + - uses stripped name and hash, where above schemes fail to produce a valid domain + """ + safe_name = _strict_dns_safe_encode(name) + if kind == 'user': + # 'user' namespace is special-cased as the default + # for aesthetics and backward-compatibility for names that don't need escaping + suffix = "" + else: + suffix = f"--{kind}" + return f"{safe_name}{suffix}.{domain}"
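The `subdomain_hook` traitlet introduced in this patch accepts the strings `'idna'` or `'legacy'`, or a callable with the signature `subdomain_hook(name, domain, kind) -> str` described in its help string. A hypothetical `jupyterhub_config.py` sketch follows; the escaping scheme in it is illustrative only and does not guarantee unique or valid labels for every username.

```python
# jupyterhub_config.py -- hypothetical custom hook; the signature follows the
# help string in the diff above, but the escaping scheme is made up here.
def my_subdomain_hook(name, domain, kind):
    # kind is 'user' or 'service'; naive lowercasing/replacement, not collision-safe
    safe = name.lower().replace(".", "-").replace("_", "-")
    if kind == "service":
        return f"svc-{safe}.{domain}"
    return f"user-{safe}.{domain}"


# `c` is the config object JupyterHub provides in jupyterhub_config.py
c.JupyterHub.subdomain_host = "https://hub.example.org"
c.JupyterHub.subdomain_hook = my_subdomain_hook

# or select one of the built-in schemes by name:
# c.JupyterHub.subdomain_hook = "idna"    # new default
# c.JupyterHub.subdomain_hook = "legacy"  # previous behaviour
```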
diff --git a/jupyterhub/tests/test_utils.py b/jupyterhub/tests/test_utils.py --- a/jupyterhub/tests/test_utils.py +++ b/jupyterhub/tests/test_utils.py @@ -122,3 +122,46 @@ def test_browser_protocol(x_scheme, x_forwarded_proto, forwarded, expected): proto = utils.get_browser_protocol(request) assert proto == expected + + [email protected]( + "name, expected", + [ + ("safe", "safe"), + ("has--doubledash", "u-hasdoubl--cb052ae"), + ("uhasdoubl--cb052ae", "u-uhasdoub--3c0d1c9"), + ("üni", "xn--ni-wka"), + ("xn--ni-wka", "u-xnniwka--ceb4edd"), + ("x", "x"), + ("-pre", "u-pre--0e46e7b"), + ("É", "u-x--a755f65"), + ("é", "xn--9ca"), + ("a" * 64, "u-aaaaaaaa--ffe054f"), + ("a.b", "u-ab--2e7336d"), + ], +) +def test_subdomain_hook_idna(name, expected): + expected_domain = expected + ".domain" + resolved = utils.subdomain_hook_idna(name, "domain", "user") + assert resolved == expected_domain + + [email protected]( + "name, expected", + [ + ("safe", "safe"), + ("üni", "_c3_bcni"), + ("x", "x"), + ("É", "_c3_89"), + ("é", "_c3_a9"), + # bad cases: + ("a.b", "a.b"), + ("has--doubledash", "has--doubledash"), + ("-pre", "-pre"), + ("a" * 64, "a" * 64), + ], +) +def test_subdomain_hook_legacy(name, expected): + expected_domain = expected + ".domain" + resolved = utils.subdomain_hook_legacy(name, "domain", "user") + assert resolved == expected_domain
Dot should not be an allowed domain name character ### Bug description If a username contains a period and subdomains are enabled, they will get a sub-subdomain now because this code doesn't mind sub-subdomains: https://github.com/jupyterhub/jupyterhub/blob/49c518940bbdedbaf9037d463bcf455c794e5e3f/jupyterhub/user.py#L58-L60 I believe, however, that this doesn't match a common use case. For example, wildcard certificates don't support multilevel nesting, if I understand correctly. #### Expected behaviour Dot isn't considered a `_dns_safe` character.
Interesting! It definitely _is_ valid sometimes, so I guess it needs to be configurable? But you're right that 'regular' wildcard SSL won't be sufficient (nested subdomains can be added via SAN, but I imagine that's pretty unusual). Would it perhaps be a reasonable idea to consider other pathways for providing a more complex subdomain structure than accidentally via the username? Deeper nested domains are a reasonable niche application, but arbitrary level nesting *based on a username* seems too weird. Plus I'm not even sure if `one...two.example.com` is a legitimate domain name. > Would it perhaps be a reasonable idea to consider other pathways for providing a more complex subdomain structure than accidentally via the username? I definitely think a custom hook to totally override domain-for-user makes sense. > Plus I'm not even sure if one...two.example.com is a legitimate domain name. I'm pretty sure it's not. There's a comment that we should really be using idna encoding for full DNS-safe labels. There's a [package for that](https://pypi.org/project/idna/), but it doesn't handle the ASCII part of it (escape `.`, can't start or end with `-`, etc.).
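To make the IDNA suggestion above concrete, here is a small sketch using the `idna` package; the expected outputs follow the `test_subdomain_hook_idna` cases in this row's test patch, and the fallback branch is only a rough stand-in for the patch's `_trim_and_hash` helper.

```python
# Sketch of idna-based label escaping, per the discussion above.
import hashlib

import idna


def encode_label(name: str) -> str:
    """Best-effort single DNS label for a username."""
    try:
        return idna.encode(name).decode("ascii")
    except ValueError:
        # names that are not valid IDNA fall back to a truncated hash,
        # roughly like _trim_and_hash in the patch above
        digest = hashlib.sha256(name.encode("utf8")).hexdigest()[:7]
        return f"u-x--{digest}"


print(encode_label("üni"))  # xn--ni-wka
print(encode_label("é"))    # xn--9ca
print(encode_label("É"))    # u-x--a755f65 (uppercase fails strict IDNA2008)
```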
2023-06-06T09:24:20Z
[]
[]
jupyterhub/jupyterhub
4,480
jupyterhub__jupyterhub-4480
[ "4461" ]
0e4deec714a30729d11e7d0b1ce359f65faaf6af
diff --git a/jupyterhub/app.py b/jupyterhub/app.py --- a/jupyterhub/app.py +++ b/jupyterhub/app.py @@ -2041,21 +2041,20 @@ async def init_users(self): TOTAL_USERS.set(total_users) - async def _get_or_create_user(self, username): + async def _get_or_create_user(self, username, hint): """Create user if username is found in config but user does not exist""" - if not (await maybe_future(self.authenticator.check_allowed(username, None))): - raise ValueError( - "Username %r is not in Authenticator.allowed_users" % username - ) user = orm.User.find(self.db, name=username) if user is None: if not self.authenticator.validate_username(username): raise ValueError("Username %r is not valid" % username) - self.log.info(f"Creating user {username}") + self.log.info(f"Creating user {username} found in {hint}") user = orm.User(name=username) self.db.add(user) roles.assign_default_roles(self.db, entity=user) self.db.commit() + f = self.authenticator.add_user(user) + if f: + await maybe_future(f) return user async def init_groups(self): @@ -2064,7 +2063,9 @@ async def init_groups(self): if self.authenticator.manage_groups and self.load_groups: raise ValueError("Group management has been offloaded to the authenticator") + for name, contents in self.load_groups.items(): + self.log.debug("Loading group %s", name) group = orm.Group.find(db, name) if group is None: @@ -2080,9 +2081,11 @@ async def init_groups(self): if 'users' in contents: for username in contents['users']: username = self.authenticator.normalize_username(username) - user = await self._get_or_create_user(username) + user = await self._get_or_create_user( + username, hint=f"group: {name}" + ) if group not in user.groups: - self.log.debug(f"Adding user {username} to group {name}") + self.log.debug(f"Adding user {username} to group {name}") group.users.append(user) if 'properties' in contents: @@ -2110,8 +2113,9 @@ async def init_role_creation(self): roles_with_new_permissions = [] for role_spec in self.load_roles: role_name = role_spec['name'] + self.log.debug("Loading role %s", role_name) if role_name in default_roles_dict: - self.log.debug(f"Overriding default role {role_name}") + self.log.debug("Overriding default role %s", role_name) # merge custom role spec with default role spec when overriding # so the new role can be partially defined default_role_spec = default_roles_dict.pop(role_name) @@ -2198,34 +2202,33 @@ async def init_role_assignment(self): for name in role_spec[kind]: if kind == 'users': name = self.authenticator.normalize_username(name) - if not ( - await maybe_future( - self.authenticator.check_allowed(name, None) - ) - ): - raise ValueError( - f"Username {name} is not in Authenticator.allowed_users" - ) Class = orm.get_class(kind) orm_obj = Class.find(db, name) if orm_obj is not None: orm_role_bearers.append(orm_obj) else: - self.log.info( - f"Found unexisting {kind} {name} in role definition {role_name}" - ) if kind == 'users': - orm_obj = await self._get_or_create_user(name) + orm_obj = await self._get_or_create_user( + name, hint=f"role: {role_name}" + ) orm_role_bearers.append(orm_obj) elif kind == 'groups': + self.log.info( + f"Creating group {name} found in role: {role_name}" + ) group = orm.Group(name=name) db.add(group) db.commit() orm_role_bearers.append(group) - else: + elif kind == "services": raise ValueError( - f"{kind} {name} defined in config role definition {role_name} but not present in database" + f"Found undefined service {name} in role {role_name}. Define it first in c.JupyterHub.services." 
) + else: + # this can't happen now, but keep the `else` in case we introduce a problem + # in the declaration of `kinds` above + raise ValueError(f"Unhandled role member kind: {kind}") + # Ensure all with admin role have admin flag if role_name == 'admin': orm_obj.admin = True @@ -2285,20 +2288,12 @@ async def _add_tokens(self, token_dict, kind): for token, name in token_dict.items(): if kind == 'user': name = self.authenticator.normalize_username(name) - if not ( - await maybe_future(self.authenticator.check_allowed(name, None)) - ): - raise ValueError( - "Token user name %r is not in Authenticator.allowed_users" - % name - ) if not self.authenticator.validate_username(name): raise ValueError("Token user name %r is not valid" % name) if kind == 'service': if not any(service["name"] == name for service in self.services): self.log.warning( - "Warning: service '%s' not in services, creating implicitly. It is recommended to register services using services list." - % name + f"service {name} not in services, creating implicitly. It is recommended to register services using services list." ) orm_token = orm.APIToken.find(db, token) if orm_token is None:
diff --git a/jupyterhub/tests/test_app.py b/jupyterhub/tests/test_app.py --- a/jupyterhub/tests/test_app.py +++ b/jupyterhub/tests/test_app.py @@ -251,7 +251,6 @@ async def test_load_groups(tmpdir, request): hub.init_db() db = hub.db await hub.init_role_creation() - await hub.init_users() await hub.init_groups() @@ -435,3 +434,42 @@ async def start(self): assert hub.argv == sys_argv[1:] else: assert hub.argv == argv + + +async def test_user_creation(tmpdir, request): + allowed_users = {"in-allowed", "in-group-in-allowed", "in-role-in-allowed"} + groups = { + "group": { + "users": ["in-group", "in-group-in-allowed"], + } + } + roles = [ + { + "name": "therole", + "users": ["in-role", "in-role-in-allowed"], + } + ] + + cfg = Config() + cfg.Authenticator.allowed_users = allowed_users + cfg.JupyterHub.load_groups = groups + cfg.JupyterHub.load_roles = roles + ssl_enabled = getattr(request.module, "ssl_enabled", False) + kwargs = dict(config=cfg) + if ssl_enabled: + kwargs['internal_certs_location'] = str(tmpdir) + hub = MockHub(**kwargs) + hub.init_db() + + await hub.init_role_creation() + await hub.init_role_assignment() + await hub.init_users() + await hub.init_groups() + assert hub.authenticator.allowed_users == { + "admin", # added by default config + "in-allowed", + "in-group-in-allowed", + "in-role-in-allowed", + "in-group", + "in-role", + }
Can we override `Authenticator.check_allowed` in jupyterhub/oauthenticator and assume authentication argument be passed? In [oauthenticator #594](https://github.com/jupyterhub/oauthenticator/pull/594) we are currently planning to override the `check_allowed` as considered and proposed in https://github.com/jupyterhub/oauthenticator/pull/594#issuecomment-1519086377 and the comments below. The [`check_allowed` function signature](https://github.com/jupyterhub/jupyterhub/blob/4.0.0/jupyterhub/auth.py#L432-L438) is... ```python def check_allowed(self, username, authentication=None): """Check if a username is allowed to authenticate based on configuration Return True if username is allowed, False otherwise. No allowed_users set means any username is allowed. Names are normalized *before* being checked against the allowed set. ``` What came as a surprise to me now was that check_allowed was also being called by JupyterHub without passing the authentication dictionary (aka. `auth_model`), and having error messages related to specifically that `allowed_users` check failing. It seems that also the [LocalAuthenticator](https://github.com/jupyterhub/jupyterhub/blob/main/jupyterhub/auth.py#L871-L875) class overrides `check_allowed`, and adds a check for beloning to either allowed_users or allowed_groups very similar to what we want to do. Our unique challenge is that we rely on being passed `authentication` aka `auth_model`, and that isn't passed at all times when check_allowed is used. In practice it seems that only if the deprecated `JupyterHub.api_tokens` is used or if `JupyterHub.load_roles` is configured, and they involve user api tokens or similar, then `check_allowed` will be called without an authentication dictionary. ### Using `JupyterHub.load_roles` [`JupyterHub.init_role_assignment`](https://github.com/jupyterhub/jupyterhub/blob/4.0.0/jupyterhub/app.py#L2274-L2295) ```python async def init_role_assignment(self): """ This function loads config from `JupyterHub.load_roles` """ # tokens are added separately kinds = ['users', 'services', 'groups'] # ... for role_spec in self.load_roles: # ... # add users, services, and/or groups, # tokens need to be checked for permissions for kind in kinds: orm_role_bearers = [] if kind in role_spec: for name in role_spec[kind]: if kind == 'users': name = self.authenticator.normalize_username(name) if not ( await maybe_future( self.authenticator.check_allowed(name, None) ) ): raise ValueError( f"Username {name} is not in Authenticator.allowed_users" ) ``` ### Using `JupyterHub.api_tokens` [`JupyterHub.api_tokens`](https://github.com/jupyterhub/jupyterhub/blob/4.0.0/jupyterhub/app.py#L1049-L1060) ```python api_tokens = Dict( Unicode(), help="""PENDING DEPRECATION: consider using services Dict of token:username to be loaded into the database. Allows ahead-of-time generation of API tokens for use by externally managed services, which authenticate as JupyterHub users. Consider using services for general services that talk to the JupyterHub API. """, ).tag(config=True) ``` [`JupyterHub.init_api_tokens`](https://github.com/jupyterhub/jupyterhub/blob/4.0.0/jupyterhub/app.py#L2345-L2349) ```python async def init_api_tokens(self): """Load predefined API tokens (for services) into database""" await self._add_tokens(self.service_tokens, kind='service') await self._add_tokens(self.api_tokens, kind='user') # ... 
``` [`JupyterHub._add_token`](https://github.com/jupyterhub/jupyterhub/blob/4.0.0/jupyterhub/app.py#L2274-L2295) ```python async def _add_tokens(self, token_dict, kind): """Add tokens for users or services to the database""" if kind == 'user': Class = orm.User elif kind == 'service': Class = orm.Service else: raise ValueError("kind must be user or service, not %r" % kind) db = self.db for token, name in token_dict.items(): if kind == 'user': name = self.authenticator.normalize_username(name) if not ( await maybe_future(self.authenticator.check_allowed(name, None)) ): raise ValueError( "Token user name %r is not in Authenticator.allowed_users" % name ) if not self.authenticator.validate_username(name): raise ValueError("Token user name %r is not valid" % name) ```
You can't assume that it will always be passed. When users are added via configuration, the allow check is called to avoid creating users that aren't allowed elsewhere during the configuration process, which adds them to the allowed users set, etc. If the check was skipped, then users added via configuration would become allowed who shouldn't be. This is specifically an issue for the per-user allow list. If using other allow checks _to the exclusion of the allowed_users set_, then these extra checks don't matter because adding users to the db doesn't affect whether they will be allowed later. You _can_, however, assume that if `None` is given as the authentication info, that it's coming from startup configuration such as role assignment. So it would not be unreasonable to return True _if_ team auth is enabled and auth info is None. The failure mode of doing so would be: 1. user is on a team 2. team is granted access to jupyterhub 3. user is granted specific permission via role assignments 4. user is removed from a team, losing access to _authentication_, but the user keeps their role assignment The allowed_user check is applied at _authentication time_ (i.e. login), not on every request, so this user who has had their permission revoked will no longer be able to login. However, any tokens they have been issued will still have access to the assigned roles. Arguably, this behavior matches the configuration - the user has been explicitly granted this permission and their login access has been revoked, but it doesn't make a huge amount of sense. If historical background is helpful - for a long time, only the username was passed to the allow check. The authentication dict was added _when available_ to enable things like oauth team allow lists, but the checks were already present in more places than the auth could be made available. I'm not quite sure what changes in JupyterHub would make this better. We could remove the allow check entirely in app startup. I'll need to think about the consequences there, but it should probably be the right behavior for e.g. GitHub team auth. In general, there's an issue with token expiry and upstream user auth state changes, because a user's auth state (e.g. team membership) doesn't get updated until _that user_ logs in again. That's usually fine for privilege _escalation_, but privilege _revocation_ does not take effect promptly. It works fine for login and spawn, but not for token access to the API. If a deployment relies on things like teams auth, they should probably apply things like requiring shorter token expiry (and JupyterHub should make this easier), to limit the window where these can be out of sync. > When users are added via configuration, the allow check is called to avoid creating users that aren't allowed elsewhere during the configuration process, _**which adds them to the allowed users set**_, etc. Ah... I'm catching up with how things really works. Something that becomes clear now that I didn't understand was that: 1. Users in allowed_users not in db are added to the db 2. Users in db are added to the allowed_users, but only if allowed_users is truthy already via `Authenticator.add_users` Okay so this means `allowed_users` when referenced by the authenticator in check_allowed etc, is either a set of all users configured by the user explicitly and the list of users in the database, or an empty set. 
[`JupyterHub.init_users`](https://github.com/jupyterhub/jupyterhub/blob/4.0.0/jupyterhub/app.py#L1928-L2005) ```python async def init_users(self): # [...] # Notify authenticator of all users. # This ensures Authenticator.allowed_users is up-to-date with the database. # This lets .allowed_users be used to set up initial list, # but changes to the allowed_users set can occur in the database, # and persist across sessions. total_users = 0 for user in db.query(orm.User): try: f = self.authenticator.add_user(user) except Exception: self.log.exception("Error adding user %s already in db", user.name) if self.authenticator.delete_invalid_users: self.log.warning( "Deleting invalid user %s from the Hub database", user.name ) db.delete(user) else: self.log.warning( dedent( """ You can set c.Authenticator.delete_invalid_users = True to automatically delete users from the Hub database that no longer pass Authenticator validation, such as when user accounts are deleted from the external system without notifying JupyterHub. """ ) ) ``` [`Authenticator.add_user`](https://github.com/jupyterhub/jupyterhub/blob/4.0.0/jupyterhub/auth.py#L625-L648) ```python def add_user(self, user): """Hook called when a user is added to JupyterHub This is called: - When a user first authenticates - When the hub restarts, for all users. This method may be a coroutine. By default, this just adds the user to the allowed_users set. Subclasses may do more extensive things, such as adding actual unix users, but they should call super to ensure the allowed_users set is updated. Note that this should be idempotent, since it is called whenever the hub restarts for all users. Args: user (User): The User wrapper object """ if not self.validate_username(user.name): raise ValueError("Invalid username: %s" % user.name) if self.allowed_users: self.allowed_users.add(user.name) ``` [`Authenticator.delete_user`](https://github.com/jupyterhub/jupyterhub/blob/4.0.0/jupyterhub/auth.py#L650-L659) ```python def delete_user(self, user): """Hook called when a user is deleted Removes the user from the allowed_users set. Subclasses should call super to ensure the allowed_users set is updated. Args: user (User): The User wrapper object """ self.allowed_users.discard(user.name) ``` > If the check was skipped, then users added via configuration would become allowed who shouldn't be. I need to understand this better, thinking about the specific situations. There was two situations when check_allowed was called: - `init_api_tokens` -> `_add_tokens` (influenced by deprecated since v0.8 `JupyterHub.api_tokens` config) - `init_role_assignment` (influenced by `JupyterHub.load_roles` config) I'll disregard the first case with use of since very long deprecated `JupyterHub.api_tokens` for simplicity, focusing on the second case with `JupyterHub.load_roles` involving users. Here is an example [from jupyterhub docs](https://jupyterhub.readthedocs.io/en/stable/rbac/roles.html#defining-roles) on using this config. ```python c.JupyterHub.load_roles = [ { 'description': 'Read-only user models', 'name': 'reader', 'scopes': ['read:users'], 'services': ['external'], 'users': ['maria', 'joe'] } ] ``` So if we make `check_allowed` respond True when passed no authentication data, then we influence this code, which means the user will be added to the database, which means that on subsequent startups, it will also be added to the added to truthy allowed_users sets... 
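A toy reproduction of the truthy-`allowed_users` behavior in `Authenticator.add_user` quoted above (the class is a stripped-down stand-in, not real JupyterHub code):

```python
class Auth:
    """Stand-in reproducing only the allowed_users handling of add_user."""

    def __init__(self, allowed_users):
        self.allowed_users = set(allowed_users)

    def add_user(self, name):
        # same guard as Authenticator.add_user: only extend a *truthy* set
        if self.allowed_users:
            self.allowed_users.add(name)


# case 1: allowed_users not configured -> stays empty (everyone allowed)
a = Auth([])
a.add_user("maria")
print(sorted(a.allowed_users))  # []

# case 2: allowed_users configured -> users already in the db get appended on restart
b = Auth(["joe"])
b.add_user("maria")
print(sorted(b.allowed_users))  # ['joe', 'maria']
```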
https://github.com/jupyterhub/jupyterhub/blob/0e4deec714a30729d11e7d0b1ce359f65faaf6af/jupyterhub/app.py#L2193-L2219 ### Situations to drive discussion - GitHubOAuthenticator.allowed_organization The current state of things when for example using GitHubOAuthenticator configured with `allowed_organization` is that `check_allowed` will ignore and the authentication logic will consider it. If one would configure allowed_users together with allowed_organization, then one would need to configure allowed_users to a set of users inside the allowed_organization anyway, making it pointless to have allowed_organization. ### Understanding of issues summarized - `check_allowed` is called on jupyterhub startup without authentication info passed when referencing users via `JupyterHub.load_roles`, `JupyterHub.load_groups`, or deprecated `JupyterHub.api_tokens`. - Getting authentication info _could require_ approval from the authenticated user, and just can't be something we assume to have when starting up jupyterhub that parses `load_roles` and wants to call `check_allowed`. - This makes use of `check_allowed` without an authentication dict problematic, because we can only get a signal if the user is allowed, not know for sure no matter what. ### Key question in my mind I want to articulate why check_allowed should be called at all. Is it because otherwise load_roles and load_groups would influence a configured allowed_users list because users are added to the database, and after restart, they get unexpectedly allowed because of it? @minrk could you try help refine the need for check_allowed to be called when using `load_roles` and `load_groups`? The general idea is that "when attempting to add a user, check if they are allowed", so we perform the check when users are added. load_roles/groups are places where users are added. It's a way to ensure that configuration is self-consistent. For example, without check_allowed (or with a short-circuit that returns True without auth info), you could grant access to JupyterHUb to a user who cannot login, because check_allowed at login time would fail. If they _never_ have access, this is an inconsistency of little consequence (a user exists who can never login). But if they have had access in the past, any api tokens they have will still be usable, but they will never be able to login again. _but_! since those who specify load_roles, etc. config necessarily have control of allowed_users config, we can also implicitly _allow_ any user added in this way. > For example, without check_allowed (or with a short-circuit that returns True without auth info), you could grant access to JupyterHUb to a user who cannot login, because check_allowed at login time would fail. If they never have access, this is an inconsistency of little consequence (a user exists who can never login). I agree, this is acceptable. > But if they have had access in the past, any api tokens they have will still be usable, but they will never be able to login again. Okay hmm... > _but_! since those who specify load_roles, etc. config necessarily have control of allowed_users config, we can also implicitly _allow_ any user added in this way. Hmmm... Are you saying that it could make sense to call `authenticator.add_user()` if they are found in load_roles or load_groups? Yes, I'm saying that if we treat the presence of any user in config as an implicit addition to `allowed_users`. Rather than _enforcing_ that config is consistent, it's _ensuring_ that it's consistent (or assuming it). 
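To make the trade-off concrete, here is a minimal config exhibiting the inconsistency being discussed (user names are only examples): with the startup-time check, the hub rejects this config because `bob` is not allowed; without it, `bob` would be created in the database but could never log in.

```python
c.Authenticator.allowed_users = {"alice"}

c.JupyterHub.load_roles = [
    {
        "name": "reader",
        "scopes": ["read:users"],
        "users": ["alice", "bob"],  # 'bob' is not in allowed_users
    }
]
```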
Removing the check accomplishes this. i.e. we go from: > if you add a user anywhere in your config that's not consistent with allowed_users, we raise to tell you that your config doesn't make sense. to > if you add a user anywhere in your config, that user should be allowed access to jupyterhub. It will still be possible to get into an inconsistent state with 'access' vs 'login', depending on check_allowed when using external team membership, etc., but that's not really different from today - it's just not evaluated at hub startup time anymore. What will be lost is simple cases of usually catching typos in usernames, but that's perhaps not the most important thing. ### Can we get state changes by restarting? I end up thinking about a case where a user is created and added to the database. Should it always go hand in hand with calling `authenticator.add_user`? Right now, load_roles and load_groups creates users if check_allowed returns true, but doesn't call `authenticator.add_user`. But, it will be called after restart because they are part of the database, and they will be added to `allowed_users` if it was set at all at that point as well. Should we make a call to `authenticator.add_user` consistently when creating a user to make the state consistent with the state it arrives at after a restart? > What will be lost is simple cases of usually catching typos in usernames, but that's perhaps not the most important thing. We could still call normalize and validate username on startup right? These function doesn't require authentication dictionaries. > I end up thinking about a case where a user is created and added to the database. Should it always go hand in hand with calling authenticator.add_user? Yes, I believe so. > Should we make a call to authenticator.add_user consistently when creating a user to make the state consistent with the state it arrives at after a restart? Yes, and definitely do need to if we change the behavior. As it is now, only allowed users get past this step, so `add_user` should have already been called for that user. But if we change it so that users can be created at this point, we should call `add_user`, yes. There may be cases for more complex implementations of allowing users where add_user is missed during creation via config for previously undeclared users. > We could still call normalize and validate username on startup right? Yes, and we should. That's not relevant to the issue, though - e.g. a mismatch in spelling between `allowed_users` and a role assignment. Erik: > if you add a user anywhere in your config, that user should be allowed access to jupyterhub. Min: > It will still be possible to get into an inconsistent state with 'access' vs 'login', depending on check_allowed when using external team membership, etc., but that's not really different from today - it's just not evaluated at hub startup time anymore. > > What will be lost is simple cases of usually catching typos in usernames, but that's perhaps not the most important thing. Erik: > We could still call normalize and validate username on startup right? These function doesn't require authentication dictionaries. Min: > Yes, and we should. That's not relevant to the issue, though - e.g. a mismatch in spelling between allowed_users and a role assignment. --- Okay I figure we could log a info/warning about users being added to the database because it was listed in load_roles / load_groups, but it would only present once I guess, as after that the user would be in the database. 
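For the record, a hypothetical sketch (not JupyterHub's actual implementation) of what "create the user from role/group config and keep authenticator state consistent" could look like, reusing the hooks quoted earlier:

```python
from jupyterhub import orm
from jupyterhub.utils import maybe_future


async def create_configured_user(app, name):
    """Hypothetical helper: create a user referenced in load_roles/load_groups
    and keep the authenticator's state consistent with a post-restart hub."""
    name = app.authenticator.normalize_username(name)
    if not app.authenticator.validate_username(name):
        raise ValueError(f"Invalid username: {name!r}")

    user = orm.User(name=name)
    app.db.add(user)
    app.db.commit()
    app.log.info("Creating user %s referenced in role/group config", name)

    # mirror what init_users does after a restart,
    # so behavior doesn't change just because the hub restarted
    await maybe_future(app.authenticator.add_user(user))
    return user
```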
There is an info log about this for users added via load_roles, but not for users added via load_groups.

Wieee, thank you @minrk for discussing this with me! Okay, here are the things I've distilled from the conversation so far:

- Assumption: In https://github.com/jupyterhub/oauthenticator/pull/594, @GeorgianaElena and I proceed with the plan to override `check_allowed`, making it return True when the authentication dictionary isn't passed (`None`). By doing this, we make it not crash when `load_roles` or `load_groups` is used to create users.
- Question: do we stop calling `check_allowed` in jupyterhub itself from app.py without an authentication dictionary?
- Assumption: if we do, we ensure `authenticator.add_user` is called when a new user is created
- Question: is this a breaking change, and how do we describe it?
- Assumption: we should log an info message about creating a new user via load_groups, as is done for load_roles
2023-06-14T12:49:32Z
[]
[]
jupyterhub/jupyterhub
4,489
jupyterhub__jupyterhub-4489
[ "4488" ]
161cdcd7e711845d2c4c157c150f251261a5f925
diff --git a/jupyterhub/apihandlers/base.py b/jupyterhub/apihandlers/base.py --- a/jupyterhub/apihandlers/base.py +++ b/jupyterhub/apihandlers/base.py @@ -89,6 +89,11 @@ def check_xsrf_cookie(self): if not hasattr(self, '_jupyterhub_user'): # called too early to check if we're token-authenticated return + if self._jupyterhub_user is None and 'Origin' not in self.request.headers: + # don't raise xsrf if auth failed + # don't apply this shortcut to actual cross-site requests, which have an 'Origin' header, + # which would reveal if there are credentials present + return if getattr(self, '_token_authenticated', False): # if token-authenticated, ignore XSRF return diff --git a/jupyterhub/handlers/base.py b/jupyterhub/handlers/base.py --- a/jupyterhub/handlers/base.py +++ b/jupyterhub/handlers/base.py @@ -236,11 +236,13 @@ def set_default_headers(self): def check_xsrf_cookie(self): try: return super().check_xsrf_cookie() - except Exception as e: - # ensure _juptyerhub_user is defined on rejected requests + except web.HTTPError as e: + # ensure _jupyterhub_user is defined on rejected requests if not hasattr(self, "_jupyterhub_user"): self._jupyterhub_user = None self._resolve_roles_and_scopes() + # rewrite message because we use this on methods other than POST + e.log_message = e.log_message.replace("POST", self.request.method) raise @property diff --git a/jupyterhub/scopes.py b/jupyterhub/scopes.py --- a/jupyterhub/scopes.py +++ b/jupyterhub/scopes.py @@ -845,6 +845,15 @@ def needs_scope(*scopes): def scope_decorator(func): @functools.wraps(func) def _auth_func(self, *args, **kwargs): + if not self.current_user: + # not authenticated at all, fail with more generic message + # this is the most likely permission error - missing or mis-specified credentials, + # don't indicate that they have insufficient permissions. + raise web.HTTPError( + 403, + "Missing or invalid credentials.", + ) + sig = inspect.signature(func) bound_sig = sig.bind(self, *args, **kwargs) bound_sig.apply_defaults() @@ -853,6 +862,11 @@ def _auth_func(self, *args, **kwargs): self.expanded_scopes = {} self.parsed_scopes = {} + try: + end_point = self.request.path + except AttributeError: + end_point = self.__name__ + s_kwargs = {} for resource in {'user', 'server', 'group', 'service'}: resource_name = resource + '_name' @@ -860,14 +874,10 @@ def _auth_func(self, *args, **kwargs): resource_value = bound_sig.arguments[resource_name] s_kwargs[resource] = resource_value for scope in scopes: - app_log.debug("Checking access via scope %s", scope) + app_log.debug("Checking access to %s via scope %s", end_point, scope) has_access = _check_scope_access(self, scope, **s_kwargs) if has_access: return func(self, *args, **kwargs) - try: - end_point = self.request.path - except AttributeError: - end_point = self.__name__ app_log.warning( "Not authorizing access to {}. Requires any of [{}], not derived from scopes [{}]".format( end_point, ", ".join(scopes), ", ".join(self.expanded_scopes)
diff --git a/jupyterhub/tests/test_api.py b/jupyterhub/tests/test_api.py --- a/jupyterhub/tests/test_api.py +++ b/jupyterhub/tests/test_api.py @@ -122,6 +122,41 @@ async def test_xsrf_check(app, username, method, path, xsrf_in_url): assert r.status_code == 403 [email protected]( + "auth, expected_message", + [ + ("", "Missing or invalid credentials"), + ("cookie_no_xsrf", "'_xsrf' argument missing from GET"), + ("cookie_xsrf_mismatch", "XSRF cookie does not match GET argument"), + ("token_no_scope", "requires any of [list:users]"), + ("cookie_no_scope", "requires any of [list:users]"), + ], +) +async def test_permission_error_messages(app, user, auth, expected_message): + # 1. no credentials, should be 403 and not mention xsrf + + url = public_url(app, path="hub/api/users") + + kwargs = {} + kwargs["headers"] = headers = {} + kwargs["params"] = params = {} + if auth == "token_no_scope": + token = user.new_api_token() + headers["Authorization"] = f"Bearer {token}" + elif "cookie" in auth: + cookies = kwargs["cookies"] = await app.login_user(user.name) + if auth == "cookie_no_scope": + params["_xsrf"] = cookies["_xsrf"] + if auth == "cookie_xsrf_mismatch": + params["_xsrf"] = "somethingelse" + + r = await async_requests.get(url, **kwargs) + assert r.status_code == 403 + response = r.json() + message = response["message"] + assert expected_message in message + + # -------------- # User API tests # --------------
'_xsrf' argument missing from POST while calling INFO - GET METHOD REST API ### Bug description Hi Team, I have set up JupyterHub using the below latest _docker_ image command: `docker run -p 8000:8000 -d --name jupyterhub jupyterhub/jupyterhub jupyterhub ` While calling the get Info GET method API using the Postman tool getting the below error: **API Endpoint**: http://localhost:8000/hub/api/info **Error Response:** ``` { "status": 403, "message": "'_xsrf' argument missing from POST" } ``` **Additional Logs:** 2023-06-23 12:24:21 [W 2023-06-23 06:54:21.430 JupyterHub web:1869] 403 GET /hub/api/info (::ffff:172.17.0.1): '_xsrf' argument missing from POST Can you please let me know which token is required here and how to call the other API in the case of GET and POST API methods? Please let me know if you need more information.
Closing as a question rather than a bug, I'll respond in https://discourse.jupyter.org/t/xsrf-argument-missing-from-post-while-calling-info-get-method-rest-api/20083

Hmmm, reopening because I think one thing is off at least in jupyterhub's behavior. Making a GET request shouldn't error with a message like `{"status": 403, "message": "'_xsrf' argument missing from POST"}`, but perhaps like `{"status": 403, "message": "'_xsrf' argument missing from query parameters"}` or more directly like `{"status": 403, "message": "JupyterHub API token not provided"}`

> Can you please let me know which token is required here and how to call the other API in the case of GET and POST API methods?

When accessing the jupyterhub REST API, you need to authorize your requests. I think you need to use an API token passed in the header, like described in https://jupyterhub.readthedocs.io/en/stable/howto/rest.html.

Yes, I think the error message should definitely be better when credentials are missing, because the xsrf note sends people in completely the wrong direction.
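For completeness, calling the endpoint from the issue with credentials looks roughly like this (the token value is a placeholder; one can be generated from the hub's token page or with `jupyterhub token <username>` on the hub server):

```python
import requests

api_token = "<your-api-token>"  # placeholder

r = requests.get(
    "http://localhost:8000/hub/api/info",
    # pass the API token instead of relying on cookies/xsrf
    headers={"Authorization": f"Bearer {api_token}"},
)
# note: this particular endpoint also requires a token whose owner
# has permission to read hub information
r.raise_for_status()
print(r.json())
```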
2023-06-23T11:56:20Z
[]
[]
jupyterhub/jupyterhub
4,503
jupyterhub__jupyterhub-4503
[ "4493" ]
0e437224d087820cdf78e92e31ee75407d1db793
diff --git a/jupyterhub/singleuser/extension.py b/jupyterhub/singleuser/extension.py --- a/jupyterhub/singleuser/extension.py +++ b/jupyterhub/singleuser/extension.py @@ -483,6 +483,11 @@ def load_config_file(self): cfg.answer_yes = True self.config.FileContentsManager.delete_to_trash = False + # load Spawner.notebook_dir configuration, if given + root_dir = os.getenv("JUPYTERHUB_ROOT_DIR", None) + if root_dir: + cfg.root_dir = os.path.expanduser(root_dir) + # load http server config from environment url = urlparse(os.environ['JUPYTERHUB_SERVICE_URL']) if url.port:
diff --git a/jupyterhub/tests/extension/__init__.py b/jupyterhub/tests/extension/__init__.py --- a/jupyterhub/tests/extension/__init__.py +++ b/jupyterhub/tests/extension/__init__.py @@ -30,6 +30,7 @@ def _class_str(obj): info = { "current_user": self.current_user, "config": self.app.config, + "root_dir": self.contents_manager.root_dir, "disable_user_config": getattr(self.app, "disable_user_config", None), "settings": self.settings, "config_file_paths": self.app.config_file_paths, diff --git a/jupyterhub/tests/test_singleuser.py b/jupyterhub/tests/test_singleuser.py --- a/jupyterhub/tests/test_singleuser.py +++ b/jupyterhub/tests/test_singleuser.py @@ -2,6 +2,7 @@ import os import sys from contextlib import nullcontext +from pprint import pprint from subprocess import CalledProcessError, check_output from unittest import mock from urllib.parse import urlencode, urlparse @@ -171,9 +172,7 @@ async def test_disable_user_config(request, app, tmpdir, full_spawn): ) r.raise_for_status() info = r.json() - import pprint - - pprint.pprint(info) + pprint(info) assert info['disable_user_config'] server_config = info['config'] settings = info['settings'] @@ -198,6 +197,79 @@ def assert_not_in_home(path, name): assert_not_in_home(path, key) [email protected]("extension", [True, False]) [email protected]("notebook_dir", ["", "~", "~/sub", "ABS"]) +async def test_notebook_dir( + request, app, tmpdir, user, full_spawn, extension, notebook_dir +): + if extension: + try: + import jupyter_server # noqa + except ImportError: + pytest.skip("needs jupyter-server 2") + else: + if jupyter_server.version_info < (2,): + pytest.skip("needs jupyter-server 2") + + token = user.new_api_token(scopes=["access:servers!user"]) + headers = {"Authorization": f"Bearer {token}"} + + spawner = user.spawner + if extension: + user.spawner.environment["JUPYTERHUB_SINGLEUSER_EXTENSION"] = "1" + else: + user.spawner.environment["JUPYTERHUB_SINGLEUSER_EXTENSION"] = "0" + + home_dir = tmpdir.join("home").mkdir() + sub_dir = home_dir.join("sub").mkdir() + with sub_dir.join("subfile.txt").open("w") as f: + f.write("txt\n") + abs_dir = tmpdir.join("abs").mkdir() + with abs_dir.join("absfile.txt").open("w") as f: + f.write("absfile\n") + + if notebook_dir: + expected_root_dir = notebook_dir.replace("ABS", str(abs_dir)).replace( + "~", str(home_dir) + ) + else: + expected_root_dir = str(home_dir) + + spawner.notebook_dir = notebook_dir.replace("ABS", str(abs_dir)) + + # home_dir is defined on SimpleSpawner + user.spawner.home_dir = home = str(home_dir) + spawner.environment["HOME"] = home + await user.spawn() + await app.proxy.add_user(user) + url = public_url(app, user) + r = await async_requests.get( + url_path_join(public_url(app, user), 'jupyterhub-test-info'), headers=headers + ) + r.raise_for_status() + info = r.json() + pprint(info) + + assert info["root_dir"] == expected_root_dir + # secondary check: make sure it has the intended effect on root_dir + r = await async_requests.get( + url_path_join(public_url(app, user), 'api/contents/'), headers=headers + ) + r.raise_for_status() + root_contents = sorted(item['name'] for item in r.json()['content']) + + # check contents + if not notebook_dir or notebook_dir == "~": + # use any to avoid counting possible automatically created files in $HOME + assert 'sub' in root_contents + elif notebook_dir == "ABS": + assert 'absfile.txt' in root_contents + elif notebook_dir == "~/sub": + assert 'subfile.txt' in root_contents + else: + raise ValueError(f"No contents check for {notebook_dir=}") + + 
def test_help_output(): out = check_output( [sys.executable, '-m', 'jupyterhub.singleuser', '--help-all']
Jupyter 4.0.1 - Unable to access filesystem outside of home directory/notebook directory <!-- Thank you for contributing. These HTML comments will not render in the issue, but you can delete them once you've read them if you prefer! --> ### Bug description I've also posted to Discord. please forgive me as this is my first bug report. LMK the preferred method github v discord. ty I’ve been using jupyterhub for a few years now. I’ve recently tried updating to 4.0.1 but it no longer seems to honor my Spawner.default_url and Spawner.notebook_dir. My system uses the base local single user spawner along with NativeAuthenticator. As of 4.0.1, I’m always locked within the home dir of the user which is not desirable as I have group dirs for users to share behind their home dirs. When I downgrade back to jupyterhub 3.1.1, I can move around as needed I’ve also even tried this article [Troubleshooting — JupyterHub documentation](https://jupyterhub.readthedocs.io/en/stable/faq/troubleshooting.html#i-want-access-to-the-whole-filesystem-and-still-default-users-to-their-home-directory). This doesn’t appear to work at all as the redirects dup the users homedir path in the url #### Expected behaviour c.Spawner.notebook_dir should start at path specified. Currently only drops user into homedir as "/" and therefore locked within that dir. #### Actual behaviour c.Spawner.notebook_dir = ''/some/path" should start the user at some specified path instead of forced homedir ### How to reproduce user homedir ex.... /my_dir/username/nbs install 4.x jupyter hub set config: c.Spawner.notebook_dir = "/my_dir" c.Spawner.default_url = '/lab' forces user into home '/my_dir/username'. I want users to be able to go a level back for shared folders reverting to Jupyterhub 3.1.1 fixes issue ### Your personal set up Docker container cuda 11.8 ubuntu 20.08 running jupyterhub in conda base default local spawner NativeAuthenticator for auth
I received an answer on the Discord thread: `c.Spawner.args=['--ContentsManager.root_dir=<dir_where_you_want_to_start_jlab>']` fixed my issue.

On a side note, where does this `ContentsManager` come from? A search of the documentation doesn't yield anything… [Search - JupyterHub documentation](https://jupyterhub.readthedocs.io/en/stable/search.html?q=ContentsManager)

There is at least one more [discussion](https://discourse.jupyter.org/t/c-spawner-notebook-dir-does-not-have-the-expected-effect/19913) regarding the same problem in JHub 4. I guess it is a side effect of making the single-user app a Jupyter Server extension. I think it is worth documenting in the `notebook_dir` traitlet that users need to pass the same directory via `ContentsManager.root_dir` in `c.Spawner.args` in JHub 4.
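Spelled out as configuration, the workaround from the thread looks roughly like this (the directory is the example path from the report; adjust to your deployment):

```python
# jupyterhub_config.py
c.Spawner.default_url = "/lab"
c.Spawner.notebook_dir = "/my_dir"
# On JupyterHub 4.x, where the single-user server runs as a Jupyter Server
# extension, notebook_dir alone may not take effect, so pass the root dir
# to the single-user server explicitly as well:
c.Spawner.args = ["--ContentsManager.root_dir=/my_dir"]
```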
2023-07-03T10:57:02Z
[]
[]
jupyterhub/jupyterhub
4,542
jupyterhub__jupyterhub-4542
[ "4524" ]
862455ee5684e4d013646f19b6f492fada9dc9e6
diff --git a/jupyterhub/apihandlers/base.py b/jupyterhub/apihandlers/base.py --- a/jupyterhub/apihandlers/base.py +++ b/jupyterhub/apihandlers/base.py @@ -532,7 +532,7 @@ def paginated_model(self, items, offset, limit, total_count): if next_offset < total_count: # if there's a next page next_url_parsed = urlparse(self.request.full_url()) - query = parse_qs(next_url_parsed.query) + query = parse_qs(next_url_parsed.query, keep_blank_values=True) query['offset'] = [next_offset] query['limit'] = [limit] next_url_parsed = next_url_parsed._replace(
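Why the one-line `keep_blank_values=True` change matters: by default, `parse_qs` silently drops valueless query parameters such as `include_stopped_servers`, which is exactly how the flag got lost when the next-page URL was rebuilt.

```python
from urllib.parse import parse_qs

query = "include_stopped_servers&limit=1&offset=1"

print(parse_qs(query))
# {'limit': ['1'], 'offset': ['1']}  -- the flag is gone

print(parse_qs(query, keep_blank_values=True))
# {'include_stopped_servers': [''], 'limit': ['1'], 'offset': ['1']}
```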
diff --git a/jupyterhub/tests/test_api.py b/jupyterhub/tests/test_api.py --- a/jupyterhub/tests/test_api.py +++ b/jupyterhub/tests/test_api.py @@ -7,7 +7,7 @@ from copy import deepcopy from datetime import datetime, timedelta from unittest import mock -from urllib.parse import quote, urlparse +from urllib.parse import parse_qs, quote, urlparse import pytest from pytest import fixture, mark @@ -268,20 +268,22 @@ def max_page_limit(app): @mark.user @mark.role @mark.parametrize( - "n, offset, limit, accepts_pagination, expected_count", + "n, offset, limit, accepts_pagination, expected_count, include_stopped_servers", [ - (10, None, None, False, 10), - (10, None, None, True, 10), - (10, 5, None, True, 5), - (10, 5, None, False, 5), - (10, 5, 1, True, 1), - (10, 10, 10, True, 0), + (10, None, None, False, 10, False), + (10, None, None, True, 10, False), + (10, 5, None, True, 5, False), + (10, 5, None, False, 5, False), + (10, None, 5, True, 5, True), + (10, 5, 1, True, 1, True), + (10, 10, 10, True, 0, False), ( # default page limit, pagination expected 30, None, None, True, 'default', + False, ), ( # default max page limit, pagination not expected @@ -290,6 +292,7 @@ def max_page_limit(app): None, False, 'max', + False, ), ( # limit exceeded @@ -298,6 +301,7 @@ def max_page_limit(app): 500, False, 'max', + False, ), ], ) @@ -310,6 +314,7 @@ async def test_get_users_pagination( expected_count, default_page_limit, max_page_limit, + include_stopped_servers, ): db = app.db @@ -336,6 +341,11 @@ async def test_get_users_pagination( if limit: params['limit'] = limit url = url_concat(url, params) + if include_stopped_servers: + # assumes limit is set. There doesn't seem to be a way to set valueless query + # params using url_cat + url += "&include_stopped_servers" + headers = auth_header(db, 'admin') if accepts_pagination: headers['Accept'] = PAGINATION_MEDIA_TYPE @@ -348,6 +358,11 @@ async def test_get_users_pagination( "_pagination", } pagination = response["_pagination"] + if include_stopped_servers and pagination["next"]: + next_query = parse_qs( + urlparse(pagination["next"]["url"]).query, keep_blank_values=True + ) + assert "include_stopped_servers" in next_query users = response["items"] else: users = response
/hub/api/users?include_stopped_servers doesn't include ?include_stopped_servers in the next_url when paginating ### Bug description /hub/api/users?include_stopped_servers doesn't include ?include_stopped_servers in the next_url when paginating. This means the first page has stopped servers, but no subsequent pages do. I'd assume that all query params aside from offset should be replicated to the next_url. #### Expected behaviour ?include_stopped_servers should be in the pagination url #### Actual behaviour ?include_stopped_servers is not in the url so only the first page has stopped servers ### How to reproduce 1. On 2 different users create 2 stopped named servers. 2. Make an API Request: /hub/api/users?include_stopped_servers&limit=1 3. Follow the pagination url: /hub/api/users?limit=1&offset=1 4. No servers are listed for second user ### Your personal set up Zero 2 jupyterhub 2.0.
You're using an old version of JupyterHub/Z2JH. Can you check if this bug is still present in the latest version of JupyterHub (4.0.1), which is included in Z2JH `3.0.0-beta.3` and later? https://hub.jupyter.org/helm-chart/#development-releases-jupyterhub I'm waiting for the stable release of the chart. Will let you know when it comes out and I upgrade. @manics this is confirmed still a bug on 4.0.2 using the 3.0.2 helm chart. <img width="586" alt="image" src="https://github.com/jupyterhub/jupyterhub/assets/301208/f3f21583-5ffb-4dd2-9282-44fd4692007b">
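A sketch of a client that pages through the users list with stopped servers included, following the `_pagination.next.url` structure exercised by the test above (URL and token are placeholders):

```python
import requests


def list_all_users(hub_api="http://localhost:8000/hub/api", token="<api-token>"):
    headers = {
        "Authorization": f"Bearer {token}",
        # opt in to the paginated response format
        "Accept": "application/jupyterhub-pagination+json",
    }
    url = f"{hub_api}/users?include_stopped_servers&limit=50"
    users = []
    while url:
        r = requests.get(url, headers=headers)
        r.raise_for_status()
        page = r.json()
        users.extend(page["items"])
        # follow the server-provided next-page URL, if any
        next_page = page["_pagination"].get("next")
        url = next_page["url"] if next_page else None
    return users
```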
2023-08-22T18:11:12Z
[]
[]
jupyterhub/jupyterhub
4,570
jupyterhub__jupyterhub-4570
[ "4569" ]
30ed97d153d3e887c15a6d98e637ba2b19d7fb89
diff --git a/jupyterhub/scopes.py b/jupyterhub/scopes.py --- a/jupyterhub/scopes.py +++ b/jupyterhub/scopes.py @@ -257,8 +257,12 @@ def groups_for_server(server): } # resolve hierarchies (group/user/server) in both directions - common_servers = common_filters[base].get("server", set()) - common_users = common_filters[base].get("user", set()) + common_servers = initial_common_servers = common_filters[base].get( + "server", frozenset() + ) + common_users = initial_common_users = common_filters[base].get( + "user", frozenset() + ) for a, b in [(filters_a, filters_b), (filters_b, filters_a)]: if 'server' in a and b.get('server') != a['server']: @@ -270,7 +274,7 @@ def groups_for_server(server): for server in servers: username, _, servername = server.partition("/") if username in b['user']: - common_servers.add(server) + common_servers = common_servers | {server} # resolve group/server hierarchy if db available servers = servers.difference(common_servers) @@ -279,7 +283,7 @@ def groups_for_server(server): for server in servers: server_groups = groups_for_server(server) if server_groups & b['group']: - common_servers.add(server) + common_servers = common_servers | {server} # resolve group/user hierarchy if db available and user sets aren't identical if ( @@ -293,14 +297,16 @@ def groups_for_server(server): for username in users: groups = groups_for_user(username) if groups & b["group"]: - common_users.add(username) + common_users = common_users | {username} - # add server filter if there wasn't one before - if common_servers and "server" not in common_filters[base]: + # add server filter if it's non-empty + # and it changed + if common_servers and common_servers != initial_common_servers: common_filters[base]["server"] = common_servers - # add user filter if it's non-empty and there wasn't one before - if common_users and "user" not in common_filters[base]: + # add user filter if it's non-empty + # and it changed + if common_users and common_users != initial_common_users: common_filters[base]["user"] = common_users intersection = unparse_scopes(common_filters)
diff --git a/jupyterhub/tests/test_scopes.py b/jupyterhub/tests/test_scopes.py --- a/jupyterhub/tests/test_scopes.py +++ b/jupyterhub/tests/test_scopes.py @@ -912,6 +912,22 @@ def test_intersect_expanded_scopes(left, right, expected, should_warn, recwarn): ["read:users!user=uy"], {"gx": ["ux"], "gy": ["uy"]}, ), + ( + # make sure the group > user > server hierarchy + # is managed + ["read:servers!server=ux/server", "read:servers!group=gy"], + ["read:servers!server=uy/server", "read:servers!user=ux"], + ["read:servers!server=ux/server", "read:servers!server=uy/server"], + {"gx": ["ux"], "gy": ["uy"]}, + ), + ( + # make sure the group > user hierarchy + # is managed + ["read:servers!user=ux", "read:servers!group=gy"], + ["read:servers!user=uy", "read:servers!group=gx"], + ["read:servers!user=ux", "read:servers!user=uy"], + {"gx": ["ux"], "gy": ["uy"]}, + ), ], ) def test_intersect_groups(request, db, left, right, expected, groups):
Call to .add on a frozenset in _intersect_expanded_scopes fails ### Bug description Unexpected frozenset in `jupyterhub.scopes._intersect_expanded_scopes` means that any call (e.g. [here](https://github.com/jupyterhub/jupyterhub/blame/4.0.2/jupyterhub/scopes.py#L269)) to `.add(...)` fails with `AttributeError: 'frozenset' object has no attribute 'add'`. ### How to reproduce Not sure if there's a specific behavior or configuration that led to this, or of it's just not commonly hit due to a default codepath that results in a mutable `set()`. With just a brief review of the code I'm not sure how this `.add()` could ever work unless the `.get(..., set())` was usually just returning the default mutable set and not the frozenset when the key is actually found. The variable `common_servers` which in this case had `.add(...)` called on it triggering the AttributeError derives from a variable originally returned from `parse_scopes`, which returns a `_memoize.FrozenDict`. That class serializes any regular sets into frozensets: https://github.com/jupyterhub/jupyterhub/blob/4.0.2/jupyterhub/_memoize.py#L137-L138 #### Expected behaviour Scope checking would succeed and the original API request (in this case to `/activity`) would succeed 200 OK. #### Actual behaviour 500 error due to `jupyterhub.scopes._intersect_expanded_scopes` ### Your personal set up Ubuntu 20.04 Python 3.8 JupyterHub 4.0.2 TLJH 1.0.0 with DockerSpawner <details><summary>Full environment</summary> ``` aiohttp==3.8.5 aiosignal==1.3.1 alembic==1.12.0 async-generator==1.10 async-timeout==4.0.3 attrs==23.1.0 backoff==2.2.1 bcrypt==4.0.1 certifi==2023.7.22 certipy==0.1.3 cffi==1.15.1 charset-normalizer==3.2.0 cryptography==41.0.3 docker==6.1.3 dockerspawner==12.1.0 escapism==1.0.1 frozenlist==1.4.0 greenlet==2.0.2 idna==3.4 importlib-metadata==6.8.0 importlib-resources==6.0.1 Jinja2==3.1.2 jsonschema==4.19.0 jsonschema-specifications==2023.7.1 jupyter-telemetry==0.1.0 jupyterhub==4.0.2 jupyterhub-firstuseauthenticator==1.0.0 jupyterhub-idle-culler==1.2.1 jupyterhub-ldapauthenticator==1.3.2 jupyterhub-nativeauthenticator==1.2.0 jupyterhub-systemdspawner==1.0.1 jupyterhub-tmpauthenticator==1.0.0 jupyterhub-traefik-proxy==1.1.0 ldap3==2.9.1 Mako==1.2.4 MarkupSafe==2.1.3 multidict==6.0.4 oauthenticator==16.0.7 oauthlib==3.2.2 onetimepass==1.0.1 packaging==23.1 pamela==1.1.0 passlib==1.7.4 pkg_resources==0.0.0 pkgutil_resolve_name==1.3.10 pluggy==1.3.0 prometheus-client==0.17.1 psycopg2-binary==2.9.7 pyasn1==0.5.0 pycparser==2.21 pycurl==7.45.2 pyOpenSSL==23.2.0 python-dateutil==2.8.2 python-json-logger==2.0.7 referencing==0.30.2 requests==2.31.0 rpds-py==0.10.3 ruamel.yaml==0.17.32 ruamel.yaml.clib==0.2.7 six==1.16.0 SQLAlchemy==2.0.20 the-littlest-jupyterhub @ git+https://github.com/jupyterhub/the-littlest-jupyterhub.git@fc8e19b1b5663f58f0e7b089903d1d1769db06b8 toml==0.10.2 tornado==6.3.3 traitlets==5.10.0 typing_extensions==4.7.1 urllib3==2.0.4 websocket-client==1.6.3 yarl==1.9.2 zipp==3.16.2 ``` </details> <details><summary>Configuration</summary> Using `DockerSpawner`, but skipping `jupyterhub_config.py` configuration since this happens pretty deep within the `jupyterhub` lib and doesn't seem too related to the configuration. Please let me know if this needs to be added. 
</details> <details><summary>Logs</summary> ``` [E 2023-09-14 14:25:08.361 JupyterHub web:1871] Uncaught exception POST /hub/api/users/u89d3d00/activity (172.17.0.2) HTTPServerRequest(protocol='http', host='10.1.45.4:15001', method='POST', uri='/hub/api/users/u89d3d00/activity', version='HTTP/1.1', remote_ip='172.17.0.2') Traceback (most recent call last): File "/opt/tljh/hub/lib/python3.8/site-packages/tornado/web.py", line 1765, in _execute result = await result # type: ignore File "/opt/tljh/hub/lib/python3.8/site-packages/jupyterhub/apihandlers/base.py", line 79, in prepare await super().prepare() File "/opt/tljh/hub/lib/python3.8/site-packages/jupyterhub/handlers/base.py", line 100, in prepare self._resolve_roles_and_scopes() File "/opt/tljh/hub/lib/python3.8/site-packages/jupyterhub/handlers/base.py", line 465, in _resolve_roles_and_scopes self.expanded_scopes = scopes.get_scopes_for(orm_token) File "/opt/tljh/hub/lib/python3.8/site-packages/jupyterhub/scopes.py", line 366, in get_scopes_for intersection = _intersect_expanded_scopes( File "/opt/tljh/hub/lib/python3.8/site-packages/jupyterhub/_memoize.py", line 96, in cached result = func(*args, **kwargs) File "/opt/tljh/hub/lib/python3.8/site-packages/jupyterhub/scopes.py", line 269, in _intersect_expanded_scopes common_servers.add(server) AttributeError: 'frozenset' object has no attribute 'add' [E 2023-09-14 14:25:08.363 JupyterHub log:183] { "Authorization": "token [secret]", "Content-Type": "application/json", "Connection": "close", "Host": "10.1.45.4:15001", "User-Agent": "Tornado/6.3.2", "Content-Length": "115", "Accept-Encoding": "gzip" } [E 2023-09-14 14:25:08.363 JupyterHub log:191] 500 POST /hub/api/users/u89d3d00/activity ([email protected]) 99.21ms ``` </details>
This patch appears to fix the issue:

```diff
diff --git a/jupyterhub/scopes.py b/jupyterhub/scopes.py
index 00170133..5a1ec499 100644
--- a/jupyterhub/scopes.py
+++ b/jupyterhub/scopes.py
@@ -253,8 +253,8 @@ def _intersect_expanded_scopes(scopes_a, scopes_b, db=None):
     }
 
     # resolve hierarchies (group/user/server) in both directions
-    common_servers = common_filters[base].get("server", set())
-    common_users = common_filters[base].get("user", set())
+    common_servers = set(common_filters[base].get("server", set()))
+    common_users = set(common_filters[base].get("user", set()))
 
     for a, b in [(filters_a, filters_b), (filters_b, filters_a)]:
         if 'server' in a and b.get('server') != a['server']:
```
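A minimal demonstration of the failure mode, and of why the merged fix builds new sets instead of mutating in place:

```python
fs = frozenset({"ux/server"})

try:
    fs.add("uy/server")        # what the old code effectively did
except AttributeError as e:
    print(e)                   # 'frozenset' object has no attribute 'add'

fs = fs | {"uy/server"}        # what the merged fix does instead
print(sorted(fs))              # ['ux/server', 'uy/server']
```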
2023-09-15T07:28:05Z
[]
[]
jupyterhub/jupyterhub
4,578
jupyterhub__jupyterhub-4578
[ "4576" ]
e633199ea98f36d6a4d378520ea40fa1a9267abb
diff --git a/jupyterhub/apihandlers/users.py b/jupyterhub/apihandlers/users.py --- a/jupyterhub/apihandlers/users.py +++ b/jupyterhub/apihandlers/users.py @@ -439,6 +439,18 @@ async def post(self, user_name): token_roles = body.get("roles") token_scopes = body.get("scopes") + # check type of permissions + for key in ("roles", "scopes"): + value = body.get(key) + if value is None: + continue + if not isinstance(value, list) or not all( + isinstance(item, str) for item in value + ): + raise web.HTTPError( + 400, f"token {key} must be null or a list of strings, not {value!r}" + ) + try: api_token = user.new_api_token( note=note, @@ -446,7 +458,7 @@ async def post(self, user_name): roles=token_roles, scopes=token_scopes, ) - except ValueError as e: + except (ValueError, KeyError) as e: raise web.HTTPError(400, str(e)) if requester is not user: self.log.info(
diff --git a/jupyterhub/tests/browser/test_browser.py b/jupyterhub/tests/browser/test_browser.py --- a/jupyterhub/tests/browser/test_browser.py +++ b/jupyterhub/tests/browser/test_browser.py @@ -418,6 +418,12 @@ async def test_token_request_form_and_panel(app, browser, user): selected_value = dropdown.locator('option[selected]') await expect(selected_value).to_have_text("Never") + # check scopes field + scopes_input = browser.get_by_label("Permissions") + await expect(scopes_input).to_be_editable() + await expect(scopes_input).to_be_enabled() + await expect(scopes_input).to_be_empty() + # verify that "Your new API Token" panel shows up with the new API token await request_btn.click() await browser.wait_for_load_state("load") @@ -472,10 +478,8 @@ async def test_request_token_expiration(app, browser, token_opt, note, user): note_field = browser.get_by_role("textbox").first await note_field.fill(note) # click on Request token button - reqeust_btn = browser.locator('//div[@class="text-center"]').get_by_role( - "button" - ) - await reqeust_btn.click() + request_button = browser.locator('//button[@type="submit"]') + await request_button.click() # wait for token response to show up on the page await browser.wait_for_load_state("load") token_result = browser.locator("#token-result") @@ -483,7 +487,7 @@ async def test_request_token_expiration(app, browser, token_opt, note, user): # reload the page await browser.reload(wait_until="load") # API Tokens table: verify that elements are displayed - api_token_table_area = browser.locator('//div[@class="row"]').nth(2) + api_token_table_area = browser.locator("div#api-tokens-section").nth(0) await expect(api_token_table_area.get_by_role("table")).to_be_visible() await expect(api_token_table_area.locator("tr.token-row")).to_have_count(1) @@ -498,26 +502,30 @@ async def test_request_token_expiration(app, browser, token_opt, note, user): else: expected_note = "Requested via token page" assert orm_token.note == expected_note + note_on_page = ( await api_token_table_area.locator("tr.token-row") .get_by_role("cell") .nth(0) .inner_text() ) + assert note_on_page == expected_note + last_used_text = ( await api_token_table_area.locator("tr.token-row") .get_by_role("cell") - .nth(1) + .nth(2) .inner_text() ) + assert last_used_text == "Never" + expires_at_text = ( await api_token_table_area.locator("tr.token-row") .get_by_role("cell") - .nth(3) + .nth(4) .inner_text() ) - assert last_used_text == "Never" if token_opt == "Never": assert orm_token.expires_at is None @@ -533,15 +541,77 @@ async def test_request_token_expiration(app, browser, token_opt, note, user): assert expires_at_text == "Never" # verify that the button for revoke is presented revoke_btn = ( - api_token_table_area.locator("tr.token-row") - .get_by_role("cell") - .nth(4) - .get_by_role("button") + api_token_table_area.locator("tr.token-row").get_by_role("button").nth(0) ) await expect(revoke_btn).to_be_visible() await expect(revoke_btn).to_have_text("revoke") [email protected]( + "permissions_str, granted", + [ + ("", {"inherit"}), + ("inherit", {"inherit"}), + ("read:users!user, ", {"read:users!user"}), + ( + "read:users!user, access:servers!user", + {"read:users!user", "access:servers!user"}, + ), + ( + "read:users:name!user access:servers!user ,, read:servers!user", + {"read:users:name!user", "access:servers!user", "read:servers!user"}, + ), + # errors + ("nosuchscope", "does not exist"), + ("inherit, nosuchscope", "does not exist"), + ("admin:users", "Not assigning requested scopes"), + ], +) 
+async def test_request_token_permissions(app, browser, permissions_str, granted, user): + """verify request token with the different options""" + + # open the token page + await open_token_page(app, browser, user) + scopes_input = browser.get_by_label("Permissions") + await scopes_input.fill(permissions_str) + request_button = browser.locator('//button[@type="submit"]') + await request_button.click() + + if isinstance(granted, str): + expected_error = granted + granted = False + + if not granted: + error_dialog = browser.locator("#error-dialog") + await expect(error_dialog).to_be_visible() + error_message = await error_dialog.locator(".modal-body").inner_text() + assert "API request failed (400)" in error_message + assert expected_error in error_message + return + + await browser.reload(wait_until="load") + + # API Tokens table: verify that elements are displayed + api_token_table_area = browser.locator("div#api-tokens-section").nth(0) + await expect(api_token_table_area.get_by_role("table")).to_be_visible() + await expect(api_token_table_area.locator("tr.token-row")).to_have_count(1) + + # getting values from DB to compare with values on UI + assert len(user.api_tokens) == 1 + orm_token = user.api_tokens[-1] + assert set(orm_token.scopes) == granted + + permissions_on_page = ( + await api_token_table_area.locator("tr.token-row") + .get_by_role("cell") + .nth(1) + .locator('//pre[@class="token-scope"]') + .all_text_contents() + ) + # specifically use list to test that entries don't appear twice + assert sorted(permissions_on_page) == sorted(granted) + + @pytest.mark.parametrize( "token_type", [
How to create token with limited permissions? The [Assigning permissions to a token](https://jupyterhub.readthedocs.io/en/stable/howto/rest.html#assigning-permissions-to-a-token) section of the documentation says: > In JupyterHub 2.0, specific permissions are now defined as ‘scopes’, and can be assigned both at the user/service level, and at the individual token level. > > This allows e.g. a user with full admin permissions to request a token with limited permissions. But the documentation on _how_ to actually obtain a token with limited permissions/scopes is missing. The web UI allows to set only the expiration date and it also does not show which tokens have which permissions/scopes.
There should definitely be a clear, explicit tutorial on this! The only documentation so far is in the [rest api reference](https://jupyterhub.readthedocs.io/en/stable/reference/rest-api.html#/default/post_users__name__tokens), which is not particularly well rendered, as the description of the scopes field is behind a few clicks on the request body schema:

<img width="741" alt="Screenshot 2023-09-25 at 10 12 34" src="https://github.com/jupyterhub/jupyterhub/assets/151929/19a758db-2db6-4914-ad27-0d982a067b15">

While I work on these docs, the way to request a token with scopes is:

```python
import requests

api_token = "..."
hub_url = "https://hub.example.com"

# pick your scopes:
scopes = ["read:users", "admin:servers!user"]
# your name
username = "myself"

r = requests.post(
    hub_url + f"/hub/api/users/{username}/tokens",
    json={"scopes": scopes},
    headers={"Authorization": f"Bearer {api_token}"},
)
r.raise_for_status()
token_response = r.json()
```

There is no UI for this, mainly because generating good inputs for the list of scopes and filters is some tricky UI work. We could have a free text field in the meantime. Not the friendliest, but at least fully functional.
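Continuing the snippet above: the response is a token model whose `token` field holds the new, scope-limited secret, which can then be used like any other API token:

```python
# `token_response` and `hub_url` come from the request example above
new_token = token_response["token"]

r = requests.get(
    hub_url + "/hub/api/users",
    headers={"Authorization": f"Bearer {new_token}"},
)
r.raise_for_status()
```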
2023-09-25T10:30:00Z
[]
[]
jupyterhub/jupyterhub
4,594
jupyterhub__jupyterhub-4594
[ "4349" ]
4555d5bbb28575d02ec590979f8ed4ca2cae81d5
diff --git a/examples/user-sharing/jupyterhub_config.py b/examples/user-sharing/jupyterhub_config.py new file mode 100644 --- /dev/null +++ b/examples/user-sharing/jupyterhub_config.py @@ -0,0 +1,37 @@ +c = get_config() # noqa + + +c.JupyterHub.authenticator_class = 'dummy' +c.JupyterHub.spawner_class = 'simple' + +c.Authenticator.allowed_users = {"sharer", "shared-with"} + +# put the current directory on sys.path for shareextension.py +from pathlib import Path + +here = Path(__file__).parent.absolute() +c.Spawner.notebook_dir = str(here) + +# users need sharing permissions for their own servers +c.JupyterHub.load_roles = [ + { + "name": "user", + "scopes": ["self", "shares!user"], + }, +] + +# below are two ways to grant sharing permission to a single-user server. +# there's no reason to use both + + +# OAuth token should have sharing permissions, +# so JupyterLab javascript can manage shares +c.Spawner.oauth_client_allowed_scopes = ["access:servers!server", "shares!server"] + +# grant $JUPYTERHUB_API_TOKEN sharing permissions +# so that _python_ code can manage shares +c.Spawner.server_token_scopes = [ + "shares!server", # manage shares + "servers!server", # start/stop itself + "users:activity!server", # report activity +] diff --git a/jupyterhub/apihandlers/__init__.py b/jupyterhub/apihandlers/__init__.py --- a/jupyterhub/apihandlers/__init__.py +++ b/jupyterhub/apihandlers/__init__.py @@ -1,6 +1,6 @@ -from . import auth, groups, hub, proxy, services, users +from . import auth, groups, hub, proxy, services, shares, users from .base import * # noqa default_handlers = [] -for mod in (auth, hub, proxy, users, groups, services): +for mod in (auth, hub, proxy, users, groups, services, shares): default_handlers.extend(mod.default_handlers) diff --git a/jupyterhub/apihandlers/base.py b/jupyterhub/apihandlers/base.py --- a/jupyterhub/apihandlers/base.py +++ b/jupyterhub/apihandlers/base.py @@ -200,6 +200,7 @@ def server_model(self, spawner, *, user=None): model = { 'name': orm_spawner.name, + 'full_name': f"{orm_spawner.user.name}/{orm_spawner.name}", 'last_activity': isoformat(orm_spawner.last_activity), 'started': isoformat(orm_spawner.started), 'pending': pending, diff --git a/jupyterhub/apihandlers/shares.py b/jupyterhub/apihandlers/shares.py new file mode 100644 --- /dev/null +++ b/jupyterhub/apihandlers/shares.py @@ -0,0 +1,590 @@ +"""Handlers for Shares and Share Codes""" + +# Copyright (c) Jupyter Development Team. +# Distributed under the terms of the Modified BSD License. +import json +import re +from typing import List, Optional + +from pydantic import ( + BaseModel, + ConfigDict, + ValidationError, + conint, + field_validator, + model_validator, +) +from sqlalchemy import or_ +from sqlalchemy.orm import joinedload +from tornado import web +from tornado.httputil import url_concat + +from .. 
import orm +from ..scopes import _check_scopes_exist, needs_scope +from ..utils import isoformat +from .base import APIHandler +from .groups import _GroupAPIHandler + +_share_code_id_pat = re.compile(r"sc_(\d+)") + + +class BaseShareRequest(BaseModel): + model_config = ConfigDict(extra='forbid') + scopes: Optional[List[str]] = None + + @field_validator("scopes") + @classmethod + def _check_scopes_exist(cls, scopes): + if not scopes: + return None + _check_scopes_exist(scopes, who_for="share") + return scopes + + +class ShareGrantRequest(BaseShareRequest): + """Validator for requests to grant sharing permission""" + + # directly granted shares don't expire + # since Shares are _modifications_ of permissions, + # expiration can get weird + # if it's going to expire, it must expire in + # at least one minute and at most 10 years (avoids nonsense values) + # expires_in: conint(ge=60, le=10 * 525600 * 60) | None = None + user: Optional[str] = None + group: Optional[str] = None + + @model_validator(mode='after') + def user_group_exclusive(self): + if self.user and self.group: + raise ValueError("Expected exactly one of `user` or `group`, not both.") + if self.user is None and self.group is None: + raise ValueError("Specify exactly one of `user` or `group`") + return self + + +class ShareRevokeRequest(ShareGrantRequest): + """Validator for requests to revoke sharing permission""" + + # currently identical to ShareGrantRequest + + +class ShareCodeGrantRequest(BaseShareRequest): + """Validator for requests to create sharing codes""" + + # must be at least one minute, at most one year, default to one day + expires_in: conint(ge=60, le=525600 * 60) = 86400 + + +class _ShareAPIHandler(APIHandler): + def server_model(self, spawner): + """Truncated server model for use in shares + + - Adds "user" field (just name for now) + - Limits fields to "name", "url", "ready" + from standard server model + """ + user = self.users[spawner.user.id] + if spawner.name in user.spawners: + # use Spawner wrapper if it's active + spawner = user.spawners[spawner.name] + full_model = super().server_model(spawner, user=user) + # filter out subset of fields + server_model = { + "user": { + "name": spawner.user.name, + } + } + # subset keys for sharing + for key in ["name", "url", "ready"]: + if key in full_model: + server_model[key] = full_model[key] + + return server_model + + def share_model(self, share): + """Compute the REST API model for a share""" + return { + "server": self.server_model(share.spawner), + "scopes": share.scopes, + "user": {"name": share.user.name} if share.user else None, + "group": {"name": share.group.name} if share.group else None, + "kind": "group" if share.group else "user", + "created_at": isoformat(share.created_at), + } + + def share_code_model(self, share_code, code=None): + """Compute the REST API model for a share code""" + model = { + "server": self.server_model(share_code.spawner), + "scopes": share_code.scopes, + "id": f"sc_{share_code.id}", + "created_at": isoformat(share_code.created_at), + "expires_at": isoformat(share_code.expires_at), + "exchange_count": share_code.exchange_count, + "last_exchanged_at": isoformat(share_code.last_exchanged_at), + } + if code: + model["code"] = code + model["accept_url"] = url_concat( + self.hub.base_url + "accept-share", {"code": code} + ) + return model + + def _init_share_query(self, kind="share"): + """Initialize a query for Shares + + before applying filters + + A method so we can consolidate joins, etc. 
+ """ + if kind == "share": + class_ = orm.Share + elif kind == "code": + class_ = orm.ShareCode + else: + raise ValueError( + f"kind must be `share` or `code`, not {kind!r}" + ) # pragma: no cover + + query = self.db.query(class_).options( + joinedload(class_.owner).lazyload("*"), + joinedload(class_.spawner).joinedload(orm.Spawner.user).lazyload("*"), + ) + if kind == 'share': + query = query.options( + joinedload(class_.user).joinedload(orm.User.groups).lazyload("*"), + joinedload(class_.group).lazyload("*"), + ) + return query + + def _share_list_model(self, query, kind="share"): + """Finish a share query, returning the _model_""" + offset, limit = self.get_api_pagination() + if kind == "share": + model_method = self.share_model + elif kind == "code": + model_method = self.share_code_model + else: + raise ValueError( + f"kind must be `share` or `code`, not {kind!r}" + ) # pragma: no cover + + if kind == "share": + class_ = orm.Share + elif kind == "code": + class_ = orm.ShareCode + + total_count = query.count() + query = query.order_by(class_.id.asc()).offset(offset).limit(limit) + share_list = [model_method(share) for share in query if not share.expired] + return self.paginated_model(share_list, offset, limit, total_count) + + def _lookup_spawner(self, user_name, server_name, raise_404=True): + """Lookup orm.Spawner for user_name/server_name + + raise 404 if not found + """ + user = self.find_user(user_name) + if user and server_name in user.orm_spawners: + return user.orm_spawners[server_name] + if raise_404: + raise web.HTTPError(404, f"No such server: {user_name}/{server_name}") + else: + return None + + +class UserShareListAPIHandler(_ShareAPIHandler): + """List shares a user has access to + + includes access granted via group membership + """ + + @needs_scope("read:users:shares") + def get(self, user_name): + user = self.find_user(user_name) + if user is None: + raise web.HTTPError(404, f"No such user: {user_name}") + query = self._init_share_query() + filter = orm.Share.user == user + if user.groups: + filter = or_( + orm.Share.user == user, + orm.Share.group_id.in_([group.id for group in user.groups]), + ) + query = query.filter(filter) + self.finish(json.dumps(self._share_list_model(query))) + + +class UserShareAPIHandler(_ShareAPIHandler): + def _lookup_share(self, user_name, owner_name, server_name): + """Lookup the Share this URL represents + + raises 404 if not found + """ + user = self.find_user(user_name) + if user is None: + raise web.HTTPError( + 404, + f"No such share for user {user_name} on {owner_name}/{server_name}", + ) + spawner = self._lookup_spawner(owner_name, server_name, raise_404=False) + share = None + if spawner: + share = orm.Share.find(self.db, spawner, share_with=user.orm_user) + if share is not None: + return share + else: + raise web.HTTPError( + 404, + f"No such share for user {user_name} on {owner_name}/{server_name}", + ) + + @needs_scope("read:users:shares") + def get(self, user_name, owner_name, _server_name): + share = self._lookup_share(user_name, owner_name, _server_name) + self.finish(json.dumps(self.share_model(share))) + + @needs_scope("users:shares") + def delete(self, user_name, owner_name, _server_name): + share = self._lookup_share(user_name, owner_name, _server_name) + self.db.delete(share) + self.db.commit() + self.set_status(204) + + +class GroupShareListAPIHandler(_ShareAPIHandler, _GroupAPIHandler): + """List shares granted to a group""" + + @needs_scope("read:groups:shares") + def get(self, group_name): + group = 
self.find_group(group_name) + query = self._init_share_query() + query = query.filter(orm.Share.group == group) + self.finish(json.dumps(self._share_list_model(query))) + + +class GroupShareAPIHandler(_ShareAPIHandler, _GroupAPIHandler): + """A single group's access to a single server""" + + def _lookup_share(self, group_name, owner_name, server_name): + """Lookup the Share this URL represents + + raises 404 if not found + """ + group = self.find_group(group_name) + spawner = self._lookup_spawner(owner_name, server_name, raise_404=False) + share = None + if spawner: + share = orm.Share.find(self.db, spawner, share_with=group) + if share is not None: + return share + else: + raise web.HTTPError( + 404, + f"No such share for group {group_name} on {owner_name}/{server_name}", + ) + + @needs_scope("read:groups:shares") + def get(self, group_name, owner_name, _server_name): + share = self._lookup_share(group_name, owner_name, _server_name) + self.finish(json.dumps(self.share_model(share))) + + @needs_scope("groups:shares") + def delete(self, group_name, owner_name, _server_name): + share = self._lookup_share(group_name, owner_name, _server_name) + self.db.delete(share) + self.db.commit() + self.set_status(204) + + +class ServerShareAPIHandler(_ShareAPIHandler): + """Endpoint for shares of a single server + + This is where permissions are granted and revoked + """ + + @needs_scope("read:shares") + def get(self, user_name, server_name=None): + """List all shares for a given owner""" + + # TODO: optimize this query + # we need Share and only the _names_ of users/groups, + # no any other relationships + query = self._init_share_query() + if server_name is not None: + spawner = self._lookup_spawner(user_name, server_name) + query = query.filter_by(spawner_id=spawner.id) + else: + # lookup owner by id + row = ( + self.db.query(orm.User.id) + .where(orm.User.name == user_name) + .one_or_none() + ) + if row is None: + raise web.HTTPError(404) + owner_id = row[0] + query = query.filter_by(owner_id=owner_id) + self.finish(json.dumps(self._share_list_model(query))) + + @needs_scope('shares') + async def post(self, user_name, server_name=None): + """POST grants permissions for a given server""" + + if server_name is None: + # only GET supported `/shares/{user}` without specified server + raise web.HTTPError(405) + + model = self.get_json_body() or {} + try: + request = ShareGrantRequest(**model) + except ValidationError as e: + raise web.HTTPError(400, str(e)) + + scopes = request.scopes + # check scopes + if not scopes: + # default scopes + scopes = [f"access:servers!server={user_name}/{server_name}"] + + # validate that scopes may be granted by requesting user + try: + scopes = orm.Share._apply_filter(frozenset(scopes), user_name, server_name) + except ValueError as e: + raise web.HTTPError(400, str(e)) + + # resolve target spawner + spawner = self._lookup_spawner(user_name, server_name) + + # check permissions + for scope in scopes: + if not self.has_scope(scope): + raise web.HTTPError( + 403, f"Do not have permission to grant share with scope {scope}" + ) + + if request.user: + scope = f"read:users:name!user={request.user}" + if not self.has_scope(scope): + raise web.HTTPError( + 403, "Need scope 'read:users:name' to share with users by name" + ) + share_with = self.find_user(request.user) + if share_with is None: + raise web.HTTPError(400, f"No such user: {request.user}") + share_with = share_with.orm_user + elif request.group: + if not self.has_scope(f"read:groups:name!group={request.group}"): + 
raise web.HTTPError( + 403, "Need scope 'read:groups:name' to share with groups by name" + ) + share_with = orm.Group.find(self.db, name=request.group) + if share_with is None: + raise web.HTTPError(400, f"No such group: {request.group}") + + share = orm.Share.grant(self.db, spawner, share_with, scopes=scopes) + self.finish(json.dumps(self.share_model(share))) + + @needs_scope('shares') + async def patch(self, user_name, server_name=None): + """PATCH revokes permissions from single shares for a given server""" + + if server_name is None: + # only GET supported `/shares/{user}` without specified server + raise web.HTTPError(405) + + model = self.get_json_body() or {} + try: + request = ShareRevokeRequest(**model) + except ValidationError as e: + raise web.HTTPError(400, str(e)) + + # TODO: check allowed/valid scopes + + scopes = request.scopes + + # resolve target spawner + spawner = self._lookup_spawner(user_name, server_name) + + if request.user: + # don't need to check read:user permissions for revocation + share_with = self.find_user(request.user) + if share_with is None: + # No such user is the same as revoking + self.log.warning(f"No such user: {request.user}") + self.finish("{}") + return + share_with = share_with.orm_user + elif request.group: + share_with = orm.Group.find(self.db, name=request.group) + if share_with is None: + # No such group behaves the same as revoking no permissions + self.log.warning(f"No such group: {request.group}") + self.finish("{}") + return + + share = orm.Share.revoke(self.db, spawner, share_with, scopes=scopes) + if share: + self.finish(json.dumps(self.share_model(share))) + else: + # empty dict if share deleted + self.finish("{}") + + @needs_scope('shares') + async def delete(self, user_name, server_name=None): + if server_name is None: + # only GET supported `/shares/{user}` without specified server + raise web.HTTPError(405) + + spawner = self._lookup_spawner(user_name, server_name) + self.log.info(f"Deleting all shares for {user_name}/{server_name}") + q = self.db.query(orm.Share).filter_by( + spawner_id=spawner.id, + ) + res = q.delete() + self.log.info(f"Deleted {res} shares for {user_name}/{server_name}") + self.db.commit() + assert spawner.shares == [] + self.set_status(204) + + +class ServerShareCodeAPIHandler(_ShareAPIHandler): + """Endpoint for managing sharing codes of a single server + + These codes can be exchanged for actual sharing permissions by the recipient. 
+ """ + + @needs_scope("read:shares") + def get(self, user_name, server_name=None): + """List all share codes for a given owner""" + + query = self._init_share_query(kind="code") + if server_name is None: + # lookup owner by id + row = ( + self.db.query(orm.User.id) + .where(orm.User.name == user_name) + .one_or_none() + ) + if row is None: + raise web.HTTPError(404) + owner_id = row[0] + + query = query.filter_by(owner_id=owner_id) + else: + spawner = self._lookup_spawner(user_name, server_name) + query = query.filter_by(spawner_id=spawner.id) + self.finish(json.dumps(self._share_list_model(query, kind="code"))) + + @needs_scope('shares') + async def post(self, user_name, server_name=None): + """POST creates a new share code""" + + if server_name is None: + # only GET supported `/share-codes/{user}` without specified server + raise web.HTTPError(405) + + model = self.get_json_body() or {} + try: + request = ShareCodeGrantRequest(**model) + except ValidationError as e: + raise web.HTTPError(400, str(e)) + + scopes = request.scopes + # check scopes + if not scopes: + # default scopes + scopes = [f"access:servers!server={user_name}/{server_name}"] + + try: + scopes = orm.ShareCode._apply_filter( + frozenset(scopes), user_name, server_name + ) + except ValueError as e: + raise web.HTTPError(400, str(e)) + + # validate that scopes may be granted by requesting user + for scope in scopes: + if not self.has_scope(scope): + raise web.HTTPError( + 403, f"Do not have permission to grant share with scope {scope}" + ) + + # resolve target spawner + spawner = self._lookup_spawner(user_name, server_name) + + # issue the code + (share_code, code) = orm.ShareCode.new( + self.db, spawner, scopes=scopes, expires_in=request.expires_in + ) + # return the model (including code only this one time when it's created) + self.finish(json.dumps(self.share_code_model(share_code, code=code))) + + @needs_scope('shares') + def delete(self, user_name, server_name=None): + if server_name is None: + # only GET supported `/share-codes/{user}` without specified server + raise web.HTTPError(405) + + code = self.get_argument("code", None) + share_id = self.get_argument("id", None) + spawner = self._lookup_spawner(user_name, server_name) + if code: + # delete one code, identified by the code itself + share_code = orm.ShareCode.find(self.db, code, spawner=spawner) + if share_code is None: + raise web.HTTPError(404, "No matching code found") + else: + self.log.info(f"Deleting share code for {user_name}/{server_name}") + self.db.delete(share_code) + elif share_id: + m = _share_code_id_pat.match(share_id) + four_o_four = f"No code found matching id={share_id}" + if not m: + raise web.HTTPError(404, four_o_four) + share_id = int(m.group(1)) + share_code = ( + self.db.query(orm.ShareCode) + .filter_by( + spawner_id=spawner.id, + id=share_id, + ) + .one_or_none() + ) + if share_code is None: + raise web.HTTPError(404, four_o_four) + else: + self.log.info(f"Deleting share code for {user_name}/{server_name}") + self.db.delete(share_code) + else: + self.log.info(f"Deleting all share codes for {user_name}/{server_name}") + deleted = ( + self.db.query(orm.ShareCode) + .filter_by( + spawner_id=spawner.id, + ) + .delete() + ) + self.log.info( + f"Deleted {deleted} share codes for {user_name}/{server_name}" + ) + self.db.commit() + self.set_status(204) + + +default_handlers = [ + # TODO: not implementing single all-shared endpoint yet, too hard + # (r"/api/shares", ShareListAPIHandler), + # general management of shares + 
(r"/api/shares/([^/]+)", ServerShareAPIHandler), + (r"/api/shares/([^/]+)/([^/]*)", ServerShareAPIHandler), + # list shared_with_me for users/groups + (r"/api/users/([^/]+)/shared", UserShareListAPIHandler), + (r"/api/groups/([^/]+)/shared", GroupShareListAPIHandler), + # single-share endpoint (only for easy self-revocation, for now) + (r"/api/users/([^/]+)/shared/([^/]+)/([^/]*)", UserShareAPIHandler), + (r"/api/groups/([^/]+)/shared/([^/]+)/([^/]*)", GroupShareAPIHandler), + # manage sharing codes + (r"/api/share-codes/([^/]+)", ServerShareCodeAPIHandler), + (r"/api/share-codes/([^/]+)/([^/]*)", ServerShareCodeAPIHandler), +] diff --git a/jupyterhub/app.py b/jupyterhub/app.py --- a/jupyterhub/app.py +++ b/jupyterhub/app.py @@ -2491,7 +2491,7 @@ def purge_expired_tokens(self): run periodically """ # this should be all the subclasses of Expiring - for cls in (orm.APIToken, orm.OAuthCode): + for cls in (orm.APIToken, orm.OAuthCode, orm.Share, orm.ShareCode): self.log.debug(f"Purging expired {cls.__name__}s") cls.purge_expired(self.db) diff --git a/jupyterhub/handlers/base.py b/jupyterhub/handlers/base.py --- a/jupyterhub/handlers/base.py +++ b/jupyterhub/handlers/base.py @@ -491,6 +491,10 @@ def no_access(orm_resource, kind): return functools.partial(scopes.check_scope_filter, sub_scope) + def has_scope(self, scope): + """Is the current request being made with the given scope?""" + return scopes.has_scope(scope, self.parsed_scopes, db=self.db) + @property def current_user(self): """Override .current_user accessor from tornado @@ -669,15 +673,14 @@ def set_login_cookie(self, user): def authenticate(self, data): return maybe_future(self.authenticator.get_authenticated_user(self, data)) - def get_next_url(self, user=None, default=None): - """Get the next_url for login redirect + def _validate_next_url(self, next_url): + """Validate next_url handling - Default URL after login: + protects against external redirects, etc. - - if redirect_to_server (default): send to user's own server - - else: /hub/home + Returns empty string if next_url is not considered safe, + resulting in same behavior as if next_url is not specified. """ - next_url = self.get_argument('next', default='') # protect against some browsers' buggy handling of backslash as slash next_url = next_url.replace('\\', '%5C') public_url = self.settings.get("public_url") @@ -723,17 +726,18 @@ def get_next_url(self, user=None, default=None): self.log.warning("Disallowing redirect outside JupyterHub: %r", next_url) next_url = '' - if next_url and next_url.startswith(url_path_join(self.base_url, 'user/')): - # add /hub/ prefix, to ensure we redirect to the right user's server. - # The next request will be handled by SpawnHandler, - # ultimately redirecting to the logged-in user's server. - without_prefix = next_url[len(self.base_url) :] - next_url = url_path_join(self.hub.base_url, without_prefix) - self.log.warning( - "Redirecting %s to %s. 
For sharing public links, use /user-redirect/", - self.request.uri, - next_url, - ) + return next_url + + def get_next_url(self, user=None, default=None): + """Get the next_url for login redirect + + Default URL after login: + + - if redirect_to_server (default): send to user's own server + - else: /hub/home + """ + next_url = self.get_argument('next', default='') + next_url = self._validate_next_url(next_url) # this is where we know if next_url is coming from ?next= param or we are using a default url if next_url: diff --git a/jupyterhub/handlers/pages.py b/jupyterhub/handlers/pages.py --- a/jupyterhub/handlers/pages.py +++ b/jupyterhub/handlers/pages.py @@ -12,9 +12,9 @@ from tornado import web from tornado.httputil import url_concat -from .. import __version__ +from .. import __version__, orm from ..metrics import SERVER_POLL_DURATION_SECONDS, ServerPollStatus -from ..scopes import needs_scope +from ..scopes import describe_raw_scopes, needs_scope from ..utils import maybe_future, url_escape_path, url_path_join, utcnow from .base import BaseHandler @@ -553,6 +553,84 @@ def sort_key(client): self.finish(html) +class AcceptShareHandler(BaseHandler): + + def _get_next_url(self, owner, spawner): + """Get next_url for a given owner/spawner""" + next_url = self.get_argument("next", "") + next_url = self._validate_next_url(next_url) + if next_url: + return next_url + + # default behavior: + # if it's active, redirect to server URL + if spawner.name in owner.spawners: + spawner = owner.spawners[spawner.name] + if spawner.active: + # redirect to spawner url + next_url = owner.server_url(spawner.name) + + if not next_url: + # spawner not active + # TODO: next_url not specified and not running, what do we do? + # for now, redirect as if it's running, + # but that's very likely to fail on "You can't launch this server" + # is there a better experience for this? 
+ next_url = owner.server_url(spawner.name) + # validate again, which strips away host to just absolute path + return self._validate_next_url(next_url) + + @web.authenticated + async def get(self): + code = self.get_argument("code") + share_code = orm.ShareCode.find(self.db, code=code) + if share_code is None: + raise web.HTTPError(404, "Share not found or expired") + if share_code.owner == self.current_user.orm_user: + raise web.HTTPError(403, "You can't share with yourself!") + + scope_descriptions = describe_raw_scopes( + share_code.scopes, + username=self.current_user.name, + ) + owner = self._user_from_orm(share_code.owner) + spawner = share_code.spawner + if spawner.name in owner.spawners: + spawner = owner.spawners[spawner.name] + spawner_ready = spawner.ready + else: + spawner_ready = False + + html = await self.render_template( + 'accept-share.html', + code=code, + owner=owner, + spawner=spawner, + spawner_ready=spawner_ready, + spawner_url=owner.server_url(spawner.name), + scope_descriptions=scope_descriptions, + next_url=self._get_next_url(owner, spawner), + ) + self.finish(html) + + @web.authenticated + def post(self): + code = self.get_argument("code") + self.log.debug("Looking up %s", code) + share_code = orm.ShareCode.find(self.db, code=code) + if share_code is None: + raise web.HTTPError(400, f"Invalid share code: {code}") + if share_code.owner == self.current_user.orm_user: + raise web.HTTPError(400, "You can't share with yourself!") + user = self.current_user + share = share_code.exchange(user.orm_user) + owner = self._user_from_orm(share.owner) + spawner = share.spawner + + next_url = self._get_next_url(owner, spawner) + self.redirect(next_url) + + class ProxyErrorHandler(BaseHandler): """Handler for rendering proxy error pages""" @@ -609,6 +687,7 @@ def get(self): (r'/spawn/([^/]+)', SpawnHandler), (r'/spawn/([^/]+)/([^/]+)', SpawnHandler), (r'/token', TokenPageHandler), + (r'/accept-share', AcceptShareHandler), (r'/error/(\d+)', ProxyErrorHandler), (r'/health$', HealthCheckHandler), (r'/api/health$', HealthCheckHandler), diff --git a/jupyterhub/orm.py b/jupyterhub/orm.py --- a/jupyterhub/orm.py +++ b/jupyterhub/orm.py @@ -5,9 +5,11 @@ import enum import json import numbers +import secrets from base64 import decodebytes, encodebytes from datetime import timedelta -from functools import partial +from functools import lru_cache, partial +from itertools import chain import alembic.command import alembic.config @@ -33,6 +35,7 @@ from sqlalchemy.orm import ( Session, declarative_base, + declared_attr, interfaces, joinedload, object_session, @@ -94,6 +97,10 @@ def process_result_value(self, value, dialect): class JSONList(JSONDict): """Represents an immutable structure as a json-encoded string (to be used for list type columns). 
+ Accepts list, tuple, sets for assignment + + Always read as a list + Usage:: JSONList(JSONDict) @@ -101,8 +108,12 @@ class JSONList(JSONDict): """ def process_bind_param(self, value, dialect): - if isinstance(value, list) and value is not None: + if isinstance(value, (list, tuple)): value = json.dumps(value) + if isinstance(value, set): + # serialize sets as ordered lists + value = json.dumps(sorted(value)) + return value def process_result_value(self, value, dialect): @@ -226,6 +237,15 @@ class Group(Base): roles = relationship( 'Role', secondary='group_role_map', back_populates='groups', lazy="selectin" ) + shared_with_me = relationship( + "Share", + back_populates="group", + cascade="all, delete-orphan", + lazy="selectin", + ) + + # used in some model fields to differentiate 'whoami' + kind = "group" def __repr__(self): return f"<{self.__class__.__name__} {self.name}>" @@ -296,6 +316,42 @@ def orm_spawners(self): oauth_codes = relationship( "OAuthCode", back_populates="user", cascade="all, delete-orphan" ) + + # sharing relationships + shares = relationship( + "Share", + back_populates="owner", + cascade="all, delete-orphan", + foreign_keys="Share.owner_id", + ) + share_codes = relationship( + "ShareCode", + back_populates="owner", + cascade="all, delete-orphan", + foreign_keys="ShareCode.owner_id", + ) + shared_with_me = relationship( + "Share", + back_populates="user", + cascade="all, delete-orphan", + foreign_keys="Share.user_id", + lazy="selectin", + ) + + @property + def all_shared_with_me(self): + """return all shares shared with me, + + including via group + """ + + return list( + chain( + self.shared_with_me, + *[group.shared_with_me for group in self.groups], + ) + ) + cookie_id = Column(Unicode(255), default=new_token, nullable=False, unique=True) # User.state is actually Spawner state # We will need to figure something else out if/when we have multiple spawners per user @@ -304,6 +360,10 @@ def orm_spawners(self): # Encryption is handled elsewhere encrypted_auth_state = Column(LargeBinary) + # used in some model fields to differentiate whether an owner or actor + # is a user or service + kind = "user" + def __repr__(self): return "<{cls}({name} {running}/{total} running)>".format( cls=self.__class__.__name__, @@ -345,6 +405,13 @@ class Spawner(Base): cascade="all, delete-orphan", ) + shares = relationship( + "Share", back_populates="spawner", cascade="all, delete-orphan" + ) + share_codes = relationship( + "ShareCode", back_populates="spawner", cascade="all, delete-orphan" + ) + state = Column(JSONDict) name = Column(Unicode(255)) @@ -457,6 +524,9 @@ class Service(Base): single_parent=True, ) + # used in some model fields to differentiate 'whoami' + kind = "service" + def new_api_token(self, token=None, **kwargs): """Create a new API token If `token` is given, load that token. 
@@ -496,6 +566,14 @@ def expires_in(self): else: return None + @property + def expired(self): + """Is this object expired?""" + if not self.expires_at: + return False + else: + return self.expires_in <= 0 + @classmethod def purge_expired(cls, db): """Purge expired API Tokens from the database""" @@ -528,7 +606,7 @@ class Hashed(Expiring): @property def token(self): - raise AttributeError("token is write-only") + raise AttributeError(f"{self.__class__.__name__}.token is write-only") @token.setter def token(self, token): @@ -556,12 +634,13 @@ def check_token(cls, db, token): """Check if a token is acceptable""" if len(token) < cls.min_length: raise ValueError( - "Tokens must be at least %i characters, got %r" - % (cls.min_length, token) + f"{cls.__name__}.token must be at least {cls.min_length} characters, got {len(token)}: {token[: cls.prefix_length]}..." ) found = cls.find(db, token) if found: - raise ValueError("Collision on token: %s..." % token[: cls.prefix_length]) + raise ValueError( + f"Collision on {cls.__name__}: {token[: cls.prefix_length]}..." + ) @classmethod def find_prefix(cls, db, token): @@ -600,6 +679,339 @@ def find(cls, db, token): return orm_token +class _Share: + """Common columns for Share and ShareCode""" + + id = Column(Integer, primary_key=True, autoincrement=True) + created_at = Column(DateTime, nullable=False, default=utcnow) + + # TODO: owner_id and spawner_id columns don't need `@declared_attr` when we can require sqlalchemy 2 + + # the owner of the shared server + # this is redundant with spawner.user, but saves a join + @declared_attr + def owner_id(self): + return Column(Integer, ForeignKey('users.id', ondelete="CASCADE")) + + @declared_attr + def owner(self): + # table name happens to be appropriate 'shares', 'share_codes' + # could be another, more explicit attribute, but the values would be the same + return relationship( + "User", + back_populates=self.__tablename__, + foreign_keys=[self.owner_id], + lazy="selectin", + ) + + # the spawner the share is for + @declared_attr + def spawner_id(self): + return Column(Integer, ForeignKey('spawners.id', ondelete="CASCADE")) + + @declared_attr + def spawner(self): + return relationship( + "Spawner", + back_populates=self.__tablename__, + lazy="selectin", + ) + + # the permissions granted (!server filter will always be applied) + scopes = Column(JSONList) + expires_at = Column(DateTime, nullable=True) + + @classmethod + def apply_filter(cls, scopes, spawner): + """Apply our filter, ensures all scopes have appropriate !server filter + + Any other filters will raise ValueError. + """ + return cls._apply_filter(frozenset(scopes), spawner.user.name, spawner.name) + + @staticmethod + @lru_cache() + def _apply_filter(scopes, owner_name, server_name): + """ + implementation of Share.apply_filter + + Static method so @lru_cache is persisted across instances + """ + filtered_scopes = [] + server_filter = f"server={owner_name}/{server_name}" + for scope in scopes: + base_scope, _, filter = scope.partition("!") + if filter and filter != server_filter: + raise ValueError( + f"!{filter} not allowed on sharing {scope}, only !{server_filter}" + ) + filtered_scopes.append(f"{base_scope}!{server_filter}") + return frozenset(filtered_scopes) + + +class Share(_Share, Expiring, Base): + """A single record of a sharing permission + + granted by one user to another user (or group) + + Restricted to a single server. 
+    """
+
+    __tablename__ = "shares"
+
+    # who the share is granted to (user or group)
+    user_id = Column(Integer, ForeignKey('users.id', ondelete="CASCADE"), nullable=True)
+    user = relationship(
+        "User", back_populates="shared_with_me", foreign_keys=[user_id], lazy="selectin"
+    )
+
+    group_id = Column(
+        Integer, ForeignKey('groups.id', ondelete="CASCADE"), nullable=True
+    )
+    group = relationship("Group", back_populates="shared_with_me", lazy="selectin")
+
+    def __repr__(self):
+        if self.user:
+            kind = "user"
+            name = self.user.name
+        elif self.group:
+            kind = "group"
+            name = self.group.name
+        else:  # pragma: no cover
+            kind = "deleted"
+            name = "unknown"
+
+        if self.owner and self.spawner:
+            server_name = f"{self.owner.name}/{self.spawner.name}"
+        else:  # pragma: no cover
+            server_name = "unknown/deleted"
+
+        return f"<{self.__class__.__name__}(server={server_name}, scopes={self.scopes}, {kind}={name})>"
+
+    @staticmethod
+    def _share_with_key(share_with):
+        """Get the field name for share with
+
+        either group_id or user_id, depending on type of share_with
+
+        raises TypeError if neither User nor Group
+        """
+        if isinstance(share_with, User):
+            return "user_id"
+        elif isinstance(share_with, Group):
+            return "group_id"
+        else:
+            raise TypeError(
+                f"Can only share with orm.User or orm.Group, not {share_with!r}"
+            )
+
+    @classmethod
+    def find(cls, db, spawner, share_with):
+        """Find an existing Share
+
+        Shares are unique for a given (spawner, user)
+        """
+
+        filter_by = {
+            cls._share_with_key(share_with): share_with.id,
+            "spawner_id": spawner.id,
+            "owner_id": spawner.user.id,
+        }
+        return db.query(Share).filter_by(**filter_by).one_or_none()
+
+    @staticmethod
+    def _get_log_name(spawner, share_with):
+        """construct log snippet to refer to the share"""
+        return (
+            f"{share_with.kind}:{share_with.name} on {spawner.user.name}/{spawner.name}"
+        )
+
+    @property
+    def _log_name(self):
+        return self._get_log_name(self.spawner, self.user or self.group)
+
+    @classmethod
+    def grant(cls, db, spawner, share_with, scopes=None):
+        """Grant shared permissions for a server
+
+        Updates existing Share if there is one,
+        otherwise creates a new Share
+        """
+        if scopes is None:
+            scopes = frozenset(
+                [f"access:servers!server={spawner.user.name}/{spawner.name}"]
+            )
+        scopes = cls._apply_filter(frozenset(scopes), spawner.user.name, spawner.name)
+
+        if not scopes:
+            raise ValueError("Must specify scopes to grant.")
+
+        # 1.
lookup existing share and update + share = cls.find(db, spawner, share_with) + share_with_log = cls._get_log_name(spawner, share_with) + if share is not None: + # update existing permissions in-place + # extend permissions + existing_scopes = set(share.scopes) + added_scopes = set(scopes).difference(existing_scopes) + if not added_scopes: + app_log.info(f"No new scopes for {share_with_log}") + return share + new_scopes = sorted(existing_scopes | added_scopes) + app_log.info(f"Granting scopes {sorted(added_scopes)} for {share_with_log}") + share.scopes = new_scopes + db.commit() + else: + # no share for (spawner, share_with), create a new one + app_log.info(f"Sharing scopes {sorted(scopes)} for {share_with_log}") + share = cls( + created_at=cls.now(), + # copy shared fields + owner=spawner.user, + spawner=spawner, + scopes=sorted(scopes), + ) + if share_with.kind == "user": + share.user = share_with + elif share_with.kind == "group": + share.group = share_with + else: + raise TypeError(f"Expected user or group, got {share_with!r}") + db.add(share) + db.commit() + return share + + @classmethod + def revoke(cls, db, spawner, share_with, scopes=None): + """Revoke permissions for share_with on `spawner` + + If scopes are not specified, all scopes are revoked + """ + share = cls.find(db, spawner, share_with) + if share is None: + _log_name = cls._get_log_name(spawner, share_with) + app_log.info(f"No permissions to revoke from {_log_name}") + return + else: + _log_name = share._log_name + + if scopes is None: + app_log.info(f"Revoked all permissions from {_log_name}") + db.delete(share) + db.commit() + return None + + # update scopes + new_scopes = [scope for scope in share.scopes if scope not in scopes] + revoked_scopes = [scope for scope in scopes if scope in set(share.scopes)] + if new_scopes == share.scopes: + app_log.info(f"No change in scopes for {_log_name}") + return share + elif not new_scopes: + # revoked all scopes, delete the Share + app_log.info(f"Revoked all permissions from {_log_name}") + db.delete(share) + db.commit() + else: + app_log.info(f"Revoked {revoked_scopes} from {_log_name}") + share.scopes = new_scopes + db.commit() + + if new_scopes: + return share + else: + return None + + +class ShareCode(_Share, Hashed, Base): + """A code that can be exchanged for a Share + + Ultimately, the same as a Share, but has a 'code' + instead of a user or group that it is shared with. + The code can be exchanged to create or update an actual Share. 
+ """ + + __tablename__ = "share_codes" + + hashed = Column(Unicode(255), unique=True) + prefix = Column(Unicode(16), index=True) + exchange_count = Column(Integer, default=0) + last_exchanged_at = Column(DateTime, nullable=True, default=None) + + _code_bytes = 32 + default_expires_in = 86400 + + def __repr__(self): + if self.owner and self.spawner: + server_name = f"{self.owner.name}/{self.spawner.name}" + else: + server_name = "unknown/deleted" + + return f"<{self.__class__.__name__}(server={server_name}, scopes={self.scopes}, expires_at={self.expires_at})>" + + @classmethod + def new( + cls, + db, + spawner, + *, + scopes, + expires_in=None, + **kwargs, + ): + """Create a new ShareCode""" + app_log.info(f"Creating share code for {spawner.user.name}/{spawner.name}") + # verify scopes have the necessary filter + kwargs["scopes"] = sorted(cls.apply_filter(scopes, spawner)) + if not expires_in: + expires_in = cls.default_expires_in + kwargs["expires_at"] = utcnow() + timedelta(seconds=expires_in) + kwargs["spawner"] = spawner + kwargs["owner"] = spawner.user + code = secrets.token_urlsafe(cls._code_bytes) + + # create the ShareCode + share_code = cls(**kwargs) + # setting Hashed.token property sets the `hashed` column in the db + share_code.token = code + # actually put it in the db + db.add(share_code) + db.commit() + return (share_code, code) + + @classmethod + def find(cls, db, code, *, spawner=None): + """Lookup a single ShareCode by code""" + prefix_match = cls.find_prefix(db, code) + if spawner: + prefix_match = prefix_match.filter_by(spawner_id=spawner.id) + for share_code in prefix_match: + if share_code.match(code): + return share_code + + def exchange(self, share_with): + """exchange a ShareCode for a Share + + share_with can be a User or a Group. + """ + db = inspect(self).session + share_code_log = f"Share code {self.prefix}..." + if self.expired: + db.delete(self) + db.commit() + raise ValueError(f"{share_code_log} expired") + + share_with_log = f"{share_with.kind}:{share_with.name} on {self.owner.name}/{self.spawner.name}" + app_log.info(f"Exchanging {share_code_log} for {share_with_log}") + share = Share.grant(db, self.spawner, share_with, self.scopes) + # note: we count exchanges, even if they don't modify the permissions + # (e.g. 
one user exchanging the same code twice) + self.exchange_count += 1 + self.last_exchanged_at = self.now() + db.commit() + return share + + # ------------------------------------ # OAuth tables # ------------------------------------ diff --git a/jupyterhub/roles.py b/jupyterhub/roles.py --- a/jupyterhub/roles.py +++ b/jupyterhub/roles.py @@ -47,6 +47,7 @@ def get_default_roles(): 'access:servers', 'read:roles', 'read:metrics', + 'shares', ], }, { diff --git a/jupyterhub/scopes.py b/jupyterhub/scopes.py --- a/jupyterhub/scopes.py +++ b/jupyterhub/scopes.py @@ -144,6 +144,36 @@ 'access:services': { 'description': 'Access services via API or browser.', }, + 'users:shares': { + 'description': "Read and revoke a user's access to shared servers.", + 'subscopes': [ + 'read:users:shares', + ], + }, + 'read:users:shares': { + 'description': "Read servers shared with a user.", + }, + 'groups:shares': { + 'description': "Read and revoke a group's access to shared servers.", + 'subscopes': [ + 'read:groups:shares', + ], + }, + 'read:groups:shares': { + 'description': "Read servers shared with a group.", + }, + 'read:shares': { + 'description': "Read information about shared access to servers.", + }, + 'shares': { + 'description': "Manage access to shared servers.", + 'subscopes': [ + 'access:servers', + 'read:shares', + 'users:shares', + 'groups:shares', + ], + }, 'proxy': { 'description': 'Read information about the proxy’s routing table, sync the Hub with the proxy and notify the Hub about a new proxy.' }, @@ -178,7 +208,6 @@ def _intersect_expanded_scopes(scopes_a, scopes_b, db=None): Otherwise, it can result in lower than intended permissions, (i.e. users!group=x & users!user=y will be empty, even if user y is in group x.) """ - empty_set = frozenset() scopes_a = frozenset(scopes_a) scopes_b = frozenset(scopes_b) @@ -189,11 +218,12 @@ def groups_for_user(username): # if we need a group lookup, the result is not cacheable nonlocal needs_db needs_db = True - user = db.query(orm.User).filter_by(name=username).first() - if user is None: - return empty_set - else: - return {group.name for group in user.groups} + group_query = ( + db.query(orm.Group.name) + .join(orm.User.groups) + .filter(orm.User.name == username) + ) + return {row[0] for row in group_query} @lru_cache() def groups_for_server(server): @@ -348,6 +378,9 @@ def get_scopes_for(orm_object): owner = orm_object.user or orm_object.service owner_roles = roles.get_roles_for(owner) owner_scopes = roles.roles_to_expanded_scopes(owner_roles, owner) + if owner is orm_object.user: + for share in owner.shared_with_me: + owner_scopes |= frozenset(share.scopes) token_scopes = set(orm_object.scopes) if 'inherit' in token_scopes: @@ -401,12 +434,28 @@ def get_scopes_for(orm_object): roles.get_roles_for(orm_object), owner=orm_object, ) - if isinstance(orm_object, (orm.User, orm.Service)): - owner = orm_object + + # add permissions granted from 'shares' + if hasattr(orm_object, "shared_with_me"): + for share in orm_object.shared_with_me: + expanded_scopes |= expand_share_scopes(share) + if isinstance(orm_object, orm.User): + for group in orm_object.groups: + for share in group.shared_with_me: + expanded_scopes |= expand_share_scopes(share) return expanded_scopes +def expand_share_scopes(share): + """Get expanded scopes for a Share""" + return expand_scopes( + share.scopes, + owner=share.user or share.group, + oauth_client=share.spawner.oauth_client, + ) + + @lru_cache() def _expand_self_scope(username): """ @@ -431,6 +480,9 @@ def 
_expand_self_scope(username):
         'read:users',
         'read:users:name',
         'read:users:groups',
+        'users:shares',
+        'read:users:shares',
+        'read:shares',
         'users:activity',
         'read:users:activity',
         'servers',
@@ -647,68 +699,112 @@ def _resolve_requested_scopes(requested_scopes, have_scopes, user, client, db):
     return (allowed_scopes, disallowed_scopes)
 
 
-def _needs_scope_expansion(filter_, filter_value, sub_scope):
+def _needs_group_expansion(filter_, filter_value, sub_scope):
     """
     Check if there is a requirements to expand the `group` scope to individual `user` scopes.
     Assumptions:
     filter_ != Scope.ALL
     """
-    if not (filter_ == 'user' and 'group' in sub_scope):
+    if not (filter_ in {'user', 'server'} and 'group' in sub_scope):
         return False
-    if 'user' in sub_scope:
-        return filter_value not in sub_scope['user']
+    if filter_ in sub_scope:
+        return filter_value not in sub_scope[filter_]
     else:
         return True
 
 
-def _check_user_in_expanded_scope(handler, user_name, scope_group_names):
-    """Check if username is present in set of allowed groups"""
-    user = handler.find_user(user_name)
-    if user is None:
-        raise web.HTTPError(404, "No access to resources or resources not found")
-    group_names = {group.name for group in user.groups}
-    return bool(set(scope_group_names) & group_names)
+def _has_scope_key(scope, have_scopes, *, post_filter=False, db=None):
+    """Cache key for has_scope"""
+    if isinstance(have_scopes, dict):
+        have_scopes = FrozenDict(have_scopes)
+    else:
+        have_scopes = frozenset(have_scopes)
+    return (scope, have_scopes, post_filter)
 
+@lru_cache_key(_has_scope_key)
+def has_scope(scope, have_scopes, *, post_filter=False, db=None):
+    """Boolean function for whether we have a given scope
 
-def _check_scope_access(api_handler, req_scope, **kwargs):
-    """Check if scopes satisfy requirements
-    Returns True for (potentially restricted) access, False for refused access
+    Args:
+        scope (str): a single scope
+        have_scopes: parsed_scopes dict or expanded_scopes set
+        post_filter (bool):
+            Allows returning true if _some_ access is granted,
+            if not full access.
+            Only allowed if scope has no filter
+        db (optional): the db session
+            Required to check group membership,
+            unused otherwise
+    Returns:
+        True if access is allowed, False otherwise.
+        If post_filter is True and have_scopes contains _filtered_ access,
+        will return True, assuming filtered-access will be handled later
+        (e.g. in the listing-users handler)
     """
-    # Parse user name and server name together
-    try:
-        api_name = api_handler.request.path
-    except AttributeError:
-        api_name = type(api_handler).__name__
-    if 'user' in kwargs and 'server' in kwargs:
-        kwargs['server'] = "{}/{}".format(kwargs['user'], kwargs['server'])
-    if req_scope not in api_handler.parsed_scopes:
-        app_log.debug("No access to %s via %s", api_name, req_scope)
+    req_scope, _, full_filter = scope.partition("!")
+    filter_, _, filter_value = full_filter.partition("=")
+    if filter_ and not filter_value:
+        raise ValueError(
+            f"Unexpanded scope filter {scope} not allowed. Use expanded scopes."
+ ) + + if isinstance(have_scopes, dict): + parsed_scopes = have_scopes + else: + parsed_scopes = parse_scopes(have_scopes) + + if req_scope not in parsed_scopes: return False - if api_handler.parsed_scopes[req_scope] == Scope.ALL: - app_log.debug("Unrestricted access to %s via %s", api_name, req_scope) + have_scope_filters = parsed_scopes[req_scope] + if have_scope_filters == Scope.ALL: + # access to all resources return True - # Apply filters - sub_scope = api_handler.parsed_scopes[req_scope] - if not kwargs: - app_log.debug( - "Client has restricted access to %s via %s. Internal filtering may apply", - api_name, - req_scope, - ) + + if not filter_: + if post_filter: + # allow filtering after the fact + return True + else: + return False + + if post_filter: + raise ValueError("post_filter=True only allowed for unfiltered scopes") + _db_used = False + + if filter_ in have_scope_filters and filter_value in have_scope_filters[filter_]: return True - for filter_, filter_value in kwargs.items(): - if filter_ in sub_scope and filter_value in sub_scope[filter_]: - app_log.debug("Argument-based access to %s via %s", api_name, req_scope) + + # server->user + if filter_ == "server" and "user" in have_scope_filters: + user_name = filter_value.partition("/")[0] + if user_name in have_scope_filters["user"]: return True - if _needs_scope_expansion(filter_, filter_value, sub_scope): - group_names = sub_scope['group'] - if _check_user_in_expanded_scope(api_handler, filter_value, group_names): - app_log.debug("Restricted client access supported with group expansion") - return True - app_log.debug( - "Client access refused; filters do not match API endpoint %s request" % api_name - ) - raise web.HTTPError(404, "No access to resources or resources not found") + + if db and _needs_group_expansion(filter_, filter_value, have_scope_filters): + _db_used = True + if filter_ == "user": + user_name = filter_value + elif filter_ == "server": + user_name = filter_value.partition("/")[0] + else: + raise ValueError( + f"filter_ should be 'user' or 'server' here, not {filter_!r}" + ) + group_names = have_scope_filters['group'] + have_group_query = ( + db.query(orm.Group.name) + .join(orm.User.groups) + .filter(orm.User.name == user_name) + .filter(orm.Group.name.in_(group_names)) + ) + if have_group_query.count() > 0: + return DoNotCache(True) + + if _db_used: + return DoNotCache(False) + else: + return False def _check_scopes_exist(scopes, who_for=None): @@ -813,6 +909,8 @@ def parse_scopes(scope_list): if parsed_scopes[base_scope] != Scope.ALL: key, _, value = filter_.partition('=') + if not value: + raise ValueError(f"Empty string is not a valid filter: {scope}") if key not in parsed_scopes[base_scope]: parsed_scopes[base_scope][key] = {value} else: @@ -884,21 +982,53 @@ def _auth_func(self, *args, **kwargs): if resource_name in bound_sig.arguments: resource_value = bound_sig.arguments[resource_name] s_kwargs[resource] = resource_value + + if "server" in s_kwargs: + # merge user_name, server_name into server=user/server + if "user" not in s_kwargs: + raise ValueError( + "Cannot filter on 'server_name' without 'user_name'" + ) + s_kwargs["server"] = f"{s_kwargs['user']}/{s_kwargs['server']}" + s_kwargs.pop("user") + if len(s_kwargs) > 1: + raise ValueError( + f"Cannot filter on more than one field, got {s_kwargs}" + ) + elif s_kwargs: + filter_, filter_value = next(iter(s_kwargs.items())) + else: + filter_ = filter_value = None + for scope in scopes: + if filter_ is not None: + scope = 
f"{scope}!{filter_}={filter_value}" app_log.debug("Checking access to %s via scope %s", end_point, scope) - has_access = _check_scope_access(self, scope, **s_kwargs) + has_access = has_scope( + scope, + self.parsed_scopes, + post_filter=filter_ is None, + db=self.db, + ) if has_access: return func(self, *args, **kwargs) app_log.warning( - "Not authorizing access to {}. Requires any of [{}], not derived from scopes [{}]".format( - end_point, ", ".join(scopes), ", ".join(self.expanded_scopes) - ) + "Not authorizing access to %s. Requires any of [%s] on %s, not derived from scopes [%s]", + end_point, + ", ".join(scopes), + "*" if filter_ is None else f"{filter_}={filter_value}", + ", ".join(self.expanded_scopes), ) + if filter_ and any(scope in self.parsed_scopes for scope in scopes): + # not allowed due do filtered access, + # same error for nonexistence as missing permission + raise web.HTTPError( + 404, "No access to resources or resources not found" + ) raise web.HTTPError( 403, - "Action is not authorized with current scopes; requires any of [{}]".format( - ", ".join(scopes) - ), + "Action is not authorized with current scopes;" + f" requires any of [{', '.join(scopes)}]", ) return _auth_func @@ -939,17 +1069,28 @@ def identify_scopes(obj=None): raise TypeError(f"Expected orm.User or orm.Service, got {obj!r}") -@lru_cache_key(lambda oauth_client: oauth_client.identifier) -def access_scopes(oauth_client): +def _access_cache_key(oauth_client=None, *, spawner=None, service=None): + if oauth_client: + return ("oauth", oauth_client.identifier) + elif spawner: + return ("spawner", spawner.user.name, spawner.name) + elif service: + return ("service", service.name) + + +@lru_cache_key(_access_cache_key) +def access_scopes(oauth_client=None, *, spawner=None, service=None): """Return scope(s) required to access an oauth client""" scopes = set() - if oauth_client.identifier == "jupyterhub": + if oauth_client and oauth_client.identifier == "jupyterhub": return frozenset() - spawner = oauth_client.spawner + if spawner is None and oauth_client: + spawner = oauth_client.spawner if spawner: scopes.add(f"access:servers!server={spawner.user.name}/{spawner.name}") else: - service = oauth_client.service + if service is None: + service = oauth_client.service if service: scopes.add(f"access:services!service={service.name}") else: @@ -1009,7 +1150,6 @@ def describe_parsed_scopes(parsed_scopes, username=None): """ descriptions = [] for scope, filters in parsed_scopes.items(): - base_text = scope_definitions[scope]["description"] if filters == Scope.ALL: # no filter filter_text = "" @@ -1019,9 +1159,6 @@ def describe_parsed_scopes(parsed_scopes, username=None): if kind == 'user' and names == {username}: filter_chunks.append("only you") else: - kind_text = kind - if kind == 'group': - kind_text = "users in group" if len(names) == 1: filter_chunks.append(f"{kind}: {list(names)[0]}") else: @@ -1046,7 +1183,6 @@ def describe_raw_scopes(raw_scopes, username=None): descriptions = [] for raw_scope in raw_scopes: scope, _, filter_ = raw_scope.partition("!") - base_text = scope_definitions[scope]["description"] if not filter_: # no filter filter_text = "" diff --git a/jupyterhub/singleuser/extension.py b/jupyterhub/singleuser/extension.py --- a/jupyterhub/singleuser/extension.py +++ b/jupyterhub/singleuser/extension.py @@ -225,6 +225,7 @@ def page_config_hook(self, handler, page_config): user = handler.current_user # originally implemented in jupyterlab's LabApp page_config["hubUser"] = user.name if user else "" + 
page_config["hubServerUser"] = os.environ.get("JUPYTERHUB_USER", "") page_config["hubPrefix"] = hub_prefix = self.hub_auth.hub_prefix page_config["hubHost"] = self.hub_auth.hub_host page_config["shareUrl"] = url_path_join(hub_prefix, "user-redirect") diff --git a/jupyterhub/singleuser/mixins.py b/jupyterhub/singleuser/mixins.py --- a/jupyterhub/singleuser/mixins.py +++ b/jupyterhub/singleuser/mixins.py @@ -713,6 +713,7 @@ def page_config_hook(self, handler, page_config): Only has effect on jupyterlab_server >=2.9 """ page_config["token"] = self.hub_auth.get_token(handler) or "" + page_config["hubServerUser"] = os.environ.get("JUPYTERHUB_USER", "") return page_config def patch_default_headers(self):
diff --git a/jupyterhub/tests/browser/test_browser.py b/jupyterhub/tests/browser/test_browser.py --- a/jupyterhub/tests/browser/test_browser.py +++ b/jupyterhub/tests/browser/test_browser.py @@ -17,8 +17,10 @@ pytestmark = pytest.mark.browser -async def login(browser, username, password): - """filling the login form by user and pass_w parameters and iniate the login""" +async def login(browser, username, password=None): + """filling the login form by user and pass_w parameters and initiate the login""" + if password is None: + password = username await browser.get_by_label("Username:").click() await browser.get_by_label("Username:").fill(username) diff --git a/jupyterhub/tests/browser/test_share.py b/jupyterhub/tests/browser/test_share.py new file mode 100644 --- /dev/null +++ b/jupyterhub/tests/browser/test_share.py @@ -0,0 +1,64 @@ +import re + +import pytest +from playwright.async_api import expect +from tornado.httputil import url_concat + +from jupyterhub.utils import url_path_join + +from ..conftest import new_username +from ..utils import add_user, api_request, public_host +from .test_browser import login + +pytestmark = pytest.mark.browser + + +async def test_share_code_flow_full(app, browser, full_spawn, create_user_with_scopes): + share_user = add_user(app.db, name=new_username("share_with")) + user = create_user_with_scopes( + "shares!user", "self", f"read:users:name!user={share_user.name}" + ) + # start server + await user.spawn("") + await app.proxy.add_user(user) + spawner = user.spawner + + # issue_code + share_url = f"share-codes/{user.name}/{spawner.name}" + r = await api_request( + app, + share_url, + method="post", + name=user.name, + ) + r.raise_for_status() + share_model = r.json() + print(share_model) + assert "code" in share_model + code = share_model["code"] + + # visit share page + accept_share_url = url_path_join(public_host(app), app.hub.base_url, "accept-share") + accept_share_url = url_concat(accept_share_url, {"code": code}) + await browser.goto(accept_share_url) + # wait for login + await expect(browser).to_have_url(re.compile(r".*/login")) + # login + await login(browser, share_user.name) + # back to accept-share page + await expect(browser).to_have_url(re.compile(r".*/accept-share")) + + header_text = await browser.locator("//h2").first.text_content() + assert f"access {user.name}'s server" in header_text + assert f"You ({share_user.name})" in header_text + # TODO verify form + submit = browser.locator('//input[@type="submit"]') + await submit.click() + + # redirects to server, which triggers oauth approval + await expect(browser).to_have_url(re.compile(r".*/oauth2/authorize")) + submit = browser.locator('//input[@type="submit"]') + await submit.click() + + # finally, we are at the server! + await expect(browser).to_have_url(re.compile(f".*/user/{user.name}/.*")) diff --git a/jupyterhub/tests/conftest.py b/jupyterhub/tests/conftest.py --- a/jupyterhub/tests/conftest.py +++ b/jupyterhub/tests/conftest.py @@ -30,7 +30,6 @@ import copy import os import sys -from getpass import getuser from subprocess import TimeoutExpired from unittest import mock @@ -42,7 +41,13 @@ import jupyterhub.services.service from .. import crypto, orm, scopes -from ..roles import create_role, get_default_roles, mock_roles, update_roles +from ..roles import ( + assign_default_roles, + create_role, + get_default_roles, + mock_roles, + update_roles, +) from ..utils import random_port from . 
import mocking from .mocking import MockHub @@ -104,18 +109,18 @@ def auth_state_enabled(app): @fixture def db(): """Get a db session""" - global _db - if _db is None: - # make sure some initial db contents are filled out - # specifically, the 'default' jupyterhub oauth client - app = MockHub(db_url='sqlite:///:memory:') - app.init_db() - _db = app.db - for role in get_default_roles(): - create_role(_db, role) - user = orm.User(name=getuser()) - _db.add(user) - _db.commit() + # make sure some initial db contents are filled out + # specifically, the 'default' jupyterhub oauth client + app = MockHub(db_url='sqlite:///:memory:') + app.init_db() + _db = app.db + for role in get_default_roles(): + create_role(_db, role) + user = orm.User(name="user") + _db.add(user) + _db.commit() + assign_default_roles(_db, user) + _db.commit() return _db @@ -181,10 +186,16 @@ async def cleanup_after(request, io_loop): print(f"Stopping leftover server {spawner._log_name}") await user.stop(name) if user.name not in {'admin', 'user'}: + app.log.debug(f"Deleting test user {user.name}") app.users.delete(user.id) # delete groups for group in app.db.query(orm.Group): + app.log.debug(f"Deleting test group {group.name}") app.db.delete(group) + # delete shares + for share in app.db.query(orm.Share): + app.log.debug(f"Deleting test share {share}") + app.db.delete(share) # clear services for name, service in app._service_map.items(): diff --git a/jupyterhub/tests/test_named_servers.py b/jupyterhub/tests/test_named_servers.py --- a/jupyterhub/tests/test_named_servers.py +++ b/jupyterhub/tests/test_named_servers.py @@ -77,6 +77,7 @@ async def test_default_server(app, named_servers): 'server': user.url, 'servers': { '': { + 'full_name': f"{username}/", 'name': '', 'started': TIMESTAMP, 'last_activity': TIMESTAMP, @@ -165,6 +166,7 @@ async def test_create_named_server( 'auth_state': None, 'servers': { servername: { + 'full_name': f"{username}/{servername}", 'name': name, 'started': TIMESTAMP, 'last_activity': TIMESTAMP, diff --git a/jupyterhub/tests/test_orm.py b/jupyterhub/tests/test_orm.py --- a/jupyterhub/tests/test_orm.py +++ b/jupyterhub/tests/test_orm.py @@ -185,7 +185,9 @@ def test_service_server(db): def test_token_find(db): - service = db.query(orm.Service).first() + service = orm.Service(name='sample') + db.add(service) + db.commit() user = db.query(orm.User).first() service_token = service.new_api_token() user_token = user.new_api_token() @@ -238,7 +240,7 @@ async def start(self): def test_groups(db): - user = orm.User.find(db, name='aeofel') + user = orm.User(name='aeofel') db.add(user) group = orm.Group(name='lives') @@ -496,6 +498,71 @@ def test_group_delete_cascade(db): assert user1 not in group1.users +def test_share_user(db): + user1 = orm.User(name='user1') + user2 = orm.User(name='user2') + spawner = orm.Spawner(user=user1) + db.add(user1) + db.add(user2) + db.add(spawner) + db.commit() + + share = orm.Share( + owner=user1, + spawner=spawner, + user=user2, + ) + db.add(share) + db.commit() + assert user1.shares == [share] + assert spawner.shares == [share] + assert user1.shared_with_me == [] + assert user2.shared_with_me == [share] + db.delete(share) + db.commit() + assert user1.shares == [] + assert spawner.shares == [] + assert user1.shared_with_me == [] + assert user2.shared_with_me == [] + + +def test_share_group(db): + initial_list = list(db.query(orm.User)) + assert len(initial_list) <= 1 + user1 = orm.User(name='user1') + user2 = orm.User(name='user2') + group2 = orm.Group(name='group2') + spawner = 
orm.Spawner(user=user1) + db.add(user1) + db.add(user2) + db.add(group2) + db.add(spawner) + db.commit() + group2.users.append(user2) + db.commit() + share = orm.Share( + owner=user1, + spawner=spawner, + group=group2, + ) + db.add(share) + db.commit() + assert user1.shares == [share] + assert spawner.shares == [share] + assert user1.shared_with_me == [] + assert user2.shared_with_me == [] + assert group2.shared_with_me == [share] + assert user2.all_shared_with_me == [share] + db.delete(share) + db.commit() + assert user1.shares == [] + assert spawner.shares == [] + assert user1.shared_with_me == [] + assert user2.shared_with_me == [] + assert group2.shared_with_me == [] + assert user2.all_shared_with_me == [] + + def test_expiring_api_token(app, user): db = app.db token = orm.APIToken.new(expires_in=30, user=user) diff --git a/jupyterhub/tests/test_pages.py b/jupyterhub/tests/test_pages.py --- a/jupyterhub/tests/test_pages.py +++ b/jupyterhub/tests/test_pages.py @@ -764,7 +764,7 @@ async def mock_authenticate(handler, data): (False, '', '', None), # next_url is respected (False, '/hub/admin', '/hub/admin', None), - (False, '/user/other', '/hub/user/other', None), + (False, '/user/other', '/user/other', None), (False, '/absolute', '/absolute', None), (False, '/has?query#andhash', '/has?query#andhash', None), # :// in query string or fragment diff --git a/jupyterhub/tests/test_roles.py b/jupyterhub/tests/test_roles.py --- a/jupyterhub/tests/test_roles.py +++ b/jupyterhub/tests/test_roles.py @@ -21,17 +21,10 @@ def test_orm_roles(db): """Test orm roles setup""" user_role = orm.Role.find(db, name='user') + user_role.users = [] token_role = orm.Role.find(db, name='token') - service_role = orm.Role.find(db, name='service') - if not user_role: - user_role = orm.Role(name='user', scopes=['self']) - db.add(user_role) - if not token_role: - token_role = orm.Role(name='token', scopes=['inherit']) - db.add(token_role) - if not service_role: - service_role = orm.Role(name='service', scopes=[]) - db.add(service_role) + service_role = orm.Role(name="service") + db.add(service_role) db.commit() group_role = orm.Role(name='group', scopes=['read:users']) @@ -376,9 +369,10 @@ async def test_creating_roles(app, role, role_def, response_type, response, capl ('default', 'user', 'error', ValueError), ], ) -async def test_delete_roles(db, role_type, rolename, response_type, response, caplog): +async def test_delete_roles(app, role_type, rolename, response_type, response, caplog): """Test raising errors and info when deleting roles""" caplog.set_level(logging.INFO) + db = app.db if response_type == 'info': # add the role to db diff --git a/jupyterhub/tests/test_scopes.py b/jupyterhub/tests/test_scopes.py --- a/jupyterhub/tests/test_scopes.py +++ b/jupyterhub/tests/test_scopes.py @@ -1,24 +1,24 @@ """Test scopes for API handlers""" +import types from operator import itemgetter from unittest import mock import pytest from pytest import mark from tornado import web -from tornado.httputil import HTTPServerRequest from .. 
import orm, roles, scopes from .._memoize import FrozenDict -from ..handlers import BaseHandler +from ..apihandlers import APIHandler from ..scopes import ( Scope, - _check_scope_access, _expand_self_scope, _intersect_expanded_scopes, _resolve_requested_scopes, expand_scopes, get_scopes_for, + has_scope, identify_scopes, needs_scope, parse_scopes, @@ -27,7 +27,8 @@ def get_handler_with_scopes(scopes): - handler = mock.Mock(spec=BaseHandler) + handler = mock.Mock(spec=APIHandler) + handler.has_scope = types.MethodType(APIHandler.has_scope, handler) handler.parsed_scopes = parse_scopes(scopes) return handler @@ -56,47 +57,39 @@ def test_scope_precendence(): def test_scope_check_present(): handler = get_handler_with_scopes(['read:users']) - assert _check_scope_access(handler, 'read:users') - assert _check_scope_access(handler, 'read:users', user='maeby') + assert handler.has_scope('read:users') + assert handler.has_scope('read:users!user=maeby') def test_scope_check_not_present(): handler = get_handler_with_scopes(['read:users!user=maeby']) - assert _check_scope_access(handler, 'read:users') - with pytest.raises(web.HTTPError): - _check_scope_access(handler, 'read:users', user='gob') - with pytest.raises(web.HTTPError): - _check_scope_access(handler, 'read:users', user='gob', server='server') + assert not handler.has_scope('read:users') + assert not handler.has_scope('read:users!user=gob') + assert not handler.has_scope('read:users!server=gob/server') def test_scope_filters(): handler = get_handler_with_scopes( ['read:users', 'read:users!group=bluths', 'read:users!user=maeby'] ) - assert _check_scope_access(handler, 'read:users', group='bluth') - assert _check_scope_access(handler, 'read:users', user='maeby') - - -def test_scope_multiple_filters(): - handler = get_handler_with_scopes(['read:users!user=george_michael']) - assert _check_scope_access( - handler, 'read:users', user='george_michael', group='bluths' - ) + assert handler.has_scope('read:users!group=bluth') + assert handler.has_scope('read:users!user=maeby') def test_scope_parse_server_name(): handler = get_handler_with_scopes( ['servers!server=maeby/server1', 'read:users!user=maeby'] ) - assert _check_scope_access(handler, 'servers', user='maeby', server='server1') + assert handler.has_scope('servers!server=maeby/server1') class MockAPIHandler: def __init__(self): self.expanded_scopes = {'users'} self.parsed_scopes = {} - self.request = mock.Mock(spec=HTTPServerRequest) + self.request = mock.Mock(spec=APIHandler) self.request.path = '/path' + self.db = None def set_scopes(self, *scopes): self.expanded_scopes = set(scopes) @@ -295,7 +288,7 @@ async def test_exceeding_user_permissions( api_token = user.new_api_token() orm_api_token = orm.APIToken.find(app.db, token=api_token) # store scopes user does not have - orm_api_token.scopes = orm_api_token.scopes + ['list:users', 'read:users'] + orm_api_token.scopes = list(orm_api_token.scopes) + ['list:users', 'read:users'] headers = {'Authorization': 'token %s' % api_token} r = await api_request(app, 'users', headers=headers) assert r.status_code == 200 @@ -490,7 +483,7 @@ async def test_metascope_inherit_expansion(app, create_user_with_scopes): assert user_scope_set == token_scope_set # Check no roles means no permissions - token.scopes.clear() + token.scopes = [] app.db.commit() token_scope_set = get_scopes_for(token) assert isinstance(token_scope_set, frozenset) @@ -556,7 +549,7 @@ async def test_server_state_access( await api_request( app, 'users', user.name, 'servers', server_name, 
method='post' ) - service = create_service_with_scopes("read:users:name!user=", *scopes) + service = create_service_with_scopes("read:users:name!user=bianca", *scopes) api_token = service.new_api_token() headers = {'Authorization': 'token %s' % api_token} @@ -1309,3 +1302,64 @@ def format_scopes(scopes): ) assert allowed == expected_allowed assert disallowed == expected_disallowed + + [email protected]( + "scope, have_scopes, ok", + [ + # exact matches + ("read:users", "read:users", True), + ("read:users!user=USER", "read:users!user=USER", True), + ("read:servers!server=USER/x", "read:servers!server=USER/x", True), + ("read:groups!group=GROUP", "read:groups!group=GROUP", True), + # subscopes + ("read:users:name", "read:users", True), + # subscopes with matching filter + ("read:users:name!user=USER", "read:users!user=USER", True), + ("read:users!user=USER", "read:users!group=GROUP", True), + ("read:users!user=USER", "read:users", True), + ("read:servers!server=USER/x", "read:servers", True), + ("read:servers!server=USER/x", "servers!server=USER/x", True), + ("read:servers!server=USER/x", "servers!user=USER", True), + ("read:servers!server=USER/x", "servers!group=GROUP", True), + # shouldn't match + ("read:users", "read:users!user=USER", False), + ("read:users", "read:users!user=USER", False), + ("read:users!user=USER", "read:users!user=other", False), + ("read:users!user=USER", "read:users!group=other", False), + ("read:servers!server=USER/x", "servers!server=other/x", False), + ("read:servers!server=USER/x", "servers!user=other", False), + ("read:servers!server=USER/x", "servers!group=other", False), + ("servers!server=USER/x", "read:servers!server=USER/x", False), + ], +) +def test_has_scope(app, user, group, scope, have_scopes, ok): + db = app.db + user.groups.append(group) + db.commit() + + def _sub(scope): + return scope.replace("GROUP", group.name).replace("USER", user.name) + + scope = _sub(scope) + have_scopes = [_sub(s) for s in have_scopes.split(",")] + parsed_scopes = parse_scopes(expand_scopes(have_scopes)) + assert has_scope(scope, parsed_scopes, db=db) == ok + + [email protected]( + "scope, have_scopes, ok", + [ + ("read:users", "read:users", True), + ("read:users", "read:users!user=x", True), + ("read:users", "read:groups!user=x", False), + ("read:users!user=x", "read:users!group=y", ValueError), + ], +) +def test_has_scope_post_filter(scope, have_scopes, ok): + have_scopes = have_scopes.split(",") + if ok not in (True, False): + with pytest.raises(ok): + has_scope(scope, have_scopes, post_filter=True) + else: + assert has_scope(scope, have_scopes, post_filter=True) == ok diff --git a/jupyterhub/tests/test_shares.py b/jupyterhub/tests/test_shares.py new file mode 100644 --- /dev/null +++ b/jupyterhub/tests/test_shares.py @@ -0,0 +1,1487 @@ +import json +from datetime import timedelta +from functools import partial +from unittest import mock +from urllib.parse import parse_qs, urlparse, urlunparse + +import pytest +from bs4 import BeautifulSoup +from dateutil.parser import parse as parse_date +from tornado.httputil import url_concat + +from jupyterhub import orm, scopes +from jupyterhub.utils import url_path_join, utcnow + +from .conftest import new_group_name, new_username +from .utils import add_user, api_request, async_requests, get_page, public_url + + [email protected] +def share_user(app): + """The user to be shared with""" + yield add_user(app.db, name=new_username("share_with")) + + [email protected] +def share(app, user, share_user): + """Share access to `user`'s 
default server with `share_user`""" + db = app.db + spawner = user.spawner.orm_spawner + owner = user.orm_user + filter_ = f"server={owner.name}/{spawner.name}" + scopes = [f"access:servers!{filter_}"] + yield orm.Share.grant(db, spawner, share_user, scopes=scopes) + + [email protected] +def group_share(app, user, group, share_user): + """Share with share_user via group membership""" + db = app.db + app.db.commit() + spawner = user.spawner.orm_spawner + owner = user.orm_user + filter_ = f"server={owner.name}/{spawner.name}" + scopes = [f"read:servers!{filter_}"] + group.users.append(share_user) + yield orm.Share.grant(db, spawner, group, scopes=scopes) + + [email protected] +def populate_shares(app, user, group, share_user): + group_a = orm.Group(name=new_group_name("a")) + group_b = orm.Group(name=new_group_name("b")) + group_c = orm.Group(name=new_group_name("c")) + app.db.add(group_a) + app.db.add(group_b) + app.db.add(group_c) + app.db.commit() + in_a = add_user(app.db, name=new_username("in-a")) + in_a_b = add_user(app.db, name=new_username("in-a-b")) + in_b = add_user(app.db, name=new_username("in-b")) + not_in = add_user(app.db, name=new_username("not-in")) + + group_a.users = [in_a, in_a_b] + group_b.users = [in_b, in_a_b] + app.db.commit() + + user_1 = add_user(app.db, name=new_username("server")) + user_2 = add_user(app.db, name=new_username("server")) + user_3 = add_user(app.db, name=new_username("server")) + user_4 = add_user(app.db, name=new_username("server")) + + # group a has access to user_1 + # group b has access to user_2 + # both groups have access to user_3 + # user in-a also has access to user_4 + orm.Share.grant( + app.db, + app.users[user_1].spawner.orm_spawner, + group_a, + ) + orm.Share.grant( + app.db, + app.users[user_2].spawner.orm_spawner, + group_b, + ) + orm.Share.grant( + app.db, + app.users[user_3].spawner.orm_spawner, + group_a, + ) + orm.Share.grant( + app.db, + app.users[user_3].spawner.orm_spawner, + group_b, + ) + orm.Share.grant( + app.db, + app.users[user_4].spawner.orm_spawner, + in_a, + ) + orm.Share.grant( + app.db, + app.users[user_4].spawner.orm_spawner, + not_in, + ) + + # return a dict showing who should have access to what + return { + "users": { + in_a.name: [user_1.name, user_3.name, user_4.name], + in_b.name: [user_2.name, user_3.name], + # shares are _not_ deduplicated if granted + # both via user and group + in_a_b.name: [user_1.name, user_2.name, user_3.name, user_3.name], + not_in.name: [user_4.name], + }, + "groups": { + group_a.name: [user_1.name, user_3.name], + group_b.name: [user_2.name, user_3.name], + group_c.name: [], + }, + } + + +def expand_scopes(scope_str, user, group=None, share_with=None): + """utility to expand scopes used in parametrized tests + + Turns "read:users!user=USER,shares!group=SHARE_WITH" + into + [ + "read:users!user=username", + "shares!group=groupname", + ] + """ + scopes = [] + replacements = {} + + def _get_name(str_or_obj): + """Allow a string name or an object with a name attribute + + string names are used for tests where something doesn't exist + """ + if isinstance(str_or_obj, str): + return str_or_obj + else: + return str_or_obj.name + + username = _get_name(user) + replacements["USER"] = username + replacements["SERVER"] = username + "/" + if group: + replacements["GROUP"] = _get_name(group) + if share_with: + replacements["SHARE_WITH"] = _get_name(share_with) + for scope in scope_str.split(","): + for a, b in replacements.items(): + scope = scope.replace(a, b) + scopes.append(scope) + return 
scopes + + [email protected]("share_with", ["user", "group"]) +def test_create_share(app, user, share_user, group, share_with): + db = app.db + spawner = user.spawner.orm_spawner + owner = user.orm_user + share_attr = share_with + if share_with == "group": + share_with = group + elif share_with == "user": + share_with = share_user + scopes = [f"access:servers!server={owner.name}/{spawner.name}"] + before = orm.Share.now() + share = orm.Share.grant(db, spawner, share_with, scopes=scopes) + after = orm.Share.now() + assert share.scopes == scopes + assert share.owner is owner + assert share.spawner is spawner + assert getattr(share, share_attr) is share_with + assert share.created_at + assert before <= share.created_at <= after + assert share in share_with.shared_with_me + assert share in spawner.shares + assert share in owner.shares + if share_attr == 'user': + assert share not in share_with.shares + assert share not in owner.shared_with_me + # compute repr for coverage + repr(share) + db.delete(share_with) + db.commit() + + +def test_create_share_bad(app, user, share_user, mockservice): + db = app.db + service = mockservice + spawner = user.spawner.orm_spawner + owner = user.orm_user + scopes = [f"access:servers!server={owner.name}/{spawner.name}"] + with pytest.raises(ValueError): + orm.Share.grant(db, spawner, share_user, scopes=[]) + with pytest.raises(TypeError): + orm.Share.grant(db, spawner, service, scopes=scopes) + + +def test_update_share(app, share): + db = app.db + # shift into past + share.created_at -= timedelta(hours=1) + created_at = share.created_at + db.commit() + + # grant additional scopes + filter = f"server={share.owner.name}/{share.spawner.name}" + more_scopes = [f"read:servers!{filter}"] + all_scopes = sorted(share.scopes + more_scopes) + + share2 = orm.Share.grant(db, share.spawner, share.user, scopes=more_scopes) + assert share2 is share + assert share.created_at == created_at + assert share.scopes == all_scopes + + # fully overlapping scopes + share3 = orm.Share.grant(db, share.spawner, share.user, scopes=share.scopes[:1]) + assert share3 is share + assert share.created_at == created_at + assert share.scopes == all_scopes + + # revoke scopes not held + share4 = orm.Share.revoke( + db, + share.spawner, + share.user, + scopes=[f"admin:servers!{filter}"], + ) + assert share4 is share + assert share.created_at == created_at + assert share.scopes == all_scopes + + # revoke some scopes + share5 = orm.Share.revoke( + db, + share.spawner, + share.user, + scopes=all_scopes[:1], + ) + remaining_scopes = all_scopes[1:] + assert share5 is share + assert share.created_at == created_at + assert share.scopes == remaining_scopes + + # revoke remaining scopes + share5 = orm.Share.revoke( + db, + share.spawner, + share.user, + scopes=remaining_scopes, + ) + assert share5 is None + found_share = orm.Share.find(db, spawner=share.spawner, share_with=share.user) + assert found_share is None + + [email protected]( + "to_delete", + [ + "owner", + "spawner", + "share_with_user", + "share_with_group", + ], +) +def test_share_delete_cascade(to_delete, app, user, group): + db = app.db + if "group" in to_delete: + share_with = group + else: + share_with = add_user(db, app, name=new_username("share_with")).orm_user + spawner = user.spawner.orm_spawner + owner = user.orm_user + scopes = [f"access:servers!server={owner.name}/{spawner.name}"] + assert spawner.name is not None + assert spawner.user.name + assert share_with.name + share = orm.Share.grant(db, spawner, share_with, scopes=scopes) + 
assert share in share_with.shared_with_me + share_id = share.id + if to_delete == "owner": + app.users.delete(owner) + assert share_with.shared_with_me == [] + elif to_delete == "spawner": + # pass + db.delete(spawner) + user.spawners.pop(spawner.name) + db.commit() + assert owner.shares == [] + assert share_with.shared_with_me == [] + elif to_delete == "share_with_user": + app.users.delete(share_with) + assert owner.shares == [] + assert spawner.shares == [] + elif to_delete == "share_with_group": + db.delete(share_with) + db.commit() + assert owner.shares == [] + assert spawner.shares == [] + else: + raise ValueError(f"unexpected {to_delete=}") + # make sure it's gone + assert db.query(orm.Share).filter_by(id=share_id).one_or_none() is None + + +def test_share_scopes(app, share_user, share): + db = app.db + user_scopes = scopes.get_scopes_for(share_user) + assert set(share.scopes).issubset(user_scopes) + # delete share, no longer have scopes + db.delete(share) + db.commit() + user_scopes = scopes.get_scopes_for(share_user) + assert not set(share.scopes).intersection(user_scopes) + + +def test_share_group_scopes(app, share_user, group_share): + # make sure share is actually in the group (make sure group_share fixture worked) + db = app.db + share = group_share + assert group_share.group in share_user.groups + user_scopes = scopes.get_scopes_for(share_user) + assert set(share.scopes).issubset(user_scopes) + # delete share, no longer have scopes + db.delete(share) + db.commit() + user_scopes = scopes.get_scopes_for(share_user) + assert not set(share.scopes).intersection(user_scopes) + + +def test_share_code(app, user, share_user): + spawner = user.spawner.orm_spawner + user = spawner.user + code_scopes = sorted( + [ + f"read:servers!server={user.name}/", + f"access:servers!server={user.name}/", + ] + ) + orm_code, code = orm.ShareCode.new( + app.db, + spawner, + scopes=code_scopes, + ) + assert sorted(orm_code.scopes) == code_scopes + assert orm_code.owner is user + assert orm_code.spawner is spawner + assert orm_code in spawner.share_codes + assert orm_code in user.share_codes + + share_with_scopes = scopes.get_scopes_for(share_user) + for scope in code_scopes: + assert scope not in share_with_scopes + + assert orm_code.exchange_count == 0 + assert orm_code.last_exchanged_at is None + # do it twice, shouldn't change anything + orm_code.exchange(share_user) + assert orm_code.exchange_count == 1 + assert orm_code.last_exchanged_at is not None + now = orm_code.now() + assert now - timedelta(10) <= orm_code.last_exchanged_at <= now + timedelta(10) + + share_with_scopes = scopes.get_scopes_for(share_user) + for scope in code_scopes: + assert scope in share_with_scopes + + +def test_share_code_expires(app, user, share_user): + db = app.db + spawner = user.spawner.orm_spawner + user = spawner.user + orm_code, code = orm.ShareCode.new( + db, + spawner, + scopes=[ + f"access:servers!server={user.name}/", + ], + ) + # check expiration + assert orm_code.expires_at + now = orm_code.now() + assert ( + now - timedelta(10) + <= orm_code.expires_at + <= now + timedelta(seconds=orm.ShareCode.default_expires_in + 10) + ) + orm.ShareCode.purge_expired(db) + found = orm.ShareCode.find(db, code=code) + assert found + assert found.id == orm_code.id + + with mock.patch( + 'jupyterhub.orm.ShareCode.now', staticmethod(lambda: now + timedelta(hours=1)) + ): + orm.ShareCode.purge_expired(db) + found = orm.ShareCode.find(db, code=code) + assert found + assert found.id == orm_code.id + + with mock.patch( + 
'jupyterhub.orm.ShareCode.now', staticmethod(lambda: now + timedelta(hours=25)) + ): + found = orm.ShareCode.find(db, code=code) + assert found is None + # try exchanging expired code + with pytest.raises(ValueError): + orm_code.exchange(share_user) + + # expired code, should have been deleted + found = orm.ShareCode.find(db, code=code) + assert found is None + assert db.query(orm.ShareCode).filter_by(id=orm_code.id).one_or_none() is None + + +# API tests + + [email protected]( + "kind", + [ + ("user"), + ("group"), + ], +) +async def test_shares_api_user_group_doesnt_exist( + app, + user, + group, + share_user, + kind, +): + # make sure default spawner exists + spawner = user.spawner # noqa + body = {} + if kind == "user": + body["user"] = "nosuchuser" + elif kind == "group": + body["group"] = "nosuchgroup" + + r = await api_request( + app, f"/shares/{user.name}/", method="post", data=json.dumps(body) + ) + assert r.status_code == 400 + + [email protected]( + "which", + [ + ("user"), + ("server"), + ], +) +async def test_shares_api_target_doesnt_exist( + app, + user, + group, + share_user, + which, +): + # make sure default spawner exists + if which == "server": + share_path = f"/shares/{user.name}/nosuchserver" + elif which == "user": + share_path = "/shares/nosuchuser/" + body = {"user": share_user.name} + + r = await api_request(app, share_path, method="post", data=json.dumps(body)) + assert r.status_code == 404 + + [email protected]( + "have_scopes, share_scopes, with_user, with_group, status", + [ + (None, None, True, False, 200), + ("shares", None, True, False, 403), + ( + "shares!server=SERVER,servers!server=SERVER,read:users:name!user=SHARE_WITH", + "read:servers!server=SERVER,access:servers!server=SERVER", + True, + False, + 200, + ), + (None, "read:servers!server=other/", False, True, 400), + ( + "shares,access:servers,read:users:name", + "admin:servers!server=SERVER", + False, + True, + 403, + ), + (None, None, False, False, 400), + (None, None, "nosuchuser", False, 400), + (None, None, False, "nosuchgroup", 400), + (None, None, True, True, 400), + (None, None, True, False, 200), + ], +) +async def test_shares_api_create( + app, + user, + group, + share_user, + create_user_with_scopes, + have_scopes, + share_scopes, + with_user, + with_group, + status, +): + # make sure default spawner exists + spawner = user.spawner # noqa + body = {} + share_with = share_user + if with_user: + body["user"] = share_user.name if with_user == True else with_user + if with_group: + body["group"] = group.name if with_group == True else with_group + share_with = group + _expand_scopes = partial(expand_scopes, user=user, share_with=share_with) + + expected_scopes = _expand_scopes("access:servers!server=SERVER") + if share_scopes: + share_scopes = _expand_scopes(share_scopes) + expected_scopes.extend(share_scopes) + body["scopes"] = share_scopes + + expected_scopes = sorted(set(expected_scopes)) + + if have_scopes is None: + # default: needed permissions + have_scopes = "shares,read:users:name,read:groups:name" + + requester = create_user_with_scopes(*_expand_scopes(have_scopes)) + + r = await api_request( + app, + f"/shares/{user.name}/", + method="post", + data=json.dumps(body), + name=requester.name, + ) + assert r.status_code == status + if r.status_code < 300: + share_model = r.json() + assert "scopes" in share_model + assert sorted(share_model["scopes"]) == expected_scopes + + [email protected]( + "have_scopes, before_scopes, revoke_scopes, after_scopes, with_user, with_group, status", + [ + 
("read:shares", None, None, None, True, False, 403), + ("shares", None, None, None, True, False, 200), + ("shares!user=USER", None, None, None, False, True, 200), + (None, "read:servers!server=SERVER", None, None, True, False, 200), + ( + None, + "access:servers!server=SERVER", + "read:servers!server=SERVER", + "access:servers!server=SERVER", + True, + False, + 200, + ), + (None, None, None, None, "nosuchuser", False, 200), + (None, None, None, None, False, "nosuchgroup", 200), + (None, None, None, None, False, False, 400), + (None, None, None, None, True, True, 400), + ], +) +async def test_shares_api_revoke( + app, + user, + group, + share_user, + create_user_with_scopes, + have_scopes, + before_scopes, + revoke_scopes, + after_scopes, + with_user, + with_group, + status, +): + db = app.db + # make sure default spawner exists + spawner = user.spawner.orm_spawner # noqa + body = {} + share_with = share_user + if with_user: + body["user"] = share_user.name if with_user == True else with_user + if with_group: + body["group"] = group.name if with_group == True else with_group + share_with = group + _expand_scopes = partial(expand_scopes, user=user, share_with=share_with) + + if revoke_scopes: + revoke_scopes = _expand_scopes(revoke_scopes) + body["scopes"] = revoke_scopes + + if after_scopes: + after_scopes = _expand_scopes(after_scopes) + + if before_scopes: + orm.Share.grant(db, spawner, share_with, scopes=_expand_scopes(before_scopes)) + + if have_scopes is None: + # default: needed permissions + have_scopes = "shares,read:users:name,read:groups:name" + + requester = create_user_with_scopes(*_expand_scopes(have_scopes)) + + r = await api_request( + app, + f"/shares/{user.name}/", + method="patch", + data=json.dumps(body), + name=requester.name, + ) + assert r.status_code == status + if r.status_code < 300: + share_model = r.json() + if not after_scopes: + # no scopes specified, full revocation + assert share_model == {} + return + assert share_model["scopes"] == after_scopes + + [email protected]( + "have_scopes, status", + [ + ("shares", 204), + ("shares!user=USER", 204), + ("shares!server=SERVER", 204), + ("read:shares", 403), + ("shares!server=USER/other", 404), + ("shares!user=other", 404), + ], +) +async def test_shares_api_revoke_all( + app, + user, + group, + share_user, + create_user_with_scopes, + have_scopes, + status, +): + db = app.db + # make sure default spawner exists + spawner = user.spawner.orm_spawner # noqa + orm.Share.grant(db, spawner, share_user) + orm.Share.grant(db, spawner, group) + _expand_scopes = partial(expand_scopes, user=user) + + if have_scopes is None: + # default: needed permissions + have_scopes = "shares" + + requester = create_user_with_scopes(*_expand_scopes(have_scopes)) + + r = await api_request( + app, + f"/shares/{user.name}/", + method="delete", + name=requester.name, + ) + assert r.status_code == status + + # get updated share list + r = await api_request( + app, + f"/shares/{user.name}/", + ) + share_list = r.json() + + if status >= 400: + assert len(share_list["items"]) == 2 + else: + assert len(share_list["items"]) == 0 + + [email protected]( + "kind, case", + [ + ("users", "in-a"), + ("users", "in-b"), + ("users", "in-a-b"), + ("users", "not-in"), + ("users", "notfound"), + ("groups", "a"), + ("groups", "b"), + ("groups", "c"), + ("groups", "notfound"), + ], +) +async def test_shared_api_list_user_group( + app, populate_shares, create_user_with_scopes, kind, case +): + if case == "notfound": + name = "notfound" + else: + # find exact name, 
which will look like `{kind}-123` + for name, server_names in populate_shares[kind].items(): + if name.rpartition("-")[0] == case: + break + else: + raise ValueError(f"Did not find {case} in {populate_shares[kind].keys()}") + + r = await api_request(app, f"/{kind}/{name}/shared") + if name == "notfound": + assert r.status_code == 404 + return + else: + assert r.status_code == 200 + shares = r.json() + found_shares = sorted( + [share["server"]["user"]["name"] for share in shares["items"]] + ) + expected_shares = sorted(server_names) + assert found_shares == expected_shares + + [email protected]( + "kind, have_scopes, get_status, delete_status", + [ + ("users", "users", 403, 403), + ("users", "read:users", 403, 403), + ("users", "read:users!user=USER", 403, 403), + ("users", "read:users:shares!user=SHARE_WITH", 200, 403), + ("users", "users:shares!user=SHARE_WITH", 200, 204), + ("users", "users:shares!user=other", 404, 404), + ("groups", "groups", 403, 403), + ("groups", "read:groups", 403, 403), + ("groups", "read:groups!group=group", 403, 403), + ("groups", "read:groups:shares!group=SHARE_WITH", 200, 403), + ("groups", "groups:shares!group=SHARE_WITH", 200, 204), + ("groups", "groups:shares!group=other", 404, 404), + ], +) +async def test_single_shared_api( + app, + user, + share_user, + group, + create_user_with_scopes, + kind, + have_scopes, + get_status, + delete_status, +): + db = app.db + share_user.groups.append(group) + db.commit() + spawner = user.spawner.orm_spawner + + if kind == "users": + share_with = share_user + else: + share_with = group + + _expand_scopes = partial(expand_scopes, user=user, share_with=share_with) + + requester = create_user_with_scopes(*_expand_scopes(have_scopes)) + + share_scopes = [f"access:servers!server={user.name}/"] + share = orm.Share.grant( + db, spawner=spawner, share_with=share_with, scopes=share_scopes + ) + api_url = f"/{kind}/{share_with.name}/shared/{user.name}/" + + fetch_share = partial(api_request, app, api_url, name=requester.name) + r = await fetch_share() + assert r.status_code == get_status + if get_status < 300: + # check content + pass + + r = await fetch_share(method="delete") + assert r.status_code == delete_status + if delete_status < 300: + assert r.text == "" + else: + # manual delete + db.delete(share) + db.commit() + + # now share doesn't exist, should 404 + assert orm.Share.find(db, spawner=spawner, share_with=share_with) is None + + r = await fetch_share() + assert r.status_code == 404 if get_status < 300 else get_status + r = await fetch_share(method="delete") + assert r.status_code == 404 if delete_status < 300 else delete_status + + [email protected]( + "kind, have_scopes, get_status, delete_status", + [ + ("users", "read:users:shares!user=SHARE_WITH", 404, 403), + ("users", "users:shares!user=SHARE_WITH", 404, 404), + ("users", "users:shares!user=other", 404, 404), + ("groups", "read:groups:shares!group=SHARE_WITH", 404, 403), + ("groups", "groups:shares!group=SHARE_WITH", 404, 404), + ("groups", "groups:shares!group=other", 404, 404), + ], +) +async def test_single_shared_api_no_such_owner( + app, + user, + share_user, + group, + create_user_with_scopes, + kind, + have_scopes, + get_status, + delete_status, +): + db = app.db + share_user.groups.append(group) + db.commit() + spawner = user.spawner.orm_spawner # noqa + + if kind == "users": + share_with = share_user + else: + share_with = group + + owner_name = "nosuchname" + + _expand_scopes = partial(expand_scopes, user=owner_name, share_with=share_with) + + requester = 
create_user_with_scopes(*_expand_scopes(have_scopes)) + + api_url = f"/{kind}/{share_with.name}/shared/{owner_name}/" + + fetch_share = partial(api_request, app, api_url, name=requester.name) + r = await fetch_share() + assert r.status_code == get_status + + r = await fetch_share(method="delete") + assert r.status_code == delete_status + + [email protected]( + "kind", + [ + ("users"), + ("groups"), + ], +) +async def test_single_shared_api_no_such_target( + app, user, share_user, group, create_user_with_scopes, kind +): + db = app.db + share_user.groups.append(group) + db.commit() + spawner = user.spawner.orm_spawner # noqa + share_with = "nosuch" + kind + + requester = create_user_with_scopes(f"{kind}:shares") + + api_url = f"/{kind}/{share_with}/shared/{user.name}/" + + fetch_share = partial(api_request, app, api_url, name=requester.name) + r = await fetch_share() + assert r.status_code == 404 + + r = await fetch_share(method="delete") + assert r.status_code == 404 + + [email protected]( + "have_scopes, n_groups, n_users, ok", + [ + ("shares", 0, 0, True), + ("read:shares", 0, 2, True), + ("read:shares!user=USER", 3, 0, True), + ("read:shares!server=SERVER", 2, 1, True), + ("read:users:shares", 0, 0, False), + ], +) +async def test_shares_api_list_server( + app, user, share_user, create_user_with_scopes, have_scopes, n_groups, n_users, ok +): + db = app.db + spawner = user.spawner.orm_spawner + + _expand_scopes = partial(expand_scopes, user=user, share_with=share_user) + + requester = create_user_with_scopes(*_expand_scopes(have_scopes)) + + expected_shares = [] + for i in range(n_users): + u = create_user_with_scopes().orm_user + orm.Share.grant(db, spawner, u) + expected_shares.append(f"user:{u.name}") + + for i in range(n_groups): + group = orm.Group(name=new_group_name()) + db.add(group) + db.commit() + orm.Share.grant(db, spawner, group) + expected_shares.append(f"group:{group.name}") + expected_shares = sorted(expected_shares) + r = await api_request(app, f"/shares/{user.name}/", name=requester.name) + if ok: + assert r.status_code == 200 + else: + assert r.status_code == 403 + return + shares = r.json() + found_shares = [] + for share in shares["items"]: + assert share["user"] or share["group"] + if share["user"]: + found_shares.append(f"user:{share['user']['name']}") + elif share["group"]: + found_shares.append(f"group:{share['group']['name']}") + found_shares = sorted(found_shares) + assert found_shares == expected_shares + + [email protected]( + "have_scopes, n_groups, n_users, status", + [ + ("shares", 0, 0, 200), + ("read:shares", 0, 2, 200), + ("read:shares!user=USER", 3, 0, 200), + ("read:shares!user=other", 3, 0, 404), + ("read:shares!server=SERVER", 2, 1, 404), + ("read:users:shares", 0, 0, 403), + ], +) +async def test_shares_api_list_user( + app, + user, + share_user, + create_user_with_scopes, + have_scopes, + n_groups, + n_users, + status, +): + db = app.db + spawner = user.spawner.orm_spawner + + _expand_scopes = partial(expand_scopes, user=user, share_with=share_user) + + requester = create_user_with_scopes(*_expand_scopes(have_scopes)) + + expected_shares = [] + for i in range(n_users): + u = create_user_with_scopes().orm_user + orm.Share.grant(db, spawner, u) + expected_shares.append(f"user:{u.name}") + + for i in range(n_groups): + group = orm.Group(name=new_group_name()) + db.add(group) + db.commit() + orm.Share.grant(db, spawner, group) + expected_shares.append(f"group:{group.name}") + expected_shares = sorted(expected_shares) + r = await api_request(app, 
f"/shares/{user.name}", name=requester.name) + assert r.status_code == status + if status >= 400: + return + shares = r.json() + found_shares = [] + for share in shares["items"]: + assert share["user"] or share["group"] + if share["user"]: + found_shares.append(f"user:{share['user']['name']}") + elif share["group"]: + found_shares.append(f"group:{share['group']['name']}") + found_shares = sorted(found_shares) + assert found_shares == expected_shares + + +async def test_shares_api_list_no_such_owner(app): + r = await api_request(app, "/shares/nosuchuser") + assert r.status_code == 404 + r = await api_request(app, "/shares/nosuchuser/") + assert r.status_code == 404 + r = await api_request(app, "/shares/nosuchuser/namedserver") + assert r.status_code == 404 + + [email protected]( + "method", + [ + "post", + "patch", + "delete", + ], +) +async def test_share_api_server_required(app, user, method): + """test methods defined on /shares/:user/:server not defined on /shares/:user""" + r = await api_request(app, f"/shares/{user.name}", method=method) + assert r.status_code == 405 + + +async def test_share_flow_full( + app, full_spawn, user, share_user, create_user_with_scopes +): + """Exercise the full process of sharing and then accessing a shared server""" + user = create_user_with_scopes( + "shares!user", "self", f"read:users:name!user={share_user.name}" + ) + # start server + await user.spawn("") + await app.proxy.add_user(user) + spawner = user.spawner + + # grant access + share_url = f"shares/{user.name}/{spawner.name}" + r = await api_request( + app, + share_url, + method="post", + name=user.name, + data=json.dumps({"user": share_user.name}), + ) + r.raise_for_status() + share_model = r.json() + + # attempt to _use_ access + user_url = public_url(app, user) + "api/contents/" + token = share_user.new_api_token() + r = await async_requests.get(user_url, headers={"Authorization": f"Bearer {token}"}) + r.raise_for_status() + + # revoke access + r = await api_request( + app, + share_url, + method="patch", + name=user.name, + data=json.dumps( + { + "scopes": share_model["scopes"], + "user": share_user.name, + } + ), + ) + r.raise_for_status() + # new request with new token to avoid cache + token = share_user.new_api_token() + r = await async_requests.get(user_url, headers={"Authorization": f"Bearer {token}"}) + assert r.status_code == 403 + + +# share codes + + [email protected]( + "method", + [ + "post", + "delete", + ], +) +async def test_share_codes_api_server_required(app, user, method): + """test methods defined on /share-codes/:user/:server not defined on /share-codes/:user""" + r = await api_request(app, f"/share-codes/{user.name}", method=method) + assert r.status_code == 405 + + [email protected]( + "have_scopes, n_codes, level, status", + [ + ("shares", 0, 'user', 200), + ("read:shares", 2, 'server', 200), + ("read:shares!user=USER", 3, 'user', 200), + ("read:shares!server=SERVER", 2, 'server', 200), + ("read:shares!server=SERVER", 2, 'user', 404), + ("read:users:shares", 0, 'user', 403), + ("users:shares", 1, 'server', 403), + ], +) +async def test_share_codes_api_list( + app, user, share_user, create_user_with_scopes, have_scopes, n_codes, level, status +): + db = app.db + spawner = user.spawner.orm_spawner + + _expand_scopes = partial(expand_scopes, user=user, share_with=share_user) + requester = create_user_with_scopes(*_expand_scopes(have_scopes)) + + expected_shares = [] + for i in range(n_codes): + code = orm.ShareCode( + spawner=spawner, + owner=spawner.user, + 
scopes=sorted(scopes.access_scopes(spawner=spawner)), + ) + db.add(code) + db.commit() + expected_shares.append(f"sc_{code.id}") + + expected_shares = sorted(expected_shares) + if level == 'user': + path = f"/share-codes/{user.name}" + else: + path = f"/share-codes/{user.name}/" + r = await api_request(app, path, name=requester.name) + assert r.status_code == status + if status >= 400: + return + share_codes = r.json() + found_shares = [] + for share_code in share_codes["items"]: + assert 'code' not in share_code + assert 'id' in share_code + assert 'server' in share_code + found_shares.append(share_code["id"]) + found_shares = sorted(found_shares) + assert found_shares == expected_shares + + +async def test_share_codes_api_list_no_such_owner(app, user): + spawner = user.spawner.orm_spawner # noqa + r = await api_request(app, "/share-codes/nosuchuser") + assert r.status_code == 404 + r = await api_request(app, "/share-codes/nosuchuser/") + assert r.status_code == 404 + r = await api_request(app, f"/share-codes/{user.name}/nosuchserver") + assert r.status_code == 404 + + [email protected]( + "have_scopes, share_scopes, status", + [ + (None, None, 200), + ("shares", None, 200), + ("shares!user=other", None, 404), + ( + "shares!server=SERVER,servers!server=SERVER", + "read:servers!server=SERVER,access:servers!server=SERVER", + 200, + ), + (None, "read:servers!server=other/", 400), + ( + "shares,access:servers", + "admin:servers!server=SERVER", + 403, + ), + (None, None, 200), + ], +) +async def test_share_codes_api_create( + app, + user, + group, + share_user, + create_user_with_scopes, + have_scopes, + share_scopes, + status, +): + # make sure default spawner exists + spawner = user.spawner # noqa + body = {} + share_with = share_user + _expand_scopes = partial(expand_scopes, user=user, share_with=share_with) + + expected_scopes = _expand_scopes("access:servers!server=SERVER") + if share_scopes: + share_scopes = _expand_scopes(share_scopes) + expected_scopes.extend(share_scopes) + body["scopes"] = share_scopes + + expected_scopes = sorted(set(expected_scopes)) + + if have_scopes is None: + # default: needed permissions + have_scopes = "shares" + + requester = create_user_with_scopes(*_expand_scopes(have_scopes)) + + r = await api_request( + app, + f"/share-codes/{user.name}/", + method="post", + data=json.dumps(body), + name=requester.name, + ) + assert r.status_code == status + if r.status_code >= 400: + return + + share_model = r.json() + assert "scopes" in share_model + assert sorted(share_model["scopes"]) == expected_scopes + assert "code" in share_model + assert "accept_url" in share_model + parsed_accept_url = urlparse(share_model["accept_url"]) + accept_query = parse_qs(parsed_accept_url.query) + assert accept_query == {"code": [share_model["code"]]} + assert parsed_accept_url.path == url_path_join(app.base_url, "hub/accept-share") + + [email protected]( + "expires_in, status", + [ + (None, 200), + ("notanumber", 400), + (-1, 400), + (60, 200), + (525600 * 59, 200), + (525600 * 60 + 1, 400), + ], +) +async def test_share_codes_api_create_expires_in( + app, + user, + group, + create_user_with_scopes, + expires_in, + status, +): + # make sure default spawner exists + spawner = user.spawner # noqa + body = {} + now = utcnow() + if expires_in: + body["expires_in"] = expires_in + + r = await api_request( + app, + f"/share-codes/{user.name}/", + method="post", + data=json.dumps(body), + ) + assert r.status_code == status + if r.status_code >= 400: + return + + share_model = r.json() + 
assert "expires_at" in share_model + assert share_model["expires_at"] + expires_at = parse_date(share_model["expires_at"]) + + expected_expires_at = now + timedelta( + seconds=expires_in or orm.ShareCode.default_expires_in + ) + window = timedelta(seconds=60) + assert expected_expires_at - window <= expires_at <= expected_expires_at + window + + async def get_code(): + r = await api_request( + app, + f"/share-codes/{user.name}/", + ) + r.raise_for_status() + codes = r.json()["items"] + assert len(codes) <= 1 + if len(codes) == 1: + return codes[0] + else: + return None + + code = await get_code() + assert code + + with mock.patch( + 'jupyterhub.orm.ShareCode.now', + staticmethod(lambda: (expires_at + timedelta(seconds=1)).replace(tzinfo=None)), + ): + code = await get_code() + assert code is None + + [email protected]( + "have_scopes, delete_by, status", + [ + (None, None, 204), + ("shares", "id=ID", 204), + ( + "shares!server=SERVER", + "code=CODE", + 204, + ), + ("shares!user=other", None, 404), + ("read:shares", "code=CODE", 403), + ("shares", "id=invalid", 404), + ("shares", "id=sc_9999", 404), + ("shares", "code=nomatch", 404), + ], +) +async def test_share_codes_api_revoke( + app, + user, + group, + share_user, + create_user_with_scopes, + have_scopes, + delete_by, + status, +): + db = app.db + spawner = user.spawner.orm_spawner + + _expand_scopes = partial(expand_scopes, user=user, share_with=share_user) + # make sure default spawner exists + spawner = user.spawner.orm_spawner + share_code, code = orm.ShareCode.new( + db, spawner, scopes=list(scopes.access_scopes(spawner=spawner)) + ) + + assert orm.ShareCode.find(db, code=code) + other_share_code, other_code = orm.ShareCode.new( + db, spawner, scopes=list(scopes.access_scopes(spawner=spawner)) + ) + + if have_scopes is None: + # default: needed permissions + have_scopes = "shares" + + requester = create_user_with_scopes(*_expand_scopes(have_scopes)) + + url = f"/share-codes/{user.name}/" + if delete_by: + query = delete_by.replace("CODE", code).replace("ID", f"sc_{share_code.id}") + url = f"{url}?{query}" + + r = await api_request( + app, + url, + method="delete", + name=requester.name, + ) + assert r.status_code == status + + # other code unaffected + if r.status_code >= 400: + assert orm.ShareCode.find(db, code=code) + return + # code has been deleted + assert orm.ShareCode.find(db, code=code) is None + if delete_by is None: + assert orm.ShareCode.find(db, code=other_code) is None + else: + assert orm.ShareCode.find(db, code=other_code) + + [email protected]( + "who, code_arg, get_status, post_status", + [ + ("share", None, 400, 400), + ("share", "nosuchcode", 404, 400), + ("share", "CODE", 200, 302), + ("self", "CODE", 403, 400), + ], +) +async def test_accept_share_page( + app, user, share_user, who, code_arg, get_status, post_status +): + db = app.db + spawner = user.spawner.orm_spawner + orm_code, code = orm.ShareCode.new( + db, spawner, scopes=list(scopes.access_scopes(spawner=spawner)) + ) + if who == "self": + cookies = await app.login_user(user.name) + else: + cookies = await app.login_user(share_user.name) + + url = "accept-share" + form_data = {"_xsrf": cookies['_xsrf']} + if code_arg: + code_arg = code_arg.replace("CODE", code) + form_data["code"] = code_arg + url = url + f"?code={code_arg}" + + r = await get_page(url, app, cookies=cookies) + assert r.status_code == get_status + + # try submitting the form with the same inputs + accept_url = public_url(app) + "hub/accept-share" + r = await async_requests.post( + 
accept_url, + cookies=cookies, + data=form_data, + allow_redirects=False, + ) + assert r.status_code == post_status + if post_status < 400: + assert orm_code.exchange_count == 1 + # share is accepted + assert len(share_user.shared_with_me) == 1 + assert share_user.shared_with_me[0].spawner is spawner + else: + assert orm_code.exchange_count == 0 + assert not share_user.shared_with_me + + [email protected]( + "running, next_url, expected_next", + [ + (False, None, "{USER_URL}"), + (True, None, "{USER_URL}"), + (False, "https://example.com{BASE_URL}", "{USER_URL}"), + (False, "{BASE_URL}hub", ""), + (True, "{USER_URL}lab/tree/notebook.ipynb?param=5", ""), + (False, "{USER_URL}lab/tree/notebook.ipynb?param=5", ""), + ], +) +async def test_accept_share_page_next_url( + app, + user, + share_user, + running, + next_url, + expected_next, +): + db = app.db + spawner = user.spawner.orm_spawner + orm_code, code = orm.ShareCode.new( + db, spawner, scopes=list(scopes.access_scopes(spawner=spawner)) + ) + cookies = await app.login_user(share_user.name) + + if running: + await user.spawn() + await user.spawner.server.wait_up(http=True) + await app.proxy.add_user(user) + else: + pass + + def expand_url(url): + url = url.format( + USER=user.name, + USER_URL=user.server_url(""), + BASE_URL=app.base_url, + ) + return url + + if next_url: + next_url = expand_url(next_url) + if expected_next: + expected_next = expand_url(expected_next) + else: + # empty: expect match + expected_next = next_url + + url = f"accept-share?code={code}" + form_data = {"_xsrf": cookies['_xsrf']} + if next_url: + url = url_concat(url, {"next": next_url}) + + r = await get_page(url, app, cookies=cookies) + assert r.status_code == 200 + + page = BeautifulSoup(r.text) + page_body = page.find("div", class_="container").get_text() + if running: + assert "not currently running" not in page_body + else: + assert "not currently running" in page_body + + # try submitting the form with the same inputs + accept_url = r.url + r = await async_requests.post( + accept_url, + cookies=cookies, + data=form_data, + allow_redirects=False, + ) + assert r.status_code == 302 + target = r.headers["Location"] + # expect absolute path redirect + expected_next_target = urlunparse( + urlparse(expected_next)._replace(scheme="", netloc="") + ) + assert target == expected_next_target + # is it worth following the redirect? diff --git a/jupyterhub/tests/test_spawner.py b/jupyterhub/tests/test_spawner.py --- a/jupyterhub/tests/test_spawner.py +++ b/jupyterhub/tests/test_spawner.py @@ -19,7 +19,7 @@ from .. 
import spawner as spawnermod from ..objects import Hub, Server from ..scopes import access_scopes -from ..spawner import LocalProcessSpawner, Spawner +from ..spawner import SimpleLocalProcessSpawner, Spawner from ..user import User from ..utils import AnyTimeoutError, maybe_future, new_token, url_path_join from .mocking import public_url @@ -47,7 +47,7 @@ def setup(): def new_spawner(db, **kwargs): - user = kwargs.setdefault('user', User(db.query(orm.User).first(), {})) + user = kwargs.setdefault("user", User(db.query(orm.User).one(), {})) kwargs.setdefault('cmd', [sys.executable, '-c', _echo_sleep]) kwargs.setdefault('hub', Hub()) kwargs.setdefault('notebook_dir', os.getcwd()) @@ -57,7 +57,7 @@ def new_spawner(db, **kwargs): kwargs.setdefault('term_timeout', 1) kwargs.setdefault('kill_timeout', 1) kwargs.setdefault('poll_interval', 1) - return user._new_spawner('', spawner_class=LocalProcessSpawner, **kwargs) + return user._new_spawner('', spawner_class=SimpleLocalProcessSpawner, **kwargs) async def test_spawner(db, request): @@ -381,7 +381,11 @@ async def test_spawner_bad_api_token(app): ( ["self", "read:groups!group=x", "users:activity"], ["admin:groups", "users:activity"], - ["read:groups!group=x", "read:groups:name!group=x", "users:activity"], + [ + "read:groups!group=x", + "read:groups:name!group=x", + "users:activity", + ], ), ], )
User-initiated sharing, distinct from roles

### Proposed change

Introduce a first-class concept of a 'share', as distinct from general 'roles', to address the use case of "I want to easily grant someone access to my server and then maybe revoke it later".

For users granting access, I've long had in mind #3858 as the solution, which would allow users to create roles and role assignments at runtime, _one_ application of which could be granting someone else access to their server. But after testing out sharing and RTC use cases, I increasingly think that's not going to be enough. Even when we get to that (and I think we should still do that), we're probably going to want 'share' as a first-class concept.

### Alternative options

Implement fully generic roles, as described in #3858. This will _technically_ enable the same thing (likely _more_ things), but because it's a generic permissions-granting system, it may be complicated to implement a simple sharing-access UI on top.

### Who would use this feature?

JupyterHub deployments where _users_ are in control of granting other users access to their servers.

### (Optional): Suggest a solution

Introduce a first-class `share` entity in JupyterHub, with a REST API for create/delete/list. Shares would:

- be owned by a specific user
- target a single, specific server
- be listable and revocable
- expire
- implementation-wise, ultimately be a restricted and runtime-defined role assignment
- grant the minimal `access:servers!server=...` permission
- (maybe) optionally grant additional `!server=`-filtered permissions. This will be necessary for e.g. read-only access. It's unclear to me what level of control will be needed on that.
- (maybe) optionally apply to wider filters, such as `!user`. I plan to go with "not in the first implementation" on this one.

Implementation-wise, this would effectively be a runtime role assignment, but with a higher-level API for creating/managing them, because they would be specifically used for accessing services (i.e. a 'share' couldn't be used to grant admin access).

In addition to the new APIs and database entries for managing share objects themselves, the only change should be in resolving the permissions for a specific user to add Shares, which will be treated identically to Roles.

Features could include:

- 'share with link', where a special link could be used to _claim_ sharing access without the sharer needing to invite users by name (this would be the exact jupyterlab-link-share experience, but with full JupyterHub permission controls)
- expiration, for short-lived one-time sharing sessions
- revoke sharing permissions by multiple keys, such as:
  - all shares for this server
  - a specific sharing link, and any access granted thereby
  - individual sharing permissions
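As a rough sketch of how such a sharing API could be used, the following hypothetical client-side example mirrors the endpoints exercised in the test suite above (`POST`/`PATCH` on `/hub/api/shares/:owner/:server`); the host URL, token, user names, and exact payloads are illustrative assumptions, not the final interface.

```python
import requests

hub_api = "http://127.0.0.1:8081/hub/api"  # assumed Hub API base URL
headers = {"Authorization": "token OWNER_API_TOKEN"}  # token holding the sharing scope

# grant another user access to the owner's default server
r = requests.post(
    f"{hub_api}/shares/owner/",  # /shares/:owner/:server (empty name = default server)
    headers=headers,
    json={"user": "friend"},
)
r.raise_for_status()
share = r.json()  # the share model, including the granted scopes

# later: revoke exactly the scopes that were granted
requests.patch(
    f"{hub_api}/shares/owner/",
    headers=headers,
    json={"user": "friend", "scopes": share["scopes"]},
).raise_for_status()
```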
I support this change. In particular, as I mentioned in #3858, we currently have to restart the Hub to create new roles through the configuration file. Please let me know if I can help in any way.

@ktaletsk thanks! Do you only use that for granting access, not general-purpose runtime-defined roles? If so, you'll be a perfect tester for the APIs as they develop.

@minrk yes, only for granting access to one user's servers to a group of other users. Currently the access is granted using a role per user, and the audience for the sharing is defined by the group. The group can be updated at runtime, but the role currently cannot, necessitating a Hub restart every time a new user joins. Restarts add some instability to an otherwise perfectly stable Hub, as existing active users see outages and new users occasionally need to refresh the page to spawn their first server. I will keep an eye on this issue and test on our clusters.
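The static-role workaround described in the comment above can be illustrated with a short configuration sketch. This is a hypothetical `jupyterhub_config.py` fragment (the role, user, and group names are made up): `load_roles` is only read at Hub startup, which is why adding a new shared server requires a restart, while group membership can be changed at runtime.

```python
# Hypothetical jupyterhub_config.py sketch of the pre-"shares" workaround:
# one statically configured role per shared server, scoped to a group.
c.JupyterHub.load_roles = [
    {
        "name": "share-alice-server",             # hypothetical role name
        "scopes": ["access:servers!user=alice"],  # access to alice's servers only
        "groups": ["alice-collaborators"],        # audience managed as a group
    },
]

# Group membership can be updated at runtime (e.g. via the REST API),
# but adding a new role like the one above requires a Hub restart.
c.JupyterHub.load_groups = {
    "alice-collaborators": {"users": ["bob"]},
}
```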
2023-10-03T06:57:38Z
[]
[]
jupyterhub/jupyterhub
4,630
jupyterhub__jupyterhub-4630
[ "4629" ]
5e570f94b6f1ba508c52fef43eabcfb405e89401
diff --git a/jupyterhub/services/auth.py b/jupyterhub/services/auth.py --- a/jupyterhub/services/auth.py +++ b/jupyterhub/services/auth.py @@ -1079,19 +1079,30 @@ def hub_auth(self): def hub_auth(self, auth): self._hub_auth = auth + _hub_login_url = None + def get_login_url(self): """Return the Hub's login URL""" - login_url = self.hub_auth.login_url + if self._hub_login_url is not None: + # cached value, don't call this more than once per handler + return self._hub_login_url + # temporary override at setting level, + # to allow any subclass overrides of get_login_url to preserve their effect + # for example, APIHandler raises 403 to prevent redirects + with mock.patch.dict( + self.application.settings, {"login_url": self.hub_auth.login_url} + ): + login_url = super().get_login_url() + app_log.debug("Redirecting to login url: %s", login_url) + if isinstance(self.hub_auth, HubOAuth): # add state argument to OAuth url + # must do this _after_ allowing get_login_url to raise + # so we don't set unused cookies state = self.hub_auth.set_state_cookie(self, next_url=self.request.uri) login_url = url_concat(login_url, {'state': state}) - # temporary override at setting level, - # to allow any subclass overrides of get_login_url to preserve their effect - # for example, APIHandler raises 403 to prevent redirects - with mock.patch.dict(self.application.settings, {"login_url": login_url}): - app_log.debug("Redirecting to login url: %s", login_url) - return super().get_login_url() + self._hub_login_url = login_url + return login_url def check_hub_user(self, model): """Check whether Hub-authenticated user or service should be allowed. diff --git a/jupyterhub/singleuser/extension.py b/jupyterhub/singleuser/extension.py --- a/jupyterhub/singleuser/extension.py +++ b/jupyterhub/singleuser/extension.py @@ -130,22 +130,30 @@ def _default_hub_auth(self): def _patch_get_login_url(self, handler): original_get_login_url = handler.get_login_url + _hub_login_url = None + def get_login_url(): """Return the Hub's login URL, to begin login redirect""" - login_url = self.hub_auth.login_url - # add state argument to OAuth url - state = self.hub_auth.set_state_cookie( - handler, next_url=handler.request.uri - ) - login_url = url_concat(login_url, {'state': state}) - # temporary override at setting level, + nonlocal _hub_login_url + if _hub_login_url is not None: + # cached value, don't call this more than once per handler + return _hub_login_url + # temporary override at settings level, # to allow any subclass overrides of get_login_url to preserve their effect; # for example, APIHandler raises 403 to prevent redirects with mock.patch.dict( - handler.application.settings, {"login_url": login_url} + handler.application.settings, {"login_url": self.hub_auth.login_url} ): - self.log.debug("Redirecting to login url: %s", login_url) - return original_get_login_url() + login_url = original_get_login_url() + self.log.debug("Redirecting to login url: %s", login_url) + # add state argument to OAuth url + # must do this _after_ allowing get_login_url to raise + # so we don't set unused cookies + state = self.hub_auth.set_state_cookie( + handler, next_url=handler.request.uri + ) + _hub_login_url = url_concat(login_url, {'state': state}) + return _hub_login_url handler.get_login_url = get_login_url
diff --git a/jupyterhub/tests/test_singleuser.py b/jupyterhub/tests/test_singleuser.py --- a/jupyterhub/tests/test_singleuser.py +++ b/jupyterhub/tests/test_singleuser.py @@ -400,3 +400,16 @@ async def test_token_url_cookie(app, user, full_spawn): assert r.status_code == 200 await user.stop() + + +async def test_api_403_no_cookie(app, user, full_spawn): + """unused oauth cookies don't get set for failed requests to API handlers""" + await user.spawn() + await app.proxy.add_user(user) + url = url_path_join(public_url(app, user), "/api/contents/") + r = await async_requests.get(url, allow_redirects=False) + # 403, not redirect + assert r.status_code == 403 + # no state cookie set + assert not r.cookies + await user.stop()
singleuser sets unused oauth state cookie when not redirecting

Attempting to start oauth login too many times fills browser cookies, eventually ending in [431 headers too big](https://discourse.jupyter.org/t/error-431-headers-too-big-during-singleuser-oauth/21833/15).

Background:

- tornado's `@web.authenticated` checks if a user is authenticated, and redirects to `get_login_url()`
- jupyterlab makes lots of API requests, even while not logged in, which can't follow those redirects
- oauth state means that the login URL includes the oauth state, which is also stored in a cookie
- jupyter-server [overrides get_login_url](https://github.com/jupyter-server/jupyter_server/blob/v2.10.0/jupyter_server/base/handlers.py#L755-L762) for API requests to return 403 instead of redirecting
- HubOAuth _also_ [overrides get_login_url](https://github.com/jupyter-server/jupyter_server/blob/v2.10.0/jupyter_server/base/handlers.py#L755-L762) in order to inject the oauth state into the login URL

This combination means that when the login URL is _computed_, the oauth state cookie is set, even if the request doesn't actually get redirected, [producing logs like this](https://discourse.jupyter.org/t/error-431-headers-too-big-during-singleuser-oauth/21833/14):

```
2023-11-08T13:29:06.511Z | No user identified
2023-11-08T13:29:06.511Z | Detected unused OAuth state cookies
2023-11-08T13:29:06.511Z | Redirecting to login url: /hub/api/oauth2/authorize?client_id=jupyterhub-user-[user]&redirect_uri=%2Fuser%2F[user]%2Foauth_callback&response_type=code&state=[state]
2023-11-08T13:29:06.511Z | wrote error: ‘Forbidden’
2023-11-08T13:29:06.511Z | 403 GET /user/[user]/api/contents/notebooks/jump_detection/replay_jump_detection.ipynb?content=0&1699450145824 (@10.49.241.58) 2.80ms
```

What we need to do is avoid setting the oauth state cookie until _after_ we know the request will actually be redirected.

Best solution: move the cookie-setting step inside an overridden `redirect` method, to ensure it really only happens in the event of a redirect. Downside: I don't think I can solve this without breaking a public API in HubAuth (that the cookie is computed only by setting it).

Second-best solution that's doable without any breakage: append `state` to the login URL after computing it with `super()`. I think this should be pretty straightforward.
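The patch above takes essentially this second approach. As a condensed, non-verbatim sketch of that logic (a mixin-style stand-in, not the full implementation): compute the login URL via `super()` first, so subclass overrides such as the API handlers' 403 can raise before any cookie is touched, and only then set the state cookie and append `state`.

```python
from unittest import mock

from tornado.httputil import url_concat


class HubOAuthenticatedSketch:
    """Simplified stand-in for the patched mixin; not the full implementation."""

    _hub_login_url = None

    def get_login_url(self):
        if self._hub_login_url is not None:
            # cached: compute (and set the state cookie) at most once per handler
            return self._hub_login_url
        # let any subclass override run first (e.g. API handlers raise 403 here),
        # so failed API requests never reach the cookie-setting step below
        with mock.patch.dict(
            self.application.settings, {"login_url": self.hub_auth.login_url}
        ):
            login_url = super().get_login_url()
        # only reached when a redirect will really happen: safe to set the state cookie
        state = self.hub_auth.set_state_cookie(self, next_url=self.request.uri)
        self._hub_login_url = url_concat(login_url, {"state": state})
        return self._hub_login_url
```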
2023-11-13T09:17:02Z
[]
[]
jupyterhub/jupyterhub
4,701
jupyterhub__jupyterhub-4701
[ "4487" ]
dc234a79f0391c55e53603a7d9d0c15167798cda
diff --git a/jupyterhub/app.py b/jupyterhub/app.py --- a/jupyterhub/app.py +++ b/jupyterhub/app.py @@ -2077,6 +2077,9 @@ async def init_users(self): "auth_state is enabled, but encryption is not available: %s" % e ) + # give the authenticator a chance to check its own config + self.authenticator.check_allow_config() + if self.admin_users and not self.authenticator.admin_users: self.log.warning( "\nJupyterHub.admin_users is deprecated since version 0.7.2." @@ -2104,9 +2107,9 @@ async def init_users(self): new_users.append(user) else: user.admin = True + # the admin_users config variable will never be used after this point. # only the database values will be referenced. - allowed_users = [ self.authenticator.normalize_username(name) for name in self.authenticator.allowed_users @@ -2116,10 +2119,10 @@ async def init_users(self): if not self.authenticator.validate_username(username): raise ValueError("username %r is not valid" % username) - if not allowed_users: - self.log.info( - "Not using allowed_users. Any authenticated user will be allowed." - ) + if self.authenticator.allowed_users and self.authenticator.admin_users: + # make sure admin users are in the allowed_users set, if defined, + # otherwise they won't be able to login + self.authenticator.allowed_users |= self.authenticator.admin_users # add allowed users to the db for name in allowed_users: diff --git a/jupyterhub/auth.py b/jupyterhub/auth.py --- a/jupyterhub/auth.py +++ b/jupyterhub/auth.py @@ -121,6 +121,55 @@ def _deprecated_db(self): """ ).tag(config=True) + any_allow_config = Bool( + False, + help="""Is there any allow config? + + Used to show a warning if it looks like nobody can access the Hub, + which can happen when upgrading to JupyterHub 5, + now that `allow_all` defaults to False. + + Deployments can set this explicitly to True to suppress + the "No allow config found" warning. + + Will be True if any config tagged with `.tag(allow_config=True)` + or starts with `allow` is truthy. + + .. versionadded:: 5.0 + """, + ).tag(config=True) + + @default("any_allow_config") + def _default_any_allowed(self): + for trait_name, trait in self.traits(config=True).items(): + if trait.metadata.get("allow_config", False) or trait_name.startswith( + "allow" + ): + # this is only used for a helpful warning, so not the biggest deal if it's imperfect + if getattr(self, trait_name): + return True + return False + + def check_allow_config(self): + """Log a warning if no allow config can be found. + + Could get a false positive if _only_ unrecognized allow config is used. + Authenticators can apply `.tag(allow_config=True)` to label this config + to make sure it is found. + + Subclasses can override to perform additonal checks and warn about likely + authenticator configuration problems. + + .. versionadded:: 5.0 + """ + if not self.any_allow_config: + self.log.warning( + "No allow config found, it's possible that nobody can login to your Hub!\n" + "You can set `c.Authenticator.allow_all = True` to allow any user who can login to access the Hub,\n" + "or e.g. `allowed_users` to a set of users who should have access.\n" + "You may suppress this warning by setting c.Authenticator.any_allow_config = True." + ) + whitelist = Set( help="Deprecated, use `Authenticator.allowed_users`", config=True, @@ -132,7 +181,7 @@ def _deprecated_db(self): Use this to limit which authenticated users may login. Default behavior: only users in this set are allowed. - + If empty, does not perform any restriction, in which case any authenticated user is allowed. 
@@ -144,6 +193,83 @@ def _deprecated_db(self): """ ).tag(config=True) + allow_all = Bool( + False, + config=True, + help=""" + Allow every user who can successfully authenticate to access JupyterHub. + + False by default, which means for most Authenticators, + _some_ allow-related configuration is required to allow users to log in. + + Authenticator subclasses may override the default with e.g.:: + + @default("allow_all") + def _default_allow_all(self): + # if _any_ auth config (depends on the Authenticator) + if self.allowed_users or self.allowed_groups or self.allow_existing_users: + return False + else: + return True + + .. versionadded:: 5.0 + + .. versionchanged:: 5.0 + Prior to 5.0, `allow_all` wasn't defined on its own, + and was instead implicitly True when no allow config was provided, + i.e. `allowed_users` unspecified or empty on the base Authenticator class. + + To preserve pre-5.0 behavior, + set `allow_all = True` if you have no other allow configuration. + """, + ).tag(allow_config=True) + + allow_existing_users = Bool( + # dynamic default computed from allowed_users + config=True, + help=""" + Allow existing users to login. + + Defaults to True if `allowed_users` is set for historical reasons, and + False otherwise. + + With this enabled, all users present in the JupyterHub database are allowed to login. + This has the effect of any user who has _previously_ been allowed to login + via any means will continue to be allowed until the user is deleted via the /hub/admin page + or REST API. + + .. warning:: + + Before enabling this you should review the existing users in the + JupyterHub admin panel at `/hub/admin`. You may find users existing + there because they have previously been declared in config such as + `allowed_users` or allowed to sign in. + + .. warning:: + + When this is enabled and you wish to remove access for one or more + users previously allowed, you must make sure that they + are removed from the jupyterhub database. This can be tricky to do + if you stop allowing an externally managed group of users for example. + + With this enabled, JupyterHub admin users can visit `/hub/admin` or use + JupyterHub's REST API to add and remove users to manage who can login. + + .. versionadded:: 5.0 + """, + ).tag(allow_config=True) + + @default("allow_existing_users") + def _allow_existing_users_default(self): + """ + Computes the default value of allow_existing_users based on if + allowed_users to align with original behavior not introduce a breaking + change. + """ + if self.allowed_users: + return True + return False + blocked_users = Set( help=""" Set of usernames that are not allowed to log in. @@ -472,8 +598,7 @@ def check_allowed(self, username, authentication=None): web.HTTPError(403): Raising HTTPErrors directly allows customizing the message shown to the user. """ - if not self.allowed_users: - # No allowed set means any name is allowed + if self.allow_all: return True return username in self.allowed_users @@ -525,8 +650,9 @@ async def get_authenticated_user(self, handler, data): The various stages can be overridden separately: - `authenticate` turns formdata into a username - `normalize_username` normalizes the username - - `check_allowed` checks against the allowed usernames - `check_blocked_users` check against the blocked usernames + - `allow_all` is checked + - `check_allowed` checks against the allowed usernames - `is_admin` check if a user is an admin .. 
versionchanged:: 0.8 @@ -560,7 +686,11 @@ async def get_authenticated_user(self, handler, data): self.log.warning("User %r blocked. Stop authentication", username) return - allowed_pass = await maybe_future(self.check_allowed(username, authenticated)) + allowed_pass = self.allow_all + if not allowed_pass: + allowed_pass = await maybe_future( + self.check_allowed(username, authenticated) + ) if allowed_pass: if authenticated['admin'] is None: @@ -677,25 +807,31 @@ def add_user(self, user): """Hook called when a user is added to JupyterHub This is called: - - When a user first authenticates - - When the hub restarts, for all users. + - When a user first authenticates, _after_ all allow and block checks have passed + - When the hub restarts, for all users in the database (i.e. users previously allowed) + - When a user is added to the database, either via configuration or REST API This method may be a coroutine. - By default, this just adds the user to the allowed_users set. + By default, this adds the user to the allowed_users set if + allow_existing_users is true. - Subclasses may do more extensive things, such as adding actual unix users, + Subclasses may do more extensive things, such as creating actual system users, but they should call super to ensure the allowed_users set is updated. Note that this should be idempotent, since it is called whenever the hub restarts for all users. + .. versionchanged:: 5.0 + Now adds users to the allowed_users set if allow_all is False and allow_existing_users is True, + instead of if allowed_users is not empty. + Args: user (User): The User wrapper object """ if not self.validate_username(user.name): raise ValueError("Invalid username: %s" % user.name) - if self.allowed_users: + if self.allow_existing_users and not self.allow_all: self.allowed_users.add(user.name) def delete_user(self, user): @@ -902,23 +1038,16 @@ def _add_user_cmd_default(self): help=""" Allow login from all users in these UNIX groups. - If set, allowed username set is ignored. + .. versionchanged:: 5.0 + `allowed_groups` may be specified together with allowed_users, + to grant access by group OR name. """ - ).tag(config=True) - - @observe('allowed_groups') - def _allowed_groups_changed(self, change): - """Log a warning if mutually exclusive user and group allowed sets are specified.""" - if self.allowed_users: - self.log.warning( - "Ignoring Authenticator.allowed_users set because Authenticator.allowed_groups supplied!" - ) + ).tag(config=True, allow_config=True) def check_allowed(self, username, authentication=None): - if self.allowed_groups: - return self.check_allowed_groups(username, authentication) - else: - return super().check_allowed(username, authentication) + if self.check_allowed_groups(username, authentication): + return True + return super().check_allowed(username, authentication) def check_allowed_groups(self, username, authentication=None): """ @@ -1248,8 +1377,20 @@ class DummyAuthenticator(Authenticator): if it logs in with that password. .. versionadded:: 1.0 + + .. versionadded:: 5.0 + `allow_all` defaults to True, + preserving default behavior. """ + @default("allow_all") + def _allow_all_default(self): + if self.allowed_users: + return False + else: + # allow all by default + return True + password = Unicode( config=True, help=""" @@ -1259,6 +1400,12 @@ class DummyAuthenticator(Authenticator): """, ) + def check_allow_config(self): + super().check_allow_config() + self.log.warning( + f"Using testing authenticator {self.__class__.__name__}! 
This is not meant for production!" + ) + async def authenticate(self, handler, data): """Checks against a global password if it's been set. If not, allow any user/pass combo""" if self.password:
diff --git a/jupyterhub/tests/mocking.py b/jupyterhub/tests/mocking.py --- a/jupyterhub/tests/mocking.py +++ b/jupyterhub/tests/mocking.py @@ -243,6 +243,8 @@ def __init__(self, *args, **kwargs): cert_location = kwargs['internal_certs_location'] kwargs['external_certs'] = ssl_setup(cert_location, 'hub-ca') super().__init__(*args, **kwargs) + if 'allow_all' not in self.config.Authenticator: + self.config.Authenticator.allow_all = True @default('subdomain_host') def _subdomain_host_default(self): diff --git a/jupyterhub/tests/test_app.py b/jupyterhub/tests/test_app.py --- a/jupyterhub/tests/test_app.py +++ b/jupyterhub/tests/test_app.py @@ -475,6 +475,7 @@ async def test_user_creation(tmpdir, request): ] cfg = Config() + cfg.Authenticator.allow_all = False cfg.Authenticator.allowed_users = allowed_users cfg.JupyterHub.load_groups = groups cfg.JupyterHub.load_roles = roles diff --git a/jupyterhub/tests/test_auth.py b/jupyterhub/tests/test_auth.py --- a/jupyterhub/tests/test_auth.py +++ b/jupyterhub/tests/test_auth.py @@ -3,12 +3,13 @@ # Copyright (c) Jupyter Development Team. # Distributed under the terms of the Modified BSD License. import logging +from itertools import chain from unittest import mock from urllib.parse import urlparse import pytest from requests import HTTPError -from traitlets import Any +from traitlets import Any, Tuple from traitlets.config import Config from jupyterhub import auth, crypto, orm @@ -18,7 +19,7 @@ async def test_pam_auth(): - authenticator = MockPAMAuthenticator() + authenticator = MockPAMAuthenticator(allow_all=True) authorized = await authenticator.get_authenticated_user( None, {'username': 'match', 'password': 'match'} ) @@ -37,7 +38,7 @@ async def test_pam_auth(): async def test_pam_auth_account_check_disabled(): - authenticator = MockPAMAuthenticator(check_account=False) + authenticator = MockPAMAuthenticator(allow_all=True, check_account=False) authorized = await authenticator.get_authenticated_user( None, {'username': 'allowedmatch', 'password': 'allowedmatch'} ) @@ -82,7 +83,9 @@ def getgrouplist(name, group): return user_group_map[name] authenticator = MockPAMAuthenticator( - admin_groups={'jh_admins', 'wheel'}, admin_users={'override_admin'} + admin_groups={'jh_admins', 'wheel'}, + admin_users={'override_admin'}, + allow_all=True, ) # Check admin_group applies as expected @@ -141,7 +144,10 @@ def getgrouplist(name, group): async def test_pam_auth_allowed(): - authenticator = MockPAMAuthenticator(allowed_users={'wash', 'kaylee'}) + authenticator = MockPAMAuthenticator( + allowed_users={'wash', 'kaylee'}, allow_all=False + ) + authorized = await authenticator.get_authenticated_user( None, {'username': 'kaylee', 'password': 'kaylee'} ) @@ -162,7 +168,7 @@ async def test_pam_auth_allowed_groups(): def getgrnam(name): return MockStructGroup('grp', ['kaylee']) - authenticator = MockPAMAuthenticator(allowed_groups={'group'}) + authenticator = MockPAMAuthenticator(allowed_groups={'group'}, allow_all=False) with mock.patch.object(authenticator, '_getgrnam', getgrnam): authorized = await authenticator.get_authenticated_user( @@ -179,14 +185,14 @@ def getgrnam(name): async def test_pam_auth_blocked(): # Null case compared to next case - authenticator = MockPAMAuthenticator() + authenticator = MockPAMAuthenticator(allow_all=True) authorized = await authenticator.get_authenticated_user( None, {'username': 'wash', 'password': 'wash'} ) assert authorized['name'] == 'wash' - # Blacklist basics - authenticator = MockPAMAuthenticator(blocked_users={'wash'}) + # 
Blocklist basics + authenticator = MockPAMAuthenticator(blocked_users={'wash'}, allow_all=True) authorized = await authenticator.get_authenticated_user( None, {'username': 'wash', 'password': 'wash'} ) @@ -194,7 +200,9 @@ async def test_pam_auth_blocked(): # User in both allowed and blocked: default deny. Make error someday? authenticator = MockPAMAuthenticator( - blocked_users={'wash'}, allowed_users={'wash', 'kaylee'} + blocked_users={'wash'}, + allowed_users={'wash', 'kaylee'}, + allow_all=True, ) authorized = await authenticator.get_authenticated_user( None, {'username': 'wash', 'password': 'wash'} @@ -203,7 +211,8 @@ async def test_pam_auth_blocked(): # User not in blocked set can log in authenticator = MockPAMAuthenticator( - blocked_users={'wash'}, allowed_users={'wash', 'kaylee'} + blocked_users={'wash'}, + allowed_users={'wash', 'kaylee'}, ) authorized = await authenticator.get_authenticated_user( None, {'username': 'kaylee', 'password': 'kaylee'} @@ -221,7 +230,8 @@ async def test_pam_auth_blocked(): # User in neither list authenticator = MockPAMAuthenticator( - blocked_users={'mal'}, allowed_users={'wash', 'kaylee'} + blocked_users={'mal'}, + allowed_users={'wash', 'kaylee'}, ) authorized = await authenticator.get_authenticated_user( None, {'username': 'simon', 'password': 'simon'} @@ -257,7 +267,9 @@ def deprecated_xlist(self, username): async def test_pam_auth_no_such_group(): - authenticator = MockPAMAuthenticator(allowed_groups={'nosuchcrazygroup'}) + authenticator = MockPAMAuthenticator( + allowed_groups={'nosuchcrazygroup'}, + ) authorized = await authenticator.get_authenticated_user( None, {'username': 'kaylee', 'password': 'kaylee'} ) @@ -405,7 +417,7 @@ async def test_auth_state_disabled(app, auth_state_unavailable): async def test_normalize_names(): - a = MockPAMAuthenticator() + a = MockPAMAuthenticator(allow_all=True) authorized = await a.get_authenticated_user( None, {'username': 'ZOE', 'password': 'ZOE'} ) @@ -428,7 +440,7 @@ async def test_normalize_names(): async def test_username_map(): - a = MockPAMAuthenticator(username_map={'wash': 'alpha'}) + a = MockPAMAuthenticator(username_map={'wash': 'alpha'}, allow_all=True) authorized = await a.get_authenticated_user( None, {'username': 'WASH', 'password': 'WASH'} ) @@ -458,7 +470,7 @@ def test_auth_hook(authenticator, handler, authentication): authentication['testkey'] = 'testvalue' return authentication - a = MockPAMAuthenticator(post_auth_hook=test_auth_hook) + a = MockPAMAuthenticator(allow_all=True, post_auth_hook=test_auth_hook) authorized = await a.get_authenticated_user( None, {'username': 'test_user', 'password': 'test_user'} @@ -566,6 +578,7 @@ async def test_auth_managed_groups( parent=app, authenticated_groups=authenticated_groups, refresh_groups=refresh_groups, + allow_all=True, ) user.groups.append(group) @@ -593,3 +606,193 @@ async def test_auth_managed_groups( assert not app.db.dirty groups = sorted(g.name for g in user.groups) assert groups == expected_refresh_groups + + [email protected]( + "allowed_users, allow_existing_users", + [ + ('specified', True), + ('', False), + ], +) +async def test_allow_defaults(app, user, allowed_users, allow_existing_users): + if allowed_users: + allowed_users = set(allowed_users.split(',')) + else: + allowed_users = set() + authenticator = auth.Authenticator(allowed_users=allowed_users) + authenticator.authenticate = lambda handler, data: data["username"] + assert authenticator.allow_all is False + assert authenticator.allow_existing_users == allow_existing_users + + 
# user was already in the database + # this happens during hub startup + authenticator.add_user(user) + if allowed_users: + assert user.name in authenticator.allowed_users + else: + authenticator.allowed_users == set() + + specified_allowed = await authenticator.get_authenticated_user( + None, {"username": "specified"} + ) + if "specified" in allowed_users: + assert specified_allowed is not None + else: + assert specified_allowed is None + + existing_allowed = await authenticator.get_authenticated_user( + None, {"username": user.name} + ) + if allow_existing_users: + assert existing_allowed is not None + else: + assert existing_allowed is None + + [email protected]("allow_all", [None, True, False]) [email protected]("allow_existing_users", [None, True, False]) [email protected]("allowed_users", ["existing", ""]) +def test_allow_existing_users( + app, user, allowed_users, allow_all, allow_existing_users +): + if allowed_users: + allowed_users = set(allowed_users.split(',')) + else: + allowed_users = set() + authenticator = auth.Authenticator( + allowed_users=allowed_users, + ) + if allow_all is None: + # default allow_all + allow_all = authenticator.allow_all + else: + authenticator.allow_all = allow_all + if allow_existing_users is None: + # default allow_all + allow_existing_users = authenticator.allow_existing_users + else: + authenticator.allow_existing_users = allow_existing_users + + # first, nobody in the database + assert authenticator.check_allowed("newuser") == allow_all + + # user was already in the database + # this happens during hub startup + authenticator.add_user(user) + if allow_existing_users or allow_all: + assert authenticator.check_allowed(user.name) + else: + assert not authenticator.check_allowed(user.name) + for username in allowed_users: + assert authenticator.check_allowed(username) + + assert authenticator.check_allowed("newuser") == allow_all + + [email protected]("allow_all", [True, False]) [email protected]("allow_existing_users", [True, False]) +def test_allow_existing_users_first_time(user, allow_all, allow_existing_users): + # make sure that calling add_user doesn't change results + authenticator = auth.Authenticator( + allow_all=allow_all, + allow_existing_users=allow_existing_users, + ) + allowed_before_one = authenticator.check_allowed(user.name) + allowed_before_two = authenticator.check_allowed("newuser") + # add_user is called after successful login + # it shouldn't change results (e.g. by switching .allowed_users from empty to non-empty) + if allowed_before_one: + authenticator.add_user(user) + assert authenticator.check_allowed(user.name) == allowed_before_one + assert authenticator.check_allowed("newuser") == allowed_before_two + + +class AllowAllIgnoringAuthenticator(auth.Authenticator): + """Test authenticator with custom check_allowed + + not updated for allow_all, allow_existing_users + + Make sure new config doesn't break backward-compatibility + or grant unintended access for Authenticators written before JupyterHub 5. + """ + + allowed_letters = Tuple(config=True, help="Initial letters to allow") + + def authenticate(self, handler, data): + return {"name": data["username"]} + + def check_allowed(self, username, auth=None): + if not self.allowed_users and not self.allowed_letters: + # this subclass doesn't know about the JupyterHub 5 allow_all config + # no allow config, allow all! 
+ return True + if self.allowed_users and username in self.allowed_users: + return True + if self.allowed_letters and username.startswith(self.allowed_letters): + return True + return False + + +# allow_all is not recognized by Authenticator subclass +# make sure it doesn't make anything more permissive, at least [email protected]("allow_all", [True, False]) [email protected]( + "allowed_users, allowed_letters, allow_existing_users, allowed, not_allowed", + [ + ("", "", None, "anyone,should-be,allowed,existing", ""), + ("", "a,b", None, "alice,bebe", "existing,other"), + ("", "a,b", False, "alice,bebe", "existing,other"), + ("", "a,b", True, "alice,bebe,existing", "other"), + ("specified", "a,b", None, "specified,alice,bebe,existing", "other"), + ("specified", "a,b", False, "specified,alice,bebe", "existing,other"), + ("specified", "a,b", True, "specified,alice,bebe,existing", "other"), + ], +) +async def test_authenticator_without_allow_all( + app, + allowed_users, + allowed_letters, + allow_existing_users, + allowed, + not_allowed, + allow_all, +): + kwargs = {} + if allow_all is not None: + kwargs["allow_all"] = allow_all + if allow_existing_users is not None: + kwargs["allow_existing_users"] = allow_existing_users + if allowed_users: + kwargs["allowed_users"] = set(allowed_users.split(',')) + if allowed_letters: + kwargs["allowed_letters"] = tuple(allowed_letters.split(',')) + + authenticator = AllowAllIgnoringAuthenticator(**kwargs) + + # load one user from db + existing_user = add_user(app.db, app, name="existing") + authenticator.add_user(existing_user) + + if allowed: + allowed = allowed.split(",") + if not_allowed: + not_allowed = not_allowed.split(",") + + expected_allowed = sorted(allowed) + expected_not_allowed = sorted(not_allowed) + to_check = list(chain(expected_allowed, expected_not_allowed)) + if allow_all: + expected_allowed = to_check + expected_not_allowed = [] + + are_allowed = [] + are_not_allowed = [] + for username in to_check: + if await authenticator.get_authenticated_user(None, {"username": username}): + are_allowed.append(username) + else: + are_not_allowed.append(username) + + assert are_allowed == expected_allowed + assert are_not_allowed == expected_not_allowed
Ideas for new authenticator config

I agreed with @minrk in the recent June jupyterhub team meeting to describe an idea for new config without coupling it to suggestions about how it should be implemented. I want directly configured behaviors instead of behavior that follows from whether `allowed_users` is truthy or falsy. Currently, the following behaviors can follow from declaring `allowed_users`:

1. Declaring `allowed_users` truthy currently [implies allowing existing users](https://github.com/jupyterhub/jupyterhub/blob/4.0.0/jupyterhub/auth.py#L647-L648) to login as well.
2. Declaring `allowed_users` falsy can, for some authenticators in certain configurations, imply allowing all authenticated users to login.

I suggest new authentication config like the below to provide a way to directly configure the otherwise implied behavior:

1. `allow_existing_users`
2. `allow_all`

I think such new config could make:

- a deployment's JupyterHub config easier to understand without reading the docs
- it easier to provide as secure by default
- it easier to document, maintain, and implement

## Related

- Discussion related to `allow_existing_users`: #4483
- Discussion related to `allow_all`: #4484
- Discussion related to defaulting to not allowing existing users in OAuthenticator, introducing `allow_existing_users` and defaulting to False: https://github.com/jupyterhub/oauthenticator/issues/619
- Discussion related to defaulting to not allowing all users in OAuthenticator when no user is explicitly allowed, introducing `allow_all` and defaulting to False: https://github.com/jupyterhub/oauthenticator/issues/620
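To make the proposal concrete, here is a minimal sketch of what such deployment config might look like, assuming the option names land on the base `Authenticator` as proposed (the patch above does add `Authenticator.allow_all` and `Authenticator.allow_existing_users` with these semantics):

```python
# jupyterhub_config.py -- sketch of the proposed explicit allow configuration
c = get_config()  # noqa

# explicitly allow any user who can authenticate
# (previously implied by an empty allowed_users set on some authenticators)
c.Authenticator.allow_all = False

# explicitly allow users already present in JupyterHub's database
# (previously implied whenever allowed_users was non-empty)
c.Authenticator.allow_existing_users = True

# the explicit allow list keeps working alongside the new options
c.Authenticator.allowed_users = {"alice", "bob"}
```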
Is this a separate issue? It seems to be a summary of the two already existing issues, and there are no additional actions to take or separate conversation to have. Do you want to close the other two and merge the discussion here?

I opened this understanding that you wanted to take a step back and think about what we want to accomplish, and how, from the perspective of what config to provide, separated from the technical "how" that the other issues had started considering. This is highly coupled to all issues linked under the Related heading, and I'm not sure how to organize the discussion. =/

I think your proposal is solid, and we have a good path forward on the issues you've linked, individually. Maybe here is a good place to try to get a high-level overview of the use cases we want to make sure work, and what their config looks like, so we have something that won't get auto-closed by PRs addressing individual issues. I think it's appropriate to start at a high level and say "I want these user login/creation patterns to work, and I think the config should look like this", ignoring (at first) how everything actually works. Then we can try to fill the gap between what we have and what we want. For example:

- fully deferred configuration to an Authenticator's authentication mechanism (e.g. PAM, LDAP, default behavior)
- users managed via the API (e.g. most target uses of nativeauthenticator)
- always-config-specified users and groups (revoking membership revokes permission - `allow_existing_users`)
- documenting that `allowed_users`, when combined with other allow mechanisms, should be a _union_ instead of an intersection (an in-progress implementation change in oauthenticator; it can be part of the 'spec' in the base Authenticator, though implementation is still up to subclasses). Maybe some API changes in the base class can make this easier to implement in a consistent way.
- pre-populating a user list while also allowing future logins

Further related questions that might help (or might not):

- do we need to more explicitly separate authentication from authorization? We have historically conflated these two, but we do now have _granular_ authorization in role membership. What there isn't currently (for the most part) is a separation between an _authenticated_ user and membership in the `user` role, despite the fact that it's technically possible. There aren't easy ways to make it happen.

There's an open PR adding some authorisation support (user/admin/none) for Azure: https://github.com/jupyterhub/oauthenticator/pull/446 So there's clearly demand for it. I think we should come up with a position regarding authorisation, even if it's that we can't support it, to provide clarity to developers.

We do already have support for logging in and being granted a variety of permission levels (JupyterHub supports it; authenticators must implement the mapping within authenticate and can return things like group membership). I don't yet see any demand for allowing users to complete authentication _at the jupyterhub level_ without granting at least `user`-level permission.
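One way to picture the "union" semantics discussed above is a custom authenticator whose `check_allowed` passes if any allow mechanism matches. A minimal sketch follows; the `allowed_teams` trait and the `teams` key in `auth_state` are hypothetical stand-ins for an authenticator-specific mechanism such as `allowed_groups`:

```python
from traitlets import Set

from jupyterhub.auth import Authenticator


class TeamAuthenticator(Authenticator):
    """Illustrative sketch only, not a real authenticator."""

    # hypothetical extra allow mechanism for this sketch
    allowed_teams = Set(config=True, help="Teams whose members may log in")

    def check_allowed(self, username, authentication=None):
        if self.allow_all:
            # explicit "allow everyone" short-circuits all other checks
            return True
        if username in self.allowed_users:
            # allowed_users is one member of the union, not an intersection requirement
            return True
        auth_state = (authentication or {}).get("auth_state") or {}
        teams = set(auth_state.get("teams", []))
        # allowed if *any* allow mechanism matches
        return bool(teams & self.allowed_teams)
```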
2024-02-10T15:05:52Z
[]
[]
jupyterhub/jupyterhub
4,713
jupyterhub__jupyterhub-4713
[ "3804" ]
da128fb99be652134e427262c7189f68bcfef0d6
diff --git a/jupyterhub/singleuser/_disable_user_config.py b/jupyterhub/singleuser/_disable_user_config.py --- a/jupyterhub/singleuser/_disable_user_config.py +++ b/jupyterhub/singleuser/_disable_user_config.py @@ -20,19 +20,35 @@ """ import os +from pathlib import Path from jupyter_core import paths +def _is_relative_to(path, prefix): + """ + Backport Path.is_relative_to for Python < 3.9 + + added in Python 3.9 + """ + if hasattr(path, "is_relative_to"): + # Python >= 3.9 + return path.is_relative_to(prefix) + else: + return path == prefix or prefix in path.parents + + def _exclude_home(path_list): """Filter out any entries in a path list that are in my home directory. Used to disable per-user configuration. """ - home = os.path.expanduser('~/') - for p in path_list: - if not p.startswith(home): - yield p + # resolve paths before comparison + # so we do the right thing when $HOME is a symlink + home = Path.home().resolve() + for path in path_list: + if not _is_relative_to(Path(path).resolve(), home): + yield path # record patches
diff --git a/jupyterhub/tests/test_singleuser.py b/jupyterhub/tests/test_singleuser.py --- a/jupyterhub/tests/test_singleuser.py +++ b/jupyterhub/tests/test_singleuser.py @@ -3,6 +3,7 @@ import os import sys from contextlib import nullcontext +from pathlib import Path from pprint import pprint from subprocess import CalledProcessError, check_output from unittest import mock @@ -145,7 +146,7 @@ async def test_singleuser_auth( @pytest.mark.skipif( IS_JUPYVERSE, reason="jupyverse doesn't look up directories for configuration files" ) -async def test_disable_user_config(request, app, tmpdir, full_spawn): +async def test_disable_user_config(request, app, tmp_path, full_spawn): # login, start the server cookies = await app.login_user('nandy') user = app.users['nandy'] @@ -156,14 +157,21 @@ async def test_disable_user_config(request, app, tmpdir, full_spawn): # start with new config: user.spawner.debug = True user.spawner.disable_user_config = True - home_dir = tmpdir.join("home") - home_dir.mkdir() + user.spawner.default_url = "/jupyterhub-test-info" + + # make sure it's resolved to start + tmp_path = tmp_path.resolve() + real_home_dir = tmp_path / "realhome" + real_home_dir.mkdir() + # make symlink to test resolution + home_dir = tmp_path / "home" + home_dir.symlink_to(real_home_dir) # home_dir is defined on SimpleSpawner - user.spawner.home_dir = home = str(home_dir) - jupyter_config_dir = home_dir.join(".jupyter") + user.spawner.home_dir = str(home_dir) + jupyter_config_dir = home_dir / ".jupyter" jupyter_config_dir.mkdir() # verify config paths - with jupyter_config_dir.join("jupyter_server_config.py").open("w") as f: + with (jupyter_config_dir / "jupyter_server_config.py").open("w") as f: f.write("c.TestSingleUser.jupyter_config_py = True") await user.spawn() @@ -174,29 +182,22 @@ async def test_disable_user_config(request, app, tmpdir, full_spawn): # with cookies, login successful r = await async_requests.get(url, cookies=cookies) r.raise_for_status() - assert r.url.rstrip('/').endswith( - url_path_join('/user/nandy', user.spawner.default_url or "/tree") - ) + assert r.url.endswith('/user/nandy/jupyterhub-test-info') assert r.status_code == 200 - - r = await async_requests.get( - url_path_join(public_url(app, user), 'jupyterhub-test-info'), cookies=cookies - ) - r.raise_for_status() info = r.json() pprint(info) assert info['disable_user_config'] server_config = info['config'] settings = info['settings'] assert 'TestSingleUser' not in server_config - # check config paths - norm_home = os.path.realpath(os.path.abspath(home)) + # check against tmp_path, the parent of both our home directories + # (symlink and real) def assert_not_in_home(path, name): - path = os.path.realpath(os.path.abspath(path)) - assert not path.startswith( - norm_home + os.path.sep - ), f"{name}: {path} is in home {norm_home}" + path = Path(path).resolve() + assert not (str(path) + os.path.sep).startswith( + str(tmp_path) + os.path.sep + ), f"{name}: {path} is in home {tmp_path}" for path in info['config_file_paths']: assert_not_in_home(path, 'config_file_paths')
Unable to disable user config with Jupyter Server

The hub administrator is supposed to be able to prevent per-user notebook configuration scripts from running by setting

```
c.Spawner.disable_user_config = True
```

in the `jupyterhub_config.py` config. This sets the environment variable `JUPYTERHUB_DISABLE_USER_CONFIG=1` for the spawned notebook server. However, this seems to be ignored.

<details>
<summary>Using this Dockerfile</summary>

```
FROM jupyterhub/jupyterhub:2
RUN python3 -m pip install --no-cache jupyterlab
RUN \
    adduser -q --gecos "" --disabled-password user1 && \
    echo user1:user1 | chpasswd
ADD jupyterhub_config.py .
RUN mkdir -p /home/user1/.jupyter
ADD jupyter_notebook_config.py /home/user1/.jupyter/.
RUN chown -R user1:user1 /home/user1/.jupyter
CMD ["jupyterhub"]
```

</details>

<details><summary>with this `jupyterhub_config.py` and example notebook config for `user1`:</summary>

```
c.Spawner.disable_user_config = True
```

```
import os
print("HELLO FROM THE NOTEBOOK CONFIG")
print(os.getenv("JUPYTERHUB_DISABLE_USER_CONFIG"))
c.ServerApp.shutdown_no_activity_timeout = 600
c.MappingKernelManager.cull_idle_timeout = 600
c.TerminalManager.cull_inactive_timeout = 600
```

</details>

I see the "HELLO" message and the value 1 printed when the notebook starts up, and the timeout message indicating that my config setting is in effect:

```
[I 2022-02-22 22:35:23.167 SingleUserLabApp serverapp:2161] Will shut down after 600 seconds with no kernels or terminals.
```

Am I misunderstanding exactly what config files are excluded? I see there's a test for this, but I wonder whether it is actually verifying that the config is being ignored.
It looks like `disable_user_config` modifies the config search path by overriding some methods in a mixin: https://github.com/jupyterhub/jupyterhub/blob/874c91a086345246044ab70d6b5a8c371db89cd2/jupyterhub/singleuser/mixins.py#L434-L456 which is eventually mixed with the actual singleuser app: https://github.com/jupyterhub/jupyterhub/blob/874c91a086345246044ab70d6b5a8c371db89cd2/jupyterhub/singleuser/mixins.py#L870 https://github.com/jupyterhub/jupyterhub/blob/874c91a086345246044ab70d6b5a8c371db89cd2/jupyterhub/singleuser/app.py#L41 I spent a few minutes digging into https://github.com/jupyter-server/jupyter_server and https://github.com/jupyter/jupyter_core to see if they might be bypassing the above overridden methods by loading config in a different function but got very lost in the code! It's related to the interaction between JupyterHub and JupyterServer. The config is correctly ignored in `NotebookApp` (default in 1.5.0) but not `ServerApp` (default in 2.0.0): | JupyterHub version | `JUPYTERHUB_SINGLEUSER_APP` | `c.Spawner.disable_user_config = True` | |---|---|---| | 1.5.0 | notebook.notebookapp.NotebookApp | :heavy_check_mark: | | 1.5.0 | jupyter_server.serverapp.ServerApp | ❌ | | 2.0.0 | notebook.notebookapp.NotebookApp | :heavy_check_mark: | | 2.0.0 | jupyter_server.serverapp.ServerApp | ❌ | | 2.1.1 | notebook.notebookapp.NotebookApp | :heavy_check_mark: | | 2.1.1 | jupyter_server.serverapp.ServerApp | ❌ | @manics you are amazing! Thanks for all your investigative work!! @rcthomas beautifully summarized issue as well!! :tada: :heart: Thanks @manics, did you look further into `jupyter_server` and get a sense of what happened or would it make sense for us to open an issue over there? I haven't looked any further. @blink1073 any chance you could give us some help? It looks like [SingleUserLabApp](https://github.com/jupyterlab/jupyterlab/blob/886b2ab9999dd281d47e9970f60e7d3398e7020f/jupyterlab/labhubapp.py#L27) needs to add handling of [`disable_user_config`](https://github.com/jupyterhub/jupyterhub/blob/3ed345f4960f083b2d35c72b330254aa3cf31fe9/jupyterhub/singleuser/mixins.py#L370). cc @fcollonval @jtpio @blink1073 shouldn't it be `jupyter_sever.serverapp.ServerApp` as `SingleUserLabApp` is inheriting from it (as well as other alternative app)? I don't think so, this is Hub-level only. cc @Zsailer @blink1073 thanks for the clues..... It looks like Jupyter-server calls `jupyter_config_path()` https://github.com/jupyter-server/jupyter_server/blob/62607ce28576e50358217534161f9e7c4aab2611/jupyter_server/serverapp.py#L2142-L2157 which is defined in https://github.com/jupyter/jupyter_core/blob/5ae115928f57ae55928d217f9221c68ef188153c/jupyter_core/paths.py#L213-L261 This could be relevant: ```py if os.environ.get('JUPYTER_NO_CONFIG'): # jupyter_config_dir makes a blank config when JUPYTER_NO_CONFIG is set. return [jupyter_config_dir()] ``` So maybe setting `JUPYTER_NO_CONFIG=1` in the spawner environment will disable the user config? > So maybe setting JUPYTER_NO_CONFIG=1 in the spawner environment will disable the user config? But that will block configuration from the environment too, isn't it? That is probably too broad. I think it's `nbclassic` that's loading this config (maybe notebook_shim? Unclear to me.), but technically any ExtensionApp could introduce the same problem. The general issue is that each ExtensionApp may load its own config, have its own paths to search, and the patches applied by JupyterHub to the base ServerApp do not apply to extensions. 
When we put together JupyterHub, all Applications were a single entrypoint for loading config, but ExtensionApps make this quite a lot more complicated. They are much more independent, so it's not feasible to patch them all. https://github.com/jupyter-server/jupyter_server/pull/715 solves two problems: 1. locating server extensions ignored config_file_paths, reimplementing the same logic in a way that cannot be overridden (the original notebookapp does this, too, for server extensions) 2. ExtensionApps ignore the parent's config file paths I think just number 2 solves this particular case, because the nbclassic ExtensionApp would inherit config_file_path. Maybe we also need to special-case patches for nbclassic for other properties. Perhaps the 'no_user_config' implementation really belongs in ServerApp (or even jupyter_core)? It really is jupyterhub-specific, but server extensions make it challenging to exert the control we need. I really want to move `jupyterhub-singleuser` to just _be_ a jupyter server extension, rather than a wrapper, but right now that's hard (especially while keeping support for NotebookApp).
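For reference, the fix that eventually landed (the `_disable_user_config.py` patch shown earlier) boils down to careful path filtering; a minimal standalone sketch of that idea, resolving symlinks on both sides before comparing against the home directory:

```python
from pathlib import Path


def exclude_home(paths):
    """Yield only paths that are not inside the user's (resolved) home directory.

    Mirrors the `_exclude_home` helper in the patch above: resolving both sides
    first means a symlinked $HOME is still excluded correctly.
    """
    home = Path.home().resolve()
    for p in paths:
        resolved = Path(p).resolve()
        if resolved != home and home not in resolved.parents:
            yield p


# example: drop the per-user config dir from a config search path
search_path = [str(Path.home() / ".jupyter"), "/usr/local/etc/jupyter", "/etc/jupyter"]
print(list(exclude_home(search_path)))  # the home entry is filtered out
```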
2024-02-27T12:27:34Z
[]
[]
jupyterhub/jupyterhub
4,720
jupyterhub__jupyterhub-4720
[ "4709" ]
9c3f98d4273c7ebc7a4d8bc5e05dc0d97c4e4d3e
diff --git a/setup.py b/setup.py --- a/setup.py +++ b/setup.py @@ -215,13 +215,6 @@ def run(self): shell=shell, ) - print("Copying JSX admin app to static/js") - check_call( - ["npm", "run", "place"], - cwd=self.jsx_dir, - shell=shell, - ) - # update data-files in case this created new files self.distribution.data_files = get_data_files() assert not self.should_run(), 'JSX.run failed'
diff --git a/jsx/src/components/Groups/Groups.test.js b/jsx/src/components/Groups/Groups.test.js --- a/jsx/src/components/Groups/Groups.test.js +++ b/jsx/src/components/Groups/Groups.test.js @@ -2,9 +2,9 @@ import React from "react"; import "@testing-library/jest-dom"; import { act } from "react-dom/test-utils"; import { render, screen, fireEvent } from "@testing-library/react"; -import { Provider, useDispatch, useSelector } from "react-redux"; +import { Provider, useSelector } from "react-redux"; import { createStore } from "redux"; -import { HashRouter } from "react-router-dom"; +import { HashRouter, useSearchParams } from "react-router-dom"; // eslint-disable-next-line import regeneratorRuntime from "regenerator-runtime"; @@ -16,6 +16,11 @@ jest.mock("react-redux", () => ({ useSelector: jest.fn(), })); +jest.mock("react-router-dom", () => ({ + ...jest.requireActual("react-router-dom"), + useSearchParams: jest.fn(), +})); + var mockAsync = () => jest.fn().mockImplementation(() => Promise.resolve({ key: "value" })); @@ -50,11 +55,6 @@ var mockAppState = () => offset: 0, limit: 2, total: 4, - next: { - offset: 2, - limit: 2, - url: "http://localhost:8000/hub/api/groups?offset=2&limit=2", - }, }, }); @@ -62,11 +62,15 @@ beforeEach(() => { useSelector.mockImplementation((callback) => { return callback(mockAppState()); }); + useSearchParams.mockImplementation(() => { + return [new URLSearchParams(), jest.fn()]; + }); }); afterEach(() => { useSelector.mockClear(); mockReducers.mockClear(); + useSearchParams.mockClear(); }); test("Renders", async () => { @@ -109,13 +113,23 @@ test("Renders nothing if required data is not available", async () => { }); test("Interacting with PaginationFooter causes state update and refresh via useEffect call", async () => { - let callbackSpy = mockAsync(); - + let upgradeGroupsSpy = mockAsync(); + let setSearchParamsSpy = mockAsync(); + let searchParams = new URLSearchParams({ limit: "2" }); + useSearchParams.mockImplementation(() => [ + searchParams, + (callback) => { + searchParams = callback(searchParams); + setSearchParamsSpy(searchParams.toString()); + }, + ]); + let _, setSearchParams; await act(async () => { - render(groupsJsx(callbackSpy)); + render(groupsJsx(upgradeGroupsSpy)); + [_, setSearchParams] = useSearchParams(); }); - expect(callbackSpy).toBeCalledWith(0, 2); + expect(upgradeGroupsSpy).toBeCalledWith(0, 2); var lastState = mockReducers.mock.results[mockReducers.mock.results.length - 1].value; @@ -123,12 +137,10 @@ test("Interacting with PaginationFooter causes state update and refresh via useE expect(lastState.groups_page.limit).toEqual(2); let next = screen.getByTestId("paginate-next"); - fireEvent.click(next); - - lastState = - mockReducers.mock.results[mockReducers.mock.results.length - 1].value; - expect(lastState.groups_page.offset).toEqual(2); - expect(lastState.groups_page.limit).toEqual(2); + await act(async () => { + fireEvent.click(next); + }); + expect(setSearchParamsSpy).toBeCalledWith("limit=2&offset=2"); // FIXME: mocked useSelector, state seem to prevent updateGroups from being called // making the test environment not representative diff --git a/jsx/src/components/ServerDashboard/ServerDashboard.test.js b/jsx/src/components/ServerDashboard/ServerDashboard.test.js --- a/jsx/src/components/ServerDashboard/ServerDashboard.test.js +++ b/jsx/src/components/ServerDashboard/ServerDashboard.test.js @@ -1,4 +1,5 @@ import React from "react"; +import { withProps } from "recompose"; import "@testing-library/jest-dom"; import { act } from 
"react-dom/test-utils"; import userEvent from "@testing-library/user-event"; @@ -9,7 +10,8 @@ import { getByText, getAllByRole, } from "@testing-library/react"; -import { HashRouter, Switch } from "react-router-dom"; +import { HashRouter, Routes, Route, useSearchParams } from "react-router-dom"; +// import { CompatRouter, } from "react-router-dom-v5-compat"; import { Provider, useSelector } from "react-redux"; import { createStore } from "redux"; // eslint-disable-next-line @@ -17,31 +19,35 @@ import regeneratorRuntime from "regenerator-runtime"; import ServerDashboard from "./ServerDashboard"; import { initialState, reducers } from "../../Store"; -import * as sinon from "sinon"; - -let clock; jest.mock("react-redux", () => ({ ...jest.requireActual("react-redux"), useSelector: jest.fn(), })); +jest.mock("react-router-dom", () => ({ + ...jest.requireActual("react-router-dom"), + useSearchParams: jest.fn(), +})); -var serverDashboardJsx = (spy) => ( - <Provider store={createStore(mockReducers, mockAppState())}> - <HashRouter> - <Switch> - <ServerDashboard - updateUsers={spy} - shutdownHub={spy} - startServer={spy} - stopServer={spy} - startAll={spy} - stopAll={spy} - /> - </Switch> - </HashRouter> - </Provider> -); +const serverDashboardJsx = (props) => { + // create mock ServerDashboard + // spies is a dict of properties to mock in + // any API calls that will fire during the test should be mocked + props = props || {}; + const defaultSpy = mockAsync(); + if (!props.updateUsers) { + props.updateUsers = defaultSpy; + } + return ( + <Provider store={createStore(mockReducers, mockAppState())}> + <HashRouter> + <Routes> + <Route path="/" element={withProps(props)(ServerDashboard)()} /> + </Routes> + </HashRouter> + </Provider> + ); +}; var mockAsync = (data) => jest.fn().mockImplementation(() => Promise.resolve(data ? 
data : { k: "v" })); @@ -137,34 +143,41 @@ var mockReducers = jest.fn((state, action) => { return state; }); +let searchParams = new URLSearchParams(); + beforeEach(() => { - clock = sinon.useFakeTimers(); + jest.useFakeTimers(); useSelector.mockImplementation((callback) => { return callback(mockAppState()); }); + searchParams = new URLSearchParams(); + + useSearchParams.mockImplementation(() => [ + searchParams, + (callback) => { + searchParams = callback(searchParams); + }, + ]); }); afterEach(() => { + useSearchParams.mockClear(); useSelector.mockClear(); mockReducers.mockClear(); - clock.restore(); + jest.runAllTimers(); }); test("Renders", async () => { - let callbackSpy = mockAsync(); - await act(async () => { - render(serverDashboardJsx(callbackSpy)); + render(serverDashboardJsx()); }); expect(screen.getByTestId("container")).toBeVisible(); }); test("Renders users from props.user_data into table", async () => { - let callbackSpy = mockAsync(); - await act(async () => { - render(serverDashboardJsx(callbackSpy)); + render(serverDashboardJsx()); }); let foo = screen.getByTestId("user-name-div-foo"); @@ -177,10 +190,8 @@ test("Renders users from props.user_data into table", async () => { }); test("Renders correctly the status of a single-user server", async () => { - let callbackSpy = mockAsync(); - await act(async () => { - render(serverDashboardJsx(callbackSpy)); + render(serverDashboardJsx()); }); let start_elems = screen.getAllByText("Start Server"); @@ -194,10 +205,8 @@ test("Renders correctly the status of a single-user server", async () => { }); test("Renders spawn page link", async () => { - let callbackSpy = mockAsync(); - await act(async () => { - render(serverDashboardJsx(callbackSpy)); + render(serverDashboardJsx()); }); for (let server in bar_servers) { @@ -212,7 +221,7 @@ test("Invokes the startServer event on button click", async () => { let callbackSpy = mockAsync(); await act(async () => { - render(serverDashboardJsx(callbackSpy)); + render(serverDashboardJsx({ startServer: callbackSpy })); }); let start_elems = screen.getAllByText("Start Server"); @@ -229,7 +238,7 @@ test("Invokes the stopServer event on button click", async () => { let callbackSpy = mockAsync(); await act(async () => { - render(serverDashboardJsx(callbackSpy)); + render(serverDashboardJsx({ stopServer: callbackSpy })); }); let stop = screen.getByText("Stop Server"); @@ -245,7 +254,7 @@ test("Invokes the shutdownHub event on button click", async () => { let callbackSpy = mockAsync(); await act(async () => { - render(serverDashboardJsx(callbackSpy)); + render(serverDashboardJsx({ shutdownHub: callbackSpy })); }); let shutdown = screen.getByText("Shutdown Hub"); @@ -258,10 +267,8 @@ test("Invokes the shutdownHub event on button click", async () => { }); test("Sorts according to username", async () => { - let callbackSpy = mockAsync(); - await act(async () => { - render(serverDashboardJsx(callbackSpy)); + render(serverDashboardJsx()); }); let handler = screen.getByTestId("user-sort"); @@ -277,10 +284,8 @@ test("Sorts according to username", async () => { }); test("Sorts according to admin", async () => { - let callbackSpy = mockAsync(); - await act(async () => { - render(serverDashboardJsx(callbackSpy)); + render(serverDashboardJsx()); }); let handler = screen.getByTestId("admin-sort"); @@ -296,10 +301,8 @@ test("Sorts according to admin", async () => { }); test("Sorts according to last activity", async () => { - let callbackSpy = mockAsync(); - await act(async () => { - 
render(serverDashboardJsx(callbackSpy)); + render(serverDashboardJsx()); }); let handler = screen.getByTestId("last-activity-sort"); @@ -315,10 +318,8 @@ test("Sorts according to last activity", async () => { }); test("Sorts according to server status (running/not running)", async () => { - let callbackSpy = mockAsync(); - await act(async () => { - render(serverDashboardJsx(callbackSpy)); + render(serverDashboardJsx()); }); let handler = screen.getByTestId("running-status-sort"); @@ -334,10 +335,8 @@ test("Sorts according to server status (running/not running)", async () => { }); test("Shows server details with button click", async () => { - let callbackSpy = mockAsync(); - await act(async () => { - render(serverDashboardJsx(callbackSpy)); + render(serverDashboardJsx()); }); let button = screen.getByTestId("foo-collapse-button"); let collapse = screen.getByTestId("foo-collapse"); @@ -350,16 +349,16 @@ test("Shows server details with button click", async () => { await act(async () => { fireEvent.click(button); + jest.runAllTimers(); }); - clock.tick(400); expect(collapse).toHaveClass("collapse show"); expect(collapseBar).not.toHaveClass("show"); await act(async () => { fireEvent.click(button); + jest.runAllTimers(); }); - clock.tick(400); expect(collapse).toHaveClass("collapse"); expect(collapse).not.toHaveClass("show"); @@ -367,8 +366,8 @@ test("Shows server details with button click", async () => { await act(async () => { fireEvent.click(button); + jest.runAllTimers(); }); - clock.tick(400); expect(collapse).toHaveClass("collapse show"); expect(collapseBar).not.toHaveClass("show"); @@ -379,10 +378,8 @@ test("Renders nothing if required data is not available", async () => { return callback({}); }); - let callbackSpy = mockAsync(); - await act(async () => { - render(serverDashboardJsx(callbackSpy)); + render(serverDashboardJsx()); }); let noShow = screen.getByTestId("no-show"); @@ -391,26 +388,8 @@ test("Renders nothing if required data is not available", async () => { }); test("Shows a UI error dialogue when start all servers fails", async () => { - let spy = mockAsync(); - let rejectSpy = mockAsyncRejection; - await act(async () => { - render( - <Provider store={createStore(() => {}, {})}> - <HashRouter> - <Switch> - <ServerDashboard - updateUsers={spy} - shutdownHub={spy} - startServer={spy} - stopServer={spy} - startAll={rejectSpy} - stopAll={spy} - /> - </Switch> - </HashRouter> - </Provider>, - ); + render(serverDashboardJsx({ startAll: mockAsyncRejection })); }); let startAll = screen.getByTestId("start-all"); @@ -425,26 +404,8 @@ test("Shows a UI error dialogue when start all servers fails", async () => { }); test("Shows a UI error dialogue when stop all servers fails", async () => { - let spy = mockAsync(); - let rejectSpy = mockAsyncRejection; - await act(async () => { - render( - <Provider store={createStore(() => {}, {})}> - <HashRouter> - <Switch> - <ServerDashboard - updateUsers={spy} - shutdownHub={spy} - startServer={spy} - stopServer={spy} - startAll={spy} - stopAll={rejectSpy} - /> - </Switch> - </HashRouter> - </Provider>, - ); + render(serverDashboardJsx({ stopAll: mockAsyncRejection })); }); let stopAll = screen.getByTestId("stop-all"); @@ -459,26 +420,8 @@ test("Shows a UI error dialogue when stop all servers fails", async () => { }); test("Shows a UI error dialogue when start user server fails", async () => { - let spy = mockAsync(); - let rejectSpy = mockAsyncRejection(); - await act(async () => { - render( - <Provider store={createStore(() => {}, {})}> - 
<HashRouter> - <Switch> - <ServerDashboard - updateUsers={spy} - shutdownHub={spy} - startServer={rejectSpy} - stopServer={spy} - startAll={spy} - stopAll={spy} - /> - </Switch> - </HashRouter> - </Provider>, - ); + render(serverDashboardJsx({ startServer: mockAsyncRejection() })); }); let start_elems = screen.getAllByText("Start Server"); @@ -494,26 +437,9 @@ test("Shows a UI error dialogue when start user server fails", async () => { }); test("Shows a UI error dialogue when start user server returns an improper status code", async () => { - let spy = mockAsync(); let rejectSpy = mockAsync({ status: 403 }); - await act(async () => { - render( - <Provider store={createStore(() => {}, {})}> - <HashRouter> - <Switch> - <ServerDashboard - updateUsers={spy} - shutdownHub={spy} - startServer={rejectSpy} - stopServer={spy} - startAll={spy} - stopAll={spy} - /> - </Switch> - </HashRouter> - </Provider>, - ); + render(serverDashboardJsx({ startServer: rejectSpy })); }); let start_elems = screen.getAllByText("Start Server"); @@ -533,22 +459,7 @@ test("Shows a UI error dialogue when stop user servers fails", async () => { let rejectSpy = mockAsyncRejection(); await act(async () => { - render( - <Provider store={createStore(() => {}, {})}> - <HashRouter> - <Switch> - <ServerDashboard - updateUsers={spy} - shutdownHub={spy} - startServer={spy} - stopServer={rejectSpy} - startAll={spy} - stopAll={spy} - /> - </Switch> - </HashRouter> - </Provider>, - ); + render(serverDashboardJsx({ stopServer: rejectSpy })); }); let stop = screen.getByText("Stop Server"); @@ -567,22 +478,7 @@ test("Shows a UI error dialogue when stop user server returns an improper status let rejectSpy = mockAsync({ status: 403 }); await act(async () => { - render( - <Provider store={createStore(() => {}, {})}> - <HashRouter> - <Switch> - <ServerDashboard - updateUsers={spy} - shutdownHub={spy} - startServer={spy} - stopServer={rejectSpy} - startAll={spy} - stopAll={spy} - /> - </Switch> - </HashRouter> - </Provider>, - ); + render(serverDashboardJsx({ stopServer: rejectSpy })); }); let stop = screen.getByText("Stop Server"); @@ -613,72 +509,55 @@ test("Search for user calls updateUsers with name filter", async () => { }); }); await act(async () => { - render( - <Provider store={createStore(mockReducers, mockAppState())}> - <HashRouter> - <Switch> - <ServerDashboard - updateUsers={mockUpdateUsers} - shutdownHub={spy} - startServer={spy} - stopServer={spy} - startAll={spy} - stopAll={spy} - /> - </Switch> - </HashRouter> - </Provider>, - ); + searchParams.set("offset", "2"); + render(serverDashboardJsx({ updateUsers: mockUpdateUsers })); }); let search = screen.getByLabelText("user-search"); expect(mockUpdateUsers.mock.calls).toHaveLength(1); + expect(searchParams.get("offset")).toEqual("2"); userEvent.type(search, "a"); expect(search.value).toEqual("a"); - clock.tick(400); - expect(mockReducers.mock.calls).toHaveLength(3); - var lastState = - mockReducers.mock.results[mockReducers.mock.results.length - 1].value; - expect(lastState.name_filter).toEqual("a"); - // TODO: this should - expect(mockUpdateUsers.mock.calls).toHaveLength(1); + await act(async () => { + jest.runAllTimers(); + }); + expect(searchParams.get("name_filter")).toEqual("a"); + expect(searchParams.get("offset")).toEqual(null); + // FIXME: useSelector mocks prevent updateUsers from being called + // expect(mockUpdateUsers.mock.calls).toHaveLength(2); + // expect(mockUpdateUsers).toBeCalledWith(0, 100, "a"); userEvent.type(search, "b"); 
expect(search.value).toEqual("ab"); - clock.tick(400); - expect(mockReducers.mock.calls).toHaveLength(4); - lastState = - mockReducers.mock.results[mockReducers.mock.results.length - 1].value; - expect(lastState.name_filter).toEqual("ab"); - expect(lastState.user_page.offset).toEqual(0); + await act(async () => { + jest.runAllTimers(); + }); + expect(searchParams.get("name_filter")).toEqual("ab"); + // expect(mockUpdateUsers).toBeCalledWith(0, 100, "ab"); }); test("Interacting with PaginationFooter causes state update and refresh via useEffect call", async () => { - let callbackSpy = mockAsync(); + let updateUsers = mockAsync(); await act(async () => { - render(serverDashboardJsx(callbackSpy)); + render(serverDashboardJsx({ updateUsers: updateUsers })); }); - expect(callbackSpy).toBeCalledWith(0, 2, ""); + expect(updateUsers).toBeCalledWith(0, 100, ""); - expect(mockReducers.mock.results).toHaveLength(2); - lastState = - mockReducers.mock.results[mockReducers.mock.results.length - 1].value; - console.log(lastState); - expect(lastState.user_page.offset).toEqual(0); - expect(lastState.user_page.limit).toEqual(2); + var n = 3; + expect(searchParams.get("offset")).toEqual(null); + expect(searchParams.get("limit")).toEqual(null); let next = screen.getByTestId("paginate-next"); - fireEvent.click(next); - clock.tick(400); + await act(async () => { + fireEvent.click(next); + jest.runAllTimers(); + }); - expect(mockReducers.mock.results).toHaveLength(3); - var lastState = - mockReducers.mock.results[mockReducers.mock.results.length - 1].value; - expect(lastState.user_page.offset).toEqual(2); - expect(lastState.user_page.limit).toEqual(2); + expect(searchParams.get("offset")).toEqual("100"); + expect(searchParams.get("limit")).toEqual(null); // FIXME: should call updateUsers, does in reality. 
// tests don't reflect reality due to mocked state/useSelector @@ -688,10 +567,8 @@ test("Interacting with PaginationFooter causes state update and refresh via useE }); test("Server delete button exists for named servers", async () => { - let callbackSpy = mockAsync(); - await act(async () => { - render(serverDashboardJsx(callbackSpy)); + render(serverDashboardJsx()); }); for (let server in bar_servers) { @@ -705,11 +582,9 @@ test("Server delete button exists for named servers", async () => { }); test("Start server and confirm pending state", async () => { - let spy = mockAsync(); - let mockStartServer = jest.fn(() => { return new Promise(async (resolve) => - clock.setTimeout(() => { + setTimeout(() => { resolve({ status: 200 }); }, 100), ); @@ -719,20 +594,10 @@ test("Start server and confirm pending state", async () => { await act(async () => { render( - <Provider store={createStore(mockReducers, {})}> - <HashRouter> - <Switch> - <ServerDashboard - updateUsers={mockUpdateUsers} - shutdownHub={spy} - startServer={mockStartServer} - stopServer={spy} - startAll={spy} - stopAll={spy} - /> - </Switch> - </HashRouter> - </Provider>, + serverDashboardJsx({ + updateUsers: mockUpdateUsers, + startServer: mockStartServer, + }), ); }); @@ -755,7 +620,7 @@ test("Start server and confirm pending state", async () => { expect(buttons[1]).toBeEnabled(); await act(async () => { - await clock.tick(100); + jest.runAllTimers(); }); expect(mockUpdateUsers.mock.calls).toHaveLength(2); }); diff --git a/jupyterhub/tests/browser/test_browser.py b/jupyterhub/tests/browser/test_browser.py --- a/jupyterhub/tests/browser/test_browser.py +++ b/jupyterhub/tests/browser/test_browser.py @@ -1041,7 +1041,7 @@ async def test_start_stop_all_servers_on_admin_page(app, browser, admin_user): ) [email protected]("added_count_users", [10, 47, 48, 49, 110]) [email protected]("added_count_users", [10, 49, 50, 51, 99, 100, 101]) async def test_paging_on_admin_page( app, browser, admin_user, added_count_users, create_user_with_scopes ): @@ -1057,7 +1057,7 @@ async def test_paging_on_admin_page( btn_next = browser.get_by_role("button", name="Next") # verify "Previous"/"Next" button clickability depending on users number on the page await expect(displaying).to_have_text( - re.compile(".*" + f"0-{min(users_count_db,50)}" + ".*") + re.compile(".*" + f"1-{min(users_count_db, 50)}" + ".*") ) if users_count_db > 50: await expect(btn_next.locator("//span")).to_have_class("active-pagination") @@ -1065,10 +1065,10 @@ async def test_paging_on_admin_page( await btn_next.click() if users_count_db <= 100: await expect(displaying).to_have_text( - re.compile(".*" + f"50-{users_count_db}" + ".*") + re.compile(".*" + f"51-{users_count_db}" + ".*") ) else: - await expect(displaying).to_have_text(re.compile(".*" + "50-100" + ".*")) + await expect(displaying).to_have_text(re.compile(".*" + "51-100" + ".*")) await expect(btn_next.locator("//span")).to_have_class("active-pagination") await expect(btn_previous.locator("//span")).to_have_class("active-pagination") # click on Previous button @@ -1117,7 +1117,7 @@ async def test_search_on_admin_page( if users_count_db_filtered <= 50: await expect(filtered_list_on_page).to_have_count(users_count_db_filtered) await expect(displaying).to_contain_text( - re.compile(f"0-{users_count_db_filtered}") + re.compile(f"1-{users_count_db_filtered}") ) # check that users names contain the search value in the filtered list for element in await filtered_list_on_page.get_by_test_id( @@ -1126,7 +1126,7 @@ async def 
test_search_on_admin_page( await expect(element).to_contain_text(re.compile(f".*{search_value}.*")) else: await expect(filtered_list_on_page).to_have_count(50) - await expect(displaying).to_contain_text(re.compile("0-50")) + await expect(displaying).to_contain_text(re.compile("1-50")) # click on Next button to verify that the rest part of filtered list is displayed on the next page await browser.get_by_role("button", name="Next").click() filtered_list_on_next_page = browser.locator('//tr[@class="user-row"]')
Modifiable api_page_default_limit variable on the admin page <!-- Thank you for contributing. These HTML comments will not render in the issue, but you can delete them once you've read them if you prefer! --> ### Proposed change It is rather annoying that the number of listed servers (`api_page_default_limit`?) cannot be changed on the admin page and is fixed to the value set in `jupyterhub_config.py`. It would greatly help with the administration of JupyterHub if this number could be changed on the admin page. ### Alternative options <!-- Use this section to describe alternative options and why you've decided on the proposed feature above. --> The variable `api_page_default_limit` can be changed in `jupyterhub_config.py`; however, this requires restarting the server. ### Who would use this feature? <!-- Describe who would benefit from using this feature. --> Most of the users with _admin_ access. ### (Optional): Suggest a solution <!-- Describe what you think needs to be done. Doing that is an excellent first step to get the feature implemented. --> Completing the admin page template(?) with a choice box (50|100|200|500) or an input box.
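For reference, a minimal sketch of the configuration-file workaround described under "Alternative options" above — it uses the `JupyterHub.api_page_default_limit` trait the request refers to, plus an assumed companion `api_page_max_limit` upper bound; both names should be checked against the JupyterHub version in use, and changing them still requires the hub restart the request wants to avoid:

```python
# jupyterhub_config.py -- sketch of the restart-requiring workaround
c = get_config()  # noqa

# default page size for paginated REST endpoints, which also determines how
# many users/servers the admin page shows at a time (50 if unset)
c.JupyterHub.api_page_default_limit = 200

# assumed upper bound on the `limit` a client may request per page
c.JupyterHub.api_page_max_limit = 500
```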
The admin pages should definitely have control for selecting the number of items on the page. This should probably be able to come from the URL. In fact, all input parameters should be in the URL so refreshing the page doesn't result in a change of view. Existing API parameters that should be supported in UI: - state (not settable at all) - limit (not persisted) - offset (in UI via pager, not persisted) - name_filter (in UI, not persisted) and `order_by` should be in the UI if/when #3816 is implemented on the API side. I don't think the admin page should be able to modify the Hub's _internal_ API limits, which would affect other requests and not persist across hub restarts, but it should certainly be able to control the current browser's view.
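To make the parameter list above concrete, here is a hedged sketch of querying the users endpoint that backs the admin page with those existing API parameters; the hub URL and token are placeholders, and the token needs permission to list users:

```python
import requests

# placeholders -- substitute your hub's API URL and a token allowed to list users
hub_api = "http://127.0.0.1:8081/hub/api"
headers = {"Authorization": "token <api-token>"}

# offset/limit drive pagination; name_filter matches usernames by prefix
r = requests.get(
    f"{hub_api}/users",
    headers=headers,
    params={"offset": 0, "limit": 100, "name_filter": "ab"},
)
r.raise_for_status()
for user in r.json():
    print(user["name"])
```

Persisting the same parameters in the admin page URL, as suggested above, would let a page refresh reproduce the current view instead of resetting it.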
2024-03-04T14:44:46Z
[]
[]
jupyterhub/jupyterhub
4,722
jupyterhub__jupyterhub-4722
[ "4482" ]
943e4a70724cfb76c992238cb06f1e5ff6be6bec
diff --git a/jupyterhub/apihandlers/base.py b/jupyterhub/apihandlers/base.py --- a/jupyterhub/apihandlers/base.py +++ b/jupyterhub/apihandlers/base.py @@ -352,6 +352,10 @@ def user_model(self, user): if include_stopped_servers: # add any stopped servers in the db seen = set(servers.keys()) + if isinstance(user, orm.User): + # need high-level User wrapper for spawner model + # FIXME: this shouldn't be needed! + user = self.users[user] for name, orm_spawner in user.orm_spawners.items(): if name not in seen and scope_filter(orm_spawner, kind='server'): servers[name] = self.server_model(orm_spawner, user=user)
diff --git a/jsx/src/components/ServerDashboard/ServerDashboard.test.js b/jsx/src/components/ServerDashboard/ServerDashboard.test.js --- a/jsx/src/components/ServerDashboard/ServerDashboard.test.js +++ b/jsx/src/components/ServerDashboard/ServerDashboard.test.js @@ -34,9 +34,8 @@ const serverDashboardJsx = (props) => { // spies is a dict of properties to mock in // any API calls that will fire during the test should be mocked props = props || {}; - const defaultSpy = mockAsync(); if (!props.updateUsers) { - props.updateUsers = defaultSpy; + props.updateUsers = mockUpdateUsers; } return ( <Provider store={createStore(mockReducers, mockAppState())}> @@ -55,6 +54,14 @@ var mockAsync = (data) => var mockAsyncRejection = () => jest.fn().mockImplementation(() => Promise.reject()); +const defaultUpdateUsersParams = { + offset: 0, + limit: 2, + name_filter: "", + sort: "id", + state: "", +}; + var bar_servers = { "": { name: "", @@ -80,44 +87,64 @@ var bar_servers = { }, }; -var mockAppState = () => - Object.assign({}, initialState, { - user_data: [ - { - kind: "user", - name: "foo", - admin: true, - groups: [], - server: "/user/foo/", - pending: null, - created: "2020-12-07T18:46:27.112695Z", - last_activity: "2020-12-07T21:00:33.336354Z", - servers: { - "": { - name: "", - last_activity: "2020-12-07T20:58:02.437408Z", - started: "2020-12-07T20:58:01.508266Z", - pending: null, - ready: true, - state: { pid: 28085 }, - url: "/user/foo/", - user_options: {}, - progress_url: "/hub/api/users/foo/server/progress", - }, - }, - }, - { - kind: "user", - name: "bar", - admin: false, - groups: [], - server: null, +/* create new user models */ +const newUser = (name) => { + return { + kind: "user", + name: name, + admin: false, + groups: [], + server: `/user/${name}`, + created: "2020-12-07T18:46:27.112695Z", + last_activity: "2020-12-07T21:00:33.336354Z", + servers: {}, + }; +}; + +const allUsers = [ + { + kind: "user", + name: "foo", + admin: true, + groups: [], + server: "/user/foo/", + pending: null, + created: "2020-12-07T18:46:27.112695Z", + last_activity: "2020-12-07T21:00:33.336354Z", + servers: { + "": { + name: "", + last_activity: "2020-12-07T20:58:02.437408Z", + started: "2020-12-07T20:58:01.508266Z", pending: null, - created: "2020-12-07T18:46:27.115528Z", - last_activity: "2020-12-07T20:43:51.013613Z", - servers: bar_servers, + ready: true, + state: { pid: 28085 }, + url: "/user/foo/", + user_options: {}, + progress_url: "/hub/api/users/foo/server/progress", }, - ], + }, + }, + { + kind: "user", + name: "bar", + admin: false, + groups: [], + server: null, + pending: null, + created: "2020-12-07T18:46:27.115528Z", + last_activity: "2020-12-07T20:43:51.013613Z", + servers: bar_servers, + }, +]; + +for (var i = 2; i < 10; i++) { + allUsers.push(newUser(`test-${i}`)); +} + +var mockAppState = () => + Object.assign({}, initialState, { + user_data: allUsers.slice(0, 2), user_page: { offset: 0, limit: 2, @@ -125,7 +152,7 @@ var mockAppState = () => next: { offset: 2, limit: 2, - url: "http://localhost:8000/hub/api/groups?offset=2&limit=2", + url: "http://localhost:8000/hub/api/users?offset=2&limit=2", }, }, }); @@ -143,6 +170,40 @@ var mockReducers = jest.fn((state, action) => { return state; }); +let mockUpdateUsers = jest.fn(({ offset, limit, sort, name_filter, state }) => { + /* mock updating users + + this has tom implement the server-side filtering, sorting, etc. 
+ (at least whatever we want to test of it) + */ + let matchingUsers = allUsers; + if (state === "active") { + // only first user is active + matchingUsers = allUsers.slice(0, 1); + } + if (name_filter) { + matchingUsers = matchingUsers.filter((user) => + user.name.startsWith(name_filter), + ); + } + + const total = matchingUsers.length; + const items = matchingUsers.slice(offset, offset + limit); + + return Promise.resolve({ + items: items, + _pagination: { + offset: offset, + limit: limit, + total: total, + next: { + offset: offset + limit, + limit: limit, + }, + }, + }); +}); + let searchParams = new URLSearchParams(); beforeEach(() => { @@ -151,6 +212,7 @@ beforeEach(() => { return callback(mockAppState()); }); searchParams = new URLSearchParams(); + searchParams.set("limit", "2"); useSearchParams.mockImplementation(() => [ searchParams, @@ -164,6 +226,7 @@ afterEach(() => { useSearchParams.mockClear(); useSelector.mockClear(); mockReducers.mockClear(); + mockUpdateUsers.mockClear(); jest.runAllTimers(); }); @@ -267,71 +330,93 @@ test("Invokes the shutdownHub event on button click", async () => { }); test("Sorts according to username", async () => { + let rerender; + const testId = "user-sort"; await act(async () => { - render(serverDashboardJsx()); + rerender = render(serverDashboardJsx()).rerender; }); - let handler = screen.getByTestId("user-sort"); + expect(searchParams.get("sort")).toEqual(null); + let handler = screen.getByTestId(testId); fireEvent.click(handler); + expect(searchParams.get("sort")).toEqual("name"); - let first = screen.getAllByTestId("user-row-name")[0]; - expect(first.textContent).toContain("bar"); - - fireEvent.click(handler); - - first = screen.getAllByTestId("user-row-name")[0]; - expect(first.textContent).toContain("foo"); -}); - -test("Sorts according to admin", async () => { await act(async () => { - render(serverDashboardJsx()); + rerender(serverDashboardJsx()); + handler = screen.getByTestId(testId); }); - let handler = screen.getByTestId("admin-sort"); fireEvent.click(handler); + expect(searchParams.get("sort")).toEqual("-name"); - let first = screen.getAllByTestId("user-row-admin")[0]; - expect(first.textContent).toBe("admin"); + await act(async () => { + rerender(serverDashboardJsx()); + handler = screen.getByTestId(testId); + }); fireEvent.click(handler); - - first = screen.getAllByTestId("user-row-admin")[0]; - expect(first.textContent).toBe(""); + expect(searchParams.get("sort")).toEqual("name"); }); test("Sorts according to last activity", async () => { + let rerender; + const testId = "last-activity-sort"; await act(async () => { - render(serverDashboardJsx()); + rerender = render(serverDashboardJsx()).rerender; }); - let handler = screen.getByTestId("last-activity-sort"); + expect(searchParams.get("sort")).toEqual(null); + let handler = screen.getByTestId(testId); fireEvent.click(handler); + expect(searchParams.get("sort")).toEqual("last_activity"); - let first = screen.getAllByTestId("user-row-name")[0]; - expect(first.textContent).toContain("foo"); + await act(async () => { + rerender(serverDashboardJsx()); + handler = screen.getByTestId(testId); + }); fireEvent.click(handler); + expect(searchParams.get("sort")).toEqual("-last_activity"); + + await act(async () => { + rerender(serverDashboardJsx()); + handler = screen.getByTestId(testId); + }); - first = screen.getAllByTestId("user-row-name")[0]; - expect(first.textContent).toContain("bar"); + fireEvent.click(handler); + expect(searchParams.get("sort")).toEqual("last_activity"); }); 
-test("Sorts according to server status (running/not running)", async () => { +test("Filter according to server status (running/not running)", async () => { + let rerender; await act(async () => { - render(serverDashboardJsx()); + rerender = render(serverDashboardJsx()).rerender; }); - - let handler = screen.getByTestId("running-status-sort"); + console.log(rerender); + console.log("begin test"); + const label = "only active servers"; + let handler = screen.getByLabelText(label); + expect(handler.checked).toEqual(false); fireEvent.click(handler); - let first = screen.getAllByTestId("user-row-name")[0]; - expect(first.textContent).toContain("foo"); + // FIXME: need to force a rerender to get updated checkbox + // I don't think this should be required + await act(async () => { + rerender(serverDashboardJsx()); + handler = screen.getByLabelText(label); + }); + expect(searchParams.get("state")).toEqual("active"); + expect(handler.checked).toEqual(true); fireEvent.click(handler); - first = screen.getAllByTestId("user-row-name")[0]; - expect(first.textContent).toContain("bar"); + await act(async () => { + rerender(serverDashboardJsx()); + handler = screen.getByLabelText(label); + }); + handler = screen.getByLabelText(label); + expect(handler.checked).toEqual(false); + expect(searchParams.get("state")).toEqual(null); }); test("Shows server details with button click", async () => { @@ -494,23 +579,9 @@ test("Shows a UI error dialogue when stop user server returns an improper status test("Search for user calls updateUsers with name filter", async () => { let spy = mockAsync(); - let mockUpdateUsers = jest.fn((offset, limit, name_filter) => { - return Promise.resolve({ - items: [], - _pagination: { - offset: offset, - limit: limit, - total: offset + limit * 2, - next: { - offset: offset + limit, - limit: limit, - }, - }, - }); - }); await act(async () => { searchParams.set("offset", "2"); - render(serverDashboardJsx({ updateUsers: mockUpdateUsers })); + render(serverDashboardJsx()); }); let search = screen.getByLabelText("user-search"); @@ -538,17 +609,15 @@ test("Search for user calls updateUsers with name filter", async () => { }); test("Interacting with PaginationFooter causes state update and refresh via useEffect call", async () => { - let updateUsers = mockAsync(); - await act(async () => { - render(serverDashboardJsx({ updateUsers: updateUsers })); + render(serverDashboardJsx()); }); - expect(updateUsers).toBeCalledWith(0, 100, ""); + expect(mockUpdateUsers).toBeCalledWith(defaultUpdateUsersParams); var n = 3; expect(searchParams.get("offset")).toEqual(null); - expect(searchParams.get("limit")).toEqual(null); + expect(searchParams.get("limit")).toEqual("2"); let next = screen.getByTestId("paginate-next"); await act(async () => { @@ -556,8 +625,8 @@ test("Interacting with PaginationFooter causes state update and refresh via useE jest.runAllTimers(); }); - expect(searchParams.get("offset")).toEqual("100"); - expect(searchParams.get("limit")).toEqual(null); + expect(searchParams.get("offset")).toEqual("2"); + expect(searchParams.get("limit")).toEqual("2"); // FIXME: should call updateUsers, does in reality. 
// tests don't reflect reality due to mocked state/useSelector @@ -590,12 +659,9 @@ test("Start server and confirm pending state", async () => { ); }); - let mockUpdateUsers = jest.fn(() => Promise.resolve(mockAppState())); - await act(async () => { render( serverDashboardJsx({ - updateUsers: mockUpdateUsers, startServer: mockStartServer, }), ); @@ -604,16 +670,17 @@ test("Start server and confirm pending state", async () => { let actions = screen.getAllByTestId("user-row-server-activity")[1]; let buttons = getAllByRole(actions, "button"); - expect(buttons.length).toBe(2); + expect(buttons.length).toBe(3); expect(buttons[0].textContent).toBe("Start Server"); expect(buttons[1].textContent).toBe("Spawn Page"); + expect(buttons[2].textContent).toBe("Edit User"); await act(async () => { fireEvent.click(buttons[0]); }); expect(mockUpdateUsers.mock.calls).toHaveLength(1); - expect(buttons.length).toBe(2); + expect(buttons.length).toBe(3); expect(buttons[0].textContent).toBe("Start Server"); expect(buttons[0]).toBeDisabled(); expect(buttons[1].textContent).toBe("Spawn Page"); diff --git a/jupyterhub/tests/browser/test_browser.py b/jupyterhub/tests/browser/test_browser.py --- a/jupyterhub/tests/browser/test_browser.py +++ b/jupyterhub/tests/browser/test_browser.py @@ -1116,8 +1116,9 @@ async def test_search_on_admin_page( displaying = browser.get_by_text("Displaying") if users_count_db_filtered <= 50: await expect(filtered_list_on_page).to_have_count(users_count_db_filtered) + start = 1 if users_count_db_filtered else 0 await expect(displaying).to_contain_text( - re.compile(f"1-{users_count_db_filtered}") + re.compile(f"{start}-{users_count_db_filtered}") ) # check that users names contain the search value in the filtered list for element in await filtered_list_on_page.get_by_test_id(
Cannot perform a global sort by 'Running' status in the admin panel <!-- Thank you for contributing. These HTML comments will not render in the issue, but you can delete them once you've read them if you prefer! --> ### Bug description The /hub/admin#/ endpoint shows a table of users and their servers. However, in jupyterhub-3.x the sorting seems to be broken. We're upgrading from 1.x where sorting by the 'Running' column in the ascending order would list all running servers. However, in jhub-3.x only running servers present on the current 'page' are shown. Since pagination only shows 50 entries per page other running servers are shown at the top of the pages they were paginated to. With a large list of users this sorting option becomes less than useful. #### Expected behaviour Sorting /hub/admin#/ by 'Running' in the ascending order should show all running servers on the first page(s) i.e. all running servers must be at the top of the list. #### Actual behaviour only running servers present on the current 'page' are shown. Since pagination only shows 50 entries per page other running servers are shown at the top of the pages they were paginated to. ### How to reproduce <!-- Use this section to describe the steps that a user would take to experience this bug. --> 1. Go to /hub/admin#/ 2. Click on 'Running' column header 3. Expect to see all running servers at the top of the table ### Your personal set up <!-- Tell us a little about the system you're using. Please include information about how you installed, e.g. are you using a distribution such as zero-to-jupyterhub or the-littlest-jupyterhub. --> - OS: <!-- [e.g. ubuntu 20.04, macOS 11.0] --> - Version(s): Conda environment on RHEL7 <!-- e.g. jupyterhub --version, python --version ---> <details><summary>Full environment</summary> <!-- For reproduction, it's useful to have the full environment. 
For example, the output of `pip freeze` or `conda list` ---> ``` # Name Version Build Channel _libgcc_mutex 0.1 conda_forge conda-forge _openmp_mutex 4.5 2_gnu conda-forge aiofiles 22.1.0 pyhd8ed1ab_0 conda-forge aiosqlite 0.18.0 pyhd8ed1ab_0 conda-forge alembic 1.10.3 pyhd8ed1ab_0 conda-forge anyio 3.6.2 pyhd8ed1ab_0 conda-forge argon2-cffi 21.3.0 pyhd8ed1ab_0 conda-forge argon2-cffi-bindings 21.2.0 py311hd4cff14_3 conda-forge asttokens 2.2.1 pyhd8ed1ab_0 conda-forge async_generator 1.10 py_0 conda-forge attrs 22.2.0 pyh71513ae_0 conda-forge babel 2.12.1 pyhd8ed1ab_1 conda-forge backcall 0.2.0 pyh9f0ad1d_0 conda-forge backports 1.0 pyhd8ed1ab_3 conda-forge backports.functools_lru_cache 1.6.4 pyhd8ed1ab_0 conda-forge batchspawner 1.2.0 pyhd8ed1ab_0 conda-forge beautifulsoup4 4.12.2 pyha770c72_0 conda-forge bleach 6.0.0 pyhd8ed1ab_0 conda-forge blinker 1.6.2 pyhd8ed1ab_0 conda-forge brotlipy 0.7.0 py311hd4cff14_1005 conda-forge bzip2 1.0.8 h7f98852_4 conda-forge c-ares 1.18.1 h7f98852_0 conda-forge ca-certificates 2023.5.7 hbcca054_0 conda-forge certifi 2023.5.7 pyhd8ed1ab_0 conda-forge certipy 0.1.3 py_0 conda-forge cffi 1.15.1 py311h409f033_3 conda-forge charset-normalizer 3.1.0 pyhd8ed1ab_0 conda-forge colorama 0.4.6 pyhd8ed1ab_0 conda-forge comm 0.1.3 pyhd8ed1ab_0 conda-forge configurable-http-proxy 4.5.4 he2f69ee_2 conda-forge cryptography 40.0.2 py311h9b4c7bb_0 conda-forge debugpy 1.6.7 py311hcafe171_0 conda-forge decorator 5.1.1 pyhd8ed1ab_0 conda-forge defusedxml 0.7.1 pyhd8ed1ab_0 conda-forge entrypoints 0.4 pyhd8ed1ab_0 conda-forge executing 1.2.0 pyhd8ed1ab_0 conda-forge flit-core 3.8.0 pyhd8ed1ab_0 conda-forge gitdb 4.0.10 pyhd8ed1ab_0 conda-forge gitpython 3.1.31 pyhd8ed1ab_0 conda-forge greenlet 2.0.2 py311hcafe171_0 conda-forge icu 70.1 h27087fc_0 conda-forge idna 3.4 pyhd8ed1ab_0 conda-forge importlib-metadata 6.5.0 pyha770c72_0 conda-forge importlib_metadata 6.5.0 hd8ed1ab_0 conda-forge importlib_resources 5.12.0 pyhd8ed1ab_0 conda-forge ipykernel 6.22.0 pyh210e3f2_0 conda-forge ipython 8.12.0 pyh41d4057_0 conda-forge ipython_genutils 0.2.0 py_1 conda-forge jedi 0.18.2 pyhd8ed1ab_0 conda-forge jhub-remote-user-authenticator 0.1.0 pypi_0 pypi jinja2 3.1.2 pyhd8ed1ab_1 conda-forge json5 0.9.5 pyh9f0ad1d_0 conda-forge jsonschema 4.17.3 pyhd8ed1ab_0 conda-forge jupyter-server-mathjax 0.2.6 pyh5bfe37b_1 conda-forge jupyter_client 8.2.0 pyhd8ed1ab_0 conda-forge jupyter_core 5.3.0 py311h38be061_0 conda-forge jupyter_events 0.6.3 pyhd8ed1ab_0 conda-forge jupyter_server 2.5.0 pyhd8ed1ab_0 conda-forge jupyter_server_fileid 0.9.0 pyhd8ed1ab_0 conda-forge jupyter_server_terminals 0.4.4 pyhd8ed1ab_1 conda-forge jupyter_server_ydoc 0.8.0 pyhd8ed1ab_0 conda-forge jupyter_telemetry 0.1.0 pyhd8ed1ab_1 conda-forge jupyter_ydoc 0.2.3 pyhd8ed1ab_0 conda-forge jupyterhub 3.1.1 pyh2a2186d_0 conda-forge jupyterhub-base 3.1.1 pyh2a2186d_0 conda-forge jupyterhub-moss 6.0.0 pypi_0 pypi jupyterlab 3.6.3 pyhd8ed1ab_0 conda-forge jupyterlab-git 0.41.0 pyhd8ed1ab_1 conda-forge jupyterlab_pygments 0.2.2 pyhd8ed1ab_0 conda-forge jupyterlab_server 2.22.1 pyhd8ed1ab_0 conda-forge keyutils 1.6.1 h166bdaf_0 conda-forge krb5 1.20.1 h81ceb04_0 conda-forge ld_impl_linux-64 2.40 h41732ed_0 conda-forge libcurl 8.0.1 h588be90_0 conda-forge libedit 3.1.20191231 he28a2e2_2 conda-forge libev 4.33 h516909a_1 conda-forge libexpat 2.5.0 hcb278e6_1 conda-forge libffi 3.4.2 h7f98852_5 conda-forge libgcc-ng 12.2.0 h65d4601_19 conda-forge libgomp 12.2.0 h65d4601_19 conda-forge libnghttp2 1.52.0 h61bc06f_0 conda-forge libnsl 
2.0.0 h7f98852_0 conda-forge libpq 15.2 hb675445_0 conda-forge libsodium 1.0.18 h36c2ea0_1 conda-forge libsqlite 3.40.0 h753d276_0 conda-forge libssh2 1.10.0 hf14f497_3 conda-forge libstdcxx-ng 12.2.0 h46fd767_19 conda-forge libuuid 2.38.1 h0b41bf4_0 conda-forge libuv 1.44.2 h166bdaf_0 conda-forge libzlib 1.2.13 h166bdaf_4 conda-forge mako 1.2.4 pyhd8ed1ab_0 conda-forge markupsafe 2.1.2 py311h2582759_0 conda-forge matplotlib-inline 0.1.6 pyhd8ed1ab_0 conda-forge mistune 2.0.5 pyhd8ed1ab_0 conda-forge nbclassic 0.5.5 pyhb4ecaf3_1 conda-forge nbclient 0.7.3 pyhd8ed1ab_0 conda-forge nbconvert 7.3.1 pyhd8ed1ab_0 conda-forge nbconvert-core 7.3.1 pyhd8ed1ab_0 conda-forge nbconvert-pandoc 7.3.1 pyhd8ed1ab_0 conda-forge nbdime 3.2.1 pyhd8ed1ab_0 conda-forge nbformat 5.8.0 pyhd8ed1ab_0 conda-forge ncurses 6.3 h27087fc_1 conda-forge nest-asyncio 1.5.6 pyhd8ed1ab_0 conda-forge nodejs 18.15.0 h8d033a5_0 conda-forge notebook 6.5.4 pyha770c72_0 conda-forge notebook-shim 0.2.2 pyhd8ed1ab_0 conda-forge oauthlib 3.2.2 pyhd8ed1ab_0 conda-forge openssl 3.1.1 hd590300_1 conda-forge packaging 23.1 pyhd8ed1ab_0 conda-forge pamela 1.0.0 py_0 conda-forge pandoc 2.19.2 h32600fe_2 conda-forge pandocfilters 1.5.0 pyhd8ed1ab_0 conda-forge parso 0.8.3 pyhd8ed1ab_0 conda-forge pexpect 4.8.0 pyh1a96a4e_2 conda-forge pickleshare 0.7.5 py_1003 conda-forge pip 23.1 pyhd8ed1ab_0 conda-forge pkgutil-resolve-name 1.3.10 pyhd8ed1ab_0 conda-forge platformdirs 3.2.0 pyhd8ed1ab_0 conda-forge prometheus_client 0.16.0 pyhd8ed1ab_0 conda-forge prompt-toolkit 3.0.38 pyha770c72_0 conda-forge prompt_toolkit 3.0.38 hd8ed1ab_0 conda-forge psutil 5.9.5 py311h2582759_0 conda-forge psycopg2 2.9.3 py311h968e94b_2 conda-forge ptyprocess 0.7.0 pyhd3deb0d_0 conda-forge pure_eval 0.2.2 pyhd8ed1ab_0 conda-forge pycparser 2.21 pyhd8ed1ab_0 conda-forge pycurl 7.45.1 py311hae980a4_3 conda-forge pydantic 1.10.7 pypi_0 pypi pygments 2.15.0 pyhd8ed1ab_0 conda-forge pyjwt 2.6.0 pyhd8ed1ab_0 conda-forge pyopenssl 23.1.1 pyhd8ed1ab_0 conda-forge pyrsistent 0.19.3 py311h2582759_0 conda-forge pysocks 1.7.1 pyha2e5f31_6 conda-forge python 3.11.3 h2755cc3_0_cpython conda-forge python-dateutil 2.8.2 pyhd8ed1ab_0 conda-forge python-fastjsonschema 2.16.3 pyhd8ed1ab_0 conda-forge python-json-logger 2.0.7 pyhd8ed1ab_0 conda-forge python_abi 3.11 3_cp311 conda-forge pytz 2023.3 pyhd8ed1ab_0 conda-forge pyyaml 6.0 py311hd4cff14_5 conda-forge pyzmq 25.0.2 py311hd6ccaeb_0 conda-forge readline 8.2 h8228510_1 conda-forge requests 2.28.2 pyhd8ed1ab_1 conda-forge rfc3339-validator 0.1.4 pyhd8ed1ab_0 conda-forge rfc3986-validator 0.1.1 pyh9f0ad1d_0 conda-forge ruamel.yaml 0.17.21 py311h2582759_3 conda-forge ruamel.yaml.clib 0.2.7 py311h2582759_1 conda-forge send2trash 1.8.0 pyhd8ed1ab_0 conda-forge setuptools 67.6.1 pyhd8ed1ab_0 conda-forge six 1.16.0 pyh6c4a22f_0 conda-forge smmap 3.0.5 pyh44b312d_0 conda-forge sniffio 1.3.0 pyhd8ed1ab_0 conda-forge soupsieve 2.3.2.post1 pyhd8ed1ab_0 conda-forge sqlalchemy 2.0.9 py311h2582759_0 conda-forge stack_data 0.6.2 pyhd8ed1ab_0 conda-forge terminado 0.17.1 pyh41d4057_0 conda-forge tinycss2 1.2.1 pyhd8ed1ab_0 conda-forge tk 8.6.12 h27826a3_0 conda-forge tomli 2.0.1 pyhd8ed1ab_0 conda-forge tornado 6.3 py311h2582759_0 conda-forge traitlets 5.9.0 pyhd8ed1ab_0 conda-forge typing-extensions 4.5.0 hd8ed1ab_0 conda-forge typing_extensions 4.5.0 pyha770c72_0 conda-forge tzdata 2023c h71feb2d_0 conda-forge urllib3 1.26.15 pyhd8ed1ab_0 conda-forge wcwidth 0.2.6 pyhd8ed1ab_0 conda-forge webencodings 0.5.1 py_1 conda-forge websocket-client 
1.5.1 pyhd8ed1ab_0 conda-forge wheel 0.40.0 pyhd8ed1ab_0 conda-forge wrapspawner 1.0.1 pypi_0 pypi xz 5.2.6 h166bdaf_0 conda-forge y-py 0.5.9 py311hfe55011_0 conda-forge yaml 0.2.5 h7f98852_2 conda-forge ypy-websocket 0.8.2 pyhd8ed1ab_0 conda-forge zeromq 4.3.4 h9c3ff4c_1 conda-forge zipp 3.15.0 pyhd8ed1ab_0 conda-forge zlib 1.2.13 h166bdaf_4 conda-forge zstd 1.5.2 h3eb15da_6 conda-forge ``` </details> <details><summary>Configuration</summary> <!-- For JupyterHub, especially include information such as what Spawner and Authenticator are being used. Be careful not to share any sensitive information. You can paste jupyterhub_config.py below. To exclude lots of comments and empty lines from auto-generated jupyterhub_config.py, you can do: grep -v '\(^#\|^[[:space:]]*$\)' jupyterhub_config.py --> ```python # jupyterhub_config.py c.Application.log_datefmt = '%Y-%m-%d %H:%M:%S' c.JupyterHub.active_server_limit = 200 c.JupyterHub.authenticator_class = 'jhub_remote_user_authenticator.remote_user_auth.RemoteUserAuthenticator' c.JupyterHub.bind_url = 'REDACTED' c.JupyterHub.cleanup_servers = False c.JupyterHub.concurrent_spawn_limit = 100 c.JupyterHub.cookie_max_age_days = 1 c.JupyterHub.db_url = 'REDACTED' c.JupyterHub.extra_handlers = [(r"/api/batchspawner", 'batchspawner.api.BatchSpawnerAPIHandler')] c.JupyterHub.hub_connect_url = 'REDACTED' c.JupyterHub.logo_file = '/opt/jupyterhub/rc_jhub_logo.png' c = get_config() c.JupyterHub.spawner_class = 'wrapspawner.ProfilesSpawner' c.Spawner.http_timeout = 300 c.ProfilesSpawner.profiles = [ ( "Teaching - 1 CPU core, 2GB RAM, 2h", "1x2x2", "batchspawner.SlurmSpawner", dict(req_partition = "hpg-dev", req_nprocs = "1", req_runtime = "02:00:00", req_memory = "2gb" )), ("Interactive - 1 CPU core, 2GB RAM, 8h", "1x2x8", "batchspawner.SlurmSpawner", dict(req_partition = "hpg-dev", req_nprocs = "1", req_runtime = "8:00:00", req_memory = "2gb" )), ("Interactive - 1 CPU core, 4GB RAM, 12h", "1x4x8", "batchspawner.SlurmSpawner", dict(req_partition = "hpg-dev", req_nprocs = "1", req_memory = "1gb", req_runtime = "12:00:00")), ("Interactive - 1 CPU core, 8GB RAM, 12h", "1x8x8", "batchspawner.SlurmSpawner", dict(req_partition = "hpg-dev", req_nprocs = "1", req_memory = "8gb", req_runtime = "12:00:00")), ("Compute - 1 CPU core, 32GB RAM, 144h", "1x32x144", "batchspawner.SlurmSpawner", dict(req_nprocs = "1", req_memory = "32gb", req_runtime = "144:00:00")), ("Compute - 4 CPU cores, 32GB RAM, 144h", "4x32x144", "batchspawner.SlurmSpawner", dict(req_nprocs = "4", req_memory = "8gb", req_runtime = "48:00:00")), ("Compute - 4 CPU cores, 32GB RAM, 144h", "4x32x144", "batchspawner.SlurmSpawner", dict(req_nprocs = "4", req_memory = "32gb", req_runtime = "144:00:00")), ("Compute - 8 CPU cores, 32GB RAM, 144h", "8x24x144", "batchspawner.SlurmSpawner", dict(req_nprocs = "8", req_memory = "32gb", req_runtime = "144:00:00")), ("Compute - 16 CPU cores, 32GB RAM, 144h", "16x24x144", "batchspawner.SlurmSpawner", dict(req_nprocs = "16", req_memory = "32gb", req_runtime = "144:00:00")), ("Compute - 24 CPU cores, 32GB RAM, 144h", "24x32x144", "batchspawner.SlurmSpawner", dict(req_nprocs = "24", req_memory = "32gb", req_runtime = "144:00:00")), ("Compute - 4 CPU cores, 48GB RAM, 24h", "4x48x24", "batchspawner.SlurmSpawner", dict(req_nprocs = "4", req_memory = "40gb", req_runtime = "24:00:00")), ("Compute - 32 CPU cores, 120GB RAM, 72h", "32x120x72", "batchspawner.SlurmSpawner", dict(req_nprocs = "32", req_memory = "120gb", req_runtime = "72:00:00")), ("GPU Teaching - 1 GPU, 1 CPU core, 
4GB RAM, 2h", "1x1x4x2", "batchspawner.SlurmSpawner", dict(req_partition = "gpu", req_nprocs = "1", req_memory = "4gb", req_runtime = "2:00:00", req_options="--gpus=1")), ("GPU Interactive - 1 GPU, 1 CPU core, 6GB RAM, 6h", "1x1x6x6", "batchspawner.SlurmSpawner", dict(req_partition = "gpu", req_nprocs = "1", req_memory = "6gb", req_runtime = "6:00:00", req_options="--gpus=1")), ("GPU Interactive - 1 GPU, 4 CPU cores, 24GB RAM, 8h", "1x4x24x8", "batchspawner.SlurmSpawner", dict(req_partition = "gpu", req_nprocs = "4", req_memory = "24gb", req_runtime = "8:00:00", req_options="--gpus=1")), ("GPU Interactive - 2 GPUs, 2 CPU cores, 12GB RAM, 6h", "2x2x12x6", "batchspawner.SlurmSpawner", dict(req_partition = "gpu", req_nprocs = "2", req_memory = "12gb", req_runtime = "6:00:00", req_options="--gpus=2")), ("GPU Compute - 2 GPUs, 2 CPU cores, 12GB RAM, 24h", "2x2x12x24", "batchspawner.SlurmSpawner", dict(req_partition = "gpu", req_nprocs = "2", req_memory = "12gb", req_runtime = "24:00:00", req_options="--gpus=2")), ("GPU Compute - 3 GPUs, 3 CPU cores, 18GB RAM, 24h", "2x2x18x24", "batchspawner.SlurmSpawner", dict(req_partition = "gpu", req_nprocs = "3", req_memory = "18gb", req_runtime = "24:00:00", req_options="--gpus=3")), ("GPU ML - 4 GPUs, 4 CPU cores, 24GB RAM, 24h", "4x4x24x24", "batchspawner.SlurmSpawner", dict(req_partition = "gpu", req_nprocs = "4", req_memory = "24gb", req_runtime = "24:00:00", req_options="--gpus=4")), ("GPU Large ML - 4 GPUs, 4 CPU cores, 64GB RAM, 72h", "4x4x64x72", "batchspawner.SlurmSpawner", dict(req_partition = "gpu", req_nprocs = "4", req_memory = "64gb", req_runtime = "72:00:00", req_options="--gpus=4")), ("GPU Large ML - 8 GPUs, 8 CPU cores, 64GB RAM, 72h", "8x8x64x72", "batchspawner.SlurmSpawner", dict(req_partition = "gpu", req_nprocs = "8", req_memory = "64gb", req_runtime = "72:00:00", req_options="--gpus=8")), ("GPU A100 SM - 1 GPUs, 2 CPU cores, 24GB RAM, 24h", "1x2x24x24", "batchspawner.SlurmSpawner", dict(req_partition = "gpu", req_nprocs = "2", req_memory = "24gb", req_runtime = "24:00:00", req_options="--gpus=1 --constraint=a100")), ] c.BatchSpawnerBase.batch_submit_cmd = '/opt/slurm/bin/sbatch --parsable' c.BatchSpawnerBase.batch_query_cmd = "/opt/slurm/bin/squeue -h -j {job_id} -o '%T %B'" c.BatchSpawnerBase.batch_cancel_cmd = "/opt/slurm/bin/scancel {job_id}" c.BatchSpawnerBase.req_srun = 'source /etc/profile.d/modules.sh; unset XDG_RUNTIME_DIR; export PATH=/apps/jupyterhub/1.1.0/bin:$PATH; /opt/slurm/bin/srun' c.Spawner.cmd = ['jupyter-labhub'] c.Spawner.cpu_guarantee = 1 c.Spawner.cpu_limit = 1 c.Spawner.env_keep = ['PATH', 'PYTHONPATH', 'CONDA_ROOT', 'CONDA_DEFAULT_ENV', 'VIRTUAL_ENV', 'LANG', 'LC_ALL'] c.Spawner.start_timeout = 300 c.Authenticator.admin_users = 'http://10.13.143.123:8001' c.PAMAuthenticator.check_account = True c.PAMAuthenticator.encoding = 'utf8' c.PAMAuthenticator.open_sessions = True c.PAMAuthenticator.service = 'login' ``` </details> <details><summary>Logs</summary> <!-- Errors are often logged by jupytehub. How you get logs depends on your deployment. With kubernetes it might be: kubectl get pod # hub pod name starts with hub... kubectl logs hub-... # or for a single-user server kubectl logs jupyter-username Or the-littlest-jupyterhub: journalctl -u jupyterhub # or for a single-user server journalctl -u jupyter-username --> ``` No relevant logs. ``` </details>
Thank you for opening your first issue in this project! Engagement like this is essential for open source projects! :hugs: <br>If you haven't done so already, check out [Jupyter's Code of Conduct](https://github.com/jupyter/governance/blob/master/conduct/code_of_conduct.md). Also, please try to follow the issue template as it helps other other community members to contribute more effectively. ![welcome](https://raw.githubusercontent.com/jupyterhub/.github/master/images/welcome.jpg) You can meet the other [Jovyans](https://jupyter.readthedocs.io/en/latest/community/content-community.html?highlight=jovyan#what-is-a-jovyan) by joining our [Discourse forum](http://discourse.jupyter.org/). There is also an intro thread there where you can stop by and say Hi! :wave: <br>Welcome to the Jupyter community! :tada: I believe this is a duplicate of #3816.
2024-03-06T22:27:17Z
[]
[]
jupyterhub/jupyterhub
4,750
jupyterhub__jupyterhub-4750
[ "4749" ]
b7b2558ab725fda066e9e9a7916a020090495974
diff --git a/jupyterhub/_xsrf_utils.py b/jupyterhub/_xsrf_utils.py --- a/jupyterhub/_xsrf_utils.py +++ b/jupyterhub/_xsrf_utils.py @@ -10,11 +10,9 @@ import base64 import hashlib -from datetime import datetime, timedelta, timezone from http.cookies import SimpleCookie from tornado import web -from tornado.httputil import format_timestamp from tornado.log import app_log @@ -60,41 +58,76 @@ def _create_signed_value_urlsafe(handler, name, value): return base64.urlsafe_b64encode(signed_value).rstrip(b"=") -def _clear_invalid_xsrf_cookie(handler, cookie_path): +def _get_xsrf_token_cookie(handler): """ - Clear invalid XSRF cookie + Get the _valid_ XSRF token and id from Cookie - This may an old XSRF token, or one set on / by another application. - Because we cannot trust browsers or tornado to give us the more specific cookie, - try to clear _both_ on / and on our prefix, - then reload the page. + Returns (xsrf_token, xsrf_id) found in Cookies header. + + multiple xsrf cookies may be set on multiple paths; + + RFC 6265 states that they should be in order of more specific path to less, + but ALSO states that servers should never rely on order. + + Tornado (6.4) and stdlib (3.12) SimpleCookie explicitly use the _last_ value, + which means the cookie with the _least_ specific prefix will be used if more than one is present. + + Because we sign values, we can get the first valid cookie and not worry about order too much. + + This is simplified from tornado's HTTPRequest.cookies property + only looking for a single cookie. """ - expired = format_timestamp(datetime.now(timezone.utc) - timedelta(days=366)) - cookie = SimpleCookie() - cookie["_xsrf"] = "" - morsel = cookie["_xsrf"] - morsel["expires"] = expired - morsel["path"] = "/" - # use Set-Cookie directly, - # because tornado's set_cookie and clear_cookie use a single _dict_, - # so we can't clear a cookie on multiple paths and then set it - handler.add_header("Set-Cookie", morsel.OutputString(None)) - if cookie_path != "/": - # clear it multiple times! - morsel["path"] = cookie_path - handler.add_header("Set-Cookie", morsel.OutputString(None)) - - if ( - handler.request.method.lower() == "get" - and handler.request.headers.get("Sec-Fetch-Mode", "navigate") == "navigate" - ): - # reload current page because any subsequent set_cookie - # will cancel the clearing of the cookie - # this only makes sense on GET requests - handler.redirect(handler.request.uri) - # halt any other processing of the request - raise web.Finish() + if "Cookie" not in handler.request.headers: + return (None, None) + + for chunk in handler.request.headers["Cookie"].split(";"): + key = chunk.partition("=")[0].strip() + if key != "_xsrf": + # we are only looking for the _xsrf cookie + # ignore everything else + continue + + # use stdlib parsing to handle quotes, validation, etc. 
+ try: + xsrf_token = SimpleCookie(chunk)[key].value.encode("ascii") + except (ValueError, KeyError): + continue + + xsrf_token_id = _get_signed_value_urlsafe(handler, "_xsrf", xsrf_token) + + if xsrf_token_id: + # only return if we found a _valid_ xsrf cookie + # otherwise, keep looking + return (xsrf_token, xsrf_token_id) + # no valid token found found + return (None, None) + + +def _set_xsrf_cookie(handler, xsrf_id, *, cookie_path="", authenticated=None): + """Set xsrf token cookie""" + xsrf_token = _create_signed_value_urlsafe(handler, "_xsrf", xsrf_id) + xsrf_cookie_kwargs = {} + xsrf_cookie_kwargs.update(handler.settings.get('xsrf_cookie_kwargs', {})) + xsrf_cookie_kwargs.setdefault("path", cookie_path) + if authenticated is None: + try: + current_user = handler.current_user + except Exception: + authenticated = False + else: + authenticated = bool(current_user) + if not authenticated: + # limit anonymous xsrf cookies to one hour + xsrf_cookie_kwargs.pop("expires", None) + xsrf_cookie_kwargs.pop("expires_days", None) + xsrf_cookie_kwargs["max_age"] = 3600 + app_log.info( + "Setting new xsrf cookie for %r %r", + xsrf_id, + xsrf_cookie_kwargs, + ) + handler.set_cookie("_xsrf", xsrf_token, **xsrf_cookie_kwargs) def get_xsrf_token(handler, cookie_path=""): @@ -110,23 +143,8 @@ def get_xsrf_token(handler, cookie_path=""): _set_cookie = False # the raw cookie is the token - xsrf_token = xsrf_cookie = handler.get_cookie("_xsrf") - if xsrf_token: - try: - xsrf_token = xsrf_token.encode("ascii") - except UnicodeEncodeError: - xsrf_token = None - - xsrf_id_cookie = _get_signed_value_urlsafe(handler, "_xsrf", xsrf_token) - if xsrf_cookie and not xsrf_id_cookie: - # we have a cookie, but it's invalid! - # handle possibility of _xsrf being set multiple times, - # e.g. on / and on /hub/ - # this will reload the page if it's a GET request - app_log.warning( - "Attempting to clear invalid _xsrf cookie %r", xsrf_cookie[:4] + "..." - ) - _clear_invalid_xsrf_cookie(handler, cookie_path) + xsrf_token, xsrf_id_cookie = _get_xsrf_token_cookie(handler) + cookie_token = xsrf_token # check the decoded, signed value for validity xsrf_id = handler._xsrf_token_id @@ -146,22 +164,16 @@ def get_xsrf_token(handler, cookie_path=""): _set_cookie = ( handler.request.headers.get("Sec-Fetch-Mode", "navigate") == "navigate" ) + if xsrf_id_cookie and not _set_cookie: + # if we aren't setting a cookie here but we got one, + # this means things probably aren't going to work + app_log.warning( + "Not accepting incorrect xsrf token id in cookie on %s", + handler.request.path, + ) if _set_cookie: - xsrf_cookie_kwargs = {} - xsrf_cookie_kwargs.update(handler.settings.get('xsrf_cookie_kwargs', {})) - xsrf_cookie_kwargs.setdefault("path", cookie_path) - if not handler.current_user: - # limit anonymous xsrf cookies to one hour - xsrf_cookie_kwargs.pop("expires", None) - xsrf_cookie_kwargs.pop("expires_days", None) - xsrf_cookie_kwargs["max_age"] = 3600 - app_log.info( - "Setting new xsrf cookie for %r %r", - xsrf_id, - xsrf_cookie_kwargs, - ) - handler.set_cookie("_xsrf", xsrf_token, **xsrf_cookie_kwargs) + _set_xsrf_cookie(handler, xsrf_id, cookie_path=cookie_path) handler._xsrf_token = xsrf_token return xsrf_token diff --git a/jupyterhub/handlers/base.py b/jupyterhub/handlers/base.py --- a/jupyterhub/handlers/base.py +++ b/jupyterhub/handlers/base.py @@ -24,7 +24,12 @@ from tornado.web import RequestHandler, addslash from .. 
import __version__, orm, roles, scopes -from .._xsrf_utils import _anonymous_xsrf_id, check_xsrf_cookie, get_xsrf_token +from .._xsrf_utils import ( + _anonymous_xsrf_id, + _set_xsrf_cookie, + check_xsrf_cookie, + get_xsrf_token, +) from ..metrics import ( PROXY_ADD_DURATION_SECONDS, PROXY_DELETE_DURATION_SECONDS, @@ -730,6 +735,13 @@ def set_login_cookie(self, user): if not self.get_current_user_cookie(): self.set_hub_cookie(user) + # make sure xsrf cookie is updated + # this avoids needing a second request to set the right xsrf cookie + self._jupyterhub_user = user + _set_xsrf_cookie( + self, self._xsrf_token_id, cookie_path=self.hub.base_url, authenticated=True + ) + def authenticate(self, data): return maybe_future(self.authenticator.get_authenticated_user(self, data)) diff --git a/jupyterhub/services/auth.py b/jupyterhub/services/auth.py --- a/jupyterhub/services/auth.py +++ b/jupyterhub/services/auth.py @@ -45,6 +45,7 @@ from tornado.httputil import url_concat from tornado.log import app_log from tornado.web import HTTPError, RequestHandler +from tornado.websocket import WebSocketHandler from traitlets import ( Any, Bool, @@ -59,7 +60,12 @@ ) from traitlets.config import SingletonConfigurable -from .._xsrf_utils import _anonymous_xsrf_id, check_xsrf_cookie, get_xsrf_token +from .._xsrf_utils import ( + _anonymous_xsrf_id, + _set_xsrf_cookie, + check_xsrf_cookie, + get_xsrf_token, +) from ..scopes import _intersect_expanded_scopes from ..utils import _bool_env, get_browser_protocol, url_path_join @@ -800,6 +806,10 @@ def _persist_url_token_if_set(self, handler): if not hasattr(self, 'set_cookie'): # only HubOAuth can persist cookies return + fetch_mode = handler.request.headers.get("Sec-Fetch-Mode", "navigate") + if isinstance(handler, WebSocketHandler) or fetch_mode != "navigate": + # don't do this on websockets or non-navigate requests + return self.log.info( "Storing token from url in cookie for %s", handler.request.remote_ip, @@ -851,6 +861,8 @@ def state_cookie_name(self): def _get_token_cookie(self, handler): """Base class doesn't store tokens in cookies""" + if hasattr(handler, "_hub_auth_token_cookie"): + return handler._hub_auth_token_cookie fetch_mode = handler.request.headers.get("Sec-Fetch-Mode", "unset") if fetch_mode == "websocket" and not self.allow_websocket_cookie_auth: @@ -962,8 +974,8 @@ async def _get_user_cookie(self, handler): try: handler.check_xsrf_cookie() except HTTPError as e: - self.log.error( - f"Not accepting cookie auth on {handler.request.method} {handler.request.path}: {e}" + self.log.debug( + f"Not accepting cookie auth on {handler.request.method} {handler.request.path}: {e.log_message}" ) # don't proceed with cookie auth unless xsrf is okay # don't raise either, because that makes a mess @@ -1187,6 +1199,15 @@ def set_cookie(self, handler, access_token): kwargs, ) handler.set_secure_cookie(self.cookie_name, access_token, **kwargs) + # set updated xsrf token cookie, + # which changes after login + handler._hub_auth_token_cookie = access_token + _set_xsrf_cookie( + handler, + handler._xsrf_token_id, + cookie_path=self.base_url, + authenticated=True, + ) def clear_cookie(self, handler): """Clear the OAuth cookie"""
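A minimal illustration of the duplicate-cookie behavior that the comments in `_get_xsrf_token_cookie` above work around: when `_xsrf` has been set on more than one path, a plain `SimpleCookie` parse keeps only one of the values, which is why the patch splits the raw `Cookie` header and signature-checks each candidate instead. The cookie values below are made up:

```python
from http.cookies import SimpleCookie

# a browser may send two _xsrf cookies, e.g. one set on /hub/ and one on /;
# more specific paths usually come first, but servers must not rely on order
raw_cookie_header = "_xsrf=valid-token-set-on-hub-prefix; _xsrf=stale-token-set-on-root"

parsed = SimpleCookie(raw_cookie_header)
print(parsed["_xsrf"].value)  # -> 'stale-token-set-on-root'
# dict-style parsing keeps only the last occurrence, which can be the stale
# root-path cookie -- the source of the "invalid _xsrf cookie" redirect loop
```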
diff --git a/jupyterhub/tests/browser/test_browser.py b/jupyterhub/tests/browser/test_browser.py --- a/jupyterhub/tests/browser/test_browser.py +++ b/jupyterhub/tests/browser/test_browser.py @@ -12,6 +12,7 @@ from tornado.httputil import url_concat from jupyterhub import orm, roles, scopes +from jupyterhub.tests.test_named_servers import named_servers # noqa from jupyterhub.tests.utils import async_requests, public_host, public_url, ujoin from jupyterhub.utils import url_escape_path, url_path_join @@ -1127,6 +1128,7 @@ async def click_stop_button(browser, username): "fresh", "invalid", "valid-prefix-invalid-root", + "valid-prefix-invalid-other-prefix", ], ) async def test_login_xsrf_initial_cookies(app, browser, case, username): @@ -1136,6 +1138,7 @@ async def test_login_xsrf_initial_cookies(app, browser, case, username): """ hub_root = public_host(app) hub_url = url_path_join(public_host(app), app.hub.base_url) + hub_parent = hub_url.rstrip("/").rsplit("/", 1)[0] + "/" login_url = url_path_join( hub_url, url_concat("login", {"next": url_path_join(app.base_url, "/hub/home")}) ) @@ -1145,7 +1148,11 @@ async def test_login_xsrf_initial_cookies(app, browser, case, username): await browser.context.add_cookies( [{"name": "_xsrf", "value": "invalid-hub-prefix", "url": hub_url}] ) - elif case == "valid-prefix-invalid-root": + elif case.startswith("valid-prefix"): + if "invalid-root" in case: + invalid_url = hub_root + else: + invalid_url = hub_parent await browser.goto(login_url) # first visit sets valid xsrf cookie cookies = await browser.context.cookies() @@ -1157,7 +1164,7 @@ async def test_login_xsrf_initial_cookies(app, browser, case, username): # currently, this test assumes the observed behavior, # which is that the invalid cookie on `/` has _higher_ priority await browser.context.add_cookies( - [{"name": "_xsrf", "value": "invalid-root", "url": hub_root}] + [{"name": "_xsrf", "value": "invalid-root", "url": invalid_url}] ) cookies = await browser.context.cookies() assert len(cookies) == 2 @@ -1190,7 +1197,9 @@ def _cookie_dict(cookie_list): return cookie_dict -async def test_singleuser_xsrf(app, browser, user, create_user_with_scopes, full_spawn): +async def test_singleuser_xsrf( + app, browser, user, create_user_with_scopes, full_spawn, named_servers # noqa: F811 +): # full login process, checking XSRF handling # start two servers target_user = user @@ -1311,33 +1320,61 @@ async def iframe(src): # check that server page can still connect to its own kernels token = target_user.new_api_token(scopes=["access:servers!user"]) - url = url_path_join(public_url(app, target_user), "/api/kernels") - headers = {"Authorization": f"Bearer {token}"} - r = await async_requests.post(url, headers=headers) - r.raise_for_status() - kernel = r.json() - kernel_id = kernel["id"] - kernel_url = url_path_join(url, kernel_id) - kernel_ws_url = "ws" + url_path_join(kernel_url, "channels")[4:] - try: - result = await browser.evaluate( - """ - async (ws_url) => { - ws = new WebSocket(ws_url); - finished = await new Promise((resolve, reject) => { - ws.onerror = (err) => { - reject(err); - }; - ws.onopen = () => { - resolve("ok"); - }; - }); - return finished; - } - """, - kernel_ws_url, - ) - finally: - r = await async_requests.delete(kernel_url, headers=headers) + + async def test_kernel(kernels_url): + headers = {"Authorization": f"Bearer {token}"} + r = await async_requests.post(kernels_url, headers=headers) r.raise_for_status() - assert result == "ok" + kernel = r.json() + kernel_id = kernel["id"] + kernel_url = 
url_path_join(kernels_url, kernel_id) + kernel_ws_url = "ws" + url_path_join(kernel_url, "channels")[4:] + try: + result = await browser.evaluate( + """ + async (ws_url) => { + ws = new WebSocket(ws_url); + finished = await new Promise((resolve, reject) => { + ws.onerror = (err) => { + reject(err); + }; + ws.onopen = () => { + resolve("ok"); + }; + }); + return finished; + } + """, + kernel_ws_url, + ) + finally: + r = await async_requests.delete(kernel_url, headers=headers) + r.raise_for_status() + assert result == "ok" + + kernels_url = url_path_join(public_url(app, target_user), "/api/kernels") + await test_kernel(kernels_url) + + # final check: make sure named servers work. + # first, visit spawn page to launch server, + # will issue cookies, etc. + server_name = "named" + url = url_path_join( + public_host(app), + url_path_join(app.base_url, f"hub/spawn/{browser_user.name}/{server_name}"), + ) + await browser.goto(url) + await expect(browser).to_have_url( + re.compile(rf".*/user/{browser_user.name}/{server_name}/.*") + ) + # from named server URL, make sure we can talk to a kernel + token = browser_user.new_api_token(scopes=["access:servers!user"]) + # named-server URL + kernels_url = url_path_join( + public_url(app, browser_user), server_name, "api/kernels" + ) + await test_kernel(kernels_url) + # go back to user's own page, test again + # make sure we didn't break anything + await browser.goto(public_url(app, browser_user)) + await test_kernel(url_path_join(public_url(app, browser_user), "api/kernels")) diff --git a/jupyterhub/tests/conftest.py b/jupyterhub/tests/conftest.py --- a/jupyterhub/tests/conftest.py +++ b/jupyterhub/tests/conftest.py @@ -448,8 +448,6 @@ def temp_user_creator(*scopes, name=None): return app.users[orm_user.id] yield temp_user_creator - for user in temp_users: - app.users.delete(user) @fixture diff --git a/jupyterhub/tests/test_services_auth.py b/jupyterhub/tests/test_services_auth.py --- a/jupyterhub/tests/test_services_auth.py +++ b/jupyterhub/tests/test_services_auth.py @@ -528,7 +528,7 @@ async def test_oauth_cookie_collision(app, mockservice_url, create_user_with_sco print(url) s = AsyncSession() name = 'mypha' - user = create_user_with_scopes("access:services", name=name) + create_user_with_scopes("access:services", name=name) s.cookies = await app.login_user(name) state_cookie_name = 'service-%s-oauth-state' % service.name service_cookie_name = 'service-%s' % service.name @@ -551,10 +551,9 @@ async def test_oauth_cookie_collision(app, mockservice_url, create_user_with_sco assert s.cookies[state_cookie_name] == state_1 # finish oauth 2 + hub_xsrf = s.cookies.get("_xsrf", path=app.hub.base_url) # submit the oauth form to complete authorization - r = await s.post( - oauth_2.url, data={'scopes': ['identify'], "_xsrf": s.cookies["_xsrf"]} - ) + r = await s.post(oauth_2.url, data={'scopes': ['identify'], "_xsrf": hub_xsrf}) r.raise_for_status() assert r.url == url # after finishing, state cookie is cleared @@ -564,9 +563,7 @@ async def test_oauth_cookie_collision(app, mockservice_url, create_user_with_sco service_cookie_2 = s.cookies[service_cookie_name] # finish oauth 1 - r = await s.post( - oauth_1.url, data={'scopes': ['identify'], "_xsrf": s.cookies["_xsrf"]} - ) + r = await s.post(oauth_1.url, data={'scopes': ['identify'], "_xsrf": hub_xsrf}) r.raise_for_status() assert r.url == url @@ -635,7 +632,7 @@ def auth_tokens(): r = await s.get(public_url(app, path='hub/logout')) r.raise_for_status() # verify that all cookies other than the service cookie are 
cleared - assert sorted(s.cookies.keys()) == ["_xsrf", service_cookie_name] + assert sorted(set(s.cookies.keys())) == ["_xsrf", service_cookie_name] # verify that clearing session id invalidates service cookie # i.e. redirect back to login page r = await s.get(url)
Named servers broken in 4.1.0 ### Bug description Spawning multiple servers (containers) results in 'too many redirects' error. ### How to reproduce A JupyterHub + multiple JupyterLab images with JupyterHub 4.1.1 installed. #### Expected behaviour No issue running multiple servers (containers) in parallel. #### Actual behaviour Logs of a JupyterLab container: Repeated ``` [I 2024-03-24 13:57:09.790 ServerApp] 302 GET /user/benz0li/qgis-latest/lab -> /user/benz0li/qgis-latest/lab (benz0li@[redacted]) 6.93ms [W 2024-03-24 13:57:09.821 ServerApp] Attempting to clear invalid _xsrf cookie 'Mnwx...' ``` 👉 JupyterLab (`/lab`) not loading. ### Your personal set up JupyterHub: [`glcr.b-data.ch/jupyterhub/jupyterhub:4.1.1-oauth-docker`](https://gitlab.b-data.ch/jupyterhub/jupyterhub/container_registry) JupyterLab: * [JupyterLab QGIS docker stack](https://github.com/b-data/jupyterlab-qgis-docker-stack) * [JupyterLab Python docker stack](https://github.com/b-data/jupyterlab-python-docker-stack) * [JupyterLab Julia docker stack](https://github.com/b-data/jupyterlab-julia-docker-stack) * [JupyterLab R docker stack](https://github.com/b-data/jupyterlab-r-docker-stack) ℹ️ base-images built with `JUPYTERHUB_VERSION=4.1.1`. Ping @minrk
Thanks for reporting! I think I know what this is, I will try to fix it.
2024-03-25T11:14:47Z
[]
[]
jupyterhub/jupyterhub
4,774
jupyterhub__jupyterhub-4774
[ "4437" ]
7ca2105b8054725280c35f6d8d2fd682efd4d6e8
diff --git a/jupyterhub/handlers/static.py b/jupyterhub/handlers/static.py --- a/jupyterhub/handlers/static.py +++ b/jupyterhub/handlers/static.py @@ -15,7 +15,9 @@ def compute_etag(self): return None def set_extra_headers(self, path): - if "v" not in self.request.arguments: + if "v" not in self.request.arguments or self.settings.get( + "no_cache_static", False + ): self.add_header("Cache-Control", "no-cache") diff --git a/setup.py b/setup.py --- a/setup.py +++ b/setup.py @@ -113,27 +113,34 @@ def run(self): class CSS(BaseCommand): - description = "compile CSS from LESS" + description = "compile CSS" def should_run(self): - """Does less need to run?""" - # from IPython.html.tasks.py - + """Does CSS need to run?""" css_targets = [pjoin(static, 'css', 'style.min.css')] css_maps = [t + '.map' for t in css_targets] targets = css_targets + css_maps - if not all(os.path.exists(t) for t in targets): - # some generated files don't exist - return True - earliest_target = sorted(mtime(t) for t in targets)[0] - - # check if any .less files are newer than the generated targets + earliest_target_mtime = float('inf') + earliest_target_name = '' + for t in targets: + if not os.path.exists(t): + print(f"Need to build css target: {t}") + return True + target_mtime = mtime(t) + if target_mtime < earliest_target_mtime: + earliest_target_name = t + earliest_target_mtime = target_mtime + + # check if any .scss files are newer than the generated targets for dirpath, dirnames, filenames in os.walk(static): for f in filenames: - if f.endswith('.less'): + if f.endswith('.scss'): path = pjoin(static, dirpath, f) timestamp = mtime(path) - if timestamp > earliest_target: + if timestamp > earliest_target_mtime: + print( + f"mtime for {path} > {earliest_target_name}, needs update" + ) return True return False @@ -144,33 +151,18 @@ def run(self): return self.run_command('js') - print("Building css with less") - - style_less = pjoin(static, 'less', 'style.less') - style_css = pjoin(static, 'css', 'style.min.css') - sourcemap = style_css + '.map' - - args = [ - 'npm', - 'run', - 'lessc', - '--', - '--clean-css', - f'--source-map-basepath={static}', - f'--source-map={sourcemap}', - '--source-map-rootpath=../', - style_less, - style_css, - ] + print("Building css") + + args = ['npm', 'run', 'css'] try: check_call(args, cwd=here, shell=shell) except OSError as e: - print("Failed to run lessc: %s" % e, file=sys.stderr) + print("Failed to build css: %s" % e, file=sys.stderr) print("You can install js dependencies with `npm install`", file=sys.stderr) raise # update data-files in case this created new files self.distribution.data_files = get_data_files() - assert not self.should_run(), 'CSS.run failed' + assert not self.should_run(), 'CSS.run did not produce up-to-date output' class JSX(BaseCommand):
diff --git a/jupyterhub/tests/browser/test_browser.py b/jupyterhub/tests/browser/test_browser.py --- a/jupyterhub/tests/browser/test_browser.py +++ b/jupyterhub/tests/browser/test_browser.py @@ -329,6 +329,64 @@ async def open_home_page(app, browser, user): await expect(browser).to_have_url(re.compile(".*/hub/home")) +async def test_home_nav_collapse(app, browser, user_special_chars): + user = user_special_chars.user + await open_home_page(app, browser, user) + nav = browser.locator(".navbar") + navbar_collapse = nav.locator(".navbar-collapse") + logo = nav.locator("#jupyterhub-logo") + home = nav.get_by_text("Home") + logout_name = nav.get_by_text(user.name) + logout_btn = nav.get_by_text("Logout") + toggler = nav.locator(".navbar-toggler") + + await expect(nav).to_be_visible() + + await browser.set_viewport_size({"width": 640, "height": 480}) + # links visible, nav items visible, collapse not visible + await expect(logo).to_be_visible() + await expect(home).to_be_visible() + await expect(logout_name).to_be_visible() + await expect(logout_btn).to_be_visible() + await expect(toggler).not_to_be_visible() + + # below small breakpoint (576px) + await browser.set_viewport_size({"width": 500, "height": 480}) + # logo visible, links and logout not visible, toggler visible + await expect(logo).to_be_visible() + await expect(home).not_to_be_visible() + await expect(logout_name).not_to_be_visible() + await expect(logout_btn).not_to_be_visible() + await expect(toggler).to_be_visible() + + # click toggler, links should be visible + await toggler.click() + # wait for expand to finish + # expand animates through `collapse -> collapsing -> collapse show` + await expect(navbar_collapse).to_have_class(re.compile(r"\bshow\b")) + await expect(home).to_be_visible() + await expect(logout_name).to_be_visible() + await expect(logout_btn).to_be_visible() + await expect(toggler).to_be_visible() + # wait for expand animation + # click toggler again, links should hide + # need to wait for expand to complete + await toggler.click() + await expect(navbar_collapse).not_to_have_class(re.compile(r"\bshow\b")) + await expect(home).not_to_be_visible() + await expect(logout_name).not_to_be_visible() + await expect(logout_btn).not_to_be_visible() + await expect(toggler).to_be_visible() + + # resize, should re-show + await browser.set_viewport_size({"width": 640, "height": 480}) + await expect(logo).to_be_visible() + await expect(home).to_be_visible() + await expect(logout_name).to_be_visible() + await expect(logout_btn).to_be_visible() + await expect(toggler).not_to_be_visible() + + async def test_start_button_server_not_started(app, browser, user_special_chars): """verify that when server is not started one button is available, after starting 2 buttons are available""" @@ -413,7 +471,7 @@ async def test_token_request_form_and_panel(app, browser, user_special_chars): """verify elements of the request token form""" await open_token_page(app, browser, user_special_chars.user) - request_btn = browser.locator('//div[@class="text-center"]').get_by_role("button") + request_btn = browser.locator('//button[@type="submit"]') expected_btn_name = 'Request new API token' # check if the request token button is enabled # check the buttons name @@ -455,7 +513,7 @@ async def test_token_request_form_and_panel(app, browser, user_special_chars): expected_panel_token_heading = "Your new API Token" token_area = browser.locator('#token-area') await expect(token_area).to_be_visible() - token_area_heading = 
token_area.locator('//div[@class="panel-heading"]') + token_area_heading = token_area.locator('div.card-header') await expect(token_area_heading).to_have_text(expected_panel_token_heading) token_result = browser.locator('#token-result') await expect(token_result).not_to_be_empty() @@ -463,7 +521,7 @@ async def test_token_request_form_and_panel(app, browser, user_special_chars): # verify that "Your new API Token" panel is hidden after refresh the page await browser.reload(wait_until="load") await expect(token_area).to_be_hidden() - api_token_table_area = browser.locator('//div[@class="row"]').nth(2) + api_token_table_area = browser.locator("div#api-tokens-section") await expect(api_token_table_area.get_by_role("table")).to_be_visible() expected_table_name = "API Tokens" await expect(api_token_table_area.get_by_role("heading")).to_have_text( @@ -516,7 +574,7 @@ async def test_request_token_expiration( # reload the page await browser.reload(wait_until="load") # API Tokens table: verify that elements are displayed - api_token_table_area = browser.locator("div#api-tokens-section").nth(0) + api_token_table_area = browser.locator("div#api-tokens-section") await expect(api_token_table_area.get_by_role("table")).to_be_visible() await expect(api_token_table_area.locator("tr.token-row")).to_have_count(1) @@ -619,12 +677,14 @@ async def test_request_token_permissions( error_message = await error_dialog.locator(".modal-body").inner_text() assert "API request failed (400)" in error_message assert expected_error in error_message + await error_dialog.locator("button[aria-label='Close']").click() + await expect(error_dialog).not_to_be_visible() return await browser.reload(wait_until="load") # API Tokens table: verify that elements are displayed - api_token_table_area = browser.locator("div#api-tokens-section").nth(0) + api_token_table_area = browser.locator("div#api-tokens-section") await expect(api_token_table_area.get_by_role("table")).to_be_visible() await expect(api_token_table_area.locator("tr.token-row")).to_have_count(1) @@ -670,9 +730,7 @@ async def test_revoke_token(app, browser, token_type, user_special_chars): await browser.wait_for_load_state("load") await expect(browser).to_have_url(re.compile(".*/hub/token")) if token_type == "both" or token_type == "request_by_user": - request_btn = browser.locator('//div[@class="text-center"]').get_by_role( - "button" - ) + request_btn = browser.locator('//button[@type="submit"]') await request_btn.click() # wait for token response to show up on the page await browser.wait_for_load_state("load") @@ -879,9 +937,9 @@ async def test_oauth_page( # login user await login(browser, user.name, password=str(user.name)) - auth_btn = browser.locator('//input[@type="submit"]') + auth_btn = browser.locator('//button[@type="submit"]') await expect(auth_btn).to_be_enabled() - text_permission = browser.get_by_role("paragraph") + text_permission = browser.get_by_role("paragraph").nth(1) await expect(text_permission).to_contain_text(f"JupyterHub service {service.name}") await expect(text_permission).to_contain_text(f"oauth URL: {expected_redirect_url}") @@ -1348,7 +1406,7 @@ async def test_singleuser_xsrf( # visit target user, sets credentials for second server await browser.goto(public_url(app, target_user)) await expect(browser).to_have_url(re.compile(r".*/oauth2/authorize")) - auth_button = browser.locator('//input[@type="submit"]') + auth_button = browser.locator('//button[@type="submit"]') await expect(auth_button).to_be_enabled() await auth_button.click() await 
expect(browser).to_have_url(re.compile(rf".*/user/{target_user.name}/.*")) diff --git a/jupyterhub/tests/browser/test_share.py b/jupyterhub/tests/browser/test_share.py --- a/jupyterhub/tests/browser/test_share.py +++ b/jupyterhub/tests/browser/test_share.py @@ -48,16 +48,16 @@ async def test_share_code_flow_full(app, browser, full_spawn, create_user_with_s # back to accept-share page await expect(browser).to_have_url(re.compile(r".*/accept-share")) - header_text = await browser.locator("//h2").first.text_content() + header_text = await browser.locator("p.lead").first.text_content() assert f"access {user.name}'s server" in header_text assert f"You ({share_user.name})" in header_text # TODO verify form - submit = browser.locator('//input[@type="submit"]') + submit = browser.locator('//button[@type="submit"]') await submit.click() # redirects to server, which triggers oauth approval await expect(browser).to_have_url(re.compile(r".*/oauth2/authorize")) - submit = browser.locator('//input[@type="submit"]') + submit = browser.locator('//button[@type="submit"]') await submit.click() # finally, we are at the server! diff --git a/jupyterhub/tests/test_pages.py b/jupyterhub/tests/test_pages.py --- a/jupyterhub/tests/test_pages.py +++ b/jupyterhub/tests/test_pages.py @@ -1328,7 +1328,7 @@ async def test_services_nav_links( r = await get_page("home", app, cookies=cookies) assert r.status_code == 200 page = BeautifulSoup(r.text) - nav = page.find("ul", class_="nav") + nav = page.find("ul", class_="navbar-nav") # find service links nav_urls = [a["href"] for a in nav.find_all("a")] if present: diff --git a/testing/jupyterhub_config.py b/testing/jupyterhub_config.py --- a/testing/jupyterhub_config.py +++ b/testing/jupyterhub_config.py @@ -6,16 +6,25 @@ c = get_config() # noqa -from jupyterhub.auth import DummyAuthenticator - -c.JupyterHub.authenticator_class = DummyAuthenticator +c.JupyterHub.authenticator_class = "dummy" # Optionally set a global password that all users must use # c.DummyAuthenticator.password = "your_password" -from jupyterhub.spawner import SimpleLocalProcessSpawner - -c.JupyterHub.spawner_class = SimpleLocalProcessSpawner +c.JupyterHub.spawner_class = "simple" # only listen on localhost for testing c.JupyterHub.bind_url = 'http://127.0.0.1:8000' + +# don't cache static files +c.JupyterHub.tornado_settings = { + "no_cache_static": True, + "slow_spawn_timeout": 0, +} + +c.JupyterHub.allow_named_servers = True +c.JupyterHub.default_url = "/hub/home" + +# make sure admin UI is available and any user can login +c.Authenticator.admin_users = {"admin"} +c.Authenticator.allow_all = True
Upgrade non-admin parts of UI to Bootstrap 5

### Proposed change

Currently, we are on Bootstrap 3, which is almost 10 years old. We should upgrade to Bootstrap 5! This issue is focused on Bootstrap 5 for our non-admin panel, as the admin panel is using React and is already on Bootstrap 4.

## Switch to webpack for CSS customization

We currently customize our Bootstrap CSS with custom LESS files. We should switch to using webpack instead, which would allow us to use https://getbootstrap.com/docs/5.0/getting-started/webpack/ for customization.

## Files to be modified

Now that the admin panel is React, this is simpler. The following template files need to be modified to support Bootstrap 5:

- [ ] 404.html
- [ ] admin.html
- [ ] error.html
- [ ] home.html
- [ ] login.html
- [ ] logout.html
- [ ] not_running.html
- [ ] oauth.html
- [ ] page.html
- [ ] spawn.html
- [ ] spawn_pending.html
- [ ] stop_pending.html
- [ ] token.html
2024-04-04T14:24:49Z
[]
[]
jupyterhub/jupyterhub
4779
jupyterhub__jupyterhub-4779
[ "4778" ]
f9fb650a7b1cfbc5306cb674ec3038f1801f4e17
diff --git a/jupyterhub/singleuser/_decorator.py b/jupyterhub/singleuser/_decorator.py new file mode 100644 --- /dev/null +++ b/jupyterhub/singleuser/_decorator.py @@ -0,0 +1,14 @@ +from typing import Any, Callable, TypeVar + +try: + from jupyter_server.auth.decorator import allow_unauthenticated +except ImportError: + FuncT = TypeVar("FuncT", bound=Callable[..., Any]) + + # if using an older jupyter-server version this can be a no-op, + # as these do not support marking endpoints anyways + def allow_unauthenticated(method: FuncT) -> FuncT: + return method + + +__all__ = ["allow_unauthenticated"] diff --git a/jupyterhub/singleuser/extension.py b/jupyterhub/singleuser/extension.py --- a/jupyterhub/singleuser/extension.py +++ b/jupyterhub/singleuser/extension.py @@ -51,6 +51,7 @@ url_path_join, ) +from ._decorator import allow_unauthenticated from ._disable_user_config import _disable_user_config SINGLEUSER_TEMPLATES_DIR = str(Path(__file__).parent.resolve().joinpath("templates")) @@ -68,6 +69,7 @@ def _exclude_home(path_list): class JupyterHubLogoutHandler(LogoutHandler): + @allow_unauthenticated def get(self): hub_auth = self.identity_provider.hub_auth # clear token stored in single-user cookie (set by hub_auth) @@ -95,6 +97,10 @@ class JupyterHubOAuthCallbackHandler(HubOAuthCallbackHandler): def initialize(self, hub_auth): self.hub_auth = hub_auth + @allow_unauthenticated + async def get(self): + return await super().get() + class JupyterHubIdentityProvider(IdentityProvider): """Identity Provider for JupyterHub OAuth diff --git a/jupyterhub/singleuser/mixins.py b/jupyterhub/singleuser/mixins.py --- a/jupyterhub/singleuser/mixins.py +++ b/jupyterhub/singleuser/mixins.py @@ -52,6 +52,7 @@ make_ssl_context, url_path_join, ) +from ._decorator import allow_unauthenticated from ._disable_user_config import _disable_user_config, _exclude_home # Authenticate requests with the Hub @@ -132,6 +133,7 @@ def validate_security(cls, app, ssl_options=None): class JupyterHubLogoutHandlerMixin: + @allow_unauthenticated def get(self): self.settings['hub_auth'].clear_cookie(self) self.redirect( @@ -147,6 +149,10 @@ class OAuthCallbackHandlerMixin(HubOAuthCallbackHandler): def hub_auth(self): return self.settings['hub_auth'] + @allow_unauthenticated + async def get(self): + return await super().get() + # register new hub related command-line aliases aliases = {
diff --git a/jupyterhub/tests/test_singleuser.py b/jupyterhub/tests/test_singleuser.py --- a/jupyterhub/tests/test_singleuser.py +++ b/jupyterhub/tests/test_singleuser.py @@ -2,6 +2,7 @@ import os import sys +import warnings from contextlib import nullcontext from pathlib import Path from pprint import pprint @@ -291,6 +292,57 @@ async def test_notebook_dir( raise ValueError(f"No contents check for {notebook_dir=}") [email protected]("extension", [True, False]) [email protected](IS_JUPYVERSE, reason="jupyverse has no auth configuration") +async def test_forbid_unauthenticated_access( + request, app, tmp_path, user, full_spawn, extension +): + try: + from jupyter_server.auth.decorator import allow_unauthenticated # noqa + except ImportError: + pytest.skip("needs jupyter-server 2.13") + + from jupyter_server.utils import JupyterServerAuthWarning + + # login, start the server + cookies = await app.login_user('nandy') + s = AsyncSession() + s.cookies = cookies + user = app.users['nandy'] + # stop spawner, if running: + if user.running: + await user.stop() + # start with new config: + user.spawner.default_url = "/jupyterhub-test-info" + + if extension: + user.spawner.environment["JUPYTERHUB_SINGLEUSER_EXTENSION"] = "1" + else: + user.spawner.environment["JUPYTERHUB_SINGLEUSER_EXTENSION"] = "0" + + # make sure it's resolved to start + tmp_path = tmp_path.resolve() + real_home_dir = tmp_path / "realhome" + real_home_dir.mkdir() + # make symlink to test resolution + home_dir = tmp_path / "home" + home_dir.symlink_to(real_home_dir) + # home_dir is defined on SimpleSpawner + user.spawner.home_dir = str(home_dir) + jupyter_config_dir = home_dir / ".jupyter" + jupyter_config_dir.mkdir() + # verify config paths + with (jupyter_config_dir / "jupyter_server_config.py").open("w") as f: + f.write("c.ServerApp.allow_unauthenticated_access = False") + + # If there are core endpoints (added by jupyterhub) without decorators, + # spawn will error out. If there are extension endpoints without decorators + # these will be logged as warnings. + with warnings.catch_warnings(): + warnings.simplefilter("error", JupyterServerAuthWarning) + await user.spawn() + + @pytest.mark.skipif(IS_JUPYVERSE, reason="jupyverse has no --help-all") def test_help_output(): out = check_output(
Support `ServerApp.allow_unauthenticated_access = False`

With `c.ServerApp.allow_unauthenticated_access = False` I see:

```
Exception: Core endpoints without @allow_unauthenticated, @ws_authenticated, nor @web.authenticated:
 - GET of JupyterHubLogoutHandler registered for /user/myname/logout
 - GET of JupyterHubOAuthCallbackHandler registered for /user/myname/oauth_callback
```

Both endpoints are likely safe to mark as `@allow_unauthenticated` (I am less clear about the `logout` one).
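The patch above resolves this by marking both handlers. As a readable distillation of that approach — and only a sketch, assuming jupyter-server ≥ 2.13 provides the decorator and falling back to a no-op otherwise; the handler body here is a placeholder, not JupyterHub's actual implementation:

```python
from tornado import web

try:
    from jupyter_server.auth.decorator import allow_unauthenticated
except ImportError:
    # Older jupyter-server releases don't enforce endpoint marking,
    # so a pass-through decorator is all that's needed there.
    def allow_unauthenticated(method):
        return method


class LogoutHandler(web.RequestHandler):
    @allow_unauthenticated
    def get(self):
        # Placeholder body: the real handler clears the hub-set auth cookie
        # before redirecting to the Hub's logout page.
        self.redirect("/hub/logout")
```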
2024-04-10T09:45:14Z
[]
[]
jupyterhub/jupyterhub
4794
jupyterhub__jupyterhub-4794
[ "4789" ]
fb1614e20ac36bc1d7de826bca9c7b4ea199a048
diff --git a/jupyterhub/apihandlers/users.py b/jupyterhub/apihandlers/users.py --- a/jupyterhub/apihandlers/users.py +++ b/jupyterhub/apihandlers/users.py @@ -10,7 +10,7 @@ from async_generator import aclosing from dateutil.parser import parse as parse_date from sqlalchemy import func, or_ -from sqlalchemy.orm import joinedload, selectinload +from sqlalchemy.orm import joinedload, raiseload, selectinload # noqa from tornado import web from tornado.iostream import StreamClosedError @@ -122,8 +122,7 @@ def get(self): post_filter = None # starting query - # fetch users and groups, which will be used for filters - query = self.db.query(orm.User).outerjoin(orm.Group, orm.User.groups) + query = self.db.query(orm.User) if state_filter in {"active", "ready"}: # only get users with active servers @@ -137,6 +136,8 @@ def get(self): .join(orm.Spawner, orm.User._orm_spawners) # this implicitly gets Users with *any* active server .filter(orm.Spawner.server != None) + # group-by ensures the count is correct + .group_by(orm.User.id) ) if state_filter == "ready": # have to post-process query results because active vs ready @@ -156,17 +157,16 @@ def get(self): ) elif state_filter: raise web.HTTPError(400, "Unrecognized state filter: %r" % state_filter) - else: - # no filter, return all users - query = query.outerjoin(orm.Spawner, orm.User._orm_spawners).outerjoin( - orm.Server, orm.Spawner.server - ) # apply eager load options query = query.options( selectinload(orm.User.roles), selectinload(orm.User.groups), - joinedload(orm.User._orm_spawners), + joinedload(orm.User._orm_spawners).joinedload(orm.Spawner.user), + # raiseload here helps us make sure we've loaded everything in one query + # but since we share a single db session, we can't do this for real + # but it's useful in testing + # raiseload("*"), ) sub_scope = self.parsed_scopes['list:users'] @@ -217,6 +217,8 @@ def get(self): data = user_list self.write(json.dumps(data)) + # if testing with raiselaod above, need expire_all to avoid affecting other operations + # self.db.expire_all() @needs_scope('admin:users') async def post(self):
diff --git a/jupyterhub/tests/test_api.py b/jupyterhub/tests/test_api.py --- a/jupyterhub/tests/test_api.py +++ b/jupyterhub/tests/test_api.py @@ -333,14 +333,25 @@ async def test_get_users_pagination( # populate users usernames = [] + groups = [] + for i in range(3): + group = orm.Group(name=f"pagination-{i}") + db.add(group) + db.commit() existing_users = db.query(orm.User).order_by(orm.User.id.asc()) usernames.extend(u.name for u in existing_users) for i in range(n - existing_users.count()): name = new_username() usernames.append(name) - add_user(db, app, name=name) - print(f"{db.query(orm.User).count()} total users") + user = add_user(db, app, name=name) + # add some users to groups + # make sure group membership doesn't affect pagination count + if i % 2: + user.groups = groups + db.commit() + + total_users = db.query(orm.User).count() url = 'users' params = {} @@ -372,6 +383,7 @@ async def test_get_users_pagination( ) assert "include_stopped_servers" in next_query users = response["items"] + assert pagination["total"] == total_users else: users = response assert len(users) == expected_count @@ -400,6 +412,7 @@ async def test_get_users_state_filter(app, state): has_two_inactive = add_user(db, app=app, name='has_two_inactive') # has_zero: no Spawners registered at all has_zero = add_user(db, app=app, name='has_zero') + total_users = db.query(orm.User).count() test_usernames = { "has_one_active", @@ -442,14 +455,26 @@ def add_spawner(user, name='', active=True, ready=True): add_spawner(has_one_active, active=True, ready=False) add_spawner(has_one_active, "inactive", active=False) - r = await api_request(app, f'users?state={state}') + r = await api_request( + app, f'users?state={state}', headers={"Accept": PAGINATION_MEDIA_TYPE} + ) if state == "invalid": assert r.status_code == 400 return assert r.status_code == 200 - usernames = sorted(u["name"] for u in r.json() if u["name"] in test_usernames) + response = r.json() + users = response["items"] + page = response["_pagination"] + + usernames = sorted(u["name"] for u in users if u["name"] in test_usernames) assert usernames == expected + if state == "ready": + # "ready" can't actually get a correct count because it has post-filtering applied + # but it has an upper bound + assert page["total"] >= len(users) + else: + assert page["total"] == len(users) @mark.user
pagination total is not correct with named servers

### Bug description

`total` in the pagination of `GET /api/users` appears to count the number of _servers_, not the number of users.

### How to reproduce

1. enable named servers
2. create one or more named servers
3. `curl -H "Accept: application/jupyterhub-pagination+json" -H "Authorization: token $token" http://127.0.0.1:8000/hub/api/users` or visit the admin page and look at the page info in the footer

The admin page will show e.g. `1-3 of 5` when there are 5 servers, but only 3 users.

#### Expected behavior

`total` is correct

#### Actual behavior

`total` is too high

### Your personal set up

jupyterhub @ 6a93abbe1c285f33d2cf13e08d2cb98dd03be54b
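The root cause is a classic join-inflation problem: counting the rows of a user query joined against spawners counts one row per server, not per user. A toy SQLAlchemy sketch (not JupyterHub's real models) of the inflation and of how grouping by the user id restores the intended count, as the patch above does:

```python
# Toy schema for illustration only -- these are not JupyterHub's real models.
from sqlalchemy import Column, ForeignKey, Integer, create_engine
from sqlalchemy.orm import Session, declarative_base

Base = declarative_base()


class User(Base):
    __tablename__ = "users"
    id = Column(Integer, primary_key=True)


class Spawner(Base):
    __tablename__ = "spawners"
    id = Column(Integer, primary_key=True)
    user_id = Column(Integer, ForeignKey("users.id"))


engine = create_engine("sqlite://")
Base.metadata.create_all(engine)

with Session(engine) as db:
    # one user with two named servers
    db.add_all([User(id=1), Spawner(id=1, user_id=1), Spawner(id=2, user_id=1)])
    db.commit()

    query = db.query(User).outerjoin(Spawner)
    print(query.count())                    # 2: one row per server
    print(query.group_by(User.id).count())  # 1: one row per user
```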
2024-04-19T10:01:47Z
[]
[]
jupyterhub/jupyterhub
4797
jupyterhub__jupyterhub-4797
[ "4777" ]
8d298922e523393c01ccf457c7f0f389edcee97f
diff --git a/examples/service-whoami/jupyterhub_config.py b/examples/service-whoami/jupyterhub_config.py --- a/examples/service-whoami/jupyterhub_config.py +++ b/examples/service-whoami/jupyterhub_config.py @@ -7,6 +7,7 @@ 'name': 'whoami-api', 'url': 'http://127.0.0.1:10101', 'command': [sys.executable, './whoami.py'], + 'display': False, }, { 'name': 'whoami-oauth', @@ -36,3 +37,5 @@ c.JupyterHub.authenticator_class = 'dummy' c.JupyterHub.spawner_class = 'simple' c.JupyterHub.ip = '127.0.0.1' # let's just run on localhost while dummy auth is enabled +# default to home page, since we don't want to start servers for this demo +c.JupyterHub.default_url = "/hub/home" diff --git a/jupyterhub/services/auth.py b/jupyterhub/services/auth.py --- a/jupyterhub/services/auth.py +++ b/jupyterhub/services/auth.py @@ -250,7 +250,6 @@ class HubAuth(SingletonConfigurable): fetched from JUPYTERHUB_API_URL by default. - cookie_cache_max_age: the number of seconds responses from the Hub should be cached. - - login_url (the *public* ``/hub/login`` URL of the Hub). """ hub_host = Unicode( @@ -331,17 +330,18 @@ def _default_hub_prefix(self): return url_path_join(os.getenv('JUPYTERHUB_BASE_URL') or '/', 'hub') + '/' login_url = Unicode( - '/hub/login', - help="""The login URL to use - - Typically /hub/login + '', + help="""The login URL to use, if any. + + The base HubAuth class doesn't support login via URL, + and will raise 403 on `@web.authenticated` requests without a valid token. + + An empty string here raises 403 errors instead of redirecting. + + HubOAuth will redirect to /hub/api/oauth2/authorize. """, ).tag(config=True) - @default('login_url') - def _default_login_url(self): - return self.hub_host + url_path_join(self.hub_prefix, 'login') - keyfile = Unicode( os.getenv('JUPYTERHUB_SSL_KEYFILE', ''), help="""The ssl key to use for requests @@ -1385,6 +1385,12 @@ def get_login_url(self): if self._hub_login_url is not None: # cached value, don't call this more than once per handler return self._hub_login_url + + if not self.hub_auth.login_url: + # HubOAuth is required for login via redirect, + # base class can only raise to avoid redirect loops + raise HTTPError(403) + # temporary override at setting level, # to allow any subclass overrides of get_login_url to preserve their effect # for example, APIHandler raises 403 to prevent redirects
diff --git a/jupyterhub/tests/test_services_auth.py b/jupyterhub/tests/test_services_auth.py --- a/jupyterhub/tests/test_services_auth.py +++ b/jupyterhub/tests/test_services_auth.py @@ -91,11 +91,7 @@ async def test_hubauth_token(app, mockservice_url, create_user_with_scopes): public_url(app, mockservice_url) + '/whoami/?token=%s' % token, allow_redirects=False, ) - assert r.status_code == 302 - assert 'Location' in r.headers - location = r.headers['Location'] - path = urlparse(location).path - assert path.endswith('/hub/login') + assert r.status_code == 403 @pytest.mark.parametrize( @@ -177,11 +173,7 @@ async def test_hubauth_service_token(request, app, mockservice_url, scopes, allo public_url(app, mockservice_url) + 'whoami/?token=%s' % token, allow_redirects=False, ) - assert r.status_code == 302 - assert 'Location' in r.headers - location = r.headers['Location'] - path = urlparse(location).path - assert path.endswith('/hub/login') + assert r.status_code == 403 @pytest.mark.parametrize(
examples/service-whoami is broken in 4.1.5 ### Bug description The whoami example seems to be broken: https://github.com/jupyterhub/jupyterhub/tree/4.1.5/examples/service-whoami (also verified on `main`) ### How to reproduce 1. `cd examples/service-whoami` 2. `jupyterhub --debug` 3. Go to http://localhost:8000/services/whoami-api/ in a private/incognito browser window 4. You should be sent to the login page, login with a dummy account #### Expected behaviour You should see the whoami-api response as described in the readme https://github.com/jupyterhub/jupyterhub/tree/4.1.5/examples/service-whoami #### Actual behaviour You will see an error, `The page isn’t redirecting properly` (in Firefox) Hub logs: ``` [I 2024-04-09 17:43:09.987 JupyterHub _xsrf_utils:125] Setting new xsrf cookie for b'14fbce37f602474bafe7b5a440164f55:da54c4c0630f4a01be7e33a97422dd7c' {'path': '/hub/'} [I 2024-04-09 17:43:09.988 JupyterHub log:192] 302 GET /hub/login?next=%2Fservices%2Fwhoami-api%2F -> /services/whoami-api/ ([email protected]) 11.68ms [I 2024-04-09 17:43:10.003 JupyterHub _xsrf_utils:125] Setting new xsrf cookie for b'14fbce37f602474bafe7b5a440164f55:da54c4c0630f4a01be7e33a97422dd7c' {'path': '/hub/'} [I 2024-04-09 17:43:10.004 JupyterHub log:192] 302 GET /hub/login?next=%2Fservices%2Fwhoami-api%2F -> /services/whoami-api/ ([email protected]) 11.05ms [I 2024-04-09 17:43:10.018 JupyterHub _xsrf_utils:125] Setting new xsrf cookie for b'14fbce37f602474bafe7b5a440164f55:da54c4c0630f4a01be7e33a97422dd7c' {'path': '/hub/'} [I 2024-04-09 17:43:10.019 JupyterHub log:192] 302 GET /hub/login?next=%2Fservices%2Fwhoami-api%2F -> /services/whoami-api/ ([email protected]) 6.99ms [I 2024-04-09 17:43:10.030 JupyterHub _xsrf_utils:125] Setting new xsrf cookie for b'14fbce37f602474bafe7b5a440164f55:da54c4c0630f4a01be7e33a97422dd7c' {'path': '/hub/'} [I 2024-04-09 17:43:10.031 JupyterHub log:192] 302 GET /hub/login?next=%2Fservices%2Fwhoami-api%2F -> /services/whoami-api/ ([email protected]) 5.90ms [I 2024-04-09 17:43:10.041 JupyterHub _xsrf_utils:125] Setting new xsrf cookie for b'14fbce37f602474bafe7b5a440164f55:da54c4c0630f4a01be7e33a97422dd7c' {'path': '/hub/'} [I 2024-04-09 17:43:10.042 JupyterHub log:192] 302 GET /hub/login?next=%2Fservices%2Fwhoami-api%2F -> /services/whoami-api/ ([email protected]) 5.42ms [I 2024-04-09 17:43:10.053 JupyterHub _xsrf_utils:125] Setting new xsrf cookie for b'14fbce37f602474bafe7b5a440164f55:da54c4c0630f4a01be7e33a97422dd7c' {'path': '/hub/'} [I 2024-04-09 17:43:10.054 JupyterHub log:192] 302 GET /hub/login?next=%2Fservices%2Fwhoami-api%2F -> /services/whoami-api/ ([email protected]) 6.13ms [I 2024-04-09 17:43:10.064 JupyterHub _xsrf_utils:125] Setting new xsrf cookie for b'14fbce37f602474bafe7b5a440164f55:da54c4c0630f4a01be7e33a97422dd7c' {'path': '/hub/'} [I 2024-04-09 17:43:10.064 JupyterHub log:192] 302 GET /hub/login?next=%2Fservices%2Fwhoami-api%2F -> /services/whoami-api/ ([email protected]) 5.50ms [I 2024-04-09 17:43:10.075 JupyterHub _xsrf_utils:125] Setting new xsrf cookie for b'14fbce37f602474bafe7b5a440164f55:da54c4c0630f4a01be7e33a97422dd7c' {'path': '/hub/'} [I 2024-04-09 17:43:10.076 JupyterHub log:192] 302 GET /hub/login?next=%2Fservices%2Fwhoami-api%2F -> /services/whoami-api/ ([email protected]) 5.23ms [I 2024-04-09 17:43:10.086 JupyterHub _xsrf_utils:125] Setting new xsrf cookie for b'14fbce37f602474bafe7b5a440164f55:da54c4c0630f4a01be7e33a97422dd7c' {'path': '/hub/'} [I 2024-04-09 17:43:10.086 JupyterHub log:192] 302 GET /hub/login?next=%2Fservices%2Fwhoami-api%2F 
-> /services/whoami-api/ ([email protected]) 5.22ms [I 2024-04-09 17:43:10.093 JupyterHub _xsrf_utils:125] Setting new xsrf cookie for b'14fbce37f602474bafe7b5a440164f55:da54c4c0630f4a01be7e33a97422dd7c' {'path': '/hub/'} [I 2024-04-09 17:43:10.094 JupyterHub log:192] 302 GET /hub/login?next=%2Fservices%2Fwhoami-api%2F -> /services/whoami-api/ ([email protected]) 4.39ms ```
We noticed a similar behaviour in our [announcement service](https://gitlab.com/idris-cnrs/jupyter/jupyter-services/jupyterhub-announcement) which is based [jupyterhub-announcement](https://github.com/rcthomas/jupyterhub-announcement ). The main take away is that when we put the `GET` method of announcement under `authenticated` decorator, these redirect loops happen. This can be verified from the official [service-announcement](https://github.com/jupyterhub/jupyterhub/tree/4.1.5/examples/service-announcement) as well. Simply add `web.authenticated` to [get method here](https://github.com/jupyterhub/jupyterhub/blob/b4053616743eb011e5bb568700f25afca17aef53/examples/service-announcement/announcement.py#L28) and running the example as per instructions will show a bunch of redirects in browser console. Server logs show them as well: ``` [I 2024-04-10 18:44:31.501 JupyterHub _xsrf_utils:125] Setting new xsrf cookie for b'818e4faa1d8443a683ad8e92b98af799:1104f2f2b64e4265b081fadaadfe1099' {'path': '/hub/'} [I 2024-04-10 18:44:31.502 JupyterHub log:192] 302 GET /hub/login?next=%2Fservices%2Fannouncement%2F -> /services/announcement/ ([email protected]) 1.63ms [I 2024-04-10 18:44:31.588 JupyterHub _xsrf_utils:125] Setting new xsrf cookie for b'818e4faa1d8443a683ad8e92b98af799:1104f2f2b64e4265b081fadaadfe1099' {'path': '/hub/'} [I 2024-04-10 18:44:31.588 JupyterHub log:192] 302 GET /hub/login?next=%2Fservices%2Fannouncement%2F -> /services/announcement/ ([email protected]) 1.42ms [I 2024-04-10 18:44:31.621 JupyterHub _xsrf_utils:125] Setting new xsrf cookie for b'818e4faa1d8443a683ad8e92b98af799:1104f2f2b64e4265b081fadaadfe1099' {'path': '/hub/'} [I 2024-04-10 18:44:31.621 JupyterHub log:192] 302 GET /hub/login?next=%2Fservices%2Fannouncement%2F -> /services/announcement/ ([email protected]) 1.69ms [I 2024-04-10 18:44:31.664 JupyterHub _xsrf_utils:125] Setting new xsrf cookie for b'818e4faa1d8443a683ad8e92b98af799:1104f2f2b64e4265b081fadaadfe1099' {'path': '/hub/'} [I 2024-04-10 18:44:31.665 JupyterHub log:192] 302 GET /hub/login?next=%2Fservices%2Fannouncement%2F -> /services/announcement/ ([email protected]) 5.38ms [I 2024-04-10 18:44:31.681 JupyterHub _xsrf_utils:125] Setting new xsrf cookie for b'818e4faa1d8443a683ad8e92b98af799:1104f2f2b64e4265b081fadaadfe1099' {'path': '/hub/'} [I 2024-04-10 18:44:31.682 JupyterHub log:192] 302 GET /hub/login?next=%2Fservices%2Fannouncement%2F -> /services/announcement/ ([email protected]) 2.75ms [I 2024-04-10 18:44:31.696 JupyterHub _xsrf_utils:125] Setting new xsrf cookie for b'818e4faa1d8443a683ad8e92b98af799:1104f2f2b64e4265b081fadaadfe1099' {'path': '/hub/'} [I 2024-04-10 18:44:31.697 JupyterHub log:192] 302 GET /hub/login?next=%2Fservices%2Fannouncement%2F -> /services/announcement/ ([email protected]) 3.04ms [I 2024-04-10 18:44:31.713 JupyterHub _xsrf_utils:125] Setting new xsrf cookie for b'818e4faa1d8443a683ad8e92b98af799:1104f2f2b64e4265b081fadaadfe1099' {'path': '/hub/'} [I 2024-04-10 18:44:31.714 JupyterHub log:192] 302 GET /hub/login?next=%2Fservices%2Fannouncement%2F -> /services/announcement/ ([email protected]) 2.86ms [I 2024-04-10 18:44:31.724 JupyterHub _xsrf_utils:125] Setting new xsrf cookie for b'818e4faa1d8443a683ad8e92b98af799:1104f2f2b64e4265b081fadaadfe1099' {'path': '/hub/'} [I 2024-04-10 18:44:31.724 JupyterHub log:192] 302 GET /hub/login?next=%2Fservices%2Fannouncement%2F -> /services/announcement/ ([email protected]) 1.64ms [I 2024-04-10 18:44:31.733 JupyterHub _xsrf_utils:125] Setting new xsrf cookie for 
b'818e4faa1d8443a683ad8e92b98af799:1104f2f2b64e4265b081fadaadfe1099' {'path': '/hub/'} [I 2024-04-10 18:44:31.733 JupyterHub log:192] 302 GET /hub/login?next=%2Fservices%2Fannouncement%2F -> /services/announcement/ ([email protected]) 1.43ms [I 2024-04-10 18:44:31.742 JupyterHub _xsrf_utils:125] Setting new xsrf cookie for b'818e4faa1d8443a683ad8e92b98af799:1104f2f2b64e4265b081fadaadfe1099' {'path': '/hub/'} [I 2024-04-10 18:44:31.743 JupyterHub log:192] 302 GET /hub/login?next=%2Fservices%2Fannouncement%2F -> /services/announcement/ ([email protected]) 1.43ms ```
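With the patch above, a plain `HubAuth`-based service returns 403 instead of looping; a service that wants a real browser login flow needs the OAuth-based mixin instead. A minimal sketch along the lines of the bundled whoami-oauth example — handler names are illustrative, and the `JUPYTERHUB_SERVICE_*` variables are the standard ones the Hub sets for managed services:

```python
"""Sketch of a browser-facing Hub service that uses OAuth to log users in."""

import json
import os
from urllib.parse import urlparse

from tornado import ioloop, web

from jupyterhub.services.auth import HubOAuthCallbackHandler, HubOAuthenticated
from jupyterhub.utils import url_path_join


class WhoAmIHandler(HubOAuthenticated, web.RequestHandler):
    @web.authenticated
    def get(self):
        # With the OAuth mixin, unauthenticated browsers are redirected to the
        # Hub's authorization page instead of receiving a 403.
        user_model = self.get_current_user()
        self.set_header("content-type", "application/json")
        self.write(json.dumps(user_model, indent=2))


def main():
    prefix = os.environ["JUPYTERHUB_SERVICE_PREFIX"]
    app = web.Application(
        [
            # the OAuth callback route must be registered before the catch-all
            (url_path_join(prefix, "oauth_callback"), HubOAuthCallbackHandler),
            (prefix + r".*", WhoAmIHandler),
        ],
        # HubOAuth stores its state in a cookie, so the app needs a cookie secret
        cookie_secret=os.urandom(32),
    )
    url = urlparse(os.environ["JUPYTERHUB_SERVICE_URL"])
    app.listen(url.port, url.hostname)
    ioloop.IOLoop.current().start()


if __name__ == "__main__":
    main()
```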
2024-04-23T08:11:49Z
[]
[]
jupyterhub/jupyterhub
4805
jupyterhub__jupyterhub-4805
[ "4802" ]
49f88450d5a5b553688eef595e0a7f206c38695c
diff --git a/jupyterhub/handlers/base.py b/jupyterhub/handlers/base.py --- a/jupyterhub/handlers/base.py +++ b/jupyterhub/handlers/base.py @@ -1537,6 +1537,16 @@ class PrefixRedirectHandler(BaseHandler): """Redirect anything outside a prefix inside. Redirects /foo to /prefix/foo, etc. + + Redirect specifies hub domain when public_url or subdomains are enabled. + + Mainly handles requests for non-running servers, e.g. to + + /user/tree/ -> /hub/user/tree/ + + UserUrlHandler will handle the request after redirect. + Don't do anything but redirect here because cookies, etc. won't be available to this request, + due to not being on the hub's path or possibly domain. """ def get(self): @@ -1554,7 +1564,19 @@ def get(self): # default / -> /hub/ redirect # avoiding extra hop through /hub path = '/' - self.redirect(url_path_join(self.hub.base_url, path), permanent=False) + + redirect_url = redirect_path = url_path_join(self.hub.base_url, path) + + # when using subdomains, + # make sure we redirect `user.domain/user/foo` -> `hub.domain/hub/user/foo/...` + # so that the Hub handles it properly with cookies and all + public_url = self.settings.get("public_url") + subdomain_host = self.settings.get("subdomain_host") + if public_url: + redirect_url = urlunparse(public_url._replace(path=redirect_path)) + elif subdomain_host: + redirect_url = url_path_join(subdomain_host, redirect_path) + self.redirect(redirect_url, permanent=False) class UserUrlHandler(BaseHandler):
diff --git a/jupyterhub/tests/browser/test_browser.py b/jupyterhub/tests/browser/test_browser.py --- a/jupyterhub/tests/browser/test_browser.py +++ b/jupyterhub/tests/browser/test_browser.py @@ -32,6 +32,20 @@ async def login(browser, username, password=None): await browser.get_by_role("button", name="Sign in").click() +async def login_home(browser, app, username): + """Visit login page, login, go home + + A good way to start a session + """ + login_url = url_concat( + url_path_join(public_url(app), "hub/login"), + {"next": ujoin(app.hub.base_url, "home")}, + ) + await browser.goto(login_url) + async with browser.expect_navigation(url=re.compile(".*/hub/home")): + await login(browser, username) + + async def test_open_login_page(app, browser): login_url = url_path_join(public_host(app), app.hub.base_url, "login") await browser.goto(login_url) @@ -1367,6 +1381,17 @@ async def test_login_xsrf_initial_cookies(app, browser, case, username): await login(browser, username, username) +async def test_prefix_redirect_not_running(browser, app, user): + # tests PrefixRedirectHandler for stopped servers + await login_home(browser, app, user.name) + # visit user url (includes subdomain, if enabled) + url = public_url(app, user, "/tree/") + await browser.goto(url) + # make sure we end up on the Hub (domain included) + expected_url = url_path_join(public_url(app), f"hub/user/{user.name}/tree/") + await expect(browser).to_have_url(expected_url) + + def _cookie_dict(cookie_list): """Convert list of cookies to dict of the form
Broken login flow with user subdomains and external proxy on 5.0.0b1

### Bug description

Under the following conditions:

- version 5.0.0b1. In particular, after the XSS fix and the change to no longer set `domain` on the hub's login cookie. (I suspect all 4.1.x versions are similarly affected, though I have not verified; we're on the main branch for reasons.)
- per-user subdomains are enabled
- an external proxy is used, in particular an HA one like traefik + redis, which inherently has consistency delays that the "redirect loop" mechanism is specifically designed to accommodate.

In the event that the route is not yet set up, the hub will handle the request:

https://user-subdomain.hubdomain/user/user-name/lab

This is handled by the `PrefixRedirectHandler` handler class. This route is unauthenticated. It will redirect to:

[/hub/user/user-name/lab](/hub/user/user-name/lab)

which, of course, resolves to the full URL **on the subdomain**:

http://user-subdomain.hubdomain/hub/user/user-name/lab

which is handled by the `UserUrlRedirect` handler class. This route is authenticated.

#### Previous Behavior

Previously, as the login cookie had `domain` set, the browser would still send the login cookie even when interacting with the hub via the subdomain, and so the redirect flow would work fine. Eventually, the redirect loop would cease when the routes reached consistency, and the end-user would settle on their singleuser server. I've verified this on an older revision of the main branch.

#### Observed Behavior

Since the change to the domain property of the cookie, the user is now redirected to the **login page** - on the subdomain, in fact - because the login cookie is not sent by the browser to the hub: the browser is navigating to the hub via the subdomain, so the hub doesn't recognize the session as being logged in. The end-user never lands on their singleuser server.

### How to reproduce

Without actually going and manually disabling a route, I found that comparing the behavior of http://user-subdomain.hubdomain/hub/user/user-name/lab before and after the change to the domain property of the cookie is sufficient to demonstrate the issue. I imagine this technique would work with e.g. the z2jh CSP with subdomains enabled (though of course in a CSP-based setup, with the single-node proxy and synchronous API, I doubt this issue would manifest).
I have a low-confidence suggestion for a fix: when subdomains are enabled, the `PrefixRedirectHandler` could (maybe only for `/user`) strip the subdomain, or otherwise hard-code the hub domain's host in the redirect, rather than the current behavior of redirecting with a relative location `/hub/user`. This way, https://user-subdomain.hubdomain/user/user-name/lab would redirect to https://hubdomain/hub/user/user-name/lab, in which case things should work as expected.

I'll try to do some testing, but I think you're right that it should explicitly redirect to the hub domain when domains are enabled.
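That is essentially what the patch above does. Stripped of the handler plumbing, the URL construction looks roughly like this — the hard-coded hostnames are stand-ins, and the real handler pulls `public_url` / `subdomain_host` from its tornado settings:

```python
from urllib.parse import urlparse, urlunparse

from jupyterhub.utils import url_path_join

# Stand-in values; the real handler reads these from its settings.
hub_base_url = "/hub/"
public_url = urlparse("https://hubdomain/")
subdomain_host = "https://hubdomain"

redirect_path = url_path_join(hub_base_url, "user/user-name/lab")

if public_url:
    # keep the Hub's scheme and host, swap in the /hub/... path
    redirect_url = urlunparse(public_url._replace(path=redirect_path))
elif subdomain_host:
    redirect_url = url_path_join(subdomain_host, redirect_path)
else:
    # no subdomains configured: the old relative redirect is fine
    redirect_url = redirect_path

print(redirect_url)  # https://hubdomain/hub/user/user-name/lab
```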
2024-05-01T13:46:02Z
[]
[]
jupyterhub/jupyterhub
4806
jupyterhub__jupyterhub-4806
[ "4657" ]
c135e109abf3ba7918707b60a5dbd927fc09e50a
diff --git a/jupyterhub/auth.py b/jupyterhub/auth.py --- a/jupyterhub/auth.py +++ b/jupyterhub/auth.py @@ -1115,13 +1115,16 @@ def check_allowed_groups(self, username, authentication=None): """ if not self.allowed_groups: return False + user_group_gids = set( + self._getgrouplist(username, self._getpwnam(username).pw_gid) + ) for grnam in self.allowed_groups: try: group = self._getgrnam(grnam) except KeyError: self.log.error('No such group: [%s]' % grnam) continue - if username in group.gr_mem: + if group.gr_gid in user_group_gids: return True return False
diff --git a/jupyterhub/tests/test_auth.py b/jupyterhub/tests/test_auth.py --- a/jupyterhub/tests/test_auth.py +++ b/jupyterhub/tests/test_auth.py @@ -165,21 +165,35 @@ async def test_pam_auth_allowed(): async def test_pam_auth_allowed_groups(): - def getgrnam(name): - return MockStructGroup('grp', ['kaylee']) - - authenticator = MockPAMAuthenticator(allowed_groups={'group'}, allow_all=False) + class TestAuthenticator(MockPAMAuthenticator): + @staticmethod + def _getpwnam(name): + return MockStructPasswd(name=name) + + @staticmethod + def _getgrnam(name): + if name == "group": + return MockStructGroup('grp', ['kaylee'], gid=1234) + else: + return None + + @staticmethod + def _getgrouplist(username, gid): + gids = [gid] + if username == "kaylee": + gids.append(1234) + return gids + + authenticator = TestAuthenticator(allowed_groups={'group'}, allow_all=False) - with mock.patch.object(authenticator, '_getgrnam', getgrnam): - authorized = await authenticator.get_authenticated_user( - None, {'username': 'kaylee', 'password': 'kaylee'} - ) + authorized = await authenticator.get_authenticated_user( + None, {'username': 'kaylee', 'password': 'kaylee'} + ) assert authorized['name'] == 'kaylee' - with mock.patch.object(authenticator, '_getgrnam', getgrnam): - authorized = await authenticator.get_authenticated_user( - None, {'username': 'mal', 'password': 'mal'} - ) + authorized = await authenticator.get_authenticated_user( + None, {'username': 'mal', 'password': 'mal'} + ) assert authorized is None @@ -270,6 +284,7 @@ async def test_pam_auth_no_such_group(): authenticator = MockPAMAuthenticator( allowed_groups={'nosuchcrazygroup'}, ) + authenticator._getpwnam = MockStructPasswd authorized = await authenticator.get_authenticated_user( None, {'username': 'kaylee', 'password': 'kaylee'} )
PAMAuthenticator / LocalAuthenticator not checking primary group when using allowed_groups <!-- Thank you for contributing. These HTML comments will not render in the issue, but you can delete them once you've read them if you prefer! --> ### Bug description When trying to use PAMAuthenticator or LocalAuthenticator with the c.PAMAuthenticator.allowed_groups configuration parameter I wasn't able to log in even though users were in the set group and the correct password was entered. The error message in the logs was just a vague "User 'XY' not allowed.". Turns out the Authenticators seem to only check secondary groups. <!-- Use this section to clearly and concisely describe the bug. --> ### How to reproduce <!-- Use this section to describe the steps that a user would take to experience this bug. --> 1. Set `c.PAMAuthenticator.allowed_groups = {"jupyter"}` in jupyterhub_config.py 2. Create user using `useradd testuser -N -g jupyter` 3. Set password using passwd 4. Start Jupyterhub 5. Try to log in #### Expected behaviour <!-- Tell us what you thought would happen. --> The user should be able to log in because he has the correct credentials and is member of the allowed group. In my opinion the primary group of the user should also be checked or at least there should be a note in the documentation of the allowed_groups configuration parameter. #### Actual behaviour The user isn't able to log in and the log message is very vague with `[... JupyterHub auth:533] User 'testuser not allowed.` <!-- Tell us what actually happens. --> ### Your personal set up <!-- Tell us a little about the system you're using. Please include information about how you installed, e.g. are you using a distribution such as zero-to-jupyterhub or the-littlest-jupyterhub. --> - OS: - 1. ubuntu 22.04 WSL on Win 11 - 2. Docker quay.io/jupyterhub/jupyterhub:latest (4.0.2) on ubuntu 22.04. WSL on Win 11 <!-- [e.g. ubuntu 20.04, macOS 11.0] --> - Version(s): 4.0.2 <!-- e.g. jupyterhub --version, python --version ---> <details><summary>Full environment</summary> <!-- For reproduction, it's useful to have the full environment. 
For example, the output of `pip freeze` or `conda list` ---> ``` # packages in environment at /root/anaconda3: # # Name Version Build Channel _anaconda_depends 2023.09 py311_mkl_1 _libgcc_mutex 0.1 main _openmp_mutex 5.1 1_gnu abseil-cpp 20211102.0 hd4dd3e8_0 aiobotocore 2.5.0 py311h06a4308_0 aiofiles 22.1.0 py311h06a4308_0 aiohttp 3.8.5 py311h5eee18b_0 aioitertools 0.7.1 pyhd3eb1b0_0 aiosignal 1.2.0 pyhd3eb1b0_0 aiosqlite 0.18.0 py311h06a4308_0 alabaster 0.7.12 pyhd3eb1b0_0 alembic 1.13.0 pyhd8ed1ab_0 conda-forge anaconda-anon-usage 0.4.2 py311hfc0e8ea_0 anaconda-catalogs 0.2.0 py311h06a4308_0 anaconda-client 1.12.1 py311h06a4308_0 anaconda-cloud-auth 0.1.3 py311h06a4308_0 anaconda-navigator 2.5.0 py311h06a4308_0 anaconda-project 0.11.1 py311h06a4308_0 anyio 3.5.0 py311h06a4308_0 aom 3.6.0 h6a678d5_0 appdirs 1.4.4 pyhd3eb1b0_0 argon2-cffi 21.3.0 pyhd3eb1b0_0 argon2-cffi-bindings 21.2.0 py311h5eee18b_0 arrow 1.2.3 py311h06a4308_1 arrow-cpp 11.0.0 h374c478_2 astroid 2.14.2 py311h06a4308_0 astropy 5.1 py311hbed6279_0 asttokens 2.0.5 pyhd3eb1b0_0 async-timeout 4.0.2 py311h06a4308_0 async_generator 1.10 py_0 conda-forge atomicwrites 1.4.0 py_0 attrs 22.1.0 py311h06a4308_0 automat 20.2.0 py_0 autopep8 1.6.0 pyhd3eb1b0_1 aws-c-common 0.6.8 h5eee18b_1 aws-c-event-stream 0.1.6 h6a678d5_6 aws-checksums 0.1.11 h5eee18b_2 aws-sdk-cpp 1.8.185 h721c034_1 babel 2.11.0 py311h06a4308_0 backcall 0.2.0 pyhd3eb1b0_0 backports 1.1 pyhd3eb1b0_0 backports.functools_lru_cache 1.6.4 pyhd3eb1b0_0 backports.tempfile 1.0 pyhd3eb1b0_1 backports.weakref 1.0.post1 py_1 bcrypt 3.2.0 py311h5eee18b_1 beautifulsoup4 4.12.2 py311h06a4308_0 binaryornot 0.4.4 pyhd3eb1b0_1 black 23.3.0 py311h06a4308_0 blas 1.0 mkl bleach 4.1.0 pyhd3eb1b0_0 blinker 1.7.0 pyhd8ed1ab_0 conda-forge blosc 1.21.3 h6a678d5_0 bokeh 3.2.1 py311h92b7b1e_0 boltons 23.0.0 py311h06a4308_0 boost-cpp 1.73.0 h7f8727e_12 botocore 1.29.76 py311h06a4308_0 bottleneck 1.3.5 py311hbed6279_0 brotli 1.0.9 h5eee18b_7 brotli-bin 1.0.9 h5eee18b_7 brotlipy 0.7.0 py311h5eee18b_1002 brunsli 0.1 h2531618_0 bzip2 1.0.8 h7b6447c_0 c-ares 1.19.1 h5eee18b_0 c-blosc2 2.8.0 h6a678d5_0 ca-certificates 2023.08.22 h06a4308_0 certifi 2023.11.17 py311h06a4308_0 certipy 0.1.3 py_0 conda-forge cffi 1.15.1 py311h5eee18b_3 cfitsio 3.470 h5893167_7 chardet 4.0.0 py311h06a4308_1003 charls 2.2.0 h2531618_0 charset-normalizer 2.0.4 pyhd3eb1b0_0 click 8.0.4 py311h06a4308_0 cloudpickle 2.2.1 py311h06a4308_0 clyent 1.2.2 py311h06a4308_1 colorama 0.4.6 py311h06a4308_0 colorcet 3.0.1 py311h06a4308_0 comm 0.1.2 py311h06a4308_0 conda 23.7.4 py311h06a4308_0 conda-build 3.26.1 py311h06a4308_0 conda-content-trust 0.2.0 py311h06a4308_0 conda-index 0.3.0 py311h06a4308_0 conda-libmamba-solver 23.7.0 py311h06a4308_0 conda-pack 0.6.0 pyhd3eb1b0_0 conda-package-handling 2.2.0 py311h06a4308_0 conda-package-streaming 0.9.0 py311h06a4308_0 conda-repo-cli 1.0.75 py311h06a4308_0 conda-token 0.4.0 pyhd3eb1b0_0 conda-verify 3.4.2 py_1 configurable-http-proxy 4.6.0 h06a4308_0 constantly 15.1.0 py311h06a4308_0 contourpy 1.0.5 py311hdb19cb5_0 cookiecutter 1.7.3 pyhd3eb1b0_0 cryptography 41.0.3 py311hdda0065_0 cssselect 1.1.0 pyhd3eb1b0_0 curl 8.2.1 hdbd6064_0 cycler 0.11.0 pyhd3eb1b0_0 cyrus-sasl 2.1.28 h52b45da_1 cytoolz 0.12.0 py311h5eee18b_0 daal4py 2023.1.1 py311h4cb112f_0 dal 2023.1.1 hdb19cb5_48679 dask 2023.6.0 py311h06a4308_0 dask-core 2023.6.0 py311h06a4308_0 datasets 2.12.0 py311h06a4308_0 datashader 0.15.2 py311h06a4308_0 datashape 0.5.4 py311h06a4308_1 dav1d 1.2.1 h5eee18b_0 dbus 1.13.18 hb2f20db_0 debugpy 
1.6.7 py311h6a678d5_0 decorator 5.1.1 pyhd3eb1b0_0 defusedxml 0.7.1 pyhd3eb1b0_0 diff-match-patch 20200713 pyhd3eb1b0_0 dill 0.3.6 py311h06a4308_0 distributed 2023.6.0 py311h06a4308_0 docstring-to-markdown 0.11 py311h06a4308_0 docutils 0.18.1 py311h06a4308_3 entrypoints 0.4 py311h06a4308_0 et_xmlfile 1.1.0 py311h06a4308_0 executing 0.8.3 pyhd3eb1b0_0 expat 2.5.0 h6a678d5_0 filelock 3.9.0 py311h06a4308_0 flake8 6.0.0 py311h06a4308_0 flask 2.2.2 py311h06a4308_0 fmt 9.1.0 hdb19cb5_0 font-ttf-dejavu-sans-mono 2.37 hd3eb1b0_0 font-ttf-inconsolata 2.001 hcb22688_0 font-ttf-source-code-pro 2.030 hd3eb1b0_0 font-ttf-ubuntu 0.83 h8b1ccd4_0 fontconfig 2.14.1 h4c34cd2_2 fonts-anaconda 1 h8fa9717_0 fonttools 4.25.0 pyhd3eb1b0_0 freetype 2.12.1 h4a9f257_0 frozenlist 1.3.3 py311h5eee18b_0 fsspec 2023.4.0 py311h06a4308_0 future 0.18.3 py311h06a4308_0 gensim 4.3.0 py311hba01205_1 gflags 2.2.2 he6710b0_0 giflib 5.2.1 h5eee18b_3 glib 2.69.1 he621ea3_2 glob2 0.7 pyhd3eb1b0_0 glog 0.5.0 h2531618_0 gmp 6.2.1 h295c915_3 gmpy2 2.1.2 py311hc9b5ff0_0 greenlet 2.0.1 py311h6a678d5_0 grpc-cpp 1.48.2 he1ff14a_1 gst-plugins-base 1.14.1 h6a678d5_1 gstreamer 1.14.1 h5eee18b_1 h5py 3.9.0 py311hdd6beaf_0 hdf5 1.12.1 h2b7332f_3 heapdict 1.0.1 pyhd3eb1b0_0 holoviews 1.17.1 py311h06a4308_0 huggingface_hub 0.15.1 py311h06a4308_0 hvplot 0.8.4 py311h06a4308_0 hyperlink 21.0.0 pyhd3eb1b0_0 icu 58.2 he6710b0_3 idna 3.4 py311h06a4308_0 imagecodecs 2023.1.23 py311h8105a5c_0 imageio 2.31.1 py311h06a4308_0 imagesize 1.4.1 py311h06a4308_0 imbalanced-learn 0.10.1 py311h06a4308_1 importlib-metadata 6.0.0 py311h06a4308_0 importlib_metadata 6.0.0 hd3eb1b0_0 importlib_resources 6.1.1 pyhd8ed1ab_0 conda-forge incremental 21.3.0 pyhd3eb1b0_0 inflection 0.5.1 py311h06a4308_0 iniconfig 1.1.1 pyhd3eb1b0_0 intake 0.6.8 py311h06a4308_0 intel-openmp 2023.1.0 hdb19cb5_46305 intervaltree 3.1.0 pyhd3eb1b0_0 ipykernel 6.25.0 py311h92b7b1e_0 ipython 8.15.0 py311h06a4308_0 ipython_genutils 0.2.0 pyhd3eb1b0_1 ipywidgets 8.0.4 py311h06a4308_0 isort 5.9.3 pyhd3eb1b0_0 itemadapter 0.3.0 pyhd3eb1b0_0 itemloaders 1.0.4 pyhd3eb1b0_1 itsdangerous 2.0.1 pyhd3eb1b0_0 jaraco.classes 3.2.1 pyhd3eb1b0_0 jedi 0.18.1 py311h06a4308_1 jeepney 0.7.1 pyhd3eb1b0_0 jellyfish 1.0.1 py311hb02cf49_0 jinja2 3.1.2 py311h06a4308_0 jinja2-time 0.2.0 pyhd3eb1b0_3 jmespath 0.10.0 pyhd3eb1b0_0 joblib 1.2.0 py311h06a4308_0 jpeg 9e h5eee18b_1 jq 1.6 h27cfd23_1000 json5 0.9.6 pyhd3eb1b0_0 jsonpatch 1.32 pyhd3eb1b0_0 jsonpointer 2.1 pyhd3eb1b0_0 jsonschema 4.17.3 py311h06a4308_0 jupyter 1.0.0 py311h06a4308_8 jupyter_client 7.4.9 py311h06a4308_0 jupyter_console 6.6.3 py311h06a4308_0 jupyter_core 5.3.0 py311h06a4308_0 jupyter_events 0.6.3 py311h06a4308_0 jupyter_server 1.23.4 py311h06a4308_0 jupyter_server_fileid 0.9.0 py311h06a4308_0 jupyter_server_ydoc 0.8.0 py311h06a4308_1 jupyter_telemetry 0.1.0 pyhd8ed1ab_1 conda-forge jupyter_ydoc 0.2.4 py311h06a4308_0 jupyterhub 4.0.2 pyh31011fe_0 conda-forge jupyterhub-base 4.0.2 pyh31011fe_0 conda-forge jupyterlab 3.6.3 py311h06a4308_0 jupyterlab_pygments 0.1.2 py_0 jupyterlab_server 2.22.0 py311h06a4308_0 jupyterlab_widgets 3.0.5 py311h06a4308_0 jxrlib 1.1 h7b6447c_2 kaleido-core 0.2.1 h7c8854e_0 keyring 23.13.1 py311h06a4308_0 kiwisolver 1.4.4 py311h6a678d5_0 krb5 1.20.1 h143b758_1 lazy-object-proxy 1.6.0 py311h5eee18b_0 lazy_loader 0.2 py311h06a4308_0 lcms2 2.12 h3be6417_0 ld_impl_linux-64 2.38 h1181459_1 lerc 3.0 h295c915_0 libaec 1.0.4 he6710b0_1 libarchive 3.6.2 h6ac8c49_2 libavif 0.11.1 h5eee18b_0 libboost 1.73.0 h28710b8_12 libbrotlicommon 
1.0.9 h5eee18b_7 libbrotlidec 1.0.9 h5eee18b_7 libbrotlienc 1.0.9 h5eee18b_7 libclang 14.0.6 default_hc6dbbc7_1 libclang13 14.0.6 default_he11475f_1 libcups 2.4.2 h2d74bed_1 libcurl 8.2.1 h251f7ec_0 libdeflate 1.17 h5eee18b_0 libedit 3.1.20221030 h5eee18b_0 libev 4.33 h7f8727e_1 libevent 2.1.12 hdbd6064_1 libffi 3.4.4 h6a678d5_0 libgcc-ng 11.2.0 h1234567_1 libgfortran-ng 11.2.0 h00389a5_1 libgfortran5 11.2.0 h1234567_1 libgomp 11.2.0 h1234567_1 liblief 0.12.3 h6a678d5_0 libllvm14 14.0.6 hdb19cb5_3 libmamba 1.5.1 haf1ee3a_0 libmambapy 1.5.1 py311h2dafd23_0 libnghttp2 1.52.0 h2d74bed_1 libpng 1.6.39 h5eee18b_0 libpq 12.15 hdbd6064_1 libprotobuf 3.20.3 he621ea3_0 libsodium 1.0.18 h7b6447c_0 libsolv 0.7.24 he621ea3_0 libspatialindex 1.9.3 h2531618_0 libssh2 1.10.0 hdbd6064_2 libstdcxx-ng 11.2.0 h1234567_1 libthrift 0.15.0 h1795dd8_2 libtiff 4.5.1 h6a678d5_0 libuuid 1.41.5 h5eee18b_0 libuv 1.44.2 h5eee18b_0 libwebp 1.3.2 h11a3e52_0 libwebp-base 1.3.2 h5eee18b_0 libxcb 1.15 h7f8727e_0 libxkbcommon 1.0.1 h5eee18b_1 libxml2 2.10.4 hcbfbd50_0 libxslt 1.1.37 h2085143_0 libzopfli 1.0.3 he6710b0_0 linkify-it-py 2.0.0 py311h06a4308_0 llvmlite 0.40.0 py311he621ea3_0 locket 1.0.0 py311h06a4308_0 lxml 4.9.3 py311hdbbb534_0 lz4 4.3.2 py311h5eee18b_0 lz4-c 1.9.4 h6a678d5_0 lzo 2.10 h7b6447c_2 mako 1.3.0 pyhd8ed1ab_0 conda-forge markdown 3.4.1 py311h06a4308_0 markdown-it-py 2.2.0 py311h06a4308_1 markupsafe 2.1.1 py311h5eee18b_0 mathjax 2.7.5 h06a4308_0 matplotlib 3.7.2 py311h06a4308_0 matplotlib-base 3.7.2 py311ha02d727_0 matplotlib-inline 0.1.6 py311h06a4308_0 mccabe 0.7.0 pyhd3eb1b0_0 mdit-py-plugins 0.3.0 py311h06a4308_0 mdurl 0.1.0 py311h06a4308_0 mistune 0.8.4 py311h5eee18b_1000 mkl 2023.1.0 h213fc3f_46343 mkl-service 2.4.0 py311h5eee18b_1 mkl_fft 1.3.8 py311h5eee18b_0 mkl_random 1.2.4 py311hdb19cb5_0 more-itertools 8.12.0 pyhd3eb1b0_0 mpc 1.1.0 h10f8cd9_1 mpfr 4.0.2 hb69a4c5_1 mpi 1.0 mpich mpich 4.1.1 hbae89fd_0 mpmath 1.3.0 py311h06a4308_0 msgpack-python 1.0.3 py311hdb19cb5_0 multidict 6.0.2 py311h5eee18b_0 multipledispatch 0.6.0 py311h06a4308_0 multiprocess 0.70.14 py311h06a4308_0 munkres 1.1.4 py_0 mypy_extensions 1.0.0 py311h06a4308_0 mysql 5.7.24 h721c034_2 navigator-updater 0.4.0 py311h06a4308_1 nbclassic 0.5.5 py311h06a4308_0 nbclient 0.5.13 py311h06a4308_0 nbconvert 6.5.4 py311h06a4308_0 nbformat 5.9.2 py311h06a4308_0 ncurses 6.4 h6a678d5_0 nest-asyncio 1.5.6 py311h06a4308_0 networkx 3.1 py311h06a4308_0 nltk 3.8.1 py311h06a4308_0 nodejs 18.16.0 h2d74bed_0 notebook 6.5.4 py311h06a4308_1 notebook-shim 0.2.2 py311h06a4308_0 nspr 4.35 h6a678d5_0 nss 3.89.1 h6a678d5_0 numba 0.57.1 py311ha02d727_0 numexpr 2.8.4 py311h65dcdc2_1 numpy 1.24.3 py311h08b1b3b_1 numpy-base 1.24.3 py311hf175353_1 numpydoc 1.5.0 py311h06a4308_0 oauthlib 3.2.2 pyhd8ed1ab_0 conda-forge oniguruma 6.9.7.1 h27cfd23_0 openjpeg 2.4.0 h3ad879b_0 openpyxl 3.0.10 py311h5eee18b_0 openssl 3.0.12 h7f8727e_0 orc 1.7.4 hb3bc3d3_1 packaging 23.1 py311h06a4308_0 pamela 1.1.0 pyh1a96a4e_0 conda-forge pandas 2.0.3 py311ha02d727_0 pandocfilters 1.5.0 pyhd3eb1b0_0 panel 1.2.3 py311h06a4308_0 param 1.13.0 py311h06a4308_0 parsel 1.6.0 py311h06a4308_0 parso 0.8.3 pyhd3eb1b0_0 partd 1.4.0 py311h06a4308_0 patch 2.7.6 h7b6447c_1001 patchelf 0.17.2 h6a678d5_0 pathlib 1.0.1 pyhd3eb1b0_1 pathspec 0.10.3 py311h06a4308_0 patsy 0.5.3 py311h06a4308_0 pcre 8.45 h295c915_0 pcre2 10.42 hebb0a14_0 pep8 1.7.1 py311h06a4308_1 pexpect 4.8.0 pyhd3eb1b0_3 pickleshare 0.7.5 pyhd3eb1b0_1003 pillow 9.4.0 py311h6a678d5_1 pip 23.2.1 py311h06a4308_0 pkce 1.0.3 
py311h06a4308_0 pkginfo 1.9.6 py311h06a4308_0 platformdirs 3.10.0 py311h06a4308_0 plotly 5.9.0 py311h06a4308_0 pluggy 1.0.0 py311h06a4308_1 ply 3.11 py311h06a4308_0 poyo 0.5.0 pyhd3eb1b0_0 prometheus_client 0.14.1 py311h06a4308_0 prompt-toolkit 3.0.36 py311h06a4308_0 prompt_toolkit 3.0.36 hd3eb1b0_0 protego 0.1.16 py_0 psutil 5.9.0 py311h5eee18b_0 ptyprocess 0.7.0 pyhd3eb1b0_2 pure_eval 0.2.2 pyhd3eb1b0_0 py-cpuinfo 8.0.0 pyhd3eb1b0_1 py-lief 0.12.3 py311h6a678d5_0 pyarrow 11.0.0 py311hd8e8d9b_1 pyasn1 0.4.8 pyhd3eb1b0_0 pyasn1-modules 0.2.8 py_0 pybind11-abi 4 hd3eb1b0_1 pycodestyle 2.10.0 py311h06a4308_0 pycosat 0.6.4 py311h5eee18b_0 pycparser 2.21 pyhd3eb1b0_0 pyct 0.5.0 py311h06a4308_0 pycurl 7.45.2 py311hdbd6064_1 pydantic 1.10.8 py311h5eee18b_0 pydispatcher 2.0.5 py311h06a4308_2 pydocstyle 6.3.0 py311h06a4308_0 pyerfa 2.0.0 py311h5eee18b_0 pyflakes 3.0.1 py311h06a4308_0 pygments 2.15.1 py311h06a4308_1 pyjwt 2.4.0 py311h06a4308_0 pylint 2.16.2 py311h06a4308_0 pylint-venv 2.3.0 py311h06a4308_0 pyls-spyder 0.4.0 pyhd3eb1b0_0 pyodbc 4.0.34 py311h6a678d5_0 pyopenssl 23.2.0 py311h06a4308_0 pyparsing 3.0.9 py311h06a4308_0 pyqt 5.15.7 py311h6a678d5_0 pyqt5-sip 12.11.0 py311h6a678d5_0 pyqtwebengine 5.15.7 py311h6a678d5_0 pyrsistent 0.18.0 py311h5eee18b_0 pysocks 1.7.1 py311h06a4308_0 pytables 3.8.0 py311hb8ae3fc_3 pytest 7.4.0 py311h06a4308_0 python 3.11.5 h955ad1f_0 python-dateutil 2.8.2 pyhd3eb1b0_0 python-dotenv 0.21.0 py311h06a4308_0 python-fastjsonschema 2.16.2 py311h06a4308_0 python-json-logger 2.0.7 py311h06a4308_0 python-kaleido 0.2.1 py311h06a4308_0 python-libarchive-c 2.9 pyhd3eb1b0_1 python-lmdb 1.4.1 py311h6a678d5_0 python-lsp-black 1.2.1 py311h06a4308_0 python-lsp-jsonrpc 1.0.0 pyhd3eb1b0_0 python-lsp-server 1.7.2 py311h06a4308_0 python-slugify 5.0.2 pyhd3eb1b0_0 python-snappy 0.6.1 py311h6a678d5_0 python-tzdata 2023.3 pyhd3eb1b0_0 python-xxhash 2.0.2 py311h5eee18b_1 pytoolconfig 1.2.5 py311h06a4308_1 pytz 2023.3.post1 py311h06a4308_0 pyviz_comms 2.3.0 py311h06a4308_0 pywavelets 1.4.1 py311h5eee18b_0 pyxdg 0.27 pyhd3eb1b0_0 pyyaml 6.0 py311h5eee18b_1 pyzmq 23.2.0 py311h6a678d5_0 qdarkstyle 3.0.2 pyhd3eb1b0_0 qstylizer 0.2.2 py311h06a4308_0 qt-main 5.15.2 h7358343_9 qt-webengine 5.15.9 h9ab4d14_7 qtawesome 1.2.2 py311h06a4308_0 qtconsole 5.4.2 py311h06a4308_0 qtpy 2.2.0 py311h06a4308_0 qtwebkit 5.212 h3fafdc1_5 queuelib 1.5.0 py311h06a4308_0 re2 2022.04.01 h295c915_0 readline 8.2 h5eee18b_0 regex 2022.7.9 py311h5eee18b_0 reproc 14.2.4 h295c915_1 reproc-cpp 14.2.4 h295c915_1 requests 2.31.0 py311h06a4308_0 requests-file 1.5.1 pyhd3eb1b0_0 requests-toolbelt 1.0.0 py311h06a4308_0 responses 0.13.3 pyhd3eb1b0_0 rfc3339-validator 0.1.4 py311h06a4308_0 rfc3986-validator 0.1.1 py311h06a4308_0 rope 1.7.0 py311h06a4308_0 rtree 1.0.1 py311h06a4308_0 ruamel.yaml 0.17.21 py311h5eee18b_0 ruamel_yaml 0.17.21 py311h5eee18b_0 s3fs 2023.4.0 py311h06a4308_0 safetensors 0.3.2 py311hb02cf49_0 scikit-image 0.20.0 py311h6a678d5_0 scikit-learn 1.3.0 py311ha02d727_0 scikit-learn-intelex 2023.1.1 py311h06a4308_0 scipy 1.11.1 py311h08b1b3b_0 scrapy 2.8.0 py311h06a4308_0 seaborn 0.12.2 py311h06a4308_0 secretstorage 3.3.1 py311h06a4308_1 send2trash 1.8.0 pyhd3eb1b0_1 service_identity 18.1.0 pyhd3eb1b0_1 setuptools 68.0.0 py311h06a4308_0 sip 6.6.2 py311h6a678d5_0 six 1.16.0 pyhd3eb1b0_1 smart_open 5.2.1 py311h06a4308_0 snappy 1.1.9 h295c915_0 sniffio 1.2.0 py311h06a4308_1 snowballstemmer 2.2.0 pyhd3eb1b0_0 sortedcontainers 2.4.0 pyhd3eb1b0_0 soupsieve 2.4 py311h06a4308_0 sphinx 5.0.2 py311h06a4308_0 
sphinxcontrib-applehelp 1.0.2 pyhd3eb1b0_0 sphinxcontrib-devhelp 1.0.2 pyhd3eb1b0_0 sphinxcontrib-htmlhelp 2.0.0 pyhd3eb1b0_0 sphinxcontrib-jsmath 1.0.1 pyhd3eb1b0_0 sphinxcontrib-qthelp 1.0.3 pyhd3eb1b0_0 sphinxcontrib-serializinghtml 1.1.5 pyhd3eb1b0_0 spyder 5.4.3 py311h06a4308_1 spyder-kernels 2.4.4 py311h06a4308_0 sqlalchemy 1.4.39 py311h5eee18b_0 sqlite 3.41.2 h5eee18b_0 stack_data 0.2.0 pyhd3eb1b0_0 statsmodels 0.14.0 py311hf4808d0_0 sympy 1.11.1 py311h06a4308_0 tabulate 0.8.10 py311h06a4308_0 tbb 2021.8.0 hdb19cb5_0 tbb4py 2021.8.0 py311hdb19cb5_0 tblib 1.7.0 pyhd3eb1b0_0 tenacity 8.2.2 py311h06a4308_0 terminado 0.17.1 py311h06a4308_0 text-unidecode 1.3 pyhd3eb1b0_0 textdistance 4.2.1 pyhd3eb1b0_0 threadpoolctl 2.2.0 pyh0d69192_0 three-merge 0.1.1 pyhd3eb1b0_0 tifffile 2023.4.12 py311h06a4308_0 tinycss2 1.2.1 py311h06a4308_0 tk 8.6.12 h1ccaba5_0 tldextract 3.2.0 pyhd3eb1b0_0 tokenizers 0.13.2 py311h22610ee_1 toml 0.10.2 pyhd3eb1b0_0 tomlkit 0.11.1 py311h06a4308_0 toolz 0.12.0 py311h06a4308_0 tornado 6.3.2 py311h5eee18b_0 tqdm 4.65.0 py311h92b7b1e_0 traitlets 5.7.1 py311h06a4308_0 transformers 4.32.1 py311h06a4308_0 twisted 22.10.0 py311h5eee18b_0 typing-extensions 4.7.1 py311h06a4308_0 typing_extensions 4.7.1 py311h06a4308_0 tzdata 2023c h04d1e81_0 uc-micro-py 1.0.1 py311h06a4308_0 ujson 5.4.0 py311h6a678d5_0 unidecode 1.2.0 pyhd3eb1b0_0 unixodbc 2.3.11 h5eee18b_0 urllib3 1.26.16 py311h06a4308_0 utf8proc 2.6.1 h27cfd23_0 w3lib 1.21.0 pyhd3eb1b0_0 watchdog 2.1.6 py311h06a4308_0 wcwidth 0.2.5 pyhd3eb1b0_0 webencodings 0.5.1 py311h06a4308_1 websocket-client 0.58.0 py311h06a4308_4 werkzeug 2.2.3 py311h06a4308_0 whatthepatch 1.0.2 py311h06a4308_0 wheel 0.38.4 py311h06a4308_0 widgetsnbextension 4.0.5 py311h06a4308_0 wrapt 1.14.1 py311h5eee18b_0 wurlitzer 3.0.2 py311h06a4308_0 xarray 2023.6.0 py311h06a4308_0 xxhash 0.8.0 h7f8727e_3 xyzservices 2022.9.0 py311h06a4308_1 xz 5.4.2 h5eee18b_0 y-py 0.5.9 py311h52d8a92_0 yaml 0.2.5 h7b6447c_0 yaml-cpp 0.7.0 h295c915_1 yapf 0.31.0 pyhd3eb1b0_0 yarl 1.8.1 py311h5eee18b_0 ypy-websocket 0.8.2 py311h06a4308_0 zeromq 4.3.4 h2531618_0 zfp 1.0.0 h6a678d5_0 zict 2.2.0 py311h06a4308_0 zipp 3.11.0 py311h06a4308_0 zlib 1.2.13 h5eee18b_0 zlib-ng 2.0.7 h5eee18b_0 zope 1.0 py311h06a4308_1 zope.interface 5.4.0 py311h5eee18b_0 zstandard 0.19.0 py311h5eee18b_0 zstd 1.5.5 hc292b87_0 ``` </details> <details><summary>Configuration</summary> <!-- For JupyterHub, especially include information such as what Spawner and Authenticator are being used. Be careful not to share any sensitive information. You can paste jupyterhub_config.py below. 
To exclude lots of comments and empty lines from auto-generated jupyterhub_config.py, you can do: grep -v '\(^#\|^[[:space:]]*$\)' jupyterhub_config.py --> ```python c = get_config() #noqa c.PAMAuthenticator.allowed_groups = {"jupyter"} ``` </details> <details><summary>Logs</summary> ``` [I 2023-12-09 12:00:38.248 JupyterHub app:2859] Running JupyterHub version 4.0.2 [I 2023-12-09 12:00:38.248 JupyterHub app:2889] Using Authenticator: jupyterhub.auth.PAMAuthenticator-4.0.2 [I 2023-12-09 12:00:38.248 JupyterHub app:2889] Using Spawner: jupyterhub.spawner.LocalProcessSpawner-4.0.2 [I 2023-12-09 12:00:38.248 JupyterHub app:2889] Using Proxy: jupyterhub.proxy.ConfigurableHTTPProxy-4.0.2 [I 2023-12-09 12:00:38.253 JupyterHub app:1664] Loading cookie_secret from /opt/docker/jupyterhub_cookie_secret [I 2023-12-09 12:00:38.272 JupyterHub proxy:556] Generating new CONFIGPROXY_AUTH_TOKEN [I 2023-12-09 12:00:38.276 JupyterHub app:1984] Not using allowed_users. Any authenticated user will be allowed. [I 2023-12-09 12:00:38.284 JupyterHub app:2928] Initialized 0 spawners in 0.001 seconds [I 2023-12-09 12:00:38.286 JupyterHub metrics:278] Found 4 active users in the last ActiveUserPeriods.twenty_four_hours [I 2023-12-09 12:00:38.286 JupyterHub metrics:278] Found 4 active users in the last ActiveUserPeriods.seven_days [I 2023-12-09 12:00:38.286 JupyterHub metrics:278] Found 4 active users in the last ActiveUserPeriods.thirty_days [W 2023-12-09 12:00:38.287 JupyterHub proxy:746] Running JupyterHub without SSL. I hope there is SSL termination happening somewhere else... [I 2023-12-09 12:00:38.287 JupyterHub proxy:750] Starting proxy @ http://:8000 12:00:38.395 [ConfigProxy] info: Proxying http://*:8000 to (no default) 12:00:38.396 [ConfigProxy] info: Proxy API at http://127.0.0.1:8001/api/routes [I 2023-12-09 12:00:38.492 JupyterHub app:3178] Hub API listening on http://127.0.0.1:8081/hub/ 12:00:38.492 [ConfigProxy] info: 200 GET /api/routes 12:00:38.493 [ConfigProxy] info: 200 GET /api/routes [I 2023-12-09 12:00:38.493 JupyterHub proxy:477] Adding route for Hub: / => http://127.0.0.1:8081 12:00:38.494 [ConfigProxy] info: Adding route / -> http://127.0.0.1:8081 12:00:38.494 [ConfigProxy] info: Route added / -> http://127.0.0.1:8081 12:00:38.494 [ConfigProxy] info: 201 POST /api/routes/ [I 2023-12-09 12:00:38.495 JupyterHub app:3245] JupyterHub is now running at http://:8000 [W 2023-12-09 12:00:44.988 JupyterHub auth:533] User 'testuser' not allowed. [W 2023-12-09 12:00:44.988 JupyterHub base:843] Failed login for testuser [I 2023-12-09 12:00:44.998 JupyterHub log:191] 200 POST /hub/login?next= (@::1) 29.80ms ^C[C 2023-12-09 12:00:54.478 JupyterHub app:3336] Received signal SIGINT, initiating shutdown... [I 2023-12-09 12:00:54.478 JupyterHub app:2981] Cleaning up single-user servers... [I 2023-12-09 12:00:54.478 JupyterHub proxy:820] Cleaning up proxy[707]... [I 2023-12-09 12:00:54.479 JupyterHub app:3013] ...done ``` </details>
Thank you for opening your first issue in this project! Engagement like this is essential for open source projects! :hugs: <br>If you haven't done so already, check out [Jupyter's Code of Conduct](https://github.com/jupyter/governance/blob/master/conduct/code_of_conduct.md). Also, please try to follow the issue template as it helps other community members to contribute more effectively. ![welcome](https://raw.githubusercontent.com/jupyterhub/.github/master/images/welcome.jpg) You can meet the other [Jovyans](https://jupyter.readthedocs.io/en/latest/community/content-community.html?highlight=jovyan#what-is-a-jovyan) by joining our [Discourse forum](http://discourse.jupyter.org/). There is also an intro thread there where you can stop by and say Hi! :wave: <br>Welcome to the Jupyter community! :tada: Thanks for reporting! This is definitely not the intended behavior, and we in fact had a recent related fix for spawner gids in #4628. The same change (`os.getgrouplist` instead of checking `grp.gr_mem`) should work for LocalAuthenticator as well. Would you like to try a PR?
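The fix hinted at here (checking membership with `os.getgrouplist` instead of `grp.gr_mem`, which misses users whose membership comes only from their primary group) can be sketched in isolation. This is just an illustration of the two checks, not JupyterHub's actual implementation; the function names are made up.

```python
import grp
import os
import pwd


def user_in_group(username, groupname):
    """Membership check that also covers the user's primary group."""
    target_gid = grp.getgrnam(groupname).gr_gid
    primary_gid = pwd.getpwnam(username).pw_gid
    # os.getgrouplist returns every gid the user belongs to, including the
    # primary gid passed as the second argument.
    return target_gid in os.getgrouplist(username, primary_gid)


def user_in_group_naive(username, groupname):
    """The failing check: gr_mem only lists supplementary members,
    so a user whose *primary* group is `groupname` is not found."""
    return username in grp.getgrnam(groupname).gr_mem
```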
2024-05-03T07:21:24Z
[]
[]
jupyterhub/jupyterhub
4,807
jupyterhub__jupyterhub-4807
[ "4803" ]
c135e109abf3ba7918707b60a5dbd927fc09e50a
diff --git a/docs/source/conf.py b/docs/source/conf.py --- a/docs/source/conf.py +++ b/docs/source/conf.py @@ -289,6 +289,7 @@ def setup(app): r"https://github.com/[^/]*$", # too many github usernames / searches in changelog "https://github.com/jupyterhub/jupyterhub/pull/", # too many PRs in changelog "https://github.com/jupyterhub/jupyterhub/compare/", # too many comparisons in changelog + "https://schema.jupyter.org/jupyterhub/.*", # schemas are not published yet r"https?://(localhost|127.0.0.1).*", # ignore localhost references in auto-links r"https://linux.die.net/.*", # linux.die.net seems to block requests from CI with 403 sometimes # don't check links to unpublished advisories diff --git a/jupyterhub/app.py b/jupyterhub/app.py --- a/jupyterhub/app.py +++ b/jupyterhub/app.py @@ -21,6 +21,7 @@ from functools import partial from getpass import getuser from operator import itemgetter +from pathlib import Path from textwrap import dedent from typing import Optional from urllib.parse import unquote, urlparse, urlunparse @@ -29,7 +30,7 @@ import tornado.options from dateutil.parser import parse as parse_date from jinja2 import ChoiceLoader, Environment, FileSystemLoader, PrefixLoader -from jupyter_telemetry.eventlog import EventLog +from jupyter_events.logger import EventLogger from sqlalchemy.exc import OperationalError, SQLAlchemyError from sqlalchemy.orm import joinedload from tornado import gen, web @@ -3251,13 +3252,10 @@ def init_pycurl(self): def init_eventlog(self): """Set up the event logging system.""" - self.eventlog = EventLog(parent=self) + self.eventlog = EventLogger(parent=self) - for dirname, _, files in os.walk(os.path.join(here, 'event-schemas')): - for file in files: - if not file.endswith('.yaml'): - continue - self.eventlog.register_schema_file(os.path.join(dirname, file)) + for schema in (Path(here) / "event-schemas").glob("**/*.yaml"): + self.eventlog.register_event_schema(schema) def write_pid_file(self): pid = os.getpid() diff --git a/jupyterhub/handlers/base.py b/jupyterhub/handlers/base.py --- a/jupyterhub/handlers/base.py +++ b/jupyterhub/handlers/base.py @@ -1128,10 +1128,13 @@ async def finish_user_spawn(): SERVER_SPAWN_DURATION_SECONDS.labels( status=ServerSpawnStatus.success ).observe(time.perf_counter() - spawn_start_time) - self.eventlog.record_event( - 'hub.jupyter.org/server-action', - 1, - {'action': 'start', 'username': user.name, 'servername': server_name}, + self.eventlog.emit( + schema_id='https://schema.jupyter.org/jupyterhub/events/server-action', + data={ + 'action': 'start', + 'username': user.name, + 'servername': server_name, + }, ) proxy_add_start_time = time.perf_counter() spawner._proxy_pending = True @@ -1334,10 +1337,9 @@ async def stop(): SERVER_STOP_DURATION_SECONDS.labels( status=ServerStopStatus.success ).observe(toc - tic) - self.eventlog.record_event( - 'hub.jupyter.org/server-action', - 1, - { + self.eventlog.emit( + schema_id='https://schema.jupyter.org/jupyterhub/events/server-action', + data={ 'action': 'stop', 'username': user.name, 'servername': server_name,
diff --git a/jupyterhub/tests/test_eventlog.py b/jupyterhub/tests/test_eventlog.py --- a/jupyterhub/tests/test_eventlog.py +++ b/jupyterhub/tests/test_eventlog.py @@ -19,20 +19,22 @@ # and `invalid_events` dictionary below. # To test valid events, add event item with the form: -# { ( '<schema id>', <version> ) : { <event_data> } } +# ( '<schema id>', { <event_data> } ) valid_events = [ ( - 'hub.jupyter.org/server-action', - 1, + 'https://schema.jupyter.org/jupyterhub/events/server-action', dict(action='start', username='test-username', servername='test-servername'), ) ] # To test invalid events, add event item with the form: -# { ( '<schema id>', <version> ) : { <event_data> } } +# ( '<schema id>', { <event_data> } ) invalid_events = [ # Missing required keys - ('hub.jupyter.org/server-action', 1, dict(action='start')) + ( + 'https://schema.jupyter.org/jupyterhub/events/server-action', + dict(action='start'), + ) ] @@ -41,11 +43,11 @@ def eventlog_sink(app): """Return eventlog and sink objects""" sink = io.StringIO() handler = logging.StreamHandler(sink) - # Update the EventLog config with handler + # Update the EventLogger config with handler cfg = Config() - cfg.EventLog.handlers = [handler] + cfg.EventLogger.handlers = [handler] - with mock.patch.object(app.config, 'EventLog', cfg.EventLog): + with mock.patch.object(app.config, 'EventLogger', cfg.EventLogger): # recreate the eventlog object with our config app.init_eventlog() # return the sink from the fixture @@ -54,12 +56,12 @@ def eventlog_sink(app): app.init_eventlog() [email protected]('schema, version, event', valid_events) -def test_valid_events(eventlog_sink, schema, version, event): [email protected]('schema, event', valid_events) +def test_valid_events(eventlog_sink, schema, event): eventlog, sink = eventlog_sink eventlog.allowed_schemas = [schema] # Record event - eventlog.record_event(schema, version, event) + eventlog.emit(schema_id=schema, data=event) # Inspect consumed event output = sink.getvalue() assert output @@ -68,11 +70,11 @@ def test_valid_events(eventlog_sink, schema, version, event): assert data is not None [email protected]('schema, version, event', invalid_events) -def test_invalid_events(eventlog_sink, schema, version, event): [email protected]('schema, event', invalid_events) +def test_invalid_events(eventlog_sink, schema, event): eventlog, sink = eventlog_sink eventlog.allowed_schemas = [schema] # Make sure an error is thrown when bad events are recorded with pytest.raises(jsonschema.ValidationError): - recorded_event = eventlog.record_event(schema, version, event) + recorded_event = eventlog.emit(schema_id=schema, data=event)
Migrate from jupyter_telemetry to jupyter_events? The `jupyter_telemetry` package is [said to be unmaintained](https://github.com/jupyter/telemetry/pull/69) and its test suite does not work with Python 3.12. It does not seem to be used heavily in JupyterHub; can you migrate to [jupyter_events](https://github.com/jupyter/jupyter_events) instead?
Yes, we should do that. Thanks for bringing it up!
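For readers unfamiliar with `jupyter_events`, the migration in the patch above amounts to replacing `EventLog.record_event(schema_id, version, data)` with `EventLogger.emit(schema_id=..., data=...)` and registering schema files by path. A minimal standalone sketch of that API follows; the `event-schemas/` directory and the handler are placeholders here, not something this sketch ships with.

```python
import logging
import sys
from pathlib import Path

from jupyter_events.logger import EventLogger

# Attach a handler so emitted events are actually written somewhere.
logger = EventLogger(handlers=[logging.StreamHandler(sys.stdout)])

# Register every YAML schema under a (placeholder) event-schemas/ directory,
# mirroring what init_eventlog() does in the patch above.
for schema in Path("event-schemas").glob("**/*.yaml"):
    logger.register_event_schema(schema)

# The schema_id must match the $id declared inside the YAML schema file.
logger.emit(
    schema_id="https://schema.jupyter.org/jupyterhub/events/server-action",
    data={"action": "start", "username": "test-username", "servername": ""},
)
```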
2024-05-03T10:02:23Z
[]
[]
jupyterhub/jupyterhub
4,831
jupyterhub__jupyterhub-4831
[ "1772", "4028" ]
d8bb3f44029ddb7a04976c8a8aeb328147d7b180
diff --git a/jupyterhub/apihandlers/users.py b/jupyterhub/apihandlers/users.py --- a/jupyterhub/apihandlers/users.py +++ b/jupyterhub/apihandlers/users.py @@ -489,10 +489,29 @@ async def post(self, user_name): 400, f"token {key} must be null or a list of strings, not {value!r}" ) + expires_in = body.get('expires_in', None) + if not (expires_in is None or isinstance(expires_in, int)): + raise web.HTTPError( + 400, + f"token expires_in must be null or integer, not {expires_in!r}", + ) + expires_in_max = self.settings["token_expires_in_max_seconds"] + if expires_in_max: + # validate expires_in against limit + if expires_in is None: + # expiration unspecified, use max value + # (default before max limit was introduced was 'never', this is closest equivalent) + expires_in = expires_in_max + elif expires_in > expires_in_max: + raise web.HTTPError( + 400, + f"token expires_in: {expires_in} must not exceed {expires_in_max}", + ) + try: api_token = user.new_api_token( note=note, - expires_in=body.get('expires_in', None), + expires_in=expires_in, roles=token_roles, scopes=token_scopes, ) diff --git a/jupyterhub/app.py b/jupyterhub/app.py --- a/jupyterhub/app.py +++ b/jupyterhub/app.py @@ -464,6 +464,26 @@ def _cookie_max_age_seconds(self): # convert cookie max age days to seconds return int(self.cookie_max_age_days * 24 * 3600) + token_expires_in_max_seconds = Integer( + 0, + config=True, + help=""" + Set the maximum expiration (in seconds) of tokens created via the API. + + Set to any positive value to disallow creation of tokens with no expiration. + + 0 (default) = no limit. + + Does not affect: + + - Server API tokens ($JUPYTERHUB_API_TOKEN is tied to lifetime of the server) + - Tokens issued during oauth (use `oauth_token_expires_in`) + - Tokens created via the API before configuring this limit + + .. versionadded:: 5.1 + """, + ) + redirect_to_server = Bool( True, help="Redirect user to server (if running), instead of control panel." 
).tag(config=True) @@ -3192,6 +3212,7 @@ def init_tornado_settings(self): static_path=os.path.join(self.data_files_path, 'static'), static_url_prefix=url_path_join(self.hub.base_url, 'static/'), static_handler_class=CacheControlStaticFilesHandler, + token_expires_in_max_seconds=self.token_expires_in_max_seconds, subdomain_hook=self.subdomain_hook, template_path=self.template_paths, template_vars=self.template_vars, diff --git a/jupyterhub/handlers/pages.py b/jupyterhub/handlers/pages.py --- a/jupyterhub/handlers/pages.py +++ b/jupyterhub/handlers/pages.py @@ -542,11 +542,50 @@ def sort_key(client): oauth_clients = sorted(oauth_clients, key=sort_key, reverse=True) auth_state = await self.current_user.get_auth_state() + expires_in_max = self.settings["token_expires_in_max_seconds"] + options = [ + (3600, "1 Hour"), + (86400, "1 Day"), + (7 * 86400, "1 Week"), + (30 * 86400, "1 Month"), + (365 * 86400, "1 Year"), + ] + if expires_in_max: + # omit items that exceed the limit + options = [ + (seconds, label) + for (seconds, label) in options + if seconds <= expires_in_max + ] + if expires_in_max not in (seconds for (seconds, label) in options): + # max not exactly in list, add it + # this also ensures options_list is never empty + max_hours = expires_in_max / 3600 + max_days = max_hours / 24 + if max_days < 3: + max_label = f"{max_hours:.0f} hours" + else: + # this could be a lot of days, but no need to get fancy + max_label = f"{max_days:.0f} days" + options.append(("", f"Max ({max_label})")) + else: + options.append(("", "Never")) + + options_html_elements = [ + f'<option value="{value}">{label}</option>' for value, label in options + ] + # make the last item selected + options_html_elements[-1] = options_html_elements[-1].replace( + "<option ", '<option selected="selected"' + ) + expires_in_options_html = "\n".join(options_html_elements) html = await self.render_template( 'token.html', api_tokens=api_tokens, oauth_clients=oauth_clients, auth_state=auth_state, + token_expires_in_options_html=expires_in_options_html, + token_expires_in_max_seconds=expires_in_max, ) self.finish(html)
diff --git a/jupyterhub/tests/browser/test_browser.py b/jupyterhub/tests/browser/test_browser.py --- a/jupyterhub/tests/browser/test_browser.py +++ b/jupyterhub/tests/browser/test_browser.py @@ -481,6 +481,70 @@ async def open_token_page(app, browser, user): await expect(browser).to_have_url(re.compile(".*/hub/token")) [email protected]( + "expires_in_max, expected_options", + [ + pytest.param( + None, + [ + ('1 Hour', '3600'), + ('1 Day', '86400'), + ('1 Week', '604800'), + ('1 Month', '2592000'), + ('1 Year', '31536000'), + ('Never', ''), + ], + id="default", + ), + pytest.param( + 86400, + [ + ('1 Hour', '3600'), + ('1 Day', '86400'), + ], + id="1day", + ), + pytest.param( + 3600 * 36, + [ + ('1 Hour', '3600'), + ('1 Day', '86400'), + ('Max (36 hours)', ''), + ], + id="36hours", + ), + pytest.param( + 86400 * 10, + [ + ('1 Hour', '3600'), + ('1 Day', '86400'), + ('1 Week', '604800'), + ('Max (10 days)', ''), + ], + id="10days", + ), + ], +) +async def test_token_form_expires_in( + app, browser, user_special_chars, expires_in_max, expected_options +): + with mock.patch.dict( + app.tornado_settings, {"token_expires_in_max_seconds": expires_in_max} + ): + await open_token_page(app, browser, user_special_chars.user) + # check the list of tokens duration + dropdown = browser.locator('#token-expiration-seconds') + options = await dropdown.locator('option').all() + actual_values = [ + (await option.text_content(), await option.get_attribute('value')) + for option in options + ] + assert actual_values == expected_options + # get the value of the 'selected' attribute of the currently selected option + selected_value = dropdown.locator('option[selected]') + await expect(selected_value).to_have_text(expected_options[-1][0]) + + async def test_token_request_form_and_panel(app, browser, user_special_chars): """verify elements of the request token form""" @@ -497,24 +561,6 @@ async def test_token_request_form_and_panel(app, browser, user_special_chars): await expect(field_note).to_be_enabled() await expect(field_note).to_be_empty() - # check the list of tokens duration - dropdown = browser.locator('#token-expiration-seconds') - options = await dropdown.locator('option').all() - expected_values_in_list = { - '1 Hour': '3600', - '1 Day': '86400', - '1 Week': '604800', - 'Never': '', - } - actual_values = { - await option.text_content(): await option.get_attribute('value') - for option in options - } - assert actual_values == expected_values_in_list - # get the value of the 'selected' attribute of the currently selected option - selected_value = dropdown.locator('option[selected]') - await expect(selected_value).to_have_text("Never") - # check scopes field scopes_input = browser.get_by_label("Permissions") await expect(scopes_input).to_be_editable() diff --git a/jupyterhub/tests/test_api.py b/jupyterhub/tests/test_api.py --- a/jupyterhub/tests/test_api.py +++ b/jupyterhub/tests/test_api.py @@ -12,6 +12,7 @@ from urllib.parse import parse_qs, quote, urlparse import pytest +from dateutil.parser import parse as parse_date from pytest import fixture, mark from tornado.httputil import url_concat @@ -1726,6 +1727,46 @@ async def test_get_new_token(app, headers, status, note, expires_in): assert r.status_code == 404 [email protected]( + "expires_in_max, expires_in, expected", + [ + (86400, None, 86400), + (86400, 86400, 86400), + (86400, 86401, 'error'), + (3600, 100, 100), + (None, None, None), + (None, 86400, 86400), + ], +) +async def test_token_expires_in_max(app, user, expires_in_max, expires_in, 
expected): + options = { + "expires_in": expires_in, + } + # request a new token + with mock.patch.dict( + app.tornado_settings, {"token_expires_in_max_seconds": expires_in_max} + ): + r = await api_request( + app, + f'users/{user.name}/tokens', + method='post', + data=json.dumps(options), + ) + if expected == 'error': + assert r.status_code == 400 + assert f"must not exceed {expires_in_max}" in r.json()["message"] + return + else: + assert r.status_code == 201 + token_model = r.json() + if expected is None: + assert token_model["expires_at"] is None + else: + expected_expires_at = utcnow() + timedelta(seconds=expected) + expires_at = parse_date(token_model["expires_at"]) + assert abs((expires_at - expected_expires_at).total_seconds()) < 30 + + @mark.parametrize( "as_user, for_user, status", [
Expire user generated tokens at /hub/token Similar to `config.JupyterHub.cookie_max_age_days`, there could be a `config.JupyterHub.user_tokens_max_age_days`, defaulting to the current state of `0` == no expiration. Controls for JupyterHub API tokens JupyterHub has a [REST API](https://jupyterhub.readthedocs.io/en/stable/reference/rest.html) that can be accessed using an API token. The API token can be created in the Hub UI at `/hub/token` or through a [POST](https://jupyterhub.readthedocs.io/en/stable/reference/rest-api.html#/default/post_users__name__tokens) to the REST API itself. In the Hub UI a user can create a token with an expiration of 1 day, 1 week, 1 month, or never. The API allows a user to set an arbitrary value for expiration. A user's tokens can be revoked by the user or by a user with the ability to revoke their tokens (e.g. an admin). I was wondering what controls JupyterHub admins have or could have to mitigate risk from lost or stolen JupyterHub REST API tokens. I'm opening this issue to capture discussion about potential controls to implement in JupyterHub, or that may already be available. Some of this has been scoped out with @minrk. In terms of remediation, if a user's token is known to be compromised, their accounts are possibly already locked and they may not have a way to revoke their own tokens. If an admin is informed that a user's token is compromised, they can delete the user from their Hub, use `python -m jupyterhub.dbutil shell` to remove the token, or make an API call to revoke the user's token. It may be a good idea to add to the Hub UI a way for users who can revoke other users' tokens to do that. This could be done by updating the URL scheme so such a user (e.g. an admin) can see another user's `/hub/token` page to do the revocation. In terms of more preemptive mitigations, might there be ways for admins to: 1. Disable generation of these general-purpose tokens altogether? 2. Enable generation of only more restrictively scoped tokens (a more restricted user role)? 3. Enforce a maximum lifetime for these general-purpose tokens, necessitating a re-auth? 4. Only allow token access from specified IP ranges? Option 1 works already through JupyterHub RBAC by replacing the default `user` role with one that cannot create API tokens. Thanks to Min for enumerating all the scopes here: ``` c.JupyterHub.load_roles = [ { "name": "user", "scopes": [ # read the user model "read:users!user", # manage the user's servers "servers!user", # access the user's servers "access:servers!user", # update activity (required for server activity) "users:activity!user", # keep read-only access to tokens # can't add or revoke tokens! "read:tokens!user", ] } ] ``` With this redefined `user` role, attempts to create a token fail with 403 Forbidden. Other tokens that are needed to operate notebooks and services still work just fine. This solution seems like a good one for folks who need to require their users to re-authenticate periodically to interact with Jupyter/JupyterHub. This may be worth including as documentation via an example, and referencing that example from JupyterHub's RBAC and security docs? Option 2 is a little bit different from the above, which just shuts off the ability to generate tokens. There might be some argument that users could have the ability to generate more restricted tokens and that's somehow OK. Are there use cases where that makes sense, and what kind of interface would be needed to manage all the different things you could pick and choose?
Option 3 is something that is not possible now technically (this has been discussed previously in #1772 as well). It is something Min says he would like to be able to address as part of a better singleuser experience. I went into this favoring Option 4 personally, just allowing users to use tokens from specified IP ranges, but Min pointed out that mileage may vary due to issues with reverse proxying. If one were to try this it may just be best to do it at the edge or where SSL is terminated. Do folks have other thoughts, opinions, or other options?
0.9 adds timestamps (created and last_activity) to API tokens, so this should now be possible. I'm reticent to do it by default without adding refresh tokens, because it would mean that single-user servers would have a max uptime of token_max_age_days. Thanks. So far I'm not sure how to handle an employee leaving the company, for instance, i.e. even though they can't log in anymore, the generated tokens would still be valid. Hi all, I'm a little new to jupyterhub inner workings so please forgive my ignorance in advance. Would this change affect the `jupyter-hub-token` cookie as well as `user-*` cookies? Also, to my knowledge the `set_login_cookie` method does not expose passing all kwargs to tornado's `set_secure_cookie` method. Can we also fit this in in a nice way? My use case involves only setting the expiry date to a lot shorter than the standard 30 days. I got it to work with some local modifications, but it's obviously not something anyone would like to manage over time, and it's mandatory to have it for my organization's single sign-on. We're using jupyterhub 0.8.1 if that helps. Hi, how are other JupyterHub installations dealing with this in the enterprise? Does anyone have newer feedback? Any news on how to do this? This method is still patched on our installation and it's pretty ugly. Adding a config option described in the OP for default token max age, with a default preserving the current behavior, makes sense to me in the short term. Doing so at the moment does mean that you are setting a max age for user servers before they must be forced to restart in order to get a fresh token, but for many deployments that's likely not an issue, so I think it's okay to add the option with this caveat. Then we can turn it on by default perhaps when someone gets a chance to implement refresh token support. Thanks for writing this up! Allowing users to manage other users' tokens (assuming they have permission) should be pretty straightforward. The API already does this and the UI behind `/hub/token` should be pretty easy to update to support `/hub/token/:user`. I think there are other ways we can make token revocation easier (it can only be done now by token _id_, not if you only have the token itself - which means you need to find out the token id you want to revoke!). I think writing up some sample use cases to cover desirable workflows will help. I think options for default and maximum expiry are fairly easy to add. Until/unless we have refresh tokens in place and a good experience for refreshing tokens when talking to a user server during long sessions, token max age should be at least _server_ max age. Because if a server's own API token expires, things start to break in a way that can only be solved by a server restart. And if a user's browser token expires, that can result in data loss due to losing the ability to save without refreshing the page. For securing access from a location, I would favor using internal_ssl to lock down internal, local communication, and validate IPs at the edge. Because the Hub internally has to handle too many different layers of proxy headers, etc., and we already haven't been able to get this robustly correct for just the protocol of the original request.
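A hedged sketch of how the limit introduced by the patch above might be set alongside the role tweak quoted in the issue; the 24-hour value is purely illustrative.

```python
# jupyterhub_config.py: illustrative values, not a recommendation
c = get_config()  # noqa

# Cap tokens created via the REST API or the /hub/token page at 24 hours.
# The default of 0 keeps the old behaviour (tokens may never expire).
# Per the traitlet's help text, server tokens and OAuth tokens are
# governed separately and are not affected by this limit.
c.JupyterHub.token_expires_in_max_seconds = 24 * 60 * 60
```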
2024-06-03T10:57:55Z
[]
[]
urllib3/urllib3
83
urllib3__urllib3-83
[ "8" ]
4059daae19a943834a3dd995c277448713483734
diff --git a/urllib3/util.py b/urllib3/util.py --- a/urllib3/util.py +++ b/urllib3/util.py @@ -6,6 +6,7 @@ from base64 import b64encode +from collections import namedtuple try: from select import poll, POLLIN @@ -20,94 +21,70 @@ from .exceptions import LocationParseError -def make_headers(keep_alive=None, accept_encoding=None, user_agent=None, - basic_auth=None): +class Url(namedtuple('Url', ['scheme', 'auth', 'host', 'port', 'path', 'query', 'fragment'])): """ - Shortcuts for generating request headers. - - :param keep_alive: - If ``True``, adds 'connection: keep-alive' header. - - :param accept_encoding: - Can be a boolean, list, or string. - ``True`` translates to 'gzip,deflate'. - List will get joined by comma. - String will be used as provided. - - :param user_agent: - String representing the user-agent you want, such as - "python-urllib3/0.6" - - :param basic_auth: - Colon-separated username:password string for 'authorization: basic ...' - auth header. - - Example: :: - - >>> make_headers(keep_alive=True, user_agent="Batman/1.0") - {'connection': 'keep-alive', 'user-agent': 'Batman/1.0'} - >>> make_headers(accept_encoding=True) - {'accept-encoding': 'gzip,deflate'} + Datastructure for representing an HTTP URL. Used as a return value for + :func:`parse_url`. """ - headers = {} - if accept_encoding: - if isinstance(accept_encoding, str): - pass - elif isinstance(accept_encoding, list): - accept_encoding = ','.join(accept_encoding) - else: - accept_encoding = 'gzip,deflate' - headers['accept-encoding'] = accept_encoding + slots = () - if user_agent: - headers['user-agent'] = user_agent + def __new__(cls, scheme=None, auth=None, host=None, port=None, path=None, query=None, fragment=None): + return super(Url, cls).__new__(cls, scheme, auth, host, port, path, query, fragment) - if keep_alive: - headers['connection'] = 'keep-alive' - - if basic_auth: - headers['authorization'] = 'Basic ' + \ - b64encode(six.b(basic_auth)).decode('utf-8') - - return headers + @property + def hostname(self): + """For backwards-compatibility with urlparse. We're nice like that.""" + return self.host def split_first(s, delims): """ Given a string and an iterable of delimiters, split on the first found - delimiter. Return two split parts. + delimiter. Return two split parts and the matched delimiter. If not found, then the first part is the full input string. + Example: :: + + >>> split_first('foo/bar?baz', '?/=') + ('foo', 'bar?baz', '/') + >>> split_first('foo/bar?baz', '123') + ('foo/bar?baz', '', None) + Scales linearly with number of delims. Not ideal for large number of delims. """ min_idx = None + min_delim = None for d in delims: idx = s.find(d) if idx < 0: continue - if not min_idx: + if min_idx is None or idx < min_idx: min_idx = idx - else: - min_idx = min(idx, min_idx) + min_delim = d if min_idx < 0: - return s, '' + return s, '', None - return s[:min_idx], s[min_idx+1:] + return s[:min_idx], s[min_idx+1:], min_delim -def get_host(url): +def parse_url(url): """ - Given a url, return its scheme, host and port (None if it's not there). + Given a url, return a parsed :class:`.Url` namedtuple. Best-effort is + performed to parse incomplete urls. Fields not provided will be None. - For example: :: + Partly backwards-compatible with :module:`urlparse`. - >>> get_host('http://google.com/mail/') - ('http', 'google.com', None) - >>> get_host('google.com:80') - ('http', 'google.com', 80) + Example: :: + + >>> parse_url('http://google.com/mail/') + Url(scheme='http', host='google.com', port=None, path='/', ...) 
+ >>> prase_url('google.com:80') + Url(scheme=None, host='google.com', port=80, path=None, ...) + >>> prase_url('/foo?bar') + Url(scheme=None, host=None, port=None, path='/foo', query='bar', ...) """ # While this code has overlap with stdlib's urlparse, it is much @@ -115,9 +92,13 @@ def get_host(url): # Additionally, this imeplementations does silly things to be optimal # on CPython. - scheme = 'http' + scheme = None + auth = None host = None port = None + path = None + fragment = None + query = None # Scheme if '://' in url: @@ -125,11 +106,15 @@ def get_host(url): # Find the earliest Authority Terminator # (http://tools.ietf.org/html/rfc3986#section-3.2) - url, _path = split_first(url, ['/', '?', '#']) + url, path_, delim = split_first(url, ['/', '?', '#']) + + if delim: + # Reassemble the path + path = delim + path_ # Auth if '@' in url: - _auth, url = url.split('@', 1) + auth, url = url.split('@', 1) # IPv6 if url and url[0] == '[': @@ -147,10 +132,85 @@ def get_host(url): port = int(port) - elif not host: + elif not host and url: host = url - return scheme, host, port + if not path: + return Url(scheme, auth, host, port, path, query, fragment) + + # Fragment + if '#' in path: + path, fragment = path.split('#', 1) + + # Query + if '?' in path: + path, query = path.split('?', 1) + + # Paths start with '/' + if path and path[0] != '/': + path = '/' + path + + return Url(scheme, auth, host, port, path, query, fragment) + + +def get_host(url): + """ + Deprecated. Use :func:`parse_url` instead. + """ + p = parse_url(url) + return p.scheme or 'http', p.hostname, p.port + + +def make_headers(keep_alive=None, accept_encoding=None, user_agent=None, + basic_auth=None): + """ + Shortcuts for generating request headers. + + :param keep_alive: + If ``True``, adds 'connection: keep-alive' header. + + :param accept_encoding: + Can be a boolean, list, or string. + ``True`` translates to 'gzip,deflate'. + List will get joined by comma. + String will be used as provided. + + :param user_agent: + String representing the user-agent you want, such as + "python-urllib3/0.6" + + :param basic_auth: + Colon-separated username:password string for 'authorization: basic ...' + auth header. + + Example: :: + + >>> make_headers(keep_alive=True, user_agent="Batman/1.0") + {'connection': 'keep-alive', 'user-agent': 'Batman/1.0'} + >>> make_headers(accept_encoding=True) + {'accept-encoding': 'gzip,deflate'} + """ + headers = {} + if accept_encoding: + if isinstance(accept_encoding, str): + pass + elif isinstance(accept_encoding, list): + accept_encoding = ','.join(accept_encoding) + else: + accept_encoding = 'gzip,deflate' + headers['accept-encoding'] = accept_encoding + + if user_agent: + headers['user-agent'] = user_agent + + if keep_alive: + headers['connection'] = 'keep-alive' + + if basic_auth: + headers['authorization'] = 'Basic ' + \ + b64encode(six.b(basic_auth)).decode('utf-8') + + return headers def is_connection_dropped(conn):
diff --git a/test/test_util.py b/test/test_util.py --- a/test/test_util.py +++ b/test/test_util.py @@ -2,7 +2,7 @@ import logging from urllib3 import add_stderr_logger -from urllib3.util import get_host, make_headers, split_first +from urllib3.util import get_host, make_headers, split_first, parse_url, Url from urllib3.exceptions import LocationParseError @@ -62,6 +62,25 @@ def test_invalid_host(self): for location in invalid_host: self.assertRaises(LocationParseError, get_host, location) + def test_parse_url(self): + url_host_map = { + 'http://google.com/mail': Url('http', None, 'google.com', None, '/mail'), + 'http://google.com/mail/': Url('http', None, 'google.com', None, '/mail/'), + 'google.com/mail': Url(None, None, 'google.com', None, '/mail'), + 'http://google.com/': Url('http', None, 'google.com', None, '/'), + 'http://google.com': Url('http', None, 'google.com', None, None), + '': Url(), + '/': Url(path='/'), + '?': Url(path='', query=''), + '#': Url(path='', fragment=''), + '#?/!google.com/?foo#bar': Url(path='', fragment='?/!google.com/?foo#bar'), + '/foo': Url(path='/foo'), + '/foo?bar=baz': Url(path='/foo', query='bar=baz'), + '/foo?bar=baz#banana?apple/orange': Url(path='/foo', query='bar=baz', fragment='banana?apple/orange'), + } + for url, expected_host in url_host_map.items(): + returned_host = parse_url(url) + self.assertEquals(returned_host, expected_host) def test_make_headers(self): self.assertEqual( @@ -95,9 +114,11 @@ def test_make_headers(self): def test_split_first(self): test_cases = { - ('abcd', 'b'): ('a', 'cd'), - ('abcd', 'cb'): ('a', 'cd'), - ('abcd', ''): ('abcd', ''), + ('abcd', 'b'): ('a', 'cd', 'b'), + ('abcd', 'cb'): ('a', 'cd', 'b'), + ('abcd', ''): ('abcd', '', None), + ('abcd', 'a'): ('', 'bcd', 'a'), + ('abcd', 'ab'): ('', 'bcd', 'a'), } for input, expected in test_cases.iteritems(): output = split_first(*input)
Scheme and host erroneously passed to HTTPConnection request method I think there is a problem in the use of the `httplib.HTTPConnection` method `request` when called at [line 213 of urllib3/connectionpool.py](https://github.com/shazow/urllib3/blob/master/urllib3/connectionpool.py#L213), where you pass it the full URL, containing the scheme and host, instead of just the path (and query part), as shown in the [httplib usage examples](http://docs.python.org/library/httplib.html#examples). This results in a wrong HTTP request being sent to the server. To see it, you can for instance run ``` python -m SimpleHTTPServer ``` in a shell and then, in another one, run ``` python -c 'from urllib3 import PoolManager; http = PoolManager(); http.request( "GET", "http://localhost:8000/this/is/an/example" )' ``` and compare what the access log in the first shell reports with what happens if you do ``` curl "http://localhost:8000/this/is/an/example" ``` I can submit a patch, but I'm not an urllib3 expert so I will probably miss some other place where the same error occurs.
Hi there, thank you for the report! This was a conscious decision, but perhaps not the correct one. The goal was to reduce complexity and avoid inexplicit behaviour. That is, when you make a request to "http://localhost:8000/this/is/an/example", that's exactly the request that urllib3 should be making. The current workaround to achieve what you want is: ``` python from urllib3 import PoolManager http = PoolManager() conn = http.connection_from_url("http://localhost:8000") response = conn.request("GET", "/this/is/an/example") ``` When we do PoolManager.request, it does the same thing behind the scenes except it doesn't strip away the host like we did here manually. I agree that there should be an option to strip away the host (perhaps even by default). Should this option be specified in the PoolManager constructor? Such as `PoolManager(strip_host=True)`. But then when should the stripping occur? If it happens in urlopen, then should we backport the same functionality outside of PoolManager? (ie. into ConnectionPool objects.) The request should definitely be made with the path (and the query) only, because urllib3 is a HTTP/1.1 client. [RFC2616](http://www.w3.org/Protocols/rfc2616/rfc2616-sec5.html#sec5.1.2): > To allow for transition to absoluteURIs in all requests in future versions of HTTP, all HTTP/1.1 servers MUST accept the > absoluteURI form in requests, even though HTTP/1.1 clients will only generate them in requests to proxies. Excellent info. Thanks :) This isn't a bug in urllib3. It's doing exactly what it's told. Btw, if anyone is in dire need, here's a handy basic recipe for doing "proper" url passing with redirection in urllib3: ``` python import urlparse import urllib3 http = urllib3.PoolManager() def request(method, url, conn=None): if conn: # Request within the current host connection (used for redirect handling) if not url.startswith('/'): url = '/' + url r = conn.request(method, url, redirect=False, assert_same_host=False) else: p = urlparse.urlparse(url) conn = http.connection_from_host(p.hostname, p.port, p.scheme) r = conn.request(method, p.path, redirect=False, assert_same_host=False) is_redirect = r.get_redirect_location() if not is_redirect: return r print "Redirecting: %s" % is_redirect if '://' not in is_redirect: # Redirect to same host return request('GET', is_redirect, conn) return request('GET', is_redirect) ```
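The `parse_url` helper added in the patch above makes the URL-splitting half of this recipe simpler. A rough sketch using only the fields visible in that patch (the URL is a placeholder, and later urllib3 versions expose this differently):

```python
from urllib3 import connection_from_url
from urllib3.util import parse_url

url = "http://localhost:8000/this/is/an/example?foo=bar"

u = parse_url(url)
# Url(scheme='http', host='localhost', port=8000,
#     path='/this/is/an/example', query='foo=bar', ...)

# Rebuild the origin-form request target (path + query) so only the relative
# URI is sent on the wire, as RFC 2616 expects for non-proxy requests.
request_uri = (u.path or "/") + (("?" + u.query) if u.query else "")

pool = connection_from_url(url)
response = pool.urlopen("GET", request_uri)
print(response.status)
```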
2012-06-23T15:42:26Z
[]
[]
urllib3/urllib3
85
urllib3__urllib3-85
[ "8" ]
7c899375cbce1817cdc8a3f10781dff6427d07a5
diff --git a/urllib3/connectionpool.py b/urllib3/connectionpool.py --- a/urllib3/connectionpool.py +++ b/urllib3/connectionpool.py @@ -268,15 +268,16 @@ def _make_request(self, conn, method, url, timeout=_Default, log.debug("\"%s %s %s\" %s %s" % (method, url, http_version, httplib_response.status, httplib_response.length)) - return httplib_response - def is_same_host(self, url): """ Check if the given ``url`` is a member of the same host as this connection pool. """ + if url.startswith('/'): + return True + # TODO: Add optional support for socket.gethostbyname checking. scheme, host, port = get_host(url) @@ -284,8 +285,7 @@ def is_same_host(self, url): # Use explicit default port for comparison when none is given. port = port_by_scheme.get(scheme) - return (url.startswith('/') or - (scheme, host, port) == (self.scheme, self.host, self.port)) + return (scheme, host, port) == (self.scheme, self.host, self.port) def urlopen(self, method, url, body=None, headers=None, retries=3, redirect=True, assert_same_host=True, timeout=_Default, diff --git a/urllib3/poolmanager.py b/urllib3/poolmanager.py --- a/urllib3/poolmanager.py +++ b/urllib3/poolmanager.py @@ -8,9 +8,10 @@ from ._collections import RecentlyUsedContainer from .connectionpool import HTTPConnectionPool, HTTPSConnectionPool -from .connectionpool import get_host, connection_from_url, port_by_scheme +from .connectionpool import connection_from_url, port_by_scheme from .exceptions import HostChangedError from .request import RequestMethods +from .util import parse_url __all__ = ['PoolManager', 'ProxyManager', 'proxy_from_url'] @@ -54,13 +55,15 @@ def __init__(self, num_pools=10, **connection_pool_kw): self.connection_pool_kw = connection_pool_kw self.pools = RecentlyUsedContainer(num_pools) - def connection_from_host(self, host, port=80, scheme='http'): + def connection_from_host(self, host, port=None, scheme='http'): """ Get a :class:`ConnectionPool` based on the host, port, and scheme. - Note that an appropriate ``port`` value is required here to normalize - connection pools in our container most effectively. + If ``port`` isn't given, it will be derived from the ``scheme`` using + ``urllib3.connectionpool.port_by_scheme``. """ + port = port or port_by_scheme.get(scheme, 80) + pool_key = (scheme, host, port) # If the scheme, host, or port doesn't match existing open connections, @@ -86,26 +89,39 @@ def connection_from_url(self, url): Additional parameters are taken from the :class:`.PoolManager` constructor. """ - scheme, host, port = get_host(url) + u = parse_url(url) + return self.connection_from_host(u.host, port=u.port, scheme=u.scheme) - port = port or port_by_scheme.get(scheme, 80) + def handle_redirect(response): + pass - return self.connection_from_host(host, port=port, scheme=scheme) - - def urlopen(self, method, url, **kw): + def urlopen(self, method, url, redirect=True, **kw): """ - Same as :meth:`urllib3.connectionpool.HTTPConnectionPool.urlopen`. + Same as :meth:`urllib3.connectionpool.HTTPConnectionPool.urlopen` + with custom cross-host redirect logic and only sends the request-uri + portion of the ``url``. - ``url`` must be absolute, such that an appropriate + The given ``url`` parameter must be absolute, such that an appropriate :class:`urllib3.connectionpool.ConnectionPool` can be chosen for it. 
""" - conn = self.connection_from_url(url) - try: - return conn.urlopen(method, url, **kw) + u = parse_url(url) + conn = self.connection_from_host(u.host, port=u.port, scheme=u.scheme) + + kw['assert_same_host'] = False + kw['redirect'] = False + + response = conn.urlopen(method, u.request_uri, **kw) + + redirect_location = redirect and response.get_redirect_location() + if not redirect_location: + return response + + if response.status == 303: + method = 'GET' - except HostChangedError as e: - kw['retries'] = e.retries # Persist retries countdown - return self.urlopen(method, e.url, **kw) + log.info("Redirecting %s -> %s" % (url, redirect_location)) + kw['retries'] = kw.get('retries', 3) - 1 # Persist retries countdown + return self.urlopen(method, redirect_location, **kw) class ProxyManager(RequestMethods):
diff --git a/test/with_dummyserver/test_poolmanager.py b/test/with_dummyserver/test_poolmanager.py --- a/test/with_dummyserver/test_poolmanager.py +++ b/test/with_dummyserver/test_poolmanager.py @@ -32,7 +32,7 @@ def test_cross_host_redirect(self): try: http.request('GET', '%s/redirect' % self.base_url, fields={'target': cross_host_location}, - timeout=0.01, retries=1) + timeout=0.01, retries=0) self.fail("Request succeeded instead of raising an exception like it should.") except MaxRetryError: @@ -40,7 +40,7 @@ def test_cross_host_redirect(self): r = http.request('GET', '%s/redirect' % self.base_url, fields={'target': '%s/echo?a=b' % self.base_url_alt}, - timeout=0.01, retries=2) + timeout=0.01, retries=1) self.assertEqual(r._pool.host, self.host_alt)
Scheme and host erroneously passed to HTTPConnection request method I think there is a problem in the use of the `httplib.HTTPConnection` method `request` when called at [line 213 of urllib3/connectionpool.py](https://github.com/shazow/urllib3/blob/master/urllib3/connectionpool.py#L213), where you pass it the full URL, containing the scheme and host, instead of just the path (and query part), as shown in the [httplib usage examples](http://docs.python.org/library/httplib.html#examples). This results in a wrong HTTP request being sent to the server. To see it, you can for instance run ``` python -m SimpleHTTPServer ``` in a shell and then, in another one, run ``` python -c 'from urllib3 import PoolManager; http = PoolManager(); http.request( "GET", "http://localhost:8000/this/is/an/example" )' ``` and compare what the access log in the first shell reports with what happens if you do ``` curl "http://localhost:8000/this/is/an/example" ``` I can submit a patch, but I'm not an urllib3 expert so I will probably miss some other place where the same error occurs.
Hi there, thank you for the report! This was a conscious decision, but perhaps not the correct one. The goal was to reduce complexity and avoid inexplicit behaviour. That is, when you make a request to "http://localhost:8000/this/is/an/example", that's exactly the request that urllib3 should be making. The current workaround to achieve what you want is: ``` python from urllib3 import PoolManager http = PoolManager() conn = http.connection_from_url("http://localhost:8000") response = conn.request("GET", "/this/is/an/example") ``` When we do PoolManager.request, it does the same thing behind the scenes except it doesn't strip away the host like we did here manually. I agree that there should be an option to strip away the host (perhaps even by default). Should this option be specified in the PoolManager constructor? Such as `PoolManager(strip_host=True)`. But then when should the stripping occur? If it happens in urlopen, then should we backport the same functionality outside of PoolManager? (ie. into ConnectionPool objects.) The request should definitely be made with the path (and the query) only, because urllib3 is a HTTP/1.1 client. [RFC2616](http://www.w3.org/Protocols/rfc2616/rfc2616-sec5.html#sec5.1.2): > To allow for transition to absoluteURIs in all requests in future versions of HTTP, all HTTP/1.1 servers MUST accept the > absoluteURI form in requests, even though HTTP/1.1 clients will only generate them in requests to proxies. Excellent info. Thanks :) This isn't a bug in urllib3. It's doing exactly what it's told. Btw, if anyone is in dire need, here's a handy basic recipe for doing "proper" url passing with redirection in urllib3: ``` python import urlparse import urllib3 http = urllib3.PoolManager() def request(method, url, conn=None): if conn: # Request within the current host connection (used for redirect handling) if not url.startswith('/'): url = '/' + url r = conn.request(method, url, redirect=False, assert_same_host=False) else: p = urlparse.urlparse(url) conn = http.connection_from_host(p.hostname, p.port, p.scheme) r = conn.request(method, p.path, redirect=False, assert_same_host=False) is_redirect = r.get_redirect_location() if not is_redirect: return r print "Redirecting: %s" % is_redirect if '://' not in is_redirect: # Redirect to same host return request('GET', is_redirect, conn) return request('GET', is_redirect) ```
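With the `PoolManager.urlopen` changes in the patch above, the manual recipe quoted here should no longer be needed for the common case. A short hedged sketch of the intended call pattern (the URL is a placeholder):

```python
from urllib3 import PoolManager

http = PoolManager()

# urlopen()/request() now parse the absolute URL, pick (or create) the pool
# for that host, send only the request-uri on the wire, and follow redirects
# (including cross-host ones) while counting down `retries`.
response = http.request("GET", "http://localhost:8000/this/is/an/example")
print(response.status)
```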
2012-07-01T01:00:32Z
[]
[]
urllib3/urllib3
187
urllib3__urllib3-187
[ "182" ]
5e9a25dcdaac0ddcc0b527ea03fe5db7a97a1041
diff --git a/urllib3/response.py b/urllib3/response.py --- a/urllib3/response.py +++ b/urllib3/response.py @@ -7,6 +7,7 @@ import logging import zlib +import io from .exceptions import DecodeError from .packages.six import string_types as basestring, binary_type @@ -48,7 +49,7 @@ def _get_decoder(mode): return DeflateDecoder() -class HTTPResponse(object): +class HTTPResponse(io.IOBase): """ HTTP Response container. @@ -239,3 +240,35 @@ def getheaders(self): def getheader(self, name, default=None): return self.headers.get(name, default) + + # Overrides from io.IOBase + def close(self): + if not self.closed: + self._fp.close() + + @property + def closed(self): + if self._fp is None: + return True + elif hasattr(self._fp, 'closed'): + return self._fp.closed + elif hasattr(self._fp, 'isclosed'): # Python 2 + return self._fp.isclosed() + else: + return True + + def fileno(self): + if self._fp is None: + raise IOError("HTTPResponse has no file to get a fileno from") + elif hasattr(self._fp, "fileno"): + return self._fp.fileno() + else: + raise IOError("The file-like object this HTTPResponse is wrapped " + "around has no file descriptor") + + def flush(self): + if self._fp is not None and hasattr(self._fp, 'flush'): + return self._fp.flush() + + def readable(self): + return True
diff --git a/test/test_response.py b/test/test_response.py --- a/test/test_response.py +++ b/test/test_response.py @@ -1,6 +1,6 @@ import unittest -from io import BytesIO +from io import BytesIO, BufferedReader from urllib3.response import HTTPResponse from urllib3.exceptions import DecodeError @@ -112,5 +112,53 @@ def test_chunked_decoding_gzip(self): self.assertEqual(r.read(1), b'f') self.assertEqual(r.read(2), b'oo') + def test_io(self): + import socket + try: + from http.client import HTTPResponse as OldHTTPResponse + except: + from httplib import HTTPResponse as OldHTTPResponse + + fp = BytesIO(b'foo') + resp = HTTPResponse(fp, preload_content=False) + + self.assertEqual(resp.closed, False) + self.assertEqual(resp.readable(), True) + self.assertEqual(resp.writable(), False) + self.assertRaises(IOError, resp.fileno) + + resp.close() + self.assertEqual(resp.closed, True) + + # Try closing with an `httplib.HTTPResponse`, because it has an + # `isclosed` method. + hlr = OldHTTPResponse(socket.socket()) + resp2 = HTTPResponse(hlr, preload_content=False) + self.assertEqual(resp2.closed, False) + resp2.close() + self.assertEqual(resp2.closed, True) + + #also try when only data is present. + resp3 = HTTPResponse('foodata') + self.assertRaises(IOError, resp3.fileno) + + resp3._fp = 2 + # A corner case where _fp is present but doesn't have `closed`, + # `isclosed`, or `fileno`. Unlikely, but possible. + self.assertEqual(resp3.closed, True) + self.assertRaises(IOError, resp3.fileno) + + def test_io_bufferedreader(self): + fp = BytesIO(b'foo') + resp = HTTPResponse(fp, preload_content=False) + br = BufferedReader(resp) + + self.assertEqual(br.read(), b'foo') + + br.close() + self.assertEqual(resp.closed, True) + + + if __name__ == '__main__': unittest.main()
Support `io` framework for HTTPResponse This suggestion is motivated by the following issue that pops up in `requests` (see kennethreitz/requests#1364 - they redirected me here): ``` >>> import io, requests >>> r = requests.get('http://www.google.com',stream=True) >>> io.BufferedReader(r.raw).read() AttributeError: 'HTTPResponse' object has no attribute 'readable' ``` The base problem here is that the `requests.packages.urllib3.response.HTTPResponse` object (that's what `r.raw` is) does not respect the `io` API. The following _does_ work, however: ``` >>> r = requests.get('http://www.google.com',stream=True) >>> r.raw.readable=lambda:True >>> r.raw.closed=False >>> io.BufferedReader(r.raw).read() ``` This suggests that the main thing that is needed is adding a `readable` method and a `closed` attribute to `HTTPResponse`. (I think `flush` and `close` are probably also necessary, and maybe also `writable` and `seekable`.) Is there a reason this is not possible, or would it be fine to just add the relevant methods to `HTTPResponse`?
I don't see any reason why not. :) You're welcome to. Please include tests accordingly.
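A small sketch of what this change enables, modelled on the new `test_io_bufferedreader` test above. The URL is a placeholder, and `preload_content=False` leaves the body unread so the raw response can be wrapped:

```python
import io

from urllib3 import PoolManager

http = PoolManager()
r = http.request("GET", "http://www.google.com/", preload_content=False)

# HTTPResponse now subclasses io.IOBase, so io machinery accepts it directly.
reader = io.BufferedReader(r)
print(reader.read(100))

reader.close()   # also closes the wrapped HTTPResponse
print(r.closed)  # True
```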
2013-05-31T07:20:43Z
[]
[]
urllib3/urllib3
262
urllib3__urllib3-262
[ "261" ]
cfcb1fc4fb4f415d1f1be64af11d69926cade23f
diff --git a/urllib3/connectionpool.py b/urllib3/connectionpool.py --- a/urllib3/connectionpool.py +++ b/urllib3/connectionpool.py @@ -292,9 +292,7 @@ def _make_request(self, conn, method, url, timeout=_Default, read_timeout = timeout_obj.read_timeout # App Engine doesn't have a sock attr - if hasattr(conn, 'sock') and \ - read_timeout is not None and \ - read_timeout is not Timeout.DEFAULT_TIMEOUT: + if hasattr(conn, 'sock'): # In Python 3 socket.py will catch EAGAIN and return None when you # try and read into the file pointer created by http.client, which # instead raises a BadStatusLine exception. Instead of catching @@ -304,7 +302,10 @@ def _make_request(self, conn, method, url, timeout=_Default, raise ReadTimeoutError( self, url, "Read timed out. (read timeout=%s)" % read_timeout) - conn.sock.settimeout(read_timeout) + if read_timeout is Timeout.DEFAULT_TIMEOUT: + conn.sock.settimeout(socket.getdefaulttimeout()) + else: # None or a value + conn.sock.settimeout(read_timeout) # Receive the response from the server try:
diff --git a/test/with_dummyserver/test_connectionpool.py b/test/with_dummyserver/test_connectionpool.py --- a/test/with_dummyserver/test_connectionpool.py +++ b/test/with_dummyserver/test_connectionpool.py @@ -176,6 +176,18 @@ def test_connect_timeout(self): timeout=timeout) + def test_timeout_reset(self): + """ If the read timeout isn't set, socket timeout should reset """ + url = '/sleep?seconds=0.005' + timeout = util.Timeout(connect=0.001) + pool = HTTPConnectionPool(self.host, self.port, timeout=timeout) + conn = pool._get_conn() + try: + pool._make_request(conn, 'GET', url) + except ReadTimeoutError: + self.fail("This request shouldn't trigger a read timeout.") + + @timed(0.1) def test_total_timeout(self): url = '/sleep?seconds=0.005'
read timeout is not properly getting un-set I discovered this issue when digging into the code here: https://github.com/kennethreitz/requests/commit/c64c0ab1215168adf2384888f3d52bd99217d723#diff-31f6e77c031977d33226530924b4337aR303 On my machine at least, when creating a Timeout like this: ``` python from urllib3.util import Timeout t = Timeout(connect=5) make_some_request(timeout=t) ``` the socket timeout for the connect is properly set to 5 seconds. [This line](https://github.com/shazow/urllib3/blob/master/urllib3/connectionpool.py#L297), however, keeps us from resetting the timeout for the read, so it's still at 5 seconds for the read. It should instead be set to `socket.getdefaulttimeout()`.
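For context, a quick sketch of the `Timeout` usage in question; the host and values are placeholders. When only `connect` is given, the read phase should fall back to `socket.getdefaulttimeout()` rather than inheriting the connect value, which is what the patch above fixes; passing an explicit `read` value sidesteps the fallback entirely.

```python
from urllib3 import HTTPConnectionPool
from urllib3.util import Timeout

# Only the connect phase is bounded; the read timeout should fall back to
# the socket default instead of reusing the 5-second connect value.
connect_only = Timeout(connect=5.0)

# Explicit per-phase timeouts avoid relying on the fallback at all.
both_phases = Timeout(connect=5.0, read=30.0)

pool = HTTPConnectionPool("localhost", 8000, timeout=both_phases)
response = pool.request("GET", "/")
print(response.status)
```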
2013-10-18T18:56:26Z
[]
[]
urllib3/urllib3
271
urllib3__urllib3-271
[ "270" ]
74638c0c0f37826b39330dad0a5e573961ff3e71
diff --git a/urllib3/connectionpool.py b/urllib3/connectionpool.py --- a/urllib3/connectionpool.py +++ b/urllib3/connectionpool.py @@ -461,6 +461,13 @@ def urlopen(self, method, url, body=None, headers=None, retries=3, conn = None + # Merge the proxy headers. Only do this in HTTP. We have to copy the + # headers dict so we can safely change it without those changes being + # reflected in anyone else's copy. + if self.scheme == 'http': + headers = headers.copy() + headers.update(self.proxy_headers) + try: # Request a connection from the queue conn = self._get_conn(timeout=pool_timeout) diff --git a/urllib3/poolmanager.py b/urllib3/poolmanager.py --- a/urllib3/poolmanager.py +++ b/urllib3/poolmanager.py @@ -245,12 +245,11 @@ def urlopen(self, method, url, redirect=True, **kw): u = parse_url(url) if u.scheme == "http": - # It's too late to set proxy headers on per-request basis for - # tunnelled HTTPS connections, should use - # constructor's proxy_headers instead. + # For proxied HTTPS requests, httplib sets the necessary headers + # on the CONNECT to the proxy. For HTTP, we'll definitely + # need to set 'Host' at the very least. kw['headers'] = self._set_proxy_headers(url, kw.get('headers', self.headers)) - kw['headers'].update(self.proxy_headers) return super(ProxyManager, self).urlopen(method, url, redirect, **kw)
diff --git a/test/with_dummyserver/test_socketlevel.py b/test/with_dummyserver/test_socketlevel.py --- a/test/with_dummyserver/test_socketlevel.py +++ b/test/with_dummyserver/test_socketlevel.py @@ -205,11 +205,42 @@ def echo_socket_handler(listener): b'GET http://google.com/ HTTP/1.1', b'Host: google.com', b'Accept-Encoding: identity', - b'Accept: */*', b'', b'', ])) + def test_headers(self): + def echo_socket_handler(listener): + sock = listener.accept()[0] + + buf = b'' + while not buf.endswith(b'\r\n\r\n'): + buf += sock.recv(65536) + + sock.send(('HTTP/1.1 200 OK\r\n' + 'Content-Type: text/plain\r\n' + 'Content-Length: %d\r\n' + '\r\n' + '%s' % (len(buf), buf.decode('utf-8'))).encode('utf-8')) + sock.close() + + self._start_server(echo_socket_handler) + base_url = 'http://%s:%d' % (self.host, self.port) + + # Define some proxy headers. + proxy_headers = {'For The Proxy': 'YEAH!'} + proxy = proxy_from_url(base_url, proxy_headers=proxy_headers) + + conn = proxy.connection_from_url('http://www.google.com/') + + r = conn.urlopen('GET', 'http://www.google.com/', assert_same_host=False) + + self.assertEqual(r.status, 200) + # FIXME: The order of the headers is not predictable right now. We + # should fix that someday (maybe when we migrate to + # OrderedDict/MultiDict). + self.assertTrue(b'For The Proxy: YEAH!\r\n' in r.data) + class TestSSL(SocketDummyServerTestCase):
Proxy Headers don't get applied when using ProxyManager.connection_from_url() Brought up in the Requests IRC room: it turns out Requests no longer correctly applies Proxy-Authentication headers to HTTP messages sent via proxies. If you use `ProxyManager.urlopen()`, [this block of code](https://github.com/shazow/urllib3/blob/master/urllib3/poolmanager.py#L247-L253) applies some HTTP headers: ``` python if u.scheme == "http": # It's too late to set proxy headers on per-request basis for # tunnelled HTTPS connections, should use # constructor's proxy_headers instead. kw['headers'] = self._set_proxy_headers(url, kw.get('headers', self.headers)) kw['headers'].update(self.proxy_headers) ``` But no such code exists when using `ProxyManager.connection_from_url`. I'm open to providing code to fix this, but am not sure I can do so tonight, so if someone else would like to, they should feel free.
Sadness. Fix appreciated whenever you can provide. :)
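A minimal sketch of the behaviour being fixed, modelled on the new socket-level test above; the proxy URL and header value are placeholders:

```python
from urllib3 import proxy_from_url

# Headers that should accompany every plain-HTTP request sent through the
# proxy (e.g. proxy auth). For HTTPS targets they go on the CONNECT instead.
proxy = proxy_from_url(
    "http://127.0.0.1:3128",
    proxy_headers={"Proxy-Authorization": "Basic <credentials>"},
)

# After the fix, the proxy headers are applied whether the request goes
# through urlopen() directly or through a pool obtained via
# connection_from_url().
r = proxy.request("GET", "http://example.com/")
print(r.status)
```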
2013-10-22T21:32:45Z
[]
[]
urllib3/urllib3
343
urllib3__urllib3-343
[ "342" ]
5e5ceaf533fc3afa092ced87bcdadb7df7cbaf1a
diff --git a/urllib3/connection.py b/urllib3/connection.py --- a/urllib3/connection.py +++ b/urllib3/connection.py @@ -4,6 +4,7 @@ # This module is part of urllib3 and is released under # the MIT License: http://www.opensource.org/licenses/mit-license.php +import sys import socket from socket import timeout as SocketTimeout @@ -38,6 +39,7 @@ class BaseSSLError(BaseException): ConnectTimeoutError, ) from .packages.ssl_match_hostname import match_hostname +from .packages import six from .util import ( assert_fingerprint, resolve_cert_reqs, @@ -53,27 +55,40 @@ class BaseSSLError(BaseException): class HTTPConnection(_HTTPConnection, object): + """ + Based on httplib.HTTPConnection but provides an extra constructor + backwards-compatibility layer between older and newer Pythons. + """ + default_port = port_by_scheme['http'] # By default, disable Nagle's Algorithm. tcp_nodelay = 1 + def __init__(self, *args, **kw): + if six.PY3: # Python 3 + kw.pop('strict', None) + + if sys.version_info < (2, 7): # Python 2.6 and earlier + kw.pop('source_address', None) + self.source_address = None + + _HTTPConnection.__init__(self, *args, **kw) + def _new_conn(self): """ Establish a socket connection and set nodelay settings on it :return: a new socket connection """ - try: - conn = socket.create_connection( - (self.host, self.port), - self.timeout, - self.source_address, - ) - except AttributeError: # Python 2.6 - conn = socket.create_connection( - (self.host, self.port), - self.timeout, - ) + extra_args = [] + if self.source_address: # Python 2.7+ + extra_args.append(self.source_address) + + conn = socket.create_connection( + (self.host, self.port), + self.timeout, + *extra_args + ) conn.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, self.tcp_nodelay) return conn @@ -95,10 +110,12 @@ class HTTPSConnection(HTTPConnection): def __init__(self, host, port=None, key_file=None, cert_file=None, strict=None, timeout=socket._GLOBAL_DEFAULT_TIMEOUT, source_address=None): - try: - HTTPConnection.__init__(self, host, port, strict, timeout, source_address) - except TypeError: # Python 2.6 - HTTPConnection.__init__(self, host, port, strict, timeout) + + HTTPConnection.__init__(self, host, port, + strict=strict, + timeout=timeout, + source_address=source_address) + self.key_file = key_file self.cert_file = cert_file diff --git a/urllib3/connectionpool.py b/urllib3/connectionpool.py --- a/urllib3/connectionpool.py +++ b/urllib3/connectionpool.py @@ -170,13 +170,9 @@ def _new_conn(self): log.info("Starting new HTTP connection (%d): %s" % (self.num_connections, self.host)) - extra_params = {} - if not six.PY3: # Python 2 - extra_params['strict'] = self.strict - conn = self.ConnectionCls(host=self.host, port=self.port, timeout=self.timeout.connect_timeout, - **extra_params) + strict=self.strict) if self.proxy is not None: # Enable Nagle's algorithm for proxies, to avoid packet # fragmentation. @@ -238,8 +234,9 @@ def _put_conn(self, conn): pass except Full: # This should never happen if self.block == True - log.warning("HttpConnectionPool is full, discarding connection: %s" - % self.host) + log.warning( + "Connection pool is full, discarding connection: %s" % + self.host) # Connection never got put back into the pool, close it. 
if conn: @@ -538,8 +535,8 @@ def urlopen(self, method, url, body=None, headers=None, retries=3, if not conn: # Try again - log.warn("Retrying (%d attempts remain) after connection " - "broken by '%r': %s" % (retries, err, url)) + log.warning("Retrying (%d attempts remain) after connection " + "broken by '%r': %s" % (retries, err, url)) return self.urlopen(method, url, body, headers, retries - 1, redirect, assert_same_host, timeout=timeout, pool_timeout=pool_timeout,
diff --git a/test/__init__.py b/test/__init__.py --- a/test/__init__.py +++ b/test/__init__.py @@ -7,6 +7,7 @@ from urllib3.exceptions import MaxRetryError from urllib3.packages import six + def onlyPY3(test): """Skips this test unless you are on Python3.x""" diff --git a/test/test_compatibility.py b/test/test_compatibility.py new file mode 100644 --- /dev/null +++ b/test/test_compatibility.py @@ -0,0 +1,23 @@ +import unittest +import warnings + +from urllib3.connection import HTTPConnection + + +class TestVersionCompatibility(unittest.TestCase): + def test_connection_strict(self): + with warnings.catch_warnings(record=True) as w: + warnings.simplefilter("always") + + # strict=True is deprecated in Py33+ + conn = HTTPConnection('localhost', 12345, strict=True) + + if w: + self.fail('HTTPConnection raised warning on strict=True: %r' % w[0].message) + + def test_connection_source_address(self): + try: + # source_address does not exist in Py26- + conn = HTTPConnection('localhost', 12345, source_address='127.0.0.1') + except TypeError as e: + self.fail('HTTPConnection raised TypeError on source_adddress: %r' % e) diff --git a/test/test_filepost.py b/test/test_filepost.py --- a/test/test_filepost.py +++ b/test/test_filepost.py @@ -124,7 +124,7 @@ def test_request_fields(self): encoded, content_type = encode_multipart_formdata(fields, boundary=BOUNDARY) - self.assertEquals(encoded, + self.assertEqual(encoded, b'--' + b(BOUNDARY) + b'\r\n' b'Content-Type: image/jpeg\r\n' b'\r\n' diff --git a/test/test_util.py b/test/test_util.py --- a/test/test_util.py +++ b/test/test_util.py @@ -64,7 +64,7 @@ def test_get_host(self): } for url, expected_host in url_host_map.items(): returned_host = get_host(url) - self.assertEquals(returned_host, expected_host) + self.assertEqual(returned_host, expected_host) def test_invalid_host(self): # TODO: Add more tests @@ -110,7 +110,7 @@ def test_parse_url(self): } for url, expected_url in url_host_map.items(): returned_url = parse_url(url) - self.assertEquals(returned_url, expected_url) + self.assertEqual(returned_url, expected_url) def test_parse_url_invalid_IPv6(self): self.assertRaises(ValueError, parse_url, '[::1') @@ -129,7 +129,7 @@ def test_request_uri(self): } for url, expected_request_uri in url_host_map.items(): returned_url = parse_url(url) - self.assertEquals(returned_url.request_uri, expected_request_uri) + self.assertEqual(returned_url.request_uri, expected_request_uri) def test_netloc(self): url_netloc_map = { @@ -140,7 +140,7 @@ def test_netloc(self): } for url, expected_netloc in url_netloc_map.items(): - self.assertEquals(parse_url(url).netloc, expected_netloc) + self.assertEqual(parse_url(url).netloc, expected_netloc) def test_make_headers(self): self.assertEqual( diff --git a/test/with_dummyserver/test_socketlevel.py b/test/with_dummyserver/test_socketlevel.py --- a/test/with_dummyserver/test_socketlevel.py +++ b/test/with_dummyserver/test_socketlevel.py @@ -39,7 +39,7 @@ def multicookie_response_handler(listener): self._start_server(multicookie_response_handler) pool = HTTPConnectionPool(self.host, self.port) r = pool.request('GET', '/', retries=0) - self.assertEquals(r.headers, {'set-cookie': 'foo=1, bar=1'}) + self.assertEqual(r.headers, {'set-cookie': 'foo=1, bar=1'}) class TestSNI(SocketDummyServerTestCase):
DeprecationWarning regarding strict argument in connection.py

When using requests, which includes urllib3, I get this DeprecationWarning that traces back to connection.py. It doesn't break code but just clutters logs.

```
C:\Python33\lib\site-packages\requests\packages\urllib3\connection.py:99: DeprecationWarning: the 'strict' argument isn't supported anymore; http.client now always assumes HTTP/1.x compliant servers.
  HTTPConnection.__init__(self, host, port, strict, timeout, source_address)
```
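As an illustrative stop-gap (not the eventual fix shown in the patch above), the log noise can be silenced until a patched release is installed. The module pattern assumes the urllib3 copy vendored inside requests; adjust it if urllib3 is imported directly.

``` python
import warnings

# Suppress only the DeprecationWarning attributed to the vendored connection
# module, leaving other deprecation warnings visible.
warnings.filterwarnings(
    "ignore",
    category=DeprecationWarning,
    module=r"requests\.packages\.urllib3\.connection",
)
```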
Mmmm looks like a regression of #238/#239. :( We need to add a test/configuration to break on DeprecationWarning which travisci would ideally catch under py3.
2014-02-21T19:36:21Z
[]
[]
urllib3/urllib3
357
urllib3__urllib3-357
[ "355" ]
62ecd1523ec383802cb13b09bd7084d2da997420
diff --git a/urllib3/connectionpool.py b/urllib3/connectionpool.py --- a/urllib3/connectionpool.py +++ b/urllib3/connectionpool.py @@ -23,6 +23,7 @@ ConnectTimeoutError, EmptyPoolError, HostChangedError, + LocationParseError, MaxRetryError, SSLError, TimeoutError, @@ -40,7 +41,6 @@ from .request import RequestMethods from .response import HTTPResponse from .util import ( - assert_fingerprint, get_host, is_connection_dropped, Timeout, @@ -65,6 +65,9 @@ class ConnectionPool(object): QueueCls = LifoQueue def __init__(self, host, port=None): + if host is None: + raise LocationParseError(host) + # httplib doesn't like it when we include brackets in ipv6 addresses host = host.strip('[]') diff --git a/urllib3/util/url.py b/urllib3/util/url.py --- a/urllib3/util/url.py +++ b/urllib3/util/url.py @@ -131,7 +131,7 @@ def parse_url(url): if port: # If given, ports must be integers. if not port.isdigit(): - raise LocationParseError("Failed to parse: %s" % url) + raise LocationParseError(url) port = int(port) else: # Blank ports are cool, too. (rfc3986#section-3.2.3)
diff --git a/test/test_poolmanager.py b/test/test_poolmanager.py --- a/test/test_poolmanager.py +++ b/test/test_poolmanager.py @@ -2,7 +2,10 @@ from urllib3.poolmanager import PoolManager from urllib3 import connection_from_url -from urllib3.exceptions import ClosedPoolError +from urllib3.exceptions import ( + ClosedPoolError, + LocationParseError, +) class TestPoolManager(unittest.TestCase): @@ -63,6 +66,9 @@ def test_manager_clear(self): self.assertEqual(len(p.pools), 0) + def test_nohost(self): + p = PoolManager(5) + self.assertRaises(LocationParseError, p.connection_from_url, 'http://@') if __name__ == '__main__': diff --git a/test/test_util.py b/test/test_util.py --- a/test/test_util.py +++ b/test/test_util.py @@ -77,6 +77,7 @@ def test_invalid_host(self): for location in invalid_host: self.assertRaises(LocationParseError, get_host, location) + def test_parse_url(self): url_host_map = { 'http://google.com/mail': Url('http', host='google.com', path='/mail'), @@ -107,6 +108,7 @@ def test_parse_url(self): 'http://foo:bar@localhost/': Url('http', auth='foo:bar', host='localhost', path='/'), 'http://foo@localhost/': Url('http', auth='foo', host='localhost', path='/'), 'http://foo:bar@baz@localhost/': Url('http', auth='foo:bar@baz', host='localhost', path='/'), + 'http://@': Url('http', host=None, auth='') } for url, expected_url in url_host_map.items(): returned_url = parse_url(url)
"AttributeError: 'NoneType' object has no attribute 'strip'" with malformed URL ``` >>> import urllib3 >>> http = urllib3.PoolManager() >>> r = http.request('GET', 'http://@') Traceback (most recent call last): File "<stdin>", line 1, in <module> File ".../urllib3/request.py", line 75, in request **urlopen_kw) File ".../urllib3/request.py", line 88, in request_encode_url return self.urlopen(method, url, **urlopen_kw) File ".../urllib3/poolmanager.py", line 145, in urlopen conn = self.connection_from_host(u.host, port=u.port, scheme=u.scheme) File ".../urllib3/poolmanager.py", line 119, in connection_from_host pool = self._new_pool(scheme, host, port) File ".../urllib3/poolmanager.py", line 86, in _new_pool return pool_cls(host, port, **kwargs) File ".../urllib3/connectionpool.py", line 226, in __init__ ConnectionPool.__init__(self, host, port) File ".../urllib3/connectionpool.py", line 156, in __init__ host = host.strip('[]') AttributeError: 'NoneType' object has no attribute 'strip' ``` The fundamental issue here seems to be that `connection_from_host` assumes `host` will never be `None`, but ``` >>> urllib3.util.parse_url('http://@') Url(scheme='http', auth='', host=None, port=None, path=None, query=None, fragment=None) ``` I suggest that `LocationParseError` should be thrown instead under these conditions, and that perhaps `parse_url` should do it (since it does not appear to support URL-schemes that don't have a netloc, and netloc-ful schemes with an empty host don't make sense). For the record, this is _not_ academic - I actually encountered a webserver that returned a redirection to "`http://@`" while doing a moderate-scale survey of the interwebs.
Sounds reasonable to me, what do you think @shazow? On thinking about it a little more, maybe it's better to leave `parse_url` alone - it's documented to be usable for relative URLs after all. In that case I dunno whether it makes more sense to throw `LocationParseError` from `PoolManager.connection_from_url` or `ConnectionPool.__init__`. Hmm. IMO `parse_url` should parse an empty-host string if possible as `host=None`. At that point, the Connection-related class should raise the appropriate exception. What do you guys think? (Anyone interested in doing this?)
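A minimal sketch of the suggested behaviour, assuming the guard lives in caller code rather than in `parse_url` itself; the helper name below is illustrative, not part of the library.

``` python
from urllib3.util import parse_url
from urllib3.exceptions import LocationParseError

def host_or_raise(url):
    """Return the parsed host, refusing URLs that parse to an empty host."""
    parsed = parse_url(url)
    if parsed.host is None:
        raise LocationParseError(url)
    return parsed.host

# host_or_raise('http://@') raises LocationParseError instead of letting an
# AttributeError escape from ConnectionPool.__init__.
```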
2014-03-16T06:40:59Z
[]
[]
urllib3/urllib3
399
urllib3__urllib3-399
[ "296" ]
be18de7e8b4702552e8a9e2650b6c2c4dc079a00
diff --git a/urllib3/response.py b/urllib3/response.py --- a/urllib3/response.py +++ b/urllib3/response.py @@ -5,19 +5,16 @@ # the MIT License: http://www.opensource.org/licenses/mit-license.php -import logging import zlib import io +from socket import timeout as SocketTimeout from ._collections import HTTPHeaderDict -from .exceptions import DecodeError +from .exceptions import DecodeError, ReadTimeoutError from .packages.six import string_types as basestring, binary_type from .util import is_fp_closed -log = logging.getLogger(__name__) - - class DeflateDecoder(object): def __init__(self): @@ -178,23 +175,29 @@ def read(self, amt=None, decode_content=None, cache_content=False): flush_decoder = False try: - if amt is None: - # cStringIO doesn't like amt=None - data = self._fp.read() - flush_decoder = True - else: - cache_content = False - data = self._fp.read(amt) - if amt != 0 and not data: # Platform-specific: Buggy versions of Python. - # Close the connection when no data is returned - # - # This is redundant to what httplib/http.client _should_ - # already do. However, versions of python released before - # December 15, 2012 (http://bugs.python.org/issue16298) do - # not properly close the connection in all cases. There is - # no harm in redundantly calling close. - self._fp.close() + try: + if amt is None: + # cStringIO doesn't like amt=None + data = self._fp.read() flush_decoder = True + else: + cache_content = False + data = self._fp.read(amt) + if amt != 0 and not data: # Platform-specific: Buggy versions of Python. + # Close the connection when no data is returned + # + # This is redundant to what httplib/http.client _should_ + # already do. However, versions of python released before + # December 15, 2012 (http://bugs.python.org/issue16298) do + # not properly close the connection in all cases. There is + # no harm in redundantly calling close. + self._fp.close() + flush_decoder = True + + except SocketTimeout: + # FIXME: Ideally we'd like to include the url in the ReadTimeoutError but + # there is yet no clean way to get at it from this context. + raise ReadTimeoutError(self._pool, None, 'Read timed out.') self._fp_bytes_read += len(data) @@ -204,8 +207,7 @@ def read(self, amt=None, decode_content=None, cache_content=False): except (IOError, zlib.error) as e: raise DecodeError( "Received response with content-encoding: %s, but " - "failed to decode it." % content_encoding, - e) + "failed to decode it." % content_encoding, e) if flush_decoder and decode_content and self._decoder: buf = self._decoder.decompress(binary_type()) @@ -296,7 +298,7 @@ def fileno(self): elif hasattr(self._fp, "fileno"): return self._fp.fileno() else: - raise IOError("The file-like object this HTTPResponse is wrapped " + raise IOError("The file-like object this HTTPResponse is wrapped " "around has no file descriptor") def flush(self):
diff --git a/test/with_dummyserver/test_socketlevel.py b/test/with_dummyserver/test_socketlevel.py --- a/test/with_dummyserver/test_socketlevel.py +++ b/test/with_dummyserver/test_socketlevel.py @@ -173,6 +173,32 @@ def socket_handler(listener): finally: socket.setdefaulttimeout(default_timeout) + def test_delayed_body_read_timeout(self): + timed_out = Event() + + def socket_handler(listener): + sock = listener.accept()[0] + buf = b'' + body = 'Hi' + while not buf.endswith(b'\r\n\r\n'): + buf = sock.recv(65536) + sock.send(('HTTP/1.1 200 OK\r\n' + 'Content-Type: text/plain\r\n' + 'Content-Length: %d\r\n' + '\r\n' % len(body)).encode('utf-8')) + + timed_out.wait(timeout=0.5) + sock.send(body.encode('utf-8')) + sock.close() + + self._start_server(socket_handler) + pool = HTTPConnectionPool(self.host, self.port) + + response = pool.urlopen('GET', '/', retries=0, preload_content=False, + timeout=util.Timeout(connect=1, read=0.001)) + self.assertRaises(ReadTimeoutError, response.read) + timed_out.set() + class TestProxyManager(SocketDummyServerTestCase):
socket.timeout not wrapped properly As mentioned in a [comment](https://github.com/kennethreitz/requests/issues/1787#issuecomment-30510686) on kennethreitz/requests#1787 urllib3 apparently lets a `socket.timeout` exception propagate outside library code instead of wrapping it e.g. with `urllib3.exceptions.TimeoutError`. See the linked issue for a full backtrace.
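For context, a hedged sketch of what the wrapping buys callers; the URL is a placeholder for a server that returns headers promptly but stalls while streaming the body.

``` python
import urllib3
from urllib3.exceptions import ReadTimeoutError
from urllib3.util import Timeout

http = urllib3.PoolManager()
try:
    r = http.request('GET', 'http://localhost:5501/slow-body',
                     timeout=Timeout(connect=2.0, read=0.5),
                     preload_content=False)
    body = r.read()  # with the wrapping, a stalled body read raises ReadTimeoutError
except ReadTimeoutError:
    print('read timed out while streaming the body')
```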
I've been going through their code. i wrote some code of my own to timeout: ``` python import requests print requests.__version__ r = requests.get('http://httpbin.org/delay/50', timeout=0.1) ``` ``` 2.1.0 Traceback (most recent call last): File "working_on_req.py", line 7, in <module> r = requests.get('http://httpbin.org/delay/50', timeout=0.1) File "/home/hackawaye/virtual_envs/requests_virtual/local/lib/python2.7/site-packages/requests-2.1.0-py2.7.egg/requests/api.py", line 55, in get return request('get', url, **kwargs) File "/home/hackawaye/virtual_envs/requests_virtual/local/lib/python2.7/site-packages/requests-2.1.0-py2.7.egg/requests/api.py", line 44, in request return session.request(method=method, url=url, **kwargs) File "/home/hackawaye/virtual_envs/requests_virtual/local/lib/python2.7/site-packages/requests-2.1.0-py2.7.egg/requests/sessions.py", line 382, in request resp = self.send(prep, **send_kwargs) File "/home/hackawaye/virtual_envs/requests_virtual/local/lib/python2.7/site-packages/requests-2.1.0-py2.7.egg/requests/sessions.py", line 485, in send r = adapter.send(request, **kwargs) File "/home/hackawaye/virtual_envs/requests_virtual/local/lib/python2.7/site-packages/requests-2.1.0-py2.7.egg/requests/adapters.py", line 381, in send raise Timeout(e) requests.exceptions.Timeout: (<requests.packages.urllib3.connectionpool.HTTPConnectionPool object at 0x13d6490>, 'Connection to httpbin.org timed out. (connect timeout=0.1)') ``` This is all i get. I noticed you also have an auth parameter but i fail to see how it changes the issue. Can you provide me with the url you were trying to get when you ran into this? It was a local connection; there was nothing running on the port when it happened (so it shouldn't have timed out in the first first place but simply ended up in a failed connection) Ok. So i found where the problem is, it should not occur, it is a problem with the socket.py in that the stream is not properly ended and the socket.recv gets confused and halts giving you the timeout. I can wrap it but it will not be a proper fix for this. I tried to submit a fix for it a while back but i can't remember exactly what char i encountered that broke it this way. It might be caused by the server as well. @ThiefMaster does this occur when using urllib3 installed form source and requests installed from source?
2014-05-30T19:07:06Z
[]
[]
urllib3/urllib3
417
urllib3__urllib3-417
[ "144" ]
26e4ef9084adcce1b0f854f5431ed92519d47fb5
diff --git a/urllib3/connectionpool.py b/urllib3/connectionpool.py --- a/urllib3/connectionpool.py +++ b/urllib3/connectionpool.py @@ -23,12 +23,12 @@ ConnectionError, EmptyPoolError, HostChangedError, - LocationParseError, + LocationValueError, MaxRetryError, + ProxyError, + ReadTimeoutError, SSLError, TimeoutError, - ReadTimeoutError, - ProxyError, ) from .packages.ssl_match_hostname import CertificateError from .packages import six @@ -65,13 +65,11 @@ class ConnectionPool(object): QueueCls = LifoQueue def __init__(self, host, port=None): - if host is None: - raise LocationParseError(host) + if not host: + raise LocationValueError("No host specified.") # httplib doesn't like it when we include brackets in ipv6 addresses - host = host.strip('[]') - - self.host = host + self.host = host.strip('[]') self.port = port def __str__(self): diff --git a/urllib3/exceptions.py b/urllib3/exceptions.py --- a/urllib3/exceptions.py +++ b/urllib3/exceptions.py @@ -116,7 +116,12 @@ class ClosedPoolError(PoolError): pass -class LocationParseError(ValueError, HTTPError): +class LocationValueError(ValueError, HTTPError): + "Raised when there is something wrong with a given URL input." + pass + + +class LocationParseError(LocationValueError): "Raised when get_host or similar fails to parse the URL input." def __init__(self, location): diff --git a/urllib3/poolmanager.py b/urllib3/poolmanager.py --- a/urllib3/poolmanager.py +++ b/urllib3/poolmanager.py @@ -14,6 +14,7 @@ from ._collections import RecentlyUsedContainer from .connectionpool import HTTPConnectionPool, HTTPSConnectionPool from .connectionpool import port_by_scheme +from .exceptions import LocationValueError from .request import RequestMethods from .util import parse_url @@ -102,10 +103,11 @@ def connection_from_host(self, host, port=None, scheme='http'): ``urllib3.connectionpool.port_by_scheme``. """ - scheme = scheme or 'http' + if not host: + raise LocationValueError("No host specified.") + scheme = scheme or 'http' port = port or port_by_scheme.get(scheme, 80) - pool_key = (scheme, host, port) with self.pools.lock: @@ -118,6 +120,7 @@ def connection_from_host(self, host, port=None, scheme='http'): # Make a fresh ConnectionPool of the desired type pool = self._new_pool(scheme, host, port) self.pools[pool_key] = pool + return pool def connection_from_url(self, url): diff --git a/urllib3/util/url.py b/urllib3/util/url.py --- a/urllib3/util/url.py +++ b/urllib3/util/url.py @@ -95,6 +95,10 @@ def parse_url(url): # Additionally, this implementations does silly things to be optimal # on CPython. + if not url: + # Empty + return Url() + scheme = None auth = None host = None
diff --git a/test/test_connectionpool.py b/test/test_connectionpool.py --- a/test/test_connectionpool.py +++ b/test/test_connectionpool.py @@ -11,6 +11,7 @@ ClosedPoolError, EmptyPoolError, HostChangedError, + LocationValueError, MaxRetryError, SSLError, ) @@ -186,7 +187,6 @@ def test_pool_close(self): self.assertRaises(Empty, old_pool_queue.get, block=False) - def test_pool_timeouts(self): pool = HTTPConnectionPool(host='localhost') conn = pool._new_conn() @@ -201,6 +201,9 @@ def test_pool_timeouts(self): self.assertEqual(pool.timeout._connect, 3) self.assertEqual(pool.timeout.total, None) + def test_no_host(self): + self.assertRaises(LocationValueError, HTTPConnectionPool, None) + if __name__ == '__main__': unittest.main() diff --git a/test/test_poolmanager.py b/test/test_poolmanager.py --- a/test/test_poolmanager.py +++ b/test/test_poolmanager.py @@ -4,7 +4,7 @@ from urllib3 import connection_from_url from urllib3.exceptions import ( ClosedPoolError, - LocationParseError, + LocationValueError, ) @@ -68,7 +68,8 @@ def test_manager_clear(self): def test_nohost(self): p = PoolManager(5) - self.assertRaises(LocationParseError, p.connection_from_url, 'http://@') + self.assertRaises(LocationValueError, p.connection_from_url, 'http://@') + self.assertRaises(LocationValueError, p.connection_from_url, None) if __name__ == '__main__': diff --git a/test/with_dummyserver/test_connectionpool.py b/test/with_dummyserver/test_connectionpool.py --- a/test/with_dummyserver/test_connectionpool.py +++ b/test/with_dummyserver/test_connectionpool.py @@ -596,7 +596,7 @@ def test_source_address_error(self): pool = HTTPConnectionPool( self.host, self.port, source_address=addr) self.assertRaises( - MaxRetryError, pool.request, 'GET', '/source_address') + MaxRetryError, pool.request, 'GET', '/source_address', retries=1) @onlyPy3 def test_httplib_headers_case_insensitive(self):
Check for a blank host while opening a connection

We can't open an http(s) connection to a host that equals None. I'm not sure if this check should be done in `urllib3.parse_url`, because it can also be used to parse URLs like `file:///tmp/some_file.txt`. Also, see issue #143 for the first part of the discussion.
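The patch above introduces `LocationValueError` for exactly this situation; a small usage sketch (the host value is intentionally invalid):

``` python
import urllib3
from urllib3.exceptions import LocationValueError

try:
    pool = urllib3.HTTPConnectionPool(None)
except LocationValueError as err:
    # Fails fast instead of raising an AttributeError deep inside httplib.
    print('refused to build a pool without a host:', err)
```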
Tests please. :)
2014-06-24T23:27:16Z
[]
[]
urllib3/urllib3
418
urllib3__urllib3-418
[ "207" ]
b81c242cb637b0f63dd29045068f1bbf890a64dc
diff --git a/urllib3/response.py b/urllib3/response.py --- a/urllib3/response.py +++ b/urllib3/response.py @@ -10,9 +10,10 @@ from socket import timeout as SocketTimeout from ._collections import HTTPHeaderDict -from .exceptions import DecodeError, ReadTimeoutError +from .exceptions import ConnectionError, DecodeError, ReadTimeoutError from .packages.six import string_types as basestring, binary_type from .util import is_fp_closed +from .connection import HTTPException class DeflateDecoder(object): @@ -202,6 +203,10 @@ def read(self, amt=None, decode_content=None, cache_content=False): # there is yet no clean way to get at it from this context. raise ReadTimeoutError(self._pool, None, 'Read timed out.') + except HTTPException as e: + # This includes IncompleteRead. + raise ConnectionError('Connection failed: %r' % e, e) + self._fp_bytes_read += len(data) try:
diff --git a/test/with_dummyserver/test_https.py b/test/with_dummyserver/test_https.py --- a/test/with_dummyserver/test_https.py +++ b/test/with_dummyserver/test_https.py @@ -135,11 +135,11 @@ def test_ssl_verified_with_platform_ca_certs(self): try: import urllib3.contrib.pyopenssl except ImportError: - raise SkipTest('This test needs pyopenssl support') + raise SkipTest('Test requires PyOpenSSL') if (urllib3.connection.ssl_wrap_socket is urllib3.contrib.pyopenssl.orig_connection_ssl_wrap_socket): # Not patched - raise SkipTest('This test should only be run after pyopenssl ' + raise SkipTest('Test should only be run after PyOpenSSL ' 'monkey patching') https_pool = HTTPSConnectionPool('httpbin.org', 443, diff --git a/test/with_dummyserver/test_socketlevel.py b/test/with_dummyserver/test_socketlevel.py --- a/test/with_dummyserver/test_socketlevel.py +++ b/test/with_dummyserver/test_socketlevel.py @@ -2,6 +2,7 @@ from urllib3 import HTTPConnectionPool, HTTPSConnectionPool from urllib3.poolmanager import proxy_from_url from urllib3.exceptions import ( + ConnectionError, MaxRetryError, ProxyError, ReadTimeoutError, @@ -16,7 +17,6 @@ from nose.plugins.skip import SkipTest from threading import Event import socket -import time import ssl @@ -196,6 +196,34 @@ def socket_handler(listener): self.assertRaises(ReadTimeoutError, response.read) timed_out.set() + def test_incomplete_response(self): + body = 'Response' + partial_body = body[:2] + + def socket_handler(listener): + sock = listener.accept()[0] + + # Consume request + buf = b'' + while not buf.endswith(b'\r\n\r\n'): + buf = sock.recv(65536) + + # Send partial response and close socket. + sock.send(( + 'HTTP/1.1 200 OK\r\n' + 'Content-Type: text/plain\r\n' + 'Content-Length: %d\r\n' + '\r\n' + '%s' % (len(body), partial_body)).encode('utf-8') + ) + sock.close() + + self._start_server(socket_handler) + pool = HTTPConnectionPool(self.host, self.port) + + response = pool.request('GET', '/', retries=0, preload_content=False) + self.assertRaises(ConnectionError, response.read) + class TestProxyManager(SocketDummyServerTestCase):
This seems to fix the IncompleteRead errors I was getting I'm sometimes getting IncompleteRead errors when using twython which would bring down the connection. Found a fix online and it seems to have solved the issue so far. Fix came from: http://bobrochel.blogspot.co.nz/2010/11/bad-servers-chunked-encoding-and.html
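For illustration, a hedged sketch of the caller-side effect of wrapping the httplib exception (the URL stands in for a server that closes the socket before sending the full body):

``` python
import urllib3
from urllib3.exceptions import HTTPError  # base class of urllib3's exceptions

http = urllib3.PoolManager()
try:
    r = http.request('GET', 'http://localhost:8000/truncated',
                     preload_content=False)
    data = r.read()
except HTTPError as err:
    # With the wrapping in place, a truncated body surfaces as a urllib3
    # exception rather than a bare httplib.IncompleteRead.
    print('response was cut short:', err)
```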
Thanks @beettlle, can you find a way to write a test for this? Check out some of our tests using dummyserver. I think specifically the socket-level tests would work for this. For example: https://github.com/shazow/urllib3/blob/master/test/with_dummyserver/test_socketlevel.py I've looked though the examples and can't seem to figure out how to make the server send a partial message. Is there anyone that might give me some pointers? I imagine we want something like this... ``` python ... def test_incomplete_response(self): body = 'Response' partial_body = body[:2] def socket_handler(listener): sock = listener.accept()[0] # Consume request buf = b'' while not buf.endswith(b'\r\n\r\n'): buf = sock.recv(65536) # Send partial response and close socket. sock.send( 'HTTP/1.1 200 OK\r\n' 'Content-Type: text/plain\r\n' 'Content-Length: %d\r\n' '\r\n' '%s' % (len(body), partial_body)).encode('utf-8') ) sock.close() self._start_server(socket_handler) pool = HTTPConnectionPool(self.host, self.port) response = pool.request('GET', '/', retries=0) self.assertEqual(response.data, partial_body) ``` Seems a bit dangerous to not raise an error in this scenario though. :/ Maybe an exception should be raised with the partial data attached to it? I know that was just a quick little example and not serious code, but there is a bug here that should definitely be considered before any real implementation: ``` buf = b'' while not buf.endswith(b'\r\n\r\n'): buf = sock.recv(65536) ``` If `sock.recv()` consistently returns an empty string at any point (either immediately or before `'\r\n\r\n'` is reached), this will be caught in an infinite loop. @Anorov Would you be willing to make a PR with a test that shows this behaviour and this fix?
2014-06-25T01:25:09Z
[]
[]
urllib3/urllib3
432
urllib3__urllib3-432
[ "196" ]
5aafbc1077108de76b0fe876a511bbba937b579a
diff --git a/urllib3/response.py b/urllib3/response.py --- a/urllib3/response.py +++ b/urllib3/response.py @@ -48,7 +48,10 @@ class HTTPResponse(io.IOBase): HTTP Response container. Backwards-compatible to httplib's HTTPResponse but the response ``body`` is - loaded and decoded on-demand when the ``data`` property is accessed. + loaded and decoded on-demand when the ``data`` property is accessed. This + class is also compatible with the Python standard library's :mod:`io` + module, and can hence be treated as a readable object in the context of that + framework. Extra parameters for behaviour not present in httplib.HTTPResponse: @@ -317,4 +320,14 @@ def flush(self): return self._fp.flush() def readable(self): + # This method is required for `io` module compatibility. return True + + def readinto(self, b): + # This method is required for `io` module compatibility. + temp = self.read(len(b)) + if len(temp) == 0: + return 0 + else: + b[:len(temp)] = temp + return len(temp)
diff --git a/test/test_response.py b/test/test_response.py --- a/test/test_response.py +++ b/test/test_response.py @@ -182,6 +182,37 @@ def test_io_bufferedreader(self): br.close() self.assertEqual(resp.closed, True) + b = b'fooandahalf' + fp = BytesIO(b) + resp = HTTPResponse(fp, preload_content=False) + br = BufferedReader(resp, 5) + + br.read(1) # sets up the buffer, reading 5 + self.assertEqual(len(fp.read()), len(b) - 5) + + # This is necessary to make sure the "no bytes left" part of `readinto` + # gets tested. + while not br.closed: + br.read(5) + + def test_io_readinto(self): + # This test is necessary because in py2.6, `readinto` doesn't get called + # in `test_io_bufferedreader` like it does for all the other python + # versions. Probably this is because the `io` module in py2.6 is an + # old version that has a different underlying implementation. + + + fp = BytesIO(b'foo') + resp = HTTPResponse(fp, preload_content=False) + + barr = bytearray(3) + assert resp.readinto(barr) == 3 + assert b'foo' == barr + + # The reader should already be empty, so this should read nothing. + assert resp.readinto(barr) == 0 + assert b'foo' == barr + def test_streaming(self): fp = BytesIO(b'foo') resp = HTTPResponse(fp, preload_content=False)
Add documentation for HTTPResponse `io` support Reminder for @eteq re #187: > Would you be up for adding a bit to the documentation/comments which talks about the use cases or benefits of this? That would be greatly appreciated. :)
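A short usage sketch of the `io` integration this issue asks to document; the target URL is a placeholder.

``` python
import io
import urllib3

http = urllib3.PoolManager()
r = http.request('GET', 'http://example.com/', preload_content=False)

# HTTPResponse implements readable()/readinto(), so the standard buffered
# wrapper can sit directly on top of it.
reader = io.BufferedReader(r, buffer_size=8 * 1024)
first_bytes = reader.read(64)
reader.close()  # closing the wrapper also closes the underlying response
```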
@eteq Did this ever happen? (Any plans if not?) @shazow has not happened yet, as I've had a very busy travel schedule this summer. Still on my todo list, though. Ah thanks for keeping track of it, then. :) Ping @eteq in case you still want to do this one someday. :) @shazow - It is still on my todo list, I promise, but things keep bubbling up... I will try to push it up a bit so that you can finally close this, though!
2014-07-22T23:46:18Z
[]
[]
urllib3/urllib3
493
urllib3__urllib3-493
[ "492" ]
df4ec5cce1c1d7615f111be30347b67eb27a89ef
diff --git a/urllib3/connectionpool.py b/urllib3/connectionpool.py --- a/urllib3/connectionpool.py +++ b/urllib3/connectionpool.py @@ -278,6 +278,23 @@ def _get_timeout(self, timeout): # can be removed later return Timeout.from_float(timeout) + def _raise_timeout(self, err, url, timeout_value): + """Is the error actually a timeout? Will raise a ReadTimeout or pass""" + + if isinstance(err, SocketTimeout): + raise ReadTimeoutError(self, url, "Read timed out. (read timeout=%s)" % timeout_value) + + # See the above comment about EAGAIN in Python 3. In Python 2 we have + # to specifically catch it and throw the timeout error + if hasattr(err, 'errno') and err.errno in _blocking_errnos: + raise ReadTimeoutError(self, url, "Read timed out. (read timeout=%s)" % timeout_value) + + # Catch possible read timeouts thrown as SSL errors. If not the + # case, rethrow the original. We need to do this because of: + # http://bugs.python.org/issue10272 + if 'timed out' in str(err) or 'did not complete (read)' in str(err): # Python 2.6 + raise ReadTimeoutError(self, url, "Read timed out. (read timeout=%s)" % timeout_value) + def _make_request(self, conn, method, url, timeout=_Default, **httplib_request_kw): """ @@ -301,7 +318,12 @@ def _make_request(self, conn, method, url, timeout=_Default, conn.timeout = timeout_obj.connect_timeout # Trigger any extra validation we need to do. - self._validate_conn(conn) + try: + self._validate_conn(conn) + except (SocketTimeout, BaseSSLError) as e: + # Py2 raises this as a BaseSSLError, Py3 raises it as socket timeout. + self._raise_timeout(err=e, url=url, timeout_value=conn.timeout) + raise # conn.request() calls httplib.*.request, not the method in # urllib3.request. It also calls makefile (recv) on the socket. @@ -331,28 +353,8 @@ def _make_request(self, conn, method, url, timeout=_Default, httplib_response = conn.getresponse(buffering=True) except TypeError: # Python 2.6 and older httplib_response = conn.getresponse() - except SocketTimeout: - raise ReadTimeoutError( - self, url, "Read timed out. (read timeout=%s)" % read_timeout) - - except BaseSSLError as e: - # Catch possible read timeouts thrown as SSL errors. If not the - # case, rethrow the original. We need to do this because of: - # http://bugs.python.org/issue10272 - if 'timed out' in str(e) or \ - 'did not complete (read)' in str(e): # Python 2.6 - raise ReadTimeoutError( - self, url, "Read timed out. (read timeout=%s)" % read_timeout) - - raise - - except SocketError as e: # Platform-specific: Python 2 - # See the above comment about EAGAIN in Python 3. In Python 2 we - # have to specifically catch it and throw the timeout error - if e.errno in _blocking_errnos: - raise ReadTimeoutError( - self, url, "Read timed out. (read timeout=%s)" % read_timeout) - + except (SocketTimeout, BaseSSLError, SocketError) as e: + self._raise_timeout(err=e, url=url, timeout_value=read_timeout) raise # AppEngine doesn't have a version attr.
diff --git a/test/with_dummyserver/test_socketlevel.py b/test/with_dummyserver/test_socketlevel.py --- a/test/with_dummyserver/test_socketlevel.py +++ b/test/with_dummyserver/test_socketlevel.py @@ -137,6 +137,24 @@ def socket_handler(listener): finally: timed_out.set() + def test_https_connection_read_timeout(self): + """ Handshake timeouts should fail with a Timeout""" + timed_out = Event() + def socket_handler(listener): + sock = listener.accept()[0] + while not sock.recv(65536): + pass + + timed_out.wait() + sock.close() + + self._start_server(socket_handler) + pool = HTTPSConnectionPool(self.host, self.port, timeout=0.001, retries=False) + try: + self.assertRaises(ReadTimeoutError, pool.request, 'GET', '/') + finally: + timed_out.set() + def test_timeout_errors_cause_retries(self): def socket_handler(listener): sock_timeout = listener.accept()[0]
timeout can be raised during _validate_conn Steps to reproduce: - set up a port to accept incoming TCP traffic, and never send any data back (for example port 5501 on [hamms](https://github.com/kevinburke/hamms)) - make an HTTPS request to the port with a timeout ``` python import urllib3, certifi http = urllib3.PoolManager(cert_reqs='CERT_REQUIRED', ca_certs=certifi.where()) r = http.request('GET', 'https://localhost:5501/', timeout=3) ``` The traceback is roughly this: ``` python File "urllib3/request.py", line 68, in request **urlopen_kw) File "urllib3/request.py", line 81, in request_encode_url return self.urlopen(method, url, **urlopen_kw) File "urllib3/poolmanager.py", line 153, in urlopen response = conn.urlopen(method, u.request_uri, **kw) File "urllib3/connectionpool.py", line 516, in urlopen body=body, headers=headers) File "urllib3/connectionpool.py", line 304, in _make_request self._validate_conn(conn) File "urllib3/connectionpool.py", line 722, in _validate_conn conn.connect() File "urllib3/connection.py", line 237, in connect ssl_version=resolved_ssl_version) File "urllib3/util/ssl_.py", line 132, in ssl_wrap_socket ssl_version=ssl_version) File "/usr/local/Cellar/python/2.7.8_1/Frameworks/Python.framework/Versions/2.7/lib/python2.7/ssl.py", line 392, in wrap_socket ciphers=ciphers) File "/usr/local/Cellar/python/2.7.8_1/Frameworks/Python.framework/Versions/2.7/lib/python2.7/ssl.py", line 148, in __init__ self.do_handshake() File "/usr/local/Cellar/python/2.7.8_1/Frameworks/Python.framework/Versions/2.7/lib/python2.7/ssl.py", line 310, in do_handshake self._sslobj.do_handshake() ``` The error is a SSLError but it seems like this should be a timeout: > SSLError: [Errno 0] The handshake operation timed out We have logic to catch SSLErrors and raise them as timeouts, which would catch this: ``` except BaseSSLError as e: # Catch possible read timeouts thrown as SSL errors. If not the # case, rethrow the original. We need to do this because of: # http://bugs.python.org/issue10272 if 'timed out' in str(e) or \ 'did not complete (read)' in str(e): # Python 2.6 raise ReadTimeoutError( self, url, "Read timed out. (read timeout=%s)" % read_timeout) raise ``` Unfortunately this code block doesn't catch this timeout which is raised during `_validate_conn`. There are a few options here. - duplicate the `if "timed out" in ...` logic around `_validate_conn` - move the `if "timed out" in ...` logic up to urlopen. Existing read timeout errors would get caught in the `except (BaseSSLError, CertificateError) as e` block in urlopen, and not have the connection closed/replaced. - do nothing and leave "handshake timed out" errors as SSLErrors.
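A hedged sketch of the behaviour the proposed refactor aims for, reusing the hamms listener on port 5501 mentioned above (a port that accepts TCP connections and never responds):

``` python
import certifi
import urllib3
from urllib3.exceptions import MaxRetryError, ReadTimeoutError

http = urllib3.PoolManager(cert_reqs='CERT_REQUIRED', ca_certs=certifi.where())
try:
    http.request('GET', 'https://localhost:5501/', timeout=3, retries=0)
except (ReadTimeoutError, MaxRetryError) as err:
    # With the timeout handling shared around _validate_conn(), a hung TLS
    # handshake surfaces as a timeout rather than a bare SSLError.
    print('handshake timed out:', err)
```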
That's super-thorough, thanks Kevin! > set up a port to accept incoming TCP traffic, and never send any data back On the bright side, would be trivial to write a socket-level test for this. :D > There are a few options here. [...] Maybe we should pull out the "re-raise as TimeoutError" sub-blocks in the giant try/except into somekind of `raise_timeout(exception)` helper which does the checking and re-raising that we can re-use in/around `_validate_conn()`. Then, ``` python except SocketTimeout: ... except BaseSSLError as e: ... raise except SocketError as e: ... raise ``` Will become, ``` python except (SocketTimeout, SocketError, BaseSSLError) as e: raise_timeout(e) raise ``` Thoughts?
2014-10-28T03:00:03Z
[]
[]
urllib3/urllib3
501
urllib3__urllib3-501
[ "500" ]
b18e02d82685d9604d993786cbd945b593b9131e
diff --git a/urllib3/connectionpool.py b/urllib3/connectionpool.py --- a/urllib3/connectionpool.py +++ b/urllib3/connectionpool.py @@ -539,9 +539,12 @@ def urlopen(self, method, url, body=None, headers=None, retries=None, raise EmptyPoolError(self, "No pool connections are available.") except (BaseSSLError, CertificateError) as e: - # Release connection unconditionally because there is no way to - # close it externally in case of exception. - release_conn = True + # Close the connection. If a connection is reused on which there + # was a Certificate error, the next request will certainly raise + # another Certificate error. + if conn: + conn.close() + conn = None raise SSLError(e) except (TimeoutError, HTTPException, SocketError, ConnectionError) as e:
diff --git a/test/with_dummyserver/test_https.py b/test/with_dummyserver/test_https.py --- a/test/with_dummyserver/test_https.py +++ b/test/with_dummyserver/test_https.py @@ -330,6 +330,8 @@ def test_enhanced_ssl_connection(self): https_pool._make_request(conn, 'GET', '/') def test_ssl_correct_system_time(self): + self._pool.cert_reqs = 'CERT_REQUIRED' + self._pool.ca_certs = DEFAULT_CA with warnings.catch_warnings(record=True) as w: warnings.simplefilter('always') self._pool.request('GET', '/') @@ -337,6 +339,8 @@ def test_ssl_correct_system_time(self): self.assertEqual([], w) def test_ssl_wrong_system_time(self): + self._pool.cert_reqs = 'CERT_REQUIRED' + self._pool.ca_certs = DEFAULT_CA with mock.patch('urllib3.connection.datetime') as mock_date: mock_date.date.today.return_value = datetime.date(1970, 1, 1) @@ -370,6 +374,12 @@ def test_ssl_version_as_short_string(self): self._pool.ssl_version = 'SSLv3' self.assertRaises(SSLError, self._pool.request, 'GET', '/') + def test_discards_connection_on_sslerror(self): + self._pool.cert_reqs = 'CERT_REQUIRED' + self.assertRaises(SSLError, self._pool.request, 'GET', '/') + self._pool.ca_certs = DEFAULT_CA + self._pool.request('GET', '/') + class TestHTTPS_NoSAN(HTTPSDummyServerTestCase): certs = NO_SAN_CERTS
Connection retrieved from connection pool has wrong cert_reqs (based on previous request)

Based on the issue on the requests library: https://github.com/kennethreitz/requests/issues/2314 I've found that when you want to perform a request with `self.cert_reqs=='CERT_NONE'`, sometimes (if there has been a previous connection with CERT_REQUIRED) the connection retrieved from the connection pool has CERT_REQUIRED:

```
> /Users/raulcd/Projects/requests/requests/packages/urllib3/connectionpool.py(511)urlopen()
    510     # Request a connection from the queue.
--> 511     conn = self._get_conn(timeout=pool_timeout)
    512
ipdb> self.cert_reqs
'CERT_NONE'
ipdb> n
ipdb> conn.cert_reqs
'CERT_REQUIRED'
```

So the request should have `CERT_NONE`, but the connection retrieved from the connection pool has `CERT_REQUIRED`. I'll work on a fix.
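This mirrors the regression test added in the test patch above: after a certificate failure, the next request must get a freshly configured connection. Host, port, and CA path below are placeholders.

``` python
import urllib3
from urllib3.exceptions import SSLError

pool = urllib3.HTTPSConnectionPool('localhost', 443, cert_reqs='CERT_REQUIRED')
try:
    pool.request('GET', '/')          # fails: no CA bundle configured yet
except SSLError:
    pass

pool.ca_certs = '/path/to/ca.pem'     # point at a valid CA bundle (placeholder)
pool.request('GET', '/')              # succeeds on a freshly built connection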
2014-11-05T00:49:31Z
[]
[]
urllib3/urllib3
516
urllib3__urllib3-516
[ "513" ]
497085f4186b43450d72bc62b57e2bbf581c8f27
diff --git a/urllib3/request.py b/urllib3/request.py --- a/urllib3/request.py +++ b/urllib3/request.py @@ -118,18 +118,24 @@ def request_encode_body(self, method, url, fields=None, headers=None, which is used to compose the body of the request. The random boundary string can be explicitly set with the ``multipart_boundary`` parameter. """ - if encode_multipart: - body, content_type = encode_multipart_formdata( - fields or {}, boundary=multipart_boundary) - else: - body, content_type = (urlencode(fields or {}), - 'application/x-www-form-urlencoded') - if headers is None: headers = self.headers - headers_ = {'Content-Type': content_type} - headers_.update(headers) + extra_kw = {'headers': {}} + + if fields: + if 'body' in urlopen_kw: + raise TypeError('request got values for both \'fields\' and \'body\', can only specify one.') + + if encode_multipart: + body, content_type = encode_multipart_formdata(fields, boundary=multipart_boundary) + else: + body, content_type = urlencode(fields), 'application/x-www-form-urlencoded' + + extra_kw['body'] = body + extra_kw['headers'] = {'Content-Type': content_type} + + extra_kw['headers'].update(headers) + extra_kw.update(urlopen_kw) - return self.urlopen(method, url, body=body, headers=headers_, - **urlopen_kw) + return self.urlopen(method, url, **extra_kw)
diff --git a/test/with_dummyserver/test_connectionpool.py b/test/with_dummyserver/test_connectionpool.py --- a/test/with_dummyserver/test_connectionpool.py +++ b/test/with_dummyserver/test_connectionpool.py @@ -98,6 +98,13 @@ def test_one_name_multiple_values(self): r = self.pool.request('POST', '/echo', fields=fields) self.assertEqual(r.data.count(b'name="foo"'), 2) + def test_request_method_body(self): + body = b'hi' + r = self.pool.request('POST', '/echo', body=body) + self.assertEqual(r.data, body) + + fields = [('hi', 'hello')] + self.assertRaises(TypeError, self.pool.request, 'POST', '/echo', body=body, fields=fields) def test_unicode_upload(self): fieldname = u('myfile')
Support body param in RequestMethods.request

The easiest way to do this is to avoid defining a `body` kwarg when no fields are given; then, if both are given, it will naturally raise a "passed twice" error.
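A usage sketch of what the change enables; the endpoint and payload are illustrative.

``` python
import urllib3

http = urllib3.PoolManager()

# A raw body goes straight through request(); no fields/multipart encoding step.
r = http.request('POST', 'http://httpbin.org/post',
                 body=b'{"hi": "hello"}',
                 headers={'Content-Type': 'application/json'})

# Passing both at once is rejected:
# http.request('POST', url, body=b'hi', fields={'hi': 'hello'})  -> TypeError
```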
We don't want people to have to do sad things like:

``` python
if body is None:
    return self.pool.request("POST", *args, fields=fields, **kwargs)
else:
    return self.pool.urlopen("POST", *args, body=body, **kwargs)
```
2014-11-27T20:38:52Z
[]
[]
urllib3/urllib3
526
urllib3__urllib3-526
[ "524" ]
a27758625e4169330fcf965652b1093faf5aaaa2
diff --git a/urllib3/util/ssl_.py b/urllib3/util/ssl_.py --- a/urllib3/util/ssl_.py +++ b/urllib3/util/ssl_.py @@ -211,7 +211,9 @@ def create_urllib3_context(ssl_version=None, cert_reqs=ssl.CERT_REQUIRED, context.verify_mode = cert_reqs if getattr(context, 'check_hostname', None) is not None: # Platform-specific: Python 3.2 - context.check_hostname = (context.verify_mode == ssl.CERT_REQUIRED) + # We do our own verification, including fingerprints and alternative + # hostnames. So disable it here + context.check_hostname = False return context
diff --git a/test/with_dummyserver/test_https.py b/test/with_dummyserver/test_https.py --- a/test/with_dummyserver/test_https.py +++ b/test/with_dummyserver/test_https.py @@ -240,6 +240,15 @@ def test_verify_none_and_good_fingerprint(self): '7A:F2:8A:D7:1E:07:33:67:DE' https_pool.request('GET', '/') + def test_good_fingerprint_and_hostname_mismatch(self): + https_pool = HTTPSConnectionPool('127.0.0.1', self.port, + cert_reqs='CERT_REQUIRED', + ca_certs=DEFAULT_CA) + + https_pool.assert_fingerprint = 'CC:45:6A:90:82:F7FF:C0:8218:8e:' \ + '7A:F2:8A:D7:1E:07:33:67:DE' + https_pool.request('GET', '/') + @requires_network def test_https_timeout(self): timeout = Timeout(connect=0.001)
assert_hostname=False seems to be ignored in 1.10

I have some code that relies on assert_hostname=False to work. I upgraded urllib3 to version 1.10 and the code fails with `SSLError: hostname 'remote-host' doesn't match 'localhost'`. I haven't looked through the code to try to determine why this happens. Did anyone else notice this? If so, what is the fix? Essentially I am creating a connection pool like this:

``` python
import urllib3

http = urllib3.PoolManager(cert_reqs='CERT_REQUIRED', ca_certs="cert.pem",
                           assert_hostname=False)
try:
    r = http.request('GET', 'https://remote-host:4443/')
    print("Certificate verification NO HOSTNAME successful")
except urllib3.exceptions.SSLError as e:
    print("SSL Error:", e)
    return -1
return 0
```
One quick question: 1. Does this pass _right now_ with an older version of urllib3? Which platform are you using? It works for me. I am trying with Linux, python2.6 (to have no SNI) and this code: ``` py import urllib3 http = urllib3.PoolManager(cert_reqs='CERT_REQUIRED', assert_hostname=False, ca_certs='/etc/ssl/certs/ca-certificates.crt') try: r = http.request('GET', 'https://sni.velox.ch') print(r.data) print("Certificate verification NO HOSTNAME successful") except urllib3.exceptions.SSLError as e: print("SSL Error:", e) ``` @Lukasa : I downgraded urllib3 using `easy_install3 urllib3==1.9.1` and the code passed. @t-8ch : I was running this under Linux, python3.4.0. What happens with your code if you put `assert_hostname=True`? Does it fail in that case? @alecz20 `assert_hostname=True` is not a valid parameter Could you try: `assert_hostname='localhost'` (It also seems, that conceptually you may want to use `assert_fingerprint`)
2014-12-19T16:34:27Z
[]
[]
urllib3/urllib3
532
urllib3__urllib3-532
[ "529" ]
a27758625e4169330fcf965652b1093faf5aaaa2
diff --git a/dummyserver/server.py b/dummyserver/server.py --- a/dummyserver/server.py +++ b/dummyserver/server.py @@ -50,6 +50,7 @@ class SocketServerThread(threading.Thread): def __init__(self, socket_handler, host='localhost', port=8081, ready_event=None): threading.Thread.__init__(self) + self.daemon = True self.socket_handler = socket_handler self.host = host diff --git a/urllib3/connectionpool.py b/urllib3/connectionpool.py --- a/urllib3/connectionpool.py +++ b/urllib3/connectionpool.py @@ -558,6 +558,14 @@ def urlopen(self, method, url, body=None, headers=None, retries=None, conn = None raise SSLError(e) + except SSLError: + # Treat SSLError separately from BaseSSLError to preserve + # traceback. + if conn: + conn.close() + conn = None + raise + except (TimeoutError, HTTPException, SocketError, ConnectionError) as e: if conn: # Discard the connection for these exceptions. It will be
diff --git a/test/with_dummyserver/test_socketlevel.py b/test/with_dummyserver/test_socketlevel.py --- a/test/with_dummyserver/test_socketlevel.py +++ b/test/with_dummyserver/test_socketlevel.py @@ -521,6 +521,43 @@ def socket_handler(listener): finally: timed_out.set() + def test_ssl_failed_fingerprint_verification(self): + def socket_handler(listener): + for i in range(2): + sock = listener.accept()[0] + ssl_sock = ssl.wrap_socket(sock, + server_side=True, + keyfile=DEFAULT_CERTS['keyfile'], + certfile=DEFAULT_CERTS['certfile'], + ca_certs=DEFAULT_CA) + + ssl_sock.send(b'HTTP/1.1 200 OK\r\n' + b'Content-Type: text/plain\r\n' + b'Content-Length: 5\r\n\r\n' + b'Hello') + + ssl_sock.close() + sock.close() + + self._start_server(socket_handler) + # GitHub's fingerprint. Valid, but not matching. + fingerprint = ('A0:C4:A7:46:00:ED:A7:2D:C0:BE:CB' + ':9A:8C:B6:07:CA:58:EE:74:5E') + + def request(): + try: + pool = HTTPSConnectionPool(self.host, self.port, + assert_fingerprint=fingerprint) + response = pool.urlopen('GET', '/', preload_content=False, + timeout=Timeout(connect=1, read=0.001)) + response.read() + finally: + pool.close() + + self.assertRaises(SSLError, request) + # Should not hang, see https://github.com/shazow/urllib3/issues/529 + self.assertRaises(SSLError, request) + def consume_socket(sock, chunks=65536): while not sock.recv(chunks).endswith(b'\r\n\r\n'):
urllib3 keeps connection open after failed fingerprint validation

Steps to reproduce:

1. Launch a single-threaded HTTPS server, like the one from Werkzeug/Flask, with ad-hoc certificate generation.
2. Paste this into a file called lol.py:

   ```
   from urllib3.poolmanager import PoolManager

   def x():
       # This is GitHub's certificate fingerprint. This function should not exit successfully.
       fingerprint = 'A0:C4:A7:46:00:ED:A7:2D:C0:BE:CB:9A:8C:B6:07:CA:58:EE:74:5E'
       p = PoolManager(assert_fingerprint=fingerprint)
       r = p.request('GET', 'https://127.0.0.1:5000')
       p.clear()  # doesn't seem to matter
   ```

3. Launch `python -i lol.py`, in a Python 3 virtualenv with the latest PyPI-release of urllib3 installed.
4. Execute `x()` once, an exception for the mismatching fingerprint is thrown.
5. Execute `x()` again, the function doesn't terminate. Werkzeug is not responding to it because it is busy with another connection, but that one should've been closed.

Notes:

- If one kills the Python process where urllib3 is used, the connections are terminated and the server is usable again.
- This is Python-3-specific.
- This was found during usage with requests, which has the exact same problem. See https://github.com/untitaker/vdirsyncer/pull/161
Also, here's a simple Flask app which will suffice for this test: ``` import flask app = flask.Flask(__name__) @app.route('/') def index(): return "haha!" app.run(ssl_context='adhoc') ``` +1 to closing connections after a failed validation. Should be reasonably easy to port this test to our test_socketlevel.py suite. @untitaker Would you like to work on this PR to fix it? Yes, but please don't expect results until next week. @untitaker This is open source, we expect nothing but love. :) Not sure where to start. The code at https://github.com/shazow/urllib3/blob/master/urllib3/connection.py#L241 raises the exception which hangs the connection, but I am not sure why it would cause any problems... `match_hostname` also raises exceptions of the same type, and they don't cause problems. Or do they? Hm not sure why an exception raised would hang a connection. @Lukasa @sigmavirus24 Any thoughts? Raised exceptions can cause the interpreter to fail to exit in a threaded case. If we're not talking threading, the only thing I could think of is that the connection gets re-used after having been left in a bad way. Otherwise... The example above is not threaded though. In fact this works, in the sense that certificate verification fails but no connections are leaked: ``` from urllib3.poolmanager import PoolManager import certifi def x(): p = PoolManager(cert_reqs='CERT_REQUIRED', ca_certs=certifi.where()) r = p.request('GET', 'https://127.0.0.1:5000') p.clear() # doesn't seem to matter ``` It seems though that the traceback for this test is originating from somewhere completely different. @untitaker maybe you are hitting #524? (sorry I am on my phone) Yeah, I also just noticed in a local test, but the example above doesn't use it. What completely irritates me that CA verification is already done when calling `ssl_wrap_socket` https://github.com/shazow/urllib3/blob/master/urllib3/connection.py#L234, while a few lines below, the same thing is done. For this kind of TB to obtain, I had to apply [this patch](https://gist.github.com/untitaker/c0df8493db7fba23799c). Have you seen #529? Lol that's the issue you're posting in. Hmpf, indeed. I meant #526 It seems that raising any exceptions in `Connection.connect` after calling `self.sock.getpeercert` causes this connection leak. Applying #526 doesn't help with both examples. This fixes the issue for me: ``` diff diff --git a/urllib3/connectionpool.py b/urllib3/connectionpool.py index 8bdf228..98b75f8 100644 --- a/urllib3/connectionpool.py +++ b/urllib3/connectionpool.py @@ -549,7 +549,7 @@ class HTTPConnectionPool(ConnectionPool, RequestMethods): # Timed out by queue. raise EmptyPoolError(self, "No pool connections are available.") - except (BaseSSLError, CertificateError) as e: + except (BaseSSLError, CertificateError, SSLError) as e: # Close the connection. If a connection is reused on which there # was a Certificate error, the next request will certainly raise # another Certificate error. ``` The problem is, that `BaseSSLError` comes from the stdlib `ssl` module and `CertificateError` from `backports.ssl`. So we don't catch our own stuff. Maybe we should clean this up. And get a test. Neat. Working on PR...
2015-01-10T14:25:57Z
[]
[]
urllib3/urllib3
540
urllib3__urllib3-540
[ "539" ]
e6f5d881e55e24d69889110de09b24aa50a7cdaf
diff --git a/urllib3/util/ssl_.py b/urllib3/util/ssl_.py --- a/urllib3/util/ssl_.py +++ b/urllib3/util/ssl_.py @@ -1,5 +1,5 @@ from binascii import hexlify, unhexlify -from hashlib import md5, sha1 +from hashlib import md5, sha1, sha256 from ..exceptions import SSLError @@ -96,7 +96,8 @@ def assert_fingerprint(cert, fingerprint): # this digest. hashfunc_map = { 16: md5, - 20: sha1 + 20: sha1, + 32: sha256, } fingerprint = fingerprint.replace(':', '').lower()
diff --git a/test/with_dummyserver/test_https.py b/test/with_dummyserver/test_https.py --- a/test/with_dummyserver/test_https.py +++ b/test/with_dummyserver/test_https.py @@ -202,6 +202,16 @@ def test_assert_fingerprint_sha1(self): '7A:F2:8A:D7:1E:07:33:67:DE' https_pool.request('GET', '/') + def test_assert_fingerprint_sha256(self): + https_pool = HTTPSConnectionPool('localhost', self.port, + cert_reqs='CERT_REQUIRED', + ca_certs=DEFAULT_CA) + + https_pool.assert_fingerprint = ('9A:29:9D:4F:47:85:1C:51:23:F5:9A:A3:' + '0F:5A:EF:96:F9:2E:3C:22:2E:FC:E8:BC:' + '0E:73:90:37:ED:3B:AA:AB') + https_pool.request('GET', '/') + def test_assert_invalid_fingerprint(self): https_pool = HTTPSConnectionPool('127.0.0.1', self.port, cert_reqs='CERT_REQUIRED',
assert_fingerprint SHA256 support is missing assert_fingerprint only seems to support MD5 and SHA1. Would it be possible to add SHA256 support to it?
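Once 32-byte digests are accepted, usage looks the same as with MD5/SHA-1 fingerprints. The value below is the dummy-server digest from the accompanying test patch, not a fingerprint to reuse elsewhere.

``` python
import urllib3

pool = urllib3.HTTPSConnectionPool(
    'localhost', 443,
    assert_fingerprint=('9A:29:9D:4F:47:85:1C:51:23:F5:9A:A3:0F:5A:EF:96:'
                        'F9:2E:3C:22:2E:FC:E8:BC:0E:73:90:37:ED:3B:AA:AB'))
r = pool.request('GET', '/')
```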
I believe we support only what the ssl library supports. If pyOpenSSL supports it, you could try that instead.
2015-01-31T20:02:43Z
[]
[]
urllib3/urllib3
545
urllib3__urllib3-545
[ "514" ]
bc8bfdf51ea113b6728c047840ce9890e9de8d0d
diff --git a/urllib3/connectionpool.py b/urllib3/connectionpool.py --- a/urllib3/connectionpool.py +++ b/urllib3/connectionpool.py @@ -72,6 +72,21 @@ def __str__(self): return '%s(host=%r, port=%r)' % (type(self).__name__, self.host, self.port) + def __enter__(self): + return self + + def __exit__(self, exc_type, exc_val, exc_tb): + self.close() + # Return False to re-raise any potential exceptions + return False + + def close(): + """ + Close all pooled connections and disable the pool. + """ + pass + + # This is taken from http://hg.python.org/cpython/file/7aaba721ebc0/Lib/socket.py#l252 _blocking_errnos = set([errno.EAGAIN, errno.EWOULDBLOCK]) diff --git a/urllib3/poolmanager.py b/urllib3/poolmanager.py --- a/urllib3/poolmanager.py +++ b/urllib3/poolmanager.py @@ -64,6 +64,14 @@ def __init__(self, num_pools=10, headers=None, **connection_pool_kw): self.pools = RecentlyUsedContainer(num_pools, dispose_func=lambda p: p.close()) + def __enter__(self): + return self + + def __exit__(self, exc_type, exc_val, exc_tb): + self.clear() + # Return False to re-raise any potential exceptions + return False + def _new_pool(self, scheme, host, port): """ Create a new :class:`ConnectionPool` based on host, port and scheme.
diff --git a/test/test_connectionpool.py b/test/test_connectionpool.py --- a/test/test_connectionpool.py +++ b/test/test_connectionpool.py @@ -205,6 +205,26 @@ def test_pool_timeouts(self): def test_no_host(self): self.assertRaises(LocationValueError, HTTPConnectionPool, None) + def test_contextmanager(self): + with connection_from_url('http://google.com:80') as pool: + # Populate with some connections + conn1 = pool._get_conn() + conn2 = pool._get_conn() + conn3 = pool._get_conn() + pool._put_conn(conn1) + pool._put_conn(conn2) + + old_pool_queue = pool.pool + + self.assertEqual(pool.pool, None) + + self.assertRaises(ClosedPoolError, pool._get_conn) + + pool._put_conn(conn3) + + self.assertRaises(ClosedPoolError, pool._get_conn) + + self.assertRaises(Empty, old_pool_queue.get, block=False) if __name__ == '__main__': diff --git a/test/test_poolmanager.py b/test/test_poolmanager.py --- a/test/test_poolmanager.py +++ b/test/test_poolmanager.py @@ -71,6 +71,22 @@ def test_nohost(self): self.assertRaises(LocationValueError, p.connection_from_url, 'http://@') self.assertRaises(LocationValueError, p.connection_from_url, None) + def test_contextmanager(self): + with PoolManager(1) as p: + conn_pool = p.connection_from_url('http://google.com') + self.assertEqual(len(p.pools), 1) + conn = conn_pool._get_conn() + + self.assertEqual(len(p.pools), 0) + + self.assertRaises(ClosedPoolError, conn_pool._get_conn) + + conn_pool._put_conn(conn) + + self.assertRaises(ClosedPoolError, conn_pool._get_conn) + + self.assertEqual(len(p.pools), 0) + if __name__ == '__main__': unittest.main()
Add Context Managers The classes like `PoolManager` and `ConnectionPool` and such should act as context managers to make it easier to clean up the resources used by them.
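A sketch of the usage this request is after, based on the context-manager support added in the patch above; the URL is a placeholder.

``` python
import urllib3

with urllib3.PoolManager(num_pools=2) as http:
    r = http.request('GET', 'http://example.com/')
    print(r.status)
# Leaving the block clears the manager, closing any pooled connections.
```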
Makes sense, +1.
2015-02-06T11:52:13Z
[]
[]
urllib3/urllib3
560
urllib3__urllib3-560
[ "550" ]
9dafd9b96b74c3a33c4bce88a53ed5a3d0518073
diff --git a/urllib3/exceptions.py b/urllib3/exceptions.py --- a/urllib3/exceptions.py +++ b/urllib3/exceptions.py @@ -162,3 +162,8 @@ class SystemTimeWarning(SecurityWarning): class InsecurePlatformWarning(SecurityWarning): "Warned when certain SSL configuration is not available on a platform." pass + + +class ResponseNotChunked(ProtocolError, ValueError): + "Response needs to be chunked in order to read it as chunks." + pass diff --git a/urllib3/response.py b/urllib3/response.py --- a/urllib3/response.py +++ b/urllib3/response.py @@ -1,9 +1,15 @@ +try: + import http.client as httplib +except ImportError: + import httplib import zlib import io from socket import timeout as SocketTimeout from ._collections import HTTPHeaderDict -from .exceptions import ProtocolError, DecodeError, ReadTimeoutError +from .exceptions import ( + ProtocolError, DecodeError, ReadTimeoutError, ResponseNotChunked +) from .packages.six import string_types as basestring, binary_type, PY3 from .connection import HTTPException, BaseSSLError from .util.response import is_fp_closed @@ -117,8 +123,17 @@ def __init__(self, body='', headers=None, status=0, version=0, reason=None, if hasattr(body, 'read'): self._fp = body - if preload_content and not self._body: - self._body = self.read(decode_content=decode_content) + # Are we using the chunked-style of transfer encoding? + self.chunked = False + self.chunk_left = None + tr_enc = self.headers.get('transfer-encoding', '') + if tr_enc.lower() == "chunked": + self.chunked = True + + # We certainly don't want to preload content when the response is chunked. + if not self.chunked: + if preload_content and not self._body: + self._body = self.read(decode_content=decode_content) def get_redirect_location(self): """ @@ -269,11 +284,15 @@ def stream(self, amt=2**16, decode_content=None): If True, will attempt to decode the body based on the 'content-encoding' header. """ - while not is_fp_closed(self._fp): - data = self.read(amt=amt, decode_content=decode_content) + if self.chunked: + for line in self.read_chunked(amt): + yield line + else: + while not is_fp_closed(self._fp): + data = self.read(amt=amt, decode_content=decode_content) - if data: - yield data + if data: + yield data @classmethod def from_httplib(ResponseCls, r, **response_kw): @@ -351,3 +370,59 @@ def readinto(self, b): else: b[:len(temp)] = temp return len(temp) + + def read_chunked(self, amt=None): + # FIXME: Rewrite this method and make it a class with + # a better structured logic. + if not self.chunked: + raise ResponseNotChunked("Response is not chunked. " + "Header 'transfer-encoding: chunked' is missing.") + while True: + # First, we'll figure out length of a chunk and then + # we'll try to read it from socket. + if self.chunk_left is None: + line = self._fp.fp.readline() + line = line.decode() + # See RFC 7230: Chunked Transfer Coding. + i = line.find(';') + if i >= 0: + line = line[:i] # Strip chunk-extensions. + try: + self.chunk_left = int(line, 16) + except ValueError: + # Invalid chunked protocol response, abort. + self.close() + raise httplib.IncompleteRead(''.join(line)) + if self.chunk_left == 0: + break + if amt is None: + chunk = self._fp._safe_read(self.chunk_left) + yield chunk + self._fp._safe_read(2) # Toss the CRLF at the end of the chunk. 
+ self.chunk_left = None + elif amt < self.chunk_left: + value = self._fp._safe_read(amt) + self.chunk_left = self.chunk_left - amt + yield value + elif amt == self.chunk_left: + value = self._fp._safe_read(amt) + self._fp._safe_read(2) # Toss the CRLF at the end of the chunk. + self.chunk_left = None + yield value + else: # amt > self.chunk_left + yield self._fp._safe_read(self.chunk_left) + self._fp._safe_read(2) # Toss the CRLF at the end of the chunk. + self.chunk_left = None + + # Chunk content ends with \r\n: discard it. + while True: + line = self._fp.fp.readline() + if not line: + # Some sites may not end with '\r\n'. + break + if line == b'\r\n': + break + + # We read everything; close the "file". + self.close() +
diff --git a/test/test_response.py b/test/test_response.py --- a/test/test_response.py +++ b/test/test_response.py @@ -2,8 +2,12 @@ from io import BytesIO, BufferedReader +try: + import http.client as httplib +except ImportError: + import httplib from urllib3.response import HTTPResponse -from urllib3.exceptions import DecodeError +from urllib3.exceptions import DecodeError, ResponseNotChunked from base64 import b64decode @@ -156,10 +160,6 @@ def test_body_blob(self): def test_io(self): import socket - try: - from http.client import HTTPResponse as OldHTTPResponse - except: - from httplib import HTTPResponse as OldHTTPResponse fp = BytesIO(b'foo') resp = HTTPResponse(fp, preload_content=False) @@ -174,7 +174,7 @@ def test_io(self): # Try closing with an `httplib.HTTPResponse`, because it has an # `isclosed` method. - hlr = OldHTTPResponse(socket.socket()) + hlr = httplib.HTTPResponse(socket.socket()) resp2 = HTTPResponse(hlr, preload_content=False) self.assertEqual(resp2.closed, False) resp2.close() @@ -406,11 +406,200 @@ def close(self): self.assertEqual(next(stream), b'o') self.assertRaises(StopIteration, next, stream) + def test_mock_transfer_encoding_chunked(self): + stream = [b"fo", b"o", b"bar"] + fp = MockChunkedEncodingResponse(stream) + r = httplib.HTTPResponse(MockSock) + r.fp = fp + resp = HTTPResponse(r, preload_content=False, headers={'transfer-encoding': 'chunked'}) + + i = 0 + for c in resp.stream(): + self.assertEqual(c, stream[i]) + i += 1 + + def test_mock_transfer_encoding_chunked_custom_read(self): + stream = [b"foooo", b"bbbbaaaaar"] + fp = MockChunkedEncodingResponse(stream) + r = httplib.HTTPResponse(MockSock) + r.fp = fp + r.chunked = True + r.chunk_left = None + resp = HTTPResponse(r, preload_content=False, headers={'transfer-encoding': 'chunked'}) + expected_response = [b'fo', b'oo', b'o', b'bb', b'bb', b'aa', b'aa', b'ar'] + response = list(resp.read_chunked(2)) + if getattr(self, "assertListEqual", False): + self.assertListEqual(expected_response, response) + else: + for index, item in enumerate(response): + v = expected_response[index] + self.assertEqual(item, v) + + def test_mock_transfer_encoding_chunked_unlmtd_read(self): + stream = [b"foooo", b"bbbbaaaaar"] + fp = MockChunkedEncodingResponse(stream) + r = httplib.HTTPResponse(MockSock) + r.fp = fp + r.chunked = True + r.chunk_left = None + resp = HTTPResponse(r, preload_content=False, headers={'transfer-encoding': 'chunked'}) + if getattr(self, "assertListEqual", False): + self.assertListEqual(stream, list(resp.read_chunked())) + else: + for index, item in enumerate(resp.read_chunked()): + v = stream[index] + self.assertEqual(item, v) + + def test_read_not_chunked_response_as_chunks(self): + fp = BytesIO(b'foo') + resp = HTTPResponse(fp, preload_content=False) + r = resp.read_chunked() + self.assertRaises(ResponseNotChunked, next, r) + + def test_invalid_chunks(self): + stream = [b"foooo", b"bbbbaaaaar"] + fp = MockChunkedInvalidEncoding(stream) + r = httplib.HTTPResponse(MockSock) + r.fp = fp + r.chunked = True + r.chunk_left = None + resp = HTTPResponse(r, preload_content=False, headers={'transfer-encoding': 'chunked'}) + self.assertRaises(httplib.IncompleteRead, next, resp.read_chunked()) + + def test_chunked_response_without_crlf_on_end(self): + stream = [b"foo", b"bar", b"baz"] + fp = MockChunkedEncodingWithoutCRLFOnEnd(stream) + r = httplib.HTTPResponse(MockSock) + r.fp = fp + r.chunked = True + r.chunk_left = None + resp = HTTPResponse(r, preload_content=False, headers={'transfer-encoding': 
'chunked'}) + if getattr(self, "assertListEqual", False): + self.assertListEqual(stream, list(resp.stream())) + else: + for index, item in enumerate(resp.stream()): + v = stream[index] + self.assertEqual(item, v) + + def test_chunked_response_with_extensions(self): + stream = [b"foo", b"bar"] + fp = MockChunkedEncodingWithExtensions(stream) + r = httplib.HTTPResponse(MockSock) + r.fp = fp + r.chunked = True + r.chunk_left = None + resp = HTTPResponse(r, preload_content=False, headers={'transfer-encoding': 'chunked'}) + if getattr(self, "assertListEqual", False): + self.assertListEqual(stream, list(resp.stream())) + else: + for index, item in enumerate(resp.stream()): + v = stream[index] + self.assertEqual(item, v) + def test_get_case_insensitive_headers(self): headers = {'host': 'example.com'} r = HTTPResponse(headers=headers) self.assertEqual(r.headers.get('host'), 'example.com') self.assertEqual(r.headers.get('Host'), 'example.com') + +class MockChunkedEncodingResponse(object): + + def __init__(self, content): + """ + content: collection of str, each str is a chunk in response + """ + self.content = content + self.index = 0 # This class iterates over self.content. + self.closed = False + self.cur_chunk = b'' + self.chunks_exhausted = False + + @staticmethod + def _encode_chunk(chunk): + return '%X\r\n%s\r\n' % (len(chunk), chunk.decode()) + + def _pop_new_chunk(self): + if self.chunks_exhausted: + return b"" + try: + chunk = self.content[self.index] + except IndexError: + chunk = b'' + self.chunks_exhausted = True + else: + self.index += 1 + encoded_chunk = self._encode_chunk(chunk) + return encoded_chunk.encode() + + def pop_current_chunk(self, amt=-1, till_crlf=False): + if amt > 0 and till_crlf: + raise ValueError("Can't specify amt and till_crlf.") + if len(self.cur_chunk) <= 0: + self.cur_chunk = self._pop_new_chunk() + if till_crlf: + try: + i = self.cur_chunk.index(b"\r\n") + except ValueError: + # No CRLF in current chunk -- probably caused by encoder. + self.cur_chunk = b"" + return b"" + else: + chunk_part = self.cur_chunk[:i+2] + self.cur_chunk = self.cur_chunk[i+2:] + return chunk_part + elif amt <= -1: + chunk_part = self.cur_chunk + self.cur_chunk = b'' + return chunk_part + else: + try: + chunk_part = self.cur_chunk[:amt] + except IndexError: + chunk_part = self.cur_chunk + self.cur_chunk = b'' + else: + self.cur_chunk = self.cur_chunk[amt:] + return chunk_part + + def readline(self): + return self.pop_current_chunk(till_crlf=True) + + def read(self, amt=-1): + return self.pop_current_chunk(amt) + + def flush(self): + # Python 3 wants this method. + pass + + def close(self): + self.closed = True + + +class MockChunkedInvalidEncoding(MockChunkedEncodingResponse): + + def _encode_chunk(self, chunk): + return 'ZZZ\r\n%s\r\n' % chunk.decode() + + +class MockChunkedEncodingWithoutCRLFOnEnd(MockChunkedEncodingResponse): + + def _encode_chunk(self, chunk): + return '%X\r\n%s%s' % (len(chunk), chunk.decode(), + "\r\n" if len(chunk) > 0 else "") + + +class MockChunkedEncodingWithExtensions(MockChunkedEncodingResponse): + + def _encode_chunk(self, chunk): + return '%X;asd=qwe\r\n%s\r\n' % (len(chunk), chunk.decode()) + + +class MockSock(object): + @classmethod + def makefile(cls, *args, **kwargs): + return + + if __name__ == '__main__': unittest.main()
[rfe] provide access to chunks when server responds with `Transfer-Encoding: chunked` I've opened [an issue](https://github.com/kennethreitz/requests/issues/2449) at [requests](https://github.com/kennethreitz/requests) but was advised to move it here. There it goes: When server responds with `Transfer-Encoding: chunked`, it would be nice if `urllib3` provided access to chunks sent by server. Therefore upper layers, `requests`, could process those encoded chunks and developers wouldn't need to decode those chunks. More info: http://en.wikipedia.org/wiki/Chunked_transfer_encoding http://mihai.ibanescu.net/chunked-encoding-and-python-requests
Hello again! So, the core problem here is `httplib`. We can achieve this if we grab direct access to the socket underlying `httplib` and decode the chunking ourselves. @shazow, is this a thing you want? HMMM!! Stealing the socket from httplib is not a thing I have considered. If it can be done in a reliable/safe way, that could be worthwhile. :) ...how about going even deeper and ask for RFE in `httplib`? ![tzos8](https://cloud.githubusercontent.com/assets/6292/6263461/18abc880-b7cd-11e4-9e74-3301abaee074.gif) `httplib` should be viewed as a source of pain from which no light or joy can escape. @shazow has repeatedly asked me to rewrite `httplib`. One day I might even do that. =) I can see there is already code in `httplib` which [reads chunked responses](https://hg.python.org/cpython/file/2.7/Lib/httplib.py#l590). The problem is, it doesn't return generator. Also, looking at [this blog post](http://blog.dowski.com/2008/04/02/reading-chunked-http11-responses/), one should be able to change response class, which looks kinda hacky (but doesn't require patching httplib) That might be the better way to go about it. @Lukasa @shazow does it mean that I can write a patch which could be merged at some point (or you would be wiling to work on it?). Just to be clear: I don't mind providing the patch. Shouldn't be that hard, I hope. Ideally you'd write the patch. I'm busy rewriting httplib. ;) @TomasTomecek Patches are always appreciated. :) Keep in mind that we have a 100% test coverage policy for merging. Feel free to open a PR and ping it whenever you'd like us to take a look at it, we're happy to help you along the way.
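For readers unfamiliar with the wire format being discussed, here is a minimal, self-contained decoder for the chunked framing defined in RFC 7230 section 4.1: each chunk is a hexadecimal length line, the data, and a CRLF, terminated by a zero-length chunk. This is a sketch for illustration only, not the code that ended up in the PR.

```python
from io import BytesIO

def iter_chunks(fp):
    """Yield the decoded chunks of a chunked-encoded body read from *fp*."""
    while True:
        size_line = fp.readline().split(b';', 1)[0]   # strip chunk extensions
        size = int(size_line, 16)
        if size == 0:
            break                                     # terminating zero chunk
        yield fp.read(size)
        fp.read(2)   # discard the CRLF that ends every chunk

body = BytesIO(b"4\r\nWiki\r\n5\r\npedia\r\n0\r\n\r\n")
print(list(iter_chunks(body)))   # [b'Wiki', b'pedia']
```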
2015-03-04T10:00:27Z
[]
[]
urllib3/urllib3
563
urllib3__urllib3-563
[ "561" ]
6f0d66e75ec33b2c809e9d63caa7355741ceeed7
diff --git a/urllib3/_collections.py b/urllib3/_collections.py --- a/urllib3/_collections.py +++ b/urllib3/_collections.py @@ -20,8 +20,6 @@ def __exit__(self, exc_type, exc_value, traceback): __all__ = ['RecentlyUsedContainer', 'HTTPHeaderDict'] -MULTIPLE_HEADERS_ALLOWED = frozenset(['cookie', 'set-cookie', 'set-cookie2']) - _Null = object() @@ -143,7 +141,10 @@ class HTTPHeaderDict(dict): def __init__(self, headers=None, **kwargs): dict.__init__(self) if headers is not None: - self.extend(headers) + if isinstance(headers, HTTPHeaderDict): + self._copy_from(headers) + else: + self.extend(headers) if kwargs: self.extend(kwargs) @@ -223,11 +224,8 @@ def add(self, key, val): vals.append(val) else: # vals should be a tuple then, i.e. only one item so far - if key_lower in MULTIPLE_HEADERS_ALLOWED: - # Need to convert the tuple to list for further extension - _dict_setitem(self, key_lower, [vals[0], vals[1], val]) - else: - _dict_setitem(self, key_lower, new_vals) + # Need to convert the tuple to list for further extension + _dict_setitem(self, key_lower, [vals[0], vals[1], val]) def extend(*args, **kwargs): """Generic import function for any type of header-like object. @@ -276,14 +274,17 @@ def getlist(self, key): def __repr__(self): return "%s(%s)" % (type(self).__name__, dict(self.itermerged())) - def copy(self): - clone = type(self)() - for key in self: - val = _dict_getitem(self, key) + def _copy_from(self, other): + for key in other: + val = _dict_getitem(other, key) if isinstance(val, list): # Don't need to convert tuples val = list(val) - _dict_setitem(clone, key, val) + _dict_setitem(self, key, val) + + def copy(self): + clone = type(self)() + clone._copy_from(self) return clone def iteritems(self):
diff --git a/test/test_collections.py b/test/test_collections.py --- a/test/test_collections.py +++ b/test/test_collections.py @@ -144,35 +144,32 @@ def setUp(self): self.d.add('cookie', 'bar') def test_overwriting_with_setitem_replaces(self): - d = HTTPHeaderDict() + self.d['Cookie'] = 'foo' + self.assertEqual(self.d['cookie'], 'foo') - d['Cookie'] = 'foo' - self.assertEqual(d['cookie'], 'foo') - - d['cookie'] = 'bar' - self.assertEqual(d['Cookie'], 'bar') + self.d['cookie'] = 'bar' + self.assertEqual(self.d['Cookie'], 'bar') def test_copy(self): h = self.d.copy() self.assertTrue(self.d is not h) self.assertEqual(self.d, h) + + def test_getlist_after_copy(self): + self.assertEqual(self.d.getlist('cookie'), HTTPHeaderDict(self.d).getlist('cookie')) - def test_add_multiple_allowed(self): - d = HTTPHeaderDict() - d['Cookie'] = 'foo' - d.add('cookie', 'bar') - - self.assertEqual(d['cookie'], 'foo, bar') - self.assertEqual(d['Cookie'], 'foo, bar') + def test_add_well_known_multiheader(self): + self.d.add('COOKIE', 'asdf') + self.assertEqual(self.d.getlist('cookie'), ['foo', 'bar', 'asdf']) + self.assertEqual(self.d['cookie'], 'foo, bar, asdf') - d.add('cookie', 'asdf') - self.assertEqual(d['cookie'], 'foo, bar, asdf') + def test_add_comma_separated_multiheader(self): + self.d.add('bar', 'foo') + self.d.add('BAR', 'bar') + self.d.add('Bar', 'asdf') + self.assertEqual(self.d.getlist('bar'), ['foo', 'bar', 'asdf']) + self.assertEqual(self.d['bar'], 'foo, bar, asdf') - def test_add_multiple_not_allowed(self): - self.d.add('notmulti', 'should be overwritten on next add call') - self.d.add('notmulti', 'new val') - self.assertEqual(self.d['notmulti'], 'new val') - def test_extend(self): self.d.extend([('set-cookie', '100'), ('set-cookie', '200'), ('set-cookie', '300')]) self.assertEqual(self.d['set-cookie'], '100, 200, 300')
duplicate/multiple headers are lost When the server returns multiple headers of the same type, duplicated headers are lost. Example: WWW-Authenticate: Negotiate WWW-Authenticate: NTLM Results in only NTLM in the www-authenticate header.
Seems to be a result of https://github.com/shazow/urllib3/pull/544#issuecomment-73410346 I have tests broken in my apps as well but didn't have time to debug. For now I froze urllib3 version to 1.10.1. I might have time to take a look on it later today. Arg, we need some better tests for this. Hopefully somebody (@ml31415?) has a chance to take a look at this soon, otherwise I'll roll back #544 and push a hotfix release. @shazow <3 @lukasa fwiw, I want to hold off on a 2.5.5 (requests) release until this is sorted out @sigmavirus24 Good call, we should aim to fix this sharpish then. So looking at the HTTPHeaderDict, there's some real weirdness in its logic that I didn't catch before. For example, its docstrings are fabrications. Here's the docstring for `HTTPHeaderDict.add()`: ``` python """Adds a (name, value) pair, doesn't overwrite the value if it already exists. >>> headers = HTTPHeaderDict(foo='bar') >>> headers.add('Foo', 'baz') >>> headers['foo'] 'bar, baz' """ ``` Here's the code from my terminal: ``` python >>> from urllib3._collections import HTTPHeaderDict >>> d = HTTPHeaderDict(foo='bar') >>> d.add('foo', 'baz') >>> d['foo'] 'baz' ``` The bigger problem is that it's based on a misunderstanding of the RFCs. There's a fixed list of header fields that are allowed to have multiple values in the code: `cookie`, `set-cookie`, and `set-cookie2`. The problem is that RFC 7230 section 3.2.2 reads: > A sender MUST NOT generate multiple header fields with the same field name in a message unless either the entire field value for that header field is defined as a comma-separated list [i.e., #(values)] or the header field is a well-known exception (as noted below). This frozenset includes the well known exceptions, but does not allow for the fact that any header defined as a comma-separated list (which is many of them) are _also_ allowed. In the short term, I recommend backing out #544 because it's provably wrong. Given that I need one for `hyper` as well, it might be worth us commonalising a HTTP header dict that doesn't try to be too clever to use as a baseline, with a well-defined suite of unit tests that confirm its behaviour. At that point we could revisit the performance improvements in #544, guaranteeing that function isn't regressed. In the defense of #544, that lying docstring is a relic from the previous implementation, though it's quite conspicuous that it is unchanged in the middle of a large set of changes. Would be also good to remove (or at least hide) the 1.10.2 from PyPI. I have a fix in #562 but I'm pretty sure the tests will fail. I also didn't add any new ones. I tested most of this by hand. Next step is to add actual tests and make sure they all pass Comma separated values are eaten without problems from #544. For me it still looks like #544 works as specified, and it rather looks like the corresponding server implementations don't. I had asked somewhere in the other discussion, if I should add the frozenset to enforce the duplicate headers only to this well known ones, as it's indeed questionable, if the specs should be followed strictly, when some (many?) implementations don't follow it. Removing this check is not a big deal. In that case, any special headers or frozenset won't be necessary at all, it's a three lines change. #544 was carefully tweaked for spec compatibility and performance, not sure what's the idea of reverting most of it's logic with #562. Yes, the docstring never got fixed, though. 
Concerning the change in `__getitem__` on multiple headers, I had tried to follow the previous behaviour as good as possible, still not throwing away information. Only returning duplicate headers on `getlist` would be fine to me, but I guess other complaints will follow. Another note. Maybe it would be worth to discuss and specify a desired behaviour internally, before doing further hasty changes. It's not `urllib3` doing something wrong here. It should be clearly defined, when and where to ignore the spec, and why, and how to behave in that cases. @ml31415 I'm not proposing to ignore the spec, I'm proposing that the HTTPHeaderDict doesn't behave correctly because it eats headers that appear twice. The following is a totally valid header list: ``` content-length: 300 server: h2o/1.0.2-alpha1 connection: keep-alive www-authenticate: basic www-authenticate: digest ``` When passed to the HTTPHeaderDict, the dict from #544 eats that pair of headers, providing only one, where the previous implementation would have merged them. Agree that the failure was mine in code review, but we should definitely fix it now. Agree with @Lukasa, this bug breaks authentication schemes when the server provides multiple authentication methods with multiple "WWW-Authenticate" headers. Hmm, sorry for messing this up! Looks like it comes down to what the spec means with "entire field value for that header field"? So far, to me the field value was everything followed after the colon to a line end not followed by space. Thinking about it now, looks like I was horribly wrong with my interpretation. The fix for this would be, to simply remove the field name check in `add`, a three lines change, or to add all csl-allowed headers in the frozen set. Would the well known exceptions, probably containing cookies with commas, require any special handling? Probably not, comma-joining them on `__getitem__` might still be the best option, and `getlist` is there for help.
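A small usage sketch of the behaviour under discussion, assuming the post-fix semantics and the 1.x import path for `HTTPHeaderDict` (both may differ in other urllib3 versions): duplicate headers are kept, `getlist` exposes the individual values, and indexing returns the comma-joined view.

```python
from urllib3._collections import HTTPHeaderDict   # import path may vary by version

h = HTTPHeaderDict()
h.add('WWW-Authenticate', 'Negotiate')
h.add('WWW-Authenticate', 'NTLM')

print(h['www-authenticate'])           # 'Negotiate, NTLM' (comma-joined view)
print(h.getlist('WWW-Authenticate'))   # ['Negotiate', 'NTLM'] (nothing is lost)
```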
2015-03-05T13:19:28Z
[]
[]
urllib3/urllib3
569
urllib3__urllib3-569
[ "568" ]
58cd35eb8eb1108d56ffc00e48c79a2888526daa
diff --git a/dummyserver/server.py b/dummyserver/server.py --- a/dummyserver/server.py +++ b/dummyserver/server.py @@ -13,6 +13,9 @@ import sys import threading import socket +import warnings + +from urllib3.exceptions import HTTPWarning from tornado.platform.auto import set_close_exec import tornado.wsgi @@ -40,6 +43,11 @@ # Different types of servers we have: +class NoIPv6Warning(HTTPWarning): + "IPv6 is not available" + pass + + class SocketServerThread(threading.Thread): """ :param socket_handler: Callable which receives a socket argument for one @@ -57,7 +65,12 @@ def __init__(self, socket_handler, host='localhost', port=8081, self.ready_event = ready_event def _start_server(self): - sock = socket.socket(socket.AF_INET6) + if socket.has_ipv6: + sock = socket.socket(socket.AF_INET6) + else: + warnings.warn("No IPv6 support. Falling back to IPv4.", + NoIPv6Warning) + sock = socket.socket(socket.AF_INET) if sys.platform != 'win32': sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) sock.bind((self.host, 0))
diff --git a/test/__init__.py b/test/__init__.py --- a/test/__init__.py +++ b/test/__init__.py @@ -13,7 +13,8 @@ # Reset. SO suggests this hostname TARPIT_HOST = '10.255.255.1' -VALID_SOURCE_ADDRESSES = [('::1', 0), ('127.0.0.1', 0)] +# (Arguments for socket, is it IPv6 address?) +VALID_SOURCE_ADDRESSES = [(('::1', 0), True), (('127.0.0.1', 0), False)] # RFC 5737: 192.0.2.0/24 is for testing only. # RFC 3849: 2001:db8::/32 is for documentation only. INVALID_SOURCE_ADDRESSES = [('192.0.2.255', 0), ('2001:db8::1', 0)] diff --git a/test/with_dummyserver/test_connectionpool.py b/test/with_dummyserver/test_connectionpool.py --- a/test/with_dummyserver/test_connectionpool.py +++ b/test/with_dummyserver/test_connectionpool.py @@ -4,6 +4,7 @@ import sys import unittest import time +import warnings import mock @@ -35,6 +36,7 @@ import tornado from dummyserver.testcase import HTTPDummyServerTestCase +from dummyserver.server import NoIPv6Warning from nose.tools import timed @@ -597,7 +599,11 @@ def test_dns_error(self): self.assertRaises(MaxRetryError, pool.request, 'GET', '/test', retries=2) def test_source_address(self): - for addr in VALID_SOURCE_ADDRESSES: + for addr, is_ipv6 in VALID_SOURCE_ADDRESSES: + if is_ipv6 and not socket.has_ipv6: + warnings.warn("No IPv6 support: skipping.", + NoIPv6Warning) + continue pool = HTTPConnectionPool(self.host, self.port, source_address=addr, retries=False) r = pool.request('GET', '/source_address')
tests are stuck when running `make test` I tried to use google's public dns as suggested by @sigmavirus24, but it didn't help. ``` shell $ ip a 1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN group default link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00 inet 127.0.0.1/8 brd 127.255.255.255 scope host lo valid_lft forever preferred_lft forever inet6 ::1/128 scope host valid_lft forever preferred_lft forever 2: enp5s0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP group default qlen 1000 inet 192.168.0.17/24 brd 192.168.0.255 scope global enp5s0 valid_lft forever preferred_lft forever inet6 fe80::beae:c5ff:fe65:dd71/64 scope link valid_lft forever preferred_lft forever $ cat /etc/resolv.conf nameserver 8.8.8.8 nameserver 8.8.4.4 ``` strace: ``` shell $ strace -p 12676 Process 12676 attached futex(0xbf8dd0, FUTEX_WAIT_PRIVATE, 0, NULL ``` This is gentoo, wicd and python 2.7. I think I had the same problem with Fedora and NetworkManager. Also, last couple of lines from output: ``` test_oldapi (test.with_dummyserver.test_proxy_poolmanager.TestHTTPProxyManager) ... ok test_proxy_conn_fail (test.with_dummyserver.test_proxy_poolmanager.TestHTTPProxyManager) ... ok test_proxy_pooling (test.with_dummyserver.test_proxy_poolmanager.TestHTTPProxyManager) ... ok test_proxy_pooling_ext (test.with_dummyserver.test_proxy_poolmanager.TestHTTPProxyManager) ... ok test_proxy_verified (test.with_dummyserver.test_proxy_poolmanager.TestHTTPProxyManager) ... ok test_redirect (test.with_dummyserver.test_proxy_poolmanager.TestHTTPProxyManager) ... ok test_multi_setcookie (test.with_dummyserver.test_socketlevel.TestCookies) ... Exception in thread Thread-8: Traceback (most recent call last): File "/usr/lib64/python2.7/threading.py", line 810, in __bootstrap_inner self.run() File "/home/tt/dev/urllib3/dummyserver/server.py", line 76, in run self.server = self._start_server() File "/home/tt/dev/urllib3/dummyserver/server.py", line 63, in _start_server sock.bind((self.host, 0)) File "/usr/lib64/python2.7/socket.py", line 224, in meth return getattr(self._sock,name)(*args) error: getsockaddrarg: bad family ```
That's perplexing, bad family feels like a very weird error to raise. I wonder if the 'bind port 0' is causing a problem here. looks like an IPv6 issue. Applying this, I can run the whole suite. ``` diff --- a/dummyserver/server.py +++ b/dummyserver/server.py @@ -57,7 +57,7 @@ class SocketServerThread(threading.Thread): self.ready_event = ready_event def _start_server(self): - sock = socket.socket(socket.AF_INET6) + sock = socket.socket(socket.AF_INET) if sys.platform != 'win32': sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) sock.bind((self.host, 0)) ``` One test fails still: ``` ERROR: test_source_address (test.with_dummyserver.test_connectionpool.TestConnectionPool) ---------------------------------------------------------------------- Traceback (most recent call last): File "/home/tt/dev/urllib3/test/with_dummyserver/test_connectionpool.py", line 603, in test_source_address r = pool.request('GET', '/source_address') File "/home/tt/dev/urllib3/urllib3/request.py", line 68, in request **urlopen_kw) File "/home/tt/dev/urllib3/urllib3/request.py", line 81, in request_encode_url return self.urlopen(method, url, **urlopen_kw) File "/home/tt/dev/urllib3/urllib3/connectionpool.py", line 597, in urlopen _stacktrace=sys.exc_info()[2]) File "/home/tt/dev/urllib3/urllib3/util/retry.py", line 222, in increment raise six.reraise(type(error), error, _stacktrace) File "/home/tt/dev/urllib3/urllib3/connectionpool.py", line 544, in urlopen body=body, headers=headers) File "/home/tt/dev/urllib3/urllib3/connectionpool.py", line 349, in _make_request conn.request(method, url, **httplib_request_kw) File "/usr/lib64/python2.7/httplib.py", line 1001, in request self._send_request(method, url, body, headers) File "/usr/lib64/python2.7/httplib.py", line 1035, in _send_request self.endheaders(body) File "/usr/lib64/python2.7/httplib.py", line 997, in endheaders self._send_output(message_body) File "/usr/lib64/python2.7/httplib.py", line 850, in _send_output self.send(msg) File "/usr/lib64/python2.7/httplib.py", line 812, in send self.connect() File "/home/tt/dev/urllib3/urllib3/connection.py", line 155, in connect conn = self._new_conn() File "/home/tt/dev/urllib3/urllib3/connection.py", line 134, in _new_conn (self.host, self.port), self.timeout, **extra_kw) File "/home/tt/dev/urllib3/urllib3/util/connection.py", line 88, in create_connection raise err ProtocolError: ('Connection aborted.', gaierror(-9, 'Address family for hostname not supported')) -------------------- >> begin captured logging << -------------------- urllib3.util.retry: DEBUG: Converted retries value: False -> Retry(total=False, connect=None, read=None, redirect=0) urllib3.connectionpool: INFO: Starting new HTTP connection (1): localhost ``` So it would seem that whatever machine you're using doesn't support IPv6. That's very surprising. (That or the version of python you're running was compiled without that support.) @sigmavirus24 you are right. Python is compiled without IPv6 support. Is this bug then? Shouldn't those tests just skip? These tests are written expecting IPv6 support since most modern systems should have that. If @shazow and @Lukasa feel that supporting Pythons that don't support IPv6 is something they want to do, then yes this is a bug. In other words, not my call. ;) Ideally we should skip IPv6 dependent tests on platforms that don't support it. Seems like we can test that pretty easily by attempting to bind an IPv6 socket. Might be worth doing that? 
/me quietly adds "ipv6" to $USE in /etc/make.conf, as a fellow Gentoo user. Umm, yea skipping with a warning makes sense. Any reason we don't want to do `socket.socket()` and let it choose the default family, rather than forcing ipv6? I guess for coverage purposes? (Should that be yet another tox target?) Btw, can confirm tests pass on Py3 on Gentoo with ipv6 enabled. (Also amused that @TomasTomecek working at Redhat is running Gentoo. :P) > Seems like we can test that pretty easily by attempting to bind an IPv6 socket. Even easier, we can check [socket.has_ipv6](https://docs.python.org/2/library/socket.html#socket.has_ipv6). At this point I'd lean to using ipv4 with a warning if ipv6 is not available, with a warning.
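The fallback strategy agreed on above, shown in isolation as a small standalone helper that mirrors the patch; the helper name and warning text are illustrative.

```python
import socket
import warnings

def make_listening_socket(host='localhost'):
    """Bind a listening socket, preferring IPv6 but degrading gracefully."""
    if socket.has_ipv6:
        sock = socket.socket(socket.AF_INET6)
    else:
        warnings.warn("No IPv6 support. Falling back to IPv4.")
        sock = socket.socket(socket.AF_INET)
    sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    sock.bind((host, 0))      # port 0 lets the OS pick a free port
    sock.listen(1)
    return sock

sock = make_listening_socket()
print("listening on", sock.getsockname()[:2])
sock.close()
```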
2015-03-15T11:17:13Z
[]
[]
urllib3/urllib3
595
urllib3__urllib3-595
[ "593" ]
10b7a0fefa6596f47a9a6afc80f1f4d1ae950b66
diff --git a/urllib3/response.py b/urllib3/response.py --- a/urllib3/response.py +++ b/urllib3/response.py @@ -172,6 +172,36 @@ def tell(self): """ return self._fp_bytes_read + def _init_decoder(self): + """ + Set-up the _decoder attribute if necessar. + """ + # Note: content-encoding value should be case-insensitive, per RFC 7230 + # Section 3.2 + content_encoding = self.headers.get('content-encoding', '').lower() + if self._decoder is None: + if content_encoding in self.CONTENT_DECODERS: + self._decoder = _get_decoder(content_encoding) + + def _decode(self, data, decode_content, flush_decoder): + """ + Decode the data passed in and potentially flush the decoder. + """ + try: + if decode_content and self._decoder: + data = self._decoder.decompress(data) + except (IOError, zlib.error) as e: + content_encoding = self.headers.get('content-encoding', '').lower() + raise DecodeError( + "Received response with content-encoding: %s, but " + "failed to decode it." % content_encoding, e) + + if flush_decoder and decode_content and self._decoder: + buf = self._decoder.decompress(binary_type()) + data += buf + self._decoder.flush() + + return data + def read(self, amt=None, decode_content=None, cache_content=False): """ Similar to :meth:`httplib.HTTPResponse.read`, but with two additional @@ -193,12 +223,7 @@ def read(self, amt=None, decode_content=None, cache_content=False): after having ``.read()`` the file object. (Overridden if ``amt`` is set.) """ - # Note: content-encoding value should be case-insensitive, per RFC 7230 - # Section 3.2 - content_encoding = self.headers.get('content-encoding', '').lower() - if self._decoder is None: - if content_encoding in self.CONTENT_DECODERS: - self._decoder = _get_decoder(content_encoding) + self._init_decoder() if decode_content is None: decode_content = self.decode_content @@ -247,17 +272,7 @@ def read(self, amt=None, decode_content=None, cache_content=False): self._fp_bytes_read += len(data) - try: - if decode_content and self._decoder: - data = self._decoder.decompress(data) - except (IOError, zlib.error) as e: - raise DecodeError( - "Received response with content-encoding: %s, but " - "failed to decode it." % content_encoding, e) - - if flush_decoder and decode_content and self._decoder: - buf = self._decoder.decompress(binary_type()) - data += buf + self._decoder.flush() + data = self._decode(data, decode_content, flush_decoder) if cache_content: self._body = data @@ -284,9 +299,10 @@ def stream(self, amt=2**16, decode_content=None): If True, will attempt to decode the body based on the 'content-encoding' header. """ + self._init_decoder() if self.chunked: for line in self.read_chunked(amt): - yield line + yield self._decode(line, decode_content, True) else: while not is_fp_closed(self._fp): data = self.read(amt=amt, decode_content=decode_content)
diff --git a/test/test_response.py b/test/test_response.py --- a/test/test_response.py +++ b/test/test_response.py @@ -418,6 +418,29 @@ def test_mock_transfer_encoding_chunked(self): self.assertEqual(c, stream[i]) i += 1 + def test_mock_gzipped_transfer_encoding_chunked_decoded(self): + """Show that we can decode the gizpped and chunked body.""" + def stream(): + # Set up a generator to chunk the gzipped body + import zlib + compress = zlib.compressobj(6, zlib.DEFLATED, 16 + zlib.MAX_WBITS) + data = compress.compress(b'foobar') + data += compress.flush() + for i in range(0, len(data), 2): + yield data[i:i+2] + + fp = MockChunkedEncodingResponse(list(stream())) + r = httplib.HTTPResponse(MockSock) + r.fp = fp + headers = {'transfer-encoding': 'chunked', 'content-encoding': 'gzip'} + resp = HTTPResponse(r, preload_content=False, headers=headers) + + data = b'' + for c in resp.stream(decode_content=True): + data += c + + self.assertEqual(b'foobar', data) + def test_mock_transfer_encoding_chunked_custom_read(self): stream = [b"foooo", b"bbbbaaaaar"] fp = MockChunkedEncodingResponse(stream) @@ -517,7 +540,9 @@ def __init__(self, content): @staticmethod def _encode_chunk(chunk): - return '%X\r\n%s\r\n' % (len(chunk), chunk.decode()) + # In the general case, we can't decode the chunk to unicode + length = '%X\r\n' % len(chunk) + return length.encode() + chunk + b'\r\n' def _pop_new_chunk(self): if self.chunks_exhausted: @@ -529,8 +554,10 @@ def _pop_new_chunk(self): self.chunks_exhausted = True else: self.index += 1 - encoded_chunk = self._encode_chunk(chunk) - return encoded_chunk.encode() + chunk = self._encode_chunk(chunk) + if not isinstance(chunk, bytes): + chunk = chunk.encode() + return chunk def pop_current_chunk(self, amt=-1, till_crlf=False): if amt > 0 and till_crlf:
decode_content parameter to Response.stream() is not respected for chunked bodies Originally kennethreitz/requests#2561. It seems that if you hit an endpoint that sets both Content-Encoding: gzip and Transfer-Encoding: chunked, and then use `Response.stream()` with `decode_content=True`, we don't decompress the content. That represents an API change, and I'd argue a regression. Should be a fairly simple fix though.
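To make the gzip-plus-chunked interaction concrete, here is a standalone sketch of incremental gzip decoding with `zlib`, feeding the compressed body in small pieces the way a chunked stream arrives. It only illustrates the decoding the fix restores; it is not urllib3 code.

```python
import zlib

# Build a gzip-compressed body (16 + MAX_WBITS selects the gzip container).
compress = zlib.compressobj(6, zlib.DEFLATED, 16 + zlib.MAX_WBITS)
payload = compress.compress(b'foobar') + compress.flush()

# Decode it incrementally, two bytes at a time, as a chunked reader would.
decoder = zlib.decompressobj(16 + zlib.MAX_WBITS)
decoded = b''
for i in range(0, len(payload), 2):
    decoded += decoder.decompress(payload[i:i + 2])
decoded += decoder.flush()

print(decoded)   # b'foobar'
```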
2015-04-23T14:03:54Z
[]
[]
urllib3/urllib3
599
urllib3__urllib3-599
[ "598", "598" ]
548b79a1d1e81bfd297d2ebf1c0129859c656406
diff --git a/dummyserver/handlers.py b/dummyserver/handlers.py --- a/dummyserver/handlers.py +++ b/dummyserver/handlers.py @@ -21,9 +21,6 @@ class Response(object): def __init__(self, body='', status='200 OK', headers=None): - if not isinstance(body, bytes): - body = body.encode('utf8') - self.body = body self.status = status self.headers = headers or [("Content-type", "text/plain")] @@ -34,7 +31,20 @@ def __call__(self, request_handler): for header,value in self.headers: request_handler.add_header(header,value) - request_handler.write(self.body) + # chunked + if isinstance(self.body, list): + for item in self.body: + if not isinstance(item, bytes): + item = item.encode('utf8') + request_handler.write(item) + request_handler.flush() + else: + body = self.body + if not isinstance(body, bytes): + body = body.encode('utf8') + + request_handler.write(body) + RETRY_TEST_NAMES = collections.defaultdict(int) @@ -208,6 +218,9 @@ def successful_retry(self, request): else: return Response("need to keep retrying!", status="418 I'm A Teapot") + def chunked(self, request): + return Response(['123'] * 4) + def shutdown(self, request): sys.exit() diff --git a/urllib3/response.py b/urllib3/response.py --- a/urllib3/response.py +++ b/urllib3/response.py @@ -455,4 +455,6 @@ def read_chunked(self, amt=None, decode_content=None): break # We read everything; close the "file". + if self._original_response: + self._original_response.close() self.release_conn()
diff --git a/test/with_dummyserver/test_connectionpool.py b/test/with_dummyserver/test_connectionpool.py --- a/test/with_dummyserver/test_connectionpool.py +++ b/test/with_dummyserver/test_connectionpool.py @@ -618,6 +618,25 @@ def test_source_address_error(self): self.assertRaises(ProtocolError, pool.request, 'GET', '/source_address') + def test_stream_keepalive(self): + x = 2 + + for _ in range(x): + response = self.pool.request( + 'GET', + '/chunked', + headers={ + 'Connection': 'keep-alive', + }, + preload_content=False, + retries=False, + ) + for chunk in response.stream(): + self.assertEqual(chunk, b'123') + + self.assertEqual(self.pool.num_connections, 1) + self.assertEqual(self.pool.num_requests, x) + class TestRetry(HTTPDummyServerTestCase): def setUp(self):
Keep-alive broken in 1.10.3 with chunked transfer-encoding Per kennethreitz/requests#2568, starting in urllib3 1.10.3 an exception is thrown if a connection with chunked transfer-encoding is reused.
Thanks @lmikkelsen Yeh I am also having same issue is there a solution for this? We're working on it. =) Keep an eye on this issue for more. > On 24 Apr 2015, at 18:09, ne0ark [email protected] wrote: > > Yeh I am also having same issue is there a solution for this? > > — > Reply to this email directly or view it on GitHub. It seems like this is broken in mechanize as well. I get a similar exception with the following script, but the traceback is different: ``` import requests s = requests.session() r = s.get('http://httpbin.org/stream/20') s.get('http://httpbin.org/stream/20') ``` Start of traceback ([full](https://gist.github.com/untitaker/5b3ff800951747701ebb)): ``` Traceback (most recent call last): File "/home/untitaker/projects/requests/requests/packages/urllib3/connectionpool.py", line 372, in _make_request httplib_response = conn.getresponse(buffering=True) TypeError: getresponse() got an unexpected keyword argument 'buffering' During handling of the above exception, another exception occurred: ``` Happening with Python 3.4.3 Please don't print the start of the traceback, it's irrelevant. Print the _end_ of it. =D To be clear @untitaker, your traceback is different because you've got Python 3 printing the chained exceptions. If the others users did, they'd show the same traceback as you. Here is my traceback using urllib2: https://gist.github.com/ne0ark/a451c13de9e6643a751e So using _just_ urllib3, I'm having trouble reproducing this. The following is more-or-less what an adapter does in requests ``` py import urllib3 url = 'http://httpbin.org/stream/20' http = urllib3.PoolManager( block=False, num_pools=10, maxsize=10, strict=True, ) conn = http.connection_from_url(url) response = conn.urlopen( method='GET', url=url, headers={ 'Connection': 'keep-alive', 'Accept-Encoding': 'gzip, compress', }, preload_content=False, decode_content=False, ) list(response.stream()) response = conn.urlopen( method='GET', url=url, headers={ 'Connection': 'keep-alive', 'Accept-Encoding': 'gzip, compress', }, preload_content=False, decode_content=False, ) list(response.stream()) ``` 53a1e6a is the culprit. @t-8ch do we really think closing the connection is the right thing to do here? _Edit_ that _does_ fix the issue though @sigmavirus24 I don't think so. But somehow httplib has to be conviced, that we are done with this response. Doing ``` py if self._original_response is not None: self._original_response.fp = None self.release_conn() ``` fixes the bug but is missing testcoverage @sigmavirus24 It seems even weirder. Running your example from upthread shows me _three_ requests on two sockets in wireshark. The second request is duplicated and sent on two sockets. I assume the exception is caught somewhere. Using `requests` gives me two requests on one socket (as expected, plus the exception). Yeah, ofc retries... So requests disables retries which explains the differences. Also: testing against a local httpbin works (gunicorn does not use keepalives...). And reading the documentation for the `release_con` parameter to `ConnectionPool.__init__` we should _not_ try to automatically return the connection back to the pool, if we set `preload_content` (which we do). @lmikkelsen Did you run into this problem using httpbin?
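For completeness, a hedged urllib3-only sketch of how connection re-use can be checked from calling code, along the lines of the regression test added in this PR. The host and path are placeholders (any endpoint returning a chunked body will do), the snippet needs network access, and the expected counters assume no redirects are followed.

```python
import urllib3

pool = urllib3.HTTPConnectionPool('httpbin.org', 80, maxsize=1)

for _ in range(2):
    r = pool.request('GET', '/stream/20', preload_content=False,
                     redirect=False,
                     headers={'Connection': 'keep-alive'})
    for _chunk in r.stream():
        pass          # fully consume the body so the socket can be re-used

# With keep-alive intact, both requests travel over a single connection.
print(pool.num_connections, pool.num_requests)   # expected: 1 2
```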
2015-04-27T05:18:09Z
[]
[]
urllib3/urllib3
605
urllib3__urllib3-605
[ "601" ]
7906b5f166da93c61ddd41608cd64d920953a9cd
diff --git a/urllib3/response.py b/urllib3/response.py --- a/urllib3/response.py +++ b/urllib3/response.py @@ -432,11 +432,17 @@ def read_chunked(self, amt=None, decode_content=None): 'content-encoding' header. """ self._init_decoder() - # FIXME: Rewrite this method and make it a class with - # a better structured logic. + # FIXME: Rewrite this method and make it a class with a better structured logic. if not self.chunked: raise ResponseNotChunked("Response is not chunked. " "Header 'transfer-encoding: chunked' is missing.") + + if self._original_response and self._original_response._method.upper() == 'HEAD': + # Don't bother reading the body of a HEAD request. + # FIXME: Can we do this somehow without accessing private httplib _method? + self._original_response.close() + return + while True: self._update_chunk_length() if self.chunk_left == 0:
diff --git a/test/with_dummyserver/test_socketlevel.py b/test/with_dummyserver/test_socketlevel.py --- a/test/with_dummyserver/test_socketlevel.py +++ b/test/with_dummyserver/test_socketlevel.py @@ -616,3 +616,33 @@ def test_httplib_headers_case_insensitive(self): HEADERS = {'Content-Length': '0', 'Content-type': 'text/plain'} r = pool.request('GET', '/') self.assertEqual(HEADERS, dict(r.headers.items())) # to preserve case sensitivity + + +class TestHEAD(SocketDummyServerTestCase): + def test_chunked_head_response_does_not_hang(self): + handler = create_response_handler( + b'HTTP/1.1 200 OK\r\n' + b'Transfer-Encoding: chunked\r\n' + b'Content-type: text/plain\r\n' + b'\r\n' + ) + self._start_server(handler) + pool = HTTPConnectionPool(self.host, self.port, retries=False) + r = pool.request('HEAD', '/', timeout=1, preload_content=False) + + # stream will use the read_chunked method here. + self.assertEqual([], list(r.stream())) + + def test_empty_head_response_does_not_hang(self): + handler = create_response_handler( + b'HTTP/1.1 200 OK\r\n' + b'Content-Length: 256\r\n' + b'Content-type: text/plain\r\n' + b'\r\n' + ) + self._start_server(handler) + pool = HTTPConnectionPool(self.host, self.port, retries=False) + r = pool.request('HEAD', '/', timeout=1, preload_content=False) + + # stream will use the read method here. + self.assertEqual([], list(r.stream()))
No tolerance for responses to head requests that indicate chunking. As shown [here](https://ptpb.pw/r/AFSJnZhspw4EkC13PK8TBHpNqMDH), if we receive a redirect response that has no body, but claims Transfer-Encoding: chunked, we will hang indefinitely waiting for a chunked response we never see. This has been observed with requests 2.6.2 (containing the new chunked handling) but not with requests 2.6.0 (containing the old broke stuff), which moves along just fine. Now, it's worth noting that Google has totally screwed up here, because the response is invalid, as this Wireshark trace shows: ``` HEAD /~r/DeveloperChronicles/~3/iauy0NfhAus/hitchhikers-guide-to-modern-android.html HTTP/1.1 Host: feedproxy.google.com Connection: keep-alive Accept-Encoding: gzip, deflate Accept: */* User-Agent: python-requests/2.6.2 CPython/2.7.8 Windows/7 HTTP/1.1 301 Moved Permanently Location: http://www.devchronicles.com/2015/03/hitchhikers-guide-to-modern-android.html?utm_source=feedburner&utm_medium=feed&utm_campaign=Feed%3A+DeveloperChronicles+%28Developer+Chronicles%29 Content-Type: text/html; charset=UTF-8 Date: Tue, 28 Apr 2015 09:56:23 GMT Expires: Tue, 28 Apr 2015 09:56:23 GMT Cache-Control: private, max-age=0 X-Content-Type-Options: nosniff X-XSS-Protection: 1; mode=block Server: GSE Alternate-Protocol: 80:quic,p=1 Transfer-Encoding: chunked Accept-Ranges: none Vary: Accept-Encoding ``` Note that Google have sent Transfer-Encoding: chunked, but no body. This is, as far as I can tell, against all the relevant RFCs: a zero length body has a valid chunked representation (`0\r\n\r\n`). 301 is also not defined as having no body. Regardless, however, curl gets this right, so we should too if at all possible. We should also confirm whether this is a problem for older versions of urllib3 or whether they too function correctly. Arguably this may want to block a requests release, @sigmavirus24.
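The rule involved here can be made explicit with a tiny helper (a hypothetical function, shown only for illustration, not urllib3's code, and discussed further below): responses to HEAD, 1xx, 204 and 304 are defined as body-less, so a chunked reader must not block waiting for chunk data.

```python
def should_read_body(method, status):
    """Illustrative helper: does a response to *method*/*status* carry a body?"""
    if method.upper() == 'HEAD':
        return False                      # RFC 7231 section 4.3.2
    if 100 <= status < 200 or status in (204, 304):
        return False                      # status codes defined as body-less
    return True

print(should_read_body('HEAD', 301))   # False -> don't wait for chunk data
print(should_read_body('GET', 301))    # True
```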
Actually, no, I'm wrong. From RFC 7231: > The HEAD method is identical to GET except that the server MUST NOT send a message body in the response (i.e., the response terminates at the end of the header section). I suspect there was special case code in httplib for this. I'm going to guess that httplib was checking the request method to determine if it should expect a body Seems likely to me, we should probably do the same. There's a lesson being learned here about overriding httplib functions... https://hg.python.org/cpython/file/293d9964cf6e/Lib/http/client.py#l344 is likely the check. Looks like we should be doing the same-ish. > There's a lesson being learned here about overriding httplib functions... Is the lesson: "Although httplib is terrible in so many cases, it still has a lot of handling that we don't know we need"? > Is the lesson: "Although httplib is terrible in so many cases, it still has a lot of handling that we don't know we need"? Yes. Ideally, I'd prefer for urllib3 to not care whether a body is provided or not. :) Alternatively, https://hg.python.org/cpython/file/293d9964cf6e/Lib/http/client.py#l426 is a way to handle this. Since httplib stores that information for us, and we're already accessing private things on an HTTPResponse, we may as well follow that logic as well. @sigmavirus24 That looks like the generally correct action. I've a fix in the works for this.
2015-04-28T16:17:47Z
[]
[]
urllib3/urllib3
647
urllib3__urllib3-647
[ "644" ]
ab84b5d35202bed4936cd44ef35aafd264f87090
diff --git a/urllib3/connectionpool.py b/urllib3/connectionpool.py --- a/urllib3/connectionpool.py +++ b/urllib3/connectionpool.py @@ -120,7 +120,7 @@ class HTTPConnectionPool(ConnectionPool, RequestMethods): :param maxsize: Number of connections to save that can be reused. More than 1 is useful - in multithreaded situations. If ``block`` is set to false, more + in multithreaded situations. If ``block`` is set to False, more connections will be created but they will not be saved once they've been used. @@ -409,7 +409,7 @@ def is_same_host(self, url): # TODO: Add optional support for socket.gethostbyname checking. scheme, host, port = get_host(url) - + # Use explicit default port for comparison when none is given if self.port and not port: port = port_by_scheme.get(scheme) @@ -568,25 +568,22 @@ def urlopen(self, method, url, body=None, headers=None, retries=None, # Close the connection. If a connection is reused on which there # was a Certificate error, the next request will certainly raise # another Certificate error. - if conn: - conn.close() - conn = None + conn = conn and conn.close() + release_conn = True raise SSLError(e) except SSLError: # Treat SSLError separately from BaseSSLError to preserve # traceback. - if conn: - conn.close() - conn = None + conn = conn and conn.close() + release_conn = True raise except (TimeoutError, HTTPException, SocketError, ConnectionError) as e: - if conn: - # Discard the connection for these exceptions. It will be - # be replaced during the next _get_conn() call. - conn.close() - conn = None + # Discard the connection for these exceptions. It will be + # be replaced during the next _get_conn() call. + conn = conn and conn.close() + release_conn = True if isinstance(e, SocketError) and self.proxy: e = ProxyError('Cannot connect to proxy.', e) @@ -626,6 +623,9 @@ def urlopen(self, method, url, body=None, headers=None, retries=None, retries = retries.increment(method, url, response=response, _pool=self) except MaxRetryError: if retries.raise_on_redirect: + # Release the connection for this response, since we're not + # returning it to be released manually. + response.release_conn() raise return response
diff --git a/test/test_connectionpool.py b/test/test_connectionpool.py --- a/test/test_connectionpool.py +++ b/test/test_connectionpool.py @@ -217,13 +217,10 @@ def test_contextmanager(self): old_pool_queue = pool.pool self.assertEqual(pool.pool, None) - self.assertRaises(ClosedPoolError, pool._get_conn) pool._put_conn(conn3) - self.assertRaises(ClosedPoolError, pool._get_conn) - self.assertRaises(Empty, old_pool_queue.get, block=False) diff --git a/test/with_dummyserver/test_connectionpool.py b/test/with_dummyserver/test_connectionpool.py --- a/test/with_dummyserver/test_connectionpool.py +++ b/test/with_dummyserver/test_connectionpool.py @@ -647,6 +647,27 @@ def test_chunked_gzip(self): self.assertEqual(b'123' * 4, response.read()) + def test_cleanup_on_connection_error(self): + ''' + Test that connections are recycled to the pool on + connection errors where no http response is received. + ''' + poolsize = 3 + with HTTPConnectionPool(self.host, self.port, maxsize=poolsize, block=True) as http: + self.assertEqual(http.pool.qsize(), poolsize) + + # force a connection error by supplying a non-existent + # url. We won't get a response for this and so the + # conn won't be implicitly returned to the pool. + self.assertRaises(MaxRetryError, + http.request, 'GET', '/redirect', fields={'target': '/'}, release_conn=False, retries=0) + + r = http.request('GET', '/redirect', fields={'target': '/'}, release_conn=False, retries=1) + r.release_conn() + + # the pool should still contain poolsize elements + self.assertEqual(http.pool.qsize(), http.pool.maxsize) + class TestRetry(HTTPDummyServerTestCase): def setUp(self): diff --git a/test/with_dummyserver/test_socketlevel.py b/test/with_dummyserver/test_socketlevel.py --- a/test/with_dummyserver/test_socketlevel.py +++ b/test/with_dummyserver/test_socketlevel.py @@ -119,8 +119,9 @@ def socket_handler(listener): def test_connection_refused(self): # Does the pool retry if there is no listener on the port? host, port = get_unreachable_address() - pool = HTTPConnectionPool(host, port) - self.assertRaises(MaxRetryError, pool.request, 'GET', '/', retries=0) + http = HTTPConnectionPool(host, port, maxsize=3, block=True) + self.assertRaises(MaxRetryError, http.request, 'GET', '/', retries=0, release_conn=False) + self.assertEqual(http.pool.qsize(), http.pool.maxsize) def test_connection_read_timeout(self): timed_out = Event() @@ -133,13 +134,15 @@ def socket_handler(listener): sock.close() self._start_server(socket_handler) - pool = HTTPConnectionPool(self.host, self.port, timeout=0.001, retries=False) + http = HTTPConnectionPool(self.host, self.port, timeout=0.001, retries=False, maxsize=3, block=True) try: - self.assertRaises(ReadTimeoutError, pool.request, 'GET', '/') + self.assertRaises(ReadTimeoutError, http.request, 'GET', '/', release_conn=False) finally: timed_out.set() + self.assertEqual(http.pool.qsize(), http.pool.maxsize) + def test_https_connection_read_timeout(self): """ Handshake timeouts should fail with a Timeout""" timed_out = Event()
Connection pool exhausted when connection failures occur, should refill with empty connections If preload_content is not specified on a request, connections are implicitly returned to the pool after the request is read. However, when a timeout error occurs or the connection is dropped the connection is closed but not returned in to the pool (since the response is None). This problem is compounded when retries are turned on since the next attempt will grab a new connection from the pool thus depleting it further. With non-blocking pools this is not a problem since we will create a new connection if the pool is empty but when the pool is blocking we have found that eventually the pool size becomes zero (after no.of connections timeout errors) which causes the calling application to hang on it's next request. This can be fixed in connectionpool.urlopen in the exception handlers by explicitly returning the closed connection to the pool via _put_conn (if release_conn is false), since subsequent calls to _get_conn will check if the connection has been dropped and return a new one.
Hello again! I think we need to be sure that we close the connection before we return them to the pool. Otherwise we run the risk of attempting to re-use a live connection that has timed out, which will end extremely poorly for us. We should be putting a `None` into the pool if we want to discard the connection. That will be replaced with a fresh connection when we try to get it out of the pool. That does sound like a bug. @shazow's idea is even better than mine. @jlatherfold Is this something you'd be interested in working on? Producing a failing test would be the first step. Make a ConnectionPool with a small pool (1-2), trigger some connection exceptions, see if the pool gets exhausted. Hi, I've already done this, hence this bug report. Locally I've fixed it by putting the [closed] connection back on to the pool in the exception handlers (which do close the connections). Those Nones or closed connections are discarded and a new one is returned in subsequent calls to _get_conn. Sadly I don't know when I will get time to do any more on this right now. @jlatherfold Would be handy if you can share your code for reproducing the issue. :) Also if you can confirm that putting `None` rather than the closed connection still works as expected, that would be great. I can confirm that _put_conn(None) works as expected. Here's some basic code, which also produces this error. Setting the read-timeout to a low value will cause read timeout exceptions. You'll need to hit a url with some large content - doesn't have to be massive - 500KB or so although you can probably replicate this with much less): ``` python import os import sys import requests def get(req, url): r = req.get(url, stream=True, timeout=(0.5, 0.1)) # set get timeout low to force timeout exceptions r.raise_for_status() return r.iter_content(chunk_size=4096) if __name__ == '__main__': # if you need to auth the session do so.... session = requests.Session() # setting the poolsize to 1 and 1 retries will result in this program # hanging on the first retry attempt (failed connection or read timeout) # set the pool_maxsize=4 and set some trace statements in connectionpool.py # to print the queue size in _get_conn and _put_conn and watch it slowly # decrease as we hit timeouts and retries..... adpater = requests.adapters.HTTPAdapter(pool_connections=1, pool_maxsize=1, pool_block=True, max_retries=1) session.mount('http://', adpater) while True: try: print('request.get......') content = get(session, '<url to large content>') # consume the response for chunk in content: pass except Exception: print('ops.....') # after a while when you've hit few timeouts you're not going to get here.... print('request.done......') ``` Sorry - didn't know how to format for code - but you get the idea.... Updated code formatting. Bonus points if somebody wants to translate that to plain-urllib3 and double-bonus if you make it into a test. :) Ok, I'll make the patch myself. Please let me know what procedure I need to follow or point me in the right direction where I can get that info as I have not made any code contributions before. @jlatherfold Thanks! Fork the project, make a branch, start with a test that shows the failure (we have full test coverage, have a look at other tests for examples), then send a PR for feedback. We'll review the code and give you more pointers. If you can think of a way to reproduce the scenario without doing a full end-to-end request, then urllib/tests/test_connectionpool.py would be a good place to put it. 
If you need a server, then take a look at the with_dummyserver subdirectory of test suites. We lately prefer the test_socketlevel.py method of testing with a dummyserver when possible. It might be wise to just write a simple example of the scenario reproducing in urllib3 first, to make sure it's not requests' fault.

Hi, I now have a fix for this with a unit test and am ready to submit a PR. The work has been done on a local branch. Would you like me to merge it to master, or push the branch to my fork and use that?

@jlatherfold Whatever you prefer, as long as it ends up as a Pull Request on our end. :)
2015-06-10T18:26:13Z
[]
[]
urllib3/urllib3
650
urllib3__urllib3-650
[ "579" ]
e79025d31bb953153e9c183b01aeb1727b5c18e8
diff --git a/urllib3/connection.py b/urllib3/connection.py
--- a/urllib3/connection.py
+++ b/urllib3/connection.py
@@ -192,6 +192,9 @@ def set_cert(self, key_file=None, cert_file=None, cert_reqs=None,
                  ca_certs=None, assert_hostname=None, assert_fingerprint=None):
 
+        if ca_certs and cert_reqs is None:
+            cert_reqs = 'CERT_REQUIRED'
+
         self.key_file = key_file
         self.cert_file = cert_file
         self.cert_reqs = cert_reqs
diff --git a/urllib3/connectionpool.py b/urllib3/connectionpool.py
--- a/urllib3/connectionpool.py
+++ b/urllib3/connectionpool.py
@@ -696,6 +696,10 @@ def __init__(self, host, port=None,
 
         HTTPConnectionPool.__init__(self, host, port, strict, timeout, maxsize,
                                     block, headers, retries, _proxy, _proxy_headers,
                                     **conn_kw)
+
+        if ca_certs and cert_reqs is None:
+            cert_reqs = 'CERT_REQUIRED'
+
         self.key_file = key_file
         self.cert_file = cert_file
         self.cert_reqs = cert_reqs
diff --git a/test/test_connectionpool.py b/test/test_connectionpool.py
--- a/test/test_connectionpool.py
+++ b/test/test_connectionpool.py
@@ -229,6 +229,11 @@ def test_absolute_url(self):
             'http://google.com:80/path?query=foo',
             c._absolute_url('path?query=foo'))
 
+    def test_ca_certs_default_cert_required(self):
+        with connection_from_url('https://google.com:80', ca_certs='/etc/ssl/certs/custom.pem') as pool:
+            conn = pool._get_conn()
+            self.assertEqual(conn.cert_reqs, 'CERT_REQUIRED')
+
 
 if __name__ == '__main__':
     unittest.main()
diff --git a/test/with_dummyserver/test_https.py b/test/with_dummyserver/test_https.py
--- a/test/with_dummyserver/test_https.py
+++ b/test/with_dummyserver/test_https.py
@@ -419,6 +419,11 @@ def test_discards_connection_on_sslerror(self):
         self._pool.ca_certs = DEFAULT_CA
         self._pool.request('GET', '/')
 
+    def test_set_cert_default_cert_required(self):
+        conn = VerifiedHTTPSConnection(self.host, self.port)
+        conn.set_cert(ca_certs='/etc/ssl/certs/custom.pem')
+        self.assertEqual(conn.cert_reqs, 'CERT_REQUIRED')
+
 
 class TestHTTPS_NoSAN(HTTPSDummyServerTestCase):
     certs = NO_SAN_CERTS
cert_reqs default to 'CERT_REQUIRED' when ca_certs is set

Is this a reasonable change? It would make our boilerplate a little simpler/less crufty.

```diff
- http = urllib3.PoolManager(cert_reqs='CERT_REQUIRED', ca_certs=certifi.where())
+ http = urllib3.PoolManager(ca_certs=certifi.where())
```
@Lukasa @sigmavirus24 @dstufft Thoughts on this? Consider me :+1: That said, we should allow people to use alternate values if they know they need it. Yea, it would be merely a default. Override would work as before. I'm +1 :+1: :shipit:
2015-06-12T22:30:47Z
[]
[]
urllib3/urllib3
664
urllib3__urllib3-664
[ "664" ]
b49a871f42dc07be61cb6898663c698f87f585f6
diff --git a/dummyserver/handlers.py b/dummyserver/handlers.py --- a/dummyserver/handlers.py +++ b/dummyserver/handlers.py @@ -73,6 +73,10 @@ def options(self): """ Handle OPTIONS requests """ self._call_method() + def head(self): + """ Handle HEAD requests """ + self._call_method() + def _call_method(self): """ Call the correct method in this class based on the incoming URI """ req = self.request @@ -232,6 +236,13 @@ def chunked_gzip(self, request): return Response(chunks, headers=[('Content-Encoding', 'gzip')]) + def nbytes(self, request): + length = int(request.params.get('length')) + data = b'1' * length + return Response( + data, + headers=[('Content-Type', 'application/octet-stream')]) + def shutdown(self, request): sys.exit() diff --git a/urllib3/contrib/appengine.py b/urllib3/contrib/appengine.py new file mode 100644 --- /dev/null +++ b/urllib3/contrib/appengine.py @@ -0,0 +1,222 @@ +import logging +import os +import warnings + +from ..exceptions import ( + HTTPError, + HTTPWarning, + MaxRetryError, + ProtocolError, + TimeoutError, + SSLError +) + +from ..packages.six import BytesIO +from ..request import RequestMethods +from ..response import HTTPResponse +from ..util.timeout import Timeout +from ..util.retry import Retry + +try: + from google.appengine.api import urlfetch +except ImportError: + urlfetch = None + + +log = logging.getLogger(__name__) + + +class AppEnginePlatformWarning(HTTPWarning): + pass + + +class AppEnginePlatformError(HTTPError): + pass + + +class AppEngineManager(RequestMethods): + """ + Connection manager for Google App Engine sandbox applications. + + This manager uses the URLFetch service directly instead of using the + emulated httplib, and is subject to URLFetch limitations as described in + the App Engine documentation here: + + https://cloud.google.com/appengine/docs/python/urlfetch + + Notably it will raise an AppEnginePlatformError if: + * URLFetch is not available. + * If you attempt to use this on GAEv2 (Managed VMs), as full socket + support is available. + * If a request size is more than 10 megabytes. + * If a response size is more than 32 megabtyes. + * If you use an unsupported request method such as OPTIONS. + + Beyond those cases, it will raise normal urllib3 errors. + """ + + def __init__(self, headers=None, retries=None, validate_certificate=True): + if not urlfetch: + raise AppEnginePlatformError( + "URLFetch is not available in this environment.") + + if is_prod_appengine_v2(): + raise AppEnginePlatformError( + "Use normal urllib3.PoolManager instead of AppEngineManager" + "on Managed VMs, as using URLFetch is not necessary in " + "this environment.") + + warnings.warn( + "urllib3 is using URLFetch on Google App Engine sandbox instead " + "of sockets. 
To use sockets directly instead of URLFetch see " + "https://urllib3.readthedocs.org/en/latest/contrib.html.", + AppEnginePlatformWarning) + + RequestMethods.__init__(self, headers) + self.validate_certificate = validate_certificate + + self.retries = retries or Retry.DEFAULT + + def __enter__(self): + return self + + def __exit__(self, exc_type, exc_val, exc_tb): + # Return False to re-raise any potential exceptions + return False + + def urlopen(self, method, url, body=None, headers=None, + retries=None, redirect=True, timeout=Timeout.DEFAULT_TIMEOUT, + **response_kw): + + retries = self._get_retries(retries, redirect) + + try: + response = urlfetch.fetch( + url, + payload=body, + method=method, + headers=headers or {}, + allow_truncated=False, + follow_redirects=( + redirect and + retries.redirect != 0 and + retries.total), + deadline=self._get_absolute_timeout(timeout), + validate_certificate=self.validate_certificate, + ) + except urlfetch.DeadlineExceededError as e: + raise TimeoutError(self, e) + + except urlfetch.InvalidURLError as e: + if 'too large' in e.message: + raise AppEnginePlatformError( + "URLFetch request too large, URLFetch only " + "supports requests up to 10mb in size.", e) + raise ProtocolError(e) + + except urlfetch.DownloadError as e: + if 'Too many redirects' in e.message: + raise MaxRetryError(self, url, reason=e) + raise ProtocolError(e) + + except urlfetch.ResponseTooLargeError as e: + raise AppEnginePlatformError( + "URLFetch response too large, URLFetch only supports" + "responses up to 32mb in size.", e) + + except urlfetch.SSLCertificateError as e: + raise SSLError(e) + + except urlfetch.InvalidMethodError as e: + raise AppEnginePlatformError( + "URLFetch does not support method: %s" % method, e) + + http_response = self._urlfetch_response_to_http_response( + response, **response_kw) + + # Check for redirect response + if (http_response.get_redirect_location() and + retries.raise_on_redirect and redirect): + raise MaxRetryError(self, url, "too many redirects") + + # Check if we should retry the HTTP response. + if retries.is_forced_retry(method, status_code=http_response.status): + retries = retries.increment( + method, url, response=http_response, _pool=self) + log.info("Forced retry: %s" % url) + retries.sleep() + return self.urlopen( + method, url, + body=body, headers=headers, + retries=retries, redirect=redirect, + timeout=timeout, **response_kw) + + return http_response + + def _urlfetch_response_to_http_response(self, urlfetch_resp, **response_kw): + + if is_prod_appengine_v1(): + # Production GAE handles deflate encoding automatically, but does + # not remove the encoding header. + content_encoding = urlfetch_resp.headers.get('content-encoding') + + if content_encoding == 'deflate': + del urlfetch_resp.headers['content-encoding'] + + return HTTPResponse( + # In order for decoding to work, we must present the content as + # a file-like object. + body=BytesIO(urlfetch_resp.content), + headers=urlfetch_resp.headers, + status=urlfetch_resp.status_code, + **response_kw + ) + + def _get_absolute_timeout(self, timeout): + if timeout is Timeout.DEFAULT_TIMEOUT: + return 5 # 5s is the default timeout for URLFetch. 
+ if isinstance(timeout, Timeout): + if not timeout.read is timeout.connect: + warnings.warn( + "URLFetch does not support granular timeout settings, " + "reverting to total timeout.", AppEnginePlatformWarning) + return timeout.total + return timeout + + def _get_retries(self, retries, redirect): + if not isinstance(retries, Retry): + retries = Retry.from_int( + retries, redirect=redirect, default=self.retries) + + if retries.connect or retries.read or retries.redirect: + warnings.warn( + "URLFetch only supports total retries and does not " + "recognize connect, read, or redirect retry parameters.", + AppEnginePlatformWarning) + + return retries + + +def is_appengine(): + return (is_local_appengine() or + is_prod_appengine_v1() or + is_prod_appengine_v2()) + + +def is_appengine_sandbox(): + return is_appengine() and not is_prod_appengine_v2() + + +def is_local_appengine(): + return ('APPENGINE_RUNTIME' in os.environ and + 'Development/' in os.environ['SERVER_SOFTWARE']) + + +def is_prod_appengine_v1(): + return ('APPENGINE_RUNTIME' in os.environ and + 'Google App Engine/' in os.environ['SERVER_SOFTWARE'] and + not is_prod_appengine_v2()) + + +def is_prod_appengine_v2(): + return os.environ.get('GAE_VM', False) == 'true'
diff --git a/test/contrib/test_gae_manager.py b/test/contrib/test_gae_manager.py new file mode 100644 --- /dev/null +++ b/test/contrib/test_gae_manager.py @@ -0,0 +1,185 @@ +import unittest + +from dummyserver.testcase import HTTPSDummyServerTestCase +from nose.plugins.skip import SkipTest + +try: + from google.appengine.api import urlfetch + (urlfetch) +except ImportError: + raise SkipTest("App Engine SDK not available.") + +from urllib3.contrib.appengine import AppEngineManager, AppEnginePlatformError +from urllib3.exceptions import ( + TimeoutError, + ProtocolError, + SSLError) +from urllib3.util.url import Url +from urllib3.util.retry import Retry + +from test.with_dummyserver.test_connectionpool import ( + TestConnectionPool, TestRetry) + + +# Prevent nose from running these test. +TestConnectionPool.__test__ = False +TestRetry.__test__ = False + + +# This class is used so we can re-use the tests from the connection pool. +# It proxies all requests to the manager. +class MockPool(object): + def __init__(self, host, port, manager, scheme='http'): + self.host = host + self.port = port + self.manager = manager + self.scheme = scheme + + def request(self, method, url, *args, **kwargs): + url = self._absolute_url(url) + return self.manager.request(method, url, *args, **kwargs) + + def urlopen(self, method, url, *args, **kwargs): + url = self._absolute_url(url) + return self.manager.urlopen(method, url, *args, **kwargs) + + def _absolute_url(self, path): + return Url( + scheme=self.scheme, + host=self.host, + port=self.port, + path=path).url + + +# Note that this doesn't run in the sandbox, it only runs with the URLFetch +# API stub enabled. There's no need to enable the sandbox as we know for a fact +# that URLFetch is used by the connection manager. +class TestGAEConnectionManager(TestConnectionPool): + __test__ = True + + # Magic class variable that tells NoseGAE to enable the URLFetch stub. + nosegae_urlfetch = True + + def setUp(self): + self.manager = AppEngineManager() + self.pool = MockPool(self.host, self.port, self.manager) + + # Tests specific to AppEngineManager + + def test_exceptions(self): + # DeadlineExceededError -> TimeoutError + self.assertRaises( + TimeoutError, + self.pool.request, + 'GET', + '/sleep?seconds=0.005', + timeout=0.001) + + # InvalidURLError -> ProtocolError + self.assertRaises( + ProtocolError, + self.manager.request, + 'GET', + 'ftp://invalid/url') + + # DownloadError -> ProtocolError + self.assertRaises( + ProtocolError, + self.manager.request, + 'GET', + 'http://0.0.0.0') + + # ResponseTooLargeError -> AppEnginePlatformError + self.assertRaises( + AppEnginePlatformError, + self.pool.request, + 'GET', + '/nbytes?length=33554433') # One byte over 32 megabtyes. + + # URLFetch reports the request too large error as a InvalidURLError, + # which maps to a AppEnginePlatformError. + body = b'1' * 10485761 # One byte over 10 megabytes. + self.assertRaises( + AppEnginePlatformError, + self.manager.request, + 'POST', + '/', + body=body) + + # Re-used tests below this line. + # Subsumed tests + test_timeout_float = None # Covered by test_exceptions. 
+ + # Non-applicable tests + test_conn_closed = None + test_nagle = None + test_socket_options = None + test_disable_default_socket_options = None + test_defaults_are_applied = None + test_tunnel = None + test_keepalive = None + test_keepalive_close = None + test_connection_count = None + test_connection_count_bigpool = None + test_for_double_release = None + test_release_conn_parameter = None + test_stream_keepalive = None + test_cleanup_on_connection_error = None + + # Tests that should likely be modified for appengine specific stuff + test_timeout = None + test_connect_timeout = None + test_connection_error_retries = None + test_total_timeout = None + test_none_total_applies_connect = None + test_timeout_success = None + test_source_address_error = None + test_bad_connect = None + test_partial_response = None + test_dns_error = None + + +class TestGAEConnectionManagerWithSSL(HTTPSDummyServerTestCase): + nosegae_urlfetch = True + + def setUp(self): + self.manager = AppEngineManager() + self.pool = MockPool(self.host, self.port, self.manager, 'https') + + def test_exceptions(self): + # SSLCertificateError -> SSLError + # SSLError is raised with dummyserver because URLFetch doesn't allow + # self-signed certs. + self.assertRaises( + SSLError, + self.pool.request, + 'GET', + '/') + + +class TestGAERetry(TestRetry): + __test__ = True + + # Magic class variable that tells NoseGAE to enable the URLFetch stub. + nosegae_urlfetch = True + + def setUp(self): + self.manager = AppEngineManager() + self.pool = MockPool(self.host, self.port, self.manager) + + def test_default_method_whitelist_retried(self): + """ urllib3 should retry methods in the default method whitelist """ + retry = Retry(total=1, status_forcelist=[418]) + # Use HEAD instead of OPTIONS, as URLFetch doesn't support OPTIONS + resp = self.pool.request( + 'HEAD', '/successful_retry', + headers={'test-name': 'test_default_whitelist'}, + retries=retry) + self.assertEqual(resp.status, 200) + + #test_max_retry = None + #test_disabled_retry = None + + +if __name__ == '__main__': + unittest.main()
contrib.appengine.AppEngineManager

This is currently _very_ rough and early. I want to make sure I'm on the right track before continuing. Outstanding questions:

1. What functionality should be covered by the connection pool? (retries, etc.)
2. Should I just inherit from TestConnectionPool and null out the tests that aren't applicable to GAE instead of duplicating code?
3. How should this connection class be wired in? On GAE sandbox environments without sockets, I assume this should be the default.
4. What exact edge cases should this class be aware of? E.g., where does URLFetch behavior differ from what's expected of httplib/sockets? Do the tests already cover this?

_[shazow edit: Fixes #664]_
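For orientation, here is a hedged usage sketch of the manager this PR proposes, with the interface inferred from the patch above (the URL is a placeholder, not part of the PR):

```python
from urllib3.contrib.appengine import AppEngineManager, is_appengine_sandbox

if is_appengine_sandbox():
    # URLFetch-backed manager exposing the familiar request() interface
    http = AppEngineManager()
    r = http.request('GET', 'https://www.example.com/')
    print(r.status, len(r.data))
```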
2015-06-30T19:50:55Z
[]
[]
urllib3/urllib3
674
urllib3__urllib3-674
[ "673" ]
21a288be4487040c6e21e27cec025b74d2a83152
diff --git a/urllib3/response.py b/urllib3/response.py --- a/urllib3/response.py +++ b/urllib3/response.py @@ -2,6 +2,7 @@ import http.client as httplib except ImportError: import httplib +from contextlib import contextmanager import zlib import io from socket import timeout as SocketTimeout @@ -202,56 +203,18 @@ def _decode(self, data, decode_content, flush_decoder): return data - def read(self, amt=None, decode_content=None, cache_content=False): + @contextmanager + def _error_catcher(self): """ - Similar to :meth:`httplib.HTTPResponse.read`, but with two additional - parameters: ``decode_content`` and ``cache_content``. - - :param amt: - How much of the content to read. If specified, caching is skipped - because it doesn't make sense to cache partial content as the full - response. + Catch low-level python exceptions, instead re-raising urllib3 + variants, so that low-level exceptions are not leaked in the + high-level api. - :param decode_content: - If True, will attempt to decode the body based on the - 'content-encoding' header. - - :param cache_content: - If True, will save the returned data such that the same result is - returned despite of the state of the underlying file object. This - is useful if you want the ``.data`` property to continue working - after having ``.read()`` the file object. (Overridden if ``amt`` is - set.) + On exit, release the connection back to the pool. """ - self._init_decoder() - if decode_content is None: - decode_content = self.decode_content - - if self._fp is None: - return - - flush_decoder = False - data = None - try: try: - if amt is None: - # cStringIO doesn't like amt=None - data = self._fp.read() - flush_decoder = True - else: - cache_content = False - data = self._fp.read(amt) - if amt != 0 and not data: # Platform-specific: Buggy versions of Python. - # Close the connection when no data is returned - # - # This is redundant to what httplib/http.client _should_ - # already do. However, versions of python released before - # December 15, 2012 (http://bugs.python.org/issue16298) do - # not properly close the connection in all cases. There is - # no harm in redundantly calling close. - self._fp.close() - flush_decoder = True + yield except SocketTimeout: # FIXME: Ideally we'd like to include the url in the ReadTimeoutError but @@ -281,6 +244,56 @@ def read(self, amt=None, decode_content=None, cache_content=False): if self._original_response and self._original_response.isclosed(): self.release_conn() + def read(self, amt=None, decode_content=None, cache_content=False): + """ + Similar to :meth:`httplib.HTTPResponse.read`, but with two additional + parameters: ``decode_content`` and ``cache_content``. + + :param amt: + How much of the content to read. If specified, caching is skipped + because it doesn't make sense to cache partial content as the full + response. + + :param decode_content: + If True, will attempt to decode the body based on the + 'content-encoding' header. + + :param cache_content: + If True, will save the returned data such that the same result is + returned despite of the state of the underlying file object. This + is useful if you want the ``.data`` property to continue working + after having ``.read()`` the file object. (Overridden if ``amt`` is + set.) 
+ """ + self._init_decoder() + if decode_content is None: + decode_content = self.decode_content + + if self._fp is None: + return + + flush_decoder = False + data = None + + with self._error_catcher(): + if amt is None: + # cStringIO doesn't like amt=None + data = self._fp.read() + flush_decoder = True + else: + cache_content = False + data = self._fp.read(amt) + if amt != 0 and not data: # Platform-specific: Buggy versions of Python. + # Close the connection when no data is returned + # + # This is redundant to what httplib/http.client _should_ + # already do. However, versions of python released before + # December 15, 2012 (http://bugs.python.org/issue16298) do + # not properly close the connection in all cases. There is + # no harm in redundantly calling close. + self._fp.close() + flush_decoder = True + if data: self._fp_bytes_read += len(data) @@ -452,24 +465,24 @@ def read_chunked(self, amt=None, decode_content=None): self._original_response.close() return - while True: - self._update_chunk_length() - if self.chunk_left == 0: - break - chunk = self._handle_chunk(amt) - yield self._decode(chunk, decode_content=decode_content, - flush_decoder=True) - - # Chunk content ends with \r\n: discard it. - while True: - line = self._fp.fp.readline() - if not line: - # Some sites may not end with '\r\n'. - break - if line == b'\r\n': - break - - # We read everything; close the "file". - if self._original_response: - self._original_response.close() - self.release_conn() + with self._error_catcher(): + while True: + self._update_chunk_length() + if self.chunk_left == 0: + break + chunk = self._handle_chunk(amt) + yield self._decode(chunk, decode_content=decode_content, + flush_decoder=True) + + # Chunk content ends with \r\n: discard it. + while True: + line = self._fp.fp.readline() + if not line: + # Some sites may not end with '\r\n'. + break + if line == b'\r\n': + break + + # We read everything; close the "file". + if self._original_response: + self._original_response.close()
diff --git a/test/test_response.py b/test/test_response.py
--- a/test/test_response.py
+++ b/test/test_response.py
@@ -7,7 +7,7 @@
 except ImportError:
     import httplib
 from urllib3.response import HTTPResponse
-from urllib3.exceptions import DecodeError, ResponseNotChunked
+from urllib3.exceptions import DecodeError, ResponseNotChunked, ProtocolError
 
 from base64 import b64decode
 
@@ -487,7 +487,7 @@ def test_invalid_chunks(self):
         r.chunked = True
         r.chunk_left = None
         resp = HTTPResponse(r, preload_content=False, headers={'transfer-encoding': 'chunked'})
-        self.assertRaises(httplib.IncompleteRead, next, resp.read_chunked())
+        self.assertRaises(ProtocolError, next, resp.read_chunked())
 
     def test_chunked_response_without_crlf_on_end(self):
         stream = [b"foo", b"bar", b"baz"]
stream() "contract" changed in 1.10.3, can now raise IncompleteRead

In 1.10.2, `HTTPResponse.stream()` always called `HTTPResponse.read()`. This method catches all `HTTPException` (including `IncompleteRead`), and re-raises them as `ProtocolError`.

In 1.10.3 and 1.10.4, `HTTPResponse.stream()` may call `HTTPResponse.read_chunked()` instead. This method may (via the `_update_chunk_length()` helper method) raise `IncompleteRead`. This exception will not be caught, and will be raised out of the call to `HTTPResponse.stream()`.

A project I work on recently upgraded to requests 2.7.0 (which has vendored urllib3 1.10.4), and has started to see crashes because of `IncompleteRead` being raised from `HTTPResponse.stream()`. We previously would catch `RequestException`, as requests would catch `ProtocolError` (but not `IncompleteRead`) and re-raise a `ChunkedEncodingError` in `Response.iter_content()`.

I'm not familiar with `urllib3.util.retry`, but at a glance it also seems like this change would negatively affect the `_is_read_error()` helper method.
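To make the behaviour change concrete, here is a hedged sketch of caller code (the URL is a placeholder) that was exception-safe on 1.10.2 but is not on 1.10.3/1.10.4 when the response is chunked:

```python
import urllib3
from urllib3.exceptions import ProtocolError

http = urllib3.PoolManager()
r = http.request('GET', 'http://example.com/chunked-endpoint',
                 preload_content=False)
try:
    for chunk in r.stream(4096):
        pass
except ProtocolError:
    # Sufficient on 1.10.2. On 1.10.3/1.10.4, stream() may delegate to
    # read_chunked(), and an httplib.IncompleteRead escapes this handler.
    print("body was cut short")
```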
Hm sorry about that. Are you proposing a specific fix, or is this a request for a major version bump?

The potentially simplest way to handle this is to have our own IncompleteRead error that subclasses ProtocolError and wraps the IncompleteRead from httplib. (Assuming I'm understanding the problem correctly that we're not catching httplib.IncompleteRead.)

I like sigma's suggestion. It preserves the information of the exception while allowing clients to continue to catch the same set of exceptions. Thoughts @shazow?

@shazow I wasn't entirely sure if this was intentional or not. If it's not intentional, I would propose making it so that `read_chunked` catches the same set of exceptions that `read` does. Then a major version bump is not necessary. @sigmavirus24's solution would work for this particular exception, though I don't know if there are other low-level exceptions that `read_chunked` may raise that `read` catches. It may also be possible to refactor the try/except in `read` into a context manager that is also used by `read_chunked`.

Yea we definitely shouldn't be leaking httplib exceptions in the high-level api.

+1, we shouldn't leak if we possibly can.
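A rough sketch of the "wrap it" idea discussed here (not necessarily how the merged patch does it; the class and attribute names are illustrative): a urllib3-level error subclassing ProtocolError preserves the detail while keeping existing `except ProtocolError` handlers working.

```python
from urllib3.exceptions import ProtocolError

class IncompleteRead(ProtocolError):
    """Raised when the body ends before the declared chunk/content length."""

    def __init__(self, partial, expected):
        message = "%i bytes read, %i more expected" % (partial, expected)
        super(IncompleteRead, self).__init__(message)
        self.partial = partial
        self.expected = expected
```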
2015-07-14T20:21:56Z
[]
[]
urllib3/urllib3
711
urllib3__urllib3-711
[ "710" ]
3d8e7295ecc626f36cdc12d8111b27cab2502de6
diff --git a/urllib3/util/ssl_.py b/urllib3/util/ssl_.py
--- a/urllib3/util/ssl_.py
+++ b/urllib3/util/ssl_.py
@@ -17,6 +17,25 @@
 import errno
 import warnings
 
+import hmac
+
+
+def _const_compare_digest_backport(a, b):
+    """
+    Compare two digests of equal length in constant time.
+
+    The digests must be of type str/bytes.
+    Returns True if the digests match, and False otherwise.
+    """
+    result = abs(len(a) - len(b))
+    for l, r in zip(bytearray(a), bytearray(b)):
+        result |= l ^ r
+    return result == 0
+
+
+_const_compare_digest = getattr(hmac, 'compare_digest',
+                                _const_compare_digest_backport)
+
 try:  # Test for SSL features
     import ssl
@@ -134,7 +153,7 @@ def assert_fingerprint(cert, fingerprint):
 
     cert_digest = hashfunc(cert).digest()
 
-    if cert_digest != fingerprint_bytes:
+    if not _const_compare_digest(cert_digest, fingerprint_bytes):
         raise SSLError('Fingerprints did not match. Expected "{0}", got "{1}".'
                        .format(fingerprint, hexlify(cert_digest)))
diff --git a/test/test_util.py b/test/test_util.py --- a/test/test_util.py +++ b/test/test_util.py @@ -1,3 +1,4 @@ +import hashlib import warnings import logging import unittest @@ -18,6 +19,7 @@ from urllib3.util.ssl_ import ( resolve_cert_reqs, ssl_wrap_socket, + _const_compare_digest_backport, ) from urllib3.exceptions import ( LocationParseError, @@ -412,3 +414,16 @@ def test_ssl_wrap_socket_with_no_sni(self): ssl_wrap_socket(ssl_context=mock_context, sock=socket) mock_context.wrap_socket.assert_called_once_with(socket) ssl_.HAS_SNI = HAS_SNI + + def test_const_compare_digest_fallback(self): + target = hashlib.sha256(b'abcdef').digest() + self.assertTrue(_const_compare_digest_backport(target, target)) + + prefix = target[:-1] + self.assertFalse(_const_compare_digest_backport(target, prefix)) + + suffix = target + b'0' + self.assertFalse(_const_compare_digest_backport(target, suffix)) + + incorrect = hashlib.sha256(b'xyz').digest() + self.assertFalse(_const_compare_digest_backport(target, incorrect))
SSL cert digest check uses non-constant-time comparison

This is the problematic line: https://github.com/shazow/urllib3/blob/master/urllib3/util/ssl_.py#L137

The digests being compared are strings, and `==` is not constant-time, which could potentially allow a timing attack. While it may not immediately be obvious whether an adversary could gain any advantage by successfully timing this line, I am a firm believer in "better safe than sorry."

The best way to fix this is to use a constant-time digest comparison function, such as the built-in `hmac.compare_digest`. However, this function was only introduced in Python 2.7.7, while this library supports Python 2.6+. The next best solution would be to use something like the `constant_time_compare` function from the following package: https://github.com/mitsuhiko/itsdangerous

Using a constant-time comparison is semantically equivalent to using `==`, and the performance impact is negligible and comes along with improved security. I believe this is a good tradeoff.
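A minimal sketch of the suggested approach, mirroring the backport in the patch above: prefer the built-in `hmac.compare_digest` where it exists (Python 2.7.7+/3.3+) and fall back to a simple constant-time loop elsewhere. The standalone function names here are illustrative.

```python
import hmac

def _compare_digest_backport(a, b):
    # Constant-time comparison for str/bytes digests.
    result = abs(len(a) - len(b))
    for l, r in zip(bytearray(a), bytearray(b)):
        result |= l ^ r
    return result == 0

compare_digest = getattr(hmac, 'compare_digest', _compare_digest_backport)

print(compare_digest(b'abc', b'abc'))  # True
print(compare_digest(b'abc', b'abd'))  # False
```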
2015-09-23T21:37:28Z
[]
[]
urllib3/urllib3
734
urllib3__urllib3-734
[ "734" ]
267a865cd6d29b0ffe25b544d5d1d172941b24dc
diff --git a/urllib3/response.py b/urllib3/response.py
--- a/urllib3/response.py
+++ b/urllib3/response.py
@@ -236,6 +236,12 @@ def _error_catcher(self):
             if self._original_response and not self._original_response.isclosed():
                 self._original_response.close()
 
+            # Closing the response may not actually be sufficient to close
+            # everything, so if we have a hold of the connection close that
+            # too.
+            if self._connection is not None:
+                self._connection.close()
+
             raise
diff --git a/test/with_dummyserver/test_socketlevel.py b/test/with_dummyserver/test_socketlevel.py --- a/test/with_dummyserver/test_socketlevel.py +++ b/test/with_dummyserver/test_socketlevel.py @@ -30,6 +30,7 @@ class MimeToolMessage(object): pass from threading import Event +import select import socket import ssl @@ -366,6 +367,72 @@ def socket_handler(listener): self.assertRaises(ProtocolError, response.read) self.assertEqual(poolsize, pool.pool.qsize()) + def test_connection_closed_on_read_timeout_preload_false(self): + timed_out = Event() + + def socket_handler(listener): + sock = listener.accept()[0] + + # Consume request + buf = b'' + while not buf.endswith(b'\r\n\r\n'): + buf = sock.recv(65535) + + # Send partial chunked response and then hang. + sock.send(( + 'HTTP/1.1 200 OK\r\n' + 'Content-Type: text/plain\r\n' + 'Transfer-Encoding: chunked\r\n' + '\r\n' + '8\r\n' + '12345678\r\n').encode('utf-8') + ) + timed_out.wait(5) + + # Expect a new request, but keep hold of the old socket to avoid + # leaking it. Because we don't want to hang this thread, we + # actually use select.select to confirm that a new request is + # coming in: this lets us time the thread out. + rlist, _, _ = select.select([listener], [], [], 1) + assert rlist + new_sock = listener.accept()[0] + + # Consume request + buf = b'' + while not buf.endswith(b'\r\n\r\n'): + buf = new_sock.recv(65535) + + # Send complete chunked response. + new_sock.send(( + 'HTTP/1.1 200 OK\r\n' + 'Content-Type: text/plain\r\n' + 'Transfer-Encoding: chunked\r\n' + '\r\n' + '8\r\n' + '12345678\r\n' + '0\r\n\r\n').encode('utf-8') + ) + + new_sock.close() + sock.close() + + self._start_server(socket_handler) + with HTTPConnectionPool(self.host, self.port) as pool: + # First request should fail. + response = pool.urlopen('GET', '/', retries=0, + preload_content=False, + timeout=Timeout(connect=1, read=0.001)) + try: + self.assertRaises(ReadTimeoutError, response.read) + finally: + timed_out.set() + + # Second should succeed. + response = pool.urlopen('GET', '/', retries=0, + preload_content=False, + timeout=Timeout(connect=1, read=0.1)) + self.assertEqual(len(response.read()), 8) + class TestProxyManager(SocketDummyServerTestCase):
Defensively close connections

Generally speaking, if we hit an exception when working with a connection we should assume that the connection is dead. We've not been defensive enough with this in the past, and that attitude pretty much just leads us to a constant cycle of bug-squashing each time we discover a new connection-invalidating exception.

In this case, the problem (and the fix) were inspired by [this HN post](https://news.ycombinator.com/item?id=10512343). For that reason, I'd like to credit @rkday (in his guise as @rkd-msw) for the actual fix. Rob, if you'd like to, I'm happy for you to open a new pull request with these commits in place. If you don't care about the commit log, I've made sure you're added to CONTRIBUTORS.
2015-11-05T11:06:49Z
[]
[]
urllib3/urllib3
754
urllib3__urllib3-754
[ "753" ]
b09e2c227149f223e850837cd17b1fb7827c08ad
diff --git a/urllib3/__init__.py b/urllib3/__init__.py
--- a/urllib3/__init__.py
+++ b/urllib3/__init__.py
@@ -81,6 +81,8 @@ def add_stderr_logger(level=logging.DEBUG):
 # InsecurePlatformWarning's don't vary between requests, so we keep it default.
 warnings.simplefilter('default', exceptions.InsecurePlatformWarning,
                       append=True)
+# SNIMissingWarnings should go off only once.
+warnings.simplefilter('default', exceptions.SNIMissingWarning)
 
 
 def disable_warnings(category=exceptions.HTTPWarning):
diff --git a/urllib3/exceptions.py b/urllib3/exceptions.py
--- a/urllib3/exceptions.py
+++ b/urllib3/exceptions.py
@@ -175,6 +175,11 @@ class InsecurePlatformWarning(SecurityWarning):
     pass
 
 
+class SNIMissingWarning(HTTPWarning):
+    "Warned when making a HTTPS request without SNI available."
+    pass
+
+
 class ResponseNotChunked(ProtocolError, ValueError):
     "Response needs to be chunked in order to read it as chunks."
     pass
diff --git a/urllib3/util/ssl_.py b/urllib3/util/ssl_.py
--- a/urllib3/util/ssl_.py
+++ b/urllib3/util/ssl_.py
@@ -6,7 +6,7 @@
 from binascii import hexlify, unhexlify
 from hashlib import md5, sha1, sha256
 
-from ..exceptions import SSLError, InsecurePlatformWarning
+from ..exceptions import SSLError, InsecurePlatformWarning, SNIMissingWarning
 
 
 SSLContext = None
@@ -303,4 +303,15 @@ def ssl_wrap_socket(sock, keyfile=None, certfile=None, cert_reqs=None,
         context.load_cert_chain(certfile, keyfile)
     if HAS_SNI:  # Platform-specific: OpenSSL with enabled SNI
         return context.wrap_socket(sock, server_hostname=server_hostname)
+
+    warnings.warn(
+        'An HTTPS request has been made, but the SNI (Subject Name '
+        'Indication) extension to TLS is not available on this platform. '
+        'This may cause the server to present an incorrect TLS '
+        'certificate, which can cause validation failures. For more '
+        'information, see '
+        'https://urllib3.readthedocs.org/en/latest/security.html'
+        '#snimissingwarning.',
+        SNIMissingWarning
+    )
     return context.wrap_socket(sock)
diff --git a/test/test_util.py b/test/test_util.py --- a/test/test_util.py +++ b/test/test_util.py @@ -26,6 +26,7 @@ TimeoutStateError, InsecureRequestWarning, SSLError, + SNIMissingWarning, ) from urllib3.util import is_fp_closed, ssl_ @@ -415,6 +416,20 @@ def test_ssl_wrap_socket_with_no_sni(self): mock_context.wrap_socket.assert_called_once_with(socket) ssl_.HAS_SNI = HAS_SNI + def test_ssl_wrap_socket_with_no_sni_warns(self): + socket = object() + mock_context = Mock() + # Ugly preservation of original value + HAS_SNI = ssl_.HAS_SNI + ssl_.HAS_SNI = False + with patch('warnings.warn') as warn: + ssl_wrap_socket(ssl_context=mock_context, sock=socket) + mock_context.wrap_socket.assert_called_once_with(socket) + ssl_.HAS_SNI = HAS_SNI + self.assertTrue(warn.call_count >= 1) + warnings = [call[0][1] for call in warn.call_args_list] + self.assertTrue(SNIMissingWarning in warnings) + def test_const_compare_digest_fallback(self): target = hashlib.sha256(b'abcdef').digest() self.assertTrue(_const_compare_digest_backport(target, target)) diff --git a/test/with_dummyserver/test_https.py b/test/with_dummyserver/test_https.py --- a/test/with_dummyserver/test_https.py +++ b/test/with_dummyserver/test_https.py @@ -35,6 +35,7 @@ ) from urllib3.packages import six from urllib3.util.timeout import Timeout +from urllib3.util.ssl_ import HAS_SNI ResourceWarning = getattr( @@ -77,7 +78,10 @@ def test_verified(self): self.assertFalse(warn.called, warn.call_args_list) else: self.assertTrue(warn.called) - call, = warn.call_args_list + if HAS_SNI: + call = warn.call_args_list[0] + else: + call = warn.call_args_list[1] error = call[0][1] self.assertEqual(error, InsecurePlatformWarning) @@ -176,8 +180,10 @@ def test_ssl_unverified_with_ca_certs(self): calls = warn.call_args_list if sys.version_info >= (2, 7, 9): category = calls[0][0][1] - else: + elif HAS_SNI: category = calls[1][0][1] + else: + category = calls[2][0][1] self.assertEqual(category, InsecureRequestWarning) @requires_network
Warn if we do not have access to SNI

Suggested in kennethreitz/requests#2910. This would make it easier for people to correlate unexpected cert verification failures with the specific action they need to take.
+1 to more descriptive errors; ideally it should only fire when there is no SNI and SNI is required for the request to succeed.

Well, we can't really tell when SNI is required for the request to succeed, only when it's _likely_ to be needed. In that case, I think warning once, on the first HTTPS request, on a platform that has no SNI is likely to be the way to go here.
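For reference, a hedged sketch of what a user could do once this lands: check whether the running interpreter has SNI and, only if the risk is understood, silence the warning introduced by the patch above.

```python
import warnings

import urllib3
from urllib3.util import HAS_SNI

if not HAS_SNI:
    # Accepting the risk of the server presenting the wrong certificate
    # on multi-host (virtual-hosted) TLS servers.
    warnings.simplefilter('ignore', urllib3.exceptions.SNIMissingWarning)
```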
2015-12-02T14:15:01Z
[]
[]
urllib3/urllib3
758
urllib3__urllib3-758
[ "750" ]
b09e2c227149f223e850837cd17b1fb7827c08ad
diff --git a/urllib3/poolmanager.py b/urllib3/poolmanager.py
--- a/urllib3/poolmanager.py
+++ b/urllib3/poolmanager.py
@@ -26,7 +26,7 @@
 log = logging.getLogger(__name__)
 
 SSL_KEYWORDS = ('key_file', 'cert_file', 'cert_reqs', 'ca_certs',
-                'ssl_version')
+                'ssl_version', 'ca_cert_dir')
 
 
 class PoolManager(RequestMethods):
diff --git a/test/with_dummyserver/test_poolmanager.py b/test/with_dummyserver/test_poolmanager.py
--- a/test/with_dummyserver/test_poolmanager.py
+++ b/test/with_dummyserver/test_poolmanager.py
@@ -162,6 +162,12 @@ def test_http_with_ssl_keywords(self):
         r = http.request('GET', 'http://%s:%s/' % (self.host, self.port))
         self.assertEqual(r.status, 200)
 
+    def test_http_with_ca_cert_dir(self):
+        http = PoolManager(ca_certs='REQUIRED', ca_cert_dir='/nosuchdir')
+
+        r = http.request('GET', 'http://%s:%s/' % (self.host, self.port))
+        self.assertEqual(r.status, 200)
+
 
 class TestIPv6PoolManager(IPv6HTTPDummyServerTestCase):
     if not HAS_IPV6:
ca_cert_dir keyword argument may be passed to HTTPConnectionPool by accident.

Seems like as part of #701 I missed the `SSL_KEYWORDS` block in `poolmanager.py`. This means that `ca_cert_dir` may accidentally be passed to the `HTTPConnectionPool`. This leads to the following error when attempting to use `ca_cert_dir` with a `PoolManager` and then making a plaintext HTTP connection:

```
>>> import urllib3
>>> p = urllib3.PoolManager(ca_cert_dir='/usr/local/etc/openssl')
>>> p.urlopen('GET', 'http://http2bin.org/get')
Traceback (most recent call last):
  File "<stdin>", line 1, in <module>
  File "urllib3/poolmanager.py", line 162, in urlopen
    response = conn.urlopen(method, u.request_uri, **kw)
  File "urllib3/connectionpool.py", line 548, in urlopen
    conn = self._get_conn(timeout=pool_timeout)
  File "urllib3/connectionpool.py", line 250, in _get_conn
    return conn or self._new_conn()
  File "urllib3/connectionpool.py", line 211, in _new_conn
    strict=self.strict, **self.conn_kw)
  File "urllib3/connection.py", line 121, in __init__
    _HTTPConnection.__init__(self, *args, **kw)
TypeError: __init__() got an unexpected keyword argument 'ca_cert_dir'
```
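To illustrate the mechanism, here is a simplified, hypothetical sketch (not the real `PoolManager` code) of how the `SSL_KEYWORDS` filter is meant to keep SSL-only keywords away from plain-HTTP pools; any keyword missing from the tuple leaks through to `HTTPConnection.__init__` and triggers the `TypeError` shown above.

```python
SSL_KEYWORDS = ('key_file', 'cert_file', 'cert_reqs', 'ca_certs',
                'ssl_version', 'ca_cert_dir')

def kwargs_for_scheme(scheme, kwargs):
    # For http:// hosts, drop anything that only makes sense for TLS.
    if scheme == 'http':
        return {k: v for k, v in kwargs.items() if k not in SSL_KEYWORDS}
    return dict(kwargs)

print(kwargs_for_scheme('http', {'ca_cert_dir': '/certs', 'timeout': 3.0}))
# {'timeout': 3.0}
```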
This should also make sure to update the new tests in #751 to include the ca_cert_dir field. Mmm that is pretty nasty.
2015-12-03T11:27:49Z
[]
[]
urllib3/urllib3
761
urllib3__urllib3-761
[ "760" ]
91aba9890660a0393671eca80105c77f7cee1688
diff --git a/dummyserver/server.py b/dummyserver/server.py --- a/dummyserver/server.py +++ b/dummyserver/server.py @@ -34,10 +34,15 @@ 'certfile': os.path.join(CERTS_PATH, 'server.no_san.crt'), 'keyfile': DEFAULT_CERTS['keyfile'] } +IPV6_ADDR_CERTS = { + 'certfile': os.path.join(CERTS_PATH, 'server.ipv6addr.crt'), + 'keyfile': os.path.join(CERTS_PATH, 'server.ipv6addr.key'), +} DEFAULT_CA = os.path.join(CERTS_PATH, 'cacert.pem') DEFAULT_CA_BAD = os.path.join(CERTS_PATH, 'client_bad.pem') NO_SAN_CA = os.path.join(CERTS_PATH, 'cacert.no_san.pem') DEFAULT_CA_DIR = os.path.join(CERTS_PATH, 'ca_path_test') +IPV6_ADDR_CA = os.path.join(CERTS_PATH, 'server.ipv6addr.crt') def _has_ipv6(host): diff --git a/dummyserver/testcase.py b/dummyserver/testcase.py --- a/dummyserver/testcase.py +++ b/dummyserver/testcase.py @@ -113,6 +113,17 @@ class HTTPSDummyServerTestCase(HTTPDummyServerTestCase): certs = DEFAULT_CERTS +class IPV6HTTPSDummyServerTestCase(HTTPSDummyServerTestCase): + host = '::1' + + @classmethod + def setUpClass(cls): + if not socket.has_ipv6: + raise SkipTest('IPv6 not available') + else: + super(IPV6HTTPSDummyServerTestCase, cls).setUpClass() + + class HTTPDummyProxyTestCase(unittest.TestCase): http_host = 'localhost' diff --git a/urllib3/connection.py b/urllib3/connection.py --- a/urllib3/connection.py +++ b/urllib3/connection.py @@ -265,7 +265,16 @@ def connect(self): 'for details.)'.format(hostname)), SubjectAltNameWarning ) - match_hostname(cert, self.assert_hostname or hostname) + + # In case the hostname is an IPv6 address, strip the square + # brackets from it before using it to validate. This is because + # a certificate with an IPv6 address in it won't have square + # brackets around that address. Sadly, match_hostname won't do this + # for us: it expects the plain host part without any extra work + # that might have been done to make it palatable to httplib. + asserted_hostname = self.assert_hostname or hostname + asserted_hostname = asserted_hostname.strip('[]') + match_hostname(cert, asserted_hostname) self.is_verified = (resolved_cert_reqs == ssl.CERT_REQUIRED or self.assert_fingerprint is not None)
diff --git a/test/with_dummyserver/test_https.py b/test/with_dummyserver/test_https.py --- a/test/with_dummyserver/test_https.py +++ b/test/with_dummyserver/test_https.py @@ -8,9 +8,12 @@ import mock from nose.plugins.skip import SkipTest -from dummyserver.testcase import HTTPSDummyServerTestCase +from dummyserver.testcase import ( + HTTPSDummyServerTestCase, IPV6HTTPSDummyServerTestCase +) from dummyserver.server import (DEFAULT_CA, DEFAULT_CA_BAD, DEFAULT_CERTS, - NO_SAN_CERTS, NO_SAN_CA, DEFAULT_CA_DIR) + NO_SAN_CERTS, NO_SAN_CA, DEFAULT_CA_DIR, + IPV6_ADDR_CERTS, IPV6_ADDR_CA, HAS_IPV6) from test import ( onlyPy26OrOlder, @@ -466,5 +469,20 @@ def test_warning_for_certs_without_a_san(self): self.assertTrue(warn.called) + +class TestHTTPS_IPv6Addr(IPV6HTTPSDummyServerTestCase): + certs = IPV6_ADDR_CERTS + + def test_strip_square_brackets_before_validating(self): + """Test that the fix for #760 works.""" + if not HAS_IPV6: + raise SkipTest("Only runs on IPv6 systems") + https_pool = HTTPSConnectionPool('[::1]', self.port, + cert_reqs='CERT_REQUIRED', + ca_certs=IPV6_ADDR_CA) + r = https_pool.request('GET', '/') + self.assertEqual(r.status, 200) + + if __name__ == '__main__': unittest.main()
Must remove square brackets from IPv6 addresses when using match_hostname

Spotted in [OpenStack](https://bugs.launchpad.net/nova/+bug/1526413). Introduced by my fix #708. Apparently, while `httplib` itself doesn't care about the square brackets, the `match_hostname` function needs them gone. I suggest we move the stripping down into our `ssl.py` module.
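A tiny illustration of the mismatch: the host as httplib carries it keeps the brackets, while the certificate's subjectAltName entry does not, so the brackets must be stripped before validation (this mirrors the one-line fix in the patch above).

```python
host = '[::1]'                        # how an IPv6 host reaches the connection layer
asserted_hostname = host.strip('[]')  # '::1', the form used in the certificate's SAN
# match_hostname(cert, asserted_hostname) is then called with the bare address
print(asserted_hostname)
```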
This is the openstack bug, FYI: https://bugs.launchpad.net/nova/+bug/1526413 @Lukasa seems to be working on this so I'm labelling it in progress and assigning him :-P
2015-12-15T17:05:16Z
[]
[]
urllib3/urllib3
783
urllib3__urllib3-783
[ "779" ]
4a72e5375c49043128114b05eef2d9262645407d
diff --git a/urllib3/response.py b/urllib3/response.py
--- a/urllib3/response.py
+++ b/urllib3/response.py
@@ -387,6 +387,9 @@ def close(self):
         if not self.closed:
             self._fp.close()
 
+        if self._connection is not None:
+            self._connection.close()
+
     @property
     def closed(self):
         if self._fp is None:
diff --git a/test/with_dummyserver/test_socketlevel.py b/test/with_dummyserver/test_socketlevel.py --- a/test/with_dummyserver/test_socketlevel.py +++ b/test/with_dummyserver/test_socketlevel.py @@ -433,6 +433,53 @@ def socket_handler(listener): timeout=Timeout(connect=1, read=0.1)) self.assertEqual(len(response.read()), 8) + def test_closing_response_actually_closes_connection(self): + done_closing = Event() + complete = Event() + # The insane use of this variable here is to get around the fact that + # Python 2.6 does not support returning a value from Event.wait(). This + # means we can't tell if an event timed out, so we can't use the timing + # out of the 'complete' event to determine the success or failure of + # the test. Python 2 also doesn't have the nonlocal statement, so we + # can't write directly to this variable, only mutate it. Hence: list. + successful = [] + + def socket_handler(listener): + sock = listener.accept()[0] + + buf = b'' + while not buf.endswith(b'\r\n\r\n'): + buf = sock.recv(65536) + + sock.send(('HTTP/1.1 200 OK\r\n' + 'Content-Type: text/plain\r\n' + 'Content-Length: 0\r\n' + '\r\n').encode('utf-8')) + + # Wait for the socket to close. + done_closing.wait(timeout=1) + + # Look for the empty string to show that the connection got closed. + # Don't get stuck in a timeout. + sock.settimeout(1) + new_data = sock.recv(65536) + self.assertFalse(new_data) + successful.append(True) + sock.close() + complete.set() + + self._start_server(socket_handler) + pool = HTTPConnectionPool(self.host, self.port) + + response = pool.request('GET', '/', retries=0, preload_content=False) + self.assertEqual(response.status, 200) + response.close() + + done_closing.set() # wait until the socket in our pool gets closed + complete.wait(timeout=1) + if not successful: + self.fail("Timed out waiting for connection close") + class TestProxyManager(SocketDummyServerTestCase):
HTTPResponse.close may not close underlying connection.

Found while investigating kennethreitz/requests#2963.

The `HTTPResponse` class has a `close` method that rather suggests it will try to close the backing TCP connection behind the given HTTP response. Right now, that's not what happens if the connection is kept alive for any reason (that is, if the server did not send `Connection: close`): instead, the TCP connection will be kept alive and handled as normal.

This seems moderately surprising to me. What it means, in practice, is that calling `HTTPResponse.close()` in both urllib3 and httplib/http.client does not guarantee the closure of the backing TCP connection: instead, in both cases it says "I'm done with the TCP connection, but the underlying connection is free to re-use it". The problems this causes can be seen in the `_error_catcher` context manager on the HTTPResponse, which does not actually call the class `close` method, presumably because it's too deficient to do the job.

This behaviour affects the chunked transfer encoding decoding logic, which calls `self.close()` and therefore may incorrectly keep the connection alive, though it does not itself return the connection to the pool.

I believe it _should_ be safe to have `close` close the underlying connection if it is present. As something of an optimisation, we can then safely assume that `close` can call `release_conn`, which will allow us to keep hold of the `HTTPConnection` object in a situation where otherwise we might lose it.
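A hedged sketch of the user-visible expectation being described (the URL is a placeholder): after `close()`, one would expect the socket to be gone, but on a keep-alive response it may live on.

```python
import urllib3

http = urllib3.PoolManager()
r = http.request('GET', 'http://example.com/', preload_content=False)
r.close()
# Intuitively the backing TCP connection is now closed; in the behaviour
# described above, a kept-alive connection may instead remain open.
```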
Reason 89 for dropping httplib.
2016-01-14T13:35:51Z
[]
[]
urllib3/urllib3
795
urllib3__urllib3-795
[ "791" ]
f69268ac53d2c4fdf02c35f3ce42efa5f8e6c62e
diff --git a/urllib3/contrib/pyopenssl.py b/urllib3/contrib/pyopenssl.py --- a/urllib3/contrib/pyopenssl.py +++ b/urllib3/contrib/pyopenssl.py @@ -54,9 +54,17 @@ import OpenSSL.SSL from pyasn1.codec.der import decoder as der_decoder from pyasn1.type import univ, constraint -from socket import _fileobject, timeout, error as SocketError +from socket import timeout, error as SocketError + +try: # Platform-specific: Python 2 + from socket import _fileobject +except ImportError: # Platform-specific: Python 3 + _fileobject = None + from urllib3.packages.backports.makefile import backport_makefile + import ssl import select +import six from .. import connection from .. import util @@ -104,6 +112,7 @@ def inject_into_urllib3(): connection.ssl_wrap_socket = ssl_wrap_socket util.HAS_SNI = HAS_SNI + util.IS_PYOPENSSL = True def extract_from_urllib3(): @@ -111,6 +120,7 @@ def extract_from_urllib3(): connection.ssl_wrap_socket = orig_connection_ssl_wrap_socket util.HAS_SNI = orig_util_HAS_SNI + util.IS_PYOPENSSL = False # Note: This is a slightly bug-fixed version of same from ndg-httpsclient. @@ -135,7 +145,7 @@ def get_subj_alt_name(peer_cert): for i in range(peer_cert.get_extension_count()): ext = peer_cert.get_extension(i) ext_name = ext.get_short_name() - if ext_name != 'subjectAltName': + if ext_name != b'subjectAltName': continue # PyOpenSSL returns extension data in ASN.1 encoded form @@ -167,13 +177,17 @@ def __init__(self, connection, socket, suppress_ragged_eofs=True): self.socket = socket self.suppress_ragged_eofs = suppress_ragged_eofs self._makefile_refs = 0 + self._closed = False def fileno(self): return self.socket.fileno() - def makefile(self, mode, bufsize=-1): - self._makefile_refs += 1 - return _fileobject(self, mode, bufsize, close=True) + # Copy-pasted from Python 3.5 source code + def _decref_socketios(self): + if self._makefile_refs > 0: + self._makefile_refs -= 1 + if self._closed: + self.close() def recv(self, *args, **kwargs): try: @@ -198,6 +212,27 @@ def recv(self, *args, **kwargs): else: return data + def recv_into(self, *args, **kwargs): + try: + return self.connection.recv_into(*args, **kwargs) + except OpenSSL.SSL.SysCallError as e: + if self.suppress_ragged_eofs and e.args == (-1, 'Unexpected EOF'): + return 0 + else: + raise SocketError(str(e)) + except OpenSSL.SSL.ZeroReturnError as e: + if self.connection.get_shutdown() == OpenSSL.SSL.RECEIVED_SHUTDOWN: + return 0 + else: + raise + except OpenSSL.SSL.WantReadError: + rd, wd, ed = select.select( + [self.socket], [], [], self.socket.gettimeout()) + if not rd: + raise timeout('The read operation timed out') + else: + return self.recv_into(*args, **kwargs) + def settimeout(self, timeout): return self.socket.settimeout(timeout) @@ -225,6 +260,7 @@ def shutdown(self): def close(self): if self._makefile_refs < 1: try: + self._closed = True return self.connection.close() except OpenSSL.SSL.Error: return @@ -262,6 +298,16 @@ def _drop(self): self._makefile_refs -= 1 +if _fileobject: # Platform-specific: Python 2 + def makefile(self, mode, bufsize=-1): + self._makefile_refs += 1 + return _fileobject(self, mode, bufsize, close=True) +else: # Platform-specific: Python 3 + makefile = backport_makefile + +WrappedSocket.makefile = makefile + + def _verify_callback(cnx, x509, err_no, err_depth, return_code): return err_no == 0 @@ -293,6 +339,8 @@ def ssl_wrap_socket(sock, keyfile=None, certfile=None, cert_reqs=None, ctx.set_cipher_list(DEFAULT_SSL_CIPHER_LIST) cnx = OpenSSL.SSL.Connection(ctx, sock) + if isinstance(server_hostname, 
six.text_type): # Platform-specific: Python 3 + server_hostname = server_hostname.encode('utf-8') cnx.set_tlsext_host_name(server_hostname) cnx.set_connect_state() while True: diff --git a/urllib3/packages/backports/__init__.py b/urllib3/packages/backports/__init__.py new file mode 100644 diff --git a/urllib3/packages/backports/makefile.py b/urllib3/packages/backports/makefile.py new file mode 100644 --- /dev/null +++ b/urllib3/packages/backports/makefile.py @@ -0,0 +1,53 @@ +# -*- coding: utf-8 -*- +""" +backports.makefile +~~~~~~~~~~~~~~~~~~ + +Backports the Python 3 ``socket.makefile`` method for use with anything that +wants to create a "fake" socket object. +""" +import io + +from socket import SocketIO + + +def backport_makefile(self, mode="r", buffering=None, encoding=None, + errors=None, newline=None): + """ + Backport of ``socket.makefile`` from Python 3.5. + """ + if not set(mode) <= set(["r", "w", "b"]): + raise ValueError( + "invalid mode %r (only r, w, b allowed)" % (mode,) + ) + writing = "w" in mode + reading = "r" in mode or not writing + assert reading or writing + binary = "b" in mode + rawmode = "" + if reading: + rawmode += "r" + if writing: + rawmode += "w" + raw = SocketIO(self, rawmode) + self._makefile_refs += 1 + if buffering is None: + buffering = -1 + if buffering < 0: + buffering = io.DEFAULT_BUFFER_SIZE + if buffering == 0: + if not binary: + raise ValueError("unbuffered streams must be binary") + return raw + if reading and writing: + buffer = io.BufferedRWPair(raw, raw, buffering) + elif reading: + buffer = io.BufferedReader(raw, buffering) + else: + assert writing + buffer = io.BufferedWriter(raw, buffering) + if binary: + return buffer + text = io.TextIOWrapper(buffer, encoding, errors, newline) + text.mode = mode + return text diff --git a/urllib3/util/__init__.py b/urllib3/util/__init__.py --- a/urllib3/util/__init__.py +++ b/urllib3/util/__init__.py @@ -6,6 +6,7 @@ from .ssl_ import ( SSLContext, HAS_SNI, + IS_PYOPENSSL, assert_fingerprint, resolve_cert_reqs, resolve_ssl_version, @@ -26,6 +27,7 @@ __all__ = ( 'HAS_SNI', + 'IS_PYOPENSSL', 'SSLContext', 'Retry', 'Timeout', diff --git a/urllib3/util/ssl_.py b/urllib3/util/ssl_.py --- a/urllib3/util/ssl_.py +++ b/urllib3/util/ssl_.py @@ -12,6 +12,7 @@ SSLContext = None HAS_SNI = False create_default_context = None +IS_PYOPENSSL = False # Maps the length of a digest to a possible hash function producing this digest HASHFUNC_MAP = {
diff --git a/test/contrib/test_pyopenssl.py b/test/contrib/test_pyopenssl.py --- a/test/contrib/test_pyopenssl.py +++ b/test/contrib/test_pyopenssl.py @@ -1,9 +1,6 @@ from nose.plugins.skip import SkipTest from urllib3.packages import six -if six.PY3: - raise SkipTest('Testing of PyOpenSSL disabled on PY3') - try: from urllib3.contrib.pyopenssl import (inject_into_urllib3, extract_from_urllib3) diff --git a/test/with_dummyserver/test_https.py b/test/with_dummyserver/test_https.py --- a/test/with_dummyserver/test_https.py +++ b/test/with_dummyserver/test_https.py @@ -38,7 +38,7 @@ ) from urllib3.packages import six from urllib3.util.timeout import Timeout -from urllib3.util.ssl_ import HAS_SNI +import urllib3.util as util ResourceWarning = getattr( @@ -77,11 +77,11 @@ def test_verified(self): r = https_pool.request('GET', '/') self.assertEqual(r.status, 200) - if sys.version_info >= (2, 7, 9): + if sys.version_info >= (2, 7, 9) or util.IS_PYOPENSSL: self.assertFalse(warn.called, warn.call_args_list) else: self.assertTrue(warn.called) - if HAS_SNI: + if util.HAS_SNI: call = warn.call_args_list[0] else: call = warn.call_args_list[1] @@ -181,9 +181,9 @@ def test_ssl_unverified_with_ca_certs(self): self.assertTrue(warn.called) calls = warn.call_args_list - if sys.version_info >= (2, 7, 9): + if sys.version_info >= (2, 7, 9) or util.IS_PYOPENSSL: category = calls[0][0][1] - elif HAS_SNI: + elif util.HAS_SNI: category = calls[1][0][1] else: category = calls[2][0][1] @@ -237,8 +237,9 @@ def test_assert_fingerprint_md5(self): cert_reqs='CERT_REQUIRED', ca_certs=DEFAULT_CA) - https_pool.assert_fingerprint = 'CA:84:E1:AD0E5a:ef:2f:C3:09' \ - ':E7:30:F8:CD:C8:5B' + https_pool.assert_fingerprint = 'F2:06:5A:42:10:3F:45:1C:17:FE:E6:' \ + '07:1E:8A:86:E5' + https_pool.request('GET', '/') def test_assert_fingerprint_sha1(self): @@ -246,8 +247,8 @@ def test_assert_fingerprint_sha1(self): cert_reqs='CERT_REQUIRED', ca_certs=DEFAULT_CA) - https_pool.assert_fingerprint = 'CC:45:6A:90:82:F7FF:C0:8218:8e:' \ - '7A:F2:8A:D7:1E:07:33:67:DE' + https_pool.assert_fingerprint = '92:81:FE:85:F7:0C:26:60:EC:D6:B3:' \ + 'BF:93:CF:F9:71:CC:07:7D:0A' https_pool.request('GET', '/') def test_assert_fingerprint_sha256(self): @@ -255,9 +256,9 @@ def test_assert_fingerprint_sha256(self): cert_reqs='CERT_REQUIRED', ca_certs=DEFAULT_CA) - https_pool.assert_fingerprint = ('9A:29:9D:4F:47:85:1C:51:23:F5:9A:A3:' - '0F:5A:EF:96:F9:2E:3C:22:2E:FC:E8:BC:' - '0E:73:90:37:ED:3B:AA:AB') + https_pool.assert_fingerprint = ('C5:4D:0B:83:84:89:2E:AE:B4:58:BB:12:' + 'F7:A6:C4:76:05:03:88:D8:57:65:51:F3:' + '1E:60:B0:8B:70:18:64:E6') https_pool.request('GET', '/') def test_assert_invalid_fingerprint(self): @@ -294,8 +295,8 @@ def test_verify_none_and_good_fingerprint(self): cert_reqs='CERT_NONE', ca_certs=DEFAULT_CA_BAD) - https_pool.assert_fingerprint = 'CC:45:6A:90:82:F7FF:C0:8218:8e:' \ - '7A:F2:8A:D7:1E:07:33:67:DE' + https_pool.assert_fingerprint = '92:81:FE:85:F7:0C:26:60:EC:D6:B3:' \ + 'BF:93:CF:F9:71:CC:07:7D:0A' https_pool.request('GET', '/') def test_good_fingerprint_and_hostname_mismatch(self): @@ -303,8 +304,8 @@ def test_good_fingerprint_and_hostname_mismatch(self): cert_reqs='CERT_REQUIRED', ca_certs=DEFAULT_CA) - https_pool.assert_fingerprint = 'CC:45:6A:90:82:F7FF:C0:8218:8e:' \ - '7A:F2:8A:D7:1E:07:33:67:DE' + https_pool.assert_fingerprint = '92:81:FE:85:F7:0C:26:60:EC:D6:B3:' \ + 'BF:93:CF:F9:71:CC:07:7D:0A' https_pool.request('GET', '/') @requires_network @@ -325,8 +326,8 @@ def test_https_timeout(self): timeout=timeout, 
retries=False, cert_reqs='CERT_REQUIRED') https_pool.ca_certs = DEFAULT_CA - https_pool.assert_fingerprint = 'CC:45:6A:90:82:F7FF:C0:8218:8e:' \ - '7A:F2:8A:D7:1E:07:33:67:DE' + https_pool.assert_fingerprint = '92:81:FE:85:F7:0C:26:60:EC:D6:B3:' \ + 'BF:93:CF:F9:71:CC:07:7D:0A' timeout = Timeout(total=None) https_pool = HTTPSConnectionPool(self.host, self.port, timeout=timeout, @@ -385,7 +386,7 @@ def new_pool(timeout, cert_reqs='CERT_REQUIRED'): timeout=Timeout(total=None, connect=0.001)) def test_enhanced_ssl_connection(self): - fingerprint = 'CC:45:6A:90:82:F7FF:C0:8218:8e:7A:F2:8A:D7:1E:07:33:67:DE' + fingerprint = '92:81:FE:85:F7:0C:26:60:EC:D6:B3:BF:93:CF:F9:71:CC:07:7D:0A' conn = VerifiedHTTPSConnection(self.host, self.port) https_pool = HTTPSConnectionPool(self.host, self.port,
Add PyOpenSSL tests to the regular test runs. I've just noticed that, while we have tests that can run under PyOpenSSL, they don't run under either tox or Travis. I suggest that we promote those tests to full members of our test suite, especially as PyOpenSSL can be critically important for some features. While we're there, we should add `TestSSL` to the import from `test_socketlevel`, as the `SSL` tests are probably a good thing to run under PyOpenSSL as well! This will give us a hook for writing tests that only run under PyOpenSSL, and the expectation that they will actually run on multiple platforms.
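For reference, the mechanism the contrib tests use to opt into PyOpenSSL is to swap urllib3's TLS machinery at module setup time and restore it afterwards; a minimal sketch of that pattern, mirroring the module-level hooks visible in the test patches here (both functions are real `urllib3.contrib.pyopenssl` entry points):

```python
from urllib3.contrib.pyopenssl import inject_into_urllib3, extract_from_urllib3


def setup_module():
    # Route urllib3's TLS handling through PyOpenSSL for every test in this module.
    inject_into_urllib3()


def teardown_module():
    # Restore the stdlib ssl implementation so unrelated tests are unaffected.
    extract_from_urllib3()
```

Promoting these to the regular runs is then mostly a matter of installing PyOpenSSL in the tox/Travis environments so the module stops being skipped.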
2016-01-26T12:51:00Z
[]
[]
urllib3/urllib3
807
urllib3__urllib3-807
[ "805" ]
3ecc0cee79971e9cae9412266e07001c7922104b
diff --git a/urllib3/connectionpool.py b/urllib3/connectionpool.py --- a/urllib3/connectionpool.py +++ b/urllib3/connectionpool.py @@ -556,6 +556,10 @@ def urlopen(self, method, url, body=None, headers=None, retries=None, # complains about UnboundLocalError. err = None + # Keep track of whether we cleanly exited the except block. This + # ensures we do proper cleanup in finally. + clean_exit = False + try: # Request a connection from the queue. timeout_obj = self._get_timeout(timeout) @@ -585,10 +589,8 @@ def urlopen(self, method, url, body=None, headers=None, retries=None, connection=response_conn, **response_kw) - # else: - # The connection will be put back into the pool when - # ``response.release_conn()`` is called (implicitly by - # ``response.read()``) + # Everything went great! + clean_exit = True except Empty: # Timed out by queue. @@ -598,22 +600,19 @@ def urlopen(self, method, url, body=None, headers=None, retries=None, # Close the connection. If a connection is reused on which there # was a Certificate error, the next request will certainly raise # another Certificate error. - conn = conn and conn.close() - release_conn = True + clean_exit = False raise SSLError(e) except SSLError: # Treat SSLError separately from BaseSSLError to preserve # traceback. - conn = conn and conn.close() - release_conn = True + clean_exit = False raise except (TimeoutError, HTTPException, SocketError, ProtocolError) as e: # Discard the connection for these exceptions. It will be # be replaced during the next _get_conn() call. - conn = conn and conn.close() - release_conn = True + clean_exit = False if isinstance(e, (SocketError, NewConnectionError)) and self.proxy: e = ProxyError('Cannot connect to proxy.', e) @@ -628,6 +627,14 @@ def urlopen(self, method, url, body=None, headers=None, retries=None, err = e finally: + if not clean_exit: + # We hit some kind of exception, handled or otherwise. We need + # to throw the connection away unless explicitly told not to. + # Close the connection, set the variable to None, and make sure + # we put the None back in the pool to avoid leaking it. + conn = conn and conn.close() + release_conn = True + if release_conn: # Put the connection back to be reused. If the connection is # expired then it will be None, which will get replaced with a
diff --git a/test/test_connectionpool.py b/test/test_connectionpool.py --- a/test/test_connectionpool.py +++ b/test/test_connectionpool.py @@ -283,6 +283,33 @@ def test_ca_certs_default_cert_required(self): conn = pool._get_conn() self.assertEqual(conn.cert_reqs, 'CERT_REQUIRED') + def test_cleanup_on_extreme_connection_error(self): + """ + This test validates that we clean up properly even on exceptions that + we'd not otherwise catch, i.e. those that inherit from BaseException + like KeyboardInterrupt or gevent.Timeout. See #805 for more details. + """ + class RealBad(BaseException): + pass + + def kaboom(*args, **kwargs): + raise RealBad() + + c = connection_from_url('http://localhost:80') + c._make_request = kaboom + + initial_pool_size = c.pool.qsize() + + try: + # We need to release_conn this way or we'd put it away regardless. + c.urlopen('GET', '/', release_conn=False) + except RealBad: + pass + + new_pool_size = c.pool.qsize() + self.assertEqual(initial_pool_size, new_pool_size) + + if __name__ == '__main__': unittest.main()
Connection pool queue can still be exhausted by gevent timeouts A similar error was discussed here https://github.com/shazow/urllib3/issues/644 already and some fix applied, but the problem still partly exists. Consider this code: ``` python from gevent import monkey, spawn, sleep, Timeout, joinall monkey.patch_all() import sys from itertools import product from urllib3.connectionpool import HTTPConnectionPool def timed_read(cp, i, timeout, release=None, preload_content=True): try: with Timeout(timeout): resp = cp.urlopen('GET', 'http://httpbin.org/redirect/:1', release_conn=release, preload_content=preload) print "%s opened" % i sys.stdout.flush() data = resp.data except Timeout: print "%s timed out" % i else: content_len = int(resp.headers['content-length']) if len(data) != content_len: print "%s incomplete read: %s/%s" % (i, len(data), content_len) else: print "%s finished" % i sys.stdout.flush() def main(timeout=10, release=None, preload=True, poolsize=2): print '-' * 10 + " timeout=%s preload=%s release=%s " % (timeout, preload, release) + '-' * 10 cp = HTTPConnectionPool('httpbin.org', block=True, maxsize=poolsize, timeout=None) group = [] for i in range(3): group.append(spawn(timed_read, cp, i, timeout, release=release, preload_content=preload)) joinall(group) current_size = cp.pool.qsize() if current_size != poolsize: print "poolsize decreased: %s" % current_size for timeout, preload in product([10, 4, 0.1], [True, False]): main(timeout=timeout, preload=preload) ``` Setting a timeout that way may seem stupid, as it could be set on the connection itself, but it's just an example for having a timeout running on a longer running task including several calls into the connection pool. What happens is, that the connections are not properly returned back into the queue, and the queue finally runs empty, and any further call to it blocks forever: <pre> ---------- timeout=10 preload=True release=None ---------- 1 opened 1 finished 0 opened 0 finished 2 opened 2 finished ---------- timeout=10 preload=False release=None ---------- 0 opened 0 finished 1 opened 1 finished 2 opened 2 finished ---------- timeout=4 preload=True release=None ---------- 1 opened 1 finished 0 timed out 2 timed out ---------- timeout=4 preload=False release=None ---------- 1 opened 1 finished 0 opened 0 finished 2 timed out poolsize decreased: 1 ---------- timeout=0.1 preload=True release=None ---------- 0 timed out 1 timed out 2 timed out ---------- timeout=0.1 preload=False release=None ---------- 0 timed out 1 timed out 2 timed out poolsize decreased: 0 </pre> Simply setting `preload_content` to `True` is not an option, as e.g. `False` is set by default by requests. The main problem is in https://github.com/shazow/urllib3/blob/master/urllib3/response.py#L246 . The `Timeout` does not derive from `Exception`, so `self._original_response` is not closed in the except clause, and therefore the connection is not released in the finally clause.
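A minimal, self-contained illustration of the failure mode described above; no gevent is needed, since `FakeTimeout` simply stands in for `gevent.Timeout`, which also derives from `BaseException` rather than `Exception`:

```python
class FakeTimeout(BaseException):
    """Stand-in for gevent.Timeout, which subclasses BaseException, not Exception."""


def read_with_cleanup():
    try:
        raise FakeTimeout()
    except Exception:
        # This is where the connection-releasing cleanup lives; it never runs
        # for BaseException subclasses, so the connection is never put back.
        print("cleanup ran")
        raise


try:
    read_with_cleanup()
except FakeTimeout:
    print("timeout escaped without any cleanup")
```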
Urgh, so `gevent.Timeout` is an instance of `BaseException`, which is pretty annoying. I understand why they did it (didn't want overbroad `except` clauses preventing the timeout from firing), but it leads to awkward problems like these. The simplest way to do this might be just to broaden _our_ `except` clause. Because there's no way we can swallow the exception inside our `except` block, we may want to write `except BaseException` instead and then comment why we did it. Of course, this is a fairly nonsensical arms race, so the other option is to have an `else` clause that sets a boolean on clean exit, and if that boolean isn't present have the `finally` clause close and dispose of the connection. That would move almost all the error handling into `finally`, so it'd be fairly bloody similar to the current code, but might appease those who object to overbroad `except` blocks. @shazow, I'm happy to do the legwork to fix this: do you have a preference as to how? @Lukasa Yea I think a cleanup boolean + finally is the way to go for complex cleanups based on how the rest of our code has been evolving. Looks good to me. @ml31415 Should be fixed in master, can you confirm? (Let's re-open if it's still an issue.) Hmmm, unfortunately it looks like it's still there. Looks like i overlooked something initially :/ <pre> ---------- timeout=0.1 preload=False release=None ---------- 0 timed out 1 timed out 2 timed out poolsize decreased: 0 </pre> Though I think the changes in #805 are the right thing to do. Yea we're keeping the changes, just removing the changelog entry that says it fixes a gevent thing since it's not fixed. :) @ml31415 Do we have a shorter repro scenario than the one above? Ideally one it's not a problem for me to run locally? Hmm, not shorter, but at least local: ``` python from gevent import monkey, spawn, sleep, Timeout, joinall from gevent.server import StreamServer monkey.patch_all() import sys from itertools import product from urllib3.connectionpool import HTTPConnectionPool RESPONSE = """ HTTP/1.1 200 OK Server: gevent Content-Type: text/html; charset=windows-1251 Content-Encoding: identity Content-Length: 10 asdfasdf """.lstrip().replace('\n', '\r\n') # Do not remove the final empty line! 
class RawServer(object): def __init__(self, response, host='127.0.0.1'): self.response = response self.server = StreamServer((host, 0), self.handler) def handler(self, sock, addr): sleep(1) sock.sendall(self.response) @property def url(self): return 'http://%s:%s' % self.server.address def __enter__(self): self.server.start() return self def __exit__(self, *args, **kwargs): self.server.stop() def timed_read(cp, i, timeout, host, port, release=None, preload_content=True): try: with Timeout(timeout): resp = cp.urlopen('GET', 'http://%s:%s' % (host, port), release_conn=release, preload_content=preload) print "%s opened" % i data = resp.data except Timeout: print "%s timed out" % i else: print "%s finished" % i def main(timeout=10, release=None, preload=True, poolsize=2): with RawServer(RESPONSE) as s: host, port = s.server.address cp = HTTPConnectionPool(*s.server.address, block=True, maxsize=poolsize, timeout=None) print '-' * 10 + " timeout=%s preload=%s release=%s " % (timeout, preload, release) + '-' * 10 group = [] for i in range(3): group.append(spawn(timed_read, cp, i, timeout, host, port, release=release, preload_content=preload)) joinall(group) current_size = cp.pool.qsize() if current_size != poolsize: print "poolsize decreased: %s" % current_size for timeout, preload in product([2, 0.1], [True, False]): main(timeout=timeout, preload=preload) ``` I'll try to get the whole thing more spot on than this. There is another scenario, that guaratees an exhausted pool, when the greenlet / thread fails, before the response is read (the timeout stuff is only there, so it doesn't block eternally on the empty loop): ``` python def timed_read(cp, i, timeout, host, port, release=None, preload_content=True): try: with Timeout(timeout): resp = cp.urlopen('GET', 'http://%s:%s' % (host, port), release_conn=release, preload_content=preload) print "%s opened" % i 1 / 0 data = resp.data except Timeout: print "%s timed out" % i except ZeroDivisionError: print "%s intentionally failed" % i else: print "%s finished" % i ``` I guess the best solution to this would be, if the response supports the with protocol, so we could do smth like: ``` python with cp.urlopen('asdf.org') as resp: 1 / 0 resp.data ``` And it still cleans itself up afterwards. Shorter test version: ``` python def test_response_closed(): class Raiser(object): def read(self): raise Timeout() with RawServer(RESPONSE) as s: cp = HTTPConnectionPool(*s.server.address) resp = cp.urlopen('GET', s.url, preload_content=False) resp._fp = Raiser() try: resp.data except Timeout: print "timed out" assert resp._connection is None assert resp._original_response.isclosed() ``` @ml31415 Just to be clear, are you testing with the master branch? For me, the master branch passes your smaller test. ``` gevent==1.0.2 greenlet==0.4.9 -e [email protected]:Lukasa/urllib3.git@3ecc0cee79971e9cae9412266e07001c7922104b#egg=urllib3 wheel==0.26.0 ``` Hmm, yes the smaller test runs now, yes. Still the other one doesn't. And I actually don't really understand how it still fails :( Ok, so the reason you're still seeing this problem is that, in basically all these cases the timeout is _actually_ occurring in the connection handling code. This is not covered by the fix in the #806, because I didn't touch that other block there. I think it likely needs a similar change. 
The change for that block is going to be a bit trickier, in part because that block _already_ has a condition variable for releasing the connection, and even worse, that condition variable is passed _into_ the function (so I can't default it to something that will behave sensibly). This is definitely fixable, though it's going to be a bit frustrating to have two connection-close metavariables.
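As a rough sketch of the "clean exit flag plus finally" shape being discussed here (the pool helpers and names are illustrative placeholders, not urllib3's actual internals), the cleanup keys off a boolean that is only set on the success path, so it also runs for exceptions the `except` clauses never see:

```python
def urlopen_like(pool, make_request, release_conn=True):
    conn = pool.get_conn()                # illustrative pool API
    clean_exit = False
    try:
        response = make_request(conn)
        clean_exit = True
        return response
    finally:
        if not clean_exit:
            # Any exception lands here, including BaseException subclasses such
            # as gevent.Timeout: drop the broken connection and free its slot.
            conn = conn and conn.close()  # close() returns None, so conn becomes None
            release_conn = True
        if release_conn:
            pool.put_conn(conn)           # None is replaced by a fresh connection later
```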
2016-02-29T16:28:04Z
[]
[]
urllib3/urllib3
818
urllib3__urllib3-818
[ "793" ]
f7980a68a6d12f3dce2d2c51fb4a308791ef54aa
diff --git a/urllib3/fields.py b/urllib3/fields.py --- a/urllib3/fields.py +++ b/urllib3/fields.py @@ -36,11 +36,11 @@ def format_header_param(name, value): result = '%s="%s"' % (name, value) try: result.encode('ascii') - except UnicodeEncodeError: + except (UnicodeEncodeError, UnicodeDecodeError): pass else: return result - if not six.PY3: # Python 2: + if not six.PY3 and isinstance(value, six.text_type): # Python 2: value = value.encode('utf-8') value = email.utils.encode_rfc2231(value, 'utf-8') value = '%s*=%s' % (name, value)
diff --git a/test/__init__.py b/test/__init__.py --- a/test/__init__.py +++ b/test/__init__.py @@ -33,7 +33,6 @@ def setUp(): clear_warnings() warnings.simplefilter('ignore', HTTPWarning) - def onlyPy26OrOlder(test): """Skips this test unless you are on Python2.6.x or earlier.""" @@ -67,6 +66,17 @@ def wrapper(*args, **kwargs): return test(*args, **kwargs) return wrapper +def onlyPy2(test): + """Skips this test unless you are on Python 2.x""" + + @functools.wraps(test) + def wrapper(*args, **kwargs): + msg = "{name} requires Python 2.x to run".format(name=test.__name__) + if six.PY3: + raise SkipTest(msg) + return test(*args, **kwargs) + return wrapper + def onlyPy3(test): """Skips this test unless you are on Python3.x""" diff --git a/test/test_fields.py b/test/test_fields.py --- a/test/test_fields.py +++ b/test/test_fields.py @@ -1,7 +1,8 @@ import unittest from urllib3.fields import guess_content_type, RequestField -from urllib3.packages.six import u +from urllib3.packages.six import u, PY3 +from . import onlyPy2 class TestRequestField(unittest.TestCase): @@ -47,3 +48,9 @@ def test_render_part(self): field = RequestField('somename', 'data') param = field._render_part('filename', u('n\u00e4me')) self.assertEqual(param, "filename*=utf-8''n%C3%A4me") + + @onlyPy2 + def test_render_unicode_bytes_py2(self): + field = RequestField('somename', 'data') + param = field._render_part('filename', 'n\xc3\xa4me') + self.assertEqual(param, "filename*=utf-8''n%C3%A4me")
UnicodeDecodeError in format_header_params This issue was discussed here: https://github.com/kennethreitz/requests/issues/2639 and it seemed like the consensus was that this should be fixed in urllib3.
@jonathan-s did you test that this is still a problem? Yup, it still occurrs. I'd be glad to handle this - relevant code: https://github.com/shazow/urllib3/blob/571fd737863fa0c60df24bce1fe4972719fd7ed2/urllib3/fields.py#L22-L47 Recapping the use case: this is generally coming into play during multipart/form-data uploads of files with Unicode names when using Python 2. This isn't coming into play in Python 3; the flow there is that we're only encountering a caught UnicodeEncodeError at line 39, and no UnicodeDecodeError; this is because Python 3 strings are native unicode and only the encode operation is necessary. From there, we're skipping over lines 41 through 44, and just passing the Unicode string to encode_rfc_2231(), which builds RFC-2231-compliant ASCII strings from Unicode. In comparison, at line 38 in Python 2, we encounter a UnicodeDecodeError. It seems like str.encode() is trying to load the bytes as an ASCII string before dumping the string to ASCII-encoded bytes. If it weren't for this, we'd also encounter a UnicodeEncodeError. If we pass both error types, then we'll encounter another UnicodeDecodeError at line 44, because we'll be again attempting the str.encode operation, inclusive of trying to decode the bytes as ASCII (not possible). How I want to handle this: First, I want to catch both error types at line 39. Second, I don't actually think lines 43 and 44 are necessary - as US-ASCII is a strict subset of UTF-8, and str == bytes in Python 2, the operation isn't actually doing anything. This removes the possibility for the second UnicodeDecodeError, and lets the string pass through encode_rfc_2231(), as is the current behavior in Python 3 (treat the bytes as UTF-8, and escape them appropriately for transmission over ASCII). Thoughts? EDIT: Neglected to realize that some form of lines 43 and 44 are necessary to handle Python2 Unicode objects. @haikuginger Would you be up for making a quick PR for feedback? Visualizing all the changes through written word is painful. :)
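To make the intended fallback concrete, here is a small sketch (Python 3 shown for brevity; the variable names are invented) of the quoting logic under discussion, built on the stdlib `email.utils.encode_rfc2231` helper that the real code relies on:

```python
import email.utils

name, value = "filename", "n\u00e4me"     # a non-ASCII filename

param = '%s="%s"' % (name, value)
try:
    param.encode("ascii")                 # plain quoting is fine for ASCII-only values
except UnicodeEncodeError:
    # Fall back to RFC 2231 extended notation for non-ASCII values.
    param = "%s*=%s" % (name, email.utils.encode_rfc2231(value, "utf-8"))

print(param)                              # filename*=utf-8''n%C3%A4me
```

On Python 2 the extra wrinkle is exactly the one described above: a byte string first has to be treated as UTF-8 (or a unicode object encoded to UTF-8) before it can safely pass through the same helper.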
2016-03-16T18:43:19Z
[]
[]
urllib3/urllib3
840
urllib3__urllib3-840
[ "838" ]
914a21844d9f1fcc599d35719606f8c6a8ee54b0
diff --git a/urllib3/util/connection.py b/urllib3/util/connection.py --- a/urllib3/util/connection.py +++ b/urllib3/util/connection.py @@ -46,6 +46,8 @@ def is_connection_dropped(conn): # Platform-specific # This function is copied from socket.py in the Python 2.7 standard # library test suite. Added to its signature is only `socket_options`. +# One additional modification is that we avoid binding to IPv6 servers +# discovered in DNS if the system doesn't have IPv6 functionality. def create_connection(address, timeout=socket._GLOBAL_DEFAULT_TIMEOUT, source_address=None, socket_options=None): """Connect to *address* and return the socket object. @@ -64,14 +66,19 @@ def create_connection(address, timeout=socket._GLOBAL_DEFAULT_TIMEOUT, if host.startswith('['): host = host.strip('[]') err = None - for res in socket.getaddrinfo(host, port, 0, socket.SOCK_STREAM): + + # Using the value from allowed_gai_family() in the context of getaddrinfo lets + # us select whether to work with IPv4 DNS records, IPv6 records, or both. + # The original create_connection function always returns all records. + family = allowed_gai_family() + + for res in socket.getaddrinfo(host, port, family, socket.SOCK_STREAM): af, socktype, proto, canonname, sa = res sock = None try: sock = socket.socket(af, socktype, proto) # If provided, set socket level options before connecting. - # This is the only addition urllib3 makes to this function. _set_socket_options(sock, socket_options) if timeout is not socket._GLOBAL_DEFAULT_TIMEOUT: @@ -99,3 +106,39 @@ def _set_socket_options(sock, options): for opt in options: sock.setsockopt(*opt) + + +def allowed_gai_family(): + """This function is designed to work in the context of + getaddrinfo, where family=socket.AF_UNSPEC is the default and + will perform a DNS search for both IPv6 and IPv4 records.""" + + family = socket.AF_INET + if HAS_IPV6: + family = socket.AF_UNSPEC + return family + + +def _has_ipv6(host): + """ Returns True if the system can bind an IPv6 address. """ + sock = None + has_ipv6 = False + + if socket.has_ipv6: + # has_ipv6 returns true if cPython was compiled with IPv6 support. + # It does not tell us if the system has IPv6 support enabled. To + # determine that we must bind to an IPv6 address. + # https://github.com/shazow/urllib3/pull/611 + # https://bugs.python.org/issue658327 + try: + sock = socket.socket(socket.AF_INET6) + sock.bind((host, 0)) + has_ipv6 = True + except Exception: + pass + + if sock: + sock.close() + return has_ipv6 + +HAS_IPV6 = _has_ipv6('::1')
diff --git a/test/test_util.py b/test/test_util.py --- a/test/test_util.py +++ b/test/test_util.py @@ -3,6 +3,7 @@ import logging import unittest import ssl +import socket from itertools import chain from mock import patch, Mock @@ -28,7 +29,10 @@ SSLError, SNIMissingWarning, ) - +from urllib3.util.connection import ( + allowed_gai_family, + _has_ipv6 +) from urllib3.util import is_fp_closed, ssl_ from . import clear_warnings @@ -442,3 +446,29 @@ def test_const_compare_digest_fallback(self): incorrect = hashlib.sha256(b'xyz').digest() self.assertFalse(_const_compare_digest_backport(target, incorrect)) + + def test_has_ipv6_disabled_on_compile(self): + with patch('socket.has_ipv6', False): + self.assertFalse(_has_ipv6('::1')) + + def test_has_ipv6_enabled_but_fails(self): + with patch('socket.has_ipv6', True): + with patch('socket.socket') as mock: + instance = mock.return_value + instance.bind = Mock(side_effect=Exception('No IPv6 here!')) + self.assertFalse(_has_ipv6('::1')) + + def test_has_ipv6_enabled_and_working(self): + with patch('socket.has_ipv6', True): + with patch('socket.socket') as mock: + instance = mock.return_value + instance.bind.return_value = True + self.assertTrue(_has_ipv6('::1')) + + def test_ip_family_ipv6_enabled(self): + with patch('urllib3.util.connection.HAS_IPV6', True): + self.assertEqual(allowed_gai_family(), socket.AF_UNSPEC) + + def test_ip_family_ipv6_disabled(self): + with patch('urllib3.util.connection.HAS_IPV6', False): + self.assertEqual(allowed_gai_family(), socket.AF_INET)
urllib3 attempts to use IPv6 even when IPv6 is disabled This is an issue when running on a server without IPv6 (must be disabled because the network does not support it). Example when connecting to https://graph.facebook.com using requests and IPv4 happens to fail: ``` HTTPSConnectionPool(host='graph.facebook.com', port=443): Max retries exceeded with url: /v2.5/me/feed (Caused by NewConnectionError('<requests.packages.urllib3.connection.VerifiedHTTPSConnection object at 0x7f4dbd158518>: Failed to establish a new connection: [Errno 97] Address family not supported by protocol',)) Traceback (most recent call last): File "/home/lib/python3.4/site-packages/requests/packages/urllib3/connection.py", line 137, in _new_conn (self.host, self.port), self.timeout, **extra_kw) File "/home/lib/python3.4/site-packages/requests/packages/urllib3/util/connection.py", line 91, in create_connection raise err File "/home/lib/python3.4/site-packages/requests/packages/urllib3/util/connection.py", line 71, in create_connection sock = socket.socket(af, socktype, proto) File "/usr/lib/python3.4/socket.py", line 126, in __init__ _socket.socket.__init__(self, family, type, proto, fileno) OSError: [Errno 97] Address family not supported by protocol ``` urllib3 should throw an exception after exhausting all IPv4 options instead of trying (and invariably failing) IPv6. See closed issue https://github.com/kennethreitz/requests/issues/3084.
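The shape of a fix (and what the patch above implements) is to probe whether the running system can actually bind an IPv6 socket, and then narrow the address family handed to `getaddrinfo`; a condensed sketch:

```python
import socket


def system_has_ipv6(host="::1"):
    # socket.has_ipv6 only says the interpreter was *compiled* with IPv6 support;
    # actually binding a socket tells us whether the running system has it enabled.
    if not socket.has_ipv6:
        return False
    sock = None
    try:
        sock = socket.socket(socket.AF_INET6)
        sock.bind((host, 0))
        return True
    except Exception:
        return False
    finally:
        if sock:
            sock.close()


# AF_UNSPEC asks getaddrinfo for both A and AAAA records; AF_INET restricts
# resolution to IPv4, so we never try to create sockets the system cannot support.
family = socket.AF_UNSPEC if system_has_ipv6() else socket.AF_INET
```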
2016-04-11T02:56:02Z
[]
[]
urllib3/urllib3
866
urllib3__urllib3-866
[ "651" ]
40e0cacc3c538b62e5714a54e8a6742d80c1d360
diff --git a/urllib3/connectionpool.py b/urllib3/connectionpool.py --- a/urllib3/connectionpool.py +++ b/urllib3/connectionpool.py @@ -550,6 +550,17 @@ def urlopen(self, method, url, body=None, headers=None, retries=None, conn = None + # Track whether `conn` needs to be released before + # returning/raising/recursing. Update this variable if necessary, and + # leave `release_conn` constant throughout the function. That way, if + # the function recurses, the original value of `release_conn` will be + # passed down into the recursive call, and its value will be respected. + # + # See issue #651 [1] for details. + # + # [1] <https://github.com/shazow/urllib3/issues/651> + release_this_conn = release_conn + # Merge the proxy headers. Only do this in HTTP. We have to copy the # headers dict so we can safely change it without those changes being # reflected in anyone else's copy. @@ -638,9 +649,9 @@ def urlopen(self, method, url, body=None, headers=None, retries=None, # Close the connection, set the variable to None, and make sure # we put the None back in the pool to avoid leaking it. conn = conn and conn.close() - release_conn = True + release_this_conn = True - if release_conn: + if release_this_conn: # Put the connection back to be reused. If the connection is # expired then it will be None, which will get replaced with a # fresh connection during _get_conn. diff --git a/urllib3/response.py b/urllib3/response.py --- a/urllib3/response.py +++ b/urllib3/response.py @@ -165,6 +165,10 @@ def data(self): if self._fp: return self.read(cache_content=True) + @property + def connection(self): + return self._connection + def tell(self): """ Obtain the number of bytes pulled over the wire so far. May differ from
diff --git a/test/test_connectionpool.py b/test/test_connectionpool.py --- a/test/test_connectionpool.py +++ b/test/test_connectionpool.py @@ -1,3 +1,5 @@ +from __future__ import absolute_import + import unittest from urllib3.connectionpool import ( @@ -6,6 +8,7 @@ HTTPConnectionPool, HTTPSConnectionPool, ) +from urllib3.response import httplib from urllib3.util.timeout import Timeout from urllib3.packages.ssl_match_hostname import CertificateError from urllib3.exceptions import ( @@ -16,7 +19,10 @@ MaxRetryError, ProtocolError, SSLError, + TimeoutError, ) +from urllib3._collections import HTTPHeaderDict +from .test_response import MockChunkedEncodingResponse, MockSock from socket import error as SocketError from ssl import SSLError as BaseSSLError @@ -309,6 +315,58 @@ def kaboom(*args, **kwargs): new_pool_size = c.pool.qsize() self.assertEqual(initial_pool_size, new_pool_size) + def test_release_conn_param_is_respected_after_http_error_retry(self): + """For successful ```urlopen(release_conn=False)```, the connection isn't released, even after a retry. + + This is a regression test for issue #651 [1], where the connection + would be released if the initial request failed, even if a retry + succeeded. + + [1] <https://github.com/shazow/urllib3/issues/651> + """ + + class _raise_once_make_request_function(object): + """Callable that can mimic `_make_request()`. + + Raises the given exception on its first call, but returns a + successful response on subsequent calls. + """ + def __init__(self, ex): + super(_raise_once_make_request_function, self).__init__() + self._ex = ex + + def __call__(self, *args, **kwargs): + if self._ex: + ex, self._ex = self._ex, None + raise ex() + response = httplib.HTTPResponse(MockSock) + response.fp = MockChunkedEncodingResponse([b'f', b'o', b'o']) + response.headers = response.msg = HTTPHeaderDict() + return response + + def _test(exception): + pool = HTTPConnectionPool(host='localhost', maxsize=1, block=True) + + # Verify that the request succeeds after two attempts, and that the + # connection is left on the response object, instead of being + # released back into the pool. + pool._make_request = _raise_once_make_request_function(exception) + response = pool.urlopen('GET', '/', retries=1, + release_conn=False, preload_content=False, + chunked=True) + self.assertEqual(pool.pool.qsize(), 0) + self.assertEqual(pool.num_connections, 2) + self.assertTrue(response.connection is not None) + + response.release_conn() + self.assertEqual(pool.pool.qsize(), 1) + self.assertTrue(response.connection is None) + + # Run the test case for all the retriable exceptions. 
+ _test(TimeoutError) + _test(HTTPException) + _test(SocketError) + _test(ProtocolError) if __name__ == '__main__': diff --git a/test/with_dummyserver/test_socketlevel.py b/test/with_dummyserver/test_socketlevel.py --- a/test/with_dummyserver/test_socketlevel.py +++ b/test/with_dummyserver/test_socketlevel.py @@ -17,7 +17,7 @@ from urllib3.util.retry import Retry from urllib3._collections import HTTPHeaderDict, OrderedDict -from dummyserver.testcase import SocketDummyServerTestCase +from dummyserver.testcase import SocketDummyServerTestCase, consume_socket from dummyserver.server import ( DEFAULT_CERTS, DEFAULT_CA, get_unreachable_address) @@ -507,6 +507,66 @@ def socket_handler(listener): if not successful: self.fail("Timed out waiting for connection close") + def test_release_conn_param_is_respected_after_timeout_retry(self): + """For successful ```urlopen(release_conn=False)```, the connection isn't released, even after a retry. + + This test allows a retry: one request fails, the next request succeeds. + + This is a regression test for issue #651 [1], where the connection + would be released if the initial request failed, even if a retry + succeeded. + + [1] <https://github.com/shazow/urllib3/issues/651> + """ + def socket_handler(listener): + sock = listener.accept()[0] + consume_socket(sock) + + # Close the connection, without sending any response (not even the + # HTTP status line). This will trigger a `Timeout` on the client, + # inside `urlopen()`. + sock.close() + + # Expect a new request. Because we don't want to hang this thread, + # we actually use select.select to confirm that a new request is + # coming in: this lets us time the thread out. + rlist, _, _ = select.select([listener], [], [], 5) + assert rlist + sock = listener.accept()[0] + consume_socket(sock) + + # Send complete chunked response. + sock.send(( + 'HTTP/1.1 200 OK\r\n' + 'Content-Type: text/plain\r\n' + 'Transfer-Encoding: chunked\r\n' + '\r\n' + '8\r\n' + '12345678\r\n' + '0\r\n\r\n').encode('utf-8') + ) + + sock.close() + + self._start_server(socket_handler) + with HTTPConnectionPool(self.host, self.port, maxsize=1) as pool: + # First request should fail, but the timeout and `retries=1` should + # save it. + response = pool.urlopen('GET', '/', retries=1, + release_conn=False, preload_content=False, + timeout=Timeout(connect=1, read=0.001)) + + # The connection should still be on the response object, and none + # should be in the pool. We opened two though. + self.assertEqual(pool.num_connections, 2) + self.assertEqual(pool.pool.qsize(), 0) + self.assertTrue(response.connection is not None) + + # Consume the data. This should put the connection back. + response.read() + self.assertEqual(pool.pool.qsize(), 1) + self.assertTrue(response.connection is None) + class TestProxyManager(SocketDummyServerTestCase):
Likely bug in retry/release-conn code, need tests In this change: https://github.com/shazow/urllib3/pull/647/files#diff-211abff6a07837e4a89a8663a89d2c84R582 @jlatherfold writes: > For the (TimeoutError, HTTPException, SocketError, ConnectionError) exception handler you don't raise (presumably because there may be retries) and you will go on to release the conn and make another call to urlopen (if retries). But you call it with release_conn=True (since you've just set it to true) - what happens if that call succeeds? You'll release the conn back to the pool before the response is read. At least that's what it looks like to me.
I will try and confirm this next week sometime, but writing a test to force it might not be so easy. Take a look at the socketlevel tests in #647; there should be some good examples. Hi, I ran a few trial runs with your changes and set the read timeout low enough to force the issue. I set retries=1 and got a lot of retries that succeeded, so the problem that I thought would occur didn't - I couldn't confirm that this is an issue. You could probably be certain if you did something like this (i.e. use a local variable instead of release_conn): put_conn_on_error = False try: # make call... except: conn = conn and conn.close() put_conn_on_error = True finally: if release_conn or put_conn_on_error: self._put_conn(conn) if retries: # try again... Anyway, it seems ok as it is and I won't be doing any further tests on this for now. Maybe, though not as certain as actually having a test in the suite which exercises this scenario. :) We'll see if we can add that later, thanks for trying!
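To show why mutating the parameter is the dangerous part, here is a toy, self-contained illustration; the function names and control flow are invented for the example and are not urllib3's real code:

```python
def urlopen_buggy(release_conn=True, first_attempt=True):
    try:
        if first_attempt:
            raise IOError("first attempt fails, retry follows")
    except IOError:
        release_conn = True                                       # clobbers the caller's choice...
        return urlopen_buggy(release_conn, first_attempt=False)   # ...and the retry inherits it
    return "released" if release_conn else "kept on response"


def urlopen_fixed(release_conn=True, first_attempt=True):
    release_this_conn = release_conn      # mutate a local flag, never the parameter
    try:
        if first_attempt:
            raise IOError("first attempt fails, retry follows")
    except IOError:
        release_this_conn = True          # only this attempt's connection goes back
        return urlopen_fixed(release_conn, first_attempt=False)
    return "released" if release_conn else "kept on response"


print(urlopen_buggy(release_conn=False))  # released: the suspected bug
print(urlopen_fixed(release_conn=False))  # kept on response: the caller's intent survives the retry
```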
2016-05-17T22:42:45Z
[]
[]
urllib3/urllib3
911
urllib3__urllib3-911
[ "833" ]
c2c14daf2379946ab86238d09eb7fb092174bc4c
diff --git a/urllib3/util/url.py b/urllib3/util/url.py --- a/urllib3/util/url.py +++ b/urllib3/util/url.py @@ -10,7 +10,8 @@ class Url(namedtuple('Url', url_attrs)): """ Datastructure for representing an HTTP URL. Used as a return value for - :func:`parse_url`. + :func:`parse_url`. Both the scheme and host are normalized as they are + both case-insensitive according to RFC 3986. """ slots = () @@ -18,6 +19,10 @@ def __new__(cls, scheme=None, auth=None, host=None, port=None, path=None, query=None, fragment=None): if path and not path.startswith('/'): path = '/' + path + if scheme: + scheme = scheme.lower() + if host: + host = host.lower() return super(Url, cls).__new__(cls, scheme, auth, host, port, path, query, fragment)
diff --git a/test/test_util.py b/test/test_util.py --- a/test/test_util.py +++ b/test/test_util.py @@ -77,13 +77,13 @@ def test_get_host(self): 'http://[2a00:1450:4001:c01::67]:80/test': ('http', '[2a00:1450:4001:c01::67]', 80), # More IPv6 from http://www.ietf.org/rfc/rfc2732.txt - 'http://[FEDC:BA98:7654:3210:FEDC:BA98:7654:3210]:8000/index.html': ('http', '[FEDC:BA98:7654:3210:FEDC:BA98:7654:3210]', 8000), - 'http://[1080:0:0:0:8:800:200C:417A]/index.html': ('http', '[1080:0:0:0:8:800:200C:417A]', None), + 'http://[fedc:ba98:7654:3210:fedc:ba98:7654:3210]:8000/index.html': ('http', '[fedc:ba98:7654:3210:fedc:ba98:7654:3210]', 8000), + 'http://[1080:0:0:0:8:800:200c:417a]/index.html': ('http', '[1080:0:0:0:8:800:200c:417a]', None), 'http://[3ffe:2a00:100:7031::1]': ('http', '[3ffe:2a00:100:7031::1]', None), - 'http://[1080::8:800:200C:417A]/foo': ('http', '[1080::8:800:200C:417A]', None), + 'http://[1080::8:800:200c:417a]/foo': ('http', '[1080::8:800:200c:417a]', None), 'http://[::192.9.5.5]/ipng': ('http', '[::192.9.5.5]', None), - 'http://[::FFFF:129.144.52.38]:42/index.html': ('http', '[::FFFF:129.144.52.38]', 42), - 'http://[2010:836B:4179::836B:4179]': ('http', '[2010:836B:4179::836B:4179]', None), + 'http://[::ffff:129.144.52.38]:42/index.html': ('http', '[::ffff:129.144.52.38]', 42), + 'http://[2010:836b:4179::836b:4179]': ('http', '[2010:836b:4179::836b:4179]', None), } for url, expected_host in url_host_map.items(): returned_host = get_host(url) @@ -100,6 +100,35 @@ def test_invalid_host(self): for location in invalid_host: self.assertRaises(LocationParseError, get_host, location) + def test_host_normalization(self): + """Asserts the scheme and host is normalized to lower-case.""" + url_host_map = { + # Hosts + 'HTTP://GOOGLE.COM/mail/': ('http', 'google.com', None), + 'GOogle.COM/mail': ('http', 'google.com', None), + 'HTTP://GoOgLe.CoM:8000/mail/': ('http', 'google.com', 8000), + 'HTTP://user:[email protected]:1234': ('http', 'example.com', 1234), + '173.194.35.7': ('http', '173.194.35.7', None), + 'HTTP://173.194.35.7': ('http', '173.194.35.7', None), + 'HTTP://[2a00:1450:4001:c01::67]:80/test': ('http', '[2a00:1450:4001:c01::67]', 80), + 'HTTP://[FEDC:BA98:7654:3210:FEDC:BA98:7654:3210]:8000/index.html': ('http', '[fedc:ba98:7654:3210:fedc:ba98:7654:3210]', 8000), + 'HTTPS://[1080:0:0:0:8:800:200c:417A]/index.html': ('https', '[1080:0:0:0:8:800:200c:417a]', None), + } + for url, expected_host in url_host_map.items(): + returned_host = get_host(url) + self.assertEqual(returned_host, expected_host) + + def test_parse_url_normalization(self): + """Assert parse_url normalizes the scheme/host, and only the scheme/host""" + test_urls = [ + ('HTTP://GOOGLE.COM/MAIL/', 'http://google.com/MAIL/'), + ('HTTP://JeremyCline:[email protected]:8080/', 'http://JeremyCline:[email protected]:8080/'), + ('HTTPS://Example.Com/?Key=Value', 'https://example.com/?Key=Value'), + ('Https://Example.Com/#Fragment', 'https://example.com/#Fragment'), + ] + for url, expected_normalized_url in test_urls: + actual_normalized_url = parse_url(url).url + self.assertEqual(actual_normalized_url, expected_normalized_url) parse_url_host_map = { 'http://google.com/mail': Url('http', host='google.com', path='/mail'), diff --git a/test/with_dummyserver/test_proxy_poolmanager.py b/test/with_dummyserver/test_proxy_poolmanager.py --- a/test/with_dummyserver/test_proxy_poolmanager.py +++ b/test/with_dummyserver/test_proxy_poolmanager.py @@ -300,6 +300,16 @@ def test_https_proxy_pool_timeout(self): except MaxRetryError 
as e: self.assertEqual(type(e.reason), ConnectTimeoutError) + def test_scheme_host_case_insensitive(self): + """Assert that upper-case schemes and hosts are normalized.""" + http = proxy_from_url(self.proxy_url.upper()) + + r = http.request('GET', '%s/' % self.http_url.upper()) + self.assertEqual(r.status, 200) + + r = http.request('GET', '%s/' % self.https_url.upper()) + self.assertEqual(r.status, 200) + class TestIPv6HTTPProxyManager(IPv6HTTPDummyProxyTestCase):
PoolManagers are overly case-sensitive. In a URI, both scheme and authority are case-insensitive: that is, the scheme, host, and port should all be considered case insensitively (and basically treated as lower-case). Unfortunately, the PoolManager and ProxyManager do not consistently do that. The biggest problem is in the ProxyManager, where `connection_from_host` will incorrectly treat any HTTPS scheme with a capital letter in it like a HTTP connection (which isn't great), but the flaw is pretty common elsewhere in those objects (e.g. an uppercased host will get a different connection pool to a lowercased host). At the very minimum we should be using a lowercased scheme at all points in the urllib3 stack. We should also consider whether we want to be case-sensitive about domains (I'm inclined to say that we do not). Related: kennethreitz/requests#3075.
+1, we should build this behaviour into our new PoolKey namedtuples somehow. Maybe we need a duck interface that returns normalized tuple values given some request context. Like: ``` python pool_key = pool_key_cls.from_request(request_content) # Or some better method name ``` Then we can build in the lower-casing normalizing logic into the pool_key_cls as-needed. I'm not entirely sure that's enough, but to be fair I'm not sat down with the code right now so I can't be sure.
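One concrete shape for the normalization (and roughly what the eventual patch does in `Url.__new__`) is to lower-case the case-insensitive parts once at parse time, sketched here with a stripped-down namedtuple rather than urllib3's full `Url`:

```python
from collections import namedtuple

_UrlBase = namedtuple("Url", ["scheme", "host", "port", "path"])


class Url(_UrlBase):
    def __new__(cls, scheme=None, host=None, port=None, path=None):
        # Scheme and host are case-insensitive per RFC 3986; path and query are not.
        if scheme:
            scheme = scheme.lower()
        if host:
            host = host.lower()
        return super(Url, cls).__new__(cls, scheme, host, port, path)


print(Url("HTTPS", "Example.COM", 443, "/Mail"))
# Url(scheme='https', host='example.com', port=443, path='/Mail')
```

Because pool keys and proxy checks are derived from the parsed URL, normalizing here means `HTTPS://Example.COM` and `https://example.com` end up sharing one connection pool.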
2016-07-01T01:28:43Z
[]
[]
urllib3/urllib3
922
urllib3__urllib3-922
[ "258" ]
466cae357c32ee955288bb3947d61e42cdd03569
diff --git a/dummyserver/server.py b/dummyserver/server.py --- a/dummyserver/server.py +++ b/dummyserver/server.py @@ -34,6 +34,10 @@ 'certfile': os.path.join(CERTS_PATH, 'server.no_san.crt'), 'keyfile': DEFAULT_CERTS['keyfile'] } +IP_SAN_CERTS = { + 'certfile': os.path.join(CERTS_PATH, 'server.ip_san.crt'), + 'keyfile': DEFAULT_CERTS['keyfile'] +} IPV6_ADDR_CERTS = { 'certfile': os.path.join(CERTS_PATH, 'server.ipv6addr.crt'), 'keyfile': os.path.join(CERTS_PATH, 'server.ipv6addr.key'), diff --git a/setup.py b/setup.py --- a/setup.py +++ b/setup.py @@ -60,6 +60,9 @@ 'idna>=2.0.0', 'certifi', ], + 'secure:python_version <= "2.7"': [ + "ipaddress", + ], 'socks': [ 'PySocks>=1.5.6,<2.0,!=1.5.7', ] diff --git a/urllib3/packages/ssl_match_hostname/__init__.py b/urllib3/packages/ssl_match_hostname/__init__.py --- a/urllib3/packages/ssl_match_hostname/__init__.py +++ b/urllib3/packages/ssl_match_hostname/__init__.py @@ -1,5 +1,11 @@ +import sys + try: - # Python 3.2+ + # Our match_hostname function is the same as 3.5's, so we only want to + # import the match_hostname function if it's at least that good. + if sys.version_info < (3, 5): + raise ImportError("Fallback to vendored code") + from ssl import CertificateError, match_hostname except ImportError: try: diff --git a/urllib3/packages/ssl_match_hostname/_implementation.py b/urllib3/packages/ssl_match_hostname/_implementation.py --- a/urllib3/packages/ssl_match_hostname/_implementation.py +++ b/urllib3/packages/ssl_match_hostname/_implementation.py @@ -4,8 +4,20 @@ # stdlib. http://docs.python.org/3/license.html import re +import sys + +# ipaddress has been backported to 2.6+ in pypi. If it is installed on the +# system, use it to handle IPAddress ServerAltnames (this was added in +# python-3.5) otherwise only do DNS matching. This allows +# backports.ssl_match_hostname to continue to be used all the way back to +# python-2.4. +try: + import ipaddress +except ImportError: + ipaddress = None + +__version__ = '3.5.0.1' -__version__ = '3.4.0.2' class CertificateError(ValueError): pass @@ -64,6 +76,23 @@ def _dnsname_match(dn, hostname, max_wildcards=1): return pat.match(hostname) +def _to_unicode(obj): + if isinstance(obj, str) and sys.version_info < (3,): + obj = unicode(obj, encoding='ascii', errors='strict') + return obj + +def _ipaddress_match(ipname, host_ip): + """Exact matching of IP addresses. + + RFC 6125 explicitly doesn't define an algorithm for this + (section 1.7.2 - "Out of Scope"). + """ + # OpenSSL may add a trailing newline to a subjectAltName's IP address + # Divergence from upstream: ipaddress can't handle byte str + ip = ipaddress.ip_address(_to_unicode(ipname).rstrip()) + return ip == host_ip + + def match_hostname(cert, hostname): """Verify that *cert* (in decoded format as returned by SSLSocket.getpeercert()) matches the *hostname*. RFC 2818 and RFC 6125 @@ -73,12 +102,35 @@ def match_hostname(cert, hostname): returns nothing. """ if not cert: - raise ValueError("empty or no certificate") + raise ValueError("empty or no certificate, match_hostname needs a " + "SSL socket or SSL context with either " + "CERT_OPTIONAL or CERT_REQUIRED") + try: + # Divergence from upstream: ipaddress can't handle byte str + host_ip = ipaddress.ip_address(_to_unicode(hostname)) + except ValueError: + # Not an IP address (common case) + host_ip = None + except UnicodeError: + # Divergence from upstream: Have to deal with ipaddress not taking + # byte strings. 
addresses should be all ascii, so we consider it not + # an ipaddress in this case + host_ip = None + except AttributeError: + # Divergence from upstream: Make ipaddress library optional + if ipaddress is None: + host_ip = None + else: + raise dnsnames = [] san = cert.get('subjectAltName', ()) for key, value in san: if key == 'DNS': - if _dnsname_match(value, hostname): + if host_ip is None and _dnsname_match(value, hostname): + return + dnsnames.append(value) + elif key == 'IP Address': + if host_ip is not None and _ipaddress_match(value, host_ip): return dnsnames.append(value) if not dnsnames:
diff --git a/test/with_dummyserver/test_https.py b/test/with_dummyserver/test_https.py --- a/test/with_dummyserver/test_https.py +++ b/test/with_dummyserver/test_https.py @@ -13,7 +13,8 @@ ) from dummyserver.server import (DEFAULT_CA, DEFAULT_CA_BAD, DEFAULT_CERTS, NO_SAN_CERTS, NO_SAN_CA, DEFAULT_CA_DIR, - IPV6_ADDR_CERTS, IPV6_ADDR_CA, HAS_IPV6) + IPV6_ADDR_CERTS, IPV6_ADDR_CA, HAS_IPV6, + IP_SAN_CERTS) from test import ( onlyPy26OrOlder, @@ -508,6 +509,17 @@ def test_warning_for_certs_without_a_san(self): self.assertTrue(warn.called) +class TestHTTPS_IPSAN(HTTPSDummyServerTestCase): + certs = IP_SAN_CERTS + + def test_can_validate_ip_san(self): + """Ensure that urllib3 can validate SANs with IP addresses in them.""" + https_pool = HTTPSConnectionPool('127.0.0.1', self.port, + cert_reqs='CERT_REQUIRED', + ca_certs=DEFAULT_CA) + r = https_pool.request('GET', '/') + self.assertEqual(r.status, 200) + class TestHTTPS_IPv6Addr(IPV6HTTPSDummyServerTestCase): certs = IPV6_ADDR_CERTS
support ip address typed subject alternative names when connecting to an ip address using https According to rfc6125 section 3.1.3.2, subjectAltName values of type iPAddress should be considered when the https connection is made directly to an ip address rather than a dns domain name. If a server uses such a certificate, urllib3 will fail to validate it; however, other tools such as curl and go do. go even refuses to consider the CN when verifying the certificate when connected by the ip address.
Could you clarify what change you're proposing? Default cert validation? I'm having a little difficulty nesting through this code, but it seems to me that when verifying a certificate currently match_hostname() in urllib3/packages/ssl_match_hostname/**init**.py gets called. Ideally this should only be called if we are requesting the resource based on a dns name, if the url contains an ip address a similar function should be called that checks subjectAlternativeName entries with type IP. Such a function could look somewhat like this: https://gist.github.com/sigmunau/6918558 (python syntax pseudocode, not tested) That sounds sensible. Interested in making a PR? :) The ssl_match_hostname module is backported and vendored in from Python 3.3+. There is a good chance that there is already a method in the Py3 stdlib which does this. Could you check? If so, we could vendor it in as well. Would be better than making it up and maintaining it ourselves. There doesn't seem to be such a method there. At least not nearby where the hostname based on is. I also couldn't find where, if anywhere the match_hostname function is called in the stdlib, so as to see if they do something clever outside it. If you could point my in the direction of where this method is called from and how to find the info I need to determine what method to use I could try to make something that works and submit a PR [Second result](https://github.com/shazow/urllib3/search?q=match_hostname&source=cc). [According to this](http://docs.python.org/3.4/library/ssl.html#ssl.match_hostname), > The rules applied are those for checking the identity of HTTPS servers as outlined in RFC 2818, **except that IP addresses are not currently supported.** Looks like we're on our own. @wolever would love your expertise here too. :) D'oh! Didn't notice this email in the flurry of other notifications. I haven't read through the specific RFCs yet, but this proposal, along with the pseudo code, definitely seem sensible. In addition to submitting a pull request here, I'd be happy to help you submit a patch to Python 3.3: I've got a good feeling that this would get accepted (especially given the "match_hostname"), and the process isn't particularly arduous. Ok! And after reading the [relevant bit of the RFC](http://tools.ietf.org/html/rfc6125#page-47) and [curl's code](https://github.com/bagder/curl/blob/master/lib/ssluse.c#L1102) (especially [line 1174](https://github.com/bagder/curl/blob/master/lib/ssluse.c#L1174)) this does seem reasonable. So! Yes, I would be a solid +1 on this. And, in fact, if you're feeling a bit ambition, I would propose that this is added to `match_hostname`, and the corresponding Python standard library function be patched too: I can't think of any reasonable attack vectors this would open up, and it would definitely make things less surprising. I've just run into this issue trying to rely on IP address SAN's in a cert. I was using Python 2.7.10 but on a whim I tried Python 3.5.0 and it works there. Indeed https://hg.python.org/cpython/file/3.5/Lib/ssl.py#l254 shows there is now code that uses the IP SAN's if present. What would need to happen to get this working on older Pythons? I'm guessing that simply updating the vendored code would fix versions `< 3.2` but versions `>= 3.2 && < 3.5` would use their own shipped version and remain broken? @bodgit Yes, I suspect so. Also, our pyopenssl compatibility layer requires updating. I may take a swing at this tomorrow. 
I don't like the implementation in the Python stdlib and I have raised my concern, but it was too late. IMO the code has two problems: 1) The stdlib attempts to auto-detect whether a string is an IPv4 or IPv6 address. It feels wrong. 2) It also sets the IP address as SNI, but that is a violation of the protocol. https://bugs.python.org/msg244665 By the way, for future compatibility the new API should be modelled after OpenSSL's new API. For a while now, OpenSSL has been able to do proper validation of host names, IP addresses and email itself. @tiran When it comes to copying the stdlib, mostly what I'm going to do is copy the logic from `match_hostname`, and potentially a bit from the `getpeercert` function. I'm not going to adjust the SNI logic we have. As to guessing the IP address type, is there any way around that? I'd like to resurrect this a little bit and take another spin at it. Ideally, if we can backport just the smallest bits of ipaddress we need, then we can optionally have this support. It'll require some PyOpenSSL enhancement as well, but what doesn't? (Alternatively, if any other core is looking for something to do, this might be a bit fun!)
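For illustration, the matching rule itself is small once `ipaddress` is available (stdlib on 3.3+, a backport on Python 2); a sketch in the spirit of the vendored `match_hostname` update above, which compares parsed addresses exactly since RFC 6125 leaves IP matching out of scope:

```python
import ipaddress


def ip_san_matches(san_value, connect_host):
    """True if an 'IP Address' subjectAltName entry equals the host we connected to."""
    try:
        host_ip = ipaddress.ip_address(connect_host)
    except ValueError:
        return False                               # we connected by DNS name, not by IP
    # OpenSSL may hand back a trailing newline on the SAN value.
    return ipaddress.ip_address(san_value.rstrip()) == host_ip


print(ip_san_matches("127.0.0.1", "127.0.0.1"))    # True
print(ip_san_matches("::1", "127.0.0.1"))          # False
```

On Python 2, byte strings have to be decoded to unicode first, since `ipaddress.ip_address` refuses byte str there; that is the "Divergence from upstream" noted in the patch.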
2016-07-11T09:18:46Z
[]
[]
urllib3/urllib3
980
urllib3__urllib3-980
[ "979" ]
0fb5e083b2adf7618db8c26e8e50206de09dd845
diff --git a/urllib3/contrib/pyopenssl.py b/urllib3/contrib/pyopenssl.py --- a/urllib3/contrib/pyopenssl.py +++ b/urllib3/contrib/pyopenssl.py @@ -138,7 +138,19 @@ def _dnsname_to_stdlib(name): then on Python 3 we also need to convert to unicode via UTF-8 (the stdlib uses PyUnicode_FromStringAndSize on it, which decodes via UTF-8). """ - name = idna.encode(name) + def idna_encode(name): + """ + Borrowed wholesale from the Python Cryptography Project. It turns out + that we can't just safely call `idna.encode`: it can explode for + wildcard names. This avoids that problem. + """ + for prefix in [u'*.', u'.']: + if name.startswith(prefix): + name = name[len(prefix):] + return prefix.encode('ascii') + idna.encode(name) + return idna.encode(name) + + name = idna_encode(name) if sys.version_info >= (3, 0): name = name.decode('utf-8') return name
diff --git a/test/contrib/test_pyopenssl.py b/test/contrib/test_pyopenssl.py --- a/test/contrib/test_pyopenssl.py +++ b/test/contrib/test_pyopenssl.py @@ -1,9 +1,13 @@ +# -*- coding: utf-8 -*- +import unittest + from nose.plugins.skip import SkipTest from urllib3.packages import six try: from urllib3.contrib.pyopenssl import (inject_into_urllib3, - extract_from_urllib3) + extract_from_urllib3, + _dnsname_to_stdlib) except ImportError as e: raise SkipTest('Could not import PyOpenSSL: %r' % e) @@ -18,3 +22,36 @@ def setup_module(): def teardown_module(): extract_from_urllib3() + + +class TestPyOpenSSLHelpers(unittest.TestCase): + """ + Tests for PyOpenSSL helper functions. + """ + def test_dnsname_to_stdlib_simple(self): + """ + We can convert a dnsname to a native string when the domain is simple. + """ + name = u"उदाहरण.परीक" + expected_result = 'xn--p1b6ci4b4b3a.xn--11b5bs8d' + + self.assertEqual(_dnsname_to_stdlib(name), expected_result) + + def test_dnsname_to_stdlib_leading_period(self): + """ + If there is a . in front of the domain name we correctly encode it. + """ + name = u".उदाहरण.परीक" + expected_result = '.xn--p1b6ci4b4b3a.xn--11b5bs8d' + + self.assertEqual(_dnsname_to_stdlib(name), expected_result) + + def test_dnsname_to_stdlib_leading_splat(self): + """ + If there's a wildcard character in the front of the string we handle it + appropriately. + """ + name = u"*.उदाहरण.परीक" + expected_result = '*.xn--p1b6ci4b4b3a.xn--11b5bs8d' + + self.assertEqual(_dnsname_to_stdlib(name), expected_result)
Usage of cryptography + idna breaks on wildcard certs The changes in #930 seem to break on some wildcard certificates: https://stackoverflow.com/questions/39521147/why-is-urllib3-idna-complaining-about-a-wildcard-in-an-x509-cert-how-do-i-fix-i I haven't attempted to reproduce this, but I thought recording it here would be of use.
So we're using idna basically because cryptography does. @reaperhulk, any insight here? Besides that `idna` was a terrible mistake? I need to know what version of `cryptography`. I was just able to load the cert in question without problems with both current master and version 1.5. **title** = "cryptography" ... **version** = "1.5" This is running under python 2.7 on an Ubuntu 12.04 VM. I'm running under a virtual env with the following: setuptools==26.1.1 td-client==0.5.0 urllib3[secure]==1.17 I also have the required system packages listed in the user guide: https://cryptography.io/en/latest/installation/#building-cryptography-on-linux dpkg -l | grep build-essential ii build-essential 11.6ubuntu6 amd64 Informational list of build-essential packages dpkg -l | grep libssl-dev ii libssl-dev:amd64 1.0.1f-1ubuntu2.7 amd64 Secure Sockets Layer toolkit - development files dpkg -l | grep libffi-dev ii libffi-dev:amd64 3.1~rc1+r3.0.13-12 amd64 Foreign Function Interface library (development files) dpkg -l | grep "\spython-dev\s" ii python-dev 2.7.5-5ubuntu3 amd64 header files and a static library for Python (default) What happens if you run this script: ``` python from cryptography import x509 from cryptography.hazmat.backends.openssl.backend import backend pem = b"""-----BEGIN CERTIFICATE----- MIIFMjCCBBqgAwIBAgIHK427mrAroDANBgkqhkiG9w0BAQsFADCBtDELMAkGA1UE BhMCVVMxEDAOBgNVBAgTB0FyaXpvbmExEzARBgNVBAcTClNjb3R0c2RhbGUxGjAY BgNVBAoTEUdvRGFkZHkuY29tLCBJbmMuMS0wKwYDVQQLEyRodHRwOi8vY2VydHMu Z29kYWRkeS5jb20vcmVwb3NpdG9yeS8xMzAxBgNVBAMTKkdvIERhZGR5IFNlY3Vy ZSBDZXJ0aWZpY2F0ZSBBdXRob3JpdHkgLSBHMjAeFw0xNDA0MTAwNjA2MDNaFw0x NzAxMjAxMDA3MDRaMEAxITAfBgNVBAsTGERvbWFpbiBDb250cm9sIFZhbGlkYXRl ZDEbMBkGA1UEAwwSKi50cmVhc3VyZWRhdGEuY29tMIIBIjANBgkqhkiG9w0BAQEF AAOCAQ8AMIIBCgKCAQEAzmjm0Ogr4IPN2YQAWFcxJGzCegewnT0E3hW5tD7kgFBj bGpLvHFdqnhd1nGhxYF/CufioUxKSgWtvrw5sL0zVyO1oeURjk5+FFwPMTKd3scW 1v7u2FJrs2uMoYL+Y2DlqWqUQikbX9U3flXcol1XSWD9MZbZLMNeZdKHMRtTi5ew FVXQusYWG+eCovYtetutZ+qhoaGChirTKwg4lNXeID79/XuAleNQ0rUD+HgD6B1K ZBXxomMibV48VerPQLzrp3BztXgjroSIqqtZGEcQHloMYfcn1zq7rRmhR5268RqP sUc6cvijI537IsBvlNaByb2WW5DeLgQKraPt3v2YIQIDAQABo4IBujCCAbYwDwYD VR0TAQH/BAUwAwEBADAdBgNVHSUEFjAUBggrBgEFBQcDAQYIKwYBBQUHAwIwDgYD VR0PAQH/BAQDAgWgMDYGA1UdHwQvMC0wK6ApoCeGJWh0dHA6Ly9jcmwuZ29kYWRk eS5jb20vZ2RpZzJzMS0zOS5jcmwwUwYDVR0gBEwwSjBIBgtghkgBhv1tAQcXATA5 MDcGCCsGAQUFBwIBFitodHRwOi8vY2VydGlmaWNhdGVzLmdvZGFkZHkuY29tL3Jl cG9zaXRvcnkvMHYGCCsGAQUFBwEBBGowaDAkBggrBgEFBQcwAYYYaHR0cDovL29j c3AuZ29kYWRkeS5jb20vMEAGCCsGAQUFBzAChjRodHRwOi8vY2VydGlmaWNhdGVz LmdvZGFkZHkuY29tL3JlcG9zaXRvcnkvZ2RpZzIuY3J0MB8GA1UdIwQYMBaAFEDC vSeOzDSDMKIz1/tss/C0LIDOMC8GA1UdEQQoMCaCEioudHJlYXN1cmVkYXRhLmNv bYIQdHJlYXN1cmVkYXRhLmNvbTAdBgNVHQ4EFgQU3oO+gpNB3WYMVywxSM2p2E6f etMwDQYJKoZIhvcNAQELBQADggEBAJLvOqYh7tiPbT+qKyNHuuSjetfUYHWBx29A t+hXI7Khn1waKxbgUa/QSZiUUD9C097LMhydx/Z8p6bHlkFqIfSOKkxWS736N17C Tldrz8RSSTByp2OfCdA9Ts9ANToR9R33vR40aOQDL29NyoyTAjNPRCHKmLz4F8sE 1tlKJKysO9p6qA1+B5ZEHOY6McK4tm3L2Jhz0DgMzk3QbBh8aJ/cGfezQQoC+RT4 L3Nx0+yhclrOY9L1PEQ4hBjJCsUKd7XcKfUNx+iXTNpTfmTiTfsBmfNx+V79cu+v RoSAetF+rkP0MTReO7DD9jybxn+2ttsC1x8HCw6Nc+NK7GqEHok= -----END CERTIFICATE-----""" cert = x509.load_pem_x509_certificate(pem, backend) print(cert.subject) print(cert.extensions.get_extension_for_class(x509.SubjectAlternativeName)) ``` That should load and print info about the certificate you linked to on SO. 
Running with urllib3 v1.7 ``` ~/repos/akamai_pusher/current_version$ ./akamai_pusher.sh <Name([<NameAttribute(oid=<ObjectIdentifier(oid=2.5.4.11, name=organizationalUnitName)>, value=u'Domain Control Validated')>, <NameAttribute(oid=<ObjectIdentifier(oid=2.5.4.3, name=commonName)>, value=u'*.treasuredata.com')>])> <Extension(oid=<ObjectIdentifier(oid=2.5.29.17, name=subjectAltName)>, critical=False, value=<SubjectAlternativeName(<GeneralNames([<DNSName(value=*.treasuredata.com)>, <DNSName(value=treasuredata.com)>])>)>)> ``` Okay, I understand this bug now. https://github.com/shazow/urllib3/blob/0319846b13ad44e1a2a6b3985369cdc15cefe914/urllib3/contrib/pyopenssl.py#L141 is way too simplistic. It assumes any name given to it will be IDNA encodable, which is untrue. In reality this code needs to look like what we do in cryptography (https://github.com/pyca/cryptography/blob/master/src/cryptography/hazmat/backends/openssl/encode_asn1.py#L365-L371). Ok fab, I'll give that a shot and see if it helps.
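Concretely, the cryptography-style workaround referred to above peels any wildcard prefix off before IDNA-encoding and re-attaches it afterwards; a small sketch (requires the third-party `idna` package):

```python
import idna


def idna_encode_name(name):
    # idna.encode() rejects labels such as "*", so strip a leading "*." or "."
    # before encoding and glue the prefix back on afterwards.
    for prefix in (u"*.", u"."):
        if name.startswith(prefix):
            return prefix.encode("ascii") + idna.encode(name[len(prefix):])
    return idna.encode(name)


print(idna_encode_name(u"example.com"))     # b'example.com'
print(idna_encode_name(u"*.example.com"))   # b'*.example.com'
# Calling idna.encode(u"*.example.com") directly raises an IDNA error, which is
# the crash reported above for wildcard certificates.
```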
2016-09-18T07:33:46Z
[]
[]
urllib3/urllib3
1,017
urllib3__urllib3-1017
[ "1015" ]
24b1647b2c5296aabc336c542a7d06548b90a0c2
diff --git a/urllib3/fields.py b/urllib3/fields.py --- a/urllib3/fields.py +++ b/urllib3/fields.py @@ -130,7 +130,7 @@ def _render_parts(self, header_parts): iterable = header_parts.items() for name, value in iterable: - if value: + if value is not None: parts.append(self._render_part(name, value)) return '; '.join(parts)
diff --git a/test/test_fields.py b/test/test_fields.py --- a/test/test_fields.py +++ b/test/test_fields.py @@ -36,6 +36,15 @@ def test_make_multipart(self): 'Content-Location: /test\r\n' '\r\n') + def test_make_multipart_empty_filename(self): + field = RequestField('somename', 'data', '') + field.make_multipart(content_type='application/octet-stream') + self.assertEqual( + field.render_headers(), + 'Content-Disposition: form-data; name="somename"; filename=""\r\n' + 'Content-Type: application/octet-stream\r\n' + '\r\n') + def test_render_parts(self): field = RequestField('somename', 'data') parts = field._render_parts({'name': 'value', 'filename': 'value'})
Multipart request headers do not work properly for values of empty string Continuing the discussion from https://github.com/sigmavirus24/requests-toolbelt/issues/162, attempting to create a `RequestField` which is then made multipart via `make_multipart` does not work properly if the filename given is an empty string. urllib3 test code:

```
from urllib3.fields import RequestField

field = RequestField(name="somename", data="somedata", filename="")
field.make_multipart(content_type="application/octet-stream")
print(field.headers)
```

Expected output:

```
{'Content-Type': 'application/octet-stream', 'Content-Location': None, 'Content-Disposition': 'form-data; name="somename"; filename=""'}
```

Actual output:

```
{'Content-Type': 'application/octet-stream', 'Content-Location': None, 'Content-Disposition': 'form-data; name="somename"'}
```
A test made in Chrome and Firefox on macOS shows that the `filename` field should be present in the content disposition even if the value is empty (such as when an optional input tag on a webpage accepts multiple files and none are provided). Chrome 55: ``` ------WebKitFormBoundaryCvXp0tseQuZuGTru Content-Disposition: form-data; name="somename"; filename="" Content-Type: application/octet-stream ------WebKitFormBoundaryCvXp0tseQuZuGTru-- ``` Firefox 49: ``` -----------------------------1681649282594654441443913898 Content-Disposition: form-data; name="somename"; filename="" Content-Type: application/octet-stream -----------------------------1681649282594654441443913898-- ``` Flask server code I wrote to test the request payload from Chrome: ``` from flask import Flask, request app = Flask(__name__) @app.route('/', methods=['GET']) def multipart_form(): return '<form name="testform" action="" enctype="multipart/form-data" method="post"><input id="file" type="file" multiple="multiple" name="somename"><input id="post" type="submit" value="Go"></form>' @app.route('/', methods=['POST']) def upload_handler(): print(request.files['somename']) return 'ok' ``` I'm happy to accept a PR to change this behaviour. =)
2016-10-31T03:51:10Z
[]
[]
urllib3/urllib3
1,018
urllib3__urllib3-1018
[ "1009" ]
f8371b343c54e4daf6b5f66fdbe246ae0ed2b596
diff --git a/urllib3/connection.py b/urllib3/connection.py --- a/urllib3/connection.py +++ b/urllib3/connection.py @@ -170,7 +170,13 @@ def request_chunked(self, method, url, body=None, headers=None): """ headers = HTTPHeaderDict(headers if headers is not None else {}) skip_accept_encoding = 'accept-encoding' in headers - self.putrequest(method, url, skip_accept_encoding=skip_accept_encoding) + skip_host = 'host' in headers + self.putrequest( + method, + url, + skip_accept_encoding=skip_accept_encoding, + skip_host=skip_host + ) for header, value in headers.items(): self.putheader(header, value) if 'transfer-encoding' not in headers:
diff --git a/test/with_dummyserver/test_chunked_transfer.py b/test/with_dummyserver/test_chunked_transfer.py --- a/test/with_dummyserver/test_chunked_transfer.py +++ b/test/with_dummyserver/test_chunked_transfer.py @@ -74,3 +74,29 @@ def test_empty_string_body(self): def test_empty_iterable_body(self): self._test_body([]) + + def test_removes_duplicate_host_header(self): + self.start_chunked_handler() + chunks = ['foo', 'bar', '', 'bazzzzzzzzzzzzzzzzzzzzzz'] + pool = HTTPConnectionPool(self.host, self.port, retries=False) + pool.urlopen( + 'GET', '/', chunks, headers={'Host': 'test.org'}, chunked=True + ) + + header_block = self.buffer.split(b'\r\n\r\n', 1)[0].lower() + header_lines = header_block.split(b'\r\n')[1:] + + host_headers = [x for x in header_lines if x.startswith(b'host')] + self.assertEqual(len(host_headers), 1) + + def test_provides_default_host_header(self): + self.start_chunked_handler() + chunks = ['foo', 'bar', '', 'bazzzzzzzzzzzzzzzzzzzzzz'] + pool = HTTPConnectionPool(self.host, self.port, retries=False) + pool.urlopen('GET', '/', chunks, chunked=True) + + header_block = self.buffer.split(b'\r\n\r\n', 1)[0].lower() + header_lines = header_block.split(b'\r\n')[1:] + + host_headers = [x for x in header_lines if x.startswith(b'host')] + self.assertEqual(len(host_headers), 1)
Chunked uploads do not deduplicate the Host header field.

When using `urlopen` with a headers dictionary that includes the `Host` header, the `Host` header from the user will be preferred to the automatic one added by httplib. This is not true when calling `urlopen` with `chunked=True`, because `request_chunked` does not have the same deduplication logic as `request` does. We basically need to duplicate [this logic](https://github.com/python/cpython/blob/master/Lib/http/client.py#L1236-L1241).

Should be a pretty easy patch if anyone wants a quick contribution win: a test to prove that this doesn't work, followed by the patch to make it work.
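For illustration, a minimal reproduction sketch along the lines of the test added in this PR (the host name and chunk values are placeholders):

```python
import urllib3

pool = urllib3.HTTPConnectionPool('example.org', 80)
# On affected versions, combining chunked=True with an explicit Host header
# puts two Host fields on the wire: the user-supplied one plus the automatic
# one added by httplib, because request_chunked never passes skip_host.
pool.urlopen('GET', '/', ['foo', 'bar'], headers={'Host': 'example.org'}, chunked=True)
```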
2016-10-31T11:14:26Z
[]
[]
urllib3/urllib3
1,028
urllib3__urllib3-1028
[ "1005" ]
2398290c337096c75808ddbb268339afe45912f3
diff --git a/urllib3/connection.py b/urllib3/connection.py --- a/urllib3/connection.py +++ b/urllib3/connection.py @@ -56,7 +56,10 @@ class ConnectionError(Exception): 'https': 443, } -RECENT_DATE = datetime.date(2014, 1, 1) +# When updating RECENT_DATE, move it to +# within two years of the current date, and no +# earlier than 6 months ago. +RECENT_DATE = datetime.date(2016, 1, 1) class DummyConnection(object):
diff --git a/test/test_connection.py b/test/test_connection.py --- a/test/test_connection.py +++ b/test/test_connection.py @@ -1,4 +1,9 @@ -import unittest +import datetime +import sys +if sys.version_info >= (2, 7): + import unittest +else: + import unittest2 as unittest import mock @@ -6,6 +11,7 @@ CertificateError, VerifiedHTTPSConnection, _match_hostname, + RECENT_DATE ) @@ -43,6 +49,14 @@ def test_match_hostname_mismatch(self): ) self.assertEqual(e._peer_cert, cert) + def test_recent_date(self): + # This test is to make sure that the RECENT_DATE value + # doesn't get too far behind what the current date is. + # When this test fails update urllib3.connection.RECENT_DATE + # according to the rules defined in that file. + two_years = datetime.timedelta(days=365 * 2) + self.assertGreater(RECENT_DATE, (datetime.datetime.today() - two_years).date()) + if __name__ == '__main__': unittest.main()
Add unit test for RECENT_DATE

I've got a feeling this value could easily be forgotten and left un-updated over time. Could we perhaps add a unit test that will fail if it gets too far away from the current date? It seems like it was last modified in mid 2014 but hasn't been updated since then; should this value be updated now?

Link to the blame: https://github.com/shazow/urllib3/blame/master/urllib3/connection.py#L59
We should probably consider updating this sometime soon, yeah. Might be worth having a unit test. Perhaps having a maximum distance from the current date of 2 years? Also, should I update it to 2015 perhaps? I can submit a PR for this if that's what's wanted. Why don't we just determine it programmatically during the test? :trollface:
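In rough terms, the check being discussed (and what the test added above boils down to) looks like this:

```python
import datetime

from urllib3.connection import RECENT_DATE


def test_recent_date():
    # Fail once RECENT_DATE drifts more than roughly two years behind today,
    # prompting a manual bump of the constant.
    two_years = datetime.timedelta(days=365 * 2)
    assert RECENT_DATE > (datetime.date.today() - two_years)
```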
2016-11-05T21:37:43Z
[]
[]
urllib3/urllib3
1,033
urllib3__urllib3-1033
[ "1032" ]
092e5f63bdde523b265115c7b44f46e56bd41f9d
diff --git a/urllib3/connectionpool.py b/urllib3/connectionpool.py --- a/urllib3/connectionpool.py +++ b/urllib3/connectionpool.py @@ -73,7 +73,7 @@ def __init__(self, host, port=None): # Instead, we need to make sure we never pass ``None`` as the port. # However, for backward compatibility reasons we can't actually # *assert* that. - self.host = host.strip('[]') + self.host = host.strip('[]').lower() self.port = port def __str__(self):
diff --git a/test/test_connectionpool.py b/test/test_connectionpool.py --- a/test/test_connectionpool.py +++ b/test/test_connectionpool.py @@ -275,6 +275,11 @@ def test_contextmanager(self): self.assertRaises(ClosedPoolError, pool._get_conn) self.assertRaises(Empty, old_pool_queue.get, block=False) + def test_mixed_case_url(self): + pool = HTTPConnectionPool('Example.com') + response = pool.request('GET', "http://Example.com") + self.assertEqual(response.status, 200) + def test_absolute_url(self): c = connection_from_url('http://google.com:80') self.assertEqual(
Unexpected HostChangedError due to downcasing of hostname

On version 1.19, `urllib3.exceptions.HostChangedError` is thrown when a pool is created for a host with uppercase letters, and a request is then made with a url containing this hostname.

```python
from urllib3 import HTTPConnectionPool
pool = HTTPConnectionPool("Example.com")
response = pool.request('GET', "http://Example.com")
```

Gives:

```
Traceback (most recent call last):
  File "minimum.py", line 5, in <module>
    response = pool.request('GET', "http://Example.com")
  File "/usr/local/lib/python3.5/dist-packages/urllib3/request.py", line 66, in request
    **urlopen_kw)
  File "/usr/local/lib/python3.5/dist-packages/urllib3/request.py", line 87, in request_encode_url
    return self.urlopen(method, url, **extra_kw)
  File "/usr/local/lib/python3.5/dist-packages/urllib3/connectionpool.py", line 549, in urlopen
    raise HostChangedError(self, url, retries)
urllib3.exceptions.HostChangedError: HTTPConnectionPool(host='Example.com', port=None): Tried to open a foreign host with url: http://Example.com
```

Is this the expected behavior, or should a ConnectionPool's host also be downcased when doing `is_same_host()`?
Hmmm. I don't think that this is necessarily _intended_ behavior, but I don't think it's necessarily incorrect either. Can I ask why you're specifically using ConnectionPool instead of PoolManager? In general, we'd suggest using the higher-level API to avoid issues like this. I'm not sure why I was using ConnectionPool, I will use PoolManager instead. I only noticed this in some old code after I upgraded urllib3 via pip. As to whether this is the correct behavior, rfc952 declares that no distinction is made between upper and lower case. So the biggest issue here is probably that we should forcibly lowercase the hostname in the base `ConnectionPool` class. I'd accept a patch that does that. @Lukasa Is a blind `lower()` call appropriate? I thought that function was too smart for it's own good with some non-ASCII characters. urllib3 doesn't tolerate non-ascii characters so I don't really mind how smart it is. ;) Ah okay nevermind. :) @SethMichaelLarson @NickMolloy are either of you planning on putting this together? If not, I've got some time to cobble this together. @nateprewitt I can add this change with some tests tonight, should be easy enough.
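For comparison, the higher-level API suggested above sidesteps the problem; a minimal sketch reusing the URL from the report:

```python
import urllib3

# PoolManager parses the URL itself and keys its pools on the parsed host,
# so a mixed-case hostname does not trip the same-host check that raises
# HostChangedError when ConnectionPool is used directly.
http = urllib3.PoolManager()
response = http.request('GET', 'http://Example.com')
```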
2016-11-10T00:57:39Z
[]
[]
urllib3/urllib3
1,036
urllib3__urllib3-1036
[ "1035" ]
d932ff1e6e894740e795ddadd26c2fa5a49b0f84
diff --git a/dummyserver/testcase.py b/dummyserver/testcase.py --- a/dummyserver/testcase.py +++ b/dummyserver/testcase.py @@ -1,4 +1,4 @@ -import unittest +import sys import socket import threading from nose.plugins.skip import SkipTest @@ -13,6 +13,11 @@ from dummyserver.handlers import TestingApp from dummyserver.proxy import ProxyHandler +if sys.version_info >= (2, 7): + import unittest +else: + import unittest2 as unittest + def consume_socket(sock, chunks=65536): while not sock.recv(chunks).endswith(b'\r\n\r\n'): diff --git a/urllib3/contrib/socks.py b/urllib3/contrib/socks.py --- a/urllib3/contrib/socks.py +++ b/urllib3/contrib/socks.py @@ -83,6 +83,7 @@ def _new_conn(self): proxy_port=self._socks_options['proxy_port'], proxy_username=self._socks_options['username'], proxy_password=self._socks_options['password'], + proxy_rdns=self._socks_options['rdns'], timeout=self.timeout, **extra_kw ) @@ -153,8 +154,16 @@ def __init__(self, proxy_url, username=None, password=None, if parsed.scheme == 'socks5': socks_version = socks.PROXY_TYPE_SOCKS5 + rdns = False + elif parsed.scheme == 'socks5h': + socks_version = socks.PROXY_TYPE_SOCKS5 + rdns = True elif parsed.scheme == 'socks4': socks_version = socks.PROXY_TYPE_SOCKS4 + rdns = False + elif parsed.scheme == 'socks4a': + socks_version = socks.PROXY_TYPE_SOCKS4 + rdns = True else: raise ValueError( "Unable to determine SOCKS version from %s" % proxy_url @@ -168,6 +177,7 @@ def __init__(self, proxy_url, username=None, password=None, 'proxy_port': parsed.port, 'username': username, 'password': password, + 'rdns': rdns } connection_pool_kw['_socks_options'] = socks_options
diff --git a/test/contrib/test_socks.py b/test/contrib/test_socks.py --- a/test/contrib/test_socks.py +++ b/test/contrib/test_socks.py @@ -229,6 +229,37 @@ def request_handler(listener): self.assertEqual(response.data, b'') self.assertEqual(response.headers['Server'], 'SocksTestServer') + def test_local_dns(self): + def request_handler(listener): + sock = listener.accept()[0] + + handler = handle_socks5_negotiation(sock, negotiate=False) + addr, port = next(handler) + + self.assertIn(addr, ['127.0.0.1', '::1']) + self.assertTrue(port, 80) + handler.send(True) + + while True: + buf = sock.recv(65535) + if buf.endswith(b'\r\n\r\n'): + break + + sock.sendall(b'HTTP/1.1 200 OK\r\n' + b'Server: SocksTestServer\r\n' + b'Content-Length: 0\r\n' + b'\r\n') + sock.close() + + self._start_server(request_handler) + proxy_url = "socks5://%s:%s" % (self.host, self.port) + pm = socks.SOCKSProxyManager(proxy_url) + response = pm.request('GET', 'http://localhost') + + self.assertEqual(response.status, 200) + self.assertEqual(response.data, b'') + self.assertEqual(response.headers['Server'], 'SocksTestServer') + def test_correct_header_line(self): def request_handler(listener): sock = listener.accept()[0] @@ -256,7 +287,7 @@ def request_handler(listener): sock.close() self._start_server(request_handler) - proxy_url = "socks5://%s:%s" % (self.host, self.port) + proxy_url = "socks5h://%s:%s" % (self.host, self.port) pm = socks.SOCKSProxyManager(proxy_url) response = pm.request('GET', 'http://example.com') self.assertEqual(response.status, 200) @@ -268,7 +299,7 @@ def request_handler(listener): event.wait() self._start_server(request_handler) - proxy_url = "socks5://%s:%s" % (self.host, self.port) + proxy_url = "socks5h://%s:%s" % (self.host, self.port) pm = socks.SOCKSProxyManager(proxy_url) self.assertRaises( @@ -285,7 +316,7 @@ def request_handler(listener): event.set() self._start_server(request_handler) - proxy_url = "socks5://%s:%s" % (self.host, self.port) + proxy_url = "socks5h://%s:%s" % (self.host, self.port) pm = socks.SOCKSProxyManager(proxy_url) event.wait() @@ -308,7 +339,7 @@ def request_handler(listener): sock.close() self._start_server(request_handler) - proxy_url = "socks5://%s:%s" % (self.host, self.port) + proxy_url = "socks5h://%s:%s" % (self.host, self.port) pm = socks.SOCKSProxyManager(proxy_url) self.assertRaises( @@ -361,7 +392,7 @@ def request_handler(listener): next(handler) self._start_server(request_handler) - proxy_url = "socks5://%s:%s" % (self.host, self.port) + proxy_url = "socks5h://%s:%s" % (self.host, self.port) pm = socks.SOCKSProxyManager(proxy_url, username='user', password='badpass') @@ -445,6 +476,37 @@ def request_handler(listener): self.assertEqual(response.headers['Server'], 'SocksTestServer') self.assertEqual(response.data, b'') + def test_local_dns(self): + def request_handler(listener): + sock = listener.accept()[0] + + handler = handle_socks4_negotiation(sock) + addr, port = next(handler) + + self.assertEqual(addr, '127.0.0.1') + self.assertTrue(port, 80) + handler.send(True) + + while True: + buf = sock.recv(65535) + if buf.endswith(b'\r\n\r\n'): + break + + sock.sendall(b'HTTP/1.1 200 OK\r\n' + b'Server: SocksTestServer\r\n' + b'Content-Length: 0\r\n' + b'\r\n') + sock.close() + + self._start_server(request_handler) + proxy_url = "socks4://%s:%s" % (self.host, self.port) + pm = socks.SOCKSProxyManager(proxy_url) + response = pm.request('GET', 'http://localhost') + + self.assertEqual(response.status, 200) + self.assertEqual(response.headers['Server'], 
'SocksTestServer') + self.assertEqual(response.data, b'') + def test_correct_header_line(self): def request_handler(listener): sock = listener.accept()[0] @@ -472,7 +534,7 @@ def request_handler(listener): sock.close() self._start_server(request_handler) - proxy_url = "socks4://%s:%s" % (self.host, self.port) + proxy_url = "socks4a://%s:%s" % (self.host, self.port) pm = socks.SOCKSProxyManager(proxy_url) response = pm.request('GET', 'http://example.com') self.assertEqual(response.status, 200) @@ -491,7 +553,7 @@ def request_handler(listener): sock.close() self._start_server(request_handler) - proxy_url = "socks4://%s:%s" % (self.host, self.port) + proxy_url = "socks4a://%s:%s" % (self.host, self.port) pm = socks.SOCKSProxyManager(proxy_url) self.assertRaises( @@ -539,7 +601,7 @@ def request_handler(listener): next(handler) self._start_server(request_handler) - proxy_url = "socks4://%s:%s" % (self.host, self.port) + proxy_url = "socks4a://%s:%s" % (self.host, self.port) pm = socks.SOCKSProxyManager(proxy_url, username='baduser') try: @@ -590,7 +652,7 @@ def request_handler(listener): tls.close() self._start_server(request_handler) - proxy_url = "socks5://%s:%s" % (self.host, self.port) + proxy_url = "socks5h://%s:%s" % (self.host, self.port) pm = socks.SOCKSProxyManager(proxy_url) response = pm.request('GET', 'https://localhost')
Differentiate socks5h from socks5 and socks4a from socks4 when handling proxy string In a proxy string, socks5h:// and socks4a:// mean that the hostname is resolved by the SOCKS server. socks5:// and socks4:// mean that the hostname is resolved locally. socks4a:// means to use SOCKS4a, which is an extension of SOCKS4. Let's make urllib3 honor it.
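For illustration, a sketch of how the requested schemes would be used via `SOCKSProxyManager` from `urllib3.contrib.socks` (the proxy address is a placeholder):

```python
from urllib3.contrib.socks import SOCKSProxyManager

# socks5h:// (and socks4a://) ask the SOCKS proxy to resolve the hostname,
# while socks5:// (and socks4://) resolve it locally before connecting.
proxy = SOCKSProxyManager('socks5h://127.0.0.1:1080/')
response = proxy.request('GET', 'http://example.com/')
```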
2016-11-13T12:28:09Z
[]
[]
urllib3/urllib3
1,166
urllib3__urllib3-1166
[ "1165" ]
d29272aa8d28e42125c2868971b580344fab4e75
diff --git a/urllib3/poolmanager.py b/urllib3/poolmanager.py --- a/urllib3/poolmanager.py +++ b/urllib3/poolmanager.py @@ -82,6 +82,12 @@ def _default_key_normalizer(key_class, request_context): if key in context and context[key] is not None: context[key] = frozenset(context[key].items()) + # The socket_options key may be a list and needs to be transformed into a + # tuple. + socket_opts = context.get('socket_options') + if socket_opts is not None: + context['socket_options'] = tuple(socket_opts) + # Map the kwargs to the names in the namedtuple - this is necessary since # namedtuples can't have fields starting with '_'. for key in list(context.keys()):
diff --git a/test/test_poolmanager.py b/test/test_poolmanager.py --- a/test/test_poolmanager.py +++ b/test/test_poolmanager.py @@ -1,3 +1,4 @@ +import socket import sys from urllib3.poolmanager import ( @@ -340,6 +341,25 @@ def test_override_pool_kwargs_host(self): self.assertEqual(100, override_pool.retries) self.assertTrue(override_pool.block) + def test_pool_kwargs_socket_options(self): + """Assert passing socket options works with connection_from_host""" + p = PoolManager(socket_options=[]) + override_opts = [ + (socket.SOL_SOCKET, socket.SO_REUSEADDR, 1), + (socket.IPPROTO_TCP, socket.TCP_NODELAY, 1) + ] + pool_kwargs = {'socket_options': override_opts} + + default_pool = p.connection_from_host('example.com', scheme='http') + override_pool = p.connection_from_host( + 'example.com', scheme='http', pool_kwargs=pool_kwargs + ) + + self.assertEqual(default_pool.conn_kw['socket_options'], []) + self.assertEqual( + override_pool.conn_kw['socket_options'], override_opts + ) + def test_merge_pool_kwargs(self): """Assert _merge_pool_kwargs works in the happy case""" p = PoolManager(strict=True)
urllib3==1.21: TypeError: unhashable type: 'list' There seems to be a regression from `1.20` to `1.21`. ``` 2017-04-28T04:26:10.152544966Z File "/usr/local/lib/python3.5/site-packages/steembase/http_client.py", line 146, in exec 2017-04-28T04:26:10.152552486Z response = self.request(body=body) 2017-04-28T04:26:10.152560726Z File "/usr/local/lib/python3.5/site-packages/urllib3/poolmanager.py", line 303, in urlopen 2017-04-28T04:26:10.152585508Z conn = self.connection_from_host(u.host, port=u.port, scheme=u.scheme) 2017-04-28T04:26:10.152592399Z File "/usr/local/lib/python3.5/site-packages/urllib3/poolmanager.py", line 219, in connection_from_host 2017-04-28T04:26:10.152599135Z return self.connection_from_context(request_context) 2017-04-28T04:26:10.152604508Z File "/usr/local/lib/python3.5/site-packages/urllib3/poolmanager.py", line 232, in connection_from_context 2017-04-28T04:26:10.152610299Z return self.connection_from_pool_key(pool_key, request_context=request_context) 2017-04-28T04:26:10.152615790Z File "/usr/local/lib/python3.5/site-packages/urllib3/poolmanager.py", line 245, in connection_from_pool_key 2017-04-28T04:26:10.152621674Z pool = self.pools.get(pool_key) 2017-04-28T04:26:10.152626932Z File "/usr/local/lib/python3.5/_collections_abc.py", line 597, in get 2017-04-28T04:26:10.152632427Z return self[key] 2017-04-28T04:26:10.152637611Z File "/usr/local/lib/python3.5/site-packages/urllib3/_collections.py", line 53, in __getitem__ 2017-04-28T04:26:10.152643188Z item = self._container.pop(key) 2017-04-28T04:26:10.152648363Z TypeError: unhashable type: 'list' ``` Code: https://github.com/Netherdrake/steem-python/blob/master/steembase/http_client.py
Thanks for the report. Could you provide a minimal reproducible example? You link to code but there's no mention of how you're using the code to cause this error. Any attempt to do a http request will do. A simple example: ``` pip install -U steem ``` ``` from steem import Steem s = Steem() s.last_irreversible_block_num ``` Steem in this case wraps `http_client.py` (code link above). My speculation is that something changed in PR 1016, which breaks backwards compatibility. Looks like it's `socket_options` that needs to be converted into a tuple to be used as a part of a `PoolKey` because lists aren't hash-able? It's quite late, but I do believe this is a regression. Nice catch, I will comment again when I've had a chance to sleep. (1 AM currently) Yeah that's it, here's a minimal verifiable example for maintainers that read this tomorrow: ```python from urllib3.poolmanager import PoolManager p = PoolManager(socket_options=[]) p.urlopen('GET', 'https://www.google.com') ``` cc: #1016 @jeremycline Goodnight :sleeping: Yeah, that looks like a correct analysis. We definitely regressed that. Looks like we have a 1.21.1 release in our future folks!
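As a stopgap on the affected release, one assumed (not from the thread) workaround is to pass `socket_options` as a tuple, which is hashable and so can take part in the pool key:

```python
from urllib3.poolmanager import PoolManager

# Tuples of (level, optname, value) triples, including the empty tuple, are
# hashable, so the pool-key lookup that fails for lists should succeed.
p = PoolManager(socket_options=())
p.urlopen('GET', 'https://www.google.com')
```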
2017-04-28T07:36:53Z
[]
[]
urllib3/urllib3
1,173
urllib3__urllib3-1173
[ "1149" ]
20d0e8681c6dfbc0f7590a949feb3e692fa32275
diff --git a/urllib3/sync_connection.py b/urllib3/sync_connection.py --- a/urllib3/sync_connection.py +++ b/urllib3/sync_connection.py @@ -16,6 +16,7 @@ import collections import datetime +import errno import itertools import socket import warnings @@ -25,7 +26,8 @@ from .base import Request, Response from .exceptions import ( ConnectTimeoutError, NewConnectionError, SubjectAltNameWarning, - SystemTimeWarning, BadVersionError, FailedTunnelError, InvalidBodyError + SystemTimeWarning, BadVersionError, FailedTunnelError, InvalidBodyError, + ProtocolError ) from .packages import six from .util import selectors, connection, ssl_ as ssl_util @@ -43,6 +45,9 @@ _SUPPORTED_VERSIONS = frozenset([b'1.0', b'1.1']) +# A sentinel object returned when some syscalls return EAGAIN. +_EAGAIN = object() + def _headers_to_native_string(headers): """ @@ -141,24 +146,6 @@ def _body_bytes(request, state_machine): yield state_machine.send(h11.EndOfMessage()) -def _maybe_read_response(data, state_machine): - """ - Feeds some more data into the state machine and potentially returns a - response object. - """ - response = None - event = None - state_machine.receive_data(data) - - while event is not h11.NEED_DATA: - event = state_machine.next_event() - if isinstance(event, h11.Response): - response = event - break - - return response - - def _response_from_h11(h11_response, body_object): """ Given a h11 Response object, build a urllib3 response object and return it. @@ -195,6 +182,52 @@ def _build_tunnel_request(host, port, headers): return tunnel_request +def _wait_for_event(selector, sock, event, timeout): + """ + Waits for a specific event on a socket for no more than the time in + timeout. Throws an exception if the timeout is exceeded. + """ + old_events = selector.get_key(sock).events + try: + selector.modify(sock, event) + if not selector.select(timeout=timeout): + # TODO: Raise our own timeouts later + raise sock.timeout() + return + finally: + selector.modify(sock, old_events) + + +def _recv_or_eagain(sock): + """ + Calls recv on a non-blocking socket. Returns the number of bytes read or + the sentinel object _EAGAIN. + """ + try: + return sock.recv(65536) + except ssl.SSLWantReadError: + return _EAGAIN + except (OSError, socket.error) as e: + if e.errno == errno.EAGAIN: + return _EAGAIN + raise + + +def _write_or_eagain(sock, data): + """ + Calls send on a non-blocking socket. Returns the number of bytes written or + the sentinel object _EAGAIN. + """ + try: + return sock.send(data) + except ssl.SSLWantWriteError: + return _EAGAIN + except (OSError, socket.error) as e: + if e.errno == errno.EAGAIN: + return _EAGAIN + raise + + _DEFAULT_SOCKET_OPTIONS = object() @@ -280,7 +313,7 @@ def _wrap_socket(self, conn, ssl_context, fingerprint, assert_hostname): return conn - def _send_unless_readable(self, data): + def _send_unless_readable(self, state_machine, data): """ This method sends the data in ``data`` on the given socket. It will abort early if the socket became readable for any reason. @@ -288,6 +321,10 @@ def _send_unless_readable(self, data): If the socket became readable, this returns True. Otherwise, returns False. """ + # First, register the socket with the selector. + self._selector.modify( + self._sock, selectors.EVENT_READ | selectors.EVENT_WRITE + ) # We take a memoryview here because if the chunk is very large we're # going to slice it a few times, and we'd like to avoid doing copies as # we do that. 
@@ -296,30 +333,151 @@ def _send_unless_readable(self, data): while chunk: events = self._selector.select()[0][1] # TODO: timeout! - # If the socket is readable, we stop uploading. + # The "happy path" here is that the socket has become marked + # writable. If that happens, we just call send. If this returns + # EAGAIN or SSL_WANT_WRITE, that's fine, we just spin around again. + # + # The less happy path here is that the socket has become marked + # *readable*. That is...problematic. It may be the case that there + # is data to receive from the remote peer. If there is, we want to + # stop uploading. However, in the TLS case this data may be + # triggering a TLS renegotiation, so the simple fact that the + # socket is readable is not a bug. So what we do is attempt to call + # recv. If it returns data, we shove it into our state machine and + # then break from the loop. If it returns EAGAIN, we assume that + # it was just TLS stuff and move on. + # + # Note that we only *actually* break from the loop if and when we + # get an actual final response header block. Prior to that point we + # will keep sending data. This allows 1XX header blocks to also be + # ignored. if events & selectors.EVENT_READ: + data = _recv_or_eagain(self._sock) + if data is _EAGAIN: + continue + + state_machine.receive_data(data) return True - assert events & selectors.EVENT_WRITE - chunk_sent = self._sock.send(chunk) - chunk = chunk[chunk_sent:] + if events & selectors.EVENT_WRITE: + # This `while` loop is present to prevent us doing too much + # selector polling. We already know the selector is writable: + # we don't need to ask again until a write actually succeeds or + # we get EAGAIN. + bytes_written = None + while bytes_written is None: + try: + bytes_written = _write_or_eagain(self._sock, chunk) + except ssl.SSLWantReadError: + # This is unlikely, but we should still tolerate it. + _wait_for_event( + self._selector, + self._sock, + selectors.EVENT_READ, + None # TODO: Timeout! + ) + else: + if bytes_written is not _EAGAIN: + chunk = chunk[bytes_written:] return False - def _receive_bytes(self, read_timeout): + def send_request(self, request, read_timeout): """ - This method blocks until the socket is readable or the read times out - (TODO), and then returns whatever data was read. Signals EOF the same - way ``recv`` does: by returning the empty string. + Given a Request object, performs the logic required to get a response. """ - keys = self._selector.select(read_timeout) - if not keys: - # TODO: Raise our own timeouts later. - raise socket.timeout() - events = keys[0][1] - assert events == selectors.EVENT_READ - data = self._sock.recv(65536) - return data + # Step 1: Send Request. + # TODO: Replace read_timeout with something smarter. + self._read_timeout = read_timeout + + # Before we begin, confirm that the state machine is ok. + if (self._state_machine.our_state is not h11.IDLE or + self._state_machine.their_state is not h11.IDLE): + raise ProtocolError("Invalid internal state transition") + + header_bytes = _request_to_bytes(request, self._state_machine) + body_chunks = _body_bytes(request, self._state_machine) + request_chunks = itertools.chain([header_bytes], body_chunks) + response = None + + # First, register the socket with the selector. + self._selector.modify( + self._sock, selectors.EVENT_READ | selectors.EVENT_WRITE + ) + + # Next, send the body. 
+ for chunk in request_chunks: + did_read = self._send_unless_readable(self._state_machine, chunk) + if did_read: + break + + # Ok, we've sent the request. Now we want to read the response. This + # needs a different loop, slightly. + # + # While reading, we are again looping around in select(). By default, + # we do not look for writability, because for large responses to small + # requests the socket will inevitably be writable. Each time the + # selector marks the socket as readable, we will attempt to read. This + # may raise EAGAIN or WANT_READ, either of which causes us to just loop + # again. However it may *also* raise WANT_WRITE. If it does, we will + # block the event loop until the socket returns *writable*, and then + # loop back around again. + self._selector.modify(self._sock, selectors.EVENT_READ) + response = None + while not isinstance(response, h11.Response): + response = self._read_until_event( + self._state_machine, self._read_timeout + ) + + if response.http_version not in _SUPPORTED_VERSIONS: + raise BadVersionError(response.http_version) + + return _response_from_h11(response, self) + + def _read_until_event(self, state_machine, read_timeout): + """ + A selector loop that spins over the selector and socket, issuing reads + and feeding the data into h11 and checking whether h11 has an event for + us. The moment there is an event other than h11.NEED_DATA, this + function returns that event. + """ + # While reading, we are looping around in select(). By default, we do + # not look for writability, because for large responses to small + # requests the socket will inevitably be writable. Each time the + # selector marks the socket as readable, we will attempt to read. This + # may raise EAGAIN or WANT_READ, either of which causes us to just loop + # again. However, it may *also* raise WANT_WRITE. If it does, we will + # block the event loop until the socket returns *writable*, and then + # loop back around again. + event = state_machine.next_event() + self._selector.modify(self._sock, selectors.EVENT_READ) + while event is h11.NEED_DATA: + selector_events = self._selector.select(read_timeout) + if not selector_events: + # TODO: Raise our own timeouts later. + raise socket.timeout() + + # This `while` loop is present to prevent us doing too much + # selector polling. We already know the selector is readable: we + # don't need to ask again until a read actually succeeds or we get + # EAGAIN. + read_bytes = None + while read_bytes is None: + try: + read_bytes = _recv_or_eagain(self._sock) + except ssl.SSLWantWriteError: + _wait_for_event( + self._selector, + self._sock, + selectors.EVENT_WRITE, + read_timeout + ) + else: + if read_bytes is not _EAGAIN: + state_machine.receive_data(read_bytes) + event = state_machine.next_event() + + return event def _tunnel(self, conn): """ @@ -347,17 +505,18 @@ def _tunnel(self, conn): self._selector.register( self._sock, selectors.EVENT_READ | selectors.EVENT_WRITE ) - self._send_unless_readable(bytes_to_send) + self._send_unless_readable(tunnel_state_machine, bytes_to_send) # At this point we no longer care if the socket is writable. self._selector.modify(self._sock, selectors.EVENT_READ) response = None - while response is None: - # TODO: Add a timeout here. - # TODO: Error handling. 
- read_bytes = self._receive_bytes(read_timeout=None) - response = _maybe_read_response(read_bytes, tunnel_state_machine) + while not isinstance(response, h11.Response): + # TODO: add a timeout here + # TODO: Error handling + response = self._read_until_event( + tunnel_state_machine, read_timeout=None + ) if response.status_code != 200: response = _response_from_h11(response, self) @@ -433,47 +592,6 @@ def connect(self, ssl_context=None, self._sock, selectors.EVENT_READ | selectors.EVENT_WRITE ) - def send_request(self, request, read_timeout): - """ - Sends a single Request object. Returns a Response. - """ - # TODO: Replace read_timeout with something smarter. - self._read_timeout = read_timeout - - # Before we begin, confirm that the state machine is ok. - assert self._state_machine.our_state is h11.IDLE - assert self._state_machine.their_state is h11.IDLE - - # First, register the socket with the selector. We want to look for - # readability *and* writability, because if the socket suddenly becomes - # readable we need to stop our upload immediately. - self._selector.modify( - self._sock, selectors.EVENT_READ | selectors.EVENT_WRITE - ) - header_bytes = _request_to_bytes(request, self._state_machine) - body_chunks = _body_bytes(request, self._state_machine) - request_chunks = itertools.chain([header_bytes], body_chunks) - - for chunk in request_chunks: - # If the socket becomes readable we don't need to error out or - # anything: we can just continue with our current logic. - readable = self._send_unless_readable(chunk) - if readable: - break - - # At this point we no longer care if the socket is writable. - self._selector.modify(self._sock, selectors.EVENT_READ) - - response = None - while response is None: - read_bytes = self._receive_bytes(read_timeout) - response = _maybe_read_response(read_bytes, self._state_machine) - - if response.http_version not in _SUPPORTED_VERSIONS: - raise BadVersionError(response.http_version) - - return _response_from_h11(response, self) - def close(self): """ Close this connection, suitable for being re-added to a connection @@ -563,18 +681,14 @@ def next(self): if self._state_machine is None: raise StopIteration() - data = None - - while data is None: - event = self._state_machine.next_event() - if event is h11.NEED_DATA: - received_bytes = self._receive_bytes(self._read_timeout) - self._state_machine.receive_data(received_bytes) - elif isinstance(event, h11.Data): - data = bytes(event.data) - elif isinstance(event, h11.EndOfMessage): - self._reset() - raise StopIteration() + event = self._read_until_event( + self._state_machine, read_timeout=self._read_timeout + ) + if isinstance(event, h11.Data): + data = bytes(event.data) + elif isinstance(event, h11.EndOfMessage): + self._reset() + raise StopIteration() return data
diff --git a/test/contrib/test_securetransport.py b/test/contrib/test_securetransport.py --- a/test/contrib/test_securetransport.py +++ b/test/contrib/test_securetransport.py @@ -7,6 +7,8 @@ except ImportError as e: raise SkipTest('Could not import SecureTransport: %r' % e) +raise SkipTest('SecureTransport currently not supported in v2!') + from ..with_dummyserver.test_https import TestHTTPS, TestHTTPS_TLSv1 # noqa: F401 from ..with_dummyserver.test_socketlevel import ( # noqa: F401 TestSNI, TestSocketClosing, TestClientCerts diff --git a/test/test_sync_connection.py b/test/test_sync_connection.py new file mode 100644 --- /dev/null +++ b/test/test_sync_connection.py @@ -0,0 +1,434 @@ +""" +Low-level synchronous connection tests. + +These tests involve mocking out the network layer to cause specific unusual +behaviours to occur. The goal is to ensure that the synchronous connection +layer can handle unexpected network weather without falling over, and without +expending undue effort to arrange that these effects actually happen on a real +network. +""" +import collections +import errno +import socket +import ssl +import unittest + +import h11 + +from urllib3.base import Request +from urllib3.sync_connection import SyncHTTP1Connection +from urllib3.util import selectors + + +# Objects and globals for handling scenarios. +Event = collections.namedtuple('Event', ['expected_object', 'event', 'meta']) + +SELECTOR = "SELECTOR" +SOCKET = "SOCKET" +RAISE_EAGAIN = "RAISE_EAGAIN" +RAISE_WANT_READ = "RAISE_WANT_READ" +RAISE_WANT_WRITE = "RAISE_WANT_WRITE" + +EVENT_SELECT = "EVENT_SELECT" + +EVENT_SEND = "EVENT_SEND" +SEND_ALL = "SEND_ALL" + +EVENT_RECV = "EVENT_RECV" +RECV_ALL = "RECV_ALL" + + +# A number of helpful shorthands for common events. +SELECT_UPLOAD_WRITE = Event( + SELECTOR, + EVENT_SELECT, + (selectors.EVENT_READ | selectors.EVENT_WRITE, selectors.EVENT_WRITE) +) +SELECT_UPLOAD_READ = Event( + SELECTOR, + EVENT_SELECT, + (selectors.EVENT_READ | selectors.EVENT_WRITE, selectors.EVENT_READ) +) +SELECT_DOWNLOAD_READ = Event( + SELECTOR, EVENT_SELECT, (selectors.EVENT_READ, selectors.EVENT_READ) +) +SELECT_DOWNLOAD_WRITE = Event( + SELECTOR, EVENT_SELECT, (selectors.EVENT_READ, selectors.EVENT_READ) +) +SELECT_WRITABLE_WRITE = Event( + SELECTOR, EVENT_SELECT, (selectors.EVENT_WRITE, selectors.EVENT_WRITE) +) +SOCKET_SEND_ALL = Event(SOCKET, EVENT_SEND, (SEND_ALL,)) +SOCKET_SEND_5 = Event(SOCKET, EVENT_SEND, (5,)) +SOCKET_SEND_EAGAIN = Event(SOCKET, EVENT_SEND, (RAISE_EAGAIN,)) +SOCKET_SEND_WANTREAD = Event(SOCKET, EVENT_SEND, (RAISE_WANT_READ,)) +SOCKET_SEND_WANTWRITE = Event(SOCKET, EVENT_SEND, (RAISE_WANT_WRITE,)) +SOCKET_RECV_ALL = Event(SOCKET, EVENT_RECV, (RECV_ALL,)) +SOCKET_RECV_5 = Event(SOCKET, EVENT_RECV, (5,)) +SOCKET_RECV_EAGAIN = Event(SOCKET, EVENT_RECV, (RAISE_EAGAIN,)) +SOCKET_RECV_WANTREAD = Event(SOCKET, EVENT_RECV, (RAISE_WANT_READ,)) +SOCKET_RECV_WANTWRITE = Event(SOCKET, EVENT_RECV, (RAISE_WANT_WRITE,)) + + +REQUEST = ( + b'GET / HTTP/1.1\r\n' + b'host: localhost\r\n' + b'\r\n' +) +RESPONSE = ( + b'HTTP/1.1 200 OK\r\n' + b'Server: totallyarealserver/1.0.0\r\n' + b'Content-Length: 8\r\n' + b'Content-Type: text/plain\r\n' + b'\r\n' + b'complete' +) + + +class ScenarioError(Exception): + """ + An error occurred with running the scenario. + """ + pass + + +class ScenarioSelector(object): + """ + A mock Selector object. 
This selector implements a tiny bit of the selector + API (only that which is used by the higher layers), and response to select + based on the scenario it is provided. + """ + def __init__(self, scenario, sock): + self._scenario = scenario + self._fd = sock + self._events = None + + def register(self, fd, events): + if fd is not self._fd: + raise ScenarioError("Registered unexpected socket!") + self._events = events + + def modify(self, fd, events): + if fd is not self._fd: + raise ScenarioError("Modifying unexpected socket!") + self._events = events + + def select(self, timeout=None): + expected_object, event, args = self._scenario.pop(0) + if expected_object is not SELECTOR: + raise ScenarioError("Received non selector event!") + + if event is not EVENT_SELECT: + raise ScenarioError("Expected EVENT_SELECT, got %s" % event) + + expected_events, returned_event = args + if self._events != expected_events: + raise ScenarioError( + "Expected events %s, got %s" % (self._events, expected_events) + ) + + key = self.get_key(self._fd) + return [(key, returned_event)] + + def get_key(self, fd): + if fd is not self._fd: + raise ScenarioError("Querying unexpected socket!") + return selectors.SelectorKey( + self._fd, + 1, + self._events, + None + ) + + def close(self): + pass + + +class ScenarioSocket(object): + """ + A mock Socket object. This object implements a tiny bit of the socket API + (only that which is used by the synchronous connection), and responds to + socket calls based on the scenario it is provided. + """ + def __init__(self, scenario): + self._scenario = scenario + self._data_to_send = RESPONSE + self._data_sent = b'' + self._closed = False + + def _raise_errors(self, possible_error): + if possible_error is RAISE_EAGAIN: + raise socket.error(errno.EAGAIN, "try again later") + elif possible_error is RAISE_WANT_READ: + raise ssl.SSLWantReadError("Want read") + elif possible_error is RAISE_WANT_WRITE: + raise ssl.SSLWantWriteError("Want write") + + def send(self, data): + expected_object, event, args = self._scenario.pop(0) + if expected_object is not SOCKET: + raise ScenarioError("Received non selector event!") + + if event is not EVENT_SEND: + raise ScenarioError("Expected EVENT_SEND, got %s" % event) + + amount, = args + self._raise_errors(amount) + if amount is SEND_ALL: + amount = len(data) + + self._data_sent += data[:amount].tobytes() + return amount + + def recv(self, amt): + expected_object, event, args = self._scenario.pop(0) + if expected_object is not SOCKET: + raise ScenarioError("Received non selector event!") + + if event is not EVENT_RECV: + raise ScenarioError("Expected EVENT_RECV, got %s" % event) + + amount, = args + self._raise_errors(amount) + if amount is RECV_ALL: + amount = min(len(RESPONSE), amt) + + rdata = self._data_to_send[:amount] + self._data_to_send = self._data_to_send[amount:] + return rdata + + def setblocking(self, *args): + pass + + def close(self): + self._closed = True + + +class TestUnusualSocketConditions(unittest.TestCase): + """ + This class contains tests that take strict control over sockets and + selectors. The goal here is to simulate unusual network conditions that are + extremely difficult to reproducibly simulate even with socketlevel tests in + which we control both ends of the connection. 
For example, these tests + will trigger WANT_READ and WANT_WRITE errors in TLS stacks which are + otherwise extremely hard to trigger, and will also fire EAGAIN on sockets + marked readable/writable, which can technically happen but are extremely + tricky to trigger by using actual sockets and the loopback interface. + + These tests are necessarily not a perfect replacement for actual realworld + examples, but those are so prohibitively difficult to trigger that these + will have to do instead. + """ + # A stub value of the read timeout that will be used by the selector. + # This should not be edited by tests: only used as a reference for what + # delay values they can use to force things to time out. + READ_TIMEOUT = 5 + + def run_scenario(self, scenario): + conn = SyncHTTP1Connection('localhost', 80) + conn._state_machine = h11.Connection(our_role=h11.CLIENT) + conn._sock = sock = ScenarioSocket(scenario) + conn._selector = ScenarioSelector(scenario, sock) + + request = Request(method=b'GET', target=b'/') + request.add_host(host=b'localhost', port=80, scheme='http') + response = conn.send_request(request, read_timeout=self.READ_TIMEOUT) + body = b''.join(response.body) + + # The scenario should be totally consumed. + self.assertFalse(scenario) + + # Validate that the response is complete. + self.assertEqual(response.status_code, 200) + self.assertEqual(body, b'complete') + self.assertEqual(response.version, b'HTTP/1.1') + self.assertEqual(len(response.headers), 3) + self.assertEqual(response.headers['server'], 'totallyarealserver/1.0.0') + self.assertEqual(response.headers['content-length'], '8') + self.assertEqual(response.headers['content-type'], 'text/plain') + + return sock + + def test_happy_path(self): + """ + When everything goes smoothly, the response is cleanly consumed. + """ + scenario = [ + SELECT_UPLOAD_WRITE, + SOCKET_SEND_ALL, + SELECT_DOWNLOAD_READ, + SOCKET_RECV_ALL, + ] + sock = self.run_scenario(scenario) + self.assertEqual(sock._data_sent, REQUEST) + + def test_handle_recv_eagain_download(self): + """ + When a socket is marked readable during response body download but + returns EAGAIN when read from, the code simply retries the read. + """ + scenario = [ + SELECT_UPLOAD_WRITE, + SOCKET_SEND_ALL, + SELECT_DOWNLOAD_READ, + SOCKET_RECV_EAGAIN, + SELECT_DOWNLOAD_READ, + SOCKET_RECV_EAGAIN, + SELECT_DOWNLOAD_READ, + SOCKET_RECV_ALL, + ] + sock = self.run_scenario(scenario) + self.assertEqual(sock._data_sent, REQUEST) + + def test_handle_recv_want_read_download(self): + """ + When a socket is marked readable during response body download but + returns SSL_WANT_READ when read from, the code simply retries the read. + """ + scenario = [ + SELECT_UPLOAD_WRITE, + SOCKET_SEND_ALL, + SELECT_DOWNLOAD_READ, + SOCKET_RECV_WANTREAD, + SELECT_DOWNLOAD_READ, + SOCKET_RECV_WANTREAD, + SELECT_DOWNLOAD_READ, + SOCKET_RECV_ALL, + ] + sock = self.run_scenario(scenario) + self.assertEqual(sock._data_sent, REQUEST) + + def test_handle_recv_eagain_upload(self): + """ + When a socket is marked readable during request upload but returns + EAGAIN when read from, the code ignores it and continues with upload. 
+ """ + scenario = [ + SELECT_UPLOAD_WRITE, + SOCKET_SEND_5, + SELECT_UPLOAD_READ, + SOCKET_RECV_EAGAIN, + SELECT_UPLOAD_WRITE, + SOCKET_SEND_ALL, + SELECT_DOWNLOAD_READ, + SOCKET_RECV_ALL, + ] + sock = self.run_scenario(scenario) + self.assertEqual(sock._data_sent, REQUEST) + + def test_handle_recv_wantread_upload(self): + """ + When a socket is marked readable during request upload but returns + WANT_READ when read from, the code ignores it and continues with upload. + """ + scenario = [ + SELECT_UPLOAD_WRITE, + SOCKET_SEND_5, + SELECT_UPLOAD_READ, + SOCKET_RECV_WANTREAD, + SELECT_UPLOAD_WRITE, + SOCKET_SEND_ALL, + SELECT_DOWNLOAD_READ, + SOCKET_RECV_ALL, + ] + sock = self.run_scenario(scenario) + self.assertEqual(sock._data_sent, REQUEST) + + def test_handle_send_eagain_upload(self): + """ + When a socket is marked writable during request upload but returns + EAGAIN when written to, the code ignores it and continues with upload. + """ + scenario = [ + SELECT_UPLOAD_WRITE, + SOCKET_SEND_5, + SELECT_UPLOAD_WRITE, + SOCKET_SEND_EAGAIN, + SELECT_UPLOAD_WRITE, + SOCKET_SEND_ALL, + SELECT_DOWNLOAD_READ, + SOCKET_RECV_ALL, + ] + sock = self.run_scenario(scenario) + self.assertEqual(sock._data_sent, REQUEST) + + def test_handle_send_wantwrite_upload(self): + """ + When a socket is marked writable during request upload but returns + WANT_WRITE when written to, the code ignores it and continues with + upload. + """ + scenario = [ + SELECT_UPLOAD_WRITE, + SOCKET_SEND_5, + SELECT_UPLOAD_WRITE, + SOCKET_SEND_WANTWRITE, + SELECT_UPLOAD_WRITE, + SOCKET_SEND_ALL, + SELECT_DOWNLOAD_READ, + SOCKET_RECV_ALL, + ] + sock = self.run_scenario(scenario) + self.assertEqual(sock._data_sent, REQUEST) + + def test_handle_early_response(self): + """ + When a socket is marked readable during request upload, and any data is + read from the socket, the upload immediately stops and the response is + read. + """ + scenario = [ + SELECT_UPLOAD_WRITE, + SOCKET_SEND_5, + SELECT_UPLOAD_READ, + SOCKET_RECV_5, + SELECT_DOWNLOAD_READ, + SOCKET_RECV_ALL, + ] + sock = self.run_scenario(scenario) + self.assertEqual(sock._data_sent, REQUEST[:5]) + self.assertTrue(sock._closed) + + def test_handle_want_read_during_upload(self): + """ + When a socket is marked writable during request upload but returns + WANT_READ when written to, the code waits for the socket to become + readable and issues the write again. + """ + scenario = [ + SELECT_UPLOAD_WRITE, + SOCKET_SEND_5, + # Return WANT_READ twice for good measure. + SELECT_UPLOAD_WRITE, + SOCKET_SEND_WANTREAD, + SELECT_DOWNLOAD_READ, + SOCKET_SEND_WANTREAD, + SELECT_DOWNLOAD_READ, + SOCKET_SEND_ALL, + SELECT_DOWNLOAD_READ, + SOCKET_RECV_ALL, + ] + sock = self.run_scenario(scenario) + self.assertEqual(sock._data_sent, REQUEST) + + def test_handle_want_write_during_download(self): + """ + When a socket is marked readable during response download but returns + WANT_WRITE when read from, the code waits for the socket to become + writable and issues the read again. + """ + scenario = [ + SELECT_UPLOAD_WRITE, + SOCKET_SEND_ALL, + # Return WANT_WRITE twice for good measure. + SELECT_DOWNLOAD_READ, + SOCKET_RECV_WANTWRITE, + SELECT_WRITABLE_WRITE, + SOCKET_RECV_WANTWRITE, + SELECT_WRITABLE_WRITE, + SOCKET_RECV_5, + SELECT_DOWNLOAD_READ, + SOCKET_RECV_ALL, + ] + sock = self.run_scenario(scenario) + self.assertEqual(sock._data_sent, REQUEST)
sync_connection does not correctly handle SSLWantRead and SSLWantWrite

If either `SSLWantRead` or `SSLWantWrite` is raised in the context of `sync_connection._receive_bytes()` on the V2 branch, the error propagates to the caller rather than being interpreted and handled with appropriate action.
Yup, so this is broader than that. Anywhere we call `socket.recv()` or `socket.send` we need to be able to tolerate getting either of these errors and handling them appropriately. This shouldn't be too hard, but requires some careful refactoring, I think. I'll tackle this by next week if no-one beats me to it.
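In outline, the tolerance being asked for mirrors the `_recv_or_eagain` helper introduced in the patch above; a self-contained sketch:

```python
import errno
import socket
import ssl

_EAGAIN = object()  # sentinel meaning "no data available yet, try again"


def recv_or_eagain(sock):
    # Non-blocking sockets signal "try again" either as ssl.SSLWantReadError
    # (TLS) or as an error carrying errno EAGAIN (plain TCP); both are
    # expected conditions to retry on, not failures to propagate.
    try:
        return sock.recv(65536)
    except ssl.SSLWantReadError:
        return _EAGAIN
    except (OSError, socket.error) as e:
        if e.errno == errno.EAGAIN:
            return _EAGAIN
        raise
```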
2017-05-08T08:54:07Z
[]
[]
urllib3/urllib3
1,178
urllib3__urllib3-1178
[ "1112" ]
3695c67a7a417f3e06890983070a469c46f59fdf
diff --git a/urllib3/connectionpool.py b/urllib3/connectionpool.py --- a/urllib3/connectionpool.py +++ b/urllib3/connectionpool.py @@ -622,25 +622,14 @@ def urlopen(self, method, url, body=None, headers=None, retries=None, # Timed out by queue. raise EmptyPoolError(self, "No pool connections are available.") - except (BaseSSLError, CertificateError) as e: - # Close the connection. If a connection is reused on which there - # was a Certificate error, the next request will certainly raise - # another Certificate error. - clean_exit = False - raise SSLError(e) - - except SSLError: - # Treat SSLError separately from BaseSSLError to preserve - # traceback. - clean_exit = False - raise - - except (TimeoutError, HTTPException, SocketError, ProtocolError) as e: + except (TimeoutError, HTTPException, SocketError, ProtocolError, + BaseSSLError, SSLError, CertificateError) as e: # Discard the connection for these exceptions. It will be - # be replaced during the next _get_conn() call. + # replaced during the next _get_conn() call. clean_exit = False - - if isinstance(e, (SocketError, NewConnectionError)) and self.proxy: + if isinstance(e, (BaseSSLError, CertificateError)): + e = SSLError(e) + elif isinstance(e, (SocketError, NewConnectionError)) and self.proxy: e = ProxyError('Cannot connect to proxy.', e) elif isinstance(e, (SocketError, HTTPException)): e = ProtocolError('Connection aborted.', e)
diff --git a/test/test_connectionpool.py b/test/test_connectionpool.py --- a/test/test_connectionpool.py +++ b/test/test_connectionpool.py @@ -193,18 +193,19 @@ def test_pool_size(self): def _raise(ex): raise ex() - def _test(exception, expect): + def _test(exception, expect, reason=None): pool._make_request = lambda *args, **kwargs: _raise(exception) - with pytest.raises(expect): + with pytest.raises(expect) as excinfo: pool.request('GET', '/') - + if reason is not None: + assert isinstance(excinfo.value.reason, reason) assert pool.pool.qsize() == POOL_SIZE # Make sure that all of the exceptions return the connection # to the pool _test(Empty, EmptyPoolError) - _test(BaseSSLError, SSLError) - _test(CertificateError, SSLError) + _test(BaseSSLError, MaxRetryError, SSLError) + _test(CertificateError, MaxRetryError, SSLError) # The pool should never be empty, and with these two exceptions # being raised, a retry will be triggered, but that retry will diff --git a/test/with_dummyserver/test_https.py b/test/with_dummyserver/test_https.py --- a/test/with_dummyserver/test_https.py +++ b/test/with_dummyserver/test_https.py @@ -36,6 +36,7 @@ InsecureRequestWarning, SystemTimeWarning, InsecurePlatformWarning, + MaxRetryError, ) from urllib3.packages import six from urllib3.util.timeout import Timeout @@ -171,10 +172,11 @@ def test_invalid_common_name(self): try: https_pool.request('GET', '/') self.fail("Didn't raise SSL invalid common name") - except SSLError as e: + except MaxRetryError as e: + self.assertIsInstance(e.reason, SSLError) self.assertTrue( - "doesn't match" in str(e) or - "certificate verify failed" in str(e) + "doesn't match" in str(e.reason) or + "certificate verify failed" in str(e.reason) ) def test_verified_with_bad_ca_certs(self): @@ -186,10 +188,11 @@ def test_verified_with_bad_ca_certs(self): try: https_pool.request('GET', '/') self.fail("Didn't raise SSL error with bad CA certs") - except SSLError as e: - self.assertTrue('certificate verify failed' in str(e), + except MaxRetryError as e: + self.assertIsInstance(e.reason, SSLError) + self.assertTrue('certificate verify failed' in str(e.reason), "Expected 'certificate verify failed'," - "instead got: %r" % e) + "instead got: %r" % e.reason) def test_verified_without_ca_certs(self): # default is cert_reqs=None which is ssl.CERT_NONE @@ -201,23 +204,26 @@ def test_verified_without_ca_certs(self): https_pool.request('GET', '/') self.fail("Didn't raise SSL error with no CA certs when" "CERT_REQUIRED is set") - except SSLError as e: + except MaxRetryError as e: + self.assertIsInstance(e.reason, SSLError) # there is a different error message depending on whether or # not pyopenssl is injected - self.assertTrue('No root certificates specified' in str(e) or - 'certificate verify failed' in str(e) or - 'invalid certificate chain' in str(e), + self.assertTrue('No root certificates specified' in str(e.reason) or + 'certificate verify failed' in str(e.reason) or + 'invalid certificate chain' in str(e.reason), "Expected 'No root certificates specified', " "'certificate verify failed', or " "'invalid certificate chain', " - "instead got: %r" % e) + "instead got: %r" % e.reason) def test_no_ssl(self): pool = HTTPSConnectionPool(self.host, self.port) pool.ConnectionCls = None self.addCleanup(pool.close) self.assertRaises(SSLError, pool._new_conn) - self.assertRaises(SSLError, pool.request, 'GET', '/') + with self.assertRaises(MaxRetryError) as cm: + pool.request('GET', '/', retries=0) + self.assertIsInstance(cm.exception.reason, SSLError) def 
test_unverified_ssl(self): """ Test that bare HTTPSConnection can connect, make requests """ @@ -324,17 +330,22 @@ def test_assert_invalid_fingerprint(self): https_pool.assert_fingerprint = 'AA:AA:AA:AA:AA:AAAA:AA:AAAA:AA:' \ 'AA:AA:AA:AA:AA:AA:AA:AA:AA' - self.assertRaises(SSLError, https_pool.request, 'GET', '/') + def _test_request(pool): + with self.assertRaises(MaxRetryError) as cm: + pool.request('GET', '/', retries=0) + self.assertIsInstance(cm.exception.reason, SSLError) + + _test_request(https_pool) https_pool._get_conn() # Uneven length https_pool.assert_fingerprint = 'AA:A' - self.assertRaises(SSLError, https_pool.request, 'GET', '/') + _test_request(https_pool) https_pool._get_conn() # Invalid length https_pool.assert_fingerprint = 'AA' - self.assertRaises(SSLError, https_pool.request, 'GET', '/') + _test_request(https_pool) def test_verify_none_and_bad_fingerprint(self): https_pool = HTTPSConnectionPool('127.0.0.1', self.port, @@ -344,7 +355,9 @@ def test_verify_none_and_bad_fingerprint(self): https_pool.assert_fingerprint = 'AA:AA:AA:AA:AA:AAAA:AA:AAAA:AA:' \ 'AA:AA:AA:AA:AA:AA:AA:AA:AA' - self.assertRaises(SSLError, https_pool.request, 'GET', '/') + with self.assertRaises(MaxRetryError) as cm: + https_pool.request('GET', '/', retries=0) + self.assertIsInstance(cm.exception.reason, SSLError) def test_verify_none_and_good_fingerprint(self): https_pool = HTTPSConnectionPool('127.0.0.1', self.port, @@ -510,7 +523,9 @@ def test_discards_connection_on_sslerror(self): # is an issue with the OpenSSL for Python 2.6 on Windows. self._pool.cert_reqs = 'CERT_REQUIRED' - self.assertRaises(SSLError, self._pool.request, 'GET', '/') + with self.assertRaises(MaxRetryError) as cm: + self._pool.request('GET', '/', retries=0) + self.assertIsInstance(cm.exception.reason, SSLError) self._pool.ca_certs = DEFAULT_CA self._pool.request('GET', '/') diff --git a/test/with_dummyserver/test_proxy_poolmanager.py b/test/with_dummyserver/test_proxy_poolmanager.py --- a/test/with_dummyserver/test_proxy_poolmanager.py +++ b/test/with_dummyserver/test_proxy_poolmanager.py @@ -82,12 +82,13 @@ def test_proxy_verified(self): https_pool = http._new_pool('https', self.https_host, self.https_port) try: - https_pool.request('GET', '/') + https_pool.request('GET', '/', retries=0) self.fail("Didn't raise SSL error with wrong CA") - except SSLError as e: - self.assertTrue('certificate verify failed' in str(e), + except MaxRetryError as e: + self.assertIsInstance(e.reason, SSLError) + self.assertTrue('certificate verify failed' in str(e.reason), "Expected 'certificate verify failed'," - "instead got: %r" % e) + "instead got: %r" % e.reason) http = proxy_from_url(self.proxy_url, cert_reqs='REQUIRED', ca_certs=DEFAULT_CA) @@ -103,10 +104,11 @@ def test_proxy_verified(self): https_fail_pool = http._new_pool('https', '127.0.0.1', self.https_port) try: - https_fail_pool.request('GET', '/') + https_fail_pool.request('GET', '/', retries=0) self.fail("Didn't raise SSL invalid common name") - except SSLError as e: - self.assertTrue("doesn't match" in str(e)) + except MaxRetryError as e: + self.assertIsInstance(e.reason, SSLError) + self.assertTrue("doesn't match" in str(e.reason)) def test_redirect(self): http = proxy_from_url(self.proxy_url) diff --git a/test/with_dummyserver/test_socketlevel.py b/test/with_dummyserver/test_socketlevel.py --- a/test/with_dummyserver/test_socketlevel.py +++ b/test/with_dummyserver/test_socketlevel.py @@ -79,7 +79,7 @@ def socket_handler(listener): self.addCleanup(pool.close) try: 
pool.request('GET', '/', retries=0) - except SSLError: # We are violating the protocol + except MaxRetryError: # We are violating the protocol pass done_receiving.wait() self.assertTrue(self.host.encode('ascii') in self.buf, @@ -220,7 +220,7 @@ def socket_handler(listener): self.addCleanup(pool.close) try: pool.request('GET', '/', retries=0) - except SSLError: + except MaxRetryError: done_receiving.set() else: done_receiving.set() @@ -942,7 +942,9 @@ def socket_handler(listener): pool = HTTPSConnectionPool(self.host, self.port) self.addCleanup(pool.close) - self.assertRaises(SSLError, pool.request, 'GET', '/', retries=0) + with self.assertRaises(MaxRetryError) as cm: + pool.request('GET', '/', retries=0) + self.assertIsInstance(cm.exception.reason, SSLError) def test_ssl_read_timeout(self): timed_out = Event() @@ -1010,14 +1012,62 @@ def request(): assert_fingerprint=fingerprint) try: response = pool.urlopen('GET', '/', preload_content=False, - timeout=Timeout(connect=1, read=0.001)) + timeout=Timeout(connect=1, read=0.001), + retries=0) response.read() finally: pool.close() - self.assertRaises(SSLError, request) + with self.assertRaises(MaxRetryError) as cm: + request() + self.assertIsInstance(cm.exception.reason, SSLError) # Should not hang, see https://github.com/shazow/urllib3/issues/529 - self.assertRaises(SSLError, request) + self.assertRaises(MaxRetryError, request) + + def test_retry_ssl_error(self): + def socket_handler(listener): + # first request, trigger an SSLError + sock = listener.accept()[0] + sock2 = sock.dup() + ssl_sock = ssl.wrap_socket(sock, + server_side=True, + keyfile=DEFAULT_CERTS['keyfile'], + certfile=DEFAULT_CERTS['certfile']) + buf = b'' + while not buf.endswith(b'\r\n\r\n'): + buf += ssl_sock.recv(65536) + + # Deliberately send from the non-SSL socket to trigger an SSLError + sock2.send(( + 'HTTP/1.1 200 OK\r\n' + 'Content-Type: text/plain\r\n' + 'Content-Length: 4\r\n' + '\r\n' + 'Fail').encode('utf-8')) + sock2.close() + ssl_sock.close() + + # retried request + sock = listener.accept()[0] + ssl_sock = ssl.wrap_socket(sock, + server_side=True, + keyfile=DEFAULT_CERTS['keyfile'], + certfile=DEFAULT_CERTS['certfile']) + buf = b'' + while not buf.endswith(b'\r\n\r\n'): + buf += ssl_sock.recv(65536) + ssl_sock.send(b'HTTP/1.1 200 OK\r\n' + b'Content-Type: text/plain\r\n' + b'Content-Length: 7\r\n\r\n' + b'Success') + ssl_sock.close() + + self._start_server(socket_handler) + + pool = HTTPSConnectionPool(self.host, self.port) + self.addCleanup(pool.close) + response = pool.urlopen('GET', '/', retries=1) + self.assertEqual(response.data, b'Success') class TestErrorWrapping(SocketDummyServerTestCase):
Bug: No Retry for SSL Error

The following code should fire off a `MaxRetryError`, but instead it doesn't retry and throws an `SSLError`.

**Test case:**

_server.py_

```python
import time
import socket

s = socket.socket()
s.bind(('', 4433))
s.listen(10)

while True:
    news, _ = s.accept()
    time.sleep(0.5)
    news.close()
```

_client.py_

```python
import urllib3

http = urllib3.PoolManager()
http.request('GET', 'https://localhost:4433/', retries=5)
```

<sup>*Disclaimer:* Test case was created by [**Lukasa**](https://github.com/kennethreitz/requests/issues/3845#issuecomment-278273421)</sup>
Ok, so my analysis of this bug is just that we do not retry on `SSLError`, and we probably should. The code block in `ConnectionPool.urlopen` is the one that makes this call, and I think it's just not right. I'm inclined to say that the blocks for `BaseSSLError`, `CertificateError`, and `SSLError` should all be rolled down into the block below. Thoughts? This is something that would be good for one of the regular contributors to pick up if they want to. =)

I'm currently trying to implement this as proposed by @Lukasa, basically like this:

```diff
diff --git a/urllib3/connectionpool.py b/urllib3/connectionpool.py
index b4f1166..031df4c 100644
--- a/urllib3/connectionpool.py
+++ b/urllib3/connectionpool.py
@@ -622,26 +622,16 @@ class HTTPConnectionPool(ConnectionPool, RequestMethods):
             # Timed out by queue.
             raise EmptyPoolError(self, "No pool connections are available.")

-        except (BaseSSLError, CertificateError) as e:
-            # Close the connection. If a connection is reused on which there
-            # was a Certificate error, the next request will certainly raise
-            # another Certificate error.
-            clean_exit = False
-            raise SSLError(e)
-
-        except SSLError:
-            # Treat SSLError separately from BaseSSLError to preserve
-            # traceback.
-            clean_exit = False
-            raise
-
-        except (TimeoutError, HTTPException, SocketError, ProtocolError) as e:
+        except (TimeoutError, HTTPException, SocketError, ProtocolError,
+                BaseSSLError, SSLError, CertificateError) as e:
             # Discard the connection for these exceptions. It will be
-            # be replaced during the next _get_conn() call.
+            # replaced during the next _get_conn() call.
             clean_exit = False

             if isinstance(e, (SocketError, NewConnectionError)) and self.proxy:
                 e = ProxyError('Cannot connect to proxy.', e)
+            elif isinstance(e, (BaseSSLError, CertificateError)):
+                e = SSLError(e)
             elif isinstance(e, (SocketError, HTTPException)):
                 e = ProtocolError('Connection aborted.', e)
```

This introduces a whole bunch of test failures, most of which are because they expect an `SSLError` but are now receiving a `MaxRetryError`. That's easy enough to fix, but it opens up another question: doesn't this break users of the library that specifically want to handle `SSLError` exceptions?

Yup, but that's ok: urllib3 does not have backward-compatibility guarantees on minor releases, so I'm not too fussed about it. :grin:

Sweet! I'll open a PR later today then!
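To make the downstream effect of that change concrete, here is a minimal, illustrative sketch (not part of the PR itself) of how calling code would observe it, reusing the `localhost:4433` test server from the report above:

```python
import urllib3
from urllib3.exceptions import MaxRetryError, SSLError

http = urllib3.PoolManager()
try:
    # Once SSL failures feed into the retry machinery, exhausting the retry
    # budget surfaces as MaxRetryError rather than a bare SSLError.
    http.request('GET', 'https://localhost:4433/', retries=5)
except MaxRetryError as e:
    # The underlying TLS failure is still available on the .reason attribute.
    if isinstance(e.reason, SSLError):
        print('TLS handshake kept failing after all retries:', e.reason)
```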
2017-05-15T13:25:57Z
[]
[]
urllib3/urllib3
1,186
urllib3__urllib3-1186
[ "1183" ]
3b7ded611911f19a958ce2433a2d86ff8d6121ce
diff --git a/_travis/fetch_gae_sdk.py b/_travis/fetch_gae_sdk.py deleted file mode 100644 --- a/_travis/fetch_gae_sdk.py +++ /dev/null @@ -1,108 +0,0 @@ -#!/usr/bin/env python - -# Copyright 2015 Google Inc. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -# Retrieved from https://github.com/Google/oauth2client - -"""Fetch the most recent GAE SDK and decompress it in the current directory. - -Usage: - fetch_gae_sdk.py [<dest_dir>] - -Current releases are listed here: - https://www.googleapis.com/storage/v1/b/appengine-sdks/o?prefix=featured -""" -from __future__ import print_function - -import json -import os -import StringIO -import sys -import urllib2 -import zipfile - - -_SDK_URL = ( - 'https://www.googleapis.com/storage/v1/b/appengine-sdks/o?prefix=featured') - - -def get_gae_versions(): - try: - version_info_json = urllib2.urlopen(_SDK_URL).read() - except: - return {} - try: - version_info = json.loads(version_info_json) - except: - return {} - return version_info.get('items', {}) - - -def _version_tuple(v): - version_string = os.path.splitext(v['name'])[0].rpartition('_')[2] - return tuple(int(x) for x in version_string.split('.')) - - -def get_sdk_urls(sdk_versions): - python_releases = [v for v in sdk_versions - if v['name'].startswith('featured/google_appengine')] - current_releases = sorted(python_releases, key=_version_tuple, - reverse=True) - return [release['mediaLink'] for release in current_releases] - - -def main(argv): - if len(argv) > 2: - print('Usage: {0} [<destination_dir>]'.format(argv[0])) - return 1 - if len(argv) > 1: - dest_dir = argv[1] - else: - try: - dest_dir = os.path.dirname(os.environ['GAE_PYTHONPATH']) - except IndexError: - dest_dir = '.' - if not os.path.exists(dest_dir): - os.makedirs(dest_dir) - - if os.path.exists(os.path.join(dest_dir, 'google_appengine')): - print('GAE SDK already installed at {0}, exiting.'.format(dest_dir)) - return 0 - - sdk_versions = get_gae_versions() - if not sdk_versions: - print('Error fetching GAE SDK version info') - return 1 - sdk_urls = get_sdk_urls(sdk_versions) - for sdk_url in sdk_urls: - try: - sdk_contents = StringIO.StringIO(urllib2.urlopen(sdk_url).read()) - break - except: - pass - else: - print('Could not read SDK from any of ', sdk_urls) - return 1 - sdk_contents.seek(0) - try: - with zipfile.ZipFile(sdk_contents) as zip_contents: - zip_contents.extractall(dest_dir) - except: - print('Error extracting SDK contents') - return 1 - - -if __name__ == '__main__': - sys.exit(main(sys.argv[:]))
diff --git a/test/appengine/requirements.txt b/test/appengine/requirements.txt --- a/test/appengine/requirements.txt +++ b/test/appengine/requirements.txt @@ -1 +1 @@ -NoseGAE==0.5.7 +NoseGAE==0.5.10
Running `tox -e gae` is broken on current master Extracted from #1182: ```console $ tox -e gae GLOB sdist-make: /Users/alexwlchan/repos/urllib3/setup.py gae inst-nodeps: /Users/alexwlchan/repos/urllib3/.tox/dist/urllib3-dev.zip gae installed: appdirs==1.4.3,backports.ssl-match-hostname==3.5.0.1,certifi==2017.4.17,coverage==3.7.1,funcsigs==1.0.2,mock==1.3.0,nose==1.3.7,nose-exclude==0.4.1,NoseGAE==0.5.7,packaging==16.8,pbr==3.0.1,pkginfo==1.4.1,pluggy==0.3.1,psutil==4.3.1,py==1.4.33,pyparsing==2.2.0,PySocks==1.5.6,pytest==3.1.0,requests==2.14.2,six==1.10.0,tornado==4.2.1,tox==2.1.1,twine==1.5.0,urllib3===dev,virtualenv==15.1.0 gae runtests: PYTHONHASHSEED='2409600760' gae runtests: commands[0] | nosetests -c /Users/alexwlchan/repos/urllib3/test/appengine/nose.cfg test/appengine Traceback (most recent call last): File ".tox/gae/bin/nosetests", line 11, in <module> sys.exit(run_exit()) File "/Users/alexwlchan/repos/urllib3/.tox/gae/lib/python2.7/site-packages/nose/core.py", line 121, in __init__ **extra_args) File "/usr/local/Cellar/python/2.7.13/Frameworks/Python.framework/Versions/2.7/lib/python2.7/unittest/main.py", line 94, in __init__ self.parseArgs(argv) File "/Users/alexwlchan/repos/urllib3/.tox/gae/lib/python2.7/site-packages/nose/core.py", line 145, in parseArgs self.config.configure(argv, doc=self.usage()) File "/Users/alexwlchan/repos/urllib3/.tox/gae/lib/python2.7/site-packages/nose/config.py", line 346, in configure self.plugins.configure(options, self) File "/Users/alexwlchan/repos/urllib3/.tox/gae/lib/python2.7/site-packages/nose/plugins/manager.py", line 284, in configure cfg(options, config) File "/Users/alexwlchan/repos/urllib3/.tox/gae/lib/python2.7/site-packages/nose/plugins/manager.py", line 99, in __call__ return self.call(*arg, **kw) File "/Users/alexwlchan/repos/urllib3/.tox/gae/lib/python2.7/site-packages/nose/plugins/manager.py", line 167, in simple result = meth(*arg, **kw) File "/Users/alexwlchan/repos/urllib3/.tox/gae/lib/python2.7/site-packages/nosegae.py", line 91, in configure import dev_appserver ImportError: No module named dev_appserver ERROR: InvocationError: '/Users/alexwlchan/repos/urllib3/.tox/gae/bin/nosetests -c /Users/alexwlchan/repos/urllib3/test/appengine/nose.cfg test/appengine' ________________________________________________________________________________ summary _________________________________________________________________________________ ERROR: gae: commands failed ```
Hmm. I found [the most recent successful Travis job](https://travis-ci.org/shazow/urllib3/jobs/236341107), and pinned based on the requirements reported by tox: ```console $ cat test/appengine/requirements.txt requests==2.14.2 appdirs==1.4.3 backports.ssl-match-hostname==3.5.0.1 certifi==2017.4.17 coverage==3.7.1 funcsigs==1.0.2 mock==1.3.0 nose==1.3.7 nose-exclude==0.4.1 NoseGAE==0.5.7 packaging==16.8 pbr==3.0.1 pluggy==0.3.1 psutil==4.3.1 py==1.4.33 pyparsing==2.2.0 PySocks==1.5.6 requests==2.14.2 six==1.10.0 tornado==4.2.1 tox==2.1.1 twine==1.5.0 urllib3===dev virtualenv==15.1.0 ``` Same issue. Interesting, this passes on Travis, with a matching set of versions – see https://travis-ci.org/shazow/urllib3/jobs/236925046 – but not on my local machine. But it doesn’t pass in Travis on my fork – https://travis-ci.org/alexwlchan/urllib3/jobs/236924925 – I wonder, is there something dodgy about the Travis cache for urllib3 that’s masking the bug? @alexwlchan yes, sending a PR to fix shortly.
2017-05-28T17:06:17Z
[]
[]
urllib3/urllib3
1,213
urllib3__urllib3-1213
[ "1191" ]
d74df4af794b28db61edbbacec688463bbabdd43
diff --git a/urllib3/connectionpool.py b/urllib3/connectionpool.py --- a/urllib3/connectionpool.py +++ b/urllib3/connectionpool.py @@ -396,7 +396,7 @@ def _make_request(self, conn, method, url, timeout=_Default, chunked=False, try: assert_header_parsing(httplib_response.msg) - except HeaderParsingError as hpe: # Platform-specific: Python 3 + except (HeaderParsingError, TypeError) as hpe: # Platform-specific: Python 3 log.warning( 'Failed to parse headers (url=%s): %s', self._absolute_url(url), hpe, exc_info=True)
diff --git a/test/appengine/__init__.py b/test/appengine/__init__.py --- a/test/appengine/__init__.py +++ b/test/appengine/__init__.py @@ -1,71 +0,0 @@ -import os -import sys -import unittest -from nose.plugins.skip import SkipTest - - -def activate_sandbox(): - """ - Enables parts of the GAE sandbox that are relevant. - - Inserts the stub module import hook which causes the usage of appengine-specific - httplib, httplib2, socket, etc. - """ - from google.appengine.tools.devappserver2.python import sandbox - - for name in list(sys.modules): - if name in sandbox.dist27.MODULE_OVERRIDES: - del sys.modules[name] - sys.meta_path.insert(0, sandbox.StubModuleImportHook()) - sys.path_importer_cache = {} - - -def deactivate_sandbox(): - from google.appengine.tools.devappserver2.python import sandbox - - sys.meta_path = [ - x for x in sys.meta_path if not isinstance(x, sandbox.StubModuleImportHook)] - sys.path_importer_cache = {} - - # Delete any instances of sandboxed modules. - for name in list(sys.modules): - if name in sandbox.dist27.MODULE_OVERRIDES: - del sys.modules[name] - - -class AppEngineSandboxTest(unittest.TestCase): - - @classmethod - def setUpClass(cls): - - if sys.version_info[:2] != (2, 7): - raise SkipTest("App Engine only tests on py2.7") - - if 'APPLICATION_ID' not in os.environ: - raise SkipTest("NoseGAE plugin not used.") - - try: - activate_sandbox() - except ImportError: - raise SkipTest("App Engine SDK not available.") - - @classmethod - def tearDownClass(self): - try: - deactivate_sandbox() - except ImportError: - pass - - -class MockResponse(object): - def __init__(self, content, status_code, content_was_truncated, final_url, headers): - import httplib - from StringIO import StringIO - - self.content = content - self.status_code = status_code - self.content_was_truncated = content_was_truncated - self.final_url = final_url - self.header_msg = httplib.HTTPMessage(StringIO(''.join( - ["%s: %s\n" % (k, v) for k, v in headers.iteritems()] + ["\n"]))) - self.headers = self.header_msg.items() diff --git a/test/appengine/app.yaml b/test/appengine/app.yaml deleted file mode 100644 --- a/test/appengine/app.yaml +++ /dev/null @@ -1,11 +0,0 @@ -# dummy app.yaml for nosegae - -api_version: 1 -runtime: python27 -threadsafe: true - -handlers: -- url: / - static_files: README.md - upload: README.md - mime_type: text/plain diff --git a/test/appengine/conftest.py b/test/appengine/conftest.py new file mode 100644 --- /dev/null +++ b/test/appengine/conftest.py @@ -0,0 +1,72 @@ +# Copyright 2015 Google Inc. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import os +import sys + +# Import py.test hooks and fixtures for App Engine +from gcp_devrel.testing.appengine import ( + pytest_configure, + pytest_runtest_call, + testbed +) +import pytest +import six + + +__all__ = [ + 'pytest_configure', + 'pytest_runtest_call', + 'pytest_ignore_collect', + 'testbed', + 'sandbox', +] + + [email protected] +def sandbox(testbed): + """ + Enables parts of the GAE sandbox that are relevant. + Inserts the stub module import hook which causes the usage of + appengine-specific httplib, httplib2, socket, etc. + """ + from google.appengine.tools.devappserver2.python import sandbox + + for name in list(sys.modules): + if name in sandbox.dist27.MODULE_OVERRIDES: + del sys.modules[name] + sys.meta_path.insert(0, sandbox.StubModuleImportHook()) + sys.path_importer_cache = {} + + yield testbed + + sys.meta_path = [ + x for x in sys.meta_path + if not isinstance(x, sandbox.StubModuleImportHook)] + sys.path_importer_cache = {} + + # Delete any instances of sandboxed modules. + for name in list(sys.modules): + if name in sandbox.dist27.MODULE_OVERRIDES: + del sys.modules[name] + + +def pytest_ignore_collect(path, config): + """Skip App Engine tests in python 3 or if no SDK is available.""" + if 'appengine' in str(path): + if six.PY3: + return True + if not os.environ.get('GAE_SDK_PATH'): + return True + return False diff --git a/test/appengine/nose.cfg b/test/appengine/nose.cfg deleted file mode 100644 --- a/test/appengine/nose.cfg +++ /dev/null @@ -1,4 +0,0 @@ -[nosetests] -cover-min-percentage=0 -with-gae=1 -gae-application=test/appengine/app.yaml diff --git a/test/appengine/requirements.txt b/test/appengine/requirements.txt deleted file mode 100644 --- a/test/appengine/requirements.txt +++ /dev/null @@ -1 +0,0 @@ -NoseGAE==0.5.10 diff --git a/test/contrib/test_gae_manager.py b/test/appengine/test_gae_manager.py similarity index 69% rename from test/contrib/test_gae_manager.py rename to test/appengine/test_gae_manager.py --- a/test/contrib/test_gae_manager.py +++ b/test/appengine/test_gae_manager.py @@ -1,29 +1,12 @@ -import unittest +import dummyserver.testcase +import pytest -from dummyserver.testcase import HTTPSDummyServerTestCase -from nose.plugins.skip import SkipTest +from urllib3.contrib import appengine +import urllib3.exceptions +import urllib3.util.url +import urllib3.util.retry -try: - from google.appengine.api import urlfetch - (urlfetch) -except ImportError: - raise SkipTest("App Engine SDK not available.") - -from urllib3.contrib.appengine import AppEngineManager, AppEnginePlatformError -from urllib3.exceptions import ( - TimeoutError, - ProtocolError, - SSLError) -from urllib3.util.url import Url -from urllib3.util.retry import Retry, RequestHistory - -from test.with_dummyserver.test_connectionpool import ( - TestConnectionPool, TestRetry, TestRetryAfter) - - -# Prevent nose from running these test. -TestConnectionPool.__test__ = False -TestRetry.__test__ = False +from test.with_dummyserver import test_connectionpool # This class is used so we can re-use the tests from the connection pool. @@ -44,7 +27,7 @@ def urlopen(self, method, url, *args, **kwargs): return self.manager.urlopen(method, url, *args, **kwargs) def _absolute_url(self, path): - return Url( + return urllib3.util.url.Url( scheme=self.scheme, host=self.host, port=self.port, @@ -54,14 +37,11 @@ def _absolute_url(self, path): # Note that this doesn't run in the sandbox, it only runs with the URLFetch # API stub enabled. 
There's no need to enable the sandbox as we know for a fact # that URLFetch is used by the connection manager. -class TestGAEConnectionManager(TestConnectionPool): - __test__ = True - - # Magic class variable that tells NoseGAE to enable the URLFetch stub. - nosegae_urlfetch = True [email protected]('testbed') +class TestGAEConnectionManager(test_connectionpool.TestConnectionPool): def setUp(self): - self.manager = AppEngineManager() + self.manager = appengine.AppEngineManager() self.pool = MockPool(self.host, self.port, self.manager) # Tests specific to AppEngineManager @@ -69,7 +49,7 @@ def setUp(self): def test_exceptions(self): # DeadlineExceededError -> TimeoutError self.assertRaises( - TimeoutError, + urllib3.exceptions.TimeoutError, self.pool.request, 'GET', '/sleep?seconds=0.005', @@ -77,21 +57,21 @@ def test_exceptions(self): # InvalidURLError -> ProtocolError self.assertRaises( - ProtocolError, + urllib3.exceptions.ProtocolError, self.manager.request, 'GET', 'ftp://invalid/url') # DownloadError -> ProtocolError self.assertRaises( - ProtocolError, + urllib3.exceptions.ProtocolError, self.manager.request, 'GET', 'http://0.0.0.0') # ResponseTooLargeError -> AppEnginePlatformError self.assertRaises( - AppEnginePlatformError, + appengine.AppEnginePlatformError, self.pool.request, 'GET', '/nbytes?length=33554433') # One byte over 32 megabtyes. @@ -100,7 +80,7 @@ def test_exceptions(self): # which maps to a AppEnginePlatformError. body = b'1' * 10485761 # One byte over 10 megabytes. self.assertRaises( - AppEnginePlatformError, + appengine.AppEnginePlatformError, self.manager.request, 'POST', '/', @@ -139,11 +119,12 @@ def test_exceptions(self): test_dns_error = None -class TestGAEConnectionManagerWithSSL(HTTPSDummyServerTestCase): - nosegae_urlfetch = True [email protected]('testbed') +class TestGAEConnectionManagerWithSSL( + dummyserver.testcase.HTTPSDummyServerTestCase): def setUp(self): - self.manager = AppEngineManager() + self.manager = appengine.AppEngineManager() self.pool = MockPool(self.host, self.port, self.manager, 'https') def test_exceptions(self): @@ -151,25 +132,22 @@ def test_exceptions(self): # SSLError is raised with dummyserver because URLFetch doesn't allow # self-signed certs. self.assertRaises( - SSLError, + urllib3.exceptions.SSLError, self.pool.request, 'GET', '/') -class TestGAERetry(TestRetry): - __test__ = True - - # Magic class variable that tells NoseGAE to enable the URLFetch stub. - nosegae_urlfetch = True [email protected]('testbed') +class TestGAERetry(test_connectionpool.TestRetry): def setUp(self): - self.manager = AppEngineManager() + self.manager = appengine.AppEngineManager() self.pool = MockPool(self.host, self.port, self.manager) def test_default_method_whitelist_retried(self): """ urllib3 should retry methods in the default method whitelist """ - retry = Retry(total=1, status_forcelist=[418]) + retry = urllib3.util.retry.Retry(total=1, status_forcelist=[418]) # Use HEAD instead of OPTIONS, as URLFetch doesn't support OPTIONS resp = self.pool.request( 'HEAD', '/successful_retry', @@ -179,16 +157,18 @@ def test_default_method_whitelist_retried(self): def test_retry_return_in_response(self): headers = {'test-name': 'test_retry_return_in_response'} - retry = Retry(total=2, status_forcelist=[418]) + retry = urllib3.util.retry.Retry(total=2, status_forcelist=[418]) resp = self.pool.request('GET', '/successful_retry', headers=headers, retries=retry) self.assertEqual(resp.status, 200) self.assertEqual(resp.retries.total, 1) # URLFetch use absolute urls. 
- self.assertEqual(resp.retries.history, - (RequestHistory('GET', - self.pool._absolute_url('/successful_retry'), - None, 418, None),)) + self.assertEqual( + resp.retries.history, + (urllib3.util.retry.RequestHistory( + 'GET', + self.pool._absolute_url('/successful_retry'), + None, 418, None),)) # test_max_retry = None # test_disabled_retry = None @@ -197,17 +177,10 @@ def test_retry_return_in_response(self): test_multi_redirect_history = None -class TestGAERetryAfter(TestRetryAfter): - __test__ = True - - # Magic class variable that tells NoseGAE to enable the URLFetch stub. - nosegae_urlfetch = True [email protected]('testbed') +class TestGAERetryAfter(test_connectionpool.TestRetryAfter): def setUp(self): # Disable urlfetch which doesn't respect Retry-After header. - self.manager = AppEngineManager(urlfetch_retries=False) + self.manager = appengine.AppEngineManager(urlfetch_retries=False) self.pool = MockPool(self.host, self.port, self.manager) - - -if __name__ == '__main__': - unittest.main() diff --git a/test/appengine/test_urlfetch.py b/test/appengine/test_urlfetch.py --- a/test/appengine/test_urlfetch.py +++ b/test/appengine/test_urlfetch.py @@ -1,49 +1,73 @@ -from . import AppEngineSandboxTest, MockResponse +"""These tests ensure that when running in App Engine standard with the +App Engine sandbox enabled that urllib3 appropriately uses the App +Engine-patched version of httplib to make requests.""" + +import httplib +import StringIO +import unittest from mock import patch -from nose.plugins.skip import SkipTest +import pytest + from ..test_no_ssl import TestWithoutSSL -class TestHTTP(AppEngineSandboxTest, TestWithoutSSL): - nosegae_urlfetch = True +class MockResponse(object): + def __init__(self, content, status_code, content_was_truncated, final_url, + headers): + + self.content = content + self.status_code = status_code + self.content_was_truncated = content_was_truncated + self.final_url = final_url + self.header_msg = httplib.HTTPMessage(StringIO.StringIO(''.join( + ["%s: %s\n" % (k, v) for k, v in headers.iteritems()] + ["\n"]))) + self.headers = headers + [email protected]('sandbox') +class TestHTTP(TestWithoutSSL): def test_urlfetch_called_with_http(self): - """ - Check that URLFetch is used to fetch non-https resources - """ + """Check that URLFetch is used to fetch non-https resources.""" resp = MockResponse( 'OK', 200, False, 'http://www.google.com', {'content-type': 'text/plain'}) - with patch('google.appengine.api.urlfetch.fetch', return_value=resp) as fetchmock: + fetch_patch = patch( + 'google.appengine.api.urlfetch.fetch', return_value=resp + ) + with fetch_patch as fetch_mock: import urllib3 pool = urllib3.HTTPConnectionPool('www.google.com', '80') r = pool.request('GET', '/') self.assertEqual(r.status, 200, r.data) - self.assertEqual(fetchmock.call_count, 1) - + self.assertEqual(fetch_mock.call_count, 1) -class TestHTTPS(AppEngineSandboxTest): - nosegae_urlfetch = True [email protected]('sandbox') +class TestHTTPS(unittest.TestCase): + @pytest.mark.xfail( + reason='This is not yet supported by urlfetch, presence of the ssl ' + 'module will bypass urlfetch.') def test_urlfetch_called_with_https(self): """ Check that URLFetch is used when fetching https resources """ - raise SkipTest() # Skipped for now because it fails. 
resp = MockResponse( 'OK', 200, False, 'https://www.google.com', {'content-type': 'text/plain'}) - with patch('google.appengine.api.urlfetch.fetch', return_value=resp) as fetchmock: + fetch_patch = patch( + 'google.appengine.api.urlfetch.fetch', return_value=resp + ) + with fetch_patch as fetch_mock: import urllib3 pool = urllib3.HTTPSConnectionPool('www.google.com', '443') pool.ConnectionCls = urllib3.connection.UnverifiedHTTPSConnection r = pool.request('GET', '/') self.assertEqual(r.status, 200, r.data) - self.assertEqual(fetchmock.call_count, 1) + self.assertEqual(fetch_mock.call_count, 1)
Move GAE tox env to use the pytest runner Context: https://github.com/shazow/urllib3/pull/1187#issuecomment-304813261 Related: https://github.com/shazow/urllib3/issues/1160
2017-06-08T04:38:45Z
[]
[]
urllib3/urllib3
1,235
urllib3__urllib3-1235
[ "1234" ]
33f4bda0225b1d2b5cc9014c10e65ebee267ab9d
diff --git a/urllib3/response.py b/urllib3/response.py --- a/urllib3/response.py +++ b/urllib3/response.py @@ -588,12 +588,12 @@ def read_chunked(self, amt=None, decode_content=None): "Body should be httplib.HTTPResponse like. " "It should have have an fp attribute which returns raw chunks.") - # Don't bother reading the body of a HEAD request. - if self._original_response and is_response_to_head(self._original_response): - self._original_response.close() - return - with self._error_catcher(): + # Don't bother reading the body of a HEAD request. + if self._original_response and is_response_to_head(self._original_response): + self._original_response.close() + return + while True: self._update_chunk_length() if self.chunk_left == 0:
diff --git a/test/test_response.py b/test/test_response.py --- a/test/test_response.py +++ b/test/test_response.py @@ -3,6 +3,7 @@ from io import BytesIO, BufferedReader import pytest +import mock from urllib3.response import HTTPResponse from urllib3.exceptions import ( @@ -606,6 +607,22 @@ def test_chunked_response_with_extensions(self): resp = HTTPResponse(r, preload_content=False, headers={'transfer-encoding': 'chunked'}) assert stream == list(resp.stream()) + def test_chunked_head_response(self): + r = httplib.HTTPResponse(MockSock, method='HEAD') + r.chunked = True + r.chunk_left = None + resp = HTTPResponse('', + preload_content=False, + headers={'transfer-encoding': 'chunked'}, + original_response=r) + assert resp.chunked is True + + resp.supports_chunked_reads = lambda: True + resp.release_conn = mock.Mock() + for _ in resp.stream(): + continue + resp.release_conn.assert_called_once_with() + def test_get_case_insensitive_headers(self): headers = {'host': 'example.com'} r = HTTPResponse(headers=headers)
HEAD response with chunked Transfer-Encoding does not release the connection Amazon S3 sets "Transfer-Encoding: chunked" in the response even for a HEAD request. For example: ``` DEBUG:botocore.vendored.requests.packages.urllib3.connectionpool:Starting new HTTPS connection (1): s3.amazonaws.com send: 'HEAD /timur-test/garbage HTTP/1.1\r\nHost: s3.amazonaws.com\r\nAccept-Encoding: identity\r\n\r\n' reply: 'HTTP/1.1 301 Moved Permanently\r\n' header: x-amz-bucket-region: us-west-2 header: x-amz-request-id: 4E83FBED76CBB9BB header: x-amz-id-2: dxdu+gwK6fdzdK2CaA/0yAcBxO7wdSmzRDwo9JRYA7QeMEk8xMkuE5VJ2AiabWWSqpi+IHweXqA= header: Content-Type: application/xml header: Transfer-Encoding: chunked header: Date: Fri, 21 Jul 2017 06:41:02 GMT header: Server: AmazonS3 ``` This causes the connection to not be returned to the pool after calling `stream()` or `read_chunked()` (which `stream()` is a thin wrapper for in this case). This is due to the following conditional in the `read_chunked()` method: https://github.com/shazow/urllib3/blob/master/urllib3/response.py#L592. I believe moving this conditional inside the `error_catcher` context manager is the right resolution to this (or calling `release_conn()` explicitly). Will submit a PR for this.
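As a rough reproduction sketch (the bucket URL is a placeholder, not taken from the report), the leak shows up when streaming a chunked HEAD response from a pool:

```python
import urllib3

http = urllib3.PoolManager(maxsize=1)

# S3 answers HEAD requests with "Transfer-Encoding: chunked", so urllib3 takes
# the read_chunked() code path even though there is no body to consume.
resp = http.request('HEAD', 'https://s3.amazonaws.com/example-bucket/example-key',
                    preload_content=False)
for _ in resp.stream():
    pass

# Before the fix, the early return for HEAD responses happened outside the
# error catcher, so the connection above was never released back to the pool.
```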
2017-07-21T07:37:32Z
[]
[]
urllib3/urllib3
1,255
urllib3__urllib3-1255
[ "1254" ]
945f2a6ca40d6c20c3eceb0d6e53eeef9d6b0c53
diff --git a/dummyserver/testcase.py b/dummyserver/testcase.py --- a/dummyserver/testcase.py +++ b/dummyserver/testcase.py @@ -76,6 +76,23 @@ def tearDownClass(cls): if hasattr(cls, 'server_thread'): cls.server_thread.join(0.1) + def assert_header_received( + self, + received_headers, + header_name, + expected_value=None + ): + header_name = header_name.encode('ascii') + if expected_value is not None: + expected_value = expected_value.encode('ascii') + header_titles = [] + for header in received_headers: + key, value = header.split(b': ') + header_titles.append(key) + if key == header_name and expected_value is not None: + self.assertEqual(value, expected_value) + self.assertIn(header_name, header_titles) + class IPV4SocketDummyServerTestCase(SocketDummyServerTestCase): @classmethod diff --git a/urllib3/connection.py b/urllib3/connection.py --- a/urllib3/connection.py +++ b/urllib3/connection.py @@ -124,6 +124,35 @@ def __init__(self, *args, **kw): # Superclass also sets self.source_address in Python 2.7+. _HTTPConnection.__init__(self, *args, **kw) + @property + def host(self): + """ + Getter method to remove any trailing dots that indicate the hostname is an FQDN. + + In general, SSL certificates don't include the trailing dot indicating a + fully-qualified domain name, and thus, they don't validate properly when + checked against a domain name that includes the dot. In addition, some + servers may not expect to receive the trailing dot when provided. + + However, the hostname with trailing dot is critical to DNS resolution; doing a + lookup with the trailing dot will properly only resolve the appropriate FQDN, + whereas a lookup without a trailing dot will search the system's search domain + list. Thus, it's important to keep the original host around for use only in + those cases where it's appropriate (i.e., when doing DNS lookup to establish the + actual TCP connection across which we're going to send HTTP requests). + """ + return self._dns_host.rstrip('.') + + @host.setter + def host(self, value): + """ + Setter for the `host` property. + + We assume that only urllib3 uses the _dns_host attribute; httplib itself + only uses `host`, and it seems reasonable that other libraries follow suit. + """ + self._dns_host = value + def _new_conn(self): """ Establish a socket connection and set nodelay settings on it. @@ -138,7 +167,7 @@ def _new_conn(self): try: conn = connection.create_connection( - (self.host, self.port), self.timeout, **extra_kw) + (self._dns_host, self.port), self.timeout, **extra_kw) except SocketTimeout as e: raise ConnectTimeoutError(
diff --git a/test/test_connectionpool.py b/test/test_connectionpool.py --- a/test/test_connectionpool.py +++ b/test/test_connectionpool.py @@ -118,6 +118,7 @@ def test_same_host_no_port_https(self, a, b): ('google.com', 'https://google.com/'), ('yahoo.com', 'http://google.com/'), ('google.com', 'https://google.net/'), + ('google.com', 'http://google.com./'), ]) def test_not_same_host_no_port_http(self, a, b): with HTTPConnectionPool(a) as c: @@ -130,6 +131,7 @@ def test_not_same_host_no_port_http(self, a, b): ('google.com', 'http://google.com/'), ('yahoo.com', 'https://google.com/'), ('google.com', 'https://google.net/'), + ('google.com', 'https://google.com./'), ]) def test_not_same_host_no_port_https(self, a, b): with HTTPSConnectionPool(a) as c: diff --git a/test/test_poolmanager.py b/test/test_poolmanager.py --- a/test/test_poolmanager.py +++ b/test/test_poolmanager.py @@ -31,6 +31,14 @@ def test_same_url(self): assert conn1 == conn2 + # Ensure that FQDNs are handled separately from relative domains + p = PoolManager(2) + + conn1 = p.connection_from_url('http://localhost.:8081/foo') + conn2 = p.connection_from_url('http://localhost:8081/bar') + + assert conn1 != conn2 + def test_many_urls(self): urls = [ "http://localhost:8081/foo", diff --git a/test/with_dummyserver/test_https.py b/test/with_dummyserver/test_https.py --- a/test/with_dummyserver/test_https.py +++ b/test/with_dummyserver/test_https.py @@ -65,6 +65,11 @@ def test_simple(self): r = self._pool.request('GET', '/') self.assertEqual(r.status, 200, r.data) + def test_dotted_fqdn(self): + pool = HTTPSConnectionPool(self.host + '.', self.port) + r = pool.request('GET', '/') + self.assertEqual(r.status, 200, r.data) + def test_set_ssl_version_to_tlsv1(self): self._pool.ssl_version = ssl.PROTOCOL_TLSv1 r = self._pool.request('GET', '/') diff --git a/test/with_dummyserver/test_socketlevel.py b/test/with_dummyserver/test_socketlevel.py --- a/test/with_dummyserver/test_socketlevel.py +++ b/test/with_dummyserver/test_socketlevel.py @@ -1225,6 +1225,37 @@ def socket_handler(listener): pool.request('GET', '/', headers=OrderedDict(expected_request_headers)) self.assertEqual(expected_request_headers, actual_request_headers) + def test_request_host_header_ignores_fqdn_dot(self): + + received_headers = [] + + def socket_handler(listener): + sock = listener.accept()[0] + + buf = b'' + while not buf.endswith(b'\r\n\r\n'): + buf += sock.recv(65536) + + for header in buf.split(b'\r\n')[1:]: + if header: + received_headers.append(header) + + sock.send(( + u'HTTP/1.1 204 No Content\r\n' + u'Content-Length: 0\r\n' + u'\r\n').encode('ascii')) + + sock.close() + + self._start_server(socket_handler) + + pool = HTTPConnectionPool(self.host + '.', self.port, retries=False) + self.addCleanup(pool.close) + pool.request('GET', '/') + self.assert_header_received( + received_headers, 'Host', '%s:%s' % (self.host, self.port) + ) + def test_response_headers_are_returned_in_the_original_order(self): # NOTE: Probability this test gives a false negative is 1/(K!) K = 16
Match_hostname: hostname 'github.com.' doesn't match 'github.com' or 'www.github.com' I'm getting hostname mismatch error when I try to access a domain with a trailing dot. Example: Version of urllib3 I'm using ``` (venv) ➜ ~ pip list --format=columns | grep urllib3 urllib3 1.22 ``` ``` (venv) ➜ ~ python Python 2.7.10 (default, Jul 14 2015, 19:46:27) [GCC 4.2.1 Compatible Apple LLVM 6.0 (clang-600.0.39)] on darwin Type "help", "copyright", "credits" or "license" for more information. >>> import urllib3 >>> http = urllib3.PoolManager(cert_reqs='CERT_REQUIRED') >>> r = http.request('GET', 'https://github.com/') >>> print r.status 200 >>> r = http.request('GET', 'https://github.com./') Traceback (most recent call last): File "<stdin>", line 1, in <module> File "/Users/karteek/venv/lib/python2.7/site-packages/urllib3/request.py", line 66, in request **urlopen_kw) File "/Users/karteek/venv/lib/python2.7/site-packages/urllib3/request.py", line 87, in request_encode_url return self.urlopen(method, url, **extra_kw) File "/Users/karteek/venv/lib/python2.7/site-packages/urllib3/poolmanager.py", line 321, in urlopen response = conn.urlopen(method, u.request_uri, **kw) File "/Users/karteek/venv/lib/python2.7/site-packages/urllib3/connectionpool.py", line 668, in urlopen **response_kw) File "/Users/karteek/venv/lib/python2.7/site-packages/urllib3/connectionpool.py", line 668, in urlopen **response_kw) File "/Users/karteek/venv/lib/python2.7/site-packages/urllib3/connectionpool.py", line 668, in urlopen **response_kw) File "/Users/karteek/venv/lib/python2.7/site-packages/urllib3/connectionpool.py", line 639, in urlopen _stacktrace=sys.exc_info()[2]) File "/Users/karteek/venv/lib/python2.7/site-packages/urllib3/util/retry.py", line 388, in increment raise MaxRetryError(_pool, url, error or ResponseError(cause)) urllib3.exceptions.MaxRetryError: HTTPSConnectionPool(host='github.com.', port=443): Max retries exceeded with url: / (Caused by SSLError(CertificateError("hostname 'github.com.' doesn't match either of 'github.com', 'www.github.com'",),)) ``` Similar request using curl seems to go fine. ``` curl -v https://github.com./ * Trying 192.30.255.112... * Connected to github.com (192.30.255.112) port 443 (#0) * TLS 1.2 connection using TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256 * Server certificate: github.com * Server certificate: DigiCert SHA2 Extended Validation Server CA * Server certificate: DigiCert High Assurance EV Root CA > GET / HTTP/1.1 > Host: github.com > User-Agent: curl/7.43.0 > Accept: */* > < HTTP/1.1 200 OK < Date: Thu, 31 Aug 2017 21:06:01 GMT < Content-Type: text/html; charset=utf-8 < Transfer-Encoding: chunked < Server: GitHub.com < Status: 200 OK < Cache-Control: no-cache ``` Also, going to similar URL https://github.com./robots.txt on browsers Firefox and Chrome doesn't show any SSL errors (They get redirected to https://github.com/robots.txt)
This is an interesting issue! Looking at how cURL does it, it seems like it's internally setting the Host header to remove the trailing dot, as well as removing the trailing dot when doing hostname validation on the cert. I did some quick checking on `getaddrinfo` (which we use for DNS resolution), and it seems like it does both check my system's search domain list when the dot isn't present, and respect the trailing dot (indicating an FQDN) to indicate an absolute domain when it is present, so we definitely want to make any transformations post-resolution. It's unclear to me whether we should just need to make the modification needed for SSL certification [somewhere around here](https://github.com/shazow/urllib3/blob/4fc0be786662dc5b79bf7e939eedd653c78d4616/urllib3/connection.py#L325) (that'll pass a hostname with trailing dot removed to whatever SSL backend is in use for the purposes of certificate verification) or if we should perform the dot-ectomy change in the Host header we send as well (which would need more careful consideration due to the DNS resolution stuff mentioned above). As for algorithm, I believe simply doing `host.rstrip('.')` is the minimally-invasive operation we need to remove a trailing dot that indicates an FQDN; as far as I can tell, neither the IPv4 nor IPv6 syntax allows for trailing dots. @Lukasa, thoughts? I'd be happy to work on a fix for this. Probably we should almost unconditionally do the dot-ectomy. It is likely to confuse most people to see it and also to cause latent bugs down the line, so probably after DNS resolution it should just be stripped. @Lukasa, sounds good. I'll get to work on that; the unfortunate part is that we'll probably need to store the domain twice (once for DNS purposes; once for everything else). Nah, let's just create a property that does the transformation for us when we need it. 😁 LAZY EVALUATION FOR THE LAZY DEVELOPMENT GOD
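For illustration, a bare-bones sketch of the lazy-property idea discussed here (which is essentially what the patch above ends up doing): keep the dotted name for DNS resolution and strip the trailing dot everywhere else on demand.

```python
class ConnectionSketch(object):
    def __init__(self, host):
        # The original, possibly dotted, name is what DNS should see:
        # 'example.com.' resolves only the absolute name and skips the
        # system's search-domain list.
        self._dns_host = host

    @property
    def host(self):
        # Certificate matching and the Host header are written without the
        # trailing dot, so strip it whenever the name is used outside DNS.
        return self._dns_host.rstrip('.')


conn = ConnectionSketch('example.com.')
assert conn.host == 'example.com'        # used for TLS verification / headers
assert conn._dns_host == 'example.com.'  # used to open the socket
```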
2017-09-02T00:04:26Z
[]
[]
urllib3/urllib3
1,318
urllib3__urllib3-1318
[ "1286" ]
64514ec42cbf1e4d9e8624c6c8a2a863ac4f5810
diff --git a/urllib3/_collections.py b/urllib3/_collections.py --- a/urllib3/_collections.py +++ b/urllib3/_collections.py @@ -15,6 +15,7 @@ def __exit__(self, exc_type, exc_value, traceback): from collections import OrderedDict except ImportError: from .packages.ordered_dict import OrderedDict +from .exceptions import InvalidHeader from .packages.six import iterkeys, itervalues, PY3 @@ -305,13 +306,22 @@ def from_httplib(cls, message): # Python 2 # python2.7 does not expose a proper API for exporting multiheaders # efficiently. This function re-reads raw lines from the message # object and extracts the multiheaders properly. + obs_fold_continued_leaders = (' ', '\t') headers = [] for line in message.headers: - if line.startswith((' ', '\t')): - key, value = headers[-1] - headers[-1] = (key, value + '\r\n' + line.rstrip()) - continue + if line.startswith(obs_fold_continued_leaders): + if not headers: + # We received a header line that starts with OWS as described + # in RFC-7230 S3.2.4. This indicates a multiline header, but + # there exists no previous header to which we can attach it. + raise InvalidHeader( + 'Header continuation with no previous header: %s' % line + ) + else: + key, value = headers[-1] + headers[-1] = (key, value + ' ' + line.strip()) + continue key, value = line.split(':', 1) headers.append((key, value.strip()))
diff --git a/test/test_collections.py b/test/test_collections.py --- a/test/test_collections.py +++ b/test/test_collections.py @@ -4,6 +4,7 @@ ) import pytest +from urllib3.exceptions import InvalidHeader from urllib3.packages import six xrange = six.moves.xrange @@ -340,8 +341,8 @@ def test_from_httplib_py2(self): Content-Type: text/html; charset=windows-1251 Connection: keep-alive X-Some-Multiline: asdf - asdf - asdf + asdf\t +\t asdf Set-Cookie: bb_lastvisit=1348253375; expires=Sat, 21-Sep-2013 18:49:35 GMT; path=/ Set-Cookie: bb_lastactivity=0; expires=Sat, 21-Sep-2013 18:49:35 GMT; path=/ www-authenticate: asdf @@ -356,6 +357,14 @@ def test_from_httplib_py2(self): assert len(cookies) == 2 assert cookies[0].startswith("bb_lastvisit") assert cookies[1].startswith("bb_lastactivity") - assert d['x-some-multiline'].split() == ['asdf', 'asdf', 'asdf'] + assert d['x-some-multiline'] == 'asdf asdf asdf' assert d['www-authenticate'] == 'asdf, bla' assert d.getlist('www-authenticate') == ['asdf', 'bla'] + with_invalid_multiline = """\tthis-is-not-a-header: but it has a pretend value +Authorization: Bearer 123 + +""" + buffer = six.moves.StringIO(with_invalid_multiline.replace('\n', '\r\n')) + msg = six.moves.http_client.HTTPMessage(buffer) + with pytest.raises(InvalidHeader): + HTTPHeaderDict.from_httplib(msg)
IndexError when handling a malformed HTTP response

I used urllib3 to receive an HTTP response like this:

```
HTTP/1.1 200 OK
Content-Type: application/octet-stream
Content-Length: 89606
Content-Disposition: attachment; filename="MB-500Ap_2009-01-12.cfg"
Connection: close

Brickcom-50xA
OperationSetting.locale=auto
HostName.name=cam
ModuleInfo.DIDO_module=1
ModuleInfo.PIR_module=0
ModuleInfo.WLED=0
SensorFPSSetting.fps=0
ModuleInfo.AUTOIRIS_module=0
ModuleInfo.IRCUT_module=0
ModuleInfo.IRLED_module=0
ModuleInfo.lightsensor=0
ModuleInfo.EXPOSURE_module=0
ModuleInfo.MDNS_module=0
ModuleInfo.PTZ_module=1
ModuleInfo.MSN_module=0
ModuleInfo.WIFI_module=0
ModuleInfo.watchDog_module=0
ModuleInfo.sdcard_module=1
ModuleInfo.usbstorage_module=0
ModuleInfo.sambamount_module=0
ModuleInfo.QoS=0
ModuleInfo.shutter_speed=0
ModuleInfo.discovery_internet=1
ModuleInfo.POE_module=
ModuleInfo.audio_record=1
```

It throws an IndexError; the traceback is:

```
req = http_get(url, auth=("admin", "admin"), timeout=timeout, verify=False)
  File "C:\Python27\lib\site-packages\requests\api.py", line 72, in get
    return request('get', url, params=params, **kwargs)
  File "C:\Python27\lib\site-packages\requests\api.py", line 58, in request
    return session.request(method=method, url=url, **kwargs)
  File "C:\Python27\lib\site-packages\requests\sessions.py", line 508, in request
    resp = self.send(prep, **send_kwargs)
  File "C:\Python27\lib\site-packages\requests\sessions.py", line 618, in send
    r = adapter.send(request, **kwargs)
  File "C:\Python27\lib\site-packages\requests\adapters.py", line 440, in send
    timeout=timeout
  File "C:\Python27\lib\site-packages\urllib3\connectionpool.py", line 617, in urlopen
    **response_kw)
  File "C:\Python27\lib\site-packages\urllib3\response.py", line 456, in from_httplib
    headers = HTTPHeaderDict.from_httplib(headers)
  File "C:\Python27\lib\site-packages\urllib3\_collections.py", line 312, in from_httplib
    key, value = headers[-1]
IndexError: list index out of range
```

How can I deal with this issue?
I'll see if I can replicate soon and figure out the right strategy. In the original, are those header fields spaced out with `\t`, or individual space characters? the source response is ``` 'HTTP/1.1 200 OK\r\n\tContent-Type: application/octet-stream\r\n\tContent-Length: 49059\r\n\tContent-Disposition: attachment; filename="configfile.txt"\r\n\tConnection: close\r\n\r\n ``` Okay, so we're correctly dropping those "headers" because they're not actually headers (tab character is disallowed in header field names). This results in a response that has no headers, but we have a piece of logic that expects at least one. I'll need to dig in and see if there are any other places that would be unhappy with a headerless response and if so, what kind of effort would be involved with cleaning that up. @sigmavirus24, thoughts? I don't think we want to try to figure out what the server means when it sends an invalid header; it's too ambiguous. @haikuginger we're not correctly dropping them necessarily. We're expecting valid headers. The server is wrong and I think we should raise an InvalidHeader exception of sorts in this case, especially when we can't just try to add it onto the previous header. In this instance I think the error is coming from httplib, so we may not be able to raise that error easily. Okay, I'm just getting back to this. This is our code, not httplib, and what's happening is that we're interpreting a line that starts with `'\t'` or `' '` (space) as a line continuation for the last header - and then getting the last header with the `-1` index. It looks like this is obsoleted with RFC-7230, but user agents may still accept it by replacing the whole thing with a single space (not the current `'\r\n'` we currently implement). I propose that we adjust the implementation such that... 1. Multiline header values are passed per RFC-7230 with a space inserted between lines in the value 2. Items that appear to be multiline headers (preceded by `'\r\n\t'` or `'\r\n '`) raise a specific exception unless preceded by at least one header. @sigmavirus24, does that seem reasonable to you? Note to @beruhan: this will not prevent an exception from happening for the response you've listed above, but will make that exception more explicit and helpful, so that you can catch it and take action accordingly. @haikuginger yeah, 2 is what we're missing and definitely seems reasonable. 👍
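A simplified sketch of the two rules proposed above (illustrative only, not the exact patch): obs-fold continuation lines get joined onto the previous header with a single space, and a continuation with nothing to continue is rejected instead of blowing up with an IndexError.

```python
def fold_header_lines(lines):
    """Collapse RFC 7230 obs-fold continuations into single header entries."""
    headers = []
    for line in lines:
        if line.startswith((' ', '\t')):
            if not headers:
                # Malformed input: a continuation line with no previous header.
                raise ValueError('Header continuation with no previous header: %r' % line)
            key, value = headers[-1]
            headers[-1] = (key, value + ' ' + line.strip())
            continue
        key, value = line.split(':', 1)
        headers.append((key, value.strip()))
    return headers


print(fold_header_lines(['X-Multi: one', '\ttwo', 'Host: example.com']))
# [('X-Multi', 'one two'), ('Host', 'example.com')]
```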
2018-01-23T15:31:31Z
[]
[]
urllib3/urllib3
1,345
urllib3__urllib3-1345
[ "1088" ]
69f16a0c555316f486f8d43d889edb1abae1e151
diff --git a/urllib3/response.py b/urllib3/response.py --- a/urllib3/response.py +++ b/urllib3/response.py @@ -600,6 +600,11 @@ def read_chunked(self, amt=None, decode_content=None): Similar to :meth:`HTTPResponse.read`, but with an additional parameter: ``decode_content``. + :param amt: + How much of the content to read. If specified, caching is skipped + because it doesn't make sense to cache partial content as the full + response. + :param decode_content: If True, will attempt to decode the body based on the 'content-encoding' header. @@ -621,6 +626,11 @@ def read_chunked(self, amt=None, decode_content=None): self._original_response.close() return + # If a response is already read and closed + # then return immediately. + if self._fp.fp is None: + return + while True: self._update_chunk_length() if self.chunk_left == 0:
diff --git a/test/appengine/test_gae_manager.py b/test/appengine/test_gae_manager.py --- a/test/appengine/test_gae_manager.py +++ b/test/appengine/test_gae_manager.py @@ -105,6 +105,8 @@ def test_exceptions(self): test_release_conn_parameter = None test_stream_keepalive = None test_cleanup_on_connection_error = None + test_read_chunked_short_circuit = None + test_read_chunked_on_closed_response = None # Tests that should likely be modified for appengine specific stuff test_timeout = None diff --git a/test/with_dummyserver/test_connectionpool.py b/test/with_dummyserver/test_connectionpool.py --- a/test/with_dummyserver/test_connectionpool.py +++ b/test/with_dummyserver/test_connectionpool.py @@ -5,6 +5,7 @@ import unittest import time import warnings +import pytest import mock @@ -695,6 +696,26 @@ def test_stream_keepalive(self): self.assertEqual(self.pool.num_connections, 1) self.assertEqual(self.pool.num_requests, x) + def test_read_chunked_short_circuit(self): + response = self.pool.request( + 'GET', + '/chunked', + preload_content=False + ) + response.read() + with pytest.raises(StopIteration): + next(response.read_chunked()) + + def test_read_chunked_on_closed_response(self): + response = self.pool.request( + 'GET', + '/chunked', + preload_content=False + ) + response.close() + with pytest.raises(StopIteration): + next(response.read_chunked()) + def test_chunked_gzip(self): response = self.pool.request( 'GET',
If the response is complete, stream/read_chunked should short-circuit return the empty string. Originally seen in kennethreitz/requests#3807. Basically, read_chunked should check whether `_fp` is `None`. If it is, it should immediately return the empty byte string rather than do any processing. Put another way, this shouldn't blow up: ```python import urllib3 http = urllib3.PoolManager() resp = http.request('GET', 'http://http2bin.org/stream/100') resp.read() next(resp.read_chunked()) ``` I recommend someone who wants to help out pick this issue up, it should be easily fixed and tested.
I believe we already check that `_fp` isn't None [here](https://github.com/shazow/urllib3/blob/master/urllib3/response.py#L518-L525), right? I think the actual issue is we're not checking that `_fp.fp` isn't None which signals the connection has been closed (and hopefully read). Can we not just ask the fp directly whether it's closed? @Lukasa, yeah, it looks like `closed` on the Response object will correctly check this for us. I was about to go and fix this -- shouldn't we just `return` so that `StopIteration` is raised instead of returning `b''`? Any updates on this? I'm getting a ton of these in a recent project. Thank you!
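Grounded in the tests that accompany the patch, a small sketch of the behaviour being asked for (the http2bin URL is reused from the report above): once the body has been fully read or the response closed, a further `read_chunked()` call simply stops.

```python
import urllib3

http = urllib3.PoolManager()
resp = http.request('GET', 'http://http2bin.org/stream/100',
                    preload_content=False)
resp.read()  # drain the chunked body completely

try:
    next(resp.read_chunked())
except StopIteration:
    # With the fix, the generator short-circuits on the already-closed
    # connection instead of erroring out.
    print('generator exhausted, as expected')
```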
2018-03-25T21:17:13Z
[]
[]
urllib3/urllib3
1,346
urllib3__urllib3-1346
[ "1316" ]
c4f123dcdcad3582561373278d2772f819ec3cb8
diff --git a/urllib3/poolmanager.py b/urllib3/poolmanager.py --- a/urllib3/poolmanager.py +++ b/urllib3/poolmanager.py @@ -312,8 +312,9 @@ def urlopen(self, method, url, redirect=True, **kw): kw['assert_same_host'] = False kw['redirect'] = False + if 'headers' not in kw: - kw['headers'] = self.headers + kw['headers'] = self.headers.copy() if self.proxy is not None and u.scheme == "http": response = conn.urlopen(method, url, **kw) @@ -335,6 +336,14 @@ def urlopen(self, method, url, redirect=True, **kw): if not isinstance(retries, Retry): retries = Retry.from_int(retries, redirect=redirect) + # Strip headers marked as unsafe to forward to the redirected location. + # Check remove_headers_on_redirect to avoid a potential network call within + # conn.is_same_host() which may use socket.gethostbyname() in the future. + if (retries.remove_headers_on_redirect + and not conn.is_same_host(redirect_location)): + for header in retries.remove_headers_on_redirect: + kw['headers'].pop(header, None) + try: retries = retries.increment(method, url, response=response, _pool=conn) except MaxRetryError: diff --git a/urllib3/util/retry.py b/urllib3/util/retry.py --- a/urllib3/util/retry.py +++ b/urllib3/util/retry.py @@ -19,6 +19,7 @@ log = logging.getLogger(__name__) + # Data structure for representing the metadata of requests that result in a retry. RequestHistory = namedtuple('RequestHistory', ["method", "url", "error", "status", "redirect_location"]) @@ -139,6 +140,10 @@ class Retry(object): Whether to respect Retry-After header on status codes defined as :attr:`Retry.RETRY_AFTER_STATUS_CODES` or not. + :param iterable remove_headers_on_redirect: + Sequence of headers to remove from the request when a response + indicating a redirect is returned before firing off the redirected + request. """ DEFAULT_METHOD_WHITELIST = frozenset([ @@ -146,13 +151,16 @@ class Retry(object): RETRY_AFTER_STATUS_CODES = frozenset([413, 429, 503]) + DEFAULT_REDIRECT_HEADERS_BLACKLIST = frozenset(['Authorization']) + #: Maximum backoff time. BACKOFF_MAX = 120 def __init__(self, total=10, connect=None, read=None, redirect=None, status=None, method_whitelist=DEFAULT_METHOD_WHITELIST, status_forcelist=None, backoff_factor=0, raise_on_redirect=True, raise_on_status=True, - history=None, respect_retry_after_header=True): + history=None, respect_retry_after_header=True, + remove_headers_on_redirect=DEFAULT_REDIRECT_HEADERS_BLACKLIST): self.total = total self.connect = connect @@ -171,6 +179,7 @@ def __init__(self, total=10, connect=None, read=None, redirect=None, status=None self.raise_on_status = raise_on_status self.history = history or tuple() self.respect_retry_after_header = respect_retry_after_header + self.remove_headers_on_redirect = remove_headers_on_redirect def new(self, **kw): params = dict( @@ -182,6 +191,7 @@ def new(self, **kw): raise_on_redirect=self.raise_on_redirect, raise_on_status=self.raise_on_status, history=self.history, + remove_headers_on_redirect=self.remove_headers_on_redirect ) params.update(kw) return type(self)(**params)
diff --git a/test/test_retry.py b/test/test_retry.py --- a/test/test_retry.py +++ b/test/test_retry.py @@ -249,3 +249,13 @@ def test_retry_method_not_in_whitelist(self): retry = Retry() with pytest.raises(ReadTimeoutError): retry.increment(method='POST', error=error) + + def test_retry_default_remove_headers_on_redirect(self): + retry = Retry() + + assert list(retry.remove_headers_on_redirect) == ['Authorization'] + + def test_retry_set_remove_headers_on_redirect(self): + retry = Retry(remove_headers_on_redirect=['X-API-Secret']) + + assert list(retry.remove_headers_on_redirect) == ['X-API-Secret'] diff --git a/test/with_dummyserver/test_poolmanager.py b/test/with_dummyserver/test_poolmanager.py --- a/test/with_dummyserver/test_poolmanager.py +++ b/test/with_dummyserver/test_poolmanager.py @@ -109,6 +109,52 @@ def test_too_many_redirects(self): except MaxRetryError: pass + def test_redirect_cross_host_remove_headers(self): + http = PoolManager() + self.addCleanup(http.clear) + + r = http.request('GET', '%s/redirect' % self.base_url, + fields={'target': '%s/headers' % self.base_url_alt}, + headers={'Authorization': 'foo'}) + + self.assertEqual(r.status, 200) + + data = json.loads(r.data.decode('utf-8')) + + self.assertNotIn('Authorization', data) + + def test_redirect_cross_host_no_remove_headers(self): + http = PoolManager() + self.addCleanup(http.clear) + + r = http.request('GET', '%s/redirect' % self.base_url, + fields={'target': '%s/headers' % self.base_url_alt}, + headers={'Authorization': 'foo'}, + retries=Retry(remove_headers_on_redirect=[])) + + self.assertEqual(r.status, 200) + + data = json.loads(r.data.decode('utf-8')) + + self.assertEqual(data['Authorization'], 'foo') + + def test_redirect_cross_host_set_removed_headers(self): + http = PoolManager() + self.addCleanup(http.clear) + + r = http.request('GET', '%s/redirect' % self.base_url, + fields={'target': '%s/headers' % self.base_url_alt}, + headers={'X-API-Secret': 'foo', + 'Authorization': 'bar'}, + retries=Retry(remove_headers_on_redirect=['X-API-Secret'])) + + self.assertEqual(r.status, 200) + + data = json.loads(r.data.decode('utf-8')) + + self.assertNotIn('X-API-Secret', data) + self.assertEqual(data['Authorization'], 'bar') + def test_raise_on_redirect(self): http = PoolManager() self.addCleanup(http.clear)
Auth header remains during redirects Requests does it: https://github.com/request/request/pull/1184
Except urllib3 doesn't directly handle authentication the way Requests does. I don't disagree, but there are fundamentally different layers of concern here. I tend to agree with @sigmavirus24. Our header handling is much more agnostic to what those headers actually are. If we implemented authentication directly, that'd be one thing, but we don't. I would be interested to see what cURL does here in the specific case where it's given `-H "Authorization: Basic xxxxxxxxx"` as compared to `-U "username:password"`. If it's the case that cURL strips a manually-constructed header on cross-host redirect, then I could be convinced to entertain a PR, although I likely wouldn't spend time on it myself. @haikuginger actually, cURL preserves authentication on redirect in the face of leaking credential. But it also doesn't automagic redirects like requests does. To do that you need to specify `-L` so sending auth in the header or with `-u` won't automatically give away credentials unless you manually follow it or specify `-L`. (Since you said you'd be interested) urllib3 redirects across hosts by default these days, doesn't it? Sounds like a pretty scary surprise-footgun to have. At minimum, sounds like the redirect feature needs a big fat warning that all headers including authentication headers will be forwarded along. Ideally it feels like request-specific headers need to be stripped for cross-host by default (maybe a `urllib3.Retry` object param?). (Sorry for helicopter-commenting here, just randomly saw this thread in my filter, please feel free to ignore. :heart:) Edit: Random scary scenario I'm thinking about: You have an API that for some reason can yield a redirect (it's a thing, [I've even built one in 2010](https://github.com/shazow/302found)), then by default the redirect destination is going to get your secret bearer token and all that jazz. Especially scary as a _default_. Btw if we decide it's a good idea and nobody wants to do it, I'll put together a quick PR. Should be 4-5 lines of changes in urlopen and Retry. I agree this is a potential footgun. I'd almost be tempted to turn off redirect following by default. If I turn off redirect, the script breaks e.g redirect=False. It needs to redirect just not with auth. This is a big problem if you use say a webhost server and then have to redirect to Amazon. A workaround for now is to use a monkey patch to remove auth. @YODABear, understandable, but our concerns are for all users, not just for one specific use case. I'd welcome feedback from @Lukasa, @sigmavirus24, and @shazow as to what the Right Thing to Do is considering that we don't have an end-to-end API for HTTP auth. Reopening this issue for that discussion of the broader case. @shazow there is more discussion of potential problems with this over on the relevant Requests issues and I agree with you that it's a problem. I thought urllib3 had redirect handling off-by-default. I can also toss together a PR for this. > If I turn off redirect, the script breaks e.g redirect=False. That's surprising but I suspect that if you asked for help on [StackOverflow](https://stackoverflow.com) they would help you understand why its breaking and how to fix it while we fix this underlying bug. @sigmavirus24, please go right ahead. @sigmavirus24 Right, IMO any changes we make should be configurable back to what we have today via the Retry object. I was imagining something like `retries=Retry(forward_headers_across_hosts=True)` in the spirit of keeping scary flags annoyingly long and explicit. 
I doubt I'm the only one who has come across this issue. Anyone who uses a host with auth and has to redirect to a host without auth has likely encountered this (so this issue will likely be raised by someone else in the future if it isn't fixed). I had a look at some SO posts. In the meantime, the only viable method seems to be using monkey patches, which can get quite lengthy depending on how many different hosts you have. Another possibility could be to urlparse out the auth credentials, though the monkey patch works fine for now so I never tested that. @sigmavirus24, were you still planning on handling this? If not, I can make the change myself.
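Editor's note: below is a minimal sketch of how the `remove_headers_on_redirect` option introduced by the patch above could be used. It assumes a urllib3 build that includes that patch; the hosts and tokens are purely hypothetical.

```python
import urllib3
from urllib3.util.retry import Retry

http = urllib3.PoolManager()

# After the patch, the default Retry strips `Authorization` whenever a
# redirect points at a different host than the original request.
http.request(
    "GET",
    "https://api.example.com/resource",          # hypothetical origin
    headers={"Authorization": "Bearer secret"},  # hypothetical token
)

# Passing an empty sequence keeps every header across redirects,
# i.e. restores the old behaviour.
http.request(
    "GET",
    "https://api.example.com/resource",
    headers={"Authorization": "Bearer secret"},
    retries=Retry(remove_headers_on_redirect=[]),
)

# Overriding the default set: here only X-API-Secret is stripped on a
# cross-host redirect, while Authorization is forwarded (this mirrors
# test_redirect_cross_host_set_removed_headers in the test_patch above).
http.request(
    "GET",
    "https://api.example.com/resource",
    headers={"X-API-Secret": "foo", "Authorization": "bar"},
    retries=Retry(remove_headers_on_redirect=["X-API-Secret"]),
)
```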
2018-03-26T01:34:33Z
[]
[]
urllib3/urllib3
1,350
urllib3__urllib3-1350
[ "1224" ]
c8c98c473ab30f427fe05057f2ceded10ec93153
diff --git a/urllib3/util/ssl_.py b/urllib3/util/ssl_.py --- a/urllib3/util/ssl_.py +++ b/urllib3/util/ssl_.py @@ -2,11 +2,13 @@ import errno import warnings import hmac +import socket from binascii import hexlify, unhexlify from hashlib import md5, sha1, sha256 from ..exceptions import SSLError, InsecurePlatformWarning, SNIMissingWarning +from ..packages import six SSLContext = None @@ -53,6 +55,27 @@ def _const_compare_digest_backport(a, b): OP_NO_SSLv2, OP_NO_SSLv3 = 0x1000000, 0x2000000 OP_NO_COMPRESSION = 0x20000 + +# Python 2.7 and earlier didn't have inet_pton on non-Linux +# so we fallback on inet_aton in those cases. This means that +# we can only detect IPv4 addresses in this case. +if hasattr(socket, 'inet_pton'): + inet_pton = socket.inet_pton +else: + # Maybe we can use ipaddress if the user has urllib3[secure]? + try: + import ipaddress + + def inet_pton(_, host): + if isinstance(host, six.binary_type): + host = host.decode('ascii') + return ipaddress.ip_address(host) + + except ImportError: # Platform-specific: Non-Linux + def inet_pton(_, host): + return socket.inet_aton(host) + + # A secure default. # Sources for more information on TLS ciphers: # @@ -325,17 +348,49 @@ def ssl_wrap_socket(sock, keyfile=None, certfile=None, cert_reqs=None, if certfile: context.load_cert_chain(certfile, keyfile) - if HAS_SNI: # Platform-specific: OpenSSL with enabled SNI - return context.wrap_socket(sock, server_hostname=server_hostname) - - warnings.warn( - 'An HTTPS request has been made, but the SNI (Server Name ' - 'Indication) extension to TLS is not available on this platform. ' - 'This may cause the server to present an incorrect TLS ' - 'certificate, which can cause validation failures. You can upgrade to ' - 'a newer version of Python to solve this. For more information, see ' - 'https://urllib3.readthedocs.io/en/latest/advanced-usage.html' - '#ssl-warnings', - SNIMissingWarning - ) + + # If we detect server_hostname is an IP address then the SNI + # extension should not be used according to RFC3546 Section 3.1 + # We shouldn't warn the user if SNI isn't available but we would + # not be using SNI anyways due to IP address for server_hostname. + if ((server_hostname is not None and not is_ipaddress(server_hostname)) + or IS_SECURETRANSPORT): + if HAS_SNI and server_hostname is not None: + return context.wrap_socket(sock, server_hostname=server_hostname) + + warnings.warn( + 'An HTTPS request has been made, but the SNI (Server Name ' + 'Indication) extension to TLS is not available on this platform. ' + 'This may cause the server to present an incorrect TLS ' + 'certificate, which can cause validation failures. You can upgrade to ' + 'a newer version of Python to solve this. For more information, see ' + 'https://urllib3.readthedocs.io/en/latest/advanced-usage.html' + '#ssl-warnings', + SNIMissingWarning + ) + return context.wrap_socket(sock) + + +def is_ipaddress(hostname): + """Detects whether the hostname given is an IP address. + + :param str hostname: Hostname to examine. + :return: True if the hostname is an IP address, False otherwise. + """ + if six.PY3 and isinstance(hostname, six.binary_type): + # IDN A-label bytes are ASCII compatible. + hostname = hostname.decode('ascii') + + families = [socket.AF_INET] + if hasattr(socket, 'AF_INET6'): + families.append(socket.AF_INET6) + + for af in families: + try: + inet_pton(af, hostname) + except (socket.error, ValueError, OSError): + pass + else: + return True + return False
diff --git a/test/test_ssl.py b/test/test_ssl.py new file mode 100644 --- /dev/null +++ b/test/test_ssl.py @@ -0,0 +1,73 @@ +import mock +import pytest +from six import b +from urllib3.util import ssl_ +from urllib3.exceptions import SNIMissingWarning + + [email protected]('addr', [ + '::1', + '::', + '127.0.0.1', + '8.8.8.8', + b('127.0.0.1') +]) +def test_is_ipaddress_true(addr): + assert ssl_.is_ipaddress(addr) + + [email protected]('addr', [ + 'www.python.org', + b('www.python.org') +]) +def test_is_ipaddress_false(addr): + assert not ssl_.is_ipaddress(addr) + + [email protected]( + ['has_sni', 'server_hostname', 'uses_sni'], + [(True, '127.0.0.1', False), + (False, 'www.python.org', False), + (False, '0.0.0.0', False), + (True, 'www.google.com', True), + (True, None, False), + (False, None, False)] +) +def test_context_sni_with_ip_address(monkeypatch, has_sni, server_hostname, uses_sni): + monkeypatch.setattr(ssl_, 'HAS_SNI', has_sni) + + sock = mock.Mock() + context = mock.create_autospec(ssl_.SSLContext) + + ssl_.ssl_wrap_socket(sock, server_hostname=server_hostname, ssl_context=context) + + if uses_sni: + context.wrap_socket.assert_called_with(sock, server_hostname=server_hostname) + else: + context.wrap_socket.assert_called_with(sock) + + [email protected]( + ['has_sni', 'server_hostname', 'should_warn'], + [(True, 'www.google.com', False), + (True, '127.0.0.1', False), + (False, '127.0.0.1', False), + (False, 'www.google.com', True), + (True, None, False), + (False, None, False)] +) +def test_sni_missing_warning_with_ip_addresses(monkeypatch, has_sni, server_hostname, should_warn): + monkeypatch.setattr(ssl_, 'HAS_SNI', has_sni) + + sock = mock.Mock() + context = mock.create_autospec(ssl_.SSLContext) + + with mock.patch('warnings.warn') as warn: + ssl_.ssl_wrap_socket(sock, server_hostname=server_hostname, ssl_context=context) + + if should_warn: + assert warn.call_count >= 1 + warnings = [call[0][1] for call in warn.call_args_list] + assert SNIMissingWarning in warnings + else: + assert warn.call_count == 0 diff --git a/test/test_util.py b/test/test_util.py --- a/test/test_util.py +++ b/test/test_util.py @@ -500,7 +500,8 @@ def test_ssl_wrap_socket_with_no_sni_warns(self): ssl_.HAS_SNI = False try: with patch('warnings.warn') as warn: - ssl_wrap_socket(ssl_context=mock_context, sock=socket) + ssl_wrap_socket(ssl_context=mock_context, sock=socket, + server_hostname='www.google.com') mock_context.wrap_socket.assert_called_once_with(socket) assert warn.call_count >= 1 warnings = [call[0][1] for call in warn.call_args_list] diff --git a/test/with_dummyserver/test_https.py b/test/with_dummyserver/test_https.py --- a/test/with_dummyserver/test_https.py +++ b/test/with_dummyserver/test_https.py @@ -279,14 +279,7 @@ def test_unverified_ssl(self): # the unverified warning. Older systems may also emit other # warnings, which we want to ignore here. calls = warn.call_args_list - if sys.version_info >= (2, 7, 9) or util.IS_PYOPENSSL \ - or util.IS_SECURETRANSPORT: - category = calls[0][0][1] - elif util.HAS_SNI: - category = calls[1][0][1] - else: - category = calls[2][0][1] - self.assertEqual(category, InsecureRequestWarning) + self.assertIn(InsecureRequestWarning, [x[0][1] for x in calls]) def test_ssl_unverified_with_ca_certs(self): pool = HTTPSConnectionPool(self.host, self.port,
when connecting to https by IP address, client hello message has a server_name SNI block with literal ip address I originally opened this in [requests](https://github.com/requests/requests/issues/4178), but was asked to raise it here. When connecting to a machine over https using an IP address, the SSL client hello includes a server_name extension with the ip address listed as the server name. This is not allowed in the [SNI spec](https://tools.ietf.org/html/rfc3546#section-3.1), and it should not include the server_name extension. Reproduce: 1. Set up packet logging 2. ```python import urllib3 http = urllib3.PoolManager() http.request('GET', 'https://8.8.8.8') ```` 3. Inspect packet logs for client hello. It'll look a bit like this: ``` Frame 31: 571 bytes on wire (4568 bits), 571 bytes captured (4568 bits) on interface 0 Ethernet II, Src: BizlinkK_XX:XX:XX (9c:eb:e8:XX:XX:XX), Dst: JuniperN_XX:XX:XX (08:81:f4:XX:XX:XX) Internet Protocol Version 4, Src: 192.168.89.18, Dst: 8.8.8.8 Transmission Control Protocol, Src Port: 1156, Dst Port: 443, Seq: 1, Ack: 1, Len: 517 Secure Sockets Layer TLSv1.2 Record Layer: Handshake Protocol: Client Hello Content Type: Handshake (22) Version: TLS 1.0 (0x0301) Length: 512 Handshake Protocol: Client Hello Handshake Type: Client Hello (1) Length: 508 Version: TLS 1.2 (0x0303) Random Session ID Length: 0 Cipher Suites Length: 148 Cipher Suites (74 suites) Compression Methods Length: 1 Compression Methods (1 method) Extensions Length: 319 Extension: server_name Type: server_name (0x0000) Length: 12 Server Name Indication extension Server Name list length: 10 Server Name Type: host_name (0) Server Name length: 7 Server Name: 8.8.8.8 Extension: ec_point_formats Extension: elliptic_curves Extension: SessionTicket TLS Extension: signature_algorithms Extension: Heartbeat Extension: Padding ``` The particular server I'm connecting to (azure windows 2016 Datacenter edition) immediately closes the connection when it sees a client hello like this.
Ok, so this is a thing that's bothered me for a while. SNI absolutely should not have IP addresses in it, but spotting an IP address is a tricky thing to do. I think our best bet is to optionally attempt to import the `ipaddress` module to do IP address detection. Failing that, if we want an internal utility for this we can write one: it just turns out to be moderately tricky. Anyone got opinions on this? We're hitting this in pywinrm/Ansible as well if SNI is enabled. I'd be +1 for a soft dep on `ipaddress`- we can make it a hard dep for pywinrm just to make sure it's always there for our fairly common use case of someone hitting a WinRM endpoint by IP (in the off chance SNI is enabled on that host)... This is a known bug in Python's ssl module. I'll fix it for 3.7 as soon as I can drop support for OpenSSL < 1.0.2. Experimental patch: https://github.com/tiran/cpython/commit/a3fa2cf76dfc5a1976a416627131d4e4c6070b9e#diff-e1cc5bf74055e388cda128367a814c8fR690 @nitzmahone In #1287 you wrote that SNI IP address is causing connection abort with Windows' http.sys. I didn't know that the issue can cause connection problems. Can you please open an issue on bugs.python.org? @tiran done: https://bugs.python.org/issue32185 - I see it's already been default-assigned to you :D @nitzmahone Yeah, I'm the poor soul that has to fix all SSL/TLS issues. :) As a user, I can confirm that this SNI IP address issue seems to be a problem when using pywinrm with Windows Server 2012. I was using IP addresses for two Windows Server 2012 boxes. One was connecting and one was failing with a SSL handshake error. I look at the packets, and when my client sent the IP address, the server rejected the packet. (Same thing worked on the other box though.) When I hard-coded random hostnames into /etc/hosts with those IP addresses, it worked a treat. Winrm is listening on all addresses and doesn't seem to care what hostname it has so long as it matches the SSL certificate. I'm curious now how one determines if SNI is enabled or not... with WinRM or Windows in general. All I see is info about IIS which I assume isn't relevant int he case of WinRM... While using IP addresses, client would send a SYN, server would send a SYN-ACK, client would send an ACK, and then the client would send a push packet containing the IP addresses and the server would send a reset packet. As soon as I switched to using hostnames, instead of a reset packet, I would get a packet that appeared to contain the SSL certificate or at least some details about it. Note that this latter behaviour also happened on one of the Windows machines even when using IP addresses. Trying to figure out how SNI relates to WinRM... Ahh... I take it that both IIS and WinRM use http.sys... ayup Still no idea what the difference between those machines was as I think http.sys has SNI enabled by default? The client was exactly the same so I don't see how that could have impacted it. I'm guessing maybe it had to do with some IIS sites and differences in configuration there between the servers? It doesn't, but it's global- if one listener enables the SNI extension, http.sys will accept/process it on all of them- otherwise it just ignores it. Hmm, that doesn't seem to be the case here. It looks like there is one site on the 2nd server with SNI enabled, but I wasn't having any problems with WinRM just using an IP address on that server :/. 
@Lukasa, given that this appears to be something that'll be fixed in Python 3.7, do you think we should proceed with a fix for this in our own code? I agree that if so, a soft dependency on `ipaddress` makes sense. @haikuginger I'm planning to backport the fix to 2.7 and 3.6, too. It will work with most common setups, that is either OpenSSL >= 1.0.2 or LibreSSL >= 2.5.3 or or platforms with ``inet_pton()`` in libc. You don't need the ``ipaddress`` module. You can simply use the socket module to detect if a string is an IPv4 or IPv6 address. It's faster, too. I proposed a solution on another PR or issue. ``` import socket import sys def check_ipaddr(hostname): if sys.version_info >= (3, 0) and isinstance(hostname, bytes): # IDN A-label bytes are ASCII compatible hostname = hostname.decode('ascii') families = [socket.AF_INET] if hasattr(socket, 'AF_INET6'): families.append(socket.AF_INET6) for af in families: try: socket.inet_pton(af, hostname) except socket.error: pass else: return True return False ``` ``` >>> check_ipaddr('::1') True >>> check_ipaddr('127.0.0.1') True >>> check_ipaddr(b'127.0.0.1') True >>> check_ipaddr('www.python.org') False >>> check_ipaddr(b'www.python.org') False ``` https://bugs.python.org/issue32185 has landed in CPython branches 2.7, 3.6, 3.7, and master.
2018-03-28T13:36:26Z
[]
[]
urllib3/urllib3
1,399
urllib3__urllib3-1399
[ "1396" ]
6be637268931e100b0d2d15cecd5524323fd4f36
diff --git a/urllib3/util/wait.py b/urllib3/util/wait.py --- a/urllib3/util/wait.py +++ b/urllib3/util/wait.py @@ -43,9 +43,6 @@ def _retry_on_intr(fn, timeout): else: # Old and broken Pythons. def _retry_on_intr(fn, timeout): - if timeout is not None and timeout <= 0: - return fn(timeout) - if timeout is None: deadline = float("inf") else: @@ -117,7 +114,7 @@ def _have_working_poll(): # from libraries like eventlet/greenlet. try: poll_obj = select.poll() - poll_obj.poll(0) + _retry_on_intr(poll_obj.poll, 0) except (AttributeError, OSError): return False else:
diff --git a/test/test_wait.py b/test/test_wait.py --- a/test/test_wait.py +++ b/test/test_wait.py @@ -137,6 +137,40 @@ def handler(sig, frame): assert interrupt_count[0] > 0 [email protected]( + not hasattr(signal, "setitimer"), + reason="need setitimer() support" +) [email protected]("wfs", variants) +def test_eintr_zero_timeout(wfs, spair): + a, b = spair + interrupt_count = [0] + + def handler(sig, frame): + assert sig == signal.SIGALRM + interrupt_count[0] += 1 + + old_handler = signal.signal(signal.SIGALRM, handler) + try: + assert not wfs(a, read=True, timeout=0) + try: + # Start delivering SIGALRM 1000 times per second, + # to trigger race conditions such as + # https://github.com/urllib3/urllib3/issues/1396. + signal.setitimer(signal.ITIMER_REAL, 0.001, 0.001) + # Hammer the system call for a while to trigger the + # race. + for i in range(100000): + wfs(a, read=True, timeout=0) + finally: + # Stop delivering SIGALRM + signal.setitimer(signal.ITIMER_REAL, 0) + finally: + signal.signal(signal.SIGALRM, old_handler) + + assert interrupt_count[0] > 0 + + @pytest.mark.skipif( not hasattr(signal, "setitimer"), reason="need setitimer() support"
Interrupted system call while profiling with plop Using Python 2.7.12, requests 2.19.0, urllib3 1.23 and using [plop](https://github.com/bdarnell/plop) for profiling, I'm intermittently hitting this stack trace in long-running code: ``` File "/home/bmerry/work/sdp/env/local/lib/python2.7/site-packages/requests/sessions.py", line 525, in get return self.request('GET', url, **kwargs) File "/home/bmerry/work/sdp/env/local/lib/python2.7/site-packages/requests/sessions.py", line 512, in request resp = self.send(prep, **send_kwargs) File "/home/bmerry/work/sdp/env/local/lib/python2.7/site-packages/requests/sessions.py", line 622, in send r = adapter.send(request, **kwargs) File "/home/bmerry/work/sdp/git/katdal/katdal/chunkstore_s3.py", line 56, in send return super(_TimeoutHTTPAdapter, self).send(request, stream, timeout, *args, **kwargs) File "/home/bmerry/work/sdp/env/local/lib/python2.7/site-packages/requests/adapters.py", line 445, in send timeout=timeout File "/home/bmerry/work/sdp/env/local/lib/python2.7/site-packages/urllib3/connectionpool.py", line 588, in urlopen conn = self._get_conn(timeout=pool_timeout) File "/home/bmerry/work/sdp/env/local/lib/python2.7/site-packages/urllib3/connectionpool.py", line 239, in _get_conn if conn and is_connection_dropped(conn): File "/home/bmerry/work/sdp/env/local/lib/python2.7/site-packages/urllib3/util/connection.py", line 23, in is_connection_dropped return wait_for_read(sock, timeout=0.0) File "/home/bmerry/work/sdp/env/local/lib/python2.7/site-packages/urllib3/util/wait.py", line 146, in wait_for_read return wait_for_socket(sock, read=True, timeout=timeout) File "/home/bmerry/work/sdp/env/local/lib/python2.7/site-packages/urllib3/util/wait.py", line 107, in poll_wait_for_socket return bool(_retry_on_intr(do_poll, timeout)) File "/home/bmerry/work/sdp/env/local/lib/python2.7/site-packages/urllib3/util/wait.py", line 47, in _retry_on_intr return fn(timeout) File "/home/bmerry/work/sdp/env/local/lib/python2.7/site-packages/urllib3/util/wait.py", line 105, in do_poll return poll_obj.poll(t) select.error: (4, 'Interrupted system call') Profiling timer expired ``` Looking at the implementation of `_retry_on_intr` for older Pythons, it has this special case: ```python if timeout is not None and timeout <= 0: return fn(timeout) ``` which in turn seems to apply to the call stack above (see is_connection_dropped, which passes a timeout of 0.0). So apparently there are cases where poll can fail with EINTR even with a zero timeout. FWIW, I'm running Ubuntu 16.04 and Linux 4.4.0-116-generic. I'll try commenting out that fast path and doing some testing overnight to confirm that that is the problem. I don't yet had a minimal reproducible example, but I'll work on it (my first attempt of just banging on some URL in a loop hasn't worked). I wanted to file this before I forgot.
I thought our select code handled `EINTR`, I know there's been changes to that code recently. > I thought our select code handled EINTR It does... except in the case of a zero timeout, where the code I quoted above bypasses the EINTR handling. Here's the [context](https://github.com/urllib3/urllib3/blob/6be637268931e100b0d2d15cecd5524323fd4f36/urllib3/util/wait.py#L46-L47) of that code. I presume it's special-cased to avoid the cost of calling `monotonic()`, but the same effect could probably be achieved by setting `deadline` to 0 (or -inf) if timeout is zero. I got around to running the same code with those two lines commented out, and can confirm that it fixed the problem. If I get time I'll see if I can write a minimal example, but it may turn out to be sensitive to the access patterns/timings on the particular internal HTTP server I'm hitting. I'll also look at doing a PR with the fix suggested above. Ok, I've managed to reproduce the problem. My first attempt was run against a Python SimpleHTTPServer, which only implements HTTP 1.0 and hence there is no keep-alive and no connection reuse. When run against an HTTP 1.1 server (I used the local CUPS server on my machine just because that's what was running) this crashes within seconds using urllib3 1.23: ```python #!/usr/bin/env python import plop.collector import requests collector = plop.collector.Collector(interval=0.0001) collector.start() session = requests.Session() while True: session.get('http://localhost:631/', timeout=3) ``` If I get time I'll write a PR to fix it.
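Editor's note: the fix above simply removes the zero-timeout fast path so the retry loop always runs. Below is a simplified sketch of that retry-on-EINTR pattern (not the exact helper from `urllib3/util/wait.py`), written for Pythons older than 3.5 where PEP 475 does not retry interrupted system calls automatically; `poll()` is assumed available (i.e. non-Windows).

```python
import errno
import select
import time


def retry_on_intr(fn, timeout):
    """Keep calling ``fn(timeout)`` until it finishes without EINTR.

    The important detail (and the bug described above) is that this loop
    must also cover ``timeout == 0``: even a zero-timeout ``poll()`` can
    be interrupted by a signal such as plop's profiling SIGALRM.
    """
    deadline = float("inf") if timeout is None else time.time() + timeout
    while True:
        try:
            return fn(timeout)
        except (OSError, select.error) as exc:
            # OSError carries .errno; Python 2's select.error keeps it in args[0].
            err = getattr(exc, "errno", None)
            if err is None and exc.args:
                err = exc.args[0]
            if err != errno.EINTR:
                raise
            if timeout is not None:
                timeout = max(deadline - time.time(), 0)


def is_readable(sock):
    """Zero-timeout readability check that survives signal interruptions."""
    poll_obj = select.poll()
    poll_obj.register(sock.fileno(), select.POLLIN)
    return bool(retry_on_intr(lambda t: poll_obj.poll(t), 0))
```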
2018-06-18T08:55:03Z
[]
[]
urllib3/urllib3
1,439
urllib3__urllib3-1439
[ "1438" ]
285889d57f8bb00116617dc654124a4c7d223fed
diff --git a/src/urllib3/util/response.py b/src/urllib3/util/response.py --- a/src/urllib3/util/response.py +++ b/src/urllib3/util/response.py @@ -59,8 +59,14 @@ def assert_header_parsing(headers): get_payload = getattr(headers, 'get_payload', None) unparsed_data = None - if get_payload: # Platform-specific: Python 3. - unparsed_data = get_payload() + if get_payload: + # get_payload is actually email.message.Message.get_payload; + # we're only interested in the result if it's not a multipart message + if not headers.is_multipart(): + payload = get_payload() + + if isinstance(payload, (bytes, str)): + unparsed_data = payload if defects or unparsed_data: raise HeaderParsingError(defects=defects, unparsed_data=unparsed_data)
diff --git a/test/with_dummyserver/test_socketlevel.py b/test/with_dummyserver/test_socketlevel.py --- a/test/with_dummyserver/test_socketlevel.py +++ b/test/with_dummyserver/test_socketlevel.py @@ -1289,12 +1289,12 @@ def socket_handler(listener): ) class TestBrokenHeaders(SocketDummyServerTestCase): - def _test_broken_header_parsing(self, headers): + def _test_broken_header_parsing(self, headers, unparsed_data_check=None): self.start_response_handler(( b'HTTP/1.1 200 OK\r\n' b'Content-Length: 0\r\n' b'Content-type: text/plain\r\n' - ) + b'\r\n'.join(headers) + b'\r\n' + ) + b'\r\n'.join(headers) + b'\r\n\r\n' ) pool = HTTPConnectionPool(self.host, self.port, retries=False) @@ -1306,26 +1306,52 @@ def _test_broken_header_parsing(self, headers): for record in logs: if 'Failed to parse headers' in record.msg and \ pool._absolute_url('/') == record.args[0]: - return + if unparsed_data_check is None or unparsed_data_check in record.getMessage(): + return self.fail('Missing log about unparsed headers') def test_header_without_name(self): self._test_broken_header_parsing([ - b': Value\r\n', - b'Another: Header\r\n', + b': Value', + b'Another: Header', ]) def test_header_without_name_or_value(self): self._test_broken_header_parsing([ - b':\r\n', - b'Another: Header\r\n', + b':', + b'Another: Header', ]) def test_header_without_colon_or_value(self): self._test_broken_header_parsing([ b'Broken Header', b'Another: Header', - ]) + ], 'Broken Header') + + +class TestHeaderParsingContentType(SocketDummyServerTestCase): + + def _test_okay_header_parsing(self, header): + self.start_response_handler(( + b'HTTP/1.1 200 OK\r\n' + b'Content-Length: 0\r\n' + ) + header + b'\r\n\r\n' + ) + + pool = HTTPConnectionPool(self.host, self.port, retries=False) + self.addCleanup(pool.close) + + with LogRecorder() as logs: + pool.request('GET', '/') + + for record in logs: + assert 'Failed to parse headers' not in record.msg + + def test_header_text_plain(self): + self._test_okay_header_parsing(b'Content-type: text/plain') + + def test_header_message_rfc822(self): + self._test_okay_header_parsing(b'Content-type: message/rfc822') class TestHEAD(SocketDummyServerTestCase):
'Failed to parse headers' warning logged when getting message/rfc822 content I've been investigating an [issue](https://github.com/boto/botocore/issues/1551) I've recently discovered when retrieving objects from S3. I've now tracked it to `urllib3`; this test case (which I've added to `urllib3/test/with_dummyserver/test_socketlevel.py`) demonstrates it: ```py class TestOkayHeaders(SocketDummyServerTestCase): def _test_okay_header_parsing(self, header): self.start_response_handler(( b'HTTP/1.1 200 OK\r\n' b'Content-Length: 0\r\n' ) + header + b'\r\n\r\n' ) pool = HTTPConnectionPool(self.host, self.port, retries=False) self.addCleanup(pool.close) with LogRecorder() as logs: pool.request('GET', '/') for record in logs: assert 'Failed to parse headers' not in record.msg def test_header_text_plain(self): self._test_okay_header_parsing(b'Content-type: text/plain') def test_header_message_rfc822(self): self._test_okay_header_parsing(b'Content-type: message/rfc822') ``` The test with `text/plain` passes, while the test with `message/rfc822` fails, and this is logged: ```py Failed to parse headers (url=http://localhost:36732/): Unknown, unparsed data: [<http.client.HTTPMessage object at 0x7f8fab9373c8>] Traceback (most recent call last): File "/home/user/git/urllib3/src/urllib3/connectionpool.py", line 396, in _make_request assert_header_parsing(httplib_response.msg) File "/home/user/git/urllib3/src/urllib3/util/response.py", line 68, in assert_header_parsing raise HeaderParsingError(defects=defects, unparsed_data=unparsed_data) urllib3.exceptions.HeaderParsingError: Unknown, unparsed data: [<http.client.HTTPMessage object at 0x7f8fab9373c8>] ``` While retrieving content of type `message/rfc822` still works, the warning message being logged is incorrect and unhelpful.
Thanks for opening this issue. I'm able to reproduce on Python 3.7.0 and this does look like a bug. Looks to be within [`urllib3.util.response.assert_header_parsing`](https://github.com/urllib3/urllib3/blob/master/src/urllib3/util/response.py#L38) where we call `get_payload` and we receive a zero-length `HTTPMessage` with no defects that we don't receive for other message types. Could be due to how `message/rfc822` is handled by httplib? Will require some research on my part. Maybe we need to change the `if get_payload:` branch into something like: ```python unparsed_data = None if get_payload: payload = get_payload() # Non-multipart message. if isinstance(payload, (bytes, str)): unparsed_data = payload # Multipart message. elif isinstance(payload, list): multipart_defects = [x.defects for x in payload if getattr(x, "defects", None)] if multipart_defects: if defects is None: defects = multipart_defects else: defects.extend(multipart_defects) ``` With this change the above unit test passes. > Could be due to how `message/rfc822` is handled by httplib? Yes, I believe it is. In `http.client.parse_headers`, it instantiates `email.parser.Parser` to parse the HTTP headers. Then in `email.feedparser.FeedParser` in the `_parsegen()` method, there's this line: ```py if self._cur.get_content_maintype() == 'message': ``` It's in the following code-block that the extra `HTTPMessage` object is generated. Yeah this is definitely a change we need to make then. I can open a PR and create tests later tonight. :) Is the proposed fix overkill? If we change the end of `assert_header_parsing()` as follows: ```py if defects: print(unparsed_data) # dummy raise HeaderParsingError(defects=defects, unparsed_data=unparsed_data) if unparsed_data: print(unparsed_data) # dummy raise HeaderParsingError(defects=defects, unparsed_data=unparsed_data) ``` we can do some checks with `coverage` to see which of the two conditions actually arises in the test cases (without the extra `TestOkayHeaders` test). This shows that the `unparsed_data` check isn't triggered by the test suite. Are there any real world situations (except for the case I've identified in this issue) where `unparsed_data` could be non-empty? If not, then perhaps we shouldn't check it at all, which makes the fix for this issue much easier.
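Editor's note: a short, standard-library-only demonstration of the root cause. For `message/rfc822` (and multipart) content types, `email.message.Message` stores its payload as a list of `Message` objects, so `get_payload()` does not return a string and the old check mistook that list for unparsed data. This is a sketch of the behaviour, not urllib3 code.

```python
from email.parser import Parser

plain = Parser().parsestr("Content-Type: text/plain\n\n")
rfc822 = Parser().parsestr("Content-Type: message/rfc822\n\n")

print(plain.is_multipart(), type(plain.get_payload()))
# False <class 'str'> -> nothing for assert_header_parsing to complain about

print(rfc822.is_multipart(), type(rfc822.get_payload()))
# True <class 'list'> -> a list of Message objects, which is what appeared as
# "Unknown, unparsed data: [<http.client.HTTPMessage object ...>]" above
```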
2018-09-12T17:42:54Z
[]
[]
urllib3/urllib3
1,442
urllib3__urllib3-1442
[ "1441" ]
03d884bc51f56eb87369df0a0ab9108369b425b5
diff --git a/src/urllib3/response.py b/src/urllib3/response.py --- a/src/urllib3/response.py +++ b/src/urllib3/response.py @@ -90,7 +90,31 @@ def decompress(self, data): self._obj = zlib.decompressobj(16 + zlib.MAX_WBITS) +class MultiDecoder(object): + """ + From RFC7231: + If one or more encodings have been applied to a representation, the + sender that applied the encodings MUST generate a Content-Encoding + header field that lists the content codings in the order in which + they were applied. + """ + + def __init__(self, modes): + self._decoders = [_get_decoder(m.strip()) for m in modes.split(',')] + + def flush(self): + return self._decoders[0].flush() + + def decompress(self, data): + for d in reversed(self._decoders): + data = d.decompress(data) + return data + + def _get_decoder(mode): + if ',' in mode: + return MultiDecoder(mode) + if mode == 'gzip': return GzipDecoder() @@ -283,8 +307,13 @@ def _init_decoder(self): # Note: content-encoding value should be case-insensitive, per RFC 7230 # Section 3.2 content_encoding = self.headers.get('content-encoding', '').lower() - if self._decoder is None and content_encoding in self.CONTENT_DECODERS: - self._decoder = _get_decoder(content_encoding) + if self._decoder is None: + if content_encoding in self.CONTENT_DECODERS: + self._decoder = _get_decoder(content_encoding) + elif ',' in content_encoding: + encodings = [e.strip() for e in content_encoding.split(',') if e.strip() in self.CONTENT_DECODERS] + if len(encodings): + self._decoder = _get_decoder(content_encoding) def _decode(self, data, decode_content, flush_decoder): """
diff --git a/test/test_response.py b/test/test_response.py --- a/test/test_response.py +++ b/test/test_response.py @@ -208,6 +208,38 @@ def test_chunked_decoding_gzip_swallow_garbage(self): assert r.data == b'foofoofoo' + def test_multi_decoding_deflate_deflate(self): + data = zlib.compress(zlib.compress(b'foo')) + + fp = BytesIO(data) + r = HTTPResponse(fp, headers={'content-encoding': 'deflate, deflate'}) + + assert r.data == b'foo' + + def test_multi_decoding_deflate_gzip(self): + compress = zlib.compressobj(6, zlib.DEFLATED, 16 + zlib.MAX_WBITS) + data = compress.compress(zlib.compress(b'foo')) + data += compress.flush() + + fp = BytesIO(data) + r = HTTPResponse(fp, headers={'content-encoding': 'deflate, gzip'}) + + assert r.data == b'foo' + + def test_multi_decoding_gzip_gzip(self): + compress = zlib.compressobj(6, zlib.DEFLATED, 16 + zlib.MAX_WBITS) + data = compress.compress(b'foo') + data += compress.flush() + + compress = zlib.compressobj(6, zlib.DEFLATED, 16 + zlib.MAX_WBITS) + data = compress.compress(data) + data += compress.flush() + + fp = BytesIO(data) + r = HTTPResponse(fp, headers={'content-encoding': 'gzip, gzip'}) + + assert r.data == b'foo' + def test_body_blob(self): resp = HTTPResponse(b'foo') assert resp.data == b'foo'
Content-Encoding with multiple encodings not supported It is possible for a server to respond with a Content-Encoding header like:

`Content-Encoding: deflate, deflate`

From RFC7231:

> If one or more encodings have been applied to a representation, the
> sender that applied the encodings MUST generate a Content-Encoding
> header field that lists the content codings in the order in which
> they were applied.

This can be tested with:

```python
def test_multiple_content_encodings():
    data = zlib.compress(zlib.compress(b'foo'))

    fp = BytesIO(data)
    r = HTTPResponse(fp, headers={'Content-Encoding': 'deflate, deflate'})

    assert r.data == b'foo'
```

Would it make sense to do something like:

```python
content_encoding = self.headers.get('content-encoding', '')
if len(content_encoding.split(',')) > 1:
    self._decoder = MultiDecoder(content_encoding)
```

where MultiDecoder just iterates through the list, applying the standard decoders?
TIL that `Content-Encoding` allows for nested encodings. Do you have an example site that does this? We haven't hit this problem until now. I'm open to a PR that implements this behavior.
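Editor's note: a standalone illustration of the ordering rule from RFC 7231 that the `MultiDecoder` in the patch above relies on — encodings listed in `Content-Encoding` are undone in reverse order. Only the standard library is used; the header value and payload are made up for the example.

```python
import zlib


def gzip_compress(data):
    # Build a gzip container with zlib, mirroring how the tests above do it.
    compressor = zlib.compressobj(6, zlib.DEFLATED, 16 + zlib.MAX_WBITS)
    return compressor.compress(data) + compressor.flush()


# The server applied deflate first and gzip second,
# so it would send: Content-Encoding: deflate, gzip
body = gzip_compress(zlib.compress(b"hello world"))

# The client undoes the encodings in reverse order of the header list:
step1 = zlib.decompress(body, 16 + zlib.MAX_WBITS)  # strip the gzip layer
step2 = zlib.decompress(step1)                       # strip the deflate layer
assert step2 == b"hello world"
```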
2018-09-17T20:20:42Z
[]
[]
urllib3/urllib3
1,450
urllib3__urllib3-1450
[ "1446" ]
6b3636f797840e6e01ed3c1ecfbcb6290069614a
diff --git a/src/urllib3/contrib/_appengine_environ.py b/src/urllib3/contrib/_appengine_environ.py new file mode 100644 --- /dev/null +++ b/src/urllib3/contrib/_appengine_environ.py @@ -0,0 +1,30 @@ +""" +This module provides means to detect the App Engine environment. +""" + +import os + + +def is_appengine(): + return (is_local_appengine() or + is_prod_appengine() or + is_prod_appengine_mvms()) + + +def is_appengine_sandbox(): + return is_appengine() and not is_prod_appengine_mvms() + + +def is_local_appengine(): + return ('APPENGINE_RUNTIME' in os.environ and + 'Development/' in os.environ['SERVER_SOFTWARE']) + + +def is_prod_appengine(): + return ('APPENGINE_RUNTIME' in os.environ and + 'Google App Engine/' in os.environ['SERVER_SOFTWARE'] and + not is_prod_appengine_mvms()) + + +def is_prod_appengine_mvms(): + return os.environ.get('GAE_VM', False) == 'true' diff --git a/src/urllib3/contrib/appengine.py b/src/urllib3/contrib/appengine.py --- a/src/urllib3/contrib/appengine.py +++ b/src/urllib3/contrib/appengine.py @@ -41,7 +41,6 @@ from __future__ import absolute_import import io import logging -import os import warnings from ..packages.six.moves.urllib.parse import urljoin @@ -58,6 +57,7 @@ from ..response import HTTPResponse from ..util.timeout import Timeout from ..util.retry import Retry +from . import _appengine_environ try: from google.appengine.api import urlfetch @@ -280,26 +280,10 @@ def _get_retries(self, retries, redirect): return retries -def is_appengine(): - return (is_local_appengine() or - is_prod_appengine() or - is_prod_appengine_mvms()) +# Alias methods from _appengine_environ to maintain public API interface. - -def is_appengine_sandbox(): - return is_appengine() and not is_prod_appengine_mvms() - - -def is_local_appengine(): - return ('APPENGINE_RUNTIME' in os.environ and - 'Development/' in os.environ['SERVER_SOFTWARE']) - - -def is_prod_appengine(): - return ('APPENGINE_RUNTIME' in os.environ and - 'Google App Engine/' in os.environ['SERVER_SOFTWARE'] and - not is_prod_appengine_mvms()) - - -def is_prod_appengine_mvms(): - return os.environ.get('GAE_VM', False) == 'true' +is_appengine = _appengine_environ.is_appengine +is_appengine_sandbox = _appengine_environ.is_appengine_sandbox +is_local_appengine = _appengine_environ.is_local_appengine +is_prod_appengine = _appengine_environ.is_prod_appengine +is_prod_appengine_mvms = _appengine_environ.is_prod_appengine_mvms diff --git a/src/urllib3/util/connection.py b/src/urllib3/util/connection.py --- a/src/urllib3/util/connection.py +++ b/src/urllib3/util/connection.py @@ -1,6 +1,7 @@ from __future__ import absolute_import import socket from .wait import NoWayToWaitForSocketError, wait_for_read +from ..contrib import _appengine_environ def is_connection_dropped(conn): # Platform-specific @@ -105,6 +106,13 @@ def _has_ipv6(host): sock = None has_ipv6 = False + # App Engine doesn't support IPV6 sockets and actually has a quota on the + # number of sockets that can be used, so just early out here instead of + # creating a socket needlessly. + # See https://github.com/urllib3/urllib3/issues/1446 + if _appengine_environ.is_appengine_sandbox(): + return False + if socket.has_ipv6: # has_ipv6 returns true if cPython was compiled with IPv6 support. # It does not tell us if the system has IPv6 support enabled. To
diff --git a/test/test_util.py b/test/test_util.py --- a/test/test_util.py +++ b/test/test_util.py @@ -540,6 +540,13 @@ def test_has_ipv6_enabled_and_working(self): instance.bind.return_value = True assert _has_ipv6('::1') + def test_has_ipv6_disabled_on_appengine(self): + gae_patch = patch( + 'urllib3.contrib._appengine_environ.is_appengine_sandbox', + return_value=True) + with gae_patch: + assert not _has_ipv6('::1') + def test_ip_family_ipv6_enabled(self): with patch('urllib3.util.connection.HAS_IPV6', True): assert allowed_gai_family() == socket.AF_UNSPEC
Creating socket to check if IPv6 is enabled on AppEngine Module `urllib3/util/connection.py` executes the `_has_ipv6` function when it is imported. To check whether IPv6 is enabled, this function creates a socket:

    sock = socket.socket(socket.AF_INET6)
    sock.bind((host, 0))
    has_ipv6 = True

From the documentation: https://urllib3.readthedocs.io/en/latest/reference/urllib3.contrib.html using sockets should be enabled only if `GAE_USE_SOCKETS_HTTPLIB` is set to `true`. So before calling that function there should be a check for whether we want to use sockets on App Engine.
@apietrzak I'm not sure I understand. Can you give me a bit more details? Does this cause failures? Does it incorrectly detect ipv6 support? @theacodes On appengine you have a daily quota on creating sockets: https://cloud.google.com/appengine/quotas#Sockets As you can see in this documentation page: https://urllib3.readthedocs.io/en/latest/reference/urllib3.contrib.html there is a way to use urllib3 without sockets. For this purpose an additional "pool manager" was created which uses the URLFetch API:

```
from urllib3.contrib.appengine import AppEngineManager, is_appengine_sandbox

if is_appengine_sandbox():
    # AppEngineManager uses AppEngine's URLFetch API behind the scenes
    http = AppEngineManager()
else:
    # PoolManager uses a socket-level API behind the scenes
    http = PoolManager()
```

The code above always creates a socket. You can check it by adding, for example, `import pdb;pdb.set_trace()` to the `util/connection.py` code. So, unfortunately for us, this is a bug. Additionally, this check runs on each machine on App Engine (just importing urllib3 triggers it). App Engine is a cloud service, so machines are frequently spawned and killed. Example code: ![image](https://user-images.githubusercontent.com/20789218/46474294-c7988d00-c7e2-11e8-8a6a-79059757ef06.png) And logs: ![image](https://user-images.githubusercontent.com/20789218/46474319-dbdc8a00-c7e2-11e8-947c-dc3d67ad7908.png) Gotcha, that makes sense. Thank you for clarifying. I'm not sure if I can do anything here. We make a bit of a mistake in the interface here. `HAS_IPV6` is a global constant in a public module that has existed for years. We can't make it lazy (because it's a global constant) and we can't really do anything in `contrib.appengine` to interrupt it, as the import for that module happens way before `contrib.appengine` has any say in the process. We could figure out how to use some of the environment detection logic in `contrib.appengine` within `_has_ipv6()` to short-circuit the socket stuff. But it feels super gross.
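Editor's note: a condensed sketch of the environment-variable check that the patch above factors out into `urllib3.contrib._appengine_environ`, and of how `_has_ipv6` can bail out before touching a socket. This is an approximation of the vendored helpers, not a drop-in copy.

```python
import os
import socket


def is_appengine_sandbox():
    # Managed VMs (GAE_VM=true) have real sockets, so they don't count.
    if os.environ.get("GAE_VM", "false") == "true":
        return False
    if "APPENGINE_RUNTIME" not in os.environ:
        return False
    software = os.environ.get("SERVER_SOFTWARE", "")
    return "Development/" in software or "Google App Engine/" in software


def has_ipv6(host="::1"):
    # Early-out before any socket is created, so importing the module on
    # App Engine no longer eats into the daily socket quota.
    if is_appengine_sandbox():
        return False
    if not socket.has_ipv6:
        return False
    sock = None
    try:
        sock = socket.socket(socket.AF_INET6)
        sock.bind((host, 0))
        return True
    except Exception:
        return False
    finally:
        if sock is not None:
            sock.close()
```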
2018-10-05T21:36:43Z
[]
[]
urllib3/urllib3
1,463
urllib3__urllib3-1463
[ "1462" ]
0aeba3be0224a930f6ffef254ed12b41303a86d7
diff --git a/src/urllib3/util/ssl_.py b/src/urllib3/util/ssl_.py --- a/src/urllib3/util/ssl_.py +++ b/src/urllib3/util/ssl_.py @@ -263,6 +263,8 @@ def create_urllib3_context(ssl_version=None, cert_reqs=None, """ context = SSLContext(ssl_version or ssl.PROTOCOL_SSLv23) + context.set_ciphers(ciphers or DEFAULT_CIPHERS) + # Setting the default here, as we may have no ssl module on import cert_reqs = ssl.CERT_REQUIRED if cert_reqs is None else cert_reqs
diff --git a/test/test_ssl.py b/test/test_ssl.py --- a/test/test_ssl.py +++ b/test/test_ssl.py @@ -70,3 +70,21 @@ def test_sni_missing_warning_with_ip_addresses(monkeypatch, has_sni, server_host assert SNIMissingWarning in warnings else: assert warn.call_count == 0 + + [email protected]( + ["ciphers", "expected_ciphers"], + [(None, ssl_.DEFAULT_CIPHERS), + ("ECDH+AESGCM:ECDH+CHACHA20", "ECDH+AESGCM:ECDH+CHACHA20")] +) +def test_create_urllib3_context_set_ciphers(monkeypatch, ciphers, expected_ciphers): + + context = mock.create_autospec(ssl_.SSLContext) + context.set_ciphers = mock.Mock() + context.options = 0 + monkeypatch.setattr(ssl_, "SSLContext", lambda *_, **__: context) + + assert ssl_.create_urllib3_context(ciphers=ciphers) is context + + assert context.set_ciphers.call_count == 1 + assert context.set_ciphers.call_args == mock.call(expected_ciphers)
Can no longer create a context with specific ciphers The recent change to remove support for Python 2.6 has made it so that the "ciphers" parameter of the create_urllib3_context function in ssl_.py no longer has any effect. The parameter is unused for the rest of the function. This was working properly in Python 3.6, so I don't know why it was tagged as being Python 2.6 specific.
I see what happened here, the branch for that piece of code was incorrectly marked as being for Python 2.6 only but actually was for Python 2.6+. See: https://github.com/urllib3/urllib3/commit/cb2159878f8b47c5b4bc6d159ae2857b85c0a197#diff-7c9a38cd64066636d0e73a2449a28640L294
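Editor's note: one quick way to confirm the `ciphers` argument is honoured again after this fix. The cipher string is only an illustration (the accepted names depend on the local OpenSSL build), and `SSLContext.get_ciphers()` requires Python 3.6+.

```python
from urllib3.util.ssl_ import create_urllib3_context

# With the fix, the restriction shows up in the resulting context; before
# it, the default cipher list would be used regardless of this argument.
ctx = create_urllib3_context(ciphers="ECDHE+AESGCM:ECDHE+CHACHA20")
print([suite["name"] for suite in ctx.get_ciphers()])
```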
2018-10-23T18:16:23Z
[]
[]
urllib3/urllib3
1,484
urllib3__urllib3-1484
[ "1483" ]
ea29152647797296ddd14144f3bee4bfd18a9d4e
diff --git a/src/urllib3/response.py b/src/urllib3/response.py --- a/src/urllib3/response.py +++ b/src/urllib3/response.py @@ -705,3 +705,20 @@ def geturl(self): return self.retries.history[-1].redirect_location else: return self._request_url + + def __iter__(self): + buffer = [b""] + for chunk in self.stream(decode_content=True): + if b"\n" in chunk: + chunk = chunk.split(b"\n") + yield b"".join(buffer) + chunk[0] + b"\n" + for x in chunk[1:-1]: + yield x + b"\n" + if chunk[-1]: + buffer = [chunk[-1]] + else: + buffer = [] + else: + buffer.append(chunk) + if buffer: + yield b"".join(buffer)
diff --git a/test/test_response.py b/test/test_response.py --- a/test/test_response.py +++ b/test/test_response.py @@ -708,6 +708,41 @@ def test_geturl_retries(self): resp = HTTPResponse(fp, retries=retry) assert resp.geturl() == 'https://www.example.com' + @pytest.mark.parametrize( + ["payload", "expected_stream"], + [(b"", [b""]), + (b"\n", [b"\n"]), + (b"abc\ndef", [b"abc\n", b"def"]), + (b"Hello\nworld\n\n\n!", [b"Hello\n", b"world\n", b"\n", b"\n", b"!"])] + ) + def test__iter__(self, payload, expected_stream): + actual_stream = [] + for chunk in HTTPResponse(BytesIO(payload), preload_content=False): + actual_stream.append(chunk) + + assert actual_stream == expected_stream + + def test__iter__decode_content(self): + def stream(): + # Set up a generator to chunk the gzipped body + compress = zlib.compressobj(6, zlib.DEFLATED, 16 + zlib.MAX_WBITS) + data = compress.compress(b'foo\nbar') + data += compress.flush() + for i in range(0, len(data), 2): + yield data[i:i + 2] + + fp = MockChunkedEncodingResponse(list(stream())) + r = httplib.HTTPResponse(MockSock) + r.fp = fp + headers = {'transfer-encoding': 'chunked', 'content-encoding': 'gzip'} + resp = HTTPResponse(r, preload_content=False, headers=headers) + + data = b'' + for c in resp: + data += c + + assert b'foo\nbar' == data + class MockChunkedEncodingResponse(object): diff --git a/test/with_dummyserver/test_connectionpool.py b/test/with_dummyserver/test_connectionpool.py --- a/test/with_dummyserver/test_connectionpool.py +++ b/test/with_dummyserver/test_connectionpool.py @@ -512,6 +512,23 @@ def test_post_with_multipart(self): self.assertEqual(body[i], expected_body[i]) + def test_post_with_multipart__iter__(self): + data = {'hello': 'world'} + r = self.pool.request('POST', '/echo', + fields=data, + preload_content=False, + multipart_boundary="boundary", + encode_multipart=True) + + chunks = [chunk for chunk in r] + assert chunks == [ + b"--boundary\r\n", + b'Content-Disposition: form-data; name="hello"\r\n', + b'\r\n', + b'world\r\n', + b"--boundary--\r\n" + ] + def test_check_gzip(self): r = self.pool.request('GET', '/encodingrequest', headers={'accept-encoding': 'gzip'})
Maybe it should be explicitly documented that iterating over an HTTPResponse is bad HTTPResponse inherits from io.IOBase, meaning it's iterable. By Python's documentation this means it iterates over lines in the response. The problem is that it calls IOBase's readline, which in turn **calls the object's read method with a size of 1 until it gets a newline.** I discovered this while sending a huge payload and figuring out why it's slow; it might not be obvious to people using the library, who may accidentally call:

    for chunk in response:
        ...

instead of

    for chunk in response.stream(sane_size):
        ...

Maybe iterating over the response should be disallowed, or it should be documented that it's bad to do so.
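Editor's note: a small sketch of the streaming pattern the report recommends instead of bare iteration; the URL is a placeholder. (The patch above additionally gives `HTTPResponse` a line-oriented `__iter__` built on top of `stream()`, so plain iteration no longer falls back to one-byte reads.)

```python
import urllib3

http = urllib3.PoolManager()

# preload_content=False leaves the body on the connection so it can be
# consumed incrementally instead of being read into memory up front.
resp = http.request("GET", "https://example.com/big-download",
                    preload_content=False)

# Read in 64 KiB chunks rather than the 1-byte reads triggered by
# io.IOBase's readline()-based iteration described above.
for chunk in resp.stream(64 * 1024):
    pass  # process each chunk here

resp.release_conn()
```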
2018-11-28T01:59:07Z
[]
[]
urllib3/urllib3
1,487
urllib3__urllib3-1487
[ "1096" ]
cbe5732ab6d05915cb7c9580d505c36c83cc27c2
diff --git a/setup.py b/setup.py --- a/setup.py +++ b/setup.py @@ -49,9 +49,9 @@ license='MIT', packages=['urllib3', 'urllib3.packages', 'urllib3.packages.ssl_match_hostname', - 'urllib3.packages.backports', 'urllib3.contrib', - 'urllib3.contrib._securetransport', 'urllib3.util', - ], + 'urllib3.packages.backports', 'urllib3.packages.rfc3986', + 'urllib3.contrib', 'urllib3.contrib._securetransport', + 'urllib3.util'], package_dir={'': 'src'}, requires=[], python_requires=">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, <4", diff --git a/src/urllib3/packages/rfc3986/__init__.py b/src/urllib3/packages/rfc3986/__init__.py new file mode 100644 --- /dev/null +++ b/src/urllib3/packages/rfc3986/__init__.py @@ -0,0 +1,52 @@ +# -*- coding: utf-8 -*- +# Copyright (c) 2014 Rackspace +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +""" +An implementation of semantics and validations described in RFC 3986. + +See http://rfc3986.readthedocs.io/ for detailed documentation. + +:copyright: (c) 2014 Rackspace +:license: Apache v2.0, see LICENSE for details +""" + +from .api import is_valid_uri +from .api import normalize_uri +from .api import uri_reference +from .api import URIReference +from .api import urlparse +from .parseresult import ParseResult + +__title__ = 'rfc3986' +__author__ = 'Ian Stapleton Cordasco' +__author_email__ = '[email protected]' +__license__ = 'Apache v2.0' +__copyright__ = 'Copyright 2014 Rackspace' +__version__ = '1.2.0' + +__all__ = ( + 'ParseResult', + 'URIReference', + 'is_valid_uri', + 'normalize_uri', + 'uri_reference', + 'urlparse', + '__title__', + '__author__', + '__author_email__', + '__license__', + '__copyright__', + '__version__', +) diff --git a/src/urllib3/packages/rfc3986/abnf_regexp.py b/src/urllib3/packages/rfc3986/abnf_regexp.py new file mode 100644 --- /dev/null +++ b/src/urllib3/packages/rfc3986/abnf_regexp.py @@ -0,0 +1,188 @@ +# -*- coding: utf-8 -*- +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+"""Module for the regular expressions crafted from ABNF.""" + +# https://tools.ietf.org/html/rfc3986#page-13 +GEN_DELIMS = GENERIC_DELIMITERS = ":/?#[]@" +GENERIC_DELIMITERS_SET = set(GENERIC_DELIMITERS) +# https://tools.ietf.org/html/rfc3986#page-13 +SUB_DELIMS = SUB_DELIMITERS = "!$&'()*+,;=" +SUB_DELIMITERS_SET = set(SUB_DELIMITERS) +# Escape the '*' for use in regular expressions +SUB_DELIMITERS_RE = r"!$&'()\*+,;=" +RESERVED_CHARS_SET = GENERIC_DELIMITERS_SET.union(SUB_DELIMITERS_SET) +ALPHA = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz' +DIGIT = '0123456789' +# https://tools.ietf.org/html/rfc3986#section-2.3 +UNRESERVED = UNRESERVED_CHARS = ALPHA + DIGIT + '._!-' +UNRESERVED_CHARS_SET = set(UNRESERVED_CHARS) +NON_PCT_ENCODED_SET = RESERVED_CHARS_SET.union(UNRESERVED_CHARS_SET) +# We need to escape the '-' in this case: +UNRESERVED_RE = r'A-Za-z0-9._~\-' + +# Percent encoded character values +PERCENT_ENCODED = PCT_ENCODED = '%[A-Fa-f0-9]{2}' +PCHAR = '([' + UNRESERVED_RE + SUB_DELIMITERS_RE + ':@]|%s)' % PCT_ENCODED + +# NOTE(sigmavirus24): We're going to use more strict regular expressions +# than appear in Appendix B for scheme. This will prevent over-eager +# consuming of items that aren't schemes. +SCHEME_RE = '[a-zA-Z][a-zA-Z0-9+.-]*' +_AUTHORITY_RE = '[^/?#]*' +_PATH_RE = '[^?#]*' +_QUERY_RE = '[^#]*' +_FRAGMENT_RE = '.*' + +# Extracted from http://tools.ietf.org/html/rfc3986#appendix-B +COMPONENT_PATTERN_DICT = { + 'scheme': SCHEME_RE, + 'authority': _AUTHORITY_RE, + 'path': _PATH_RE, + 'query': _QUERY_RE, + 'fragment': _FRAGMENT_RE, +} + +# See http://tools.ietf.org/html/rfc3986#appendix-B +# In this case, we name each of the important matches so we can use +# SRE_Match#groupdict to parse the values out if we so choose. This is also +# modified to ignore other matches that are not important to the parsing of +# the reference so we can also simply use SRE_Match#groups. +URL_PARSING_RE = ( + r'(?:(?P<scheme>{scheme}):)?(?://(?P<authority>{authority}))?' + r'(?P<path>{path})(?:\?(?P<query>{query}))?' + r'(?:#(?P<fragment>{fragment}))?' 
+).format(**COMPONENT_PATTERN_DICT) + + +# ######################### +# Authority Matcher Section +# ######################### + +# Host patterns, see: http://tools.ietf.org/html/rfc3986#section-3.2.2 +# The pattern for a regular name, e.g., www.google.com, api.github.com +REGULAR_NAME_RE = REG_NAME = '((?:{0}|[{1}])*)'.format( + '%[0-9A-Fa-f]{2}', SUB_DELIMITERS_RE + UNRESERVED_RE +) +# The pattern for an IPv4 address, e.g., 192.168.255.255, 127.0.0.1, +IPv4_RE = '([0-9]{1,3}.){3}[0-9]{1,3}' +# Hexadecimal characters used in each piece of an IPv6 address +HEXDIG_RE = '[0-9A-Fa-f]{1,4}' +# Least-significant 32 bits of an IPv6 address +LS32_RE = '({hex}:{hex}|{ipv4})'.format(hex=HEXDIG_RE, ipv4=IPv4_RE) +# Substitutions into the following patterns for IPv6 patterns defined +# http://tools.ietf.org/html/rfc3986#page-20 +_subs = {'hex': HEXDIG_RE, 'ls32': LS32_RE} + +# Below: h16 = hexdig, see: https://tools.ietf.org/html/rfc5234 for details +# about ABNF (Augmented Backus-Naur Form) use in the comments +variations = [ + # 6( h16 ":" ) ls32 + '(%(hex)s:){6}%(ls32)s' % _subs, + # "::" 5( h16 ":" ) ls32 + '::(%(hex)s:){5}%(ls32)s' % _subs, + # [ h16 ] "::" 4( h16 ":" ) ls32 + '(%(hex)s)?::(%(hex)s:){4}%(ls32)s' % _subs, + # [ *1( h16 ":" ) h16 ] "::" 3( h16 ":" ) ls32 + '((%(hex)s:)?%(hex)s)?::(%(hex)s:){3}%(ls32)s' % _subs, + # [ *2( h16 ":" ) h16 ] "::" 2( h16 ":" ) ls32 + '((%(hex)s:){0,2}%(hex)s)?::(%(hex)s:){2}%(ls32)s' % _subs, + # [ *3( h16 ":" ) h16 ] "::" h16 ":" ls32 + '((%(hex)s:){0,3}%(hex)s)?::%(hex)s:%(ls32)s' % _subs, + # [ *4( h16 ":" ) h16 ] "::" ls32 + '((%(hex)s:){0,4}%(hex)s)?::%(ls32)s' % _subs, + # [ *5( h16 ":" ) h16 ] "::" h16 + '((%(hex)s:){0,5}%(hex)s)?::%(hex)s' % _subs, + # [ *6( h16 ":" ) h16 ] "::" + '((%(hex)s:){0,6}%(hex)s)?::' % _subs, +] + +IPv6_RE = '(({0})|({1})|({2})|({3})|({4})|({5})|({6})|({7})|({8}))'.format( + *variations +) + +IPv_FUTURE_RE = 'v[0-9A-Fa-f]+.[%s]+' % ( + UNRESERVED_RE + SUB_DELIMITERS_RE + ':' +) + + +# RFC 6874 Zone ID ABNF +ZONE_ID = '(?:[' + UNRESERVED_RE + ']|' + PCT_ENCODED + ')+' +IPv6_ADDRZ_RE = IPv6_RE + '%25' + ZONE_ID + +IP_LITERAL_RE = r'\[({0}|(?:{1})|{2})\]'.format( + IPv6_RE, + IPv6_ADDRZ_RE, + IPv_FUTURE_RE, +) + +# Pattern for matching the host piece of the authority +HOST_RE = HOST_PATTERN = '({0}|{1}|{2})'.format( + REG_NAME, + IPv4_RE, + IP_LITERAL_RE, +) +USERINFO_RE = '^([' + UNRESERVED_RE + SUB_DELIMITERS_RE + ':]|%s)+' % ( + PCT_ENCODED +) +PORT_RE = '[0-9]{1,5}' + +# #################### +# Path Matcher Section +# #################### + +# See http://tools.ietf.org/html/rfc3986#section-3.3 for more information +# about the path patterns defined below. +segments = { + 'segment': PCHAR + '*', + # Non-zero length segment + 'segment-nz': PCHAR + '+', + # Non-zero length segment without ":" + 'segment-nz-nc': PCHAR.replace(':', '') + '+' +} + +# Path types taken from Section 3.3 (linked above) +PATH_EMPTY = '^$' +PATH_ROOTLESS = '%(segment-nz)s(/%(segment)s)*' % segments +PATH_NOSCHEME = '%(segment-nz-nc)s(/%(segment)s)*' % segments +PATH_ABSOLUTE = '/(%s)?' 
% PATH_ROOTLESS +PATH_ABEMPTY = '(/%(segment)s)*' % segments +PATH_RE = '^(%s|%s|%s|%s|%s)$' % ( + PATH_ABEMPTY, PATH_ABSOLUTE, PATH_NOSCHEME, PATH_ROOTLESS, PATH_EMPTY +) + +FRAGMENT_RE = QUERY_RE = ( + '^([/?:@' + UNRESERVED_RE + SUB_DELIMITERS_RE + ']|%s)*$' % PCT_ENCODED +) + +# ########################## +# Relative reference matcher +# ########################## + +# See http://tools.ietf.org/html/rfc3986#section-4.2 for details +RELATIVE_PART_RE = '(//%s%s|%s|%s|%s)' % ( + COMPONENT_PATTERN_DICT['authority'], + PATH_ABEMPTY, + PATH_ABSOLUTE, + PATH_NOSCHEME, + PATH_EMPTY, +) + +# See http://tools.ietf.org/html/rfc3986#section-3 for definition +HIER_PART_RE = '(//%s%s|%s|%s|%s)' % ( + COMPONENT_PATTERN_DICT['authority'], + PATH_ABEMPTY, + PATH_ABSOLUTE, + PATH_ROOTLESS, + PATH_EMPTY, +) diff --git a/src/urllib3/packages/rfc3986/api.py b/src/urllib3/packages/rfc3986/api.py new file mode 100644 --- /dev/null +++ b/src/urllib3/packages/rfc3986/api.py @@ -0,0 +1,91 @@ +# -*- coding: utf-8 -*- +# Copyright (c) 2014 Rackspace +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" +Module containing the simple and functional API for rfc3986. + +This module defines functions and provides access to the public attributes +and classes of rfc3986. +""" + +from .parseresult import ParseResult +from .uri import URIReference + + +def uri_reference(uri, encoding='utf-8'): + """Parse a URI string into a URIReference. + + This is a convenience function. You could achieve the same end by using + ``URIReference.from_string(uri)``. + + :param str uri: The URI which needs to be parsed into a reference. + :param str encoding: The encoding of the string provided + :returns: A parsed URI + :rtype: :class:`URIReference` + """ + return URIReference.from_string(uri, encoding) + + +def is_valid_uri(uri, encoding='utf-8', **kwargs): + """Determine if the URI given is valid. + + This is a convenience function. You could use either + ``uri_reference(uri).is_valid()`` or + ``URIReference.from_string(uri).is_valid()`` to achieve the same result. + + :param str uri: The URI to be validated. + :param str encoding: The encoding of the string provided + :param bool require_scheme: Set to ``True`` if you wish to require the + presence of the scheme component. + :param bool require_authority: Set to ``True`` if you wish to require the + presence of the authority component. + :param bool require_path: Set to ``True`` if you wish to require the + presence of the path component. + :param bool require_query: Set to ``True`` if you wish to require the + presence of the query component. + :param bool require_fragment: Set to ``True`` if you wish to require the + presence of the fragment component. + :returns: ``True`` if the URI is valid, ``False`` otherwise. + :rtype: bool + """ + return URIReference.from_string(uri, encoding).is_valid(**kwargs) + + +def normalize_uri(uri, encoding='utf-8'): + """Normalize the given URI. + + This is a convenience function. 
You could use either + ``uri_reference(uri).normalize().unsplit()`` or + ``URIReference.from_string(uri).normalize().unsplit()`` instead. + + :param str uri: The URI to be normalized. + :param str encoding: The encoding of the string provided + :returns: The normalized URI. + :rtype: str + """ + normalized_reference = URIReference.from_string(uri, encoding).normalize() + return normalized_reference.unsplit() + + +def urlparse(uri, encoding='utf-8'): + """Parse a given URI and return a ParseResult. + + This is a partial replacement of the standard library's urlparse function. + + :param str uri: The URI to be parsed. + :param str encoding: The encoding of the string provided. + :returns: A parsed URI + :rtype: :class:`~rfc3986.parseresult.ParseResult` + """ + return ParseResult.from_string(uri, encoding, strict=False) diff --git a/src/urllib3/packages/rfc3986/builder.py b/src/urllib3/packages/rfc3986/builder.py new file mode 100644 --- /dev/null +++ b/src/urllib3/packages/rfc3986/builder.py @@ -0,0 +1,298 @@ +# -*- coding: utf-8 -*- +# Copyright (c) 2017 Ian Stapleton Cordasco +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. +"""Module containing the logic for the URIBuilder object.""" +from . import compat +from . import normalizers +from . import uri + + +class URIBuilder(object): + """Object to aid in building up a URI Reference from parts. + + .. note:: + + This object should be instantiated by the user, but it's recommended + that it is not provided with arguments. Instead, use the available + method to populate the fields. + + """ + + def __init__(self, scheme=None, userinfo=None, host=None, port=None, + path=None, query=None, fragment=None): + """Initialize our URI builder. + + :param str scheme: + (optional) + :param str userinfo: + (optional) + :param str host: + (optional) + :param int port: + (optional) + :param str path: + (optional) + :param str query: + (optional) + :param str fragment: + (optional) + """ + self.scheme = scheme + self.userinfo = userinfo + self.host = host + self.port = port + self.path = path + self.query = query + self.fragment = fragment + + def __repr__(self): + """Provide a convenient view of our builder object.""" + formatstr = ('URIBuilder(scheme={b.scheme}, userinfo={b.userinfo}, ' + 'host={b.host}, port={b.port}, path={b.path}, ' + 'query={b.query}, fragment={b.fragment})') + return formatstr.format(b=self) + + def add_scheme(self, scheme): + """Add a scheme to our builder object. + + After normalizing, this will generate a new URIBuilder instance with + the specified scheme and all other attributes the same. + + .. 
code-block:: python + + >>> URIBuilder().add_scheme('HTTPS') + URIBuilder(scheme='https', userinfo=None, host=None, port=None, + path=None, query=None, fragment=None) + + """ + scheme = normalizers.normalize_scheme(scheme) + return URIBuilder( + scheme=scheme, + userinfo=self.userinfo, + host=self.host, + port=self.port, + path=self.path, + query=self.query, + fragment=self.fragment, + ) + + def add_credentials(self, username, password): + """Add credentials as the userinfo portion of the URI. + + .. code-block:: python + + >>> URIBuilder().add_credentials('root', 's3crete') + URIBuilder(scheme=None, userinfo='root:s3crete', host=None, + port=None, path=None, query=None, fragment=None) + + >>> URIBuilder().add_credentials('root', None) + URIBuilder(scheme=None, userinfo='root', host=None, + port=None, path=None, query=None, fragment=None) + """ + if username is None: + raise ValueError('Username cannot be None') + userinfo = normalizers.normalize_username(username) + + if password is not None: + userinfo = '{}:{}'.format( + userinfo, + normalizers.normalize_password(password), + ) + + return URIBuilder( + scheme=self.scheme, + userinfo=userinfo, + host=self.host, + port=self.port, + path=self.path, + query=self.query, + fragment=self.fragment, + ) + + def add_host(self, host): + """Add hostname to the URI. + + .. code-block:: python + + >>> URIBuilder().add_host('google.com') + URIBuilder(scheme=None, userinfo=None, host='google.com', + port=None, path=None, query=None, fragment=None) + + """ + return URIBuilder( + scheme=self.scheme, + userinfo=self.userinfo, + host=normalizers.normalize_host(host), + port=self.port, + path=self.path, + query=self.query, + fragment=self.fragment, + ) + + def add_port(self, port): + """Add port to the URI. + + .. code-block:: python + + >>> URIBuilder().add_port(80) + URIBuilder(scheme=None, userinfo=None, host=None, port='80', + path=None, query=None, fragment=None) + + >>> URIBuilder().add_port(443) + URIBuilder(scheme=None, userinfo=None, host=None, port='443', + path=None, query=None, fragment=None) + + """ + port_int = int(port) + if port_int < 0: + raise ValueError( + 'ports are not allowed to be negative. You provided {}'.format( + port_int, + ) + ) + if port_int > 65535: + raise ValueError( + 'ports are not allowed to be larger than 65535. ' + 'You provided {}'.format( + port_int, + ) + ) + + return URIBuilder( + scheme=self.scheme, + userinfo=self.userinfo, + host=self.host, + port='{}'.format(port_int), + path=self.path, + query=self.query, + fragment=self.fragment, + ) + + def add_path(self, path): + """Add a path to the URI. + + .. code-block:: python + + >>> URIBuilder().add_path('sigmavirus24/rfc3985') + URIBuilder(scheme=None, userinfo=None, host=None, port=None, + path='/sigmavirus24/rfc3986', query=None, fragment=None) + + >>> URIBuilder().add_path('/checkout.php') + URIBuilder(scheme=None, userinfo=None, host=None, port=None, + path='/checkout.php', query=None, fragment=None) + + """ + if not path.startswith('/'): + path = '/{}'.format(path) + + return URIBuilder( + scheme=self.scheme, + userinfo=self.userinfo, + host=self.host, + port=self.port, + path=normalizers.normalize_path(path), + query=self.query, + fragment=self.fragment, + ) + + def add_query_from(self, query_items): + """Generate and add a query a dictionary or list of tuples. + + .. 
code-block:: python + + >>> URIBuilder().add_query_from({'a': 'b c'}) + URIBuilder(scheme=None, userinfo=None, host=None, port=None, + path=None, query='a=b+c', fragment=None) + + >>> URIBuilder().add_query_from([('a', 'b c')]) + URIBuilder(scheme=None, userinfo=None, host=None, port=None, + path=None, query='a=b+c', fragment=None) + + """ + query = normalizers.normalize_query(compat.urlencode(query_items)) + + return URIBuilder( + scheme=self.scheme, + userinfo=self.userinfo, + host=self.host, + port=self.port, + path=self.path, + query=query, + fragment=self.fragment, + ) + + def add_query(self, query): + """Add a pre-formated query string to the URI. + + .. code-block:: python + + >>> URIBuilder().add_query('a=b&c=d') + URIBuilder(scheme=None, userinfo=None, host=None, port=None, + path=None, query='a=b&c=d', fragment=None) + + """ + return URIBuilder( + scheme=self.scheme, + userinfo=self.userinfo, + host=self.host, + port=self.port, + path=self.path, + query=normalizers.normalize_query(query), + fragment=self.fragment, + ) + + def add_fragment(self, fragment): + """Add a fragment to the URI. + + .. code-block:: python + + >>> URIBuilder().add_fragment('section-2.6.1') + URIBuilder(scheme=None, userinfo=None, host=None, port=None, + path=None, query=None, fragment='section-2.6.1') + + """ + return URIBuilder( + scheme=self.scheme, + userinfo=self.userinfo, + host=self.host, + port=self.port, + path=self.path, + query=self.query, + fragment=normalizers.normalize_fragment(fragment), + ) + + def finalize(self): + """Create a URIReference from our builder. + + .. code-block:: python + + >>> URIBuilder().add_scheme('https').add_host('github.com' + ... ).add_path('sigmavirus24/rfc3986').finalize().unsplit() + 'https://github.com/sigmavirus24/rfc3986' + + >>> URIBuilder().add_scheme('https').add_host('github.com' + ... ).add_path('sigmavirus24/rfc3986').add_credentials( + ... 'sigmavirus24', 'not-re@l').finalize().unsplit() + 'https://sigmavirus24:not-re%[email protected]/sigmavirus24/rfc3986' + + """ + return uri.URIReference( + self.scheme, + normalizers.normalize_authority( + (self.userinfo, self.host, self.port) + ), + self.path, + self.query, + self.fragment, + ) diff --git a/src/urllib3/packages/rfc3986/compat.py b/src/urllib3/packages/rfc3986/compat.py new file mode 100644 --- /dev/null +++ b/src/urllib3/packages/rfc3986/compat.py @@ -0,0 +1,54 @@ +# -*- coding: utf-8 -*- +# Copyright (c) 2014 Rackspace +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+"""Compatibility module for Python 2 and 3 support.""" +import sys + +try: + from urllib.parse import quote as urlquote +except ImportError: # Python 2.x + from urllib import quote as urlquote + +try: + from urllib.parse import urlencode +except ImportError: # Python 2.x + from urllib import urlencode + +__all__ = ( + 'to_bytes', + 'to_str', + 'urlquote', + 'urlencode', +) + +PY3 = (3, 0) <= sys.version_info < (4, 0) +PY2 = (2, 6) <= sys.version_info < (2, 8) + + +if PY3: + unicode = str # Python 3.x + + +def to_str(b, encoding='utf-8'): + """Ensure that b is text in the specified encoding.""" + if hasattr(b, 'decode') and not isinstance(b, unicode): + b = b.decode(encoding) + return b + + +def to_bytes(s, encoding='utf-8'): + """Ensure that s is converted to bytes from the encoding.""" + if hasattr(s, 'encode') and not isinstance(s, bytes): + s = s.encode(encoding) + return s diff --git a/src/urllib3/packages/rfc3986/exceptions.py b/src/urllib3/packages/rfc3986/exceptions.py new file mode 100644 --- /dev/null +++ b/src/urllib3/packages/rfc3986/exceptions.py @@ -0,0 +1,111 @@ +# -*- coding: utf-8 -*- +"""Exceptions module for rfc3986.""" + + +class RFC3986Exception(Exception): + """Base class for all rfc3986 exception classes.""" + + pass + + +class InvalidAuthority(RFC3986Exception): + """Exception when the authority string is invalid.""" + + def __init__(self, authority): + """Initialize the exception with the invalid authority.""" + super(InvalidAuthority, self).__init__( + "The authority ({0}) is not valid.".format(authority)) + + +class InvalidPort(RFC3986Exception): + """Exception when the port is invalid.""" + + def __init__(self, port): + """Initialize the exception with the invalid port.""" + super(InvalidPort, self).__init__( + 'The port ("{0}") is not valid.'.format(port)) + + +class ResolutionError(RFC3986Exception): + """Exception to indicate a failure to resolve a URI.""" + + def __init__(self, uri): + """Initialize the error with the failed URI.""" + super(ResolutionError, self).__init__( + "{0} is not an absolute URI.".format(uri.unsplit())) + + +class ValidationError(RFC3986Exception): + """Exception raised during Validation of a URI.""" + + pass + + +class MissingComponentError(ValidationError): + """Exception raised when a required component is missing.""" + + def __init__(self, uri, *component_names): + """Initialize the error with the missing component name.""" + verb = 'was' + if len(component_names) > 1: + verb = 'were' + + self.uri = uri + self.components = sorted(component_names) + components = ', '.join(self.components) + super(MissingComponentError, self).__init__( + "{} {} required but missing".format(components, verb), + uri, + self.components, + ) + + +class UnpermittedComponentError(ValidationError): + """Exception raised when a component has an unpermitted value.""" + + def __init__(self, component_name, component_value, allowed_values): + """Initialize the error with the unpermitted component.""" + super(UnpermittedComponentError, self).__init__( + "{} was required to be one of {!r} but was {!r}".format( + component_name, list(sorted(allowed_values)), component_value, + ), + component_name, + component_value, + allowed_values, + ) + self.component_name = component_name + self.component_value = component_value + self.allowed_values = allowed_values + + +class PasswordForbidden(ValidationError): + """Exception raised when a URL has a password in the userinfo section.""" + + def __init__(self, uri): + """Initialize the error with the URI that failed 
validation.""" + unsplit = getattr(uri, 'unsplit', lambda: uri) + super(PasswordForbidden, self).__init__( + '"{}" contained a password when validation forbade it'.format( + unsplit() + ) + ) + self.uri = uri + + +class InvalidComponentsError(ValidationError): + """Exception raised when one or more components are invalid.""" + + def __init__(self, uri, *component_names): + """Initialize the error with the invalid component name(s).""" + verb = 'was' + if len(component_names) > 1: + verb = 'were' + + self.uri = uri + self.components = sorted(component_names) + components = ', '.join(self.components) + super(InvalidComponentsError, self).__init__( + "{} {} found to be invalid".format(components, verb), + uri, + self.components, + ) diff --git a/src/urllib3/packages/rfc3986/misc.py b/src/urllib3/packages/rfc3986/misc.py new file mode 100644 --- /dev/null +++ b/src/urllib3/packages/rfc3986/misc.py @@ -0,0 +1,102 @@ +# -*- coding: utf-8 -*- +# Copyright (c) 2014 Rackspace +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" +Module containing compiled regular expressions and constants. + +This module contains important constants, patterns, and compiled regular +expressions for parsing and validating URIs and their components. +""" + +import re + +from . import abnf_regexp + +# These are enumerated for the named tuple used as a superclass of +# URIReference +URI_COMPONENTS = ['scheme', 'authority', 'path', 'query', 'fragment'] + +important_characters = { + 'generic_delimiters': abnf_regexp.GENERIC_DELIMITERS, + 'sub_delimiters': abnf_regexp.SUB_DELIMITERS, + # We need to escape the '*' in this case + 're_sub_delimiters': abnf_regexp.SUB_DELIMITERS_RE, + 'unreserved_chars': abnf_regexp.UNRESERVED_CHARS, + # We need to escape the '-' in this case: + 're_unreserved': abnf_regexp.UNRESERVED_RE, +} + +# For details about delimiters and reserved characters, see: +# http://tools.ietf.org/html/rfc3986#section-2.2 +GENERIC_DELIMITERS = abnf_regexp.GENERIC_DELIMITERS_SET +SUB_DELIMITERS = abnf_regexp.SUB_DELIMITERS_SET +RESERVED_CHARS = abnf_regexp.RESERVED_CHARS_SET +# For details about unreserved characters, see: +# http://tools.ietf.org/html/rfc3986#section-2.3 +UNRESERVED_CHARS = abnf_regexp.UNRESERVED_CHARS_SET +NON_PCT_ENCODED = abnf_regexp.NON_PCT_ENCODED_SET + +URI_MATCHER = re.compile(abnf_regexp.URL_PARSING_RE) + +SUBAUTHORITY_MATCHER = re.compile(( + '^(?:(?P<userinfo>{0})@)?' 
# userinfo + '(?P<host>{1})' # host + ':?(?P<port>{2})?$' # port + ).format(abnf_regexp.USERINFO_RE, + abnf_regexp.HOST_PATTERN, + abnf_regexp.PORT_RE)) + + +IPv4_MATCHER = re.compile('^' + abnf_regexp.IPv4_RE + '$') + +# Matcher used to validate path components +PATH_MATCHER = re.compile(abnf_regexp.PATH_RE) + + +# ################################## +# Query and Fragment Matcher Section +# ################################## + +QUERY_MATCHER = re.compile(abnf_regexp.QUERY_RE) + +FRAGMENT_MATCHER = QUERY_MATCHER + +# Scheme validation, see: http://tools.ietf.org/html/rfc3986#section-3.1 +SCHEME_MATCHER = re.compile('^{0}$'.format(abnf_regexp.SCHEME_RE)) + +RELATIVE_REF_MATCHER = re.compile(r'^%s(\?%s)?(#%s)?$' % ( + abnf_regexp.RELATIVE_PART_RE, abnf_regexp.QUERY_RE, + abnf_regexp.FRAGMENT_RE, +)) + +# See http://tools.ietf.org/html/rfc3986#section-4.3 +ABSOLUTE_URI_MATCHER = re.compile(r'^%s:%s(\?%s)?$' % ( + abnf_regexp.COMPONENT_PATTERN_DICT['scheme'], + abnf_regexp.HIER_PART_RE, + abnf_regexp.QUERY_RE[1:-1], +)) + + +# Path merger as defined in http://tools.ietf.org/html/rfc3986#section-5.2.3 +def merge_paths(base_uri, relative_path): + """Merge a base URI's path with a relative URI's path.""" + if base_uri.path is None and base_uri.authority is not None: + return '/' + relative_path + else: + path = base_uri.path or '' + index = path.rfind('/') + return path[:index] + '/' + relative_path + + +UseExisting = object() diff --git a/src/urllib3/packages/rfc3986/normalizers.py b/src/urllib3/packages/rfc3986/normalizers.py new file mode 100644 --- /dev/null +++ b/src/urllib3/packages/rfc3986/normalizers.py @@ -0,0 +1,152 @@ +# -*- coding: utf-8 -*- +# Copyright (c) 2014 Rackspace +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. +"""Module with functions to normalize components.""" +import re + +from . import compat +from . 
import misc + + +def normalize_scheme(scheme): + """Normalize the scheme component.""" + return scheme.lower() + + +def normalize_authority(authority): + """Normalize an authority tuple to a string.""" + userinfo, host, port = authority + result = '' + if userinfo: + result += normalize_percent_characters(userinfo) + '@' + if host: + result += normalize_host(host) + if port: + result += ':' + port + return result + + +def normalize_username(username): + """Normalize a username to make it safe to include in userinfo.""" + return compat.urlquote(username) + + +def normalize_password(password): + """Normalize a password to make safe for userinfo.""" + return compat.urlquote(password) + + +def normalize_host(host): + """Normalize a host string.""" + return host.lower() + + +def normalize_path(path): + """Normalize the path string.""" + if not path: + return path + + path = normalize_percent_characters(path) + return remove_dot_segments(path) + + +def normalize_query(query): + """Normalize the query string.""" + if not query: + return query + return normalize_percent_characters(query) + + +def normalize_fragment(fragment): + """Normalize the fragment string.""" + if not fragment: + return fragment + return normalize_percent_characters(fragment) + + +PERCENT_MATCHER = re.compile('%[A-Fa-f0-9]{2}') + + +def normalize_percent_characters(s): + """All percent characters should be upper-cased. + + For example, ``"%3afoo%DF%ab"`` should be turned into ``"%3Afoo%DF%AB"``. + """ + matches = set(PERCENT_MATCHER.findall(s)) + for m in matches: + if not m.isupper(): + s = s.replace(m, m.upper()) + return s + + +def remove_dot_segments(s): + """Remove dot segments from the string. + + See also Section 5.2.4 of :rfc:`3986`. + """ + # See http://tools.ietf.org/html/rfc3986#section-5.2.4 for pseudo-code + segments = s.split('/') # Turn the path into a list of segments + output = [] # Initialize the variable to use to store output + + for segment in segments: + # '.' is the current directory, so ignore it, it is superfluous + if segment == '.': + continue + # Anything other than '..', should be appended to the output + elif segment != '..': + output.append(segment) + # In this case segment == '..', if we can, we should pop the last + # element + elif output: + output.pop() + + # If the path starts with '/' and the output is empty or the first string + # is non-empty + if s.startswith('/') and (not output or output[0]): + output.insert(0, '') + + # If the path starts with '/.' or '/..' ensure we add one more empty + # string to add a trailing '/' + if s.endswith(('/.', '/..')): + output.append('') + + return '/'.join(output) + + +def encode_component(uri_component, encoding): + """Encode the specific component in the provided encoding.""" + if uri_component is None: + return uri_component + + # Try to see if the component we're encoding is already percent-encoded + # so we can skip all '%' characters but still encode all others. 
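+    # The component is treated as already percent-encoded only when every
+    # '%' byte starts a valid "%XX" escape, i.e. when the count of escape
+    # sequences found below equals the count of raw '%' bytes.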
+ percent_encodings = len(PERCENT_MATCHER.findall( + compat.to_str(uri_component, encoding))) + + uri_bytes = compat.to_bytes(uri_component, encoding) + is_percent_encoded = percent_encodings == uri_bytes.count(b'%') + + encoded_uri = bytearray() + + for i in range(0, len(uri_bytes)): + # Will return a single character bytestring on both Python 2 & 3 + byte = uri_bytes[i:i+1] + byte_ord = ord(byte) + if ((is_percent_encoded and byte == b'%') + or (byte_ord < 128 and byte.decode() in misc.NON_PCT_ENCODED)): + encoded_uri.extend(byte) + continue + encoded_uri.extend('%{0:02x}'.format(byte_ord).encode()) + + return encoded_uri.decode(encoding) diff --git a/src/urllib3/packages/rfc3986/parseresult.py b/src/urllib3/packages/rfc3986/parseresult.py new file mode 100644 --- /dev/null +++ b/src/urllib3/packages/rfc3986/parseresult.py @@ -0,0 +1,385 @@ +# -*- coding: utf-8 -*- +# Copyright (c) 2015 Ian Stapleton Cordasco +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. +"""Module containing the urlparse compatibility logic.""" +from collections import namedtuple + +from . import compat +from . import exceptions +from . import misc +from . import normalizers +from . import uri + +__all__ = ('ParseResult', 'ParseResultBytes') + +PARSED_COMPONENTS = ('scheme', 'userinfo', 'host', 'port', 'path', 'query', + 'fragment') + + +class ParseResultMixin(object): + def _generate_authority(self, attributes): + # I swear I did not align the comparisons below. That's just how they + # happened to align based on pep8 and attribute lengths. + userinfo, host, port = (attributes[p] + for p in ('userinfo', 'host', 'port')) + if (self.userinfo != userinfo or + self.host != host or + self.port != port): + if port: + port = '{0}'.format(port) + return normalizers.normalize_authority( + (compat.to_str(userinfo, self.encoding), + compat.to_str(host, self.encoding), + port) + ) + return self.authority + + def geturl(self): + """Shim to match the standard library method.""" + return self.unsplit() + + @property + def hostname(self): + """Shim to match the standard library.""" + return self.host + + @property + def netloc(self): + """Shim to match the standard library.""" + return self.authority + + @property + def params(self): + """Shim to match the standard library.""" + return self.query + + +class ParseResult(namedtuple('ParseResult', PARSED_COMPONENTS), + ParseResultMixin): + """Implementation of urlparse compatibility class. + + This uses the URIReference logic to handle compatibility with the + urlparse.ParseResult class. 
+ """ + + slots = () + + def __new__(cls, scheme, userinfo, host, port, path, query, fragment, + uri_ref, encoding='utf-8'): + """Create a new ParseResult.""" + parse_result = super(ParseResult, cls).__new__( + cls, + scheme or None, + userinfo or None, + host, + port or None, + path or None, + query, + fragment) + parse_result.encoding = encoding + parse_result.reference = uri_ref + return parse_result + + @classmethod + def from_parts(cls, scheme=None, userinfo=None, host=None, port=None, + path=None, query=None, fragment=None, encoding='utf-8'): + """Create a ParseResult instance from its parts.""" + authority = '' + if userinfo is not None: + authority += userinfo + '@' + if host is not None: + authority += host + if port is not None: + authority += ':{0}'.format(port) + uri_ref = uri.URIReference(scheme=scheme, + authority=authority, + path=path, + query=query, + fragment=fragment, + encoding=encoding).normalize() + userinfo, host, port = authority_from(uri_ref, strict=True) + return cls(scheme=uri_ref.scheme, + userinfo=userinfo, + host=host, + port=port, + path=uri_ref.path, + query=uri_ref.query, + fragment=uri_ref.fragment, + uri_ref=uri_ref, + encoding=encoding) + + @classmethod + def from_string(cls, uri_string, encoding='utf-8', strict=True, + lazy_normalize=True): + """Parse a URI from the given unicode URI string. + + :param str uri_string: Unicode URI to be parsed into a reference. + :param str encoding: The encoding of the string provided + :param bool strict: Parse strictly according to :rfc:`3986` if True. + If False, parse similarly to the standard library's urlparse + function. + :returns: :class:`ParseResult` or subclass thereof + """ + reference = uri.URIReference.from_string(uri_string, encoding) + if not lazy_normalize: + reference = reference.normalize() + userinfo, host, port = authority_from(reference, strict) + + return cls(scheme=reference.scheme, + userinfo=userinfo, + host=host, + port=port, + path=reference.path, + query=reference.query, + fragment=reference.fragment, + uri_ref=reference, + encoding=encoding) + + @property + def authority(self): + """Return the normalized authority.""" + return self.reference.authority + + def copy_with(self, scheme=misc.UseExisting, userinfo=misc.UseExisting, + host=misc.UseExisting, port=misc.UseExisting, + path=misc.UseExisting, query=misc.UseExisting, + fragment=misc.UseExisting): + """Create a copy of this instance replacing with specified parts.""" + attributes = zip(PARSED_COMPONENTS, + (scheme, userinfo, host, port, path, query, fragment)) + attrs_dict = {} + for name, value in attributes: + if value is misc.UseExisting: + value = getattr(self, name) + attrs_dict[name] = value + authority = self._generate_authority(attrs_dict) + ref = self.reference.copy_with(scheme=attrs_dict['scheme'], + authority=authority, + path=attrs_dict['path'], + query=attrs_dict['query'], + fragment=attrs_dict['fragment']) + return ParseResult(uri_ref=ref, encoding=self.encoding, **attrs_dict) + + def encode(self, encoding=None): + """Convert to an instance of ParseResultBytes.""" + encoding = encoding or self.encoding + attrs = dict( + zip(PARSED_COMPONENTS, + (attr.encode(encoding) if hasattr(attr, 'encode') else attr + for attr in self))) + return ParseResultBytes( + uri_ref=self.reference, + encoding=encoding, + **attrs + ) + + def unsplit(self, use_idna=False): + """Create a URI string from the components. + + :returns: The parsed URI reconstituted as a string. 
+ :rtype: str + """ + parse_result = self + if use_idna and self.host: + hostbytes = self.host.encode('idna') + host = hostbytes.decode(self.encoding) + parse_result = self.copy_with(host=host) + return parse_result.reference.unsplit() + + +class ParseResultBytes(namedtuple('ParseResultBytes', PARSED_COMPONENTS), + ParseResultMixin): + """Compatibility shim for the urlparse.ParseResultBytes object.""" + + def __new__(cls, scheme, userinfo, host, port, path, query, fragment, + uri_ref, encoding='utf-8', lazy_normalize=True): + """Create a new ParseResultBytes instance.""" + parse_result = super(ParseResultBytes, cls).__new__( + cls, + scheme or None, + userinfo or None, + host, + port or None, + path or None, + query or None, + fragment or None) + parse_result.encoding = encoding + parse_result.reference = uri_ref + parse_result.lazy_normalize = lazy_normalize + return parse_result + + @classmethod + def from_parts(cls, scheme=None, userinfo=None, host=None, port=None, + path=None, query=None, fragment=None, encoding='utf-8', + lazy_normalize=True): + """Create a ParseResult instance from its parts.""" + authority = '' + if userinfo is not None: + authority += userinfo + '@' + if host is not None: + authority += host + if port is not None: + authority += ':{0}'.format(int(port)) + uri_ref = uri.URIReference(scheme=scheme, + authority=authority, + path=path, + query=query, + fragment=fragment, + encoding=encoding) + if not lazy_normalize: + uri_ref = uri_ref.normalize() + to_bytes = compat.to_bytes + userinfo, host, port = authority_from(uri_ref, strict=True) + return cls(scheme=to_bytes(scheme, encoding), + userinfo=to_bytes(userinfo, encoding), + host=to_bytes(host, encoding), + port=port, + path=to_bytes(path, encoding), + query=to_bytes(query, encoding), + fragment=to_bytes(fragment, encoding), + uri_ref=uri_ref, + encoding=encoding, + lazy_normalize=lazy_normalize) + + @classmethod + def from_string(cls, uri_string, encoding='utf-8', strict=True, + lazy_normalize=True): + """Parse a URI from the given unicode URI string. + + :param str uri_string: Unicode URI to be parsed into a reference. + :param str encoding: The encoding of the string provided + :param bool strict: Parse strictly according to :rfc:`3986` if True. + If False, parse similarly to the standard library's urlparse + function. 
+ :returns: :class:`ParseResultBytes` or subclass thereof + """ + reference = uri.URIReference.from_string(uri_string, encoding) + if not lazy_normalize: + reference = reference.normalize() + userinfo, host, port = authority_from(reference, strict) + + to_bytes = compat.to_bytes + return cls(scheme=to_bytes(reference.scheme, encoding), + userinfo=to_bytes(userinfo, encoding), + host=to_bytes(host, encoding), + port=port, + path=to_bytes(reference.path, encoding), + query=to_bytes(reference.query, encoding), + fragment=to_bytes(reference.fragment, encoding), + uri_ref=reference, + encoding=encoding, + lazy_normalize=lazy_normalize) + + @property + def authority(self): + """Return the normalized authority.""" + return self.reference.authority.encode(self.encoding) + + def copy_with(self, scheme=misc.UseExisting, userinfo=misc.UseExisting, + host=misc.UseExisting, port=misc.UseExisting, + path=misc.UseExisting, query=misc.UseExisting, + fragment=misc.UseExisting, lazy_normalize=True): + """Create a copy of this instance replacing with specified parts.""" + attributes = zip(PARSED_COMPONENTS, + (scheme, userinfo, host, port, path, query, fragment)) + attrs_dict = {} + for name, value in attributes: + if value is misc.UseExisting: + value = getattr(self, name) + if not isinstance(value, bytes) and hasattr(value, 'encode'): + value = value.encode(self.encoding) + attrs_dict[name] = value + authority = self._generate_authority(attrs_dict) + to_str = compat.to_str + ref = self.reference.copy_with( + scheme=to_str(attrs_dict['scheme'], self.encoding), + authority=to_str(authority, self.encoding), + path=to_str(attrs_dict['path'], self.encoding), + query=to_str(attrs_dict['query'], self.encoding), + fragment=to_str(attrs_dict['fragment'], self.encoding) + ) + if not lazy_normalize: + ref = ref.normalize() + return ParseResultBytes( + uri_ref=ref, + encoding=self.encoding, + lazy_normalize=lazy_normalize, + **attrs_dict + ) + + def unsplit(self, use_idna=False): + """Create a URI bytes object from the components. + + :returns: The parsed URI reconstituted as a string. 
+ :rtype: bytes + """ + parse_result = self + if use_idna and self.host: + # self.host is bytes, to encode to idna, we need to decode it + # first + host = self.host.decode(self.encoding) + hostbytes = host.encode('idna') + parse_result = self.copy_with(host=hostbytes) + if self.lazy_normalize: + parse_result = parse_result.copy_with(lazy_normalize=False) + uri = parse_result.reference.unsplit() + return uri.encode(self.encoding) + + +def split_authority(authority): + # Initialize our expected return values + userinfo = host = port = None + # Initialize an extra var we may need to use + extra_host = None + # Set-up rest in case there is no userinfo portion + rest = authority + + if '@' in authority: + userinfo, rest = authority.rsplit('@', 1) + + # Handle IPv6 host addresses + if rest.startswith('['): + host, rest = rest.split(']', 1) + host += ']' + + if ':' in rest: + extra_host, port = rest.split(':', 1) + elif not host and rest: + host = rest + + if extra_host and not host: + host = extra_host + + return userinfo, host, port + + +def authority_from(reference, strict): + try: + subauthority = reference.authority_info() + except exceptions.InvalidAuthority: + if strict: + raise + userinfo, host, port = split_authority(reference.authority) + else: + # Thanks to Richard Barrell for this idea: + # https://twitter.com/0x2ba22e11/status/617338811975139328 + userinfo, host, port = (subauthority.get(p) + for p in ('userinfo', 'host', 'port')) + + if port: + try: + port = int(port) + except ValueError: + raise exceptions.InvalidPort(port) + return userinfo, host, port diff --git a/src/urllib3/packages/rfc3986/uri.py b/src/urllib3/packages/rfc3986/uri.py new file mode 100644 --- /dev/null +++ b/src/urllib3/packages/rfc3986/uri.py @@ -0,0 +1,492 @@ +"""Module containing the implementation of the URIReference class.""" +# -*- coding: utf-8 -*- +# Copyright (c) 2014 Rackspace +# Copyright (c) 2015 Ian Stapleton Cordasco +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. +from collections import namedtuple +import warnings + +from . import compat +from . import exceptions as exc +from . import misc +from . import normalizers +from . import validators + + +class URIReference(namedtuple('URIReference', misc.URI_COMPONENTS)): + """Immutable object representing a parsed URI Reference. + + .. note:: + + This class is not intended to be directly instantiated by the user. + + This object exposes attributes for the following components of a + URI: + + - scheme + - authority + - path + - query + - fragment + + .. attribute:: scheme + + The scheme that was parsed for the URI Reference. For example, + ``http``, ``https``, ``smtp``, ``imap``, etc. + + .. attribute:: authority + + Component of the URI that contains the user information, host, + and port sub-components. For example, + ``google.com``, ``127.0.0.1:5000``, ``username@[::1]``, + ``username:[email protected]:443``, etc. + + .. attribute:: path + + The path that was parsed for the given URI Reference. For example, + ``/``, ``/index.php``, etc. + + .. 
attribute:: query + + The query component for a given URI Reference. For example, ``a=b``, + ``a=b%20c``, ``a=b+c``, ``a=b,c=d,e=%20f``, etc. + + .. attribute:: fragment + + The fragment component of a URI. For example, ``section-3.1``. + + This class also provides extra attributes for easier access to information + like the subcomponents of the authority component. + + .. attribute:: userinfo + + The user information parsed from the authority. + + .. attribute:: host + + The hostname, IPv4, or IPv6 adddres parsed from the authority. + + .. attribute:: port + + The port parsed from the authority. + """ + + slots = () + + def __new__(cls, scheme, authority, path, query, fragment, + encoding='utf-8'): + """Create a new URIReference.""" + ref = super(URIReference, cls).__new__( + cls, + scheme or None, + authority or None, + path or None, + query, + fragment) + ref.encoding = encoding + return ref + + __hash__ = tuple.__hash__ + + def __eq__(self, other): + """Compare this reference to another.""" + other_ref = other + if isinstance(other, tuple): + other_ref = URIReference(*other) + elif not isinstance(other, URIReference): + try: + other_ref = URIReference.from_string(other) + except TypeError: + raise TypeError( + 'Unable to compare URIReference() to {0}()'.format( + type(other).__name__)) + + # See http://tools.ietf.org/html/rfc3986#section-6.2 + naive_equality = tuple(self) == tuple(other_ref) + return naive_equality or self.normalized_equality(other_ref) + + @classmethod + def from_string(cls, uri_string, encoding='utf-8'): + """Parse a URI reference from the given unicode URI string. + + :param str uri_string: Unicode URI to be parsed into a reference. + :param str encoding: The encoding of the string provided + :returns: :class:`URIReference` or subclass thereof + """ + uri_string = compat.to_str(uri_string, encoding) + + split_uri = misc.URI_MATCHER.match(uri_string).groupdict() + return cls( + split_uri['scheme'], split_uri['authority'], + normalizers.encode_component(split_uri['path'], encoding), + normalizers.encode_component(split_uri['query'], encoding), + normalizers.encode_component(split_uri['fragment'], encoding), + encoding, + ) + + def authority_info(self): + """Return a dictionary with the ``userinfo``, ``host``, and ``port``. + + If the authority is not valid, it will raise a + :class:`~rfc3986.exceptions.InvalidAuthority` Exception. + + :returns: + ``{'userinfo': 'username:password', 'host': 'www.example.com', + 'port': '80'}`` + :rtype: dict + :raises rfc3986.exceptions.InvalidAuthority: + If the authority is not ``None`` and can not be parsed. + """ + if not self.authority: + return {'userinfo': None, 'host': None, 'port': None} + + match = misc.SUBAUTHORITY_MATCHER.match(self.authority) + + if match is None: + # In this case, we have an authority that was parsed from the URI + # Reference, but it cannot be further parsed by our + # misc.SUBAUTHORITY_MATCHER. In this case it must not be a valid + # authority. + raise exc.InvalidAuthority(self.authority.encode(self.encoding)) + + # We had a match, now let's ensure that it is actually a valid host + # address if it is IPv4 + matches = match.groupdict() + host = matches.get('host') + + if (host and misc.IPv4_MATCHER.match(host) and not + validators.valid_ipv4_host_address(host)): + # If we have a host, it appears to be IPv4 and it does not have + # valid bytes, it is an InvalidAuthority. 
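+            # Each dotted group must fall in the 0-255 range, so a host such
+            # as "999.0.0.1" matches the IPv4 pattern but is still rejected
+            # here.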
+ raise exc.InvalidAuthority(self.authority.encode(self.encoding)) + + return matches + + @property + def host(self): + """If present, a string representing the host.""" + try: + authority = self.authority_info() + except exc.InvalidAuthority: + return None + return authority['host'] + + @property + def port(self): + """If present, the port extracted from the authority.""" + try: + authority = self.authority_info() + except exc.InvalidAuthority: + return None + return authority['port'] + + @property + def userinfo(self): + """If present, the userinfo extracted from the authority.""" + try: + authority = self.authority_info() + except exc.InvalidAuthority: + return None + return authority['userinfo'] + + def is_absolute(self): + """Determine if this URI Reference is an absolute URI. + + See http://tools.ietf.org/html/rfc3986#section-4.3 for explanation. + + :returns: ``True`` if it is an absolute URI, ``False`` otherwise. + :rtype: bool + """ + return bool(misc.ABSOLUTE_URI_MATCHER.match(self.unsplit())) + + def is_valid(self, **kwargs): + """Determine if the URI is valid. + + .. deprecated:: 1.1.0 + + Use the :class:`~rfc3986.validators.Validator` object instead. + + :param bool require_scheme: Set to ``True`` if you wish to require the + presence of the scheme component. + :param bool require_authority: Set to ``True`` if you wish to require + the presence of the authority component. + :param bool require_path: Set to ``True`` if you wish to require the + presence of the path component. + :param bool require_query: Set to ``True`` if you wish to require the + presence of the query component. + :param bool require_fragment: Set to ``True`` if you wish to require + the presence of the fragment component. + :returns: ``True`` if the URI is valid. ``False`` otherwise. + :rtype: bool + """ + warnings.warn("Please use rfc3986.validators.Validator instead. " + "This method will be eventually removed.", + DeprecationWarning) + validators = [ + (self.scheme_is_valid, kwargs.get('require_scheme', False)), + (self.authority_is_valid, kwargs.get('require_authority', False)), + (self.path_is_valid, kwargs.get('require_path', False)), + (self.query_is_valid, kwargs.get('require_query', False)), + (self.fragment_is_valid, kwargs.get('require_fragment', False)), + ] + return all(v(r) for v, r in validators) + + def authority_is_valid(self, require=False): + """Determine if the authority component is valid. + + .. deprecated:: 1.1.0 + + Use the :class:`~rfc3986.validators.Validator` object instead. + + :param bool require: + Set to ``True`` to require the presence of this component. + :returns: + ``True`` if the authority is valid. ``False`` otherwise. + :rtype: + bool + """ + warnings.warn("Please use rfc3986.validators.Validator instead. " + "This method will be eventually removed.", + DeprecationWarning) + try: + self.authority_info() + except exc.InvalidAuthority: + return False + + return validators.authority_is_valid( + self.authority, + host=self.host, + require=require, + ) + + def scheme_is_valid(self, require=False): + """Determine if the scheme component is valid. + + .. deprecated:: 1.1.0 + + Use the :class:`~rfc3986.validators.Validator` object instead. + + :param str require: Set to ``True`` to require the presence of this + component. + :returns: ``True`` if the scheme is valid. ``False`` otherwise. + :rtype: bool + """ + warnings.warn("Please use rfc3986.validators.Validator instead. 
" + "This method will be eventually removed.", + DeprecationWarning) + return validators.scheme_is_valid(self.scheme, require) + + def path_is_valid(self, require=False): + """Determine if the path component is valid. + + .. deprecated:: 1.1.0 + + Use the :class:`~rfc3986.validators.Validator` object instead. + + :param str require: Set to ``True`` to require the presence of this + component. + :returns: ``True`` if the path is valid. ``False`` otherwise. + :rtype: bool + """ + warnings.warn("Please use rfc3986.validators.Validator instead. " + "This method will be eventually removed.", + DeprecationWarning) + return validators.path_is_valid(self.path, require) + + def query_is_valid(self, require=False): + """Determine if the query component is valid. + + .. deprecated:: 1.1.0 + + Use the :class:`~rfc3986.validators.Validator` object instead. + + :param str require: Set to ``True`` to require the presence of this + component. + :returns: ``True`` if the query is valid. ``False`` otherwise. + :rtype: bool + """ + warnings.warn("Please use rfc3986.validators.Validator instead. " + "This method will be eventually removed.", + DeprecationWarning) + return validators.query_is_valid(self.query, require) + + def fragment_is_valid(self, require=False): + """Determine if the fragment component is valid. + + .. deprecated:: 1.1.0 + + Use the Validator object instead. + + :param str require: Set to ``True`` to require the presence of this + component. + :returns: ``True`` if the fragment is valid. ``False`` otherwise. + :rtype: bool + """ + warnings.warn("Please use rfc3986.validators.Validator instead. " + "This method will be eventually removed.", + DeprecationWarning) + return validators.fragment_is_valid(self.fragment, require) + + def normalize(self): + """Normalize this reference as described in Section 6.2.2. + + This is not an in-place normalization. Instead this creates a new + URIReference. + + :returns: A new reference object with normalized components. + :rtype: URIReference + """ + # See http://tools.ietf.org/html/rfc3986#section-6.2.2 for logic in + # this method. + return URIReference(normalizers.normalize_scheme(self.scheme or ''), + normalizers.normalize_authority( + (self.userinfo, self.host, self.port)), + normalizers.normalize_path(self.path or ''), + normalizers.normalize_query(self.query), + normalizers.normalize_fragment(self.fragment), + self.encoding) + + def normalized_equality(self, other_ref): + """Compare this URIReference to another URIReference. + + :param URIReference other_ref: (required), The reference with which + we're comparing. + :returns: ``True`` if the references are equal, ``False`` otherwise. + :rtype: bool + """ + return tuple(self.normalize()) == tuple(other_ref.normalize()) + + def resolve_with(self, base_uri, strict=False): + """Use an absolute URI Reference to resolve this relative reference. + + Assuming this is a relative reference that you would like to resolve, + use the provided base URI to resolve it. + + See http://tools.ietf.org/html/rfc3986#section-5 for more information. + + :param base_uri: Either a string or URIReference. It must be an + absolute URI or it will raise an exception. + :returns: A new URIReference which is the result of resolving this + reference using ``base_uri``. + :rtype: :class:`URIReference` + :raises rfc3986.exceptions.ResolutionError: + If the ``base_uri`` is not an absolute URI. 
+ """ + if not isinstance(base_uri, URIReference): + base_uri = URIReference.from_string(base_uri) + + if not base_uri.is_absolute(): + raise exc.ResolutionError(base_uri) + + # This is optional per + # http://tools.ietf.org/html/rfc3986#section-5.2.1 + base_uri = base_uri.normalize() + + # The reference we're resolving + resolving = self + + if not strict and resolving.scheme == base_uri.scheme: + resolving = resolving.copy_with(scheme=None) + + # http://tools.ietf.org/html/rfc3986#page-32 + if resolving.scheme is not None: + target = resolving.copy_with( + path=normalizers.normalize_path(resolving.path) + ) + else: + if resolving.authority is not None: + target = resolving.copy_with( + scheme=base_uri.scheme, + path=normalizers.normalize_path(resolving.path) + ) + else: + if resolving.path is None: + if resolving.query is not None: + query = resolving.query + else: + query = base_uri.query + target = resolving.copy_with( + scheme=base_uri.scheme, + authority=base_uri.authority, + path=base_uri.path, + query=query + ) + else: + if resolving.path.startswith('/'): + path = normalizers.normalize_path(resolving.path) + else: + path = normalizers.normalize_path( + misc.merge_paths(base_uri, resolving.path) + ) + target = resolving.copy_with( + scheme=base_uri.scheme, + authority=base_uri.authority, + path=path, + query=resolving.query + ) + return target + + def unsplit(self): + """Create a URI string from the components. + + :returns: The URI Reference reconstituted as a string. + :rtype: str + """ + # See http://tools.ietf.org/html/rfc3986#section-5.3 + result_list = [] + if self.scheme: + result_list.extend([self.scheme, ':']) + if self.authority: + result_list.extend(['//', self.authority]) + if self.path: + result_list.append(self.path) + if self.query is not None: + result_list.extend(['?', self.query]) + if self.fragment is not None: + result_list.extend(['#', self.fragment]) + return ''.join(result_list) + + def copy_with(self, scheme=misc.UseExisting, authority=misc.UseExisting, + path=misc.UseExisting, query=misc.UseExisting, + fragment=misc.UseExisting): + """Create a copy of this reference with the new components. + + :param str scheme: + (optional) The scheme to use for the new reference. + :param str authority: + (optional) The authority to use for the new reference. + :param str path: + (optional) The path to use for the new reference. + :param str query: + (optional) The query to use for the new reference. + :param str fragment: + (optional) The fragment to use for the new reference. + :returns: + New URIReference with provided components. + :rtype: + URIReference + """ + attributes = { + 'scheme': scheme, + 'authority': authority, + 'path': path, + 'query': query, + 'fragment': fragment, + } + for key, value in list(attributes.items()): + if value is misc.UseExisting: + del attributes[key] + uri = self._replace(**attributes) + uri.encoding = self.encoding + return uri diff --git a/src/urllib3/packages/rfc3986/validators.py b/src/urllib3/packages/rfc3986/validators.py new file mode 100644 --- /dev/null +++ b/src/urllib3/packages/rfc3986/validators.py @@ -0,0 +1,428 @@ +# -*- coding: utf-8 -*- +# Copyright (c) 2017 Ian Stapleton Cordasco +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. +"""Module containing the validation logic for rfc3986.""" +from . import exceptions +from . import misc +from . import normalizers + + +class Validator(object): + """Object used to configure validation of all objects in rfc3986. + + .. versionadded:: 1.0 + + Example usage:: + + >>> from rfc3986 import api, validators + >>> uri = api.uri_reference('https://github.com/') + >>> validator = validators.Validator().require_presence_of( + ... 'scheme', 'host', 'path', + ... ).allow_schemes( + ... 'http', 'https', + ... ).allow_hosts( + ... '127.0.0.1', 'github.com', + ... ) + >>> validator.validate(uri) + >>> invalid_uri = rfc3986.uri_reference('imap://mail.google.com') + >>> validator.validate(invalid_uri) + Traceback (most recent call last): + ... + rfc3986.exceptions.MissingComponentError: ('path was required but + missing', URIReference(scheme=u'imap', authority=u'mail.google.com', + path=None, query=None, fragment=None), ['path']) + + """ + + COMPONENT_NAMES = frozenset([ + 'scheme', + 'userinfo', + 'host', + 'port', + 'path', + 'query', + 'fragment', + ]) + + def __init__(self): + """Initialize our default validations.""" + self.allowed_schemes = set() + self.allowed_hosts = set() + self.allowed_ports = set() + self.allow_password = True + self.required_components = { + 'scheme': False, + 'userinfo': False, + 'host': False, + 'port': False, + 'path': False, + 'query': False, + 'fragment': False, + } + self.validated_components = self.required_components.copy() + + def allow_schemes(self, *schemes): + """Require the scheme to be one of the provided schemes. + + .. versionadded:: 1.0 + + :param schemes: + Schemes, without ``://`` that are allowed. + :returns: + The validator instance. + :rtype: + Validator + """ + for scheme in schemes: + self.allowed_schemes.add(normalizers.normalize_scheme(scheme)) + return self + + def allow_hosts(self, *hosts): + """Require the host to be one of the provided hosts. + + .. versionadded:: 1.0 + + :param hosts: + Hosts that are allowed. + :returns: + The validator instance. + :rtype: + Validator + """ + for host in hosts: + self.allowed_hosts.add(normalizers.normalize_host(host)) + return self + + def allow_ports(self, *ports): + """Require the port to be one of the provided ports. + + .. versionadded:: 1.0 + + :param ports: + Ports that are allowed. + :returns: + The validator instance. + :rtype: + Validator + """ + for port in ports: + port_int = int(port, base=10) + if 0 <= port_int <= 65535: + self.allowed_ports.add(port) + return self + + def allow_use_of_password(self): + """Allow passwords to be present in the URI. + + .. versionadded:: 1.0 + + :returns: + The validator instance. + :rtype: + Validator + """ + self.allow_password = True + return self + + def forbid_use_of_password(self): + """Prevent passwords from being included in the URI. + + .. versionadded:: 1.0 + + :returns: + The validator instance. + :rtype: + Validator + """ + self.allow_password = False + return self + + def check_validity_of(self, *components): + """Check the validity of the components provided. + + This can be specified repeatedly. + + .. 
versionadded:: 1.1 + + :param components: + Names of components from :attr:`Validator.COMPONENT_NAMES`. + :returns: + The validator instance. + :rtype: + Validator + """ + components = [c.lower() for c in components] + for component in components: + if component not in self.COMPONENT_NAMES: + raise ValueError( + '"{}" is not a valid component'.format(component) + ) + self.validated_components.update({ + component: True for component in components + }) + return self + + def require_presence_of(self, *components): + """Require the components provided. + + This can be specified repeatedly. + + .. versionadded:: 1.0 + + :param components: + Names of components from :attr:`Validator.COMPONENT_NAMES`. + :returns: + The validator instance. + :rtype: + Validator + """ + components = [c.lower() for c in components] + for component in components: + if component not in self.COMPONENT_NAMES: + raise ValueError( + '"{}" is not a valid component'.format(component) + ) + self.required_components.update({ + component: True for component in components + }) + return self + + def validate(self, uri): + """Check a URI for conditions specified on this validator. + + .. versionadded:: 1.0 + + :param uri: + Parsed URI to validate. + :type uri: + rfc3986.uri.URIReference + :raises MissingComponentError: + When a required component is missing. + :raises UnpermittedComponentError: + When a component is not one of those allowed. + :raises PasswordForbidden: + When a password is present in the userinfo component but is + not permitted by configuration. + :raises InvalidComponentsError: + When a component was found to be invalid. + """ + if not self.allow_password: + check_password(uri) + + required_components = [ + component + for component, required in self.required_components.items() + if required + ] + validated_components = [ + component + for component, required in self.validated_components.items() + if required + ] + if required_components: + ensure_required_components_exist(uri, required_components) + if validated_components: + ensure_components_are_valid(uri, validated_components) + + ensure_one_of(self.allowed_schemes, uri, 'scheme') + ensure_one_of(self.allowed_hosts, uri, 'host') + ensure_one_of(self.allowed_ports, uri, 'port') + + +def check_password(uri): + """Assert that there is no password present in the uri.""" + userinfo = uri.userinfo + if not userinfo: + return + credentials = userinfo.split(':', 1) + if len(credentials) <= 1: + return + raise exceptions.PasswordForbidden(uri) + + +def ensure_one_of(allowed_values, uri, attribute): + """Assert that the uri's attribute is one of the allowed values.""" + value = getattr(uri, attribute) + if value is not None and allowed_values and value not in allowed_values: + raise exceptions.UnpermittedComponentError( + attribute, value, allowed_values, + ) + + +def ensure_required_components_exist(uri, required_components): + """Assert that all required components are present in the URI.""" + missing_components = sorted([ + component + for component in required_components + if getattr(uri, component) is None + ]) + if missing_components: + raise exceptions.MissingComponentError(uri, *missing_components) + + +def is_valid(value, matcher, require): + """Determine if a value is valid based on the provided matcher. + + :param str value: + Value to validate. + :param matcher: + Compiled regular expression to use to validate the value. + :param require: + Whether or not the value is required. 
+ """ + if require: + return (value is not None + and matcher.match(value)) + + # require is False and value is not None + return value is None or matcher.match(value) + + +def authority_is_valid(authority, host=None, require=False): + """Determine if the authority string is valid. + + :param str authority: + The authority to validate. + :param str host: + (optional) The host portion of the authority to validate. + :param bool require: + (optional) Specify if authority must not be None. + :returns: + ``True`` if valid, ``False`` otherwise + :rtype: + bool + """ + validated = is_valid(authority, misc.SUBAUTHORITY_MATCHER, require) + if validated and host is not None and misc.IPv4_MATCHER.match(host): + return valid_ipv4_host_address(host) + return validated + + +def scheme_is_valid(scheme, require=False): + """Determine if the scheme is valid. + + :param str scheme: + The scheme string to validate. + :param bool require: + (optional) Set to ``True`` to require the presence of a scheme. + :returns: + ``True`` if the scheme is valid. ``False`` otherwise. + :rtype: + bool + """ + return is_valid(scheme, misc.SCHEME_MATCHER, require) + + +def path_is_valid(path, require=False): + """Determine if the path component is valid. + + :param str path: + The path string to validate. + :param bool require: + (optional) Set to ``True`` to require the presence of a path. + :returns: + ``True`` if the path is valid. ``False`` otherwise. + :rtype: + bool + """ + return is_valid(path, misc.PATH_MATCHER, require) + + +def query_is_valid(query, require=False): + """Determine if the query component is valid. + + :param str query: + The query string to validate. + :param bool require: + (optional) Set to ``True`` to require the presence of a query. + :returns: + ``True`` if the query is valid. ``False`` otherwise. + :rtype: + bool + """ + return is_valid(query, misc.QUERY_MATCHER, require) + + +def fragment_is_valid(fragment, require=False): + """Determine if the fragment component is valid. + + :param str fragment: + The fragment string to validate. + :param bool require: + (optional) Set to ``True`` to require the presence of a fragment. + :returns: + ``True`` if the fragment is valid. ``False`` otherwise. + :rtype: + bool + """ + return is_valid(fragment, misc.FRAGMENT_MATCHER, require) + + +def valid_ipv4_host_address(host): + """Determine if the given host is a valid IPv4 address.""" + # If the host exists, and it might be IPv4, check each byte in the + # address. + return all([0 <= int(byte, base=10) <= 255 for byte in host.split('.')]) + + +_COMPONENT_VALIDATORS = { + 'scheme': scheme_is_valid, + 'path': path_is_valid, + 'query': query_is_valid, + 'fragment': fragment_is_valid, +} + +_SUBAUTHORITY_VALIDATORS = set(['userinfo', 'host', 'port']) + + +def subauthority_component_is_valid(uri, component): + """Determine if the userinfo, host, and port are valid.""" + try: + subauthority_dict = uri.authority_info() + except exceptions.InvalidAuthority: + return False + + # If we can parse the authority into sub-components and we're not + # validating the port, we can assume it's valid. 
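+    # userinfo and host were already vetted when authority_info() parsed the
+    # authority above, so only the port still needs its numeric range checked.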
+ if component != 'port': + return True + + try: + port = int(subauthority_dict['port']) + except TypeError: + # If the port wasn't provided it'll be None and int(None) raises a + # TypeError + return True + + return (0 <= port <= 65535) + + +def ensure_components_are_valid(uri, validated_components): + """Assert that all components are valid in the URI.""" + invalid_components = set([]) + for component in validated_components: + if component in _SUBAUTHORITY_VALIDATORS: + if not subauthority_component_is_valid(uri, component): + invalid_components.add(component) + # Python's peephole optimizer means that while this continue *is* + # actually executed, coverage.py cannot detect that. See also, + # https://bitbucket.org/ned/coveragepy/issues/198/continue-marked-as-not-covered + continue # nocov: Python 2.7, 3.3, 3.4 + + validator = _COMPONENT_VALIDATORS[component] + if not validator(getattr(uri, component)): + invalid_components.add(component) + + if invalid_components: + raise exceptions.InvalidComponentsError(uri, *invalid_components) diff --git a/src/urllib3/util/ssl_.py b/src/urllib3/util/ssl_.py --- a/src/urllib3/util/ssl_.py +++ b/src/urllib3/util/ssl_.py @@ -2,13 +2,14 @@ import errno import warnings import hmac -import socket +import re from binascii import hexlify, unhexlify from hashlib import md5, sha1, sha256 from ..exceptions import SSLError, InsecurePlatformWarning, SNIMissingWarning from ..packages import six +from ..packages.rfc3986 import abnf_regexp SSLContext = None @@ -40,6 +41,16 @@ def _const_compare_digest_backport(a, b): _const_compare_digest = getattr(hmac, 'compare_digest', _const_compare_digest_backport) +# Borrow rfc3986's regular expressions for IPv4 +# and IPv6 addresses for use in is_ipaddress() +_IP_ADDRESS_REGEX = re.compile( + r'^(?:%s|%s|%s|%s)$' % ( + abnf_regexp.IPv4_RE, + abnf_regexp.IPv6_RE, + abnf_regexp.IPv6_ADDRZ_RE, + abnf_regexp.IPv_FUTURE_RE + ) +) try: # Test for SSL features import ssl @@ -56,25 +67,6 @@ def _const_compare_digest_backport(a, b): OP_NO_COMPRESSION = 0x20000 -# Python 2.7 doesn't have inet_pton on non-Linux so we fallback on inet_aton in -# those cases. This means that we can only detect IPv4 addresses in this case. -if hasattr(socket, 'inet_pton'): - inet_pton = socket.inet_pton -else: - # Maybe we can use ipaddress if the user has urllib3[secure]? - try: - import ipaddress - - def inet_pton(_, host): - if isinstance(host, bytes): - host = host.decode('ascii') - return ipaddress.ip_address(host) - - except ImportError: # Platform-specific: Non-Linux - def inet_pton(_, host): - return socket.inet_aton(host) - - # A secure default. # Sources for more information on TLS ciphers: # @@ -365,15 +357,4 @@ def is_ipaddress(hostname): # IDN A-label bytes are ASCII compatible. 
hostname = hostname.decode('ascii') - families = [socket.AF_INET] - if hasattr(socket, 'AF_INET6'): - families.append(socket.AF_INET6) - - for af in families: - try: - inet_pton(af, hostname) - except (socket.error, ValueError, OSError): - pass - else: - return True - return False + return _IP_ADDRESS_REGEX.match(hostname) is not None diff --git a/src/urllib3/util/url.py b/src/urllib3/util/url.py --- a/src/urllib3/util/url.py +++ b/src/urllib3/util/url.py @@ -1,7 +1,10 @@ from __future__ import absolute_import +import re from collections import namedtuple from ..exceptions import LocationParseError +from ..packages import six, rfc3986 +from ..packages.rfc3986.exceptions import RFC3986Exception url_attrs = ['scheme', 'auth', 'host', 'port', 'path', 'query', 'fragment'] @@ -10,6 +13,9 @@ # urllib3 infers URLs without a scheme (None) to be http. NORMALIZABLE_SCHEMES = ('http', 'https', None) +# Regex for detecting URLs with schemes. RFC 3986 Section 3.1 +SCHEME_REGEX = re.compile(r"^[a-zA-Z][a-zA-Z0-9+\-.]*://") + class Url(namedtuple('Url', url_attrs)): """ @@ -98,6 +104,8 @@ def __str__(self): def split_first(s, delims): """ + Deprecated. No longer used by parse_url(). + Given a string and an iterable of delimiters, split on the first found delimiter. Return two split parts and the matched delimiter. @@ -133,6 +141,9 @@ def parse_url(url): """ Given a url, return a parsed :class:`.Url` namedtuple. Best-effort is performed to parse incomplete urls. Fields not provided will be None. + This parser is RFC 3986 compliant. + + :param str url: URL to parse into a :class:`.Url` namedtuple. Partly backwards-compatible with :mod:`urlparse`. @@ -145,81 +156,55 @@ def parse_url(url): >>> parse_url('/foo?bar') Url(scheme=None, host=None, port=None, path='/foo', query='bar', ...) """ - - # While this code has overlap with stdlib's urlparse, it is much - # simplified for our needs and less annoying. - # Additionally, this implementations does silly things to be optimal - # on CPython. - if not url: # Empty return Url() - scheme = None - auth = None - host = None - port = None - path = None - fragment = None - query = None - - # Scheme - if '://' in url: - scheme, url = url.split('://', 1) - - # Find the earliest Authority Terminator - # (http://tools.ietf.org/html/rfc3986#section-3.2) - url, path_, delim = split_first(url, ['/', '?', '#']) - - if delim: - # Reassemble the path - path = delim + path_ - - # Auth - if '@' in url: - # Last '@' denotes end of auth part - auth, url = url.rsplit('@', 1) - - # IPv6 - if url and url[0] == '[': - host, url = url.split(']', 1) - host += ']' - - # Port - if ':' in url: - _host, port = url.split(':', 1) - - if not host: - host = _host - - if port: - # If given, ports must be integers. No whitespace, no plus or - # minus prefixes, no non-integer digits such as ^2 (superscript). - if not port.isdigit(): - raise LocationParseError(url) - try: - port = int(port) - except ValueError: - raise LocationParseError(url) - else: - # Blank ports are cool, too. (rfc3986#section-3.2.3) - port = None - - elif not host and url: - host = url - + # RFC 3986 doesn't like URLs that have a host but don't start + # with a scheme and we support URLs like that so we need to + # detect that problem and add an empty scheme indication. + # We don't get hurt on path-only URLs here as it's stripped + # off and given an empty scheme anyways. 
+ if not SCHEME_REGEX.search(url): + url = "//" + url + + try: + parse_result = rfc3986.urlparse(url, encoding="utf-8") + except (ValueError, RFC3986Exception): + raise LocationParseError(url) + + # RFC 3986 doesn't assert ports must be non-negative. + if parse_result.port and parse_result.port < 0: + raise LocationParseError(url) + + # For the sake of backwards compatibility we put empty + # string values for path if there are any defined values + # beyond the path in the URL. + # TODO: Remove this when we break backwards compatibility. + path = parse_result.path if not path: - return Url(scheme, auth, host, port, path, query, fragment) - - # Fragment - if '#' in path: - path, fragment = path.split('#', 1) - - # Query - if '?' in path: - path, query = path.split('?', 1) - - return Url(scheme, auth, host, port, path, query, fragment) + if (parse_result.query is not None + or parse_result.fragment is not None): + path = "" + else: + path = None + + # Ensure that each part of the URL is a `str` for + # backwards compatbility. + def to_str(x): + if six.PY2 and isinstance(x, six.string_types): + return x.encode('utf-8') + return x + + return Url( + scheme=to_str(parse_result.scheme), + auth=to_str(parse_result.userinfo), + host=to_str(parse_result.hostname), + port=parse_result.port, + path=to_str(path), + query=to_str(parse_result.query), + fragment=to_str(parse_result.fragment) + ) def get_host(url):
diff --git a/test/test_util.py b/test/test_util.py --- a/test/test_util.py +++ b/test/test_util.py @@ -1,3 +1,4 @@ +# coding: utf-8 import hashlib import warnings import logging @@ -152,12 +153,15 @@ def test_parse_url_normalization(self, url, expected_normalized_url): # Path/query/fragment ('', Url()), ('/', Url(path='/')), + ('/abc/../def', Url(path="/abc/../def")), ('#?/!google.com/?foo#bar', Url(path='', fragment='?/!google.com/?foo#bar')), ('/foo', Url(path='/foo')), ('/foo?bar=baz', Url(path='/foo', query='bar=baz')), ('/foo?bar=baz#banana?apple/orange', Url(path='/foo', query='bar=baz', fragment='banana?apple/orange')), + ('/redirect?target=http://localhost:61020/', Url(path='redirect', + query='target=http://localhost:61020/')), # Port ('http://google.com/', Url('http', host='google.com', path='/')), @@ -170,8 +174,7 @@ def test_parse_url_normalization(self, url, expected_normalized_url): ('http://foo:bar@baz@localhost/', Url('http', auth='foo:bar@baz', host='localhost', - path='/')), - ('http://@', Url('http', host=None, auth='')) + path='/')) ] non_round_tripping_parse_url_host_map = [ @@ -197,9 +200,13 @@ def test_unparse_url(self, url, expected_url): assert url == expected_url.url def test_parse_url_invalid_IPv6(self): - with pytest.raises(ValueError): + with pytest.raises(LocationParseError): parse_url('[::1') + def test_parse_url_negative_port(self): + with pytest.raises(LocationParseError): + parse_url("https://www.google.com:-80/") + def test_Url_str(self): U = Url('http', host='google.com') assert str(U) == U.url @@ -232,6 +239,31 @@ def test_request_uri(self, url, expected_request_uri): def test_netloc(self, url, expected_netloc): assert parse_url(url).netloc == expected_netloc + url_vulnerabilities = [ + # urlparse doesn't follow RFC 3986 Section 3.2 + ("http://google.com#@evil.com/", Url("http", + host="google.com", + path="", + fragment="@evil.com/")), + + # CVE-2016-5699 + ("http://127.0.0.1%0d%0aConnection%3a%20keep-alive", + Url("http", host="127.0.0.1%0d%0aConnection%3a%20keep-alive")), + + # NodeJS unicode -> double dot + (u"http://google.com/\uff2e\uff2e/abc", Url("http", + host="google.com", + path='/%ef%bc%ae%ef%bc%ae/abc')) + ] + + @pytest.mark.parametrize("url, expected_url", url_vulnerabilities) + def test_url_vulnerabilities(self, url, expected_url): + if expected_url is False: + with pytest.raises(LocationParseError): + parse_url(url) + else: + assert parse_url(url) == expected_url + @pytest.mark.parametrize('kwargs, expected', [ ({'accept_encoding': True}, {'accept-encoding': 'gzip,deflate'}),
Improve support for path-only URLs. Encountered while doing a substantial backend rewrite for v2. Currently, `parse_url` doesn't do well with non-urlencoded URLs that are intended to have no host portion, as frequently used by HTTP Location headers:

```python
>>> from urllib3.util.url import parse_url
>>> parse_url('/redirect?target=http://localhost:61020/')
Url(scheme='/redirect?target=http', auth=None, host='localhost', port=61020, path='/', query=None, fragment=None)
```

As you can see here, the `://` in the URL confuses urllib3's URL handling logic, which means it ends up incorrectly assuming that the scheme is `/redirect?target=http`. This is causing problems in the v2 backend, which uses `parse_url` to attempt to isolate out the request URL to put into the HTTP/1.1 request. Frustratingly, urlparse gets this right:

```python
>>> from urllib.parse import urlparse
>>> urlparse('/redirect?target=http://localhost:61020/')
ParseResult(scheme='', netloc='', path='/redirect', params='', query='target=http://localhost:61020/', fragment='')
```

It would be good if `parse_url` could spot a URL that is path-only and handle it sensibly. This is part of a larger problem, which is that `parse_url` and the `Url` class cannot be used to incrementally build up a URL target. This is required to emulate browser logic for redirects if that's a thing that we want to do: it needs to be possible to handle all URLs that could be returned in Location headers, including absolute, scheme-relative, absolute-path, and path-relative, but right now urllib3 can't really do that.

The larger issue doesn't block v2, but this does. It's probably something that @sigmavirus24 could give guidance on if someone wants to pick it up.
The reason urlparse handles this correctly is also the reason why `rfc3986` gets it right:

```
>>> import rfc3986
>>> rfc3986.urlparse('/redirect?target=http://localhost:61020/')
ParseResult(scheme=None, userinfo=None, host=None, port=None, path=u'/redirect', query=u'target=http://localhost:61020/', fragment=None)
```

Our API is just slightly incompatible with the way `parse_url` in urllib3 already works. If we're comfortable breaking that for v2, we can just switch over to a more robust implementation. We still need support for a few things that `parse_url` has grown support for but those should be pretty easy and there are (I think) branches for that work on the repo.

I certainly have no problem with swapping to a more robust implementation. If you're open to making a patch against the v2 branch that swaps that in, that'd be really cool.
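For illustration, here is a minimal standalone sketch of the scheme-detection idea the patch above takes, not urllib3's actual implementation: if the input has no RFC 3986 scheme, treat it as a relative reference before handing it to the parser. It assumes the standalone `rfc3986` package, which exposes the same `urlparse` helper as the copy vendored by the patch.

```python
import re
import rfc3986  # standalone package; the patch vendors an equivalent copy

# RFC 3986 Section 3.1: scheme = ALPHA *( ALPHA / DIGIT / "+" / "-" / "." )
SCHEME_RE = re.compile(r"^[a-zA-Z][a-zA-Z0-9+.-]*://")

def parse(url):
    # Without a scheme, prefix "//" so the "http" inside the query string
    # can't be mistaken for a scheme; rfc3986 then parses the input as a
    # relative reference with a path and a query.
    if not SCHEME_RE.search(url):
        url = "//" + url
    return rfc3986.urlparse(url, encoding="utf-8")

print(parse("/redirect?target=http://localhost:61020/"))
# path and query come out as '/redirect' and 'target=http://localhost:61020/'
```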
2018-11-29T04:16:15Z
[]
[]
urllib3/urllib3
1,489
urllib3__urllib3-1489
[ "1275" ]
f80ff34bffb2018528ccd2ccf82e533d321f2af8
diff --git a/dummyserver/server.py b/dummyserver/server.py --- a/dummyserver/server.py +++ b/dummyserver/server.py @@ -46,6 +46,8 @@ 'certfile': os.path.join(CERTS_PATH, 'client_no_intermediate.pem'), 'keyfile': os.path.join(CERTS_PATH, 'client_intermediate.key'), } +PASSWORD_KEYFILE = os.path.join(CERTS_PATH, 'server_password.key') +PASSWORD_CLIENT_KEYFILE = os.path.join(CERTS_PATH, 'client_password.key') NO_SAN_CERTS = { 'certfile': os.path.join(CERTS_PATH, 'server.no_san.crt'), 'keyfile': DEFAULT_CERTS['keyfile'] diff --git a/src/urllib3/connection.py b/src/urllib3/connection.py --- a/src/urllib3/connection.py +++ b/src/urllib3/connection.py @@ -226,7 +226,8 @@ class HTTPSConnection(HTTPConnection): ssl_version = None def __init__(self, host, port=None, key_file=None, cert_file=None, - strict=None, timeout=socket._GLOBAL_DEFAULT_TIMEOUT, + key_password=None, strict=None, + timeout=socket._GLOBAL_DEFAULT_TIMEOUT, ssl_context=None, server_hostname=None, **kw): HTTPConnection.__init__(self, host, port, strict=strict, @@ -234,6 +235,7 @@ def __init__(self, host, port=None, key_file=None, cert_file=None, self.key_file = key_file self.cert_file = cert_file + self.key_password = key_password self.ssl_context = ssl_context self.server_hostname = server_hostname @@ -255,6 +257,7 @@ def connect(self): sock=conn, keyfile=self.key_file, certfile=self.cert_file, + key_password=self.key_password, ssl_context=self.ssl_context, server_hostname=self.server_hostname ) @@ -272,7 +275,7 @@ class VerifiedHTTPSConnection(HTTPSConnection): assert_fingerprint = None def set_cert(self, key_file=None, cert_file=None, - cert_reqs=None, ca_certs=None, + cert_reqs=None, key_password=None, ca_certs=None, assert_hostname=None, assert_fingerprint=None, ca_cert_dir=None): """ @@ -291,6 +294,7 @@ def set_cert(self, key_file=None, cert_file=None, self.key_file = key_file self.cert_file = cert_file self.cert_reqs = cert_reqs + self.key_password = key_password self.assert_hostname = assert_hostname self.assert_fingerprint = assert_fingerprint self.ca_certs = ca_certs and os.path.expanduser(ca_certs) @@ -338,6 +342,7 @@ def connect(self): sock=conn, keyfile=self.key_file, certfile=self.cert_file, + key_password=self.key_password, ca_certs=self.ca_certs, ca_cert_dir=self.ca_cert_dir, server_hostname=server_hostname, diff --git a/src/urllib3/connectionpool.py b/src/urllib3/connectionpool.py --- a/src/urllib3/connectionpool.py +++ b/src/urllib3/connectionpool.py @@ -746,8 +746,8 @@ class HTTPSConnectionPool(HTTPConnectionPool): If ``assert_hostname`` is False, no verification is done. The ``key_file``, ``cert_file``, ``cert_reqs``, ``ca_certs``, - ``ca_cert_dir``, and ``ssl_version`` are only used if :mod:`ssl` is - available and are fed into :meth:`urllib3.util.ssl_wrap_socket` to upgrade + ``ca_cert_dir``, ``ssl_version``, ``key_password`` are only used if :mod:`ssl` + is available and are fed into :meth:`urllib3.util.ssl_wrap_socket` to upgrade the connection socket into an SSL socket. 
""" @@ -759,7 +759,7 @@ def __init__(self, host, port=None, block=False, headers=None, retries=None, _proxy=None, _proxy_headers=None, key_file=None, cert_file=None, cert_reqs=None, - ca_certs=None, ssl_version=None, + key_password=None, ca_certs=None, ssl_version=None, assert_hostname=None, assert_fingerprint=None, ca_cert_dir=None, **conn_kw): @@ -773,6 +773,7 @@ def __init__(self, host, port=None, self.key_file = key_file self.cert_file = cert_file self.cert_reqs = cert_reqs + self.key_password = key_password self.ca_certs = ca_certs self.ca_cert_dir = ca_cert_dir self.ssl_version = ssl_version @@ -787,6 +788,7 @@ def _prepare_conn(self, conn): if isinstance(conn, VerifiedHTTPSConnection): conn.set_cert(key_file=self.key_file, + key_password=self.key_password, cert_file=self.cert_file, cert_reqs=self.cert_reqs, ca_certs=self.ca_certs, @@ -824,7 +826,9 @@ def _new_conn(self): conn = self.ConnectionCls(host=actual_host, port=actual_port, timeout=self.timeout.connect_timeout, - strict=self.strict, **self.conn_kw) + strict=self.strict, cert_file=self.cert_file, + key_file=self.key_file, key_password=self.key_password, + **self.conn_kw) return self._prepare_conn(conn) diff --git a/src/urllib3/contrib/pyopenssl.py b/src/urllib3/contrib/pyopenssl.py --- a/src/urllib3/contrib/pyopenssl.py +++ b/src/urllib3/contrib/pyopenssl.py @@ -432,7 +432,9 @@ def load_verify_locations(self, cafile=None, capath=None, cadata=None): def load_cert_chain(self, certfile, keyfile=None, password=None): self._ctx.use_certificate_chain_file(certfile) if password is not None: - self._ctx.set_passwd_cb(lambda max_length, prompt_twice, userdata: password) + if not isinstance(password, six.binary_type): + password = password.encode('utf-8') + self._ctx.set_passwd_cb(lambda *_: password) self._ctx.use_privatekey_file(keyfile or certfile) def wrap_socket(self, sock, server_side=False, diff --git a/src/urllib3/poolmanager.py b/src/urllib3/poolmanager.py --- a/src/urllib3/poolmanager.py +++ b/src/urllib3/poolmanager.py @@ -19,7 +19,8 @@ log = logging.getLogger(__name__) SSL_KEYWORDS = ('key_file', 'cert_file', 'cert_reqs', 'ca_certs', - 'ssl_version', 'ca_cert_dir', 'ssl_context') + 'ssl_version', 'ca_cert_dir', 'ssl_context', + 'key_password') # All known keyword arguments that could be provided to the pool manager, its # pools, or the underlying connections. This is used to construct a pool key. @@ -33,6 +34,7 @@ 'key_block', # bool 'key_source_address', # str 'key_key_file', # str + 'key_key_password', # str 'key_cert_file', # str 'key_cert_reqs', # str 'key_ca_certs', # str diff --git a/src/urllib3/util/ssl_.py b/src/urllib3/util/ssl_.py --- a/src/urllib3/util/ssl_.py +++ b/src/urllib3/util/ssl_.py @@ -281,7 +281,7 @@ def create_urllib3_context(ssl_version=None, cert_reqs=None, def ssl_wrap_socket(sock, keyfile=None, certfile=None, cert_reqs=None, ca_certs=None, server_hostname=None, ssl_version=None, ciphers=None, ssl_context=None, - ca_cert_dir=None): + ca_cert_dir=None, key_password=None): """ All arguments except for server_hostname, ssl_context, and ca_cert_dir have the same meaning as they do when using :func:`ssl.wrap_socket`. @@ -297,6 +297,8 @@ def ssl_wrap_socket(sock, keyfile=None, certfile=None, cert_reqs=None, A directory containing CA certificates in multiple separate files, as supported by OpenSSL's -CApath flag or the capath argument to SSLContext.load_verify_locations(). + :param key_password: + Optional password if the keyfile is encrypted. 
""" context = ssl_context if context is None: @@ -321,8 +323,17 @@ def ssl_wrap_socket(sock, keyfile=None, certfile=None, cert_reqs=None, # try to load OS default certs; works well on Windows (require Python3.4+) context.load_default_certs() + # Attempt to detect if we get the goofy behavior of the + # keyfile being encrypted and OpenSSL asking for the + # passphrase via the terminal and instead error out. + if keyfile and key_password is None and _is_key_file_encrypted(keyfile): + raise SSLError("Client private key is encrypted, password is required") + if certfile: - context.load_cert_chain(certfile, keyfile) + if key_password is None: + context.load_cert_chain(certfile, keyfile) + else: + context.load_cert_chain(certfile, keyfile, key_password) # If we detect server_hostname is an IP address then the SNI # extension should not be used according to RFC3546 Section 3.1 @@ -356,5 +367,15 @@ def is_ipaddress(hostname): if six.PY3 and isinstance(hostname, bytes): # IDN A-label bytes are ASCII compatible. hostname = hostname.decode('ascii') - return _IP_ADDRESS_REGEX.match(hostname) is not None + + +def _is_key_file_encrypted(key_file): + """Detects if a key file is encrypted or not.""" + with open(key_file, 'r') as f: + for line in f: + # Look for Proc-Type: 4,ENCRYPTED + if 'ENCRYPTED' in line: + return True + + return False
diff --git a/test/__init__.py b/test/__init__.py --- a/test/__init__.py +++ b/test/__init__.py @@ -137,6 +137,17 @@ def wrapper(*args, **kwargs): return wrapper +def requires_ssl_context_keyfile_password(test): + @functools.wraps(test) + def wrapper(*args, **kwargs): + if ((not ssl_.IS_PYOPENSSL and sys.version_info < (2, 7, 9)) + or ssl_.IS_SECURETRANSPORT): + pytest.skip("%s requires password parameter for " + "SSLContext.load_cert_chain()" % test.__name__) + return test(*args, **kwargs) + return wrapper + + def fails_on_travis_gce(test): """Expect the test to fail on Google Compute Engine instances for Travis. Travis uses GCE for its sudo: enabled builds. diff --git a/test/with_dummyserver/test_https.py b/test/with_dummyserver/test_https.py --- a/test/with_dummyserver/test_https.py +++ b/test/with_dummyserver/test_https.py @@ -17,13 +17,14 @@ DEFAULT_CLIENT_NO_INTERMEDIATE_CERTS, NO_SAN_CERTS, NO_SAN_CA, DEFAULT_CA_DIR, IPV6_ADDR_CERTS, IPV6_ADDR_CA, HAS_IPV6, - IP_SAN_CERTS) + IP_SAN_CERTS, PASSWORD_CLIENT_KEYFILE) from test import ( onlyPy279OrNewer, notSecureTransport, notOpenSSL098, requires_network, + requires_ssl_context_keyfile_password, fails_on_travis_gce, TARPIT_HOST, ) @@ -113,6 +114,38 @@ def test_client_no_intermediate(self): if not ('An existing connection was forcibly closed by the remote host' in str(e)): raise + @requires_ssl_context_keyfile_password + def test_client_key_password(self): + client_cert, client_key = ( + DEFAULT_CLIENT_CERTS['certfile'], + PASSWORD_CLIENT_KEYFILE, + ) + https_pool = HTTPSConnectionPool(self.host, self.port, + key_file=client_key, + cert_file=client_cert, + key_password="letmein") + r = https_pool.request('GET', '/certificate') + subject = json.loads(r.data.decode('utf-8')) + assert subject['organizationalUnitName'].startswith( + 'Testing server cert') + + @requires_ssl_context_keyfile_password + def test_client_encrypted_key_requires_password(self): + client_cert, client_key = ( + DEFAULT_CLIENT_CERTS['certfile'], + PASSWORD_CLIENT_KEYFILE, + ) + https_pool = HTTPSConnectionPool(self.host, self.port, + key_file=client_key, + cert_file=client_cert, + key_password=None) + + with pytest.raises(MaxRetryError) as e: + https_pool.request('GET', '/certificate') + + assert 'password is required' in str(e.value) + assert isinstance(e.value.reason, SSLError) + def test_verified(self): https_pool = HTTPSConnectionPool(self.host, self.port, cert_reqs='CERT_REQUIRED', diff --git a/test/with_dummyserver/test_socketlevel.py b/test/with_dummyserver/test_socketlevel.py --- a/test/with_dummyserver/test_socketlevel.py +++ b/test/with_dummyserver/test_socketlevel.py @@ -12,13 +12,16 @@ ) from urllib3.response import httplib from urllib3.util.ssl_ import HAS_SNI +from urllib3.util import ssl_ from urllib3.util.timeout import Timeout from urllib3.util.retry import Retry from urllib3._collections import HTTPHeaderDict from dummyserver.testcase import SocketDummyServerTestCase, consume_socket from dummyserver.server import ( - DEFAULT_CERTS, DEFAULT_CA, COMBINED_CERT_AND_KEY, get_unreachable_address) + DEFAULT_CERTS, DEFAULT_CA, COMBINED_CERT_AND_KEY, + PASSWORD_KEYFILE, get_unreachable_address +) from .. 
import onlyPy3, LogRecorder @@ -35,7 +38,7 @@ class MimeToolMessage(object): import pytest -from test import fails_on_travis_gce +from test import fails_on_travis_gce, requires_ssl_context_keyfile_password class TestCookies(SocketDummyServerTestCase): @@ -231,6 +234,80 @@ def socket_handler(listener): "certificates" ) + @requires_ssl_context_keyfile_password + def test_client_cert_with_string_password(self): + self.run_client_cert_with_password_test(u"letmein") + + @requires_ssl_context_keyfile_password + def test_client_cert_with_bytes_password(self): + self.run_client_cert_with_password_test(b"letmein") + + def run_client_cert_with_password_test(self, password): + """ + Tests client certificate password functionality + """ + done_receiving = Event() + client_certs = [] + + def socket_handler(listener): + sock = listener.accept()[0] + sock = self._wrap_in_ssl(sock) + + client_certs.append(sock.getpeercert()) + + data = b'' + while not data.endswith(b'\r\n\r\n'): + data += sock.recv(8192) + + sock.sendall( + b'HTTP/1.1 200 OK\r\n' + b'Server: testsocket\r\n' + b'Connection: close\r\n' + b'Content-Length: 6\r\n' + b'\r\n' + b'Valid!' + ) + + done_receiving.wait(5) + sock.close() + + self._start_server(socket_handler) + ssl_context = ssl_.SSLContext(ssl_.PROTOCOL_SSLv23) + ssl_context.load_cert_chain( + certfile=DEFAULT_CERTS['certfile'], + keyfile=PASSWORD_KEYFILE, + password=password + ) + + pool = HTTPSConnectionPool( + self.host, + self.port, + ssl_context=ssl_context, + cert_reqs='REQUIRED', + ca_certs=DEFAULT_CA, + ) + self.addCleanup(pool.close) + pool.request('GET', '/', retries=0) + done_receiving.set() + + self.assertEqual(len(client_certs), 1) + + @requires_ssl_context_keyfile_password + def test_load_keyfile_with_invalid_password(self): + context = ssl_.SSLContext(ssl_.PROTOCOL_SSLv23) + + # Different error is raised depending on context. + if ssl_.IS_PYOPENSSL: + from OpenSSL.SSL import Error + expected_error = Error + else: + expected_error = ssl.SSLError + + with pytest.raises(expected_error): + context.load_cert_chain(certfile=DEFAULT_CERTS["certfile"], + keyfile=PASSWORD_KEYFILE, + password=b'letmei') + class TestSocketClosing(SocketDummyServerTestCase):
Make sure PyOpenSSLContext.load_cert_chain sets password as byte string
I ran into an issue creating a TransportAdapter using an SSL Context with a passphrase due to the fact that pyopenssl expects a byte string and not unicode (see https://github.com/pyca/pyopenssl/issues/701). Apparently, they have a reason for not casting it, so it would be nice if `urllib3` did the type check and casting instead. I'd be happy to submit a PR but just wanted to run the idea by you guys first. This could be something as simple as:

```
def load_cert_chain(self, certfile, keyfile=None, password=None):
    self._ctx.use_certificate_file(certfile)
    if password is not None:
        self._ctx.set_passwd_cb(lambda max_length, prompt_twice, userdata: six.binary_type(password))
    self._ctx.use_privatekey_file(keyfile or certfile)
```

Or it could have a bit more robust type checking.
Why should the cast come in urllib3 and not in user code? I'm not opposed to adding the cast but the downside of adding it is the possibility of subtle errors due to the encode.

Mainly for convenience. It works fine passing unicode to the built-in SSL library, but not to pyopenssl; it seems a little unexpected to have to worry about what type of string you're passing in to `load_cert_chain` as an end user when the backend is 'magically' chosen for you. It was also kind of hard to debug what was happening since the error was coming from outside of Python.
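As a rough illustration of what the patch above enables (the host, file paths, and passphrase here are made up for the example, and this assumes a urllib3 release that includes the change): the new `key_password` argument lets a caller use an encrypted client key without touching pyOpenSSL directly, and the pyOpenSSL backend now encodes a text passphrase to UTF-8 bytes before registering the password callback.

```python
import urllib3

# Hypothetical certificate/key paths and passphrase, purely for illustration.
pool = urllib3.HTTPSConnectionPool(
    "example.com", 443,
    cert_file="client.pem",
    key_file="client.key",     # encrypted private key
    key_password="letmein",    # used to decrypt the key; tests cover str and bytes
)
resp = pool.request("GET", "/")
print(resp.status)
```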
2018-12-01T20:15:31Z
[]
[]
urllib3/urllib3
1,492
urllib3__urllib3-1492
[ "303" ]
2a0957ea27e966166f81f88693af8e2f87d19fb6
diff --git a/dummyserver/handlers.py b/dummyserver/handlers.py --- a/dummyserver/handlers.py +++ b/dummyserver/handlers.py @@ -17,6 +17,7 @@ from urllib3.packages.six.moves.http_client import responses from urllib3.packages.six.moves.urllib.parse import urlsplit +from urllib3.packages.six import binary_type log = logging.getLogger(__name__) @@ -157,10 +158,15 @@ def upload(self, request): return Response("Wrong size: %d != %d" % (size, len(data)), status='400 Bad Request') - if filename != file_['filename']: - return Response("Wrong filename: %s != %s" % - (filename, file_.filename), - status='400 Bad Request') + got_filename = file_['filename'] + if(isinstance(got_filename, binary_type)): + got_filename = got_filename.decode('utf-8') + + # Tornado can leave the trailing \n in place on the filename. + if filename != got_filename: + return Response( + u"Wrong filename: %s != %s" % (filename, file_.filename), + status='400 Bad Request') return Response() @@ -304,49 +310,3 @@ def redirect_after(self, request): def shutdown(self, request): sys.exit() - - -# RFC2231-aware replacement of internal tornado function -def _parse_header(line): - r"""Parse a Content-type like header. - - Return the main content-type and a dictionary of options. - - >>> d = _parse_header("CD: fd; foo=\"bar\"; file*=utf-8''T%C3%A4st")[1] - >>> d['file'] == 'T\u00e4st' - True - >>> d['foo'] - 'bar' - """ - import tornado.httputil - import email.utils - from urllib3.packages import six - if not six.PY3: - line = line.encode('utf-8') - parts = tornado.httputil._parseparam(';' + line) - key = next(parts) - # decode_params treats first argument special, but we already stripped key - params = [('Dummy', 'value')] - for p in parts: - i = p.find('=') - if i >= 0: - name = p[:i].strip().lower() - value = p[i + 1:].strip() - params.append((name, value)) - params = email.utils.decode_params(params) - params.pop(0) # get rid of the dummy again - pdict = {} - for name, value in params: - value = email.utils.collapse_rfc2231_value(value) - if len(value) >= 2 and value[0] == '"' and value[-1] == '"': - value = value[1:-1] - pdict[name] = value - return key, pdict - - -# TODO: make the following conditional as soon as we know a version -# which does not require this fix. -# See https://github.com/facebook/tornado/issues/868 -if True: - import tornado.httputil - tornado.httputil._parse_header = _parse_header diff --git a/src/urllib3/fields.py b/src/urllib3/fields.py --- a/src/urllib3/fields.py +++ b/src/urllib3/fields.py @@ -1,6 +1,7 @@ from __future__ import absolute_import import email.utils import mimetypes +import re from .packages import six @@ -19,57 +20,147 @@ def guess_content_type(filename, default='application/octet-stream'): return default -def format_header_param(name, value): +def format_header_param_rfc2231(name, value): """ - Helper function to format and quote a single header parameter. + Helper function to format and quote a single header parameter using the + strategy defined in RFC 2231. Particularly useful for header parameters which might contain - non-ASCII values, like file names. This follows RFC 2231, as - suggested by RFC 2388 Section 4.4. + non-ASCII values, like file names. This follows RFC 2388 Section 4.4. :param name: The name of the parameter, a string expected to be ASCII only. :param value: - The value of the parameter, provided as a unicode string. + The value of the parameter, provided as ``bytes`` or `str``. + :ret: + An RFC-2231-formatted unicode string. 
""" + if isinstance(value, six.binary_type): + value = value.decode("utf-8") + if not any(ch in value for ch in '"\\\r\n'): - result = '%s="%s"' % (name, value) + result = u'%s="%s"' % (name, value) try: result.encode('ascii') except (UnicodeEncodeError, UnicodeDecodeError): pass else: return result - if not six.PY3 and isinstance(value, six.text_type): # Python 2: + + if not six.PY3: # Python 2: value = value.encode('utf-8') + + # encode_rfc2231 accepts an encoded string and returns an ascii-encoded + # string in Python 2 but accepts and returns unicode strings in Python 3 value = email.utils.encode_rfc2231(value, 'utf-8') value = '%s*=%s' % (name, value) + + if not six.PY3: # Python 2: + value = value.decode('utf-8') + return value +_HTML5_REPLACEMENTS = { + u"\u0022": u"%22", + # Replace "\" with "\\". + u"\u005C": u"\u005C\u005C", + u"\u005C": u"\u005C\u005C", +} + +# All control characters from 0x00 to 0x1F *except* 0x1B. +_HTML5_REPLACEMENTS.update({ + six.unichr(cc): u"%{:02X}".format(cc) + for cc + in range(0x00, 0x1F+1) + if cc not in (0x1B,) +}) + + +def _replace_multiple(value, needles_and_replacements): + + def replacer(match): + return needles_and_replacements[match.group(0)] + + pattern = re.compile( + r"|".join([ + re.escape(needle) for needle in needles_and_replacements.keys() + ]) + ) + + result = pattern.sub(replacer, value) + + return result + + +def format_header_param_html5(name, value): + """ + Helper function to format and quote a single header parameter using the + HTML5 strategy. + + Particularly useful for header parameters which might contain + non-ASCII values, like file names. This follows the `HTML5 Working Draft + Section 4.10.22.7`_ and matches the behavior of curl and modern browsers. + + .. _HTML5 Working Draft Section 4.10.22.7: + https://w3c.github.io/html/sec-forms.html#multipart-form-data + + :param name: + The name of the parameter, a string expected to be ASCII only. + :param value: + The value of the parameter, provided as ``bytes`` or `str``. + :ret: + A unicode string, stripped of troublesome characters. + """ + if isinstance(value, six.binary_type): + value = value.decode("utf-8") + + value = _replace_multiple(value, _HTML5_REPLACEMENTS) + + return u'%s="%s"' % (name, value) + + +# For backwards-compatibility. +format_header_param = format_header_param_html5 + + class RequestField(object): """ A data container for request body parameters. :param name: - The name of this request field. + The name of this request field. Must be unicode. :param data: The data/value body. :param filename: - An optional filename of the request field. + An optional filename of the request field. Must be unicode. :param headers: An optional dict-like object of headers to initially use for the field. + :param header_formatter: + An optional callable that is used to encode and format the headers. By + default, this is :func:`format_header_param_html5`. """ - def __init__(self, name, data, filename=None, headers=None): + def __init__( + self, + name, + data, + filename=None, + headers=None, + header_formatter=format_header_param_html5): self._name = name self._filename = filename self.data = data self.headers = {} if headers: self.headers = dict(headers) + self.header_formatter = header_formatter @classmethod - def from_tuples(cls, fieldname, value): + def from_tuples( + cls, + fieldname, + value, + header_formatter=format_header_param_html5): """ A :class:`~urllib3.fields.RequestField` factory from old-style tuple parameters. 
@@ -97,21 +188,24 @@ def from_tuples(cls, fieldname, value): content_type = None data = value - request_param = cls(fieldname, data, filename=filename) + request_param = cls( + fieldname, data, filename=filename, header_formatter=header_formatter) request_param.make_multipart(content_type=content_type) return request_param def _render_part(self, name, value): """ - Overridable helper function to format a single header parameter. + Overridable helper function to format a single header parameter. By + default, this calls ``self.header_formatter``. :param name: The name of the parameter, a string expected to be ASCII only. :param value: The value of the parameter, provided as a unicode string. """ - return format_header_param(name, value) + + return self.header_formatter(name, value) def _render_parts(self, header_parts): """ @@ -133,7 +227,7 @@ def _render_parts(self, header_parts): if value is not None: parts.append(self._render_part(name, value)) - return '; '.join(parts) + return u'; '.join(parts) def render_headers(self): """ @@ -144,15 +238,15 @@ def render_headers(self): sort_keys = ['Content-Disposition', 'Content-Type', 'Content-Location'] for sort_key in sort_keys: if self.headers.get(sort_key, False): - lines.append('%s: %s' % (sort_key, self.headers[sort_key])) + lines.append(u'%s: %s' % (sort_key, self.headers[sort_key])) for header_name, header_value in self.headers.items(): if header_name not in sort_keys: if header_value: - lines.append('%s: %s' % (header_name, header_value)) + lines.append(u'%s: %s' % (header_name, header_value)) - lines.append('\r\n') - return '\r\n'.join(lines) + lines.append(u'\r\n') + return u'\r\n'.join(lines) def make_multipart(self, content_disposition=None, content_type=None, content_location=None): @@ -168,10 +262,10 @@ def make_multipart(self, content_disposition=None, content_type=None, The 'Content-Location' of the request body. """ - self.headers['Content-Disposition'] = content_disposition or 'form-data' - self.headers['Content-Disposition'] += '; '.join([ - '', self._render_parts( - (('name', self._name), ('filename', self._filename)) + self.headers['Content-Disposition'] = content_disposition or u'form-data' + self.headers['Content-Disposition'] += u'; '.join([ + u'', self._render_parts( + ((u'name', self._name), (u'filename', self._filename)) ) ]) self.headers['Content-Type'] = content_type
diff --git a/test/test_fields.py b/test/test_fields.py --- a/test/test_fields.py +++ b/test/test_fields.py @@ -1,8 +1,7 @@ import pytest -from urllib3.fields import guess_content_type, RequestField +from urllib3.fields import format_header_param_rfc2231, guess_content_type, RequestField from urllib3.packages.six import u -from . import onlyPy2 class TestRequestField(object): @@ -53,13 +52,45 @@ def test_render_parts(self): parts = field._render_parts([('name', 'value'), ('filename', 'value')]) assert parts == 'name="value"; filename="value"' - def test_render_part(self): - field = RequestField('somename', 'data') + def test_render_part_rfc2231_unicode(self): + field = RequestField('somename', 'data', header_formatter=format_header_param_rfc2231) param = field._render_part('filename', u('n\u00e4me')) assert param == "filename*=utf-8''n%C3%A4me" - @onlyPy2 - def test_render_unicode_bytes_py2(self): + def test_render_part_rfc2231_ascii(self): + field = RequestField('somename', 'data', header_formatter=format_header_param_rfc2231) + param = field._render_part('filename', b'name') + assert param == 'filename="name"' + + def test_render_part_html5_unicode(self): field = RequestField('somename', 'data') - param = field._render_part('filename', 'n\xc3\xa4me') - assert param == "filename*=utf-8''n%C3%A4me" + param = field._render_part('filename', u('n\u00e4me')) + assert param == u('filename="n\u00e4me"') + + def test_render_part_html5_ascii(self): + field = RequestField('somename', 'data') + param = field._render_part('filename', b'name') + assert param == 'filename="name"' + + def test_render_part_html5_unicode_escape(self): + field = RequestField('somename', 'data') + param = field._render_part('filename', u('hello\\world\u0022')) + assert param == u('filename="hello\\\\world%22"') + + def test_render_part_html5_unicode_with_control_character(self): + field = RequestField('somename', 'data') + param = field._render_part('filename', u('hello\x1A\x1B\x1C')) + assert param == u('filename="hello%1A\x1B%1C"') + + def test_from_tuples_rfc2231(self): + field = RequestField.from_tuples( + u('fieldname'), + (u('filen\u00e4me'), 'data'), + header_formatter=format_header_param_rfc2231) + cd = field.headers['Content-Disposition'] + assert (cd == u("form-data; name=\"fieldname\"; filename*=utf-8''filen%C3%A4me")) + + def test_from_tuples_html5(self): + field = RequestField.from_tuples(u('fieldname'), (u('filen\u00e4me'), 'data')) + cd = field.headers['Content-Disposition'] + assert (cd == u('form-data; name="fieldname"; filename="filen\u00e4me"'))
Consider HTML 5 draft for multipart/form-data
In several past posts, I've tried to make urllib3 more standards-compliant, the way I saw things. In particular, #120 aimed at adding a proper `Content-Type` header for _every_ form field, and #119 / #223 introduced [RFC 2231](http://tools.ietf.org/html/rfc2231) format for file name encoding. One consequence of that last change was my noticing that tornado doesn't decode that format, causing some test cases to fail. e11e03628f8b82021a74ba7f5b39ce9ad9011382 monkey-patched tornado to deal with that, and I filed facebook/tornado#868 to take this upstream. There [bdarnell pointed out](https://github.com/facebook/tornado/pull/869#issuecomment-23632083) the following: **The current HTML 5 draft has [a section on multipart/form-data](http://www.w3.org/html/wg/drafts/html/master/forms.html#multipart-form-data) which explicitly forbids the use of `Content-Type` for non-file fields, and also the use of RFC 2231 format for file names. It also provides its own mechanism for encoding field names.**

Although I'm reluctant to abandon conformance with Standards Track RFCs for the sake of some lines in a draft, it appears that the HTML 5 draft might better reflect what current web servers actually implement, or are going to implement in the intermediate future. People have already [reported problems](http://stackoverflow.com/q/20591599/1468366) with our format in the wild. For this reason, I wonder whether the current choice of always using RFC 2231 might have to be revised. I can imagine two approaches: one would be dropping RFC 2231 support completely, the other would be introducing some switch to distinguish cases. I guess (although I don't like it) that the switch should default to the HTML 5 way. I guess I'd implement the switch as a global variable, since every field needs access to it and concurrent use of multiple standards should be very unlikely. Do you have a better suggestion?

So what would be needed for HTML 5 conforming handling of encodings? We'd need to properly distinguish between files and non-files. The [`RequestField` class](https://github.com/shazow/urllib3/blob/master/urllib3/fields.py#L53) could probably check whether both file name and content type are `None` to recognize non-file fields. That choice could be used to control the addition of the `Content-Type` header. For file names, we'd have to encode them to UTF-8 (or some user-configurable request encoding?), after making sure the result does not contain any invalid data: newlines must be stripped (or converted to spaces), quotation marks and backslashes quoted by `\` or substituted. I'm not sure which of these, and how much control we want to give our users over that process. If we allow for encodings other than UTF-8, which are not able to represent all Unicode codepoints, then we might need some more work. File name characters which are not representable must then be substituted or approximated somehow. For field names we'd have to make sure we replace non-representable characters as XML decimal character entities instead. Other non-ASCII header fields should usually not occur, but if they do, I guess they should be treated the same way file names are.

I notice that I made a mistake in reading [RFC 2388](http://tools.ietf.org/html/rfc2388): its [section 5.4](http://tools.ietf.org/html/rfc2388#section-5.4) states that field names are subject to a different encoding than file names. Grrr! Whoever thought of that?!?

In any case, to be conforming to that as well, we might even want to revise the RFC2231-using mode if we decide to keep it at all. I might find the time to write some of this myself, but before I start, I'd like to discuss the open questions above as to how this should get implemented.
Oh boy. First, my sincerest thanks for your thorough research and presenting of the situation. :) As I work through my backlog of emails, please excuse and remind me if I miss an important detail. The questions I noticed:

- Should we use a global variable to switch between "RFC 2231" and "HTML 5" modes? -1 on this. Whenever possible, urllib3 tries to be configurable per-request so that you could execute multiple different libraries using different features/states of urllib3 without a conflicting global state.
- Should we abandon RFC 2231 altogether in favour of HTML 5's spec? -1 on this too. I'd like to at least continue to support as much customizability of the request as possible, even if the functionality is a superset of an HTTP spec. That said, it may be wise to assume a conservative default (seems to be HTML 5 in this case).
- What about distinguishing between files and non-files in the `RequestField` class? +1, that sounds perfect.
- <Stuff about encoding that I don't quite understand> Honestly I'm not sure what's the Right Thing to do here. UTF8 does sound like the most sensible Python thing to do, but about other normalization... I'm inclined to start with a helper that lives in `urllib3.util` and see where we want to go from there. I'm reluctant to support anything other than UTF8 by default, but we should not get in the way if people want to encode things themselves.

Did I miss anything? I would be more than happy to continue having you lead this if you're up for it. :)

Have a look at [my current work in progress](https://github.com/gagern/urllib3/compare/issue303a), and let me know whether it's going in the right direction. I've managed to pass the request object along, so that things can be configured there. The new attributes which will control behavior still need to be documented somewhere, please let me know where you'd like to see them. Should I file a pull request to discuss the code, or wait till I consider it ready for merging (i.e. docs added)? With respect to that encoding issue: I've also introduced a request parameter to control encoding, set to UTF-8 by default. The main reason why someone might want to change that is because some HTML 5 web form without accept-charset attribute was part of a HTML file with charset other than UTF-8. In that case, HTML 5 requires using that charset, so urllib3 should provide some means to do so. Manually encoding field bodies would be part of the solution, but would not work for headers like file names which have to be Unicode for our mechanisms to work. Now everything is encoded using a configurable encoding, with xml character references as substitution, just like HTML 5 specifies.

@gagern Yes, please open a PR so we can continue the discussion there and I can leave comments on the diff. :) The above summary and links were very helpful in understanding the situation, thanks!

Unfortunately, there still isn't a single, clear, widely adopted way to encode non-ascii file names, and web servers don't agree on what format they accept. It's a fairly simple monkeypatch to workaround if you're interacting with a single web server, but there is still a need to be able to change encoding formats per-request. @shazow @gagern Is anyone working on this (or even wants to)? Also, here is an unbroken link to the relevant section in the current HTML5 draft: [multipart-form-data](http://w3c.github.io/html/sec-forms.html#multipart-form-data)

I'm not aware of anyone working on it, so you're welcome to claim it. :)
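For a concrete feel for the two strategies discussed here (which the patch above ultimately exposes as `format_header_param_rfc2231` and `format_header_param_html5`), the following standard-library-only sketch shows how the same non-ASCII filename would be rendered under each scheme. It is an illustration, not urllib3's actual code; the expected outputs match the values asserted in the test patch above.

```python
# -*- coding: utf-8 -*-
import email.utils

name = "filename"
value = u"filen\u00e4me"   # a non-ASCII file name

# RFC 2231 / RFC 2388 style, as urllib3 emitted before this change:
rfc2231 = u"%s*=%s" % (name, email.utils.encode_rfc2231(value, "utf-8"))
print(rfc2231)   # filename*=utf-8''filen%C3%A4me

# HTML5-draft style, matching what browsers and curl send: keep the
# Unicode value and only escape '\' and '"' (plus control characters).
escaped = value.replace(u"\\", u"\\\\").replace(u'"', u"%22")
html5 = u'%s="%s"' % (name, escaped)
print(html5)     # filename="filenäme"
```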
2018-12-03T19:52:29Z
[]
[]
urllib3/urllib3
1,496
urllib3__urllib3-1496
[ "1537" ]
c2a96c698f931b3648e6fed976eda0ff8c3787c3
diff --git a/dummyserver/server.py b/dummyserver/server.py --- a/dummyserver/server.py +++ b/dummyserver/server.py @@ -60,11 +60,16 @@ 'certfile': os.path.join(CERTS_PATH, 'server.ipv6addr.crt'), 'keyfile': os.path.join(CERTS_PATH, 'server.ipv6addr.key'), } +IPV6_SAN_CERTS = { + 'certfile': os.path.join(CERTS_PATH, 'server.ipv6_san.crt'), + 'keyfile': DEFAULT_CERTS['keyfile'] +} DEFAULT_CA = os.path.join(CERTS_PATH, 'cacert.pem') DEFAULT_CA_BAD = os.path.join(CERTS_PATH, 'client_bad.pem') NO_SAN_CA = os.path.join(CERTS_PATH, 'cacert.no_san.pem') DEFAULT_CA_DIR = os.path.join(CERTS_PATH, 'ca_path_test') IPV6_ADDR_CA = os.path.join(CERTS_PATH, 'server.ipv6addr.crt') +IPV6_SAN_CA = os.path.join(CERTS_PATH, 'server.ipv6_san.crt') COMBINED_CERT_AND_KEY = os.path.join(CERTS_PATH, 'server.combined.pem') diff --git a/src/urllib3/contrib/_securetransport/bindings.py b/src/urllib3/contrib/_securetransport/bindings.py --- a/src/urllib3/contrib/_securetransport/bindings.py +++ b/src/urllib3/contrib/_securetransport/bindings.py @@ -516,6 +516,8 @@ class SecurityConst(object): kTLSProtocol1 = 4 kTLSProtocol11 = 7 kTLSProtocol12 = 8 + kTLSProtocol13 = 10 + kTLSProtocolMaxSupported = 999 kSSLClientSide = 1 kSSLStreamType = 0 @@ -558,30 +560,27 @@ class SecurityConst(object): errSecInvalidTrustSettings = -25262 # Cipher suites. We only pick the ones our default cipher string allows. + # Source: https://developer.apple.com/documentation/security/1550981-ssl_cipher_suite_values TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384 = 0xC02C TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384 = 0xC030 TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256 = 0xC02B TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 = 0xC02F - TLS_DHE_DSS_WITH_AES_256_GCM_SHA384 = 0x00A3 + TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256 = 0xCCA9 + TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256 = 0xCCA8 TLS_DHE_RSA_WITH_AES_256_GCM_SHA384 = 0x009F - TLS_DHE_DSS_WITH_AES_128_GCM_SHA256 = 0x00A2 TLS_DHE_RSA_WITH_AES_128_GCM_SHA256 = 0x009E TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA384 = 0xC024 TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA384 = 0xC028 TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA = 0xC00A TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA = 0xC014 TLS_DHE_RSA_WITH_AES_256_CBC_SHA256 = 0x006B - TLS_DHE_DSS_WITH_AES_256_CBC_SHA256 = 0x006A TLS_DHE_RSA_WITH_AES_256_CBC_SHA = 0x0039 - TLS_DHE_DSS_WITH_AES_256_CBC_SHA = 0x0038 TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256 = 0xC023 TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256 = 0xC027 TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA = 0xC009 TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA = 0xC013 TLS_DHE_RSA_WITH_AES_128_CBC_SHA256 = 0x0067 - TLS_DHE_DSS_WITH_AES_128_CBC_SHA256 = 0x0040 TLS_DHE_RSA_WITH_AES_128_CBC_SHA = 0x0033 - TLS_DHE_DSS_WITH_AES_128_CBC_SHA = 0x0032 TLS_RSA_WITH_AES_256_GCM_SHA384 = 0x009D TLS_RSA_WITH_AES_128_GCM_SHA256 = 0x009C TLS_RSA_WITH_AES_256_CBC_SHA256 = 0x003D @@ -590,4 +589,5 @@ class SecurityConst(object): TLS_RSA_WITH_AES_128_CBC_SHA = 0x002F TLS_AES_128_GCM_SHA256 = 0x1301 TLS_AES_256_GCM_SHA384 = 0x1302 - TLS_CHACHA20_POLY1305_SHA256 = 0x1303 + TLS_AES_128_CCM_8_SHA256 = 0x1305 + TLS_AES_128_CCM_SHA256 = 0x1304 diff --git a/src/urllib3/contrib/pyopenssl.py b/src/urllib3/contrib/pyopenssl.py --- a/src/urllib3/contrib/pyopenssl.py +++ b/src/urllib3/contrib/pyopenssl.py @@ -70,6 +70,7 @@ class UnsupportedExtension(Exception): from .. import util + __all__ = ['inject_into_urllib3', 'extract_from_urllib3'] # SNI always works. @@ -77,20 +78,19 @@ class UnsupportedExtension(Exception): # Map from urllib3 to PyOpenSSL compatible parameter-values. 
_openssl_versions = { - ssl.PROTOCOL_SSLv23: OpenSSL.SSL.SSLv23_METHOD, + util.PROTOCOL_TLS: OpenSSL.SSL.SSLv23_METHOD, ssl.PROTOCOL_TLSv1: OpenSSL.SSL.TLSv1_METHOD, } +if hasattr(ssl, 'PROTOCOL_SSLv3') and hasattr(OpenSSL.SSL, 'SSLv3_METHOD'): + _openssl_versions[ssl.PROTOCOL_SSLv3] = OpenSSL.SSL.SSLv3_METHOD + if hasattr(ssl, 'PROTOCOL_TLSv1_1') and hasattr(OpenSSL.SSL, 'TLSv1_1_METHOD'): _openssl_versions[ssl.PROTOCOL_TLSv1_1] = OpenSSL.SSL.TLSv1_1_METHOD if hasattr(ssl, 'PROTOCOL_TLSv1_2') and hasattr(OpenSSL.SSL, 'TLSv1_2_METHOD'): _openssl_versions[ssl.PROTOCOL_TLSv1_2] = OpenSSL.SSL.TLSv1_2_METHOD -try: - _openssl_versions.update({ssl.PROTOCOL_SSLv3: OpenSSL.SSL.SSLv3_METHOD}) -except AttributeError: - pass _stdlib_to_openssl_verify = { ssl.CERT_NONE: OpenSSL.SSL.VERIFY_NONE, @@ -186,6 +186,10 @@ def idna_encode(name): except idna.core.IDNAError: return None + # Don't send IPv6 addresses through the IDNA encoder. + if ':' in name: + return name + name = idna_encode(name) if name is None: return None @@ -288,6 +292,10 @@ def recv(self, *args, **kwargs): raise timeout('The read operation timed out') else: return self.recv(*args, **kwargs) + + # TLS 1.3 post-handshake authentication + except OpenSSL.SSL.Error as e: + raise ssl.SSLError("read error: %r" % e) else: return data @@ -310,6 +318,10 @@ def recv_into(self, *args, **kwargs): else: return self.recv_into(*args, **kwargs) + # TLS 1.3 post-handshake authentication + except OpenSSL.SSL.Error as e: + raise ssl.SSLError("read error: %r" % e) + def settimeout(self, timeout): return self.socket.settimeout(timeout) @@ -362,6 +374,9 @@ def getpeercert(self, binary_form=False): 'subjectAltName': get_subj_alt_name(x509) } + def version(self): + return self.connection.get_protocol_version_name() + def _reuse(self): self._makefile_refs += 1 diff --git a/src/urllib3/contrib/securetransport.py b/src/urllib3/contrib/securetransport.py --- a/src/urllib3/contrib/securetransport.py +++ b/src/urllib3/contrib/securetransport.py @@ -23,6 +23,31 @@ urllib3.contrib.securetransport.inject_into_urllib3() Happy TLSing! + +This code is a bastardised version of the code found in Will Bond's oscrypto +library. An enormous debt is owed to him for blazing this trail for us. For +that reason, this code should be considered to be covered both by urllib3's +license and by oscrypto's: + + Copyright (c) 2015-2016 Will Bond <[email protected]> + + Permission is hereby granted, free of charge, to any person obtaining a + copy of this software and associated documentation files (the "Software"), + to deal in the Software without restriction, including without limitation + the rights to use, copy, modify, merge, publish, distribute, sublicense, + and/or sell copies of the Software, and to permit persons to whom the + Software is furnished to do so, subject to the following conditions: + + The above copyright notice and this permission notice shall be included in + all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. 
""" from __future__ import absolute_import @@ -86,35 +111,32 @@ # individual cipher suites. We need to do this because this is how # SecureTransport wants them. CIPHER_SUITES = [ - SecurityConst.TLS_AES_256_GCM_SHA384, - SecurityConst.TLS_CHACHA20_POLY1305_SHA256, - SecurityConst.TLS_AES_128_GCM_SHA256, SecurityConst.TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384, - SecurityConst.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384, SecurityConst.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256, + SecurityConst.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384, SecurityConst.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256, - SecurityConst.TLS_DHE_DSS_WITH_AES_256_GCM_SHA384, + SecurityConst.TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256, + SecurityConst.TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256, SecurityConst.TLS_DHE_RSA_WITH_AES_256_GCM_SHA384, - SecurityConst.TLS_DHE_DSS_WITH_AES_128_GCM_SHA256, SecurityConst.TLS_DHE_RSA_WITH_AES_128_GCM_SHA256, SecurityConst.TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA384, - SecurityConst.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA384, SecurityConst.TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA, - SecurityConst.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA, - SecurityConst.TLS_DHE_RSA_WITH_AES_256_CBC_SHA256, - SecurityConst.TLS_DHE_DSS_WITH_AES_256_CBC_SHA256, - SecurityConst.TLS_DHE_RSA_WITH_AES_256_CBC_SHA, - SecurityConst.TLS_DHE_DSS_WITH_AES_256_CBC_SHA, SecurityConst.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256, - SecurityConst.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256, SecurityConst.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA, + SecurityConst.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA384, + SecurityConst.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA, + SecurityConst.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256, SecurityConst.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA, + SecurityConst.TLS_DHE_RSA_WITH_AES_256_CBC_SHA256, + SecurityConst.TLS_DHE_RSA_WITH_AES_256_CBC_SHA, SecurityConst.TLS_DHE_RSA_WITH_AES_128_CBC_SHA256, - SecurityConst.TLS_DHE_DSS_WITH_AES_128_CBC_SHA256, SecurityConst.TLS_DHE_RSA_WITH_AES_128_CBC_SHA, - SecurityConst.TLS_DHE_DSS_WITH_AES_128_CBC_SHA, + SecurityConst.TLS_AES_256_GCM_SHA384, + SecurityConst.TLS_AES_128_GCM_SHA256, SecurityConst.TLS_RSA_WITH_AES_256_GCM_SHA384, SecurityConst.TLS_RSA_WITH_AES_128_GCM_SHA256, + SecurityConst.TLS_AES_128_CCM_8_SHA256, + SecurityConst.TLS_AES_128_CCM_SHA256, SecurityConst.TLS_RSA_WITH_AES_256_CBC_SHA256, SecurityConst.TLS_RSA_WITH_AES_128_CBC_SHA256, SecurityConst.TLS_RSA_WITH_AES_256_CBC_SHA, @@ -122,9 +144,10 @@ ] # Basically this is simple: for PROTOCOL_SSLv23 we turn it into a low of -# TLSv1 and a high of TLSv1.2. For everything else, we pin to that version. +# TLSv1 and a high of TLSv1.3. For everything else, we pin to that version. +# TLSv1 to 1.2 are supported on macOS 10.8+ and TLSv1.3 is macOS 10.13+ _protocol_to_min_max = { - ssl.PROTOCOL_SSLv23: (SecurityConst.kTLSProtocol1, SecurityConst.kTLSProtocol12), + util.PROTOCOL_TLS: (SecurityConst.kTLSProtocol1, SecurityConst.kTLSProtocolMaxSupported), } if hasattr(ssl, "PROTOCOL_SSLv2"): @@ -147,8 +170,6 @@ _protocol_to_min_max[ssl.PROTOCOL_TLSv1_2] = ( SecurityConst.kTLSProtocol12, SecurityConst.kTLSProtocol12 ) -if hasattr(ssl, "PROTOCOL_TLS"): - _protocol_to_min_max[ssl.PROTOCOL_TLS] = _protocol_to_min_max[ssl.PROTOCOL_SSLv23] def inject_into_urllib3(): @@ -460,7 +481,14 @@ def handshake(self, # Set the minimum and maximum TLS versions. 
result = Security.SSLSetProtocolVersionMin(self.context, min_version) _assert_no_error(result) + + # TLS 1.3 isn't necessarily enabled by the OS + # so we have to detect when we error out and try + # setting TLS 1.3 if it's allowed. kTLSProtocolMaxSupported + # was added in macOS 10.13 along with kTLSProtocol13. result = Security.SSLSetProtocolVersionMax(self.context, max_version) + if result != 0 and max_version == SecurityConst.kTLSProtocolMaxSupported: + result = Security.SSLSetProtocolVersionMax(self.context, SecurityConst.kTLSProtocol12) _assert_no_error(result) # If there's a trust DB, we need to use it. We do that by telling @@ -669,6 +697,25 @@ def getpeercert(self, binary_form=False): return der_bytes + def version(self): + protocol = Security.SSLProtocol() + result = Security.SSLGetNegotiatedProtocolVersion(self.context, ctypes.byref(protocol)) + _assert_no_error(result) + if protocol.value == SecurityConst.kTLSProtocol13: + return 'TLSv1.3' + elif protocol.value == SecurityConst.kTLSProtocol12: + return 'TLSv1.2' + elif protocol.value == SecurityConst.kTLSProtocol11: + return 'TLSv1.1' + elif protocol.value == SecurityConst.kTLSProtocol1: + return 'TLSv1' + elif protocol.value == SecurityConst.kSSLProtocol3: + return 'SSLv3' + elif protocol.value == SecurityConst.kSSLProtocol2: + return 'SSLv2' + else: + raise ssl.SSLError('Unknown TLS version: %r' % protocol) + def _reuse(self): self._makefile_refs += 1 diff --git a/src/urllib3/util/__init__.py b/src/urllib3/util/__init__.py --- a/src/urllib3/util/__init__.py +++ b/src/urllib3/util/__init__.py @@ -12,6 +12,7 @@ resolve_cert_reqs, resolve_ssl_version, ssl_wrap_socket, + PROTOCOL_TLS, ) from .timeout import ( current_time, @@ -35,6 +36,7 @@ 'IS_PYOPENSSL', 'IS_SECURETRANSPORT', 'SSLContext', + 'PROTOCOL_TLS', 'Retry', 'Timeout', 'Url', diff --git a/src/urllib3/util/ssl_.py b/src/urllib3/util/ssl_.py --- a/src/urllib3/util/ssl_.py +++ b/src/urllib3/util/ssl_.py @@ -54,11 +54,21 @@ def _const_compare_digest_backport(a, b): try: # Test for SSL features import ssl - from ssl import wrap_socket, CERT_REQUIRED, PROTOCOL_SSLv23 + from ssl import wrap_socket, CERT_REQUIRED from ssl import HAS_SNI # Has SNI? except ImportError: pass +try: # Platform-specific: Python 3.6 + from ssl import PROTOCOL_TLS + PROTOCOL_SSLv23 = PROTOCOL_TLS +except ImportError: + try: + from ssl import PROTOCOL_SSLv23 as PROTOCOL_TLS + PROTOCOL_SSLv23 = PROTOCOL_TLS + except ImportError: + PROTOCOL_SSLv23 = PROTOCOL_TLS = 2 + try: from ssl import OP_NO_SSLv2, OP_NO_SSLv3, OP_NO_COMPRESSION @@ -75,30 +85,30 @@ def _const_compare_digest_backport(a, b): # - https://hynek.me/articles/hardening-your-web-servers-ssl-ciphers/ # # The general intent is: -# - Prefer TLS 1.3 cipher suites # - prefer cipher suites that offer perfect forward secrecy (DHE/ECDHE), # - prefer ECDHE over DHE for better performance, # - prefer any AES-GCM and ChaCha20 over any AES-CBC for better performance and # security, # - prefer AES-GCM over ChaCha20 because hardware-accelerated AES is common, -# - disable NULL authentication, MD5 MACs and DSS for security reasons. +# - disable NULL authentication, MD5 MACs, DSS, and other +# insecure ciphers for security reasons. +# - NOTE: TLS 1.3 cipher suites are managed through a different interface +# not exposed by CPython (yet!) and are enabled by default if they're available. 
DEFAULT_CIPHERS = ':'.join([ - 'TLS13-AES-256-GCM-SHA384', - 'TLS13-CHACHA20-POLY1305-SHA256', - 'TLS13-AES-128-GCM-SHA256', + 'ECDHE+AESGCM', + 'ECDHE+CHACHA20', + 'DHE+AESGCM', + 'DHE+CHACHA20', 'ECDH+AESGCM', - 'ECDH+CHACHA20', 'DH+AESGCM', - 'DH+CHACHA20', - 'ECDH+AES256', - 'DH+AES256', - 'ECDH+AES128', + 'ECDH+AES', 'DH+AES', 'RSA+AESGCM', 'RSA+AES', '!aNULL', '!eNULL', '!MD5', + '!DSS', ]) try: @@ -205,7 +215,7 @@ def resolve_ssl_version(candidate): like resolve_cert_reqs """ if candidate is None: - return PROTOCOL_SSLv23 + return PROTOCOL_TLS if isinstance(candidate, str): res = getattr(ssl, candidate, None) @@ -251,7 +261,7 @@ def create_urllib3_context(ssl_version=None, cert_reqs=None, Constructed SSLContext object with specified options :rtype: SSLContext """ - context = SSLContext(ssl_version or ssl.PROTOCOL_SSLv23) + context = SSLContext(ssl_version or PROTOCOL_TLS) context.set_ciphers(ciphers or DEFAULT_CIPHERS)
diff --git a/test/__init__.py b/test/__init__.py --- a/test/__init__.py +++ b/test/__init__.py @@ -177,6 +177,28 @@ def wrapper(*args, **kwargs): return wrapper +def requiresTLSv1(): + """Test requires TLSv1 available""" + return pytest.mark.skipif(not hasattr(ssl, "PROTOCOL_TLSv1"), reason="Test requires TLSv1") + + +def requiresTLSv1_1(): + """Test requires TLSv1.1 available""" + return pytest.mark.skipif(not hasattr(ssl, "PROTOCOL_TLSv1_1"), reason="Test requires TLSv1.1") + + +def requiresTLSv1_2(): + """Test requires TLSv1.2 available""" + return pytest.mark.skipif(not hasattr(ssl, "PROTOCOL_TLSv1_2"), reason="Test requires TLSv1.2") + + +def requiresTLSv1_3(): + """Test requires TLSv1.3 available""" + return pytest.mark.skipif( + not getattr(ssl, "HAS_TLSv1_3", False), reason="Test requires TLSv1.3" + ) + + class _ListHandler(logging.Handler): def __init__(self): super(_ListHandler, self).__init__() diff --git a/test/contrib/test_pyopenssl.py b/test/contrib/test_pyopenssl.py --- a/test/contrib/test_pyopenssl.py +++ b/test/contrib/test_pyopenssl.py @@ -31,7 +31,11 @@ def teardown_module(): pass -from ..with_dummyserver.test_https import TestHTTPS, TestHTTPS_TLSv1 # noqa: F401 +from ..with_dummyserver.test_https import ( # noqa: F401 + TestHTTPS, TestHTTPS_TLSv1, TestHTTPS_TLSv1_1, + TestHTTPS_TLSv1_2, TestHTTPS_TLSv1_3, TestHTTPS_IPSAN, + TestHTTPS_IPv6Addr, TestHTTPS_NoSAN, TestHTTPS_IPV6SAN +) from ..with_dummyserver.test_socketlevel import ( # noqa: F401 TestSNI, TestSocketClosing, TestClientCerts ) diff --git a/test/contrib/test_securetransport.py b/test/contrib/test_securetransport.py --- a/test/contrib/test_securetransport.py +++ b/test/contrib/test_securetransport.py @@ -27,7 +27,10 @@ def teardown_module(): pass -from ..with_dummyserver.test_https import TestHTTPS, TestHTTPS_TLSv1 # noqa: F401 +from ..with_dummyserver.test_https import ( # noqa: F401 + TestHTTPS, TestHTTPS_TLSv1, TestHTTPS_TLSv1_1, + TestHTTPS_TLSv1_2, TestHTTPS_TLSv1_3 +) from ..with_dummyserver.test_socketlevel import ( # noqa: F401 TestSNI, TestSocketClosing, TestClientCerts ) diff --git a/test/with_dummyserver/test_https.py b/test/with_dummyserver/test_https.py --- a/test/with_dummyserver/test_https.py +++ b/test/with_dummyserver/test_https.py @@ -17,7 +17,8 @@ DEFAULT_CLIENT_NO_INTERMEDIATE_CERTS, NO_SAN_CERTS, NO_SAN_CA, DEFAULT_CA_DIR, IPV6_ADDR_CERTS, IPV6_ADDR_CA, HAS_IPV6, - IP_SAN_CERTS, PASSWORD_CLIENT_KEYFILE) + IP_SAN_CERTS, IPV6_SAN_CERTS, IPV6_SAN_CA, + PASSWORD_CLIENT_KEYFILE) from test import ( onlyPy279OrNewer, @@ -26,6 +27,10 @@ requires_network, requires_ssl_context_keyfile_password, fails_on_travis_gce, + requiresTLSv1, + requiresTLSv1_1, + requiresTLSv1_2, + requiresTLSv1_3, TARPIT_HOST, ) from urllib3 import HTTPSConnectionPool @@ -57,7 +62,19 @@ log.addHandler(logging.StreamHandler(sys.stdout)) +TLSv1_CERTS = DEFAULT_CERTS.copy() +TLSv1_CERTS["ssl_version"] = getattr(ssl, "PROTOCOL_TLSv1", None) + +TLSv1_1_CERTS = DEFAULT_CERTS.copy() +TLSv1_1_CERTS["ssl_version"] = getattr(ssl, "PROTOCOL_TLSv1_1", None) + +TLSv1_2_CERTS = DEFAULT_CERTS.copy() +TLSv1_2_CERTS["ssl_version"] = getattr(ssl, "PROTOCOL_TLSv1_2", None) + + class TestHTTPS(HTTPSDummyServerTestCase): + tls_protocol_name = None + def setUp(self): self._pool = HTTPSConnectionPool(self.host, self.port, ca_certs=DEFAULT_CA) self.addCleanup(self._pool.close) @@ -72,11 +89,6 @@ def test_dotted_fqdn(self): r = pool.request('GET', '/') self.assertEqual(r.status, 200, r.data) - def test_set_ssl_version_to_tlsv1(self): - 
self._pool.ssl_version = ssl.PROTOCOL_TLSv1 - r = self._pool.request('GET', '/') - self.assertEqual(r.status, 200, r.data) - def test_client_intermediate(self): client_cert, client_key = ( DEFAULT_CLIENT_CERTS['certfile'], @@ -591,28 +603,54 @@ def _request_without_resource_warnings(self, method, url): return [x for x in w if not isinstance(x.message, ResourceWarning)] + def test_set_ssl_version_to_tls_version(self): + if self.tls_protocol_name is None: + pytest.skip("Skipping base test class") -class TestHTTPS_TLSv1(HTTPSDummyServerTestCase): - certs = DEFAULT_CERTS.copy() - certs['ssl_version'] = ssl.PROTOCOL_TLSv1 - - def setUp(self): - self._pool = HTTPSConnectionPool(self.host, self.port) - self.addCleanup(self._pool.close) - - def test_discards_connection_on_sslerror(self): - self._pool.cert_reqs = 'CERT_REQUIRED' - with self.assertRaises(MaxRetryError) as cm: - self._pool.request('GET', '/', retries=0) - self.assertIsInstance(cm.exception.reason, SSLError) - self._pool.ca_certs = DEFAULT_CA - self._pool.request('GET', '/') + self._pool.ssl_version = self.certs['ssl_version'] + r = self._pool.request('GET', '/') + self.assertEqual(r.status, 200, r.data) def test_set_cert_default_cert_required(self): conn = VerifiedHTTPSConnection(self.host, self.port) conn.set_cert() self.assertEqual(conn.cert_reqs, ssl.CERT_REQUIRED) + def test_tls_protocol_name_of_socket(self): + if self.tls_protocol_name is None: + pytest.skip("Skipping base test class") + + conn = self._pool._get_conn() + conn.connect() + + if not hasattr(conn.sock, 'version'): + pytest.skip('SSLSocket.version() not available') + + self.assertEqual(conn.sock.version(), self.tls_protocol_name) + + +@requiresTLSv1() +class TestHTTPS_TLSv1(TestHTTPS): + tls_protocol_name = 'TLSv1' + certs = TLSv1_CERTS + + +@requiresTLSv1_1() +class TestHTTPS_TLSv1_1(TestHTTPS): + tls_protocol_name = 'TLSv1.1' + certs = TLSv1_1_CERTS + + +@requiresTLSv1_2() +class TestHTTPS_TLSv1_2(TestHTTPS): + tls_protocol_name = 'TLSv1.2' + certs = TLSv1_2_CERTS + + +@requiresTLSv1_3() +class TestHTTPS_TLSv1_3(TestHTTPS): + tls_protocol_name = 'TLSv1.3' + class TestHTTPS_NoSAN(HTTPSDummyServerTestCase): certs = NO_SAN_CERTS @@ -662,5 +700,23 @@ def test_strip_square_brackets_before_validating(self): self.assertEqual(r.status, 200) +class TestHTTPS_IPV6SAN(IPV6HTTPSDummyServerTestCase): + certs = IPV6_SAN_CERTS + + def test_can_validate_ipv6_san(self): + """Ensure that urllib3 can validate SANs with IPv6 addresses in them.""" + try: + import ipaddress # noqa: F401 + except ImportError: + pytest.skip("Only runs on systems with an ipaddress module") + + https_pool = HTTPSConnectionPool('[::1]', self.port, + cert_reqs='CERT_REQUIRED', + ca_certs=IPV6_SAN_CA) + self.addCleanup(https_pool.close) + r = https_pool.request('GET', '/') + self.assertEqual(r.status, 200) + + if __name__ == '__main__': unittest.main() diff --git a/test/with_dummyserver/test_socketlevel.py b/test/with_dummyserver/test_socketlevel.py --- a/test/with_dummyserver/test_socketlevel.py +++ b/test/with_dummyserver/test_socketlevel.py @@ -703,7 +703,7 @@ def socket_handler(listener): # First request should fail. response = pool.urlopen('GET', '/', retries=0, preload_content=False, - timeout=Timeout(connect=1, read=0.001)) + timeout=Timeout(connect=1, read=0.1)) try: self.assertRaises(ReadTimeoutError, response.read) finally:
OpenSSL.SSL.Error raised with invalid client cert and cryptography 2.5 In requests/requests#4961, I stumbled onto an issue where it appears that, in the right scenario, OpenSSL errors are raised from `urllib3.connectionpool.HTTPConnectionPool._make_request`. It happens with these deps ``` asn1crypto==0.24.0 certifi==2018.11.29 cryptography==2.5 ddt==1.2.0 idna==2.8 pyOpenSSL==19.0.0 requests==2.21.0 requests-unixsocket==0.1.5 trustme==0.5.0 urllib3==1.24.1 ``` [among others](https://github.com/cherrypy/cheroot/issues/173#issuecomment-460411259) on multiple Pythons and operating systems, including Python 3.7.2 on macOS and Python 3.7 on Linux. Downgrading to pyOpenSSL 18 has no effect, but downgrading to cryptography 2.4 resolves the issue. I'm still unsure how cryptography 2.5 is relevant to the issue, and I have so far been unable to replicate it in an isolated environment. One way to replicate the issue currently is to check out cherrypy/cheroot@e616c2542d and run `tox -- -k test_tls_client_auth --pdb` (to get a pdb prompt when the exception occurs). Unfortunately, because an exception was already raised, `cl_pem` and other state are already cleared out. Let me see if I can distill that test into a more isolated test that replicates the issue.
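For context (this sketch is not part of the original report): with the pyOpenSSL backend injected, a read failure such as one triggered by a rejected client certificate can surface as a raw `OpenSSL.SSL.Error` instead of the `ssl.SSLError` callers expect, which is what the `recv()`/`recv_into()` wrapping in the patch above guards against. The host, port, certificate paths, and the defensive `except` clauses below are illustrative assumptions, not the actual cheroot test setup.

```python
import ssl

import OpenSSL.SSL
import urllib3
import urllib3.contrib.pyopenssl
from urllib3.exceptions import HTTPError

# Route urllib3's TLS handling through pyOpenSSL, as requests does when
# pyOpenSSL and cryptography are installed.
urllib3.contrib.pyopenssl.inject_into_urllib3()

# Hypothetical server that requires a client certificate; all paths and
# the host/port are placeholders for illustration only.
pool = urllib3.HTTPSConnectionPool(
    "localhost", 8443,
    ca_certs="ca.pem",
    cert_file="rejected-client-cert.pem",
    key_file="rejected-client-key.pem",
)

try:
    pool.request("GET", "/", retries=False)
except (ssl.SSLError, HTTPError):
    # Expected: the TLS failure surfaces as ssl.SSLError or one of
    # urllib3's own exception types (exact wrapping varies by version).
    print("failure reported through the expected exception types")
except OpenSSL.SSL.Error as exc:
    # The reported bug: the raw pyOpenSSL error escapes urllib3 entirely.
    print("raw OpenSSL error leaked:", exc)
```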
2018-12-07T04:21:17Z
[]
[]
urllib3/urllib3
1507
urllib3__urllib3-1507
[ "1454" ]
799f50d70720accd2343905ce8225062b8b099fe
diff --git a/src/urllib3/connection.py b/src/urllib3/connection.py --- a/src/urllib3/connection.py +++ b/src/urllib3/connection.py @@ -282,15 +282,13 @@ def set_cert(self, key_file=None, cert_file=None, """ This method should only be called once, before the connection is used. """ - # If cert_reqs is not provided, we can try to guess. If the user gave - # us a cert database, we assume they want to use it: otherwise, if - # they gave us an SSL Context object we should use whatever is set for - # it. + # If cert_reqs is not provided we'll assume CERT_REQUIRED unless we also + # have an SSLContext object in which case we'll use its verify_mode. if cert_reqs is None: - if ca_certs or ca_cert_dir: - cert_reqs = 'CERT_REQUIRED' - elif self.ssl_context is not None: + if self.ssl_context is not None: cert_reqs = self.ssl_context.verify_mode + else: + cert_reqs = resolve_cert_reqs(None) self.key_file = key_file self.cert_file = cert_file diff --git a/src/urllib3/connectionpool.py b/src/urllib3/connectionpool.py --- a/src/urllib3/connectionpool.py +++ b/src/urllib3/connectionpool.py @@ -767,9 +767,6 @@ def __init__(self, host, port=None, block, headers, retries, _proxy, _proxy_headers, **conn_kw) - if ca_certs and cert_reqs is None: - cert_reqs = 'CERT_REQUIRED' - self.key_file = key_file self.cert_file = cert_file self.cert_reqs = cert_reqs diff --git a/src/urllib3/util/ssl_.py b/src/urllib3/util/ssl_.py --- a/src/urllib3/util/ssl_.py +++ b/src/urllib3/util/ssl_.py @@ -54,7 +54,7 @@ def _const_compare_digest_backport(a, b): try: # Test for SSL features import ssl - from ssl import wrap_socket, CERT_NONE, PROTOCOL_SSLv23 + from ssl import wrap_socket, CERT_REQUIRED, PROTOCOL_SSLv23 from ssl import HAS_SNI # Has SNI? except ImportError: pass @@ -189,7 +189,7 @@ def resolve_cert_reqs(candidate): constant which can directly be passed to wrap_socket. """ if candidate is None: - return CERT_NONE + return CERT_REQUIRED if isinstance(candidate, str): res = getattr(ssl, candidate, None)
diff --git a/test/contrib/test_socks.py b/test/contrib/test_socks.py --- a/test/contrib/test_socks.py +++ b/test/contrib/test_socks.py @@ -4,7 +4,7 @@ from urllib3.contrib import socks from urllib3.exceptions import ConnectTimeoutError, NewConnectionError -from dummyserver.server import DEFAULT_CERTS +from dummyserver.server import DEFAULT_CERTS, DEFAULT_CA from dummyserver.testcase import IPV4SocketDummyServerTestCase import pytest @@ -715,7 +715,7 @@ def request_handler(listener): self._start_server(request_handler) proxy_url = "socks5h://%s:%s" % (self.host, self.port) - pm = socks.SOCKSProxyManager(proxy_url) + pm = socks.SOCKSProxyManager(proxy_url, ca_certs=DEFAULT_CA) self.addCleanup(pm.clear) response = pm.request('GET', 'https://localhost') diff --git a/test/test_connectionpool.py b/test/test_connectionpool.py --- a/test/test_connectionpool.py +++ b/test/test_connectionpool.py @@ -1,5 +1,6 @@ from __future__ import absolute_import +import ssl import pytest from urllib3.connectionpool import ( @@ -350,7 +351,7 @@ def test_absolute_url(self): def test_ca_certs_default_cert_required(self): with connection_from_url('https://google.com:80', ca_certs=DEFAULT_CA) as pool: conn = pool._get_conn() - assert conn.cert_reqs == 'CERT_REQUIRED' + assert conn.cert_reqs == ssl.CERT_REQUIRED def test_cleanup_on_extreme_connection_error(self): """ diff --git a/test/test_util.py b/test/test_util.py --- a/test/test_util.py +++ b/test/test_util.py @@ -473,7 +473,7 @@ def test_timeout_elapsed(self, current_time): assert timeout.get_connect_duration() == 37 @pytest.mark.parametrize('candidate, requirements', [ - (None, ssl.CERT_NONE), + (None, ssl.CERT_REQUIRED), (ssl.CERT_NONE, ssl.CERT_NONE), (ssl.CERT_REQUIRED, ssl.CERT_REQUIRED), ('REQUIRED', ssl.CERT_REQUIRED), diff --git a/test/with_dummyserver/test_https.py b/test/with_dummyserver/test_https.py --- a/test/with_dummyserver/test_https.py +++ b/test/with_dummyserver/test_https.py @@ -31,7 +31,6 @@ from urllib3 import HTTPSConnectionPool from urllib3.connection import ( VerifiedHTTPSConnection, - UnverifiedHTTPSConnection, RECENT_DATE, ) from urllib3.exceptions import ( @@ -60,7 +59,7 @@ class TestHTTPS(HTTPSDummyServerTestCase): def setUp(self): - self._pool = HTTPSConnectionPool(self.host, self.port) + self._pool = HTTPSConnectionPool(self.host, self.port, ca_certs=DEFAULT_CA) self.addCleanup(self._pool.close) def test_simple(self): @@ -69,7 +68,7 @@ def test_simple(self): @fails_on_travis_gce def test_dotted_fqdn(self): - pool = HTTPSConnectionPool(self.host + '.', self.port) + pool = HTTPSConnectionPool(self.host + '.', self.port, ca_certs=DEFAULT_CA) r = pool.request('GET', '/') self.assertEqual(r.status, 200, r.data) @@ -85,7 +84,8 @@ def test_client_intermediate(self): ) https_pool = HTTPSConnectionPool(self.host, self.port, key_file=client_key, - cert_file=client_cert) + cert_file=client_cert, + ca_certs=DEFAULT_CA) r = https_pool.request('GET', '/certificate') subject = json.loads(r.data.decode('utf-8')) assert subject['organizationalUnitName'].startswith( @@ -98,7 +98,8 @@ def test_client_no_intermediate(self): ) https_pool = HTTPSConnectionPool(self.host, self.port, cert_file=client_cert, - key_file=client_key) + key_file=client_key, + ca_certs=DEFAULT_CA) try: https_pool.request('GET', '/certificate', retries=False) except SSLError as e: @@ -121,6 +122,7 @@ def test_client_key_password(self): PASSWORD_CLIENT_KEYFILE, ) https_pool = HTTPSConnectionPool(self.host, self.port, + ca_certs=DEFAULT_CA, key_file=client_key, cert_file=client_cert, 
key_password="letmein") @@ -310,8 +312,7 @@ def test_no_ssl(self): def test_unverified_ssl(self): """ Test that bare HTTPSConnection can connect, make requests """ - pool = HTTPSConnectionPool(self.host, self.port) - pool.ConnectionCls = UnverifiedHTTPSConnection + pool = HTTPSConnectionPool(self.host, self.port, cert_reqs=ssl.CERT_NONE) self.addCleanup(pool.close) with mock.patch('warnings.warn') as warn: @@ -551,15 +552,14 @@ def new_pool(timeout, cert_reqs='CERT_REQUIRED'): def test_enhanced_ssl_connection(self): fingerprint = '92:81:FE:85:F7:0C:26:60:EC:D6:B3:BF:93:CF:F9:71:CC:07:7D:0A' - conn = VerifiedHTTPSConnection(self.host, self.port) - self.addCleanup(conn.close) https_pool = HTTPSConnectionPool(self.host, self.port, cert_reqs='CERT_REQUIRED', ca_certs=DEFAULT_CA, assert_fingerprint=fingerprint) self.addCleanup(https_pool.close) - https_pool._make_request(conn, 'GET', '/') + r = https_pool.request('GET', '/') + assert r.status == 200 @onlyPy279OrNewer def test_ssl_correct_system_time(self): @@ -610,8 +610,8 @@ def test_discards_connection_on_sslerror(self): def test_set_cert_default_cert_required(self): conn = VerifiedHTTPSConnection(self.host, self.port) - conn.set_cert(ca_certs=DEFAULT_CA) - self.assertEqual(conn.cert_reqs, 'CERT_REQUIRED') + conn.set_cert() + self.assertEqual(conn.cert_reqs, ssl.CERT_REQUIRED) class TestHTTPS_NoSAN(HTTPSDummyServerTestCase): diff --git a/test/with_dummyserver/test_no_ssl.py b/test/with_dummyserver/test_no_ssl.py --- a/test/with_dummyserver/test_no_ssl.py +++ b/test/with_dummyserver/test_no_ssl.py @@ -21,7 +21,7 @@ def test_simple(self): class TestHTTPSWithoutSSL(HTTPSDummyServerTestCase, TestWithoutSSL): def test_simple(self): - pool = urllib3.HTTPSConnectionPool(self.host, self.port) + pool = urllib3.HTTPSConnectionPool(self.host, self.port, cert_reqs="NONE") self.addCleanup(pool.close) try: pool.request('GET', '/') diff --git a/test/with_dummyserver/test_proxy_poolmanager.py b/test/with_dummyserver/test_proxy_poolmanager.py --- a/test/with_dummyserver/test_proxy_poolmanager.py +++ b/test/with_dummyserver/test_proxy_poolmanager.py @@ -28,7 +28,7 @@ def setUp(self): self.proxy_url = 'http://%s:%d' % (self.proxy_host, self.proxy_port) def test_basic_proxy(self): - http = proxy_from_url(self.proxy_url) + http = proxy_from_url(self.proxy_url, ca_certs=DEFAULT_CA) self.addCleanup(http.clear) r = http.request('GET', '%s/' % self.http_url) @@ -66,7 +66,7 @@ def test_proxy_conn_fail(self): self.assertEqual(type(e.reason), ProxyError) def test_oldapi(self): - http = ProxyManager(connection_from_url(self.proxy_url)) + http = ProxyManager(connection_from_url(self.proxy_url), ca_certs=DEFAULT_CA) self.addCleanup(http.clear) r = http.request('GET', '%s/' % self.http_url) @@ -146,7 +146,7 @@ def test_cross_host_redirect(self): self.assertNotEqual(r._pool.host, self.http_host_alt) def test_cross_protocol_redirect(self): - http = proxy_from_url(self.proxy_url) + http = proxy_from_url(self.proxy_url, ca_certs=DEFAULT_CA) self.addCleanup(http.clear) cross_protocol_location = '%s/echo?a=b' % self.https_url @@ -166,7 +166,8 @@ def test_cross_protocol_redirect(self): def test_headers(self): http = proxy_from_url(self.proxy_url, headers={'Foo': 'bar'}, - proxy_headers={'Hickory': 'dickory'}) + proxy_headers={'Hickory': 'dickory'}, + ca_certs=DEFAULT_CA) self.addCleanup(http.clear) r = http.request_encode_url('GET', '%s/headers' % self.http_url) @@ -190,13 +191,6 @@ def test_headers(self): self.assertEqual(returned_headers.get('Host'), '%s:%s' % (self.https_host, 
self.https_port)) - r = http.request_encode_url('GET', '%s/headers' % self.https_url_alt) - returned_headers = json.loads(r.data.decode()) - self.assertEqual(returned_headers.get('Foo'), 'bar') - self.assertIsNone(returned_headers.get('Hickory')) - self.assertEqual(returned_headers.get('Host'), - '%s:%s' % (self.https_host_alt, self.https_port)) - r = http.request_encode_body('POST', '%s/headers' % self.http_url) returned_headers = json.loads(r.data.decode()) self.assertEqual(returned_headers.get('Foo'), 'bar') @@ -254,7 +248,7 @@ def test_headerdict(self): self.assertEqual(returned_headers.get('Baz'), 'quux') def test_proxy_pooling(self): - http = proxy_from_url(self.proxy_url) + http = proxy_from_url(self.proxy_url, cert_reqs='NONE') self.addCleanup(http.clear) for x in range(2): @@ -320,7 +314,7 @@ def test_https_proxy_pool_timeout(self): def test_scheme_host_case_insensitive(self): """Assert that upper-case schemes and hosts are normalized.""" - http = proxy_from_url(self.proxy_url.upper()) + http = proxy_from_url(self.proxy_url.upper(), ca_certs=DEFAULT_CA) self.addCleanup(http.clear) r = http.request('GET', '%s/' % self.http_url.upper()) @@ -342,7 +336,7 @@ def setUp(self): self.proxy_url = 'http://[%s]:%d' % (self.proxy_host, self.proxy_port) def test_basic_ipv6_proxy(self): - http = proxy_from_url(self.proxy_url) + http = proxy_from_url(self.proxy_url, ca_certs=DEFAULT_CA) self.addCleanup(http.clear) r = http.request('GET', '%s/' % self.http_url) diff --git a/test/with_dummyserver/test_socketlevel.py b/test/with_dummyserver/test_socketlevel.py --- a/test/with_dummyserver/test_socketlevel.py +++ b/test/with_dummyserver/test_socketlevel.py @@ -972,7 +972,7 @@ def echo_socket_handler(listener): self._start_server(echo_socket_handler) base_url = 'http://%s:%d' % (self.host, self.port) - proxy = proxy_from_url(base_url) + proxy = proxy_from_url(base_url, ca_certs=DEFAULT_CA) self.addCleanup(proxy.clear) url = 'https://{0}'.format(self.host) @@ -1016,7 +1016,7 @@ def echo_socket_handler(listener): self._start_server(echo_socket_handler) base_url = 'http://%s:%d' % (self.host, self.port) - proxy = proxy_from_url(base_url) + proxy = proxy_from_url(base_url, cert_reqs='NONE') self.addCleanup(proxy.clear) url = 'https://[{0}]'.format(ipv6_addr) @@ -1070,8 +1070,7 @@ def socket_handler(listener): ssl_sock = ssl.wrap_socket(sock, server_side=True, keyfile=DEFAULT_CERTS['keyfile'], - certfile=DEFAULT_CERTS['certfile'], - ca_certs=DEFAULT_CA) + certfile=DEFAULT_CERTS['certfile']) buf = b'' while not buf.endswith(b'\r\n\r\n'): @@ -1090,7 +1089,7 @@ def socket_handler(listener): ssl_sock.close() self._start_server(socket_handler) - pool = HTTPSConnectionPool(self.host, self.port) + pool = HTTPSConnectionPool(self.host, self.port, ca_certs=DEFAULT_CA) self.addCleanup(pool.close) response = pool.urlopen('GET', '/', retries=0, preload_content=False, @@ -1180,7 +1179,7 @@ def socket_handler(listener): self._start_server(socket_handler) - pool = HTTPSConnectionPool(self.host, self.port) + pool = HTTPSConnectionPool(self.host, self.port, ca_certs=DEFAULT_CA) self.addCleanup(pool.close) response = pool.urlopen('GET', '/', retries=1) self.assertEqual(response.data, b'Success')
Shouldn't urllib3 verify SSL/TLS certificates by default? The README says: > ... urllib3 brings many critical features that are missing from the Python standard libraries: > ... > Client-side SSL/TLS verification. > ... and only in https://urllib3.readthedocs.io/en/latest/user-guide.html#certificate-verification is it explained that > By default, urllib3 does not verify HTTPS requests. Since [pep-0476](https://www.python.org/dev/peps/pep-0476) (i.e. Python >= 2.7.9 or >= 3.4.3) the standard library verifies SSL certificates by default, so projects switching from urllib2 or httplib to urllib3 without explicitly enabling certificate validation now lose the certificate validation the standard library provided. Of course, this would be an incompatible breaking change, like it was for Python at the time, but is it really still a sane default to not verify certificates nowadays?
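For comparison (this example is not part of the issue), this is roughly what callers must write today to opt in to verification; using `certifi` as the CA bundle source is an assumption, any CA bundle path would do.

```python
import certifi
import urllib3

# Current behaviour: verification must be requested explicitly.
verified = urllib3.PoolManager(
    cert_reqs="CERT_REQUIRED",   # fail on invalid or untrusted certificates
    ca_certs=certifi.where(),    # CA bundle to verify against
)
resp = verified.request("GET", "https://example.com/")
print(resp.status)

# Without those arguments, older urllib3 releases only emit an
# InsecureRequestWarning and skip certificate verification entirely.
unverified = urllib3.PoolManager()
```

Flipping the default to `CERT_REQUIRED` would make the second pool behave like the first, matching what the `ssl` module has done since PEP 476.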
I think if we were to "do-it-all-over" we'd have TLS verification on by default but to preserve backwards compatibility we'll probably keep the current behavior until v2. Currently a warning is emitted when using an `HTTPSConnection` without TLS verification. For the records, see https://github.com/SeleniumHQ/selenium/issues/6534 for a case where switching from httplib to urllib3 accidentally disabled certificate validation ( the change introducing the issue https://github.com/SeleniumHQ/selenium/commit/8d98f3223c3757bac24f779f5d8c1e9e1e8edcfb ) I would personally be totally okay with us changing this behavior in a minor release (Python did it in a patch release, hilariously). I would want some level of consensus with the maintainers of this library and Requests. Knowing Python did this change in a patch makes me feel more comfortable upgrading to verify by default. I have a feeling many of our downstreams already implement this anyways. @theacodes, I believe Requests already has verifying on by default, so I'm fully in support of this. I'm assuming this isn't an api change beyond changing the default value for the function signature? Correct. I'd say this is probably ready to go then :)
2018-12-21T15:47:08Z
[]
[]
urllib3/urllib3
1511
urllib3__urllib3-1511
[ "1510" ]
a252e2549ff797fe13e688f05296fa496e0c469a
diff --git a/src/urllib3/poolmanager.py b/src/urllib3/poolmanager.py --- a/src/urllib3/poolmanager.py +++ b/src/urllib3/poolmanager.py @@ -7,6 +7,7 @@ from .connectionpool import HTTPConnectionPool, HTTPSConnectionPool from .connectionpool import port_by_scheme from .exceptions import LocationValueError, MaxRetryError, ProxySchemeUnknown +from .packages import six from .packages.six.moves.urllib.parse import urljoin from .request import RequestMethods from .util.url import parse_url @@ -342,8 +343,10 @@ def urlopen(self, method, url, redirect=True, **kw): # conn.is_same_host() which may use socket.gethostbyname() in the future. if (retries.remove_headers_on_redirect and not conn.is_same_host(redirect_location)): - for header in retries.remove_headers_on_redirect: - kw['headers'].pop(header, None) + headers = list(six.iterkeys(kw['headers'])) + for header in headers: + if header.lower() in retries.remove_headers_on_redirect: + kw['headers'].pop(header, None) try: retries = retries.increment(method, url, response=response, _pool=conn) diff --git a/src/urllib3/util/retry.py b/src/urllib3/util/retry.py --- a/src/urllib3/util/retry.py +++ b/src/urllib3/util/retry.py @@ -179,7 +179,8 @@ def __init__(self, total=10, connect=None, read=None, redirect=None, status=None self.raise_on_status = raise_on_status self.history = history or tuple() self.respect_retry_after_header = respect_retry_after_header - self.remove_headers_on_redirect = remove_headers_on_redirect + self.remove_headers_on_redirect = frozenset([ + h.lower() for h in remove_headers_on_redirect]) def new(self, **kw): params = dict(
diff --git a/test/test_retry.py b/test/test_retry.py --- a/test/test_retry.py +++ b/test/test_retry.py @@ -253,9 +253,9 @@ def test_retry_method_not_in_whitelist(self): def test_retry_default_remove_headers_on_redirect(self): retry = Retry() - assert list(retry.remove_headers_on_redirect) == ['Authorization'] + assert list(retry.remove_headers_on_redirect) == ['authorization'] def test_retry_set_remove_headers_on_redirect(self): retry = Retry(remove_headers_on_redirect=['X-API-Secret']) - assert list(retry.remove_headers_on_redirect) == ['X-API-Secret'] + assert list(retry.remove_headers_on_redirect) == ['x-api-secret'] diff --git a/test/with_dummyserver/test_poolmanager.py b/test/with_dummyserver/test_poolmanager.py --- a/test/with_dummyserver/test_poolmanager.py +++ b/test/with_dummyserver/test_poolmanager.py @@ -123,6 +123,17 @@ def test_redirect_cross_host_remove_headers(self): self.assertNotIn('Authorization', data) + r = http.request('GET', '%s/redirect' % self.base_url, + fields={'target': '%s/headers' % self.base_url_alt}, + headers={'authorization': 'foo'}) + + self.assertEqual(r.status, 200) + + data = json.loads(r.data.decode('utf-8')) + + self.assertNotIn('authorization', data) + self.assertNotIn('Authorization', data) + def test_redirect_cross_host_no_remove_headers(self): http = PoolManager() self.addCleanup(http.clear) @@ -155,6 +166,20 @@ def test_redirect_cross_host_set_removed_headers(self): self.assertNotIn('X-API-Secret', data) self.assertEqual(data['Authorization'], 'bar') + r = http.request('GET', '%s/redirect' % self.base_url, + fields={'target': '%s/headers' % self.base_url_alt}, + headers={'x-api-secret': 'foo', + 'authorization': 'bar'}, + retries=Retry(remove_headers_on_redirect=['X-API-Secret'])) + + self.assertEqual(r.status, 200) + + data = json.loads(r.data.decode('utf-8')) + + self.assertNotIn('x-api-secret', data) + self.assertNotIn('X-API-Secret', data) + self.assertEqual(data['Authorization'], 'bar') + def test_raise_on_redirect(self): http = PoolManager() self.addCleanup(http.clear)
authorization header is forwarded cross-site when redirecting #1346 only strips the exact "Authorization" header; a lowercase "authorization" header is not removed. RFC 7230 section 3.2 "Header Fields" (https://tools.ietf.org/html/rfc7230#section-3.2) says the following: > Each header field consists of a case-insensitive field name followed by a colon (":"), optional leading whitespace, the field value, and optional trailing whitespace.
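A small illustration of the requested behaviour (not taken from the issue): header names configured for removal on redirect should match request headers case-insensitively, so lowercase `authorization` and `x-api-secret` headers are stripped on a cross-host redirect as well. The redirect URL below is a placeholder.

```python
import urllib3
from urllib3.util.retry import Retry

http = urllib3.PoolManager()

# With the fix, the names listed here are lowercased and matched against
# request header names case-insensitively.
retry = Retry(remove_headers_on_redirect=["Authorization", "X-API-Secret"])

resp = http.request(
    "GET",
    "https://example.com/redirect-to-other-host",  # placeholder URL
    headers={"authorization": "secret", "x-api-secret": "also secret"},
    retries=retry,
)
# After a redirect to a different host, neither lowercase header should be
# forwarded to the new host.
print(resp.status)
```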
2018-12-29T01:40:59Z
[]
[]
urllib3/urllib3
1531
urllib3__urllib3-1531
[ "1529" ]
1d3e60e86fce8938845fdc052f47ed9ef3da8859
diff --git a/src/urllib3/connectionpool.py b/src/urllib3/connectionpool.py --- a/src/urllib3/connectionpool.py +++ b/src/urllib3/connectionpool.py @@ -26,6 +26,7 @@ from .packages.ssl_match_hostname import CertificateError from .packages import six from .packages.six.moves import queue +from .packages.rfc3986.normalizers import normalize_host from .connection import ( port_by_scheme, DummyConnection, @@ -65,7 +66,7 @@ def __init__(self, host, port=None): if not host: raise LocationValueError("No host specified.") - self.host = _ipv6_host(host, self.scheme) + self.host = _normalize_host(host, scheme=self.scheme) self._proxy_host = host.lower() self.port = port @@ -434,8 +435,8 @@ def is_same_host(self, url): # TODO: Add optional support for socket.gethostbyname checking. scheme, host, port = get_host(url) - - host = _ipv6_host(host, self.scheme) + if host is not None: + host = _normalize_host(host, scheme=scheme) # Use explicit default port for comparison when none is given if self.port and not port: @@ -878,9 +879,9 @@ def connection_from_url(url, **kw): return HTTPConnectionPool(host, port=port, **kw) -def _ipv6_host(host, scheme): +def _normalize_host(host, scheme): """ - Process IPv6 address literals + Normalize hosts for comparisons and use with sockets. """ # httplib doesn't like it when we include brackets in IPv6 addresses @@ -889,11 +890,8 @@ def _ipv6_host(host, scheme): # Instead, we need to make sure we never pass ``None`` as the port. # However, for backward compatibility reasons we can't actually # *assert* that. See http://bugs.python.org/issue28539 - # - # Also if an IPv6 address literal has a zone identifier, the - # percent sign might be URIencoded, convert it back into ASCII if host.startswith('[') and host.endswith(']'): - host = host.replace('%25', '%').strip('[]') + host = host.strip('[]') if scheme in NORMALIZABLE_SCHEMES: - host = host.lower() + host = normalize_host(host) return host diff --git a/src/urllib3/packages/rfc3986/__init__.py b/src/urllib3/packages/rfc3986/__init__.py --- a/src/urllib3/packages/rfc3986/__init__.py +++ b/src/urllib3/packages/rfc3986/__init__.py @@ -22,6 +22,8 @@ :license: Apache v2.0, see LICENSE for details """ +from .api import iri_reference +from .api import IRIReference from .api import is_valid_uri from .api import normalize_uri from .api import uri_reference @@ -34,14 +36,16 @@ __author_email__ = '[email protected]' __license__ = 'Apache v2.0' __copyright__ = 'Copyright 2014 Rackspace' -__version__ = '1.2.0' +__version__ = '1.3.0' __all__ = ( 'ParseResult', 'URIReference', + 'IRIReference', 'is_valid_uri', 'normalize_uri', 'uri_reference', + 'iri_reference', 'urlparse', '__title__', '__author__', diff --git a/src/urllib3/packages/rfc3986/_mixin.py b/src/urllib3/packages/rfc3986/_mixin.py new file mode 100644 --- /dev/null +++ b/src/urllib3/packages/rfc3986/_mixin.py @@ -0,0 +1,353 @@ +"""Module containing the implementation of the URIMixin class.""" +import warnings + +from . import exceptions as exc +from . import misc +from . import normalizers +from . import validators + + +class URIMixin(object): + """Mixin with all shared methods for URIs and IRIs.""" + + __hash__ = tuple.__hash__ + + def authority_info(self): + """Return a dictionary with the ``userinfo``, ``host``, and ``port``. + + If the authority is not valid, it will raise a + :class:`~rfc3986.exceptions.InvalidAuthority` Exception. 
+ + :returns: + ``{'userinfo': 'username:password', 'host': 'www.example.com', + 'port': '80'}`` + :rtype: dict + :raises rfc3986.exceptions.InvalidAuthority: + If the authority is not ``None`` and can not be parsed. + """ + if not self.authority: + return {'userinfo': None, 'host': None, 'port': None} + + match = self._match_subauthority() + + if match is None: + # In this case, we have an authority that was parsed from the URI + # Reference, but it cannot be further parsed by our + # misc.SUBAUTHORITY_MATCHER. In this case it must not be a valid + # authority. + raise exc.InvalidAuthority(self.authority.encode(self.encoding)) + + # We had a match, now let's ensure that it is actually a valid host + # address if it is IPv4 + matches = match.groupdict() + host = matches.get('host') + + if (host and misc.IPv4_MATCHER.match(host) and not + validators.valid_ipv4_host_address(host)): + # If we have a host, it appears to be IPv4 and it does not have + # valid bytes, it is an InvalidAuthority. + raise exc.InvalidAuthority(self.authority.encode(self.encoding)) + + return matches + + def _match_subauthority(self): + return misc.SUBAUTHORITY_MATCHER.match(self.authority) + + @property + def host(self): + """If present, a string representing the host.""" + try: + authority = self.authority_info() + except exc.InvalidAuthority: + return None + return authority['host'] + + @property + def port(self): + """If present, the port extracted from the authority.""" + try: + authority = self.authority_info() + except exc.InvalidAuthority: + return None + return authority['port'] + + @property + def userinfo(self): + """If present, the userinfo extracted from the authority.""" + try: + authority = self.authority_info() + except exc.InvalidAuthority: + return None + return authority['userinfo'] + + def is_absolute(self): + """Determine if this URI Reference is an absolute URI. + + See http://tools.ietf.org/html/rfc3986#section-4.3 for explanation. + + :returns: ``True`` if it is an absolute URI, ``False`` otherwise. + :rtype: bool + """ + return bool(misc.ABSOLUTE_URI_MATCHER.match(self.unsplit())) + + def is_valid(self, **kwargs): + """Determine if the URI is valid. + + .. deprecated:: 1.1.0 + + Use the :class:`~rfc3986.validators.Validator` object instead. + + :param bool require_scheme: Set to ``True`` if you wish to require the + presence of the scheme component. + :param bool require_authority: Set to ``True`` if you wish to require + the presence of the authority component. + :param bool require_path: Set to ``True`` if you wish to require the + presence of the path component. + :param bool require_query: Set to ``True`` if you wish to require the + presence of the query component. + :param bool require_fragment: Set to ``True`` if you wish to require + the presence of the fragment component. + :returns: ``True`` if the URI is valid. ``False`` otherwise. + :rtype: bool + """ + warnings.warn("Please use rfc3986.validators.Validator instead. " + "This method will be eventually removed.", + DeprecationWarning) + validators = [ + (self.scheme_is_valid, kwargs.get('require_scheme', False)), + (self.authority_is_valid, kwargs.get('require_authority', False)), + (self.path_is_valid, kwargs.get('require_path', False)), + (self.query_is_valid, kwargs.get('require_query', False)), + (self.fragment_is_valid, kwargs.get('require_fragment', False)), + ] + return all(v(r) for v, r in validators) + + def authority_is_valid(self, require=False): + """Determine if the authority component is valid. + + .. 
deprecated:: 1.1.0 + + Use the :class:`~rfc3986.validators.Validator` object instead. + + :param bool require: + Set to ``True`` to require the presence of this component. + :returns: + ``True`` if the authority is valid. ``False`` otherwise. + :rtype: + bool + """ + warnings.warn("Please use rfc3986.validators.Validator instead. " + "This method will be eventually removed.", + DeprecationWarning) + try: + self.authority_info() + except exc.InvalidAuthority: + return False + + return validators.authority_is_valid( + self.authority, + host=self.host, + require=require, + ) + + def scheme_is_valid(self, require=False): + """Determine if the scheme component is valid. + + .. deprecated:: 1.1.0 + + Use the :class:`~rfc3986.validators.Validator` object instead. + + :param str require: Set to ``True`` to require the presence of this + component. + :returns: ``True`` if the scheme is valid. ``False`` otherwise. + :rtype: bool + """ + warnings.warn("Please use rfc3986.validators.Validator instead. " + "This method will be eventually removed.", + DeprecationWarning) + return validators.scheme_is_valid(self.scheme, require) + + def path_is_valid(self, require=False): + """Determine if the path component is valid. + + .. deprecated:: 1.1.0 + + Use the :class:`~rfc3986.validators.Validator` object instead. + + :param str require: Set to ``True`` to require the presence of this + component. + :returns: ``True`` if the path is valid. ``False`` otherwise. + :rtype: bool + """ + warnings.warn("Please use rfc3986.validators.Validator instead. " + "This method will be eventually removed.", + DeprecationWarning) + return validators.path_is_valid(self.path, require) + + def query_is_valid(self, require=False): + """Determine if the query component is valid. + + .. deprecated:: 1.1.0 + + Use the :class:`~rfc3986.validators.Validator` object instead. + + :param str require: Set to ``True`` to require the presence of this + component. + :returns: ``True`` if the query is valid. ``False`` otherwise. + :rtype: bool + """ + warnings.warn("Please use rfc3986.validators.Validator instead. " + "This method will be eventually removed.", + DeprecationWarning) + return validators.query_is_valid(self.query, require) + + def fragment_is_valid(self, require=False): + """Determine if the fragment component is valid. + + .. deprecated:: 1.1.0 + + Use the Validator object instead. + + :param str require: Set to ``True`` to require the presence of this + component. + :returns: ``True`` if the fragment is valid. ``False`` otherwise. + :rtype: bool + """ + warnings.warn("Please use rfc3986.validators.Validator instead. " + "This method will be eventually removed.", + DeprecationWarning) + return validators.fragment_is_valid(self.fragment, require) + + def normalized_equality(self, other_ref): + """Compare this URIReference to another URIReference. + + :param URIReference other_ref: (required), The reference with which + we're comparing. + :returns: ``True`` if the references are equal, ``False`` otherwise. + :rtype: bool + """ + return tuple(self.normalize()) == tuple(other_ref.normalize()) + + def resolve_with(self, base_uri, strict=False): + """Use an absolute URI Reference to resolve this relative reference. + + Assuming this is a relative reference that you would like to resolve, + use the provided base URI to resolve it. + + See http://tools.ietf.org/html/rfc3986#section-5 for more information. + + :param base_uri: Either a string or URIReference. It must be an + absolute URI or it will raise an exception. 
+ :returns: A new URIReference which is the result of resolving this + reference using ``base_uri``. + :rtype: :class:`URIReference` + :raises rfc3986.exceptions.ResolutionError: + If the ``base_uri`` is not an absolute URI. + """ + if not isinstance(base_uri, URIMixin): + base_uri = type(self).from_string(base_uri) + + if not base_uri.is_absolute(): + raise exc.ResolutionError(base_uri) + + # This is optional per + # http://tools.ietf.org/html/rfc3986#section-5.2.1 + base_uri = base_uri.normalize() + + # The reference we're resolving + resolving = self + + if not strict and resolving.scheme == base_uri.scheme: + resolving = resolving.copy_with(scheme=None) + + # http://tools.ietf.org/html/rfc3986#page-32 + if resolving.scheme is not None: + target = resolving.copy_with( + path=normalizers.normalize_path(resolving.path) + ) + else: + if resolving.authority is not None: + target = resolving.copy_with( + scheme=base_uri.scheme, + path=normalizers.normalize_path(resolving.path) + ) + else: + if resolving.path is None: + if resolving.query is not None: + query = resolving.query + else: + query = base_uri.query + target = resolving.copy_with( + scheme=base_uri.scheme, + authority=base_uri.authority, + path=base_uri.path, + query=query + ) + else: + if resolving.path.startswith('/'): + path = normalizers.normalize_path(resolving.path) + else: + path = normalizers.normalize_path( + misc.merge_paths(base_uri, resolving.path) + ) + target = resolving.copy_with( + scheme=base_uri.scheme, + authority=base_uri.authority, + path=path, + query=resolving.query + ) + return target + + def unsplit(self): + """Create a URI string from the components. + + :returns: The URI Reference reconstituted as a string. + :rtype: str + """ + # See http://tools.ietf.org/html/rfc3986#section-5.3 + result_list = [] + if self.scheme: + result_list.extend([self.scheme, ':']) + if self.authority: + result_list.extend(['//', self.authority]) + if self.path: + result_list.append(self.path) + if self.query is not None: + result_list.extend(['?', self.query]) + if self.fragment is not None: + result_list.extend(['#', self.fragment]) + return ''.join(result_list) + + def copy_with(self, scheme=misc.UseExisting, authority=misc.UseExisting, + path=misc.UseExisting, query=misc.UseExisting, + fragment=misc.UseExisting): + """Create a copy of this reference with the new components. + + :param str scheme: + (optional) The scheme to use for the new reference. + :param str authority: + (optional) The authority to use for the new reference. + :param str path: + (optional) The path to use for the new reference. + :param str query: + (optional) The query to use for the new reference. + :param str fragment: + (optional) The fragment to use for the new reference. + :returns: + New URIReference with provided components. + :rtype: + URIReference + """ + attributes = { + 'scheme': scheme, + 'authority': authority, + 'path': path, + 'query': query, + 'fragment': fragment, + } + for key, value in list(attributes.items()): + if value is misc.UseExisting: + del attributes[key] + uri = self._replace(**attributes) + uri.encoding = self.encoding + return uri diff --git a/src/urllib3/packages/rfc3986/abnf_regexp.py b/src/urllib3/packages/rfc3986/abnf_regexp.py --- a/src/urllib3/packages/rfc3986/abnf_regexp.py +++ b/src/urllib3/packages/rfc3986/abnf_regexp.py @@ -13,6 +13,8 @@ # limitations under the License. 
"""Module for the regular expressions crafted from ABNF.""" +import sys + # https://tools.ietf.org/html/rfc3986#page-13 GEN_DELIMS = GENERIC_DELIMITERS = ":/?#[]@" GENERIC_DELIMITERS_SET = set(GENERIC_DELIMITERS) @@ -25,7 +27,7 @@ ALPHA = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz' DIGIT = '0123456789' # https://tools.ietf.org/html/rfc3986#section-2.3 -UNRESERVED = UNRESERVED_CHARS = ALPHA + DIGIT + '._!-' +UNRESERVED = UNRESERVED_CHARS = ALPHA + DIGIT + r'._!-' UNRESERVED_CHARS_SET = set(UNRESERVED_CHARS) NON_PCT_ENCODED_SET = RESERVED_CHARS_SET.union(UNRESERVED_CHARS_SET) # We need to escape the '-' in this case: @@ -75,7 +77,7 @@ '%[0-9A-Fa-f]{2}', SUB_DELIMITERS_RE + UNRESERVED_RE ) # The pattern for an IPv4 address, e.g., 192.168.255.255, 127.0.0.1, -IPv4_RE = '([0-9]{1,3}.){3}[0-9]{1,3}' +IPv4_RE = r'([0-9]{1,3}\.){3}[0-9]{1,3}' # Hexadecimal characters used in each piece of an IPv6 address HEXDIG_RE = '[0-9A-Fa-f]{1,4}' # Least-significant 32 bits of an IPv6 address @@ -111,18 +113,18 @@ *variations ) -IPv_FUTURE_RE = 'v[0-9A-Fa-f]+.[%s]+' % ( +IPv_FUTURE_RE = r'v[0-9A-Fa-f]+\.[%s]+' % ( UNRESERVED_RE + SUB_DELIMITERS_RE + ':' ) - # RFC 6874 Zone ID ABNF ZONE_ID = '(?:[' + UNRESERVED_RE + ']|' + PCT_ENCODED + ')+' -IPv6_ADDRZ_RE = IPv6_RE + '%25' + ZONE_ID -IP_LITERAL_RE = r'\[({0}|(?:{1})|{2})\]'.format( - IPv6_RE, - IPv6_ADDRZ_RE, +IPv6_ADDRZ_RFC4007_RE = IPv6_RE + '(?:(?:%25|%)' + ZONE_ID + ')?' +IPv6_ADDRZ_RE = IPv6_RE + '(?:%25' + ZONE_ID + ')?' + +IP_LITERAL_RE = r'\[({0}|{1})\]'.format( + IPv6_ADDRZ_RFC4007_RE, IPv_FUTURE_RE, ) @@ -186,3 +188,80 @@ PATH_ROOTLESS, PATH_EMPTY, ) + +# ############### +# IRIs / RFC 3987 +# ############### + +# Only wide-unicode gets the high-ranges of UCSCHAR +if sys.maxunicode > 0xFFFF: # pragma: no cover + IPRIVATE = u'\uE000-\uF8FF\U000F0000-\U000FFFFD\U00100000-\U0010FFFD' + UCSCHAR_RE = ( + u'\u00A0-\uD7FF\uF900-\uFDCF\uFDF0-\uFFEF' + u'\U00010000-\U0001FFFD\U00020000-\U0002FFFD' + u'\U00030000-\U0003FFFD\U00040000-\U0004FFFD' + u'\U00050000-\U0005FFFD\U00060000-\U0006FFFD' + u'\U00070000-\U0007FFFD\U00080000-\U0008FFFD' + u'\U00090000-\U0009FFFD\U000A0000-\U000AFFFD' + u'\U000B0000-\U000BFFFD\U000C0000-\U000CFFFD' + u'\U000D0000-\U000DFFFD\U000E1000-\U000EFFFD' + ) +else: # pragma: no cover + IPRIVATE = u'\uE000-\uF8FF' + UCSCHAR_RE = ( + u'\u00A0-\uD7FF\uF900-\uFDCF\uFDF0-\uFFEF' + ) + +IUNRESERVED_RE = u'A-Za-z0-9\\._~\\-' + UCSCHAR_RE +IPCHAR = u'([' + IUNRESERVED_RE + SUB_DELIMITERS_RE + u':@]|%s)' % PCT_ENCODED + +isegments = { + 'isegment': IPCHAR + u'*', + # Non-zero length segment + 'isegment-nz': IPCHAR + u'+', + # Non-zero length segment without ":" + 'isegment-nz-nc': IPCHAR.replace(':', '') + u'+' +} + +IPATH_ROOTLESS = u'%(isegment-nz)s(/%(isegment)s)*' % isegments +IPATH_NOSCHEME = u'%(isegment-nz-nc)s(/%(isegment)s)*' % isegments +IPATH_ABSOLUTE = u'/(?:%s)?' 
% IPATH_ROOTLESS +IPATH_ABEMPTY = u'(?:/%(isegment)s)*' % isegments +IPATH_RE = u'^(?:%s|%s|%s|%s|%s)$' % ( + IPATH_ABEMPTY, IPATH_ABSOLUTE, IPATH_NOSCHEME, IPATH_ROOTLESS, PATH_EMPTY +) + +IREGULAR_NAME_RE = IREG_NAME = u'(?:{0}|[{1}])*'.format( + u'%[0-9A-Fa-f]{2}', SUB_DELIMITERS_RE + IUNRESERVED_RE +) + +IHOST_RE = IHOST_PATTERN = u'({0}|{1}|{2})'.format( + IREG_NAME, + IPv4_RE, + IP_LITERAL_RE, +) + +IUSERINFO_RE = u'^(?:[' + IUNRESERVED_RE + SUB_DELIMITERS_RE + u':]|%s)+' % ( + PCT_ENCODED +) + +IFRAGMENT_RE = (u'^(?:[/?:@' + IUNRESERVED_RE + SUB_DELIMITERS_RE + + u']|%s)*$' % PCT_ENCODED) +IQUERY_RE = (u'^(?:[/?:@' + IUNRESERVED_RE + SUB_DELIMITERS_RE + + IPRIVATE + u']|%s)*$' % PCT_ENCODED) + +IRELATIVE_PART_RE = u'(//%s%s|%s|%s|%s)' % ( + COMPONENT_PATTERN_DICT['authority'], + IPATH_ABEMPTY, + IPATH_ABSOLUTE, + IPATH_NOSCHEME, + PATH_EMPTY, +) + +IHIER_PART_RE = u'(//%s%s|%s|%s|%s)' % ( + COMPONENT_PATTERN_DICT['authority'], + IPATH_ABEMPTY, + IPATH_ABSOLUTE, + IPATH_ROOTLESS, + PATH_EMPTY, +) diff --git a/src/urllib3/packages/rfc3986/api.py b/src/urllib3/packages/rfc3986/api.py --- a/src/urllib3/packages/rfc3986/api.py +++ b/src/urllib3/packages/rfc3986/api.py @@ -19,6 +19,7 @@ and classes of rfc3986. """ +from .iri import IRIReference from .parseresult import ParseResult from .uri import URIReference @@ -37,6 +38,20 @@ def uri_reference(uri, encoding='utf-8'): return URIReference.from_string(uri, encoding) +def iri_reference(iri, encoding='utf-8'): + """Parse a IRI string into an IRIReference. + + This is a convenience function. You could achieve the same end by using + ``IRIReference.from_string(iri)``. + + :param str iri: The IRI which needs to be parsed into a reference. + :param str encoding: The encoding of the string provided + :returns: A parsed IRI + :rtype: :class:`IRIReference` + """ + return IRIReference.from_string(iri, encoding) + + def is_valid_uri(uri, encoding='utf-8', **kwargs): """Determine if the URI given is valid. diff --git a/src/urllib3/packages/rfc3986/exceptions.py b/src/urllib3/packages/rfc3986/exceptions.py --- a/src/urllib3/packages/rfc3986/exceptions.py +++ b/src/urllib3/packages/rfc3986/exceptions.py @@ -1,6 +1,8 @@ # -*- coding: utf-8 -*- """Exceptions module for rfc3986.""" +from . import compat + class RFC3986Exception(Exception): """Base class for all rfc3986 exception classes.""" @@ -14,7 +16,8 @@ class InvalidAuthority(RFC3986Exception): def __init__(self, authority): """Initialize the exception with the invalid authority.""" super(InvalidAuthority, self).__init__( - "The authority ({0}) is not valid.".format(authority)) + u"The authority ({0}) is not valid.".format( + compat.to_str(authority))) class InvalidPort(RFC3986Exception): @@ -109,3 +112,7 @@ def __init__(self, uri, *component_names): uri, self.components, ) + + +class MissingDependencyError(RFC3986Exception): + """Exception raised when an IRI is encoded without the 'idna' module.""" diff --git a/src/urllib3/packages/rfc3986/iri.py b/src/urllib3/packages/rfc3986/iri.py new file mode 100644 --- /dev/null +++ b/src/urllib3/packages/rfc3986/iri.py @@ -0,0 +1,143 @@ +"""Module containing the implementation of the IRIReference class.""" +# -*- coding: utf-8 -*- +# Copyright (c) 2014 Rackspace +# Copyright (c) 2015 Ian Stapleton Cordasco +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. +from collections import namedtuple + +from . import compat +from . import exceptions +from . import misc +from . import normalizers +from . import uri + + +try: + import idna +except ImportError: # pragma: no cover + idna = None + + +class IRIReference(namedtuple('IRIReference', misc.URI_COMPONENTS), + uri.URIMixin): + """Immutable object representing a parsed IRI Reference. + + Can be encoded into an URIReference object via the procedure + specified in RFC 3987 Section 3.1 + + .. note:: + The IRI submodule is a new interface and may possibly change in + the future. Check for changes to the interface when upgrading. + """ + + slots = () + + def __new__(cls, scheme, authority, path, query, fragment, + encoding='utf-8'): + """Create a new IRIReference.""" + ref = super(IRIReference, cls).__new__( + cls, + scheme or None, + authority or None, + path or None, + query, + fragment) + ref.encoding = encoding + return ref + + def __eq__(self, other): + """Compare this reference to another.""" + other_ref = other + if isinstance(other, tuple): + other_ref = self.__class__(*other) + elif not isinstance(other, IRIReference): + try: + other_ref = self.__class__.from_string(other) + except TypeError: + raise TypeError( + 'Unable to compare {0}() to {1}()'.format( + type(self).__name__, type(other).__name__)) + + # See http://tools.ietf.org/html/rfc3986#section-6.2 + return tuple(self) == tuple(other_ref) + + def _match_subauthority(self): + return misc.ISUBAUTHORITY_MATCHER.match(self.authority) + + @classmethod + def from_string(cls, iri_string, encoding='utf-8'): + """Parse a IRI reference from the given unicode IRI string. + + :param str iri_string: Unicode IRI to be parsed into a reference. + :param str encoding: The encoding of the string provided + :returns: :class:`IRIReference` or subclass thereof + """ + iri_string = compat.to_str(iri_string, encoding) + + split_iri = misc.IRI_MATCHER.match(iri_string).groupdict() + return cls( + split_iri['scheme'], split_iri['authority'], + normalizers.encode_component(split_iri['path'], encoding), + normalizers.encode_component(split_iri['query'], encoding), + normalizers.encode_component(split_iri['fragment'], encoding), + encoding, + ) + + def encode(self, idna_encoder=None): + """Encode an IRIReference into a URIReference instance. + + If the ``idna`` module is installed or the ``rfc3986[idna]`` + extra is used then unicode characters in the IRI host + component will be encoded with IDNA2008. + + :param idna_encoder: + Function that encodes each part of the host component + If not given will raise an exception if the IRI + contains a host component. 
+ :rtype: uri.URIReference + :returns: A URI reference + """ + authority = self.authority + if authority: + if idna_encoder is None: + if idna is None: # pragma: no cover + raise exceptions.MissingDependencyError( + "Could not import the 'idna' module " + "and the IRI hostname requires encoding" + ) + else: + def idna_encoder(x): + try: + return idna.encode(x, strict=True, std3_rules=True).lower() + except idna.IDNAError: + raise exceptions.InvalidAuthority(self.authority) + + authority = "" + if self.host: + authority = ".".join([compat.to_str(idna_encoder(part)) + for part in self.host.split(".")]) + + if self.userinfo is not None: + authority = (normalizers.encode_component( + self.userinfo, self.encoding) + '@' + authority) + + if self.port is not None: + authority += ":" + str(self.port) + + return uri.URIReference(self.scheme, + authority, + path=self.path, + query=self.query, + fragment=self.fragment, + encoding=self.encoding) diff --git a/src/urllib3/packages/rfc3986/misc.py b/src/urllib3/packages/rfc3986/misc.py --- a/src/urllib3/packages/rfc3986/misc.py +++ b/src/urllib3/packages/rfc3986/misc.py @@ -58,7 +58,14 @@ abnf_regexp.PORT_RE)) +HOST_MATCHER = re.compile('^' + abnf_regexp.HOST_RE + '$') IPv4_MATCHER = re.compile('^' + abnf_regexp.IPv4_RE + '$') +IPv6_MATCHER = re.compile(r'^\[' + abnf_regexp.IPv6_ADDRZ_RFC4007_RE + r'\]$') + +# Used by host validator +IPv6_NO_RFC4007_MATCHER = re.compile(r'^\[%s\]$' % ( + abnf_regexp.IPv6_ADDRZ_RE +)) # Matcher used to validate path components PATH_MATCHER = re.compile(abnf_regexp.PATH_RE) @@ -76,7 +83,8 @@ SCHEME_MATCHER = re.compile('^{0}$'.format(abnf_regexp.SCHEME_RE)) RELATIVE_REF_MATCHER = re.compile(r'^%s(\?%s)?(#%s)?$' % ( - abnf_regexp.RELATIVE_PART_RE, abnf_regexp.QUERY_RE, + abnf_regexp.RELATIVE_PART_RE, + abnf_regexp.QUERY_RE, abnf_regexp.FRAGMENT_RE, )) @@ -87,6 +95,42 @@ abnf_regexp.QUERY_RE[1:-1], )) +# ############### +# IRIs / RFC 3987 +# ############### + +IRI_MATCHER = re.compile(abnf_regexp.URL_PARSING_RE, re.UNICODE) + +ISUBAUTHORITY_MATCHER = re.compile(( + u'^(?:(?P<userinfo>{0})@)?' # iuserinfo + u'(?P<host>{1})' # ihost + u':?(?P<port>{2})?$' # port + ).format(abnf_regexp.IUSERINFO_RE, + abnf_regexp.IHOST_RE, + abnf_regexp.PORT_RE), re.UNICODE) + + +IHOST_MATCHER = re.compile('^' + abnf_regexp.IHOST_RE + '$', re.UNICODE) + +IPATH_MATCHER = re.compile(abnf_regexp.IPATH_RE, re.UNICODE) + +IQUERY_MATCHER = re.compile(abnf_regexp.IQUERY_RE, re.UNICODE) + +IFRAGMENT_MATCHER = re.compile(abnf_regexp.IFRAGMENT_RE, re.UNICODE) + + +RELATIVE_IRI_MATCHER = re.compile(u'^%s(?:\\?%s)?(?:%s)?$' % ( + abnf_regexp.IRELATIVE_PART_RE, + abnf_regexp.IQUERY_RE, + abnf_regexp.IFRAGMENT_RE +), re.UNICODE) + +ABSOLUTE_IRI_MATCHER = re.compile(u'^%s:%s(?:\\?%s)?$' % ( + abnf_regexp.COMPONENT_PATTERN_DICT['scheme'], + abnf_regexp.IHIER_PART_RE, + abnf_regexp.IQUERY_RE[1:-1] +), re.UNICODE) + # Path merger as defined in http://tools.ietf.org/html/rfc3986#section-5.2.3 def merge_paths(base_uri, relative_path): diff --git a/src/urllib3/packages/rfc3986/normalizers.py b/src/urllib3/packages/rfc3986/normalizers.py --- a/src/urllib3/packages/rfc3986/normalizers.py +++ b/src/urllib3/packages/rfc3986/normalizers.py @@ -49,6 +49,21 @@ def normalize_password(password): def normalize_host(host): """Normalize a host string.""" + if misc.IPv6_MATCHER.match(host): + percent = host.find('%') + if percent != -1: + percent_25 = host.find('%25') + + # Replace RFC 4007 IPv6 Zone ID delimiter '%' with '%25' + # from RFC 6874. 
If the host is '[<IPv6 addr>%25]' then we + # assume RFC 4007 and normalize to '[<IPV6 addr>%2525]' + if percent_25 == -1 or percent < percent_25 or \ + (percent == percent_25 and percent_25 == len(host) - 4): + host = host.replace('%', '%25', 1) + + # Don't normalize the casing of the Zone ID + return host[:percent].lower() + host[percent:] + return host.lower() @@ -147,6 +162,6 @@ def encode_component(uri_component, encoding): or (byte_ord < 128 and byte.decode() in misc.NON_PCT_ENCODED)): encoded_uri.extend(byte) continue - encoded_uri.extend('%{0:02x}'.format(byte_ord).encode()) + encoded_uri.extend('%{0:02x}'.format(byte_ord).encode().upper()) return encoded_uri.decode(encoding) diff --git a/src/urllib3/packages/rfc3986/uri.py b/src/urllib3/packages/rfc3986/uri.py --- a/src/urllib3/packages/rfc3986/uri.py +++ b/src/urllib3/packages/rfc3986/uri.py @@ -15,16 +15,14 @@ # See the License for the specific language governing permissions and # limitations under the License. from collections import namedtuple -import warnings from . import compat -from . import exceptions as exc from . import misc from . import normalizers -from . import validators +from ._mixin import URIMixin -class URIReference(namedtuple('URIReference', misc.URI_COMPONENTS)): +class URIReference(namedtuple('URIReference', misc.URI_COMPONENTS), URIMixin): """Immutable object representing a parsed URI Reference. .. note:: @@ -116,228 +114,6 @@ def __eq__(self, other): naive_equality = tuple(self) == tuple(other_ref) return naive_equality or self.normalized_equality(other_ref) - @classmethod - def from_string(cls, uri_string, encoding='utf-8'): - """Parse a URI reference from the given unicode URI string. - - :param str uri_string: Unicode URI to be parsed into a reference. - :param str encoding: The encoding of the string provided - :returns: :class:`URIReference` or subclass thereof - """ - uri_string = compat.to_str(uri_string, encoding) - - split_uri = misc.URI_MATCHER.match(uri_string).groupdict() - return cls( - split_uri['scheme'], split_uri['authority'], - normalizers.encode_component(split_uri['path'], encoding), - normalizers.encode_component(split_uri['query'], encoding), - normalizers.encode_component(split_uri['fragment'], encoding), - encoding, - ) - - def authority_info(self): - """Return a dictionary with the ``userinfo``, ``host``, and ``port``. - - If the authority is not valid, it will raise a - :class:`~rfc3986.exceptions.InvalidAuthority` Exception. - - :returns: - ``{'userinfo': 'username:password', 'host': 'www.example.com', - 'port': '80'}`` - :rtype: dict - :raises rfc3986.exceptions.InvalidAuthority: - If the authority is not ``None`` and can not be parsed. - """ - if not self.authority: - return {'userinfo': None, 'host': None, 'port': None} - - match = misc.SUBAUTHORITY_MATCHER.match(self.authority) - - if match is None: - # In this case, we have an authority that was parsed from the URI - # Reference, but it cannot be further parsed by our - # misc.SUBAUTHORITY_MATCHER. In this case it must not be a valid - # authority. - raise exc.InvalidAuthority(self.authority.encode(self.encoding)) - - # We had a match, now let's ensure that it is actually a valid host - # address if it is IPv4 - matches = match.groupdict() - host = matches.get('host') - - if (host and misc.IPv4_MATCHER.match(host) and not - validators.valid_ipv4_host_address(host)): - # If we have a host, it appears to be IPv4 and it does not have - # valid bytes, it is an InvalidAuthority. 
- raise exc.InvalidAuthority(self.authority.encode(self.encoding)) - - return matches - - @property - def host(self): - """If present, a string representing the host.""" - try: - authority = self.authority_info() - except exc.InvalidAuthority: - return None - return authority['host'] - - @property - def port(self): - """If present, the port extracted from the authority.""" - try: - authority = self.authority_info() - except exc.InvalidAuthority: - return None - return authority['port'] - - @property - def userinfo(self): - """If present, the userinfo extracted from the authority.""" - try: - authority = self.authority_info() - except exc.InvalidAuthority: - return None - return authority['userinfo'] - - def is_absolute(self): - """Determine if this URI Reference is an absolute URI. - - See http://tools.ietf.org/html/rfc3986#section-4.3 for explanation. - - :returns: ``True`` if it is an absolute URI, ``False`` otherwise. - :rtype: bool - """ - return bool(misc.ABSOLUTE_URI_MATCHER.match(self.unsplit())) - - def is_valid(self, **kwargs): - """Determine if the URI is valid. - - .. deprecated:: 1.1.0 - - Use the :class:`~rfc3986.validators.Validator` object instead. - - :param bool require_scheme: Set to ``True`` if you wish to require the - presence of the scheme component. - :param bool require_authority: Set to ``True`` if you wish to require - the presence of the authority component. - :param bool require_path: Set to ``True`` if you wish to require the - presence of the path component. - :param bool require_query: Set to ``True`` if you wish to require the - presence of the query component. - :param bool require_fragment: Set to ``True`` if you wish to require - the presence of the fragment component. - :returns: ``True`` if the URI is valid. ``False`` otherwise. - :rtype: bool - """ - warnings.warn("Please use rfc3986.validators.Validator instead. " - "This method will be eventually removed.", - DeprecationWarning) - validators = [ - (self.scheme_is_valid, kwargs.get('require_scheme', False)), - (self.authority_is_valid, kwargs.get('require_authority', False)), - (self.path_is_valid, kwargs.get('require_path', False)), - (self.query_is_valid, kwargs.get('require_query', False)), - (self.fragment_is_valid, kwargs.get('require_fragment', False)), - ] - return all(v(r) for v, r in validators) - - def authority_is_valid(self, require=False): - """Determine if the authority component is valid. - - .. deprecated:: 1.1.0 - - Use the :class:`~rfc3986.validators.Validator` object instead. - - :param bool require: - Set to ``True`` to require the presence of this component. - :returns: - ``True`` if the authority is valid. ``False`` otherwise. - :rtype: - bool - """ - warnings.warn("Please use rfc3986.validators.Validator instead. " - "This method will be eventually removed.", - DeprecationWarning) - try: - self.authority_info() - except exc.InvalidAuthority: - return False - - return validators.authority_is_valid( - self.authority, - host=self.host, - require=require, - ) - - def scheme_is_valid(self, require=False): - """Determine if the scheme component is valid. - - .. deprecated:: 1.1.0 - - Use the :class:`~rfc3986.validators.Validator` object instead. - - :param str require: Set to ``True`` to require the presence of this - component. - :returns: ``True`` if the scheme is valid. ``False`` otherwise. - :rtype: bool - """ - warnings.warn("Please use rfc3986.validators.Validator instead. 
" - "This method will be eventually removed.", - DeprecationWarning) - return validators.scheme_is_valid(self.scheme, require) - - def path_is_valid(self, require=False): - """Determine if the path component is valid. - - .. deprecated:: 1.1.0 - - Use the :class:`~rfc3986.validators.Validator` object instead. - - :param str require: Set to ``True`` to require the presence of this - component. - :returns: ``True`` if the path is valid. ``False`` otherwise. - :rtype: bool - """ - warnings.warn("Please use rfc3986.validators.Validator instead. " - "This method will be eventually removed.", - DeprecationWarning) - return validators.path_is_valid(self.path, require) - - def query_is_valid(self, require=False): - """Determine if the query component is valid. - - .. deprecated:: 1.1.0 - - Use the :class:`~rfc3986.validators.Validator` object instead. - - :param str require: Set to ``True`` to require the presence of this - component. - :returns: ``True`` if the query is valid. ``False`` otherwise. - :rtype: bool - """ - warnings.warn("Please use rfc3986.validators.Validator instead. " - "This method will be eventually removed.", - DeprecationWarning) - return validators.query_is_valid(self.query, require) - - def fragment_is_valid(self, require=False): - """Determine if the fragment component is valid. - - .. deprecated:: 1.1.0 - - Use the Validator object instead. - - :param str require: Set to ``True`` to require the presence of this - component. - :returns: ``True`` if the fragment is valid. ``False`` otherwise. - :rtype: bool - """ - warnings.warn("Please use rfc3986.validators.Validator instead. " - "This method will be eventually removed.", - DeprecationWarning) - return validators.fragment_is_valid(self.fragment, require) - def normalize(self): """Normalize this reference as described in Section 6.2.2. @@ -357,136 +133,21 @@ def normalize(self): normalizers.normalize_fragment(self.fragment), self.encoding) - def normalized_equality(self, other_ref): - """Compare this URIReference to another URIReference. + @classmethod + def from_string(cls, uri_string, encoding='utf-8'): + """Parse a URI reference from the given unicode URI string. - :param URIReference other_ref: (required), The reference with which - we're comparing. - :returns: ``True`` if the references are equal, ``False`` otherwise. - :rtype: bool + :param str uri_string: Unicode URI to be parsed into a reference. + :param str encoding: The encoding of the string provided + :returns: :class:`URIReference` or subclass thereof """ - return tuple(self.normalize()) == tuple(other_ref.normalize()) - - def resolve_with(self, base_uri, strict=False): - """Use an absolute URI Reference to resolve this relative reference. - - Assuming this is a relative reference that you would like to resolve, - use the provided base URI to resolve it. - - See http://tools.ietf.org/html/rfc3986#section-5 for more information. + uri_string = compat.to_str(uri_string, encoding) - :param base_uri: Either a string or URIReference. It must be an - absolute URI or it will raise an exception. - :returns: A new URIReference which is the result of resolving this - reference using ``base_uri``. - :rtype: :class:`URIReference` - :raises rfc3986.exceptions.ResolutionError: - If the ``base_uri`` is not an absolute URI. 
- """ - if not isinstance(base_uri, URIReference): - base_uri = URIReference.from_string(base_uri) - - if not base_uri.is_absolute(): - raise exc.ResolutionError(base_uri) - - # This is optional per - # http://tools.ietf.org/html/rfc3986#section-5.2.1 - base_uri = base_uri.normalize() - - # The reference we're resolving - resolving = self - - if not strict and resolving.scheme == base_uri.scheme: - resolving = resolving.copy_with(scheme=None) - - # http://tools.ietf.org/html/rfc3986#page-32 - if resolving.scheme is not None: - target = resolving.copy_with( - path=normalizers.normalize_path(resolving.path) - ) - else: - if resolving.authority is not None: - target = resolving.copy_with( - scheme=base_uri.scheme, - path=normalizers.normalize_path(resolving.path) - ) - else: - if resolving.path is None: - if resolving.query is not None: - query = resolving.query - else: - query = base_uri.query - target = resolving.copy_with( - scheme=base_uri.scheme, - authority=base_uri.authority, - path=base_uri.path, - query=query - ) - else: - if resolving.path.startswith('/'): - path = normalizers.normalize_path(resolving.path) - else: - path = normalizers.normalize_path( - misc.merge_paths(base_uri, resolving.path) - ) - target = resolving.copy_with( - scheme=base_uri.scheme, - authority=base_uri.authority, - path=path, - query=resolving.query - ) - return target - - def unsplit(self): - """Create a URI string from the components. - - :returns: The URI Reference reconstituted as a string. - :rtype: str - """ - # See http://tools.ietf.org/html/rfc3986#section-5.3 - result_list = [] - if self.scheme: - result_list.extend([self.scheme, ':']) - if self.authority: - result_list.extend(['//', self.authority]) - if self.path: - result_list.append(self.path) - if self.query is not None: - result_list.extend(['?', self.query]) - if self.fragment is not None: - result_list.extend(['#', self.fragment]) - return ''.join(result_list) - - def copy_with(self, scheme=misc.UseExisting, authority=misc.UseExisting, - path=misc.UseExisting, query=misc.UseExisting, - fragment=misc.UseExisting): - """Create a copy of this reference with the new components. - - :param str scheme: - (optional) The scheme to use for the new reference. - :param str authority: - (optional) The authority to use for the new reference. - :param str path: - (optional) The path to use for the new reference. - :param str query: - (optional) The query to use for the new reference. - :param str fragment: - (optional) The fragment to use for the new reference. - :returns: - New URIReference with provided components. 
- :rtype: - URIReference - """ - attributes = { - 'scheme': scheme, - 'authority': authority, - 'path': path, - 'query': query, - 'fragment': fragment, - } - for key, value in list(attributes.items()): - if value is misc.UseExisting: - del attributes[key] - uri = self._replace(**attributes) - uri.encoding = self.encoding - return uri + split_uri = misc.URI_MATCHER.match(uri_string).groupdict() + return cls( + split_uri['scheme'], split_uri['authority'], + normalizers.encode_component(split_uri['path'], encoding), + normalizers.encode_component(split_uri['query'], encoding), + normalizers.encode_component(split_uri['fragment'], encoding), + encoding, + ) diff --git a/src/urllib3/packages/rfc3986/validators.py b/src/urllib3/packages/rfc3986/validators.py --- a/src/urllib3/packages/rfc3986/validators.py +++ b/src/urllib3/packages/rfc3986/validators.py @@ -304,8 +304,28 @@ def authority_is_valid(authority, host=None, require=False): bool """ validated = is_valid(authority, misc.SUBAUTHORITY_MATCHER, require) + if validated and host is not None: + return host_is_valid(host, require) + return validated + + +def host_is_valid(host, require=False): + """Determine if the host string is valid. + + :param str host: + The host to validate. + :param bool require: + (optional) Specify if host must not be None. + :returns: + ``True`` if valid, ``False`` otherwise + :rtype: + bool + """ + validated = is_valid(host, misc.HOST_MATCHER, require) if validated and host is not None and misc.IPv4_MATCHER.match(host): return valid_ipv4_host_address(host) + elif validated and host is not None and misc.IPv6_MATCHER.match(host): + return misc.IPv6_NO_RFC4007_MATCHER.match(host) is not None return validated @@ -395,7 +415,9 @@ def subauthority_component_is_valid(uri, component): # If we can parse the authority into sub-components and we're not # validating the port, we can assume it's valid. - if component != 'port': + if component == 'host': + return host_is_valid(subauthority_dict['host']) + elif component != 'port': return True try: diff --git a/src/urllib3/util/url.py b/src/urllib3/util/url.py --- a/src/urllib3/util/url.py +++ b/src/urllib3/util/url.py @@ -4,7 +4,8 @@ from ..exceptions import LocationParseError from ..packages import six, rfc3986 -from ..packages.rfc3986.exceptions import RFC3986Exception +from ..packages.rfc3986.exceptions import RFC3986Exception, ValidationError +from ..packages.rfc3986.validators import Validator url_attrs = ['scheme', 'auth', 'host', 'port', 'path', 'query', 'fragment'] @@ -14,12 +15,12 @@ NORMALIZABLE_SCHEMES = ('http', 'https', None) # Regex for detecting URLs with schemes. RFC 3986 Section 3.1 -SCHEME_REGEX = re.compile(r"^[a-zA-Z][a-zA-Z0-9+\-.]*://") +SCHEME_REGEX = re.compile(r"^(?:[a-zA-Z][a-zA-Z0-9+\-]*:|/)") class Url(namedtuple('Url', url_attrs)): """ - Datastructure for representing an HTTP URL. Used as a return value for + Data structure for representing an HTTP URL. Used as a return value for :func:`parse_url`. Both the scheme and host are normalized as they are both case-insensitive according to RFC 3986. 
""" @@ -29,10 +30,8 @@ def __new__(cls, scheme=None, auth=None, host=None, port=None, path=None, query=None, fragment=None): if path and not path.startswith('/'): path = '/' + path - if scheme: + if scheme is not None: scheme = scheme.lower() - if host and scheme in NORMALIZABLE_SCHEMES: - host = host.lower() return super(Url, cls).__new__(cls, scheme, auth, host, port, path, query, fragment) @@ -78,23 +77,23 @@ def url(self): 'http://username:[email protected]:80/path?query#fragment' """ scheme, auth, host, port, path, query, fragment = self - url = '' + url = u'' # We use "is not None" we want things to happen with empty strings (or 0 port) if scheme is not None: - url += scheme + '://' + url += scheme + u'://' if auth is not None: - url += auth + '@' + url += auth + u'@' if host is not None: url += host if port is not None: - url += ':' + str(port) + url += u':' + str(port) if path is not None: url += path if query is not None: - url += '?' + query + url += u'?' + query if fragment is not None: - url += '#' + fragment + url += u'#' + fragment return url @@ -104,7 +103,7 @@ def __str__(self): def split_first(s, delims): """ - Deprecated. No longer used by parse_url(). + .. deprecated:: 1.25 Given a string and an iterable of delimiters, split on the first found delimiter. Return two split parts and the matched delimiter. @@ -161,6 +160,8 @@ def parse_url(url): return Url() is_string = not isinstance(url, six.binary_type) + if not is_string: + url = url.decode("utf-8") # RFC 3986 doesn't like URLs that have a host but don't start # with a scheme and we support URLs like that so we need to @@ -171,22 +172,53 @@ def parse_url(url): url = "//" + url try: - parse_result = rfc3986.urlparse(url, encoding="utf-8") + iri_ref = rfc3986.IRIReference.from_string(url, encoding="utf-8") except (ValueError, RFC3986Exception): + six.raise_from(LocationParseError(url), None) + + def idna_encode(name): + if name and any([ord(x) > 128 for x in name]): + try: + import idna + except ImportError: + raise LocationParseError("Unable to parse URL without the 'idna' module") + try: + return idna.encode(name, strict=True, std3_rules=True).lower() + except idna.IDNAError: + raise LocationParseError(u"Name '%s' is not a valid IDNA label" % name) + return name + + has_authority = iri_ref.authority is not None + uri_ref = iri_ref.encode(idna_encoder=idna_encode) + + # rfc3986 strips the authority if it's invalid + if has_authority and uri_ref.authority is None: raise LocationParseError(url) - # RFC 3986 doesn't assert ports must be non-negative. - if parse_result.port and parse_result.port < 0: - raise LocationParseError(url) + # Only normalize schemes we understand to not break http+unix + # or other schemes that don't follow RFC 3986. + if uri_ref.scheme is None or uri_ref.scheme.lower() in NORMALIZABLE_SCHEMES: + uri_ref = uri_ref.normalize() + + # Validate all URIReference components and ensure that all + # components that were set before are still set after + # normalization has completed. + validator = Validator() + try: + validator.check_validity_of( + *validator.COMPONENT_NAMES + ).validate(uri_ref) + except ValidationError: + six.raise_from(LocationParseError(url), None) # For the sake of backwards compatibility we put empty # string values for path if there are any defined values # beyond the path in the URL. # TODO: Remove this when we break backwards compatibility. 
- path = parse_result.path + path = uri_ref.path if not path: - if (parse_result.query is not None - or parse_result.fragment is not None): + if (uri_ref.query is not None + or uri_ref.fragment is not None): path = "" else: path = None @@ -201,13 +233,13 @@ def to_input_type(x): return x return Url( - scheme=to_input_type(parse_result.scheme), - auth=to_input_type(parse_result.userinfo), - host=to_input_type(parse_result.hostname), - port=parse_result.port, + scheme=to_input_type(uri_ref.scheme), + auth=to_input_type(uri_ref.userinfo), + host=to_input_type(uri_ref.host), + port=int(uri_ref.port) if uri_ref.port is not None else None, path=to_input_type(path), - query=to_input_type(parse_result.query), - fragment=to_input_type(parse_result.fragment) + query=to_input_type(uri_ref.query), + fragment=to_input_type(uri_ref.fragment) )
diff --git a/test/test_util.py b/test/test_util.py --- a/test/test_util.py +++ b/test/test_util.py @@ -131,12 +131,24 @@ def test_invalid_host(self, location): with pytest.raises(LocationParseError): get_host(location) + @pytest.mark.parametrize('url', [ + 'http://user\\@google.com', + 'http://google\\.com', + 'user\\@google.com', + 'http://google.com#fragment#', + 'http://user@[email protected]/', + ]) + def test_invalid_url(self, url): + with pytest.raises(LocationParseError): + parse_url(url) + @pytest.mark.parametrize('url, expected_normalized_url', [ ('HTTP://GOOGLE.COM/MAIL/', 'http://google.com/MAIL/'), ('HTTP://JeremyCline:[email protected]:8080/', 'http://JeremyCline:[email protected]:8080/'), ('HTTPS://Example.Com/?Key=Value', 'https://example.com/?Key=Value'), ('Https://Example.Com/#Fragment', 'https://example.com/#Fragment'), + ('[::Ff%etH0%Ff]/%ab%Af', '[::ff%25etH0%Ff]/%AB%AF'), ]) def test_parse_url_normalization(self, url, expected_normalized_url): """Assert parse_url normalizes the scheme/host, and only the scheme/host""" @@ -155,8 +167,7 @@ def test_parse_url_normalization(self, url, expected_normalized_url): # Path/query/fragment ('', Url()), ('/', Url(path='/')), - ('/abc/../def', Url(path="/abc/../def")), - ('#?/!google.com/?foo#bar', Url(path='', fragment='?/!google.com/?foo#bar')), + ('#?/!google.com/?foo', Url(path='', fragment='?/!google.com/?foo')), ('/foo', Url(path='/foo')), ('/foo?bar=baz', Url(path='/foo', query='bar=baz')), ('/foo?bar=baz#banana?apple/orange', Url(path='/foo', @@ -173,10 +184,10 @@ def test_parse_url_normalization(self, url, expected_normalized_url): # Auth ('http://foo:bar@localhost/', Url('http', auth='foo:bar', host='localhost', path='/')), ('http://foo@localhost/', Url('http', auth='foo', host='localhost', path='/')), - ('http://foo:bar@baz@localhost/', Url('http', - auth='foo:bar@baz', - host='localhost', - path='/')), + ('http://foo:bar@localhost/', Url('http', + auth='foo:bar', + host='localhost', + path='/')), # Unicode type (Python 2.x) (u'http://foo:bar@localhost/', Url(u'http', @@ -194,6 +205,9 @@ def test_parse_url_normalization(self, url, expected_normalized_url): ('?', Url(path='', query='')), ('#', Url(path='', fragment='')), + # Path normalization + ('/abc/../def', Url(path="/def")), + # Empty Port ('http://google.com:', Url('http', host='google.com')), ('http://google.com:/', Url('http', host='google.com', path='/')), @@ -211,6 +225,23 @@ def test_parse_url(self, url, expected_url): def test_unparse_url(self, url, expected_url): assert url == expected_url.url + @pytest.mark.parametrize( + ['url', 'expected_url'], + [ + # RFC 3986 5.2.4 + ('/abc/../def', Url(path="/def")), + ('/..', Url(path="/")), + ('/./abc/./def/', Url(path='/abc/def/')), + ('/.', Url(path='/')), + ('/./', Url(path='/')), + ('/abc/./.././d/././e/.././f/./../../ghi', Url(path='/ghi')) + ] + ) + def test_parse_and_normalize_url_paths(self, url, expected_url): + actual_url = parse_url(url) + assert actual_url == expected_url + assert actual_url.url == expected_url.url + def test_parse_url_invalid_IPv6(self): with pytest.raises(LocationParseError): parse_url('[::1') @@ -260,12 +291,36 @@ def test_netloc(self, url, expected_netloc): # CVE-2016-5699 ("http://127.0.0.1%0d%0aConnection%3a%20keep-alive", - Url("http", host="127.0.0.1%0d%0aConnection%3a%20keep-alive")), + Url("http", host="127.0.0.1%0d%0aconnection%3a%20keep-alive")), # NodeJS unicode -> double dot (u"http://google.com/\uff2e\uff2e/abc", Url("http", host="google.com", - 
path='/%ef%bc%ae%ef%bc%ae/abc')) + path='/%EF%BC%AE%EF%BC%AE/abc')), + + # Scheme without :// + ("javascript:a='@google.com:12345/';alert(0)", + Url(scheme="javascript", + path="a='@google.com:12345/';alert(0)")), + + ("//google.com/a/b/c", Url(host="google.com", path="/a/b/c")), + + # International URLs + (u'http://ヒ:キ@ヒ.abc.ニ/ヒ?キ#ワ', Url(u'http', + host=u'xn--pdk.abc.xn--idk', + auth=u'%E3%83%92:%E3%82%AD', + path=u'/%E3%83%92', + query=u'%E3%82%AD', + fragment=u'%E3%83%AF')), + + # Injected headers (CVE-2016-5699, CVE-2019-9740, CVE-2019-9947) + ("10.251.0.83:7777?a=1 HTTP/1.1\r\nX-injected: header", + Url(host='10.251.0.83', port=7777, path='', + query='a=1%20HTTP/1.1%0D%0AX-injected:%20header')), + + ("http://127.0.0.1:6379?\r\nSET test failure12\r\n:8080/test/?test=a", + Url(scheme='http', host='127.0.0.1', port=6379, path='', + query='%0D%0ASET%20test%20failure12%0D%0A:8080/test/?test=a')), ] @pytest.mark.parametrize("url, expected_url", url_vulnerabilities)
URLs without // don't parse correctly

URLs like `mailto:[email protected]` don't parse correctly due to our logic on when to add a scheme delimiter to a URL.
Hm, what _should_ that parse to? Per RFC 3986 it should be Scheme: mailto, Userinfo: johndoe, Hostname: example.com. We have the same problem with `javascript:`.

@sethmlarson I think you may be confused. `:` marks the end of the scheme, so you're correct that `mailto` is the scheme. `//` delineates the beginning of the authority, however, and the authority includes the userinfo and hostname and port. Effectively, without `//` the text following the `:` is actually part of the path, if I remember correctly.

rfc3986 has specific tests covering this, by the way: https://github.com/python-hyper/rfc3986/blob/0d82fee4b21482ae6cf4337dc8216e4d1ce507ea/tests/test_uri.py#L13 You can check everywhere that that particular fixture is used. https://github.com/python-hyper/rfc3986/blob/0d82fee4b21482ae6cf4337dc8216e4d1ce507ea/tests/test_uri.py#L47

You're right @sigmavirus24, our scheme-detecting regex is wrong but the above example shouldn't parse that way. Thanks for correcting me before I go too off the rails. :)
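A minimal sketch of the parse described above (not part of the original thread, and using the `johndoe@example.com` address reconstructed from the comment): without `//` there is no authority component, so everything after the scheme ends up in the path. The standard library behaves the same way:

```python
# Illustrative only: RFC 3986 treats "mailto:johndoe@example.com" as
# scheme + path, because an authority component requires a leading "//".
from urllib.parse import urlsplit

parts = urlsplit("mailto:johndoe@example.com")
print(parts.scheme)  # 'mailto'
print(parts.netloc)  # ''   -> no authority, hence no userinfo/host/port
print(parts.path)    # 'johndoe@example.com'
```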
2019-01-27T23:40:09Z
[]
[]
urllib3/urllib3
1,583
urllib3__urllib3-1583
[ "1582" ]
2001292974c0c98e1f06a51494babd5c601285d8
diff --git a/src/urllib3/util/ssl_.py b/src/urllib3/util/ssl_.py --- a/src/urllib3/util/ssl_.py +++ b/src/urllib3/util/ssl_.py @@ -44,11 +44,10 @@ def _const_compare_digest_backport(a, b): # Borrow rfc3986's regular expressions for IPv4 # and IPv6 addresses for use in is_ipaddress() _IP_ADDRESS_REGEX = re.compile( - r'^(?:%s|%s|%s|%s)$' % ( + r'^(?:%s|%s|%s)$' % ( abnf_regexp.IPv4_RE, abnf_regexp.IPv6_RE, - abnf_regexp.IPv6_ADDRZ_RE, - abnf_regexp.IPv_FUTURE_RE + abnf_regexp.IPv6_ADDRZ_RFC4007_RE ) ) @@ -370,7 +369,8 @@ def ssl_wrap_socket(sock, keyfile=None, certfile=None, cert_reqs=None, def is_ipaddress(hostname): - """Detects whether the hostname given is an IP address. + """Detects whether the hostname given is an IPv4 or IPv6 address. + Also detects IPv6 addresses with Zone IDs. :param str hostname: Hostname to examine. :return: True if the hostname is an IP address, False otherwise.
diff --git a/test/test_ssl.py b/test/test_ssl.py --- a/test/test_ssl.py +++ b/test/test_ssl.py @@ -5,11 +5,21 @@ @pytest.mark.parametrize('addr', [ + # IPv6 '::1', '::', + 'FE80::8939:7684:D84b:a5A4%251', + + # IPv4 '127.0.0.1', '8.8.8.8', - b'127.0.0.1' + b'127.0.0.1', + + # IPv6 w/ Zone IDs + 'FE80::8939:7684:D84b:a5A4%251', + b'FE80::8939:7684:D84b:a5A4%251', + 'FE80::8939:7684:D84b:a5A4%19', + b'FE80::8939:7684:D84b:a5A4%19' ]) def test_is_ipaddress_true(addr): assert ssl_.is_ipaddress(addr) @@ -17,7 +27,9 @@ def test_is_ipaddress_true(addr): @pytest.mark.parametrize('addr', [ 'www.python.org', - b'www.python.org' + b'www.python.org', + 'v2.sg.media-imdb.com', + b'v2.sg.media-imdb.com' ]) def test_is_ipaddress_false(addr): assert not ssl_.is_ipaddress(addr)
SSLV3_ALERT_HANDSHAKE_FAILURE after upgrading to v1.25.x SSL handshake failure after upgrading from 1.24.2 to 1.25 or 1.25.1. Travis link: https://travis-ci.org/h3llrais3r/Auto-Subliminal/jobs/524114951 Downgrading to version 1.24.2 fixes the problem. Travis link: https://travis-ci.org/h3llrais3r/Auto-Subliminal/builds/524029312 ``` Traceback (most recent call last): File "/home/travis/build/h3llrais3r/Auto-Subliminal/lib/urllib3/connectionpool.py", line 603, in urlopen chunked=chunked) File "/home/travis/build/h3llrais3r/Auto-Subliminal/lib/urllib3/connectionpool.py", line 344, in _make_request self._validate_conn(conn) File "/home/travis/build/h3llrais3r/Auto-Subliminal/lib/urllib3/connectionpool.py", line 843, in _validate_conn conn.connect() File "/home/travis/build/h3llrais3r/Auto-Subliminal/lib/urllib3/connection.py", line 350, in connect ssl_context=context) File "/home/travis/build/h3llrais3r/Auto-Subliminal/lib/urllib3/util/ssl_.py", line 369, in ssl_wrap_socket return context.wrap_socket(sock) File "/opt/python/3.6.3/lib/python3.6/ssl.py", line 407, in wrap_socket _context=self, _session=session) File "/opt/python/3.6.3/lib/python3.6/ssl.py", line 814, in __init__ self.do_handshake() File "/opt/python/3.6.3/lib/python3.6/ssl.py", line 1068, in do_handshake self._sslobj.do_handshake() File "/opt/python/3.6.3/lib/python3.6/ssl.py", line 689, in do_handshake self._sslobj.do_handshake() ssl.SSLError: [SSL: SSLV3_ALERT_HANDSHAKE_FAILURE] sslv3 alert handshake failure (_ssl.c:777) During handling of the above exception, another exception occurred: Traceback (most recent call last): File "/home/travis/build/h3llrais3r/Auto-Subliminal/lib/requests/adapters.py", line 449, in send timeout=timeout File "/home/travis/build/h3llrais3r/Auto-Subliminal/lib/urllib3/connectionpool.py", line 641, in urlopen _stacktrace=sys.exc_info()[2]) File "/home/travis/build/h3llrais3r/Auto-Subliminal/lib/urllib3/util/retry.py", line 399, in increment raise MaxRetryError(_pool, url, error or ResponseError(cause)) urllib3.exceptions.MaxRetryError: HTTPSConnectionPool(host='v2.sg.media-imdb.com', port=443): Max retries exceeded with url: /suggests/s/Southpaw.json (Caused by SSLError(SSLError(1, '[SSL: SSLV3_ALERT_HANDSHAKE_FAILURE] sslv3 alert handshake failure (_ssl.c:777)'),)) During handling of the above exception, another exception occurred: Traceback (most recent call last): File "/home/travis/build/h3llrais3r/Auto-Subliminal/autosubliminal/indexer.py", line 351, in get_imdb_id_and_year api_obj = self._search(title, year) File "/home/travis/build/h3llrais3r/Auto-Subliminal/autosubliminal/indexer.py", line 273, in _search search_results = ImdbFacade().search_for_title(title) File "/home/travis/build/h3llrais3r/Auto-Subliminal/lib/imdbpie/facade.py", line 87, in search_for_title for result in self._client.search_for_title(query): File "/home/travis/build/h3llrais3r/Auto-Subliminal/lib/imdbpie/imdbpie.py", line 182, in search_for_title search_results = self._suggest_search(title) File "/home/travis/build/h3llrais3r/Auto-Subliminal/lib/imdbpie/imdbpie.py", line 160, in _suggest_search search_results = self._get(url=url, query=query_encoded) File "/home/travis/build/h3llrais3r/Auto-Subliminal/lib/imdbpie/imdbpie.py", line 308, in _get resp = self.session.get(url, headers=headers, params=params) File "/home/travis/build/h3llrais3r/Auto-Subliminal/lib/requests/sessions.py", line 546, in get return self.request('GET', url, **kwargs) File "/home/travis/build/h3llrais3r/Auto-Subliminal/lib/requests/sessions.py", line 
533, in request resp = self.send(prep, **send_kwargs) File "/home/travis/build/h3llrais3r/Auto-Subliminal/lib/requests/sessions.py", line 646, in send r = adapter.send(request, **kwargs) File "/home/travis/build/h3llrais3r/Auto-Subliminal/lib/requests/adapters.py", line 514, in send raise SSLError(e, request=request) requests.exceptions.SSLError: HTTPSConnectionPool(host='v2.sg.media-imdb.com', port=443): Max retries exceeded with url: /suggests/s/Southpaw.json (Caused by SSLError(SSLError(1, '[SSL: SSLV3_ALERT_HANDSHAKE_FAILURE] sslv3 alert handshake failure (_ssl.c:777)'),)) ```
Are you providing a proper CA cert for that ConnectionPool? One of the changes in 1.25 is that `cert_reqs` defaults to `CERT_REQUIRED`.

I'm not using a CA cert. I'm using your `urllib3` lib via the `requests` lib, and up until now I haven't had to provide any certificate at all. Any idea on how to fix this without a CA cert, other than reverting to the previous release? Or perhaps this is rather an issue for `requests` to be compatible with version 1.25.x?

I figured out this issue. It's detecting the hostname `v2.sg.media-imdb.com` as an "IP address" due to the hostname matching the IPvFuture specification in RFC 3986. That hostname is technically a valid IPvFuture address and we don't send SNI for IP addresses. Maybe we need to change that behavior to only include IPv4 and IPv6 addresses. In the past we would have used SNI for this hostname because we didn't detect IPvFuture addresses.

I think I'm going to change `is_ipaddress()` to only detect IPv4 and IPv6 addresses. Thank you so much for reporting this issue. :)
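A hedged illustration of the misdetection described above (not from the original thread). The regex is transcribed from the IPvFuture rule in RFC 3986, which the 1.25.0 `is_ipaddress()` check included and which the patch above removes:

```python
# Illustrative only: IPvFuture = "v" 1*HEXDIG "." 1*( unreserved / sub-delims / ":" )
# "v2.sg.media-imdb.com" happens to fit that shape, so it was treated as an
# IP address, SNI was skipped, and the TLS handshake failed.
import re

IPV_FUTURE = r"v[0-9A-Fa-f]+\.[A-Za-z0-9\-._~!$&'()*+,;=:]+"

print(bool(re.fullmatch(IPV_FUTURE, "v2.sg.media-imdb.com")))  # True  -> misdetected
print(bool(re.fullmatch(IPV_FUTURE, "www.python.org")))        # False
```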
2019-04-24T20:11:04Z
[]
[]
urllib3/urllib3
1,586
urllib3__urllib3-1586
[ "1585" ]
a0d2bfd6098fe996fca74b80cbe473236187157c
diff --git a/src/urllib3/util/url.py b/src/urllib3/util/url.py --- a/src/urllib3/util/url.py +++ b/src/urllib3/util/url.py @@ -6,6 +6,7 @@ from ..packages import six, rfc3986 from ..packages.rfc3986.exceptions import RFC3986Exception, ValidationError from ..packages.rfc3986.validators import Validator +from ..packages.rfc3986 import abnf_regexp, normalizers, compat, misc url_attrs = ['scheme', 'auth', 'host', 'port', 'path', 'query', 'fragment'] @@ -17,6 +18,9 @@ # Regex for detecting URLs with schemes. RFC 3986 Section 3.1 SCHEME_REGEX = re.compile(r"^(?:[a-zA-Z][a-zA-Z0-9+\-]*:|/)") +PATH_CHARS = abnf_regexp.UNRESERVED_CHARS_SET | abnf_regexp.SUB_DELIMITERS_SET | {':', '@', '/'} +QUERY_CHARS = FRAGMENT_CHARS = PATH_CHARS | {'?'} + class Url(namedtuple('Url', url_attrs)): """ @@ -136,6 +140,37 @@ def split_first(s, delims): return s[:min_idx], s[min_idx + 1:], min_delim +def _encode_invalid_chars(component, allowed_chars, encoding='utf-8'): + """Percent-encodes a URI component without reapplying + onto an already percent-encoded component. Based on + rfc3986.normalizers.encode_component() + """ + if component is None: + return component + + # Try to see if the component we're encoding is already percent-encoded + # so we can skip all '%' characters but still encode all others. + percent_encodings = len(normalizers.PERCENT_MATCHER.findall( + compat.to_str(component, encoding))) + + uri_bytes = component.encode('utf-8', 'surrogatepass') + is_percent_encoded = percent_encodings == uri_bytes.count(b'%') + + encoded_component = bytearray() + + for i in range(0, len(uri_bytes)): + # Will return a single character bytestring on both Python 2 & 3 + byte = uri_bytes[i:i+1] + byte_ord = ord(byte) + if ((is_percent_encoded and byte == b'%') + or (byte_ord < 128 and byte.decode() in allowed_chars)): + encoded_component.extend(byte) + continue + encoded_component.extend('%{0:02x}'.format(byte_ord).encode().upper()) + + return encoded_component.decode(encoding) + + def parse_url(url): """ Given a url, return a parsed :class:`.Url` namedtuple. 
Best-effort is @@ -160,8 +195,6 @@ def parse_url(url): return Url() is_string = not isinstance(url, six.binary_type) - if not is_string: - url = url.decode("utf-8") # RFC 3986 doesn't like URLs that have a host but don't start # with a scheme and we support URLs like that so we need to @@ -171,11 +204,6 @@ def parse_url(url): if not SCHEME_REGEX.search(url): url = "//" + url - try: - iri_ref = rfc3986.IRIReference.from_string(url, encoding="utf-8") - except (ValueError, RFC3986Exception): - six.raise_from(LocationParseError(url), None) - def idna_encode(name): if name and any([ord(x) > 128 for x in name]): try: @@ -188,8 +216,18 @@ def idna_encode(name): raise LocationParseError(u"Name '%s' is not a valid IDNA label" % name) return name - has_authority = iri_ref.authority is not None - uri_ref = iri_ref.encode(idna_encoder=idna_encode) + try: + split_iri = misc.IRI_MATCHER.match(compat.to_str(url)).groupdict() + iri_ref = rfc3986.IRIReference( + split_iri['scheme'], split_iri['authority'], + _encode_invalid_chars(split_iri['path'], PATH_CHARS), + _encode_invalid_chars(split_iri['query'], QUERY_CHARS), + _encode_invalid_chars(split_iri['fragment'], FRAGMENT_CHARS) + ) + has_authority = iri_ref.authority is not None + uri_ref = iri_ref.encode(idna_encoder=idna_encode) + except (ValueError, RFC3986Exception): + return six.raise_from(LocationParseError(url), None) # rfc3986 strips the authority if it's invalid if has_authority and uri_ref.authority is None: @@ -209,7 +247,7 @@ def idna_encode(name): *validator.COMPONENT_NAMES ).validate(uri_ref) except ValidationError: - six.raise_from(LocationParseError(url), None) + return six.raise_from(LocationParseError(url), None) # For the sake of backwards compatibility we put empty # string values for path if there are any defined values
diff --git a/test/test_util.py b/test/test_util.py --- a/test/test_util.py +++ b/test/test_util.py @@ -135,8 +135,15 @@ def test_invalid_host(self, location): 'http://user\\@google.com', 'http://google\\.com', 'user\\@google.com', - 'http://google.com#fragment#', 'http://user@[email protected]/', + + # Invalid IDNA labels + u'http://\uD7FF.com', + u'http://❤️', + + # Unicode surrogates + u'http://\uD800.com', + u'http://\uDC00.com', ]) def test_invalid_url(self, url): with pytest.raises(LocationParseError): @@ -149,6 +156,15 @@ def test_invalid_url(self, url): ('HTTPS://Example.Com/?Key=Value', 'https://example.com/?Key=Value'), ('Https://Example.Com/#Fragment', 'https://example.com/#Fragment'), ('[::Ff%etH0%Ff]/%ab%Af', '[::ff%25etH0%Ff]/%AB%AF'), + + # Invalid characters for the query/fragment getting encoded + ('http://google.com/p[]?parameter[]=\"hello\"#fragment#', + 'http://google.com/p%5B%5D?parameter%5B%5D=%22hello%22#fragment%23'), + + # Percent encoding isn't applied twice despite '%' being invalid + # but the percent encoding is still normalized. + ('http://google.com/p%5B%5d?parameter%5b%5D=%22hello%22#fragment%23', + 'http://google.com/p%5B%5D?parameter%5B%5D=%22hello%22#fragment%23') ]) def test_parse_url_normalization(self, url, expected_normalized_url): """Assert parse_url normalizes the scheme/host, and only the scheme/host""" @@ -214,7 +230,14 @@ def test_parse_url_normalization(self, url, expected_normalized_url): # Uppercase IRI (u'http://Königsgäßchen.de/straße', - Url('http', host='xn--knigsgchen-b4a3dun.de', path='/stra%C3%9Fe')) + Url('http', host='xn--knigsgchen-b4a3dun.de', path='/stra%C3%9Fe')), + + # Unicode Surrogates + (u'http://google.com/\uD800', Url('http', host='google.com', path='%ED%A0%80')), + (u'http://google.com?q=\uDC00', + Url('http', host='google.com', path='', query='q=%ED%B0%80')), + (u'http://google.com#\uDC00', + Url('http', host='google.com', path='', fragment='%ED%B0%80')), ] @pytest.mark.parametrize(
parse_url no longer accepts [] in the URL With urllib3 1.25.1: ``` >>> parse_url('https://example.com/somevar[]') Traceback (most recent call last): File "<stdin>", line 1, in <module> File "/home/testrunner/testvenv/lib/python2.7/site-packages/urllib3/util/url.py", line 212, in parse_url six.raise_from(LocationParseError(url), None) File "/home/testrunner/testvenv/lib/python2.7/site-packages/urllib3/packages/six.py", line 718, in raise_from raise value urllib3.exceptions.LocationParseError: Failed to parse: https://example.com/somevar[] ``` Using 1.24.2: ``` >>> parse_url('https://example.com/somevar[]') Url(scheme='https', auth=None, host='example.com', port=None, path='/somevar[]', query=None, fragment=None) ``` ``` >>> parse_url('https://example.com/?somevar["whatever"]') Url(scheme='https', auth=None, host='example.com', port=None, path='/', query='somevar["whatever"]', fragment=None) ``` This is used by various bits of software (ElasticSearch for instance): ``` >>> parse_url('http://example.com/api/search/?grouping=datasets&q=dontcare&projection=["variables.nonexistingfield"]') Url(scheme='http', auth=None, host='example.com', port=None, path='/api/search/', query='grouping=datasets&q=dontcare&projection=["variables.nonexistingfield"]', fragment=None) ```
Per [RFC 3986 Appendix A](https://tools.ietf.org/html/rfc3986) the only valid characters in the path component are `unreserved`, `pct-encoded`, `sub-delims`, `:`, and `@` which doesn't include `[` or `]`. Can you see if percent-encoding the square braces works as you expect? Maybe we need to do this as a part of normalization? Okay, this time not in the path: ``` >>> parse_url('http://example.com/api/search/?grouping=datasets&q=dontcare&projection=["variables.nonexistingfield"]') Traceback (most recent call last): File "<stdin>", line 1, in <module> File "/home/testrunner/testvenv/lib/python2.7/site-packages/urllib3/util/url.py", line 212, in parse_url six.raise_from(LocationParseError(url), None) File "/home/testrunner/testvenv/lib/python2.7/site-packages/urllib3/packages/six.py", line 718, in raise_from raise value urllib3.exceptions.LocationParseError: Failed to parse: http://example.com/api/search/?grouping=datasets&q=dontcare&projection=["variables.nonexistingfield"] ``` You'll find plenty of PHP sites out there that use: ``` >>> parse_url('http://www.example.com/foo.php?bar[]=1&bar[]=2&bar[]=3') Traceback (most recent call last): File "<stdin>", line 1, in <module> File "/home/testrunner/testvenv/lib/python2.7/site-packages/urllib3/util/url.py", line 212, in parse_url six.raise_from(LocationParseError(url), None) File "/home/testrunner/testvenv/lib/python2.7/site-packages/urllib3/packages/six.py", line 718, in raise_from raise value urllib3.exceptions.LocationParseError: Failed to parse: http://www.example.com/foo.php?bar[]=1&bar[]=2&bar[]=3 ``` This creates an array in `$_GET` in PHP for instance. I understand that this may not be valid as a URL to enter directly, but it is data we have to parse and `parse_url` usually works wonders for that. How would you recommend I take a user provided string, pull apart the pieces to URL encode the brackets and the like, because `urllib.quote` on the whole thing will create this mess: ``` >>> urllib.quote('http://www.example.com/foo.php?bar[]=1&bar[]=2&bar[]=3') 'http%3A//www.example.com/foo.php%3Fbar%5B%5D%3D1%26bar%5B%5D%3D2%26bar%5B%5D%3D3' >>> parse_url(urllib.quote('http://www.example.com/foo.php?bar[]=1&bar[]=2&bar[]=3')) Url(scheme=None, auth=None, host='http%3a', port=None, path='//www.example.com/foo.php%3Fbar%5B%5D%3D1%26bar%5B%5D%3D2%26bar%5B%5D%3D3', query=None, fragment=None) ``` If you're looking for an alternative to `parse_url` you can check out the library we're using for URL parsing ([`rfc3986`](https://github.com/python-hyper/rfc3986)) and then not do any validation on the path or query components? Then you can encode the brackets yourself using potentially `urllib.quote()` or something like it. I think we should consider percent-encoding invalid characters in the path, query, and fragment components automatically as a part of normalization though. Thoughts @urllib3/maintainers? Do we automatically percent encode any other stuff? On Thu, Apr 25, 2019, 10:44 AM Seth Michael Larson <[email protected]> wrote: > I think we should consider percent-encoding invalid characters in the > path, query, and fragment components automatically as a part of > normalization though. Thoughts @urllib3/maintainers > <https://github.com/orgs/urllib3/teams/maintainers>? > > — > You are receiving this because you are on a team that was mentioned. 
> Reply to this email directly, view it on GitHub > <https://github.com/urllib3/urllib3/issues/1585#issuecomment-486770925>, > or mute the thread > <https://github.com/notifications/unsubscribe-auth/AAB5I42ZPAN7D7WOUAPQSFLPSHUWRANCNFSM4HIPAGXQ> > . > No we don't currently. But previously we just passed through whatever the user gave us in the path and query components into the target. We want to do validation at least for control characters at a minimum but I don't know if anything else can hurt us here. So I guess another option is to not normalize and validate only for the bare minimum otherwise pass-through. I would like to point out that we found the issue in urllib3 because it was accidentally upgraded by pip and requests just passes the URL through unmodified. So we would potentially have to intercept the URL before passing it to requests (as well as deal with any `Location` headers/redirects ourselves) just to modify it before passing it to requests which passes it to `urllib3`. Hrm. I'm okay with being a little lax on path and query validation. Ultimately it's up to the server to decide if a request is valid or not. On Thu, Apr 25, 2019, 10:52 AM Seth Michael Larson <[email protected]> wrote: > No we don't currently. But previously we just passed through whatever the > user gave us in the path and query components into the target. We want to > do validation at least for control characters at a minimum but I don't know > if anything else can hurt us here. > > — > You are receiving this because you are on a team that was mentioned. > Reply to this email directly, view it on GitHub > <https://github.com/urllib3/urllib3/issues/1585#issuecomment-486773960>, > or mute the thread > <https://github.com/notifications/unsubscribe-auth/AAB5I466G2X3IJSBHQC66OTPSHVWHANCNFSM4HIPAGXQ> > . > @theacodes Actually scratch that we totally do inside of `IRIReference.from_string()`. The path, query and fragment components all receive some percent encoding just not for `[` or `]`.
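A short sketch of the behaviour the patch above settles on (not part of the thread, and assuming a urllib3 build that includes this change): invalid characters in the path, query, and fragment are percent-encoded during parsing rather than rejected, so the PHP-style URLs from the report parse again.

```python
# Illustrative only: brackets are normalized to their percent-encoded form.
from urllib3.util.url import parse_url

url = parse_url("http://www.example.com/foo.php?bar[]=1&bar[]=2")
print(url.query)  # 'bar%5B%5D=1&bar%5B%5D=2'
print(url.url)    # 'http://www.example.com/foo.php?bar%5B%5D=1&bar%5B%5D=2'
```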
2019-04-25T18:12:09Z
[]
[]
urllib3/urllib3
1,608
urllib3__urllib3-1608
[ "1603" ]
0a97553f48505066d27f5119e8bbead8c9cc0a77
diff --git a/src/urllib3/connection.py b/src/urllib3/connection.py --- a/src/urllib3/connection.py +++ b/src/urllib3/connection.py @@ -249,12 +249,23 @@ def connect(self): conn = self._new_conn() self._prepare_conn(conn) + # Wrap socket using verification with the root certs in + # trusted_root_certs + default_ssl_context = False if self.ssl_context is None: + default_ssl_context = True self.ssl_context = create_urllib3_context( - ssl_version=resolve_ssl_version(None), - cert_reqs=resolve_cert_reqs(None), + ssl_version=resolve_ssl_version(self.ssl_version), + cert_reqs=resolve_cert_reqs(self.cert_reqs), ) + # Try to load OS default certs if none are given. + # Works well on Windows (requires Python3.4+) + context = self.ssl_context + if (not self.ca_certs and not self.ca_cert_dir and default_ssl_context + and hasattr(context, 'load_default_certs')): + context.load_default_certs() + self.sock = ssl_wrap_socket( sock=conn, keyfile=self.key_file, @@ -331,7 +342,9 @@ def connect(self): # Wrap socket using verification with the root certs in # trusted_root_certs + default_ssl_context = False if self.ssl_context is None: + default_ssl_context = True self.ssl_context = create_urllib3_context( ssl_version=resolve_ssl_version(self.ssl_version), cert_reqs=resolve_cert_reqs(self.cert_reqs), @@ -339,6 +352,13 @@ def connect(self): context = self.ssl_context context.verify_mode = resolve_cert_reqs(self.cert_reqs) + + # Try to load OS default certs if none are given. + # Works well on Windows (requires Python3.4+) + if (not self.ca_certs and not self.ca_cert_dir and default_ssl_context + and hasattr(context, 'load_default_certs')): + context.load_default_certs() + self.sock = ssl_wrap_socket( sock=conn, keyfile=self.key_file,
diff --git a/test/with_dummyserver/test_socketlevel.py b/test/with_dummyserver/test_socketlevel.py --- a/test/with_dummyserver/test_socketlevel.py +++ b/test/with_dummyserver/test_socketlevel.py @@ -35,6 +35,7 @@ class MimeToolMessage(object): import select import socket import ssl +import mock import pytest @@ -1184,6 +1185,83 @@ def socket_handler(listener): response = pool.urlopen('GET', '/', retries=1) self.assertEqual(response.data, b'Success') + def test_ssl_load_default_certs_when_empty(self): + def socket_handler(listener): + sock = listener.accept()[0] + ssl_sock = ssl.wrap_socket(sock, + server_side=True, + keyfile=DEFAULT_CERTS['keyfile'], + certfile=DEFAULT_CERTS['certfile'], + ca_certs=DEFAULT_CA) + + buf = b'' + while not buf.endswith(b'\r\n\r\n'): + buf += ssl_sock.recv(65536) + + ssl_sock.send(b'HTTP/1.1 200 OK\r\n' + b'Content-Type: text/plain\r\n' + b'Content-Length: 5\r\n\r\n' + b'Hello') + + ssl_sock.close() + sock.close() + + context = mock.create_autospec(ssl_.SSLContext) + context.load_default_certs = mock.Mock() + context.options = 0 + + with mock.patch("urllib3.util.ssl_.SSLContext", lambda *_, **__: context): + + self._start_server(socket_handler) + pool = HTTPSConnectionPool(self.host, self.port) + self.addCleanup(pool.close) + + with self.assertRaises(MaxRetryError): + pool.request("GET", "/", timeout=0.01) + + context.load_default_certs.assert_called_with() + + def test_ssl_dont_load_default_certs_when_given(self): + def socket_handler(listener): + sock = listener.accept()[0] + ssl_sock = ssl.wrap_socket(sock, + server_side=True, + keyfile=DEFAULT_CERTS['keyfile'], + certfile=DEFAULT_CERTS['certfile'], + ca_certs=DEFAULT_CA) + + buf = b'' + while not buf.endswith(b'\r\n\r\n'): + buf += ssl_sock.recv(65536) + + ssl_sock.send(b'HTTP/1.1 200 OK\r\n' + b'Content-Type: text/plain\r\n' + b'Content-Length: 5\r\n\r\n' + b'Hello') + + ssl_sock.close() + sock.close() + + context = mock.create_autospec(ssl_.SSLContext) + context.load_default_certs = mock.Mock() + context.options = 0 + + with mock.patch("urllib3.util.ssl_.SSLContext", lambda *_, **__: context): + for kwargs in [{"ca_certs": "/a"}, + {"ca_cert_dir": "/a"}, + {"ca_certs": "a", "ca_cert_dir": "a"}, + {"ssl_context": context}]: + + self._start_server(socket_handler) + + pool = HTTPSConnectionPool(self.host, self.port, **kwargs) + self.addCleanup(pool.close) + + with self.assertRaises(MaxRetryError): + pool.request("GET", "/", timeout=0.01) + + context.load_default_certs.assert_not_called() + class TestErrorWrapping(SocketDummyServerTestCase):
Default certificates are never loaded since version 1.25

Opening an HTTPS URL with ``urllib3>=1.25`` results in ``SSLError(SSLCertVerificationError(1, '[SSL: CERTIFICATE_VERIFY_FAILED] certificate verify failed: unable to get local issuer certificate (_ssl.c:1056)')))``; an example to reproduce is the following:

```
import urllib3
http = urllib3.PoolManager()
http.request('GET', 'https://google.com/')
```

The reason is that default system certificates are never loaded because ``HTTPSConnection`` or ``VerifiedHTTPSConnection`` creates an SSL context without loading certificates and passes it to ``ssl_wrap_socket``, where default certificates are not loaded either, because the passed ``ssl_context`` is not ``None``.

Related lines of code in ``src/urllib3/connection.py``:

```
if self.ssl_context is None:
    self.ssl_context = create_urllib3_context(
        ssl_version=resolve_ssl_version(None),
        cert_reqs=resolve_cert_reqs(None),
    )

self.sock = ssl_wrap_socket(
    sock=conn,
    keyfile=self.key_file,
    certfile=self.cert_file,
    key_password=self.key_password,
    ssl_context=self.ssl_context,
    server_hostname=self.server_hostname
)
```

and in ``src/urllib3/util/ssl_.py``:

```
elif ssl_context is None and hasattr(context, 'load_default_certs'):
    # try to load OS default certs; works well on Windows (require Python3.4+)
    context.load_default_certs()
```
Yep, this looks incorrect. Unfortunately I can't currently think of a good way to handle this and maintain all our interfaces+contracts without calling `load_default_certs()` inside of `HTTPSConnection`. cc: @urllib3/maintainers

Basically this boils down to `create_urllib3_context()` not knowing about `ca_cert` or `ca_cert_dir`, and so it can't make the call whether to load the default system certs or not. The loading of certs is all done in `ssl_wrap_socket()`. (imo `create_urllib3_context()` should probably be the place to load certs)
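A hedged workaround sketch for affected users (not from the thread), assuming the `certifi` package is installed: supplying an explicit CA bundle sidesteps the missing `load_default_certs()` call.

```python
# Illustrative only: pass a CA bundle so verification succeeds even though the
# default SSLContext never loads the operating system's certificates.
import certifi
import urllib3

http = urllib3.PoolManager(cert_reqs="CERT_REQUIRED", ca_certs=certifi.where())
resp = http.request("GET", "https://google.com/")
print(resp.status)
```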
2019-05-16T15:46:03Z
[]
[]
urllib3/urllib3
1,657
urllib3__urllib3-1657
[ "1650" ]
5b047b645f5f93900d5e2fc31230848c25eb1f5f
diff --git a/src/urllib3/connection.py b/src/urllib3/connection.py --- a/src/urllib3/connection.py +++ b/src/urllib3/connection.py @@ -429,7 +429,7 @@ def _match_hostname(cert, asserted_hostname): try: match_hostname(cert, asserted_hostname) except CertificateError as e: - log.error( + log.warning( "Certificate did not match expected hostname: %s. " "Certificate: %s", asserted_hostname, cert,
diff --git a/test/test_connection.py b/test/test_connection.py --- a/test/test_connection.py +++ b/test/test_connection.py @@ -32,7 +32,7 @@ def test_match_hostname_mismatch(self): cert = {"subjectAltName": [("DNS", "foo")]} asserted_hostname = "bar" try: - with mock.patch("urllib3.connection.log.error") as mock_log: + with mock.patch("urllib3.connection.log.warning") as mock_log: _match_hostname(cert, asserted_hostname) except CertificateError as e: assert "hostname 'bar' doesn't match 'foo'" in str(e)
urllib3 should not log an error on certificate mismatch

Logging an error and then reraising the exception does not achieve much, as the application code will have to handle the exception anyway. It's better to let the application decide if it's a noteworthy event or not. I personally don't think that a certificate mismatch is different from a server not responding or the other dozens of failures that can happen while making an HTTP request (where urllib3 does not log an error).

In my case the "error" is reported to Sentry but there is nothing I can do as a client to fix it, which is usually a pretty good indicator that the log level should be changed to something else. If it was up to me I would change it to an `info` level.

https://github.com/urllib3/urllib3/blob/2c055b4a60affa3529660b928fff375fde3993f7/src/urllib3/connection.py#L428-L440
I agree that logging this error is probably not necessary, especially because we raise an exception right afterwards. Would be interesting to dig up the commit where this line was added and look at the reasoning.

So the reality is that not everyone is using urllib3 directly and `_peer_cert` isn't always accessible to users. Having a log message that provides details can help users debug this without having to jump through hoops, modify random code they don't own, etc. The log level, I'm fine with changing, but it also seems like this is only a problem because the Sentry client is intercepting logs, which has other ramifications (i.e., it's probably bypassing any kind of log-scrubbing and potentially leaking any data that hasn't yet been sanitized).

Should we be doing more `log.X()` calls? Connections can fail in so many different areas; do you have insight into why this one is different than the rest?

So I think this was added by SurveyMonkey / @msabramo because they were trying to pin down which server in a group was serving an invalid certificate. I'm also on the fence about logging done by libraries. In some cases it can be super helpful, in others, incredibly unhelpful. I'm 99% certain that OpenStack silences all urllib3 logs by default because the connection pools are _incredibly_ noisy otherwise. I think applications should control what logs and when, but, with libraries architected the way Requests and urllib3 are architected, logging makes perfect sense to have so that a user can flip a switch when needed. I don't know where else to add logging since people only ever complain about its presence and rarely complain about its absence.

The main problem here is that the logging framework does not allow a final user to opt out of this specific error in a good way. I could:

- Disable the logger `urllib3.connection` or set it to `CRITICAL`, but then I'd miss all other messages that could be important, now and in a future release of `urllib3`.
- Create a `logging.Filter` that parses this specific log record and silences it; very fragile, as it could break any time the message is changed.

The only solutions I can think of would be:

- Lower this specific message to `WARNING`; it would still show up on most logging setups (by default, levels of `WARNING` and above are enabled) while still allowing an opt-out by configuring `urllib3.connection` to `ERROR`.
- Move all these nice-to-have-but-not-really-errors to their own named logger so that application code can easily silence them while keeping other valuable logs.
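A brief sketch of the two opt-out options mentioned above (illustrative, not from the thread); the matched message text is taken from `_match_hostname()` in `connection.py`, so the filter breaks if that wording ever changes.

```python
import logging

# Option 1: raise the level of the specific logger
# (also hides its other messages).
logging.getLogger("urllib3.connection").setLevel(logging.CRITICAL)

# Option 2: drop just this record; fragile if the message wording changes.
class _DropHostnameMismatch(logging.Filter):
    def filter(self, record):
        return "Certificate did not match expected hostname" not in record.getMessage()

logging.getLogger("urllib3.connection").addFilter(_DropHostnameMismatch())
```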
2019-07-26T16:49:31Z
[]
[]
urllib3/urllib3
1,665
urllib3__urllib3-1665
[ "800" ]
cf6ab7aaa4211d75603ace1fcaa1e7f7f772458e
diff --git a/src/urllib3/util/response.py b/src/urllib3/util/response.py --- a/src/urllib3/util/response.py +++ b/src/urllib3/util/response.py @@ -1,4 +1,5 @@ from __future__ import absolute_import +from email.errors import StartBoundaryNotFoundDefect, MultipartInvariantViolationDefect from ..packages.six.moves import http_client as httplib from ..exceptions import HeaderParsingError @@ -66,6 +67,25 @@ def assert_header_parsing(headers): if isinstance(payload, (bytes, str)): unparsed_data = payload + if defects: + # httplib is assuming a response body is available + # when parsing headers even when httplib only sends + # header data to parse_headers() This results in + # defects on multipart responses in particular. + # See: https://github.com/urllib3/urllib3/issues/800 + + # So we ignore the following defects: + # - StartBoundaryNotFoundDefect: + # The claimed start boundary was never found. + # - MultipartInvariantViolationDefect: + # A message claimed to be a multipart but no subparts were found. + defects = [ + defect + for defect in defects + if not isinstance( + defect, (StartBoundaryNotFoundDefect, MultipartInvariantViolationDefect) + ) + ] if defects or unparsed_data: raise HeaderParsingError(defects=defects, unparsed_data=unparsed_data)
diff --git a/test/test_util.py b/test/test_util.py
--- a/test/test_util.py
+++ b/test/test_util.py
@@ -748,6 +748,20 @@ def test_assert_header_parsing_throws_typeerror_with_non_headers(self, headers):
         with pytest.raises(TypeError):
             assert_header_parsing(headers)
 
+    @onlyPy3
+    def test_assert_header_parsing_no_error_on_multipart(self):
+        from http import client
+
+        header_msg = io.BytesIO()
+        header_msg.write(
+            b'Content-Type: multipart/encrypted;protocol="application/'
+            b'HTTP-SPNEGO-session-encrypted";boundary="Encrypted Boundary"'
+            b"\nServer: Microsoft-HTTPAPI/2.0\nDate: Fri, 16 Aug 2019 19:28:01 GMT"
+            b"\nContent-Length: 1895\n\n\n"
+        )
+        header_msg.seek(0)
+        assert_header_parsing(client.parse_headers(header_msg))
+
 
 class TestUtilSSL(object):
     """Test utils that use an SSL backend."""
diff --git a/test/with_dummyserver/test_socketlevel.py b/test/with_dummyserver/test_socketlevel.py
--- a/test/with_dummyserver/test_socketlevel.py
+++ b/test/with_dummyserver/test_socketlevel.py
@@ -1808,3 +1808,40 @@
         ) as pool:
             pool.urlopen("GET", "/not_found", preload_content=False)
         assert pool.num_connections == 1
+
+
+class TestMultipartResponse(SocketDummyServerTestCase):
+    def test_multipart_assert_header_parsing_no_defects(self):
+        def socket_handler(listener):
+            for _ in range(2):
+                sock = listener.accept()[0]
+                while not sock.recv(65536).endswith(b"\r\n\r\n"):
+                    pass
+
+                sock.sendall(
+                    b"HTTP/1.1 404 Not Found\r\n"
+                    b"Server: example.com\r\n"
+                    b"Content-Type: multipart/mixed; boundary=36eeb8c4e26d842a\r\n"
+                    b"Content-Length: 73\r\n"
+                    b"\r\n"
+                    b"--36eeb8c4e26d842a\r\n"
+                    b"Content-Type: text/plain\r\n"
+                    b"\r\n"
+                    b"1\r\n"
+                    b"--36eeb8c4e26d842a--\r\n",
+                )
+                sock.close()
+
+        self._start_server(socket_handler)
+        from urllib3.connectionpool import log
+
+        with mock.patch.object(log, "warning") as log_warning:
+            with HTTPConnectionPool(self.host, self.port, timeout=3) as pool:
+                resp = pool.urlopen("GET", "/")
+                assert resp.status == 404
+                assert (
+                    resp.headers["content-type"]
+                    == "multipart/mixed; boundary=36eeb8c4e26d842a"
+                )
+                assert len(resp.data) == 73
+                log_warning.assert_not_called()
Multipart mixed responses generate warnings

If you read a multipart/mixed response, the connectionpool issues a couple of warnings about defects in the message. I'm not sure what the expected, correct behavior is, but these warnings seem spurious.

Stick this perl script somewhere:

```
#!/usr/bin/perl
print "Server: Some Server Name\r\n";
print "Content-Type: multipart/mixed; boundary=36eeb8c4e26d842a\r\n";
print "Content-Length: 178\r\n";
print "\r\n\r\n";
print "--36eeb8c4e26d842a\r\n";
print "Content-Type: text/plain\r\n";
print "\r\n";
print "7\r\n";
print "--36eeb8c4e26d842a\r\n";
print "Content-Type: text/plain\r\n";
print "\r\n";
print "9\r\n";
print "--36eeb8c4e26d842a\r\n";
print "Content-Type: text/plain\r\n";
print "\r\n";
print "11\r\n";
print "--36eeb8c4e26d842a--\r\n";
```

Read it with requests (naturally, you'll have to change the URI to wherever you put the script):

```
import requests, logging
logging.basicConfig(level=logging.WARNING)
logging.getLogger("requests").setLevel(logging.DEBUG)
headers = {'accept': "multipart/mixed"}
r = requests.get("http://localhost:8124/cgi-bin/mpm.pl", headers=headers)
print(r)
```

The following errors are displayed:

```
DEBUG:requests.packages.urllib3.connectionpool:"GET http://localhost:8124/cgi-bin/mpm.pl HTTP/1.1" 200 178
WARNING:requests.packages.urllib3.connectionpool:Failed to parse headers (url=http://localhost:8888/http://localhost:8124/cgi-bin/mpm.pl): [StartBoundaryNotFoundDefect(), MultipartInvariantViolationDefect()], unparsed data: ''
Traceback (most recent call last):
  File "/home/ndw/.virtualenvs/pyapi/lib/python3.4/site-packages/requests-2.8.0-py3.4.egg/requests/packages/urllib3/connectionpool.py", line 390, in _make_request
    assert_header_parsing(httplib_response.msg)
  File "/home/ndw/.virtualenvs/pyapi/lib/python3.4/site-packages/requests-2.8.0-py3.4.egg/requests/packages/urllib3/util/response.py", line 58, in assert_header_parsing
    raise HeaderParsingError(defects=defects, unparsed_data=unparsed_data)
requests.packages.urllib3.exceptions.HeaderParsingError: [StartBoundaryNotFoundDefect(), MultipartInvariantViolationDefect()], unparsed data: ''
```

It took me quite a while to work out that they were spurious (because in real life, the server side that is generating the multipart/mixed is more complicated!)

See also kennethreitz/requests#3001
Actually I'm mistaken, nevermind. (Deleted comment)

Hey all, so I took a quick look at this by taking the text and shoving it into a text file (with a status-line) and serving it via netcat while requesting it with only plain `http.client`. I wasn't very familiar with [`assert_header_parsing`](https://github.com/shazow/urllib3/blob/c325f97b7e892cefe5676e129c35c9be5d8180cd/urllib3/util/response.py#L31) but the reason you're seeing this is because of http.client's parsing of the headers. Here's the code that brought me to this conclusion:

```py
>>> conn = http.client.HTTPConnection('localhost', 3234)
>>> conn.request('GET', '/')
>>> conn.getresponse()
<http.client.HTTPResponse object at 0x11000e6a0>
>>> r = _
>>> r.headers
<http.client.HTTPMessage object at 0x11003b0b8>
>>> r.headers.defects
[StartBoundaryNotFoundDefect(), MultipartInvariantViolationDefect()]
```

That said, this doesn't seem to be a problem for http.client or urllib3. This simply issues a log warning which might not be the right log level for this (since it isn't anything more than informational). That said, @ndw, what would you consider to be an appropriate fix here?

Lowering the warning level so that they require an even "finer" logging level would be fine, I guess, if the conditions that are being reported really are harmless. They're noisy and confusing when the logging level is set to warning. On the other hand, if I understood what the warnings meant, I might be equally content to amend my code so as not to raise them. However, AFAICT, there is a start boundary, so I don't know why it's "not found" and I couldn't find anything in the MIME RFC that seemed to correspond to a multipart invariancy.

I think this traces all the way back to the email/rfc822 parser, and how it's trying to track your header. In the python source, I don't think it gets to [line 355](https://github.com/python/cpython/blob/2d264235f6e066611b412f7c2e1603866e0f7f1b/Lib/email/feedparser.py#L365), thus it raises the error. I think the issue is [here](https://github.com/python/cpython/blob/2d264235f6e066611b412f7c2e1603866e0f7f1b/Lib/email/feedparser.py#L343). Since it sees something equivalent to a blank line, but not NeedMoreData before it sees your boundary, it raises the error. To fix, if you can call it that, try adding your boundary before you print any blank lines on your perl script.

Is this issue still outstanding?

As far as I know, yes.

Hello, I see some fixes, but in 0.77.3 I'm still having this issue, can you please say which files I must change and how to apply the fix? Kind regards

@McVillano urllib3 never had a 0.77.3 release, I think you're confusing urllib3 and Home Assistant here.
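As a side note not taken from the thread: the `http.client` behaviour described in the comments above can be reproduced without a live server by handing only the header block to `parse_headers`. This is a rough sketch, and the defect list shown is what I'd expect on Python 3; the exact output may vary by version:

```python
import io
from http.client import parse_headers

# Only the header block of a multipart response, no body: the email
# feedparser then reports that it never saw the declared boundary.
raw = io.BytesIO(
    b"Content-Type: multipart/mixed; boundary=36eeb8c4e26d842a\r\n"
    b"Content-Length: 178\r\n"
    b"\r\n"
)
msg = parse_headers(raw)
print(msg.defects)
# Expected on Python 3 (exact repr may vary):
# [StartBoundaryNotFoundDefect(), MultipartInvariantViolationDefect()]
```

This mirrors what the test added in the patch above does with an in-memory `BytesIO` before asserting that `assert_header_parsing` no longer raises.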
2019-08-16T21:58:06Z
[]
[]