Dataset columns and value statistics:

| column | dtype | stats |
| --- | --- | --- |
| `instance_id` | string | lengths 10 to 57 |
| `patch` | string | lengths 261 to 37.7k |
| `repo` | string | lengths 7 to 53 |
| `base_commit` | string | length 40 |
| `hints_text` | string | 301 distinct values |
| `test_patch` | string | lengths 212 to 2.22M |
| `problem_statement` | string | lengths 23 to 37.7k |
| `version` | string | 1 distinct value |
| `environment_setup_commit` | string | length 40 |
| `FAIL_TO_PASS` | list | lengths 1 to 4.94k |
| `PASS_TO_PASS` | list | lengths 0 to 7.82k |
| `meta` | dict | |
| `created_at` | string | length 25 |
| `license` | string | 8 distinct values |
| `__index_level_0__` | int64 | 0 to 6.41k |
### slarse__labelbot-22

**patch:**
diff --git a/labelbot/parse.py b/labelbot/parse.py new file mode 100644 index 0000000..2c40500 --- /dev/null +++ b/labelbot/parse.py @@ -0,0 +1,41 @@ +"""Parse label information from issue and config files. + +.. module:: parse + :synopsis: Functions for extracting info from files. +""" +import re + +from typing import List + +COMMENT_CHAR = "#" +# maybe need an OS independent match here +ALLOWED_LABEL_SEP = "\n" + + +def parse_wanted_labels(text: str) -> List[str]: + """Extract the labels defined by :label:`LABEL` tokens in a string. + + Args: + text: Typically an issue body with label markup. + Returns: + A list of extracted labels. + """ + label_pattern = ":label:`(.*?)`" + return re.findall(label_pattern, text, re.MULTILINE) + + +def parse_allowed_labels(text: str) -> List[str]: + """Parse the allowd labels from the contents of a .allowed-labels file. + Each line constitutes a single label. Lines starting with a # sign are + ignored. + + Args: + text: The contents of a .allowed-labels file. + Returns: + A list of defined labels + """ + return [ + stripped_line + for stripped_line in (line.strip() for line in text.split(ALLOWED_LABEL_SEP)) + if not stripped_line.startswith(COMMENT_CHAR) and stripped_line + ]
**repo:** slarse/labelbot
**base_commit:** b5cb9deb4b2d7542184a416199f9364b9ad97028
**test_patch:**
diff --git a/tests/test_parse.py b/tests/test_parse.py new file mode 100644 index 0000000..d6a185a --- /dev/null +++ b/tests/test_parse.py @@ -0,0 +1,58 @@ +import pytest +from labelbot import parse + +SINGLE_LABEL_ISSUE = """This is an issue body. + +I want to express some kind of concern. :label:`concern` +""" +SINGLE_LABEL = ["concern"] + +MULTI_LABEL_ISSUE = """This is another issue body. +I want to both express :label:`concern` but also +:label:`happy` :label:`multiple words in label`. +""" +MULTI_LABELS = ["concern", "happy", "multiple words in label"] + +NO_LABEL_ISSUE = """I just want to write an issue that does not contain any +labels whatsoever.""" + + [email protected]( + "text, expected_labels", + [ + (SINGLE_LABEL_ISSUE, SINGLE_LABEL), + (MULTI_LABEL_ISSUE, MULTI_LABELS), + (NO_LABEL_ISSUE, []), + ], + ids=["single-label", "multi-label", "no label"], +) +def test_parse_wanted_labels_correctly_parses_labels(text, expected_labels): + actual_labels = parse.parse_wanted_labels(text) + assert sorted(actual_labels) == sorted(expected_labels) + + +ALLOWED_LABELS_TEXT = """nice label + another label + +best label +""" +ALLOWED_LABELS = ["nice label", "best label", "another label"] +COMMENTED_LABELS_TEXT = """nice label + # another label +best label""" +COMMENT_LABELS = ["nice label", "best label"] +EMPTY_LABELS_TEXT = "" + + [email protected]( + "text, expected_labels", + [ + (ALLOWED_LABELS_TEXT, ALLOWED_LABELS), + (COMMENTED_LABELS_TEXT, COMMENT_LABELS), + (EMPTY_LABELS_TEXT, []), + ], + ids=("multi-labels", "commented labels", "no labels"), +) +def test_parse_allowed_labels_correctly_parses_labeles(text, expected_labels): + actual_labels = parse.parse_allowed_labels(text) + assert sorted(actual_labels) == sorted(expected_labels)
**problem_statement:** Functions for parsing wanted labels from issue body and allowed labels from labels file

Labels should be defined in the issue body like so:

```
Blablabla :label:`some label` :label:`some other label`
```

No artificial restrictions on where the labels are placed or how many there are. The function should only return **unique labels**, duplicates are discarded.

The labels in the `.allowed-labels` file should simply be newline separated labels. Lines starting with `#` are ignored.
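To make the requested parsing behavior concrete, here is a minimal usage sketch of the two functions added in this record's patch (assuming `labelbot` is importable; the inputs are made up for illustration):

```python
from labelbot import parse

# Label markup anywhere in an issue body is picked up by a regex.
issue_body = "Please fix this. :label:`bug` :label:`help wanted`"
print(parse.parse_wanted_labels(issue_body))  # ['bug', 'help wanted']

# Allowed labels are newline-separated; '#'-prefixed lines and blanks are skipped.
allowed_file = "bug\n# maintainers only\n\nhelp wanted\n"
print(parse.parse_allowed_labels(allowed_file))  # ['bug', 'help wanted']
```

Note that the patched `parse_wanted_labels` uses `re.findall`, so unlike the uniqueness requirement stated above, it does not deduplicate repeated labels by itself.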
**version:** 0.0
**environment_setup_commit:** b5cb9deb4b2d7542184a416199f9364b9ad97028
**FAIL_TO_PASS:** [ "tests/test_parse.py::test_parse_wanted_labels_correctly_parses_labels[single-label]", "tests/test_parse.py::test_parse_wanted_labels_correctly_parses_labels[multi-label]", "tests/test_parse.py::test_parse_wanted_labels_correctly_parses_labels[no", "tests/test_parse.py::test_parse_allowed_labels_correctly_parses_labeles[multi-labels]", "tests/test_parse.py::test_parse_allowed_labels_correctly_parses_labeles[commented", "tests/test_parse.py::test_parse_allowed_labels_correctly_parses_labeles[no" ]
**PASS_TO_PASS:** []
**meta:** { "failed_lite_validators": [ "has_added_files" ], "has_test_patch": true, "is_lite": false }
**created_at:** 2019-04-11 13:11:17+00:00
**license:** mit
**`__index_level_0__`:** 5,535
### slarse__labelbot-45

**patch:**
diff --git a/docs/deploy.md b/docs/deploy.md new file mode 100644 index 0000000..bffffe8 --- /dev/null +++ b/docs/deploy.md @@ -0,0 +1,56 @@ +# Deployment instructions for AWS Lambda + +## Prerequisites +* You will need a Github account +* You will need an AWS account. + + +## Creating the lambda function + +1. Create a function on AWS Lambda and author from scratch. Under `choose or create an execution role`, choose `create a execution role from a policy template` with +`Amazon S3 read only permissions` and name your role. Set your runtime to `Python 3.7` and +choose a name for your function. +press `Create function` +2. Choose `upload a .zip file` for your `Code entry type` +3. Set your handler to `labelbot.bot.lambda_handler` +4. Add an API Gateway as a trigger. Create a new API and set it to Open. +Press add, and then save. This will give you a webhook url to add to your github app. + +## Create the github app +1. Under `Settings>Developer settings>Github Apps`, press the `New Github App` button. +2. Give your app a name and enter a homepage url, for example your fork of labelbot. +3. Under `Webhook url` enter your API gateway url. +4. Under `Webhook secret (optional)`, enter a secret token, as described in Githubs [documentation](https://developer.github.com/webhooks/securing/#setting-your-secret-token). +5. Under `Permissions`, add `read-only` access to `Repository contents` +and add `Read and write` access to `Issues` +6. Under `Subscribe to events`, subscribe to the `Label` event. +7. Under `Where can this GitHub App be installed?`, set `Only on this account` +8. Press the `Create Github App` +9. Generate a private key and save it to an S3 bucket that is not publicly accessible. + +## Set enviroment variables to hold private data in your Lambda function. +1. `APP_ID` : Shall be set to the App ID of your github app. +2. `BUCKET_NAME`: The name of your S3 bucket. +3. `BUCKET_KEY`: the unique identifier of your key file stored in S3. +4. `SECRET_KEY`: Shall be the same value as your secret token, that was set to secure the webhook. + +After all enviroment variables have been added, save the changes. + +## Give your lambda function read access to your s3 bucket. +In `S3>:your bucket:>Permissions>Bucket Policy` give your lambda role the rights to read from your bucket. + +## Creating and uploading a deployment package +To create a deployment package it is required to build in a linux +enviroment with python 3.7 and pip installed, as Lambda runs in a linux +enviroment. + +Run the `package.sh` script from the repository root, which packages the +application iin a zip file, that can be uploaded to AWS to deploy the +application. It will create a file +called `labelbot.zip` which should be uploaded to your AWS lambda +function. Save after uploading the file. + + + +## You have now created and deployed your own github app with AWS Lambda +You can now install it on your own account and use it to label issues by pressing the `Install App` button. \ No newline at end of file diff --git a/docs/index.rst b/docs/index.rst index e3e2d49..dd909cf 100644 --- a/docs/index.rst +++ b/docs/index.rst @@ -10,6 +10,7 @@ Welcome to Labelbot's documentation! :maxdepth: 2 :caption: Contents: + deploy code diff --git a/labelbot/auth.py b/labelbot/auth.py index c23a0a1..a0bfa67 100644 --- a/labelbot/auth.py +++ b/labelbot/auth.py @@ -1,9 +1,8 @@ -"""Handles authentication tokens with the GitHub API. +"""Functions for handling authentication procedures. .. 
module:: auth - :synopsis: Functions for generating tokens used for authentication with GitHub. + :synopsis: Functions for handling authentication procedures. .. moduleauthor:: Lars Hummelgren <[email protected]> & Joakim Croona <[email protected]> - """ import datetime @@ -12,6 +11,10 @@ import json import jwcrypto import python_jwt import requests +import boto3 +import botocore +import hmac +import hashlib USER_AGENT = "label-bot" @@ -31,7 +34,7 @@ def generate_jwt_token(private_pem: bytes, app_id: int) -> str: return python_jwt.generate_jwt(payload, private_key, "RS256", duration) -def generate_installation_access_token(jwt_token: str, installation_id): +def generate_installation_access_token(jwt_token: str, installation_id) -> str: """Generates an installation access token using a JWT token and an installation id. An installation access token is valid for 1 hour. @@ -50,3 +53,40 @@ def generate_installation_access_token(jwt_token: str, installation_id): url = f"https://api.github.com/app/installations/{installation_id}/access_tokens" r = requests.post(url, headers=headers) return r.json()["token"] + + +def get_pem(bucket_name: str, bucket_key: str) -> bytes: + """Reads a private PEM file from an S3 bucket. + + Args: + bucket_name: Name of the S3 bucket. + bucket_key: Bucket key for the PEM file. + Returns: + Contents of the PEM file. + """ + s3 = boto3.resource("s3") + s3.Bucket(bucket_name).download_file(bucket_key, "/tmp/key.pem") + with open("/tmp/key.pem", "rb") as f: + return f.read() + + +def authenticate_request(shared_secret: str, body: str, signature: str) -> bool: + """Checks if the MAC (message authentication code) sent in the request is really + from GitHub. + + Args: + shared_secret: A secret shared between GitHub and the bot. + body: Body of the HTTP request. + signature: The header containing the MAC. + Returns: + True iff the signature is a MAC computed with the body of the request and the + shared secret. + """ + if signature is None: + return False + + sha_body = hmac.new( + shared_secret.encode("utf8"), body.encode("utf8"), hashlib.sha1 + ).hexdigest() + _, sha_github = signature.split("=") + return hmac.compare_digest(sha_body, sha_github) diff --git a/labelbot/bot.py b/labelbot/bot.py index 163071f..7d6090f 100644 --- a/labelbot/bot.py +++ b/labelbot/bot.py @@ -1,14 +1,17 @@ +"""Event handler for AWS lambda. + +This is the main module of labelbot, and contains the event handler for AWS lambda. If +for any reason one would like to use a different service than AWS lambda, this is the +functionality that needs to be changed. + +.. module:: bot + :synopsis: Event handler for AWS lambda. +.. 
moduleauthor:: Simon Larsén <[email protected]> & Joakim Croona <[email protected]> +""" import json -from jwcrypto import jwk -import python_jwt import os -import boto3 -import botocore -import hmac -import hashlib from labelbot import auth from labelbot import github_api -from labelbot import parse def lambda_handler(event, context): @@ -22,12 +25,15 @@ def lambda_handler(event, context): app_id = int(os.getenv("APP_ID")) secret_key = os.getenv("SECRET_KEY") - authenticated = authenticate_request(secret_key, event["body"], event["headers"]["X-Hub-Signature"]) + authenticated = auth.authenticate_request( + secret_key, event["body"], event["headers"]["X-Hub-Signature"] + ) if not authenticated: return {"statuscode": 403} + bucket_name = os.getenv("BUCKET_NAME") bucket_key = os.getenv("BUCKET_KEY") - pem = get_pem(bucket_name, bucket_key) + pem = auth.get_pem(bucket_name, bucket_key) jwt_token = auth.generate_jwt_token(pem, app_id) access_token = auth.generate_installation_access_token(jwt_token, installation_id) @@ -37,25 +43,3 @@ def lambda_handler(event, context): ) return {"statusCode": 200 if success else 403, "body": json.dumps("temp")} - - -def get_pem(bucket_name, key): - """Reads key from s3""" - s3 = boto3.resource("s3") - s3.Bucket(bucket_name).download_file(key, "/tmp/key.pem") - with open("/tmp/key.pem", "rb") as f: - pem = f.read() - return pem - - -def authenticate_request(key: str, body: str, signature: str) -> bool: - """ Chacks if the X-Hub-Signature header exists, and if it does, verifies that the body - matches the hash sent from github.""" - if signature is None: - return False - - sha_body = hmac.new( - key.encode("utf8"), body.encode("utf8"), hashlib.sha1 - ).hexdigest() - alg, sha_github = signature.split("=") - return hmac.compare_digest(sha_body, sha_github) diff --git a/labelbot/github_api.py b/labelbot/github_api.py index d1a052b..66be23d 100644 --- a/labelbot/github_api.py +++ b/labelbot/github_api.py @@ -6,7 +6,7 @@ import json import sys import base64 -from typing import Sequence, List +from typing import Iterable, List import requests from labelbot import parse @@ -60,7 +60,7 @@ def set_allowed_labels( def set_labels( - labels: Sequence[str], owner: str, repo: str, issue_nr: int, access_token: str + labels: Iterable[str], owner: str, repo: str, issue_nr: int, access_token: str ) -> bool: """Unconditionally set the provided labels on a repository issue. diff --git a/setup.py b/setup.py index 0ac9d8b..da892e9 100644 --- a/setup.py +++ b/setup.py @@ -3,12 +3,18 @@ from setuptools import setup, find_packages with open("README.md", mode="r", encoding="utf-8") as f: readme = f.read() -test_requirements = ["pytest>=4.0.0", "codecov", "pytest-cov", "responses","pytest-mock"] +test_requirements = [ + "pytest>=4.0.0", + "codecov", + "pytest-cov", + "responses", + "pytest-mock", +] required = ["python_jwt", "jwcrypto", "requests", "boto3"] setup( name="labelbot", - version="0.0.4", + version="0.0.6", description=( "A GitHub label bot for allowing unprivileged users to label issues " "with allowed labels."
**repo:** slarse/labelbot
**base_commit:** c269a68c3820ebc0d60788a32c34916607323a7e
**test_patch:**
diff --git a/tests/test_auth.py b/tests/test_auth.py index e1df380..c1984a0 100644 --- a/tests/test_auth.py +++ b/tests/test_auth.py @@ -33,3 +33,21 @@ def test_auth_jwt_token(): """Tests that jwt_token is truthy""" result = auth.generate_jwt_token(rsa_key.encode("utf8"), 12334) assert result + + +class TestAuthenticateRequest: + SECRET = "d653a60adc0a16a93e99f0620a67f4a67ef901df" + BODY = "Hello, World!" + SIGN = "sha1=8727505c9c036b2337a06d2e63f091a7aa41ae60" + + def test_correct_hash(self): + result = auth.authenticate_request(self.SECRET, self.BODY, self.SIGN) + assert result + + def test_incorrect_hash(self): + result = auth.authenticate_request(self.SECRET, self.BODY.lower(), self.SIGN) + assert not result + + def test_no_signature(self): + result = auth.authenticate_request(self.SECRET, self.BODY, None) + assert not result diff --git a/tests/test_bot.py b/tests/test_bot.py index 9c6aecf..8e1b66c 100644 --- a/tests/test_bot.py +++ b/tests/test_bot.py @@ -1,34 +1,32 @@ from labelbot import bot +import responses import pytest -SECRET = "d653a60adc0a16a93e99f0620a67f4a67ef901df" -BODY = "Hello, World!" -SIGN = "sha1=8727505c9c036b2337a06d2e63f091a7aa41ae60" - -class TestAuthenticateRequest: - def test_correct_hash(self): - result = bot.authenticate_request(SECRET,BODY, SIGN) - assert result - - def test_incorrect_hash(self): - result = bot.authenticate_request(SECRET,BODY.lower(), SIGN) - assert not result - - def test_no_signature(self): - result = bot.authenticate_request(SECRET,BODY, None) - assert not result - @pytest.fixture def env_setup(mocker): - values = {"APP_ID": "243554", "SECRET_KEY": "66535665634", "BUCKET_NAME": "My_bucket", "BUCKET_KEY": "my_file"} + values = { + "APP_ID": "243554", + "SECRET_KEY": "66535665634", + "BUCKET_NAME": "My_bucket", + "BUCKET_KEY": "my_file", + } mocker.patch("os.getenv", autospec=True, side_effect=values.get) yield values -def test_lambda_handler_authentication(env_setup): - event = {"headers": {"X-Hub-Signature": "sha1=4afefa55e46cc2ac696127dae55b49aeb999b7e8"},"body": jsonstring} + [email protected] +def test_lambda_handler_authentication_failure(env_setup): + """Test that lambda_handler correctly handles unauthorized requests (here, + all of the secrets are bogus). + """ + event = { + "headers": {"X-Hub-Signature": "sha1=4afefa55e46cc2ac696127dae55b49aeb999b7e8"}, + "body": jsonstring, + } result = bot.lambda_handler(event, None) - assert result + assert result["statuscode"] == 403 + jsonstring = """{ "action": "reopened", @@ -199,5 +197,3 @@ jsonstring = """{ "node_id": "MDIzOkludGVncmF0aW9uSW5zdGFsbGF0aW9uODI1OTU4" } }""" - -
**problem_statement:** AWS lambda setup procedure

Add it to the docs
**version:** 0.0
**environment_setup_commit:** c269a68c3820ebc0d60788a32c34916607323a7e
**FAIL_TO_PASS:** [ "tests/test_auth.py::TestAuthenticateRequest::test_correct_hash", "tests/test_auth.py::TestAuthenticateRequest::test_incorrect_hash", "tests/test_auth.py::TestAuthenticateRequest::test_no_signature" ]
**PASS_TO_PASS:** [ "tests/test_auth.py::test_auth_jwt_token", "tests/test_bot.py::test_lambda_handler_authentication_failure" ]
**meta:** { "failed_lite_validators": [ "has_short_problem_statement", "has_added_files", "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false }
**created_at:** 2019-04-17 14:56:45+00:00
**license:** mit
**`__index_level_0__`:** 5,536
### slarse__labelbot-46

**patch:**
diff --git a/labelbot/auth.py b/labelbot/auth.py index c23a0a1..a0bfa67 100644 --- a/labelbot/auth.py +++ b/labelbot/auth.py @@ -1,9 +1,8 @@ -"""Handles authentication tokens with the GitHub API. +"""Functions for handling authentication procedures. .. module:: auth - :synopsis: Functions for generating tokens used for authentication with GitHub. + :synopsis: Functions for handling authentication procedures. .. moduleauthor:: Lars Hummelgren <[email protected]> & Joakim Croona <[email protected]> - """ import datetime @@ -12,6 +11,10 @@ import json import jwcrypto import python_jwt import requests +import boto3 +import botocore +import hmac +import hashlib USER_AGENT = "label-bot" @@ -31,7 +34,7 @@ def generate_jwt_token(private_pem: bytes, app_id: int) -> str: return python_jwt.generate_jwt(payload, private_key, "RS256", duration) -def generate_installation_access_token(jwt_token: str, installation_id): +def generate_installation_access_token(jwt_token: str, installation_id) -> str: """Generates an installation access token using a JWT token and an installation id. An installation access token is valid for 1 hour. @@ -50,3 +53,40 @@ def generate_installation_access_token(jwt_token: str, installation_id): url = f"https://api.github.com/app/installations/{installation_id}/access_tokens" r = requests.post(url, headers=headers) return r.json()["token"] + + +def get_pem(bucket_name: str, bucket_key: str) -> bytes: + """Reads a private PEM file from an S3 bucket. + + Args: + bucket_name: Name of the S3 bucket. + bucket_key: Bucket key for the PEM file. + Returns: + Contents of the PEM file. + """ + s3 = boto3.resource("s3") + s3.Bucket(bucket_name).download_file(bucket_key, "/tmp/key.pem") + with open("/tmp/key.pem", "rb") as f: + return f.read() + + +def authenticate_request(shared_secret: str, body: str, signature: str) -> bool: + """Checks if the MAC (message authentication code) sent in the request is really + from GitHub. + + Args: + shared_secret: A secret shared between GitHub and the bot. + body: Body of the HTTP request. + signature: The header containing the MAC. + Returns: + True iff the signature is a MAC computed with the body of the request and the + shared secret. + """ + if signature is None: + return False + + sha_body = hmac.new( + shared_secret.encode("utf8"), body.encode("utf8"), hashlib.sha1 + ).hexdigest() + _, sha_github = signature.split("=") + return hmac.compare_digest(sha_body, sha_github) diff --git a/labelbot/bot.py b/labelbot/bot.py index 163071f..7d6090f 100644 --- a/labelbot/bot.py +++ b/labelbot/bot.py @@ -1,14 +1,17 @@ +"""Event handler for AWS lambda. + +This is the main module of labelbot, and contains the event handler for AWS lambda. If +for any reason one would like to use a different service than AWS lambda, this is the +functionality that needs to be changed. + +.. module:: bot + :synopsis: Event handler for AWS lambda. +.. 
moduleauthor:: Simon Larsén <[email protected]> & Joakim Croona <[email protected]> +""" import json -from jwcrypto import jwk -import python_jwt import os -import boto3 -import botocore -import hmac -import hashlib from labelbot import auth from labelbot import github_api -from labelbot import parse def lambda_handler(event, context): @@ -22,12 +25,15 @@ def lambda_handler(event, context): app_id = int(os.getenv("APP_ID")) secret_key = os.getenv("SECRET_KEY") - authenticated = authenticate_request(secret_key, event["body"], event["headers"]["X-Hub-Signature"]) + authenticated = auth.authenticate_request( + secret_key, event["body"], event["headers"]["X-Hub-Signature"] + ) if not authenticated: return {"statuscode": 403} + bucket_name = os.getenv("BUCKET_NAME") bucket_key = os.getenv("BUCKET_KEY") - pem = get_pem(bucket_name, bucket_key) + pem = auth.get_pem(bucket_name, bucket_key) jwt_token = auth.generate_jwt_token(pem, app_id) access_token = auth.generate_installation_access_token(jwt_token, installation_id) @@ -37,25 +43,3 @@ def lambda_handler(event, context): ) return {"statusCode": 200 if success else 403, "body": json.dumps("temp")} - - -def get_pem(bucket_name, key): - """Reads key from s3""" - s3 = boto3.resource("s3") - s3.Bucket(bucket_name).download_file(key, "/tmp/key.pem") - with open("/tmp/key.pem", "rb") as f: - pem = f.read() - return pem - - -def authenticate_request(key: str, body: str, signature: str) -> bool: - """ Chacks if the X-Hub-Signature header exists, and if it does, verifies that the body - matches the hash sent from github.""" - if signature is None: - return False - - sha_body = hmac.new( - key.encode("utf8"), body.encode("utf8"), hashlib.sha1 - ).hexdigest() - alg, sha_github = signature.split("=") - return hmac.compare_digest(sha_body, sha_github) diff --git a/labelbot/github_api.py b/labelbot/github_api.py index d1a052b..66be23d 100644 --- a/labelbot/github_api.py +++ b/labelbot/github_api.py @@ -6,7 +6,7 @@ import json import sys import base64 -from typing import Sequence, List +from typing import Iterable, List import requests from labelbot import parse @@ -60,7 +60,7 @@ def set_allowed_labels( def set_labels( - labels: Sequence[str], owner: str, repo: str, issue_nr: int, access_token: str + labels: Iterable[str], owner: str, repo: str, issue_nr: int, access_token: str ) -> bool: """Unconditionally set the provided labels on a repository issue. diff --git a/setup.py b/setup.py index 0ac9d8b..da892e9 100644 --- a/setup.py +++ b/setup.py @@ -3,12 +3,18 @@ from setuptools import setup, find_packages with open("README.md", mode="r", encoding="utf-8") as f: readme = f.read() -test_requirements = ["pytest>=4.0.0", "codecov", "pytest-cov", "responses","pytest-mock"] +test_requirements = [ + "pytest>=4.0.0", + "codecov", + "pytest-cov", + "responses", + "pytest-mock", +] required = ["python_jwt", "jwcrypto", "requests", "boto3"] setup( name="labelbot", - version="0.0.4", + version="0.0.6", description=( "A GitHub label bot for allowing unprivileged users to label issues " "with allowed labels."
**repo:** slarse/labelbot
**base_commit:** c269a68c3820ebc0d60788a32c34916607323a7e
**test_patch:**
diff --git a/tests/test_auth.py b/tests/test_auth.py index e1df380..c1984a0 100644 --- a/tests/test_auth.py +++ b/tests/test_auth.py @@ -33,3 +33,21 @@ def test_auth_jwt_token(): """Tests that jwt_token is truthy""" result = auth.generate_jwt_token(rsa_key.encode("utf8"), 12334) assert result + + +class TestAuthenticateRequest: + SECRET = "d653a60adc0a16a93e99f0620a67f4a67ef901df" + BODY = "Hello, World!" + SIGN = "sha1=8727505c9c036b2337a06d2e63f091a7aa41ae60" + + def test_correct_hash(self): + result = auth.authenticate_request(self.SECRET, self.BODY, self.SIGN) + assert result + + def test_incorrect_hash(self): + result = auth.authenticate_request(self.SECRET, self.BODY.lower(), self.SIGN) + assert not result + + def test_no_signature(self): + result = auth.authenticate_request(self.SECRET, self.BODY, None) + assert not result diff --git a/tests/test_bot.py b/tests/test_bot.py index 9c6aecf..8e1b66c 100644 --- a/tests/test_bot.py +++ b/tests/test_bot.py @@ -1,34 +1,32 @@ from labelbot import bot +import responses import pytest -SECRET = "d653a60adc0a16a93e99f0620a67f4a67ef901df" -BODY = "Hello, World!" -SIGN = "sha1=8727505c9c036b2337a06d2e63f091a7aa41ae60" - -class TestAuthenticateRequest: - def test_correct_hash(self): - result = bot.authenticate_request(SECRET,BODY, SIGN) - assert result - - def test_incorrect_hash(self): - result = bot.authenticate_request(SECRET,BODY.lower(), SIGN) - assert not result - - def test_no_signature(self): - result = bot.authenticate_request(SECRET,BODY, None) - assert not result - @pytest.fixture def env_setup(mocker): - values = {"APP_ID": "243554", "SECRET_KEY": "66535665634", "BUCKET_NAME": "My_bucket", "BUCKET_KEY": "my_file"} + values = { + "APP_ID": "243554", + "SECRET_KEY": "66535665634", + "BUCKET_NAME": "My_bucket", + "BUCKET_KEY": "my_file", + } mocker.patch("os.getenv", autospec=True, side_effect=values.get) yield values -def test_lambda_handler_authentication(env_setup): - event = {"headers": {"X-Hub-Signature": "sha1=4afefa55e46cc2ac696127dae55b49aeb999b7e8"},"body": jsonstring} + [email protected] +def test_lambda_handler_authentication_failure(env_setup): + """Test that lambda_handler correctly handles unauthorized requests (here, + all of the secrets are bogus). + """ + event = { + "headers": {"X-Hub-Signature": "sha1=4afefa55e46cc2ac696127dae55b49aeb999b7e8"}, + "body": jsonstring, + } result = bot.lambda_handler(event, None) - assert result + assert result["statuscode"] == 403 + jsonstring = """{ "action": "reopened", @@ -199,5 +197,3 @@ jsonstring = """{ "node_id": "MDIzOkludGVncmF0aW9uSW5zdGFsbGF0aW9uODI1OTU4" } }""" - -
**problem_statement:** General refactor

Make sure functions are in appropriate modules, double check docstrings, and stuff like that. Make it look nice.
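The main code movement in this refactor relocates webhook verification into `labelbot.auth`. A short sketch of the relocated API may help; the constants are copied from this record's test patch, and the sketch assumes `labelbot` at or after this refactor is importable:

```python
from labelbot import auth

# Shared secret, request body, and GitHub-style X-Hub-Signature header value,
# copied verbatim from tests/test_auth.py above.
SECRET = "d653a60adc0a16a93e99f0620a67f4a67ef901df"
BODY = "Hello, World!"
SIGN = "sha1=8727505c9c036b2337a06d2e63f091a7aa41ae60"

assert auth.authenticate_request(SECRET, BODY, SIGN)              # valid MAC
assert not auth.authenticate_request(SECRET, BODY.lower(), SIGN)  # tampered body
assert not auth.authenticate_request(SECRET, BODY, None)          # missing header
```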
**version:** 0.0
**environment_setup_commit:** c269a68c3820ebc0d60788a32c34916607323a7e
**FAIL_TO_PASS:** [ "tests/test_auth.py::TestAuthenticateRequest::test_correct_hash", "tests/test_auth.py::TestAuthenticateRequest::test_incorrect_hash", "tests/test_auth.py::TestAuthenticateRequest::test_no_signature" ]
**PASS_TO_PASS:** [ "tests/test_auth.py::test_auth_jwt_token", "tests/test_bot.py::test_lambda_handler_authentication_failure" ]
**meta:** { "failed_lite_validators": [ "has_short_problem_statement", "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false }
**created_at:** 2019-04-17 14:58:02+00:00
**license:** mit
**`__index_level_0__`:** 5,537
### sloria__environs-104

**patch:**
diff --git a/CHANGELOG.md b/CHANGELOG.md index 97876fd..dc28b52 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,31 @@ # Changelog +## 6.0.0 + +Features: + +- Default parser methods are now defined as bound methods. + This enables static analysis features, e.g. autocomplete ([#103](https://github.com/sloria/environs/issues/103)). + Thanks [rugleb](https://github.com/rugleb) for the suggestion. + _Backwards-incompatible_: As a result of this change, adding a parser name that is the same as an existing method + will result in an error being raised. + +```python +import environs + +env = environs.Env() + +# Below conflicts with built-in `url` method. +# In <6.0.0, this would override the built-in method. +# In >=6.0.0, this raises an error: +# environs.ParserConflictError: Env already has a method with name 'url'. Use a different name. [email protected]_for("url") +def https_url(value): + return "https://" + value +``` + +- _Backwards-incompatible_: Rename `Env.__parser_map__` to `Env.__custom_parsers__`. + ## 5.2.1 (2019-08-08) Bug fixes: @@ -161,3 +187,7 @@ Bug fixes: ## 0.1.0 (2016-04-25) - First PyPI release. + +``` + +``` diff --git a/environs.py b/environs.py index 2c60439..da97c0c 100644 --- a/environs.py +++ b/environs.py @@ -19,16 +19,24 @@ __all__ = ["EnvError", "Env"] MARSHMALLOW_VERSION_INFO = tuple([int(part) for part in ma.__version__.split(".") if part.isdigit()]) _PROXIED_PATTERN = re.compile(r"\s*{{\s*(\S*)\s*}}\s*") -T = typing.TypeVar("T") +_T = typing.TypeVar("_T") +_StrType = str +_BoolType = bool +_IntType = int + FieldFactory = typing.Callable[..., ma.fields.Field] -Subcast = typing.Union[typing.Type, typing.Callable[..., T]] +Subcast = typing.Union[typing.Type, typing.Callable[..., _T]] FieldType = typing.Type[ma.fields.Field] FieldOrFactory = typing.Union[FieldType, FieldFactory] -ParserMethod = typing.Callable[..., T] +ParserMethod = typing.Callable[..., _T] class EnvError(ValueError): - pass + """Raised when an environment variable or if a required environment variable is unset.""" + + +class ParserConflictError(ValueError): + """Raised when adding a custom parser that conflicts with a built-in parser method.""" def _field2method( @@ -36,7 +44,7 @@ def _field2method( ) -> ParserMethod: def method( self: "Env", name: str, default: typing.Any = ma.missing, subcast: Subcast = None, **kwargs - ) -> T: + ) -> _T: missing = kwargs.pop("missing", None) or default if isinstance(field_or_factory, type) and issubclass(field_or_factory, ma.fields.Field): field = typing.cast(typing.Type[ma.fields.Field], field_or_factory)(missing=missing, **kwargs) @@ -106,8 +114,8 @@ def _preprocess_list(value: typing.Union[str, typing.Iterable], **kwargs) -> typ def _preprocess_dict( - value: typing.Union[str, typing.Mapping[str, T]], subcast: Subcast, **kwargs -) -> typing.Mapping[str, T]: + value: typing.Union[str, typing.Mapping[str, _T]], subcast: Subcast, **kwargs +) -> typing.Mapping[str, _T]: if isinstance(value, Mapping): return value @@ -176,44 +184,42 @@ class Env: __call__ = _field2method(ma.fields.Field, "__call__") # type: ParserMethod - default_parser_map = dict( - bool=_field2method(ma.fields.Bool, "bool"), - str=_field2method(ma.fields.Str, "str"), - int=_field2method(ma.fields.Int, "int"), - float=_field2method(ma.fields.Float, "float"), - decimal=_field2method(ma.fields.Decimal, "decimal"), - list=_field2method(_make_list_field, "list", preprocess=_preprocess_list), - dict=_field2method(ma.fields.Dict, "dict", preprocess=_preprocess_dict), - 
json=_field2method(ma.fields.Field, "json", preprocess=_preprocess_json), - datetime=_field2method(ma.fields.DateTime, "datetime"), - date=_field2method(ma.fields.Date, "date"), - path=_field2method(PathField, "path"), - log_level=_field2method(LogLevelField, "log_level"), - timedelta=_field2method(ma.fields.TimeDelta, "timedelta"), - uuid=_field2method(ma.fields.UUID, "uuid"), - url=_field2method(URLField, "url"), - dj_db_url=_func2method(_dj_db_url_parser, "dj_db_url"), - dj_email_url=_func2method(_dj_email_url_parser, "dj_email_url"), - ) # type: typing.Dict[str, ParserMethod] + int = _field2method(ma.fields.Int, "int") + bool = _field2method(ma.fields.Bool, "bool") + str = _field2method(ma.fields.Str, "str") + float = _field2method(ma.fields.Float, "float") + decimal = _field2method(ma.fields.Decimal, "decimal") + list = _field2method(_make_list_field, "list", preprocess=_preprocess_list) + dict = _field2method(ma.fields.Dict, "dict", preprocess=_preprocess_dict) + json = _field2method(ma.fields.Field, "json", preprocess=_preprocess_json) + datetime = _field2method(ma.fields.DateTime, "datetime") + date = _field2method(ma.fields.Date, "date") + path = _field2method(PathField, "path") + log_level = _field2method(LogLevelField, "log_level") + timedelta = _field2method(ma.fields.TimeDelta, "timedelta") + uuid = _field2method(ma.fields.UUID, "uuid") + url = _field2method(URLField, "url") + dj_db_url = _func2method(_dj_db_url_parser, "dj_db_url") + dj_email_url = _func2method(_dj_email_url_parser, "dj_email_url") def __init__(self): - self._fields = {} # type: typing.Dict[str, ma.fields.Field] - self._values = {} # type: typing.Dict[str, typing.Any] - self._prefix = None # type: typing.Optional[str] - self.__parser_map__ = self.default_parser_map.copy() + self._fields = {} # type: typing.Dict[_StrType, ma.fields.Field] + self._values = {} # type: typing.Dict[_StrType, typing.Any] + self._prefix = None # type: typing.Optional[_StrType] + self.__custom_parsers__ = {} - def __repr__(self) -> str: + def __repr__(self) -> _StrType: return "<{} {}>".format(self.__class__.__name__, self._values) __str__ = __repr__ @staticmethod def read_env( - path: str = None, - recurse: bool = True, - stream: str = None, - verbose: bool = False, - override: bool = False, + path: _StrType = None, + recurse: _BoolType = True, + stream: _StrType = None, + verbose: _BoolType = False, + override: _BoolType = False, ) -> DotEnv: """Read a .env file into os.environ. @@ -246,7 +252,7 @@ class Env: return load_dotenv(start, stream=stream, verbose=verbose, override=override) @contextlib.contextmanager - def prefixed(self, prefix: str) -> typing.Iterator["Env"]: + def prefixed(self, prefix: _StrType) -> typing.Iterator["Env"]: """Context manager for parsing envvars with a common prefix.""" try: old_prefix = self._prefix @@ -260,20 +266,24 @@ class Env: self._prefix = None self._prefix = old_prefix - def __getattr__(self, name: str, **kwargs): + def __getattr__(self, name: _StrType, **kwargs): try: - return functools.partial(self.__parser_map__[name], self) + return functools.partial(self.__custom_parsers__[name], self) except KeyError as error: raise AttributeError("{} has no attribute {}".format(self, name)) from error - def add_parser(self, name: str, func: typing.Callable) -> None: + def add_parser(self, name: _StrType, func: typing.Callable) -> None: """Register a new parser method with the name ``name``. ``func`` must receive the input value for an environment variable. 
""" - self.__parser_map__[name] = _func2method(func, method_name=name) + if hasattr(self, name): + raise ParserConflictError( + "Env already has a method with name '{}'. Use a different name.".format(name) + ) + self.__custom_parsers__[name] = _func2method(func, method_name=name) return None - def parser_for(self, name: str) -> typing.Callable[[typing.Callable], typing.Callable]: + def parser_for(self, name: _StrType) -> typing.Callable[[typing.Callable], typing.Callable]: """Decorator that registers a new parser method with the name ``name``. The decorated function must receive the input value for an environment variable. """ @@ -284,11 +294,11 @@ class Env: return decorator - def add_parser_from_field(self, name: str, field_cls: typing.Type[ma.fields.Field]): + def add_parser_from_field(self, name: _StrType, field_cls: typing.Type[ma.fields.Field]): """Register a new parser method with name ``name``, given a marshmallow ``Field``.""" - self.__parser_map__[name] = _field2method(field_cls, method_name=name) + self.__custom_parsers__[name] = _field2method(field_cls, method_name=name) - def dump(self) -> typing.Mapping[str, typing.Any]: + def dump(self) -> typing.Mapping[_StrType, typing.Any]: """Dump parsed environment variables to a dictionary of simple data types (numbers and strings). """ @@ -297,8 +307,8 @@ class Env: return dump_result.data if MARSHMALLOW_VERSION_INFO[0] < 3 else dump_result def _get_from_environ( - self, key: str, default: typing.Any, *, proxied: bool = False - ) -> typing.Tuple[str, typing.Any, typing.Optional[str]]: + self, key: _StrType, default: typing.Any, *, proxied: _BoolType = False + ) -> typing.Tuple[_StrType, typing.Any, typing.Optional[_StrType]]: """Access a value from os.environ. Handles proxied variables, e.g. SMTP_LOGIN={{MAILGUN_LOGIN}}. Returns a tuple (envvar_key, envvar_value, proxied_key). The ``envvar_key`` will be different from @@ -316,5 +326,5 @@ class Env: return (key, self._get_from_environ(proxied_key, default, proxied=True)[1], proxied_key) return env_key, value, None - def _get_key(self, key: str, *, omit_prefix: bool = False) -> str: + def _get_key(self, key: _StrType, *, omit_prefix: _BoolType = False) -> _StrType: return self._prefix + key if self._prefix and not omit_prefix else key
**repo:** sloria/environs
**base_commit:** c80b3db7f742be0c503d047d6f1526efd6df655e
**test_patch:**
diff --git a/tests/test_environs.py b/tests/test_environs.py index 1bcf877..b57b179 100644 --- a/tests/test_environs.py +++ b/tests/test_environs.py @@ -282,31 +282,38 @@ class TestCustomTypes: def test_add_parser(self, set_env, env): set_env({"URL": "test.test/"}) - def url(value): + def https_url(value): return "https://" + value - env.add_parser("url", url) - assert env.url("URL") == "https://test.test/" + env.add_parser("https_url", https_url) + assert env.https_url("URL") == "https://test.test/" with pytest.raises(environs.EnvError) as excinfo: env.url("NOT_SET") assert excinfo.value.args[0] == 'Environment variable "NOT_SET" not set' - assert env.url("NOT_SET", "default.test/") == "https://default.test/" + assert env.https_url("NOT_SET", "default.test/") == "https://default.test/" + + def test_cannot_override_built_in_parser(self, set_env, env): + def https_url(value): + return "https://" + value + + with pytest.raises(environs.ParserConflictError): + env.add_parser("url", https_url) def test_parser_for(self, set_env, env): set_env({"URL": "test.test/"}) - @env.parser_for("url") - def url(value): + @env.parser_for("https_url") + def https_url(value): return "https://" + value - assert env.url("URL") == "https://test.test/" + assert env.https_url("URL") == "https://test.test/" with pytest.raises(environs.EnvError) as excinfo: - env.url("NOT_SET") + env.https_url("NOT_SET") assert excinfo.value.args[0] == 'Environment variable "NOT_SET" not set' - assert env.url("NOT_SET", "default.test/") == "https://default.test/" + assert env.https_url("NOT_SET", "default.test/") == "https://default.test/" def test_parser_function_can_take_extra_arguments(self, set_env, env): set_env({"ENV": "dev"}) @@ -324,17 +331,17 @@ class TestCustomTypes: env.enum("ENV", choices=["dev", "prod"]) def test_add_parser_from_field(self, set_env, env): - class MyURL(fields.Field): + class HTTPSURL(fields.Field): def _deserialize(self, value, *args, **kwargs): return "https://" + value - env.add_parser_from_field("url", MyURL) + env.add_parser_from_field("https_url", HTTPSURL) set_env({"URL": "test.test/"}) - assert env.url("URL") == "https://test.test/" + assert env.https_url("URL") == "https://test.test/" with pytest.raises(environs.EnvError) as excinfo: - env.url("NOT_SET") + env.https_url("NOT_SET") assert excinfo.value.args[0] == 'Environment variable "NOT_SET" not set' @@ -371,13 +378,13 @@ class TestDumping: assert result["LOG_LEVEL"] == logging.WARNING def test_env_with_custom_parser(self, set_env, env): - @env.parser_for("url") - def url(value): + @env.parser_for("https_url") + def https_url(value): return "https://" + value set_env({"URL": "test.test"}) - env.url("URL") + env.https_url("URL") assert env.dump() == {"URL": "https://test.test"}
**problem_statement:** IDE autocomplete support

HI! Thanks for the package, looks amazing, but what about IDE autocomplete / IntelliSense / hints? The IDE (I'm using PyCharm) does not prompt existing methods of the `Env` class instance (see picture below).

![Screenshot from 2019-09-20 11-15-20](https://user-images.githubusercontent.com/26823697/65311230-9dc5ab00-db98-11e9-9c5d-1bc372f71db6.png)

For me, this is an important drawback of this package. Will it be decided? Thanks!
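The patch above resolves this by defining the built-in parsers as class attributes instead of entries in a `__parser_map__` lookup dict, which is what makes them visible to static analysis. A brief sketch of the resulting experience (the variable names are illustrative):

```python
from environs import Env

env = Env()

# With parsers as bound methods, an IDE can now autocomplete `env.int`,
# `env.bool`, `env.url`, etc. and check their call signatures.
port = env.int("PORT", 5000)
debug = env.bool("DEBUG", False)

# The flip side, per the changelog in the patch: registering a custom parser
# under a built-in name (e.g. "url") now raises ParserConflictError instead
# of silently overriding the method.
```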
**version:** 0.0
**environment_setup_commit:** c80b3db7f742be0c503d047d6f1526efd6df655e
**FAIL_TO_PASS:** [ "tests/test_environs.py::TestCustomTypes::test_cannot_override_built_in_parser" ]
**PASS_TO_PASS:**
[ "tests/test_environs.py::TestCasting::test_call", "tests/test_environs.py::TestCasting::test_call_with_default", "tests/test_environs.py::TestCasting::test_basic", "tests/test_environs.py::TestCasting::test_empty_str", "tests/test_environs.py::TestCasting::test_int_cast", "tests/test_environs.py::TestCasting::test_invalid_int", "tests/test_environs.py::TestCasting::test_float_cast", "tests/test_environs.py::TestCasting::test_list_cast", "tests/test_environs.py::TestCasting::test_list_with_default_from_string", "tests/test_environs.py::TestCasting::test_list_with_default_from_list", "tests/test_environs.py::TestCasting::test_list_with_subcast", "tests/test_environs.py::TestCasting::test_bool", "tests/test_environs.py::TestCasting::test_list_with_spaces", "tests/test_environs.py::TestCasting::test_dict", "tests/test_environs.py::TestCasting::test_dict_with_subcast", "tests/test_environs.py::TestCasting::test_dict_with_default_from_string", "tests/test_environs.py::TestCasting::test_dict_with_default_from_dict", "tests/test_environs.py::TestCasting::test_decimal_cast", "tests/test_environs.py::TestCasting::test_missing_raises_error", "tests/test_environs.py::TestCasting::test_default_set", "tests/test_environs.py::TestCasting::test_json_cast", "tests/test_environs.py::TestCasting::test_datetime_cast", "tests/test_environs.py::TestCasting::test_date_cast", "tests/test_environs.py::TestCasting::test_timedelta_cast", "tests/test_environs.py::TestCasting::test_uuid_cast", "tests/test_environs.py::TestCasting::test_url_cast", "tests/test_environs.py::TestCasting::test_path_cast", "tests/test_environs.py::TestCasting::test_log_level_cast", "tests/test_environs.py::TestCasting::test_invalid_log_level", "tests/test_environs.py::TestCasting::test_invalid_url[foo]", "tests/test_environs.py::TestCasting::test_invalid_url[42]", "tests/test_environs.py::TestCasting::test_invalid_url[foo@bar]", "tests/test_environs.py::TestProxiedVariables::test_reading_proxied_variable", "tests/test_environs.py::TestProxiedVariables::test_reading_missing_proxied_variable", "tests/test_environs.py::TestProxiedVariables::test_reading_proxied_variable_in_prefix_scope", "tests/test_environs.py::TestEnvFileReading::test_read_env", "tests/test_environs.py::TestEnvFileReading::test_read_env_recurse", "tests/test_environs.py::TestEnvFileReading::test_read_env_non_recurse", "tests/test_environs.py::TestValidation::test_can_add_validator", "tests/test_environs.py::TestValidation::test_can_add_marshmallow_validator", "tests/test_environs.py::TestValidation::test_validator_can_raise_enverror", "tests/test_environs.py::TestValidation::test_failed_vars_are_not_serialized", "tests/test_environs.py::TestCustomTypes::test_add_parser", "tests/test_environs.py::TestCustomTypes::test_parser_for", "tests/test_environs.py::TestCustomTypes::test_parser_function_can_take_extra_arguments", "tests/test_environs.py::TestCustomTypes::test_add_parser_from_field", "tests/test_environs.py::TestDumping::test_dump", "tests/test_environs.py::TestDumping::test_env_with_custom_parser", "tests/test_environs.py::test_repr", "tests/test_environs.py::test_str", "tests/test_environs.py::test_env_isolation", "tests/test_environs.py::TestPrefix::test_prefixed", "tests/test_environs.py::TestPrefix::test_dump_with_prefixed", "tests/test_environs.py::TestNestedPrefix::test_nested_prefixed", "tests/test_environs.py::TestNestedPrefix::test_dump_with_nested_prefixed", "tests/test_environs.py::TestFailedNestedPrefix::test_failed_nested_prefixed", 
"tests/test_environs.py::TestFailedNestedPrefix::test_failed_dump_with_nested_prefixed", "tests/test_environs.py::TestDjango::test_dj_db_url", "tests/test_environs.py::TestDjango::test_dj_db_url_passes_kwargs", "tests/test_environs.py::TestDjango::test_dj_email_url" ]
{ "failed_lite_validators": [ "has_hyperlinks", "has_media", "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false }
2019-09-21 20:28:01+00:00
mit
5,538
### sloria__environs-105

**patch:**
diff --git a/CHANGELOG.md b/CHANGELOG.md index dc28b52..871fd8f 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -24,6 +24,13 @@ def https_url(value): return "https://" + value ``` +Bug fixes: + +- Fix error message for prefixed variables ([#102](https://github.com/sloria/environs/issues/102)). + Thanks [AGeekInside](https://github.com/AGeekInside) for reporting. + +Other changes: + - _Backwards-incompatible_: Rename `Env.__parser_map__` to `Env.__custom_parsers__`. ## 5.2.1 (2019-08-08) diff --git a/environs.py b/environs.py index da97c0c..cb895f6 100644 --- a/environs.py +++ b/environs.py @@ -52,8 +52,9 @@ def _field2method( field = typing.cast(FieldFactory, field_or_factory)(subcast=subcast, missing=missing, **kwargs) parsed_key, raw_value, proxied_key = self._get_from_environ(name, ma.missing) self._fields[parsed_key] = field + source_key = proxied_key or parsed_key if raw_value is ma.missing and field.missing is ma.missing: - raise EnvError('Environment variable "{}" not set'.format(proxied_key or parsed_key)) + raise EnvError('Environment variable "{}" not set'.format(source_key)) if raw_value or raw_value == "": value = raw_value else: @@ -63,7 +64,9 @@ def _field2method( try: value = field.deserialize(value) except ma.ValidationError as error: - raise EnvError('Environment variable "{}" invalid: {}'.format(name, error.args[0])) from error + raise EnvError( + 'Environment variable "{}" invalid: {}'.format(source_key, error.args[0]) + ) from error else: self._values[parsed_key] = value return value
**repo:** sloria/environs
**base_commit:** 8dd86d49465cbe67d3295dc713edbf49b16890f9
**test_patch:**
diff --git a/tests/test_environs.py b/tests/test_environs.py index b57b179..bc5d284 100644 --- a/tests/test_environs.py +++ b/tests/test_environs.py @@ -435,6 +435,11 @@ class TestPrefix: env("NOT_FOUND", "mydefault") == "mydefault" assert env.dump() == {"APP_STR": "foo", "APP_INT": 42, "APP_NOT_FOUND": "mydefault"} + def test_error_message_for_prefixed_var(self, env): + with env.prefixed("APP_"): + with pytest.raises(environs.EnvError, match='Environment variable "APP_INT" invalid'): + env.int("INT", validate=lambda val: val < 42) + class TestNestedPrefix: @pytest.fixture(autouse=True)
**problem_statement:** Error message within prefixed() context

The error message does not prepend the prefix to error message output, when an error occurs in an env.prefixed() context. The code below illustrates the issue. The environment variable is named TEST_VAR, but the error message is showing VAR.

```python
######@shim-dev:~/workspace/shim (environ-adoption) $ export TEST_VAR=-1
######@shim-dev:~/workspace/shim (environ-adoption) $ ipython
Python 3.6.5 (default, Aug 15 2019, 19:51:45)
Type 'copyright', 'credits' or 'license' for more information
IPython 7.7.0 -- An enhanced Interactive Python. Type '?' for help.

In [1]: from environs import Env

In [2]: env = Env()

In [3]: with env.prefixed("TEST_"):
   ...:     invalid_test = env.int("VAR", validate=lambda n: n > 0)
   ...:
---------------------------------------------------------------------------
ValidationError                           Traceback (most recent call last)
~/.pyenv/versions/3.6.5/lib/python3.6/site-packages/environs.py in method(self, name, default, subcast, **kwargs)
     55         try:
---> 56             value = field.deserialize(value)
     57         except ma.ValidationError as error:

~/.pyenv/versions/3.6.5/lib/python3.6/site-packages/marshmallow/fields.py in deserialize(self, value, attr, data)
    265         output = self._deserialize(value, attr, data)
--> 266         self._validate(output)
    267         return output

~/.pyenv/versions/3.6.5/lib/python3.6/site-packages/marshmallow/fields.py in _validate(self, value)
    205         if errors:
--> 206             raise ValidationError(errors, **kwargs)
    207

ValidationError: ['Invalid value.']

The above exception was the direct cause of the following exception:

EnvError                                  Traceback (most recent call last)
<ipython-input-3-be1d17efe506> in <module>
      1 with env.prefixed("TEST_"):
----> 2     invalid_test = env.int("VAR", validate=lambda n: n > 0)
      3

~/.pyenv/versions/3.6.5/lib/python3.6/site-packages/environs.py in method(self, name, default, subcast, **kwargs)
     56             value = field.deserialize(value)
     57         except ma.ValidationError as error:
---> 58             raise EnvError('Environment variable "{}" invalid: {}'.format(name, error.args[0])) from error
     59         else:
     60             self._values[parsed_key] = value

EnvError: Environment variable "VAR" invalid: ['Invalid value.']

In [4]:
```
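For contrast, a sketch of the expected behavior after this record's patch, which formats the error with `source_key` (the fully prefixed name); the expected message mirrors the `match` argument in the test patch, adapted to the repro above:

```python
import os

import environs

os.environ["TEST_VAR"] = "-1"

env = environs.Env()
with env.prefixed("TEST_"):
    # Now reported against the real variable name:
    # EnvError: Environment variable "TEST_VAR" invalid: ['Invalid value.']
    env.int("VAR", validate=lambda n: n > 0)
```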
**version:** 0.0
**environment_setup_commit:** 8dd86d49465cbe67d3295dc713edbf49b16890f9
**FAIL_TO_PASS:** [ "tests/test_environs.py::TestPrefix::test_error_message_for_prefixed_var" ]
**PASS_TO_PASS:**
[ "tests/test_environs.py::TestCasting::test_call", "tests/test_environs.py::TestCasting::test_call_with_default", "tests/test_environs.py::TestCasting::test_basic", "tests/test_environs.py::TestCasting::test_empty_str", "tests/test_environs.py::TestCasting::test_int_cast", "tests/test_environs.py::TestCasting::test_invalid_int", "tests/test_environs.py::TestCasting::test_float_cast", "tests/test_environs.py::TestCasting::test_list_cast", "tests/test_environs.py::TestCasting::test_list_with_default_from_string", "tests/test_environs.py::TestCasting::test_list_with_default_from_list", "tests/test_environs.py::TestCasting::test_list_with_subcast", "tests/test_environs.py::TestCasting::test_bool", "tests/test_environs.py::TestCasting::test_list_with_spaces", "tests/test_environs.py::TestCasting::test_dict", "tests/test_environs.py::TestCasting::test_dict_with_subcast", "tests/test_environs.py::TestCasting::test_dict_with_default_from_string", "tests/test_environs.py::TestCasting::test_dict_with_default_from_dict", "tests/test_environs.py::TestCasting::test_decimal_cast", "tests/test_environs.py::TestCasting::test_missing_raises_error", "tests/test_environs.py::TestCasting::test_default_set", "tests/test_environs.py::TestCasting::test_json_cast", "tests/test_environs.py::TestCasting::test_datetime_cast", "tests/test_environs.py::TestCasting::test_date_cast", "tests/test_environs.py::TestCasting::test_timedelta_cast", "tests/test_environs.py::TestCasting::test_uuid_cast", "tests/test_environs.py::TestCasting::test_url_cast", "tests/test_environs.py::TestCasting::test_path_cast", "tests/test_environs.py::TestCasting::test_log_level_cast", "tests/test_environs.py::TestCasting::test_invalid_log_level", "tests/test_environs.py::TestCasting::test_invalid_url[foo]", "tests/test_environs.py::TestCasting::test_invalid_url[42]", "tests/test_environs.py::TestCasting::test_invalid_url[foo@bar]", "tests/test_environs.py::TestProxiedVariables::test_reading_proxied_variable", "tests/test_environs.py::TestProxiedVariables::test_reading_missing_proxied_variable", "tests/test_environs.py::TestProxiedVariables::test_reading_proxied_variable_in_prefix_scope", "tests/test_environs.py::TestEnvFileReading::test_read_env", "tests/test_environs.py::TestEnvFileReading::test_read_env_recurse", "tests/test_environs.py::TestEnvFileReading::test_read_env_non_recurse", "tests/test_environs.py::TestValidation::test_can_add_validator", "tests/test_environs.py::TestValidation::test_can_add_marshmallow_validator", "tests/test_environs.py::TestValidation::test_validator_can_raise_enverror", "tests/test_environs.py::TestValidation::test_failed_vars_are_not_serialized", "tests/test_environs.py::TestCustomTypes::test_add_parser", "tests/test_environs.py::TestCustomTypes::test_cannot_override_built_in_parser", "tests/test_environs.py::TestCustomTypes::test_parser_for", "tests/test_environs.py::TestCustomTypes::test_parser_function_can_take_extra_arguments", "tests/test_environs.py::TestCustomTypes::test_add_parser_from_field", "tests/test_environs.py::TestDumping::test_dump", "tests/test_environs.py::TestDumping::test_env_with_custom_parser", "tests/test_environs.py::test_repr", "tests/test_environs.py::test_str", "tests/test_environs.py::test_env_isolation", "tests/test_environs.py::TestPrefix::test_prefixed", "tests/test_environs.py::TestPrefix::test_dump_with_prefixed", "tests/test_environs.py::TestNestedPrefix::test_nested_prefixed", "tests/test_environs.py::TestNestedPrefix::test_dump_with_nested_prefixed", 
"tests/test_environs.py::TestFailedNestedPrefix::test_failed_nested_prefixed", "tests/test_environs.py::TestFailedNestedPrefix::test_failed_dump_with_nested_prefixed", "tests/test_environs.py::TestDjango::test_dj_db_url", "tests/test_environs.py::TestDjango::test_dj_db_url_passes_kwargs", "tests/test_environs.py::TestDjango::test_dj_email_url" ]
{ "failed_lite_validators": [ "has_many_modified_files", "has_pytest_match_arg" ], "has_test_patch": true, "is_lite": false }
2019-09-22 18:56:28+00:00
mit
5,539
### sloria__environs-176

**patch:**
diff --git a/CHANGELOG.md b/CHANGELOG.md index 9948757..27083f0 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -31,6 +31,12 @@ env = Env(expand_vars=True) SMTP_LOGIN = env.str("SMTP_LOGIN") # => 'sloria' ``` +Bug fixes: + +- Fix deferred validation behavior for `dj_db_url`, `dj_email_url`, `dj_cache_url`, + and custom parsers ([#121](https://github.com/sloria/environs/issues/121)). + Thanks [hukkinj1](https://github.com/hukkinj1) for reporting. + Other changes: - Test against Python 3.9. diff --git a/environs/__init__.py b/environs/__init__.py index e936e35..14e6b98 100644 --- a/environs/__init__.py +++ b/environs/__init__.py @@ -14,6 +14,7 @@ from urllib.parse import urlparse, ParseResult from pathlib import Path import marshmallow as ma +from marshmallow.utils import _Missing from dotenv.main import load_dotenv, _walk_to_root __version__ = "8.0.0" @@ -34,7 +35,7 @@ FieldFactory = typing.Callable[..., ma.fields.Field] Subcast = typing.Union[typing.Type, typing.Callable[..., _T]] FieldType = typing.Type[ma.fields.Field] FieldOrFactory = typing.Union[FieldType, FieldFactory] -ParserMethod = typing.Callable[..., _T] +ParserMethod = typing.Callable[..., typing.Union[_T, _Missing]] class EnvError(ValueError): @@ -64,7 +65,7 @@ def _field2method( ) -> ParserMethod: def method( self: "Env", name: str, default: typing.Any = ma.missing, subcast: Subcast = None, **kwargs - ) -> _T: + ) -> typing.Union[_T, _Missing]: if self._sealed: raise EnvSealedError("Env has already been sealed. New values cannot be parsed.") missing = kwargs.pop("missing", None) or default @@ -76,11 +77,11 @@ def _field2method( self._fields[parsed_key] = field source_key = proxied_key or parsed_key if raw_value is ma.missing and field.missing is ma.missing: - message = "Environment variable not set." if self.eager: - raise EnvValidationError('Environment variable "{}" not set'.format(source_key), [message]) + raise EnvError('Environment variable "{}" not set'.format(proxied_key or parsed_key)) else: - self._errors[parsed_key].append(message) + self._errors[parsed_key].append("Environment variable not set.") + return ma.missing if raw_value or raw_value == "": value = raw_value else: @@ -110,11 +111,29 @@ def _func2method(func: typing.Callable, method_name: str) -> ParserMethod: if self._sealed: raise EnvSealedError("Env has already been sealed. 
New values cannot be parsed.") parsed_key, raw_value, proxied_key = self._get_from_environ(name, default) - if raw_value is ma.missing: - raise EnvError('Environment variable "{}" not set'.format(proxied_key or parsed_key)) - value = func(raw_value, **kwargs) self._fields[parsed_key] = ma.fields.Field(**kwargs) - self._values[parsed_key] = value + source_key = proxied_key or parsed_key + if raw_value is ma.missing: + if self.eager: + raise EnvError('Environment variable "{}" not set'.format(proxied_key or parsed_key)) + else: + self._errors[parsed_key].append("Environment variable not set.") + return ma.missing + if raw_value or raw_value == "": + value = raw_value + else: + value = ma.missing + try: + value = func(raw_value, **kwargs) + except (EnvError, ma.ValidationError) as error: + messages = error.messages if isinstance(error, ma.ValidationError) else [error.args[0]] + if self.eager: + raise EnvValidationError( + 'Environment variable "{}" invalid: {}'.format(source_key, error.args[0]), messages + ) from error + self._errors[parsed_key].extend(messages) + else: + self._values[parsed_key] = value return value method.__name__ = method_name @@ -178,7 +197,10 @@ def _dj_db_url_parser(value: str, **kwargs) -> dict: "The dj_db_url parser requires the dj-database-url package. " "You can install it with: pip install dj-database-url" ) from error - return dj_database_url.parse(value, **kwargs) + try: + return dj_database_url.parse(value, **kwargs) + except Exception as error: + raise ma.ValidationError("Not a valid database URL.") from error def _dj_email_url_parser(value: str, **kwargs) -> dict: @@ -189,7 +211,10 @@ def _dj_email_url_parser(value: str, **kwargs) -> dict: "The dj_email_url parser requires the dj-email-url package. " "You can install it with: pip install dj-email-url" ) from error - return dj_email_url.parse(value, **kwargs) + try: + return dj_email_url.parse(value, **kwargs) + except Exception as error: + raise ma.ValidationError("Not a valid email URL.") from error def _dj_cache_url_parser(value: str, **kwargs) -> dict: @@ -200,7 +225,12 @@ def _dj_cache_url_parser(value: str, **kwargs) -> dict: "The dj_cache_url parser requires the django-cache-url package. " "You can install it with: pip install django-cache-url" ) from error - return django_cache_url.parse(value, **kwargs) + try: + return django_cache_url.parse(value, **kwargs) + except Exception as error: + # django_cache_url may raise Exception("Unknown backend...") + # so use that error message in the validation error + raise ma.ValidationError(error.args[0]) from error class URLField(ma.fields.URL):
sloria/environs
aba2745f4806e80ffcaab6f698e7d5ff42e3864e
diff --git a/tests/test_environs.py b/tests/test_environs.py index 44ee6ad..f442e9c 100644 --- a/tests/test_environs.py +++ b/tests/test_environs.py @@ -620,6 +620,19 @@ class TestDeferredValidation: assert "INT" in exc.error_messages assert "DTIME" in exc.error_messages + def test_deferred_required_validation(self, env): + env.int("STR") + env.int("INT") + env.datetime("DTIME") + with pytest.raises(environs.EnvValidationError) as excinfo: + env.seal() + exc = excinfo.value + assert exc.error_messages == { + "STR": ["Environment variable not set."], + "INT": ["Environment variable not set."], + "DTIME": ["Environment variable not set."], + } + def test_cannot_add_after_seal(self, env, set_env): set_env({"STR": "foo", "INT": "42"}) env.str("STR") @@ -638,6 +651,72 @@ class TestDeferredValidation: with pytest.raises(environs.EnvSealedError, match="Env has already been sealed"): env.https_url("URL") + # Regression tests for https://github.com/sloria/environs/issues/121 + def test_dj_db_url_with_deferred_validation_missing(self, env): + env.dj_db_url("DATABASE_URL") + with pytest.raises(environs.EnvValidationError) as excinfo: + env.seal() + + exc = excinfo.value + assert exc.error_messages == {"DATABASE_URL": ["Environment variable not set."]} + + def test_dj_db_url_with_deferred_validation_invalid(self, env, set_env): + set_env({"DATABASE_URL": "invalid://"}) + env.dj_db_url("DATABASE_URL") + with pytest.raises(environs.EnvValidationError) as excinfo: + env.seal() + exc = excinfo.value + assert exc.error_messages == {"DATABASE_URL": ["Not a valid database URL."]} + + def test_dj_email_url_with_deferred_validation_missing(self, env): + env.dj_email_url("EMAIL_URL") + with pytest.raises(environs.EnvValidationError) as excinfo: + env.seal() + exc = excinfo.value + assert exc.error_messages == {"EMAIL_URL": ["Environment variable not set."]} + + def test_dj_cache_url_with_deferred_validation_missing(self, env): + env.dj_cache_url("CACHE_URL") + with pytest.raises(environs.EnvValidationError) as excinfo: + env.seal() + + exc = excinfo.value + assert exc.error_messages == {"CACHE_URL": ["Environment variable not set."]} + + def test_dj_cache_url_with_deferred_validation_invalid(self, env, set_env): + set_env({"CACHE_URL": "invalid://"}) + env.dj_cache_url("CACHE_URL") + with pytest.raises(environs.EnvValidationError) as excinfo: + env.seal() + exc = excinfo.value + assert exc.error_messages == {"CACHE_URL": ['Unknown backend: "invalid"']} + + def test_custom_parser_with_deferred_validation_missing(self, env): + @env.parser_for("always_fail") + def always_fail(value): + raise environs.EnvError("Invalid!") + + env.always_fail("MY_VAR") + + with pytest.raises(environs.EnvValidationError) as excinfo: + env.seal() + exc = excinfo.value + assert exc.error_messages == {"MY_VAR": ["Environment variable not set."]} + + def test_custom_parser_with_deferred_validation_invalid(self, env, set_env): + set_env({"MY_VAR": "foo"}) + + @env.parser_for("always_fail") + def always_fail(value): + raise environs.EnvError("Invalid!") + + env.always_fail("MY_VAR") + + with pytest.raises(environs.EnvValidationError) as excinfo: + env.seal() + exc = excinfo.value + assert exc.error_messages == {"MY_VAR": ["Invalid!"]} + class TestExpandVars: @pytest.fixture
Env.dj_db_url, Env.dj_email_url and custom parsers don't work with deferred validation

The `Env.dj_db_url` and `Env.dj_email_url` typecasting methods and custom parsers don't work with deferred validation. Here's a minimal example to reproduce:

```python
env = Env(eager=False)
env.dj_email_url("NON_EXISTING_ENVVAR")
```

This raises an exception, but shouldn't, because `eager=False` is set.
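The regression tests above pin down the intended behavior. Here is a minimal sketch of what deferred validation should look like once the fix is in place (the variable name is illustrative, taken from the reproduction above):

```python
from environs import Env, EnvValidationError

env = Env(eager=False)
env.dj_email_url("NON_EXISTING_ENVVAR")  # deferred: must not raise here

try:
    env.seal()  # all collected errors are reported at seal time
except EnvValidationError as exc:
    # e.g. {'NON_EXISTING_ENVVAR': ['Environment variable not set.']}
    print(exc.error_messages)
```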
0.0
aba2745f4806e80ffcaab6f698e7d5ff42e3864e
[ "tests/test_environs.py::TestDeferredValidation::test_dj_db_url_with_deferred_validation_missing", "tests/test_environs.py::TestDeferredValidation::test_dj_db_url_with_deferred_validation_invalid", "tests/test_environs.py::TestDeferredValidation::test_dj_email_url_with_deferred_validation_missing", "tests/test_environs.py::TestDeferredValidation::test_dj_cache_url_with_deferred_validation_missing", "tests/test_environs.py::TestDeferredValidation::test_dj_cache_url_with_deferred_validation_invalid", "tests/test_environs.py::TestDeferredValidation::test_custom_parser_with_deferred_validation_missing", "tests/test_environs.py::TestDeferredValidation::test_custom_parser_with_deferred_validation_invalid" ]
[ "tests/test_environs.py::TestCasting::test_call", "tests/test_environs.py::TestCasting::test_call_with_default", "tests/test_environs.py::TestCasting::test_basic", "tests/test_environs.py::TestCasting::test_empty_str", "tests/test_environs.py::TestCasting::test_int_cast", "tests/test_environs.py::TestCasting::test_invalid_int", "tests/test_environs.py::TestCasting::test_float_cast", "tests/test_environs.py::TestCasting::test_list_cast", "tests/test_environs.py::TestCasting::test_list_with_default_from_string", "tests/test_environs.py::TestCasting::test_list_with_default_from_list", "tests/test_environs.py::TestCasting::test_list_with_subcast", "tests/test_environs.py::TestCasting::test_list_with_empty_env_and_subcast", "tests/test_environs.py::TestCasting::test_bool", "tests/test_environs.py::TestCasting::test_list_with_spaces", "tests/test_environs.py::TestCasting::test_dict", "tests/test_environs.py::TestCasting::test_dict_with_subcast", "tests/test_environs.py::TestCasting::test_dict_without_subcast_key", "tests/test_environs.py::TestCasting::test_dict_with_subcast_key", "tests/test_environs.py::TestCasting::test_dict_with_default_from_string", "tests/test_environs.py::TestCasting::test_dict_with_default_from_dict", "tests/test_environs.py::TestCasting::test_dict_with_equal", "tests/test_environs.py::TestCasting::test_decimal_cast", "tests/test_environs.py::TestCasting::test_missing_raises_error", "tests/test_environs.py::TestCasting::test_default_set", "tests/test_environs.py::TestCasting::test_json_cast", "tests/test_environs.py::TestCasting::test_datetime_cast", "tests/test_environs.py::TestCasting::test_date_cast", "tests/test_environs.py::TestCasting::test_timedelta_cast", "tests/test_environs.py::TestCasting::test_uuid_cast", "tests/test_environs.py::TestCasting::test_url_cast", "tests/test_environs.py::TestCasting::test_path_cast", "tests/test_environs.py::TestCasting::test_log_level_cast", "tests/test_environs.py::TestCasting::test_invalid_log_level", "tests/test_environs.py::TestCasting::test_invalid_url[foo]", "tests/test_environs.py::TestCasting::test_invalid_url[42]", "tests/test_environs.py::TestCasting::test_invalid_url[foo@bar]", "tests/test_environs.py::TestProxiedVariables::test_reading_proxied_variable", "tests/test_environs.py::TestProxiedVariables::test_reading_missing_proxied_variable", "tests/test_environs.py::TestProxiedVariables::test_reading_proxied_variable_in_prefix_scope", "tests/test_environs.py::TestEnvFileReading::test_read_env", "tests/test_environs.py::TestEnvFileReading::test_read_env_recurse", "tests/test_environs.py::TestEnvFileReading::test_read_env_non_recurse", "tests/test_environs.py::TestEnvFileReading::test_read_env_recurse_from_subfolder", "tests/test_environs.py::TestEnvFileReading::test_read_env_recurse_start_from_subfolder[.custom.env]", "tests/test_environs.py::TestEnvFileReading::test_read_env_recurse_start_from_subfolder[path1]", "tests/test_environs.py::TestEnvFileReading::test_read_env_directory", "tests/test_environs.py::TestValidation::test_can_add_validator", "tests/test_environs.py::TestValidation::test_can_add_marshmallow_validator", "tests/test_environs.py::TestValidation::test_validator_can_raise_enverror", "tests/test_environs.py::TestValidation::test_failed_vars_are_not_serialized", "tests/test_environs.py::TestCustomTypes::test_add_parser", "tests/test_environs.py::TestCustomTypes::test_cannot_override_built_in_parser", "tests/test_environs.py::TestCustomTypes::test_parser_for", 
"tests/test_environs.py::TestCustomTypes::test_parser_function_can_take_extra_arguments", "tests/test_environs.py::TestCustomTypes::test_add_parser_from_field", "tests/test_environs.py::TestDumping::test_dump", "tests/test_environs.py::TestDumping::test_env_with_custom_parser", "tests/test_environs.py::test_repr", "tests/test_environs.py::test_str", "tests/test_environs.py::test_env_isolation", "tests/test_environs.py::TestPrefix::test_prefixed", "tests/test_environs.py::TestPrefix::test_dump_with_prefixed", "tests/test_environs.py::TestPrefix::test_error_message_for_prefixed_var", "tests/test_environs.py::TestNestedPrefix::test_nested_prefixed", "tests/test_environs.py::TestNestedPrefix::test_dump_with_nested_prefixed", "tests/test_environs.py::TestFailedNestedPrefix::test_failed_nested_prefixed", "tests/test_environs.py::TestFailedNestedPrefix::test_failed_dump_with_nested_prefixed", "tests/test_environs.py::TestDjango::test_dj_db_url", "tests/test_environs.py::TestDjango::test_dj_db_url_passes_kwargs", "tests/test_environs.py::TestDjango::test_dj_email_url", "tests/test_environs.py::TestDjango::test_dj_cache_url", "tests/test_environs.py::TestDeferredValidation::test_valid", "tests/test_environs.py::TestDeferredValidation::test_validation", "tests/test_environs.py::TestDeferredValidation::test_deferred_required_validation", "tests/test_environs.py::TestDeferredValidation::test_cannot_add_after_seal", "tests/test_environs.py::TestDeferredValidation::test_custom_parser_not_called_after_seal", "tests/test_environs.py::TestExpandVars::test_full_expand_vars", "tests/test_environs.py::TestExpandVars::test_multiple_expands", "tests/test_environs.py::TestExpandVars::test_recursive_expands", "tests/test_environs.py::TestExpandVars::test_escaped_expand", "tests/test_environs.py::TestExpandVars::test_composite_types" ]
{ "failed_lite_validators": [ "has_many_modified_files", "has_many_hunks", "has_pytest_match_arg" ], "has_test_patch": true, "is_lite": false }
2020-10-31 21:43:53+00:00
mit
5,540
sloria__environs-315
diff --git a/CHANGELOG.md b/CHANGELOG.md index 9692767..37487c8 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,12 @@ # Changelog +## 10.3.0 (unreleased) + +Features: + +- `Env.__repr__` and `Env.__str__` no longer display environment variables, to prevent accidental exfiltration of sensitive data ([#292](https://github.com/sloria/environs/issues/292)). + Thanks [mro-rhansen2](https://github.com/mro-rhansen2) for the suggestion. + ## 10.2.0 (2024-01-09) Features: diff --git a/environs/__init__.py b/environs/__init__.py index 8ba8fd2..b52774d 100644 --- a/environs/__init__.py +++ b/environs/__init__.py @@ -387,7 +387,7 @@ class Env: self.__custom_parsers__: typing.Dict[_StrType, ParserMethod] = {} def __repr__(self) -> _StrType: - return f"<{self.__class__.__name__} {self._values}>" + return f"<{self.__class__.__name__}(eager={self.eager}, expand_vars={self.expand_vars})>" @staticmethod def read_env(
sloria/environs
a3d86cb4b7ad0acba48191bc549981f2e8a3403a
diff --git a/tests/test_environs.py b/tests/test_environs.py index 2430e66..95484e9 100644 --- a/tests/test_environs.py +++ b/tests/test_environs.py @@ -487,15 +487,17 @@ class TestDumping: def test_repr(set_env, env): + env = environs.Env(eager=True, expand_vars=True) set_env({"FOO": "foo", "BAR": "42"}) env.str("FOO") - assert repr(env) == "<Env {}>".format({"FOO": "foo"}) + assert repr(env) == "<Env(eager=True, expand_vars=True)>" def test_str(set_env, env): + env = environs.Env(eager=True, expand_vars=True) set_env({"FOO": "foo", "BAR": "42"}) env.str("FOO") - assert repr(env) == "<Env {}>".format({"FOO": "foo"}) + assert str(env) == "<Env(eager=True, expand_vars=True)>" def test_env_isolation(set_env):
All environment variable names and associated values logged on unhandled env attribute error

We had a surprise recently in our code base when an attribute error was raised on access to an `Env` instance. All environment variables and their associated values were logged to stdout, which was subsequently scraped and shipped to our log analytics stack. The culprit seems to be that the `Env.__repr__` magic method deliberately stringifies the `self._values` dictionary and includes the result in the string representation of any given `Env` instance.

Was this a deliberate design decision? Environment variables are often used to handle secrets in many systems. The choice to spit all of those values out without user input seems unwise and doesn't appear to provide any immediate value to the library itself.

There are obviously failings on our end in this. Specifically, our test suite didn't catch the issue because this particular section of the code admittedly doesn't have great coverage. Be that as it may, it doesn't seem like a sound design choice to arbitrarily leak information that is generally assumed to be sensitive, if not downright privileged, in nature.

My workaround (aside from fixing the typo in the method name) was to simply monkeypatch the `Env.__repr__` method with something safer that wouldn't divulge information we don't want appearing in plain-text logs, in case there are any other "oopsies" in the future. If y'all are opposed to removing the stringification of the `self._values` dictionary altogether, then would y'all consider a `safe: bool = False` constructor parameter? If that flag is set to `True`, then `Env` wouldn't haphazardly report on every value within its scope.
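A rough sketch of the monkeypatch workaround the reporter describes, for versions where `__repr__` still embeds `self._values` (the helper name is illustrative):

```python
import environs

def _safe_repr(self) -> str:
    # deliberately omit parsed values so secrets never reach logs
    return f"<{self.__class__.__name__}>"

# apply before any Env instance can end up in a log message
environs.Env.__repr__ = _safe_repr
environs.Env.__str__ = _safe_repr
```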
0.0
a3d86cb4b7ad0acba48191bc549981f2e8a3403a
[ "tests/test_environs.py::test_repr", "tests/test_environs.py::test_str" ]
[ "tests/test_environs.py::TestCasting::test_call", "tests/test_environs.py::TestCasting::test_call_with_default", "tests/test_environs.py::TestCasting::test_basic", "tests/test_environs.py::TestCasting::test_empty_str", "tests/test_environs.py::TestCasting::test_int_cast", "tests/test_environs.py::TestCasting::test_invalid_int", "tests/test_environs.py::TestCasting::test_float_cast", "tests/test_environs.py::TestCasting::test_list_cast", "tests/test_environs.py::TestCasting::test_list_with_default_from_string", "tests/test_environs.py::TestCasting::test_list_with_default_from_list", "tests/test_environs.py::TestCasting::test_list_with_subcast", "tests/test_environs.py::TestCasting::test_list_with_empty_env_and_subcast", "tests/test_environs.py::TestCasting::test_bool", "tests/test_environs.py::TestCasting::test_list_with_spaces", "tests/test_environs.py::TestCasting::test_list_with_spaces_as_delimiter", "tests/test_environs.py::TestCasting::test_dict", "tests/test_environs.py::TestCasting::test_dict_with_spaces_as_delimiter", "tests/test_environs.py::TestCasting::test_dict_with_subcast_values", "tests/test_environs.py::TestCasting::test_dict_without_subcast_keys", "tests/test_environs.py::TestCasting::test_dict_with_subcast_keys", "tests/test_environs.py::TestCasting::test_dict_with_subcast_key_deprecated", "tests/test_environs.py::TestCasting::test_custom_subcast_list", "tests/test_environs.py::TestCasting::test_custom_subcast_keys_values", "tests/test_environs.py::TestCasting::test_dict_with_default_from_string", "tests/test_environs.py::TestCasting::test_dict_with_default_from_dict", "tests/test_environs.py::TestCasting::test_dict_with_equal", "tests/test_environs.py::TestCasting::test_decimal_cast", "tests/test_environs.py::TestCasting::test_missing_raises_error", "tests/test_environs.py::TestCasting::test_default_set", "tests/test_environs.py::TestCasting::test_json_cast", "tests/test_environs.py::TestCasting::test_invalid_json_raises_error", "tests/test_environs.py::TestCasting::test_json_default", "tests/test_environs.py::TestCasting::test_datetime_cast", "tests/test_environs.py::TestCasting::test_date_cast", "tests/test_environs.py::TestCasting::test_timedelta_cast", "tests/test_environs.py::TestCasting::test_time_cast", "tests/test_environs.py::TestCasting::test_uuid_cast", "tests/test_environs.py::TestCasting::test_url_cast", "tests/test_environs.py::TestCasting::test_url_db_cast", "tests/test_environs.py::TestCasting::test_path_cast", "tests/test_environs.py::TestCasting::test_path_default_value", "tests/test_environs.py::TestCasting::test_log_level_cast", "tests/test_environs.py::TestCasting::test_invalid_log_level", "tests/test_environs.py::TestCasting::test_invalid_url[foo]", "tests/test_environs.py::TestCasting::test_invalid_url[42]", "tests/test_environs.py::TestCasting::test_invalid_url[foo@bar]", "tests/test_environs.py::TestCasting::test_enum_cast", "tests/test_environs.py::TestCasting::test_enum_cast_ignore_case", "tests/test_environs.py::TestCasting::test_invalid_enum", "tests/test_environs.py::TestCasting::test_invalid_enum_ignore_case", "tests/test_environs.py::TestEnvFileReading::test_read_env", "tests/test_environs.py::TestEnvFileReading::test_read_env_returns_false_if_file_not_found", "tests/test_environs.py::TestEnvFileReading::test_read_env_recurse", "tests/test_environs.py::TestEnvFileReading::test_read_env_non_recurse", "tests/test_environs.py::TestEnvFileReading::test_read_env_recurse_from_subfolder", 
"tests/test_environs.py::TestEnvFileReading::test_read_env_recurse_start_from_subfolder[.custom.env]", "tests/test_environs.py::TestEnvFileReading::test_read_env_recurse_start_from_subfolder[path1]", "tests/test_environs.py::TestEnvFileReading::test_read_env_directory", "tests/test_environs.py::TestValidation::test_can_add_validator", "tests/test_environs.py::TestValidation::test_can_add_marshmallow_validator", "tests/test_environs.py::TestValidation::test_validator_can_raise_enverror", "tests/test_environs.py::TestValidation::test_failed_vars_are_not_serialized", "tests/test_environs.py::TestCustomTypes::test_add_parser", "tests/test_environs.py::TestCustomTypes::test_cannot_override_built_in_parser", "tests/test_environs.py::TestCustomTypes::test_parser_for", "tests/test_environs.py::TestCustomTypes::test_parser_function_can_take_extra_arguments", "tests/test_environs.py::TestCustomTypes::test_add_parser_from_field", "tests/test_environs.py::TestDumping::test_dump", "tests/test_environs.py::TestDumping::test_env_with_custom_parser", "tests/test_environs.py::test_env_isolation", "tests/test_environs.py::TestPrefix::test_prefixed", "tests/test_environs.py::TestPrefix::test_dump_with_prefixed", "tests/test_environs.py::TestPrefix::test_error_message_for_prefixed_var", "tests/test_environs.py::TestNestedPrefix::test_nested_prefixed", "tests/test_environs.py::TestNestedPrefix::test_dump_with_nested_prefixed", "tests/test_environs.py::TestFailedNestedPrefix::test_failed_nested_prefixed", "tests/test_environs.py::TestFailedNestedPrefix::test_failed_dump_with_nested_prefixed", "tests/test_environs.py::TestDjango::test_dj_db_url", "tests/test_environs.py::TestDjango::test_dj_db_url_passes_kwargs", "tests/test_environs.py::TestDjango::test_dj_email_url", "tests/test_environs.py::TestDjango::test_dj_cache_url", "tests/test_environs.py::TestDeferredValidation::test_valid", "tests/test_environs.py::TestDeferredValidation::test_validation", "tests/test_environs.py::TestDeferredValidation::test_deferred_required_validation", "tests/test_environs.py::TestDeferredValidation::test_cannot_add_after_seal", "tests/test_environs.py::TestDeferredValidation::test_custom_parser_not_called_after_seal", "tests/test_environs.py::TestDeferredValidation::test_dj_db_url_with_deferred_validation_missing", "tests/test_environs.py::TestDeferredValidation::test_dj_db_url_with_deferred_validation_invalid", "tests/test_environs.py::TestDeferredValidation::test_dj_email_url_with_deferred_validation_missing", "tests/test_environs.py::TestDeferredValidation::test_dj_cache_url_with_deferred_validation_missing", "tests/test_environs.py::TestDeferredValidation::test_dj_cache_url_with_deferred_validation_invalid", "tests/test_environs.py::TestDeferredValidation::test_custom_parser_with_deferred_validation_missing", "tests/test_environs.py::TestDeferredValidation::test_custom_parser_with_deferred_validation_invalid", "tests/test_environs.py::TestExpandVars::test_full_expand_vars", "tests/test_environs.py::TestExpandVars::test_multiple_expands", "tests/test_environs.py::TestExpandVars::test_recursive_expands", "tests/test_environs.py::TestExpandVars::test_default_expands", "tests/test_environs.py::TestExpandVars::test_escaped_expand", "tests/test_environs.py::TestExpandVars::test_composite_types" ]
{ "failed_lite_validators": [ "has_many_modified_files" ], "has_test_patch": true, "is_lite": false }
2024-01-11 16:01:28+00:00
mit
5,541
sloria__environs-316
diff --git a/CHANGELOG.md b/CHANGELOG.md index 37487c8..b5264fd 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -7,6 +7,11 @@ Features: - `Env.__repr__` and `Env.__str__` no longer display environment variables, to prevent accidental exfiltration of sensitive data ([#292](https://github.com/sloria/environs/issues/292)). Thanks [mro-rhansen2](https://github.com/mro-rhansen2) for the suggestion. +Bug fixes: + +- Fix passing `None` as default to `env.list` ([#298](https://github.com/sloria/environs/issues/298)). + Thanks [lucas-bremond](https://github.com/lucas-bremond) for reporting. + ## 10.2.0 (2024-01-09) Features: diff --git a/environs/__init__.py b/environs/__init__.py index b52774d..3fc16ca 100644 --- a/environs/__init__.py +++ b/environs/__init__.py @@ -201,7 +201,7 @@ def _make_list_field(*, subcast: typing.Optional[type], **kwargs) -> ma.fields.L def _preprocess_list( value: typing.Union[str, typing.Iterable], *, delimiter: str = ",", **kwargs ) -> typing.Iterable: - if ma.utils.is_iterable_but_not_string(value): + if ma.utils.is_iterable_but_not_string(value) or value is None: return value return typing.cast(str, value).split(delimiter) if value != "" else []
sloria/environs
7922458380aac4ba2120b89dcb91f9b98eacc6a0
diff --git a/tests/test_environs.py b/tests/test_environs.py index 95484e9..e3248eb 100644 --- a/tests/test_environs.py +++ b/tests/test_environs.py @@ -82,12 +82,16 @@ class TestCasting: set_env({"LIST": "1,2,3"}) assert env.list("LIST") == ["1", "2", "3"] - def test_list_with_default_from_string(self, set_env, env): + def test_list_with_default_from_string(self, env): assert env.list("LIST", "1,2") == ["1", "2"] - def test_list_with_default_from_list(self, set_env, env): + def test_list_with_default_from_list(self, env): assert env.list("LIST", ["1"]) == ["1"] + # https://github.com/sloria/environs/issues/298 + def test_list_with_default_none(self, env): + assert env.list("LIST", default=None) is None + def test_list_with_subcast(self, set_env, env): set_env({"LIST": "1,2,3"}) assert env.list("LIST", subcast=int) == [1, 2, 3]
Inconsistent behavior with `env.list`

When `MY_ENV_VAR` is __not__ defined,

```py
env.int("MY_ENV_VAR", default=None)
```

returns `None` (as expected), but:

```py
env.list("MY_ENV_VAR", subcast=str, default=None)
```

yields the following exception:

```py
Traceback (most recent call last):
  File "<stdin>", line 1, in <module>
  File "/usr/local/lib/python3.10/site-packages/environs/__init__.py", line 122, in method
    value = preprocess(value, **preprocess_kwargs)
  File "/usr/local/lib/python3.10/site-packages/environs/__init__.py", line 206, in _preprocess_list
    return typing.cast(str, value).split(delimiter) if value != "" else []
AttributeError: 'NoneType' object has no attribute 'split'
```

I believe the second example should return `None` as well. Thoughts?
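The one-line guard in the patch above short-circuits the split when the value is `None`, making the two calls behave consistently. A minimal sketch of the post-fix behavior, matching the new regression test:

```python
from environs import Env

env = Env()

# with MY_ENV_VAR unset in the environment:
assert env.int("MY_ENV_VAR", default=None) is None   # already worked
assert env.list("MY_ENV_VAR", default=None) is None  # works after the fix
```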
0.0
7922458380aac4ba2120b89dcb91f9b98eacc6a0
[ "tests/test_environs.py::TestCasting::test_list_with_default_none" ]
[ "tests/test_environs.py::TestCasting::test_call", "tests/test_environs.py::TestCasting::test_call_with_default", "tests/test_environs.py::TestCasting::test_basic", "tests/test_environs.py::TestCasting::test_empty_str", "tests/test_environs.py::TestCasting::test_int_cast", "tests/test_environs.py::TestCasting::test_invalid_int", "tests/test_environs.py::TestCasting::test_float_cast", "tests/test_environs.py::TestCasting::test_list_cast", "tests/test_environs.py::TestCasting::test_list_with_default_from_string", "tests/test_environs.py::TestCasting::test_list_with_default_from_list", "tests/test_environs.py::TestCasting::test_list_with_subcast", "tests/test_environs.py::TestCasting::test_list_with_empty_env_and_subcast", "tests/test_environs.py::TestCasting::test_bool", "tests/test_environs.py::TestCasting::test_list_with_spaces", "tests/test_environs.py::TestCasting::test_list_with_spaces_as_delimiter", "tests/test_environs.py::TestCasting::test_dict", "tests/test_environs.py::TestCasting::test_dict_with_spaces_as_delimiter", "tests/test_environs.py::TestCasting::test_dict_with_subcast_values", "tests/test_environs.py::TestCasting::test_dict_without_subcast_keys", "tests/test_environs.py::TestCasting::test_dict_with_subcast_keys", "tests/test_environs.py::TestCasting::test_dict_with_subcast_key_deprecated", "tests/test_environs.py::TestCasting::test_custom_subcast_list", "tests/test_environs.py::TestCasting::test_custom_subcast_keys_values", "tests/test_environs.py::TestCasting::test_dict_with_default_from_string", "tests/test_environs.py::TestCasting::test_dict_with_default_from_dict", "tests/test_environs.py::TestCasting::test_dict_with_equal", "tests/test_environs.py::TestCasting::test_decimal_cast", "tests/test_environs.py::TestCasting::test_missing_raises_error", "tests/test_environs.py::TestCasting::test_default_set", "tests/test_environs.py::TestCasting::test_json_cast", "tests/test_environs.py::TestCasting::test_invalid_json_raises_error", "tests/test_environs.py::TestCasting::test_json_default", "tests/test_environs.py::TestCasting::test_datetime_cast", "tests/test_environs.py::TestCasting::test_date_cast", "tests/test_environs.py::TestCasting::test_timedelta_cast", "tests/test_environs.py::TestCasting::test_time_cast", "tests/test_environs.py::TestCasting::test_uuid_cast", "tests/test_environs.py::TestCasting::test_url_cast", "tests/test_environs.py::TestCasting::test_url_db_cast", "tests/test_environs.py::TestCasting::test_path_cast", "tests/test_environs.py::TestCasting::test_path_default_value", "tests/test_environs.py::TestCasting::test_log_level_cast", "tests/test_environs.py::TestCasting::test_invalid_log_level", "tests/test_environs.py::TestCasting::test_invalid_url[foo]", "tests/test_environs.py::TestCasting::test_invalid_url[42]", "tests/test_environs.py::TestCasting::test_invalid_url[foo@bar]", "tests/test_environs.py::TestCasting::test_enum_cast", "tests/test_environs.py::TestCasting::test_enum_cast_ignore_case", "tests/test_environs.py::TestCasting::test_invalid_enum", "tests/test_environs.py::TestCasting::test_invalid_enum_ignore_case", "tests/test_environs.py::TestEnvFileReading::test_read_env", "tests/test_environs.py::TestEnvFileReading::test_read_env_returns_false_if_file_not_found", "tests/test_environs.py::TestEnvFileReading::test_read_env_recurse", "tests/test_environs.py::TestEnvFileReading::test_read_env_non_recurse", "tests/test_environs.py::TestEnvFileReading::test_read_env_recurse_from_subfolder", 
"tests/test_environs.py::TestEnvFileReading::test_read_env_recurse_start_from_subfolder[.custom.env]", "tests/test_environs.py::TestEnvFileReading::test_read_env_recurse_start_from_subfolder[path1]", "tests/test_environs.py::TestEnvFileReading::test_read_env_directory", "tests/test_environs.py::TestValidation::test_can_add_validator", "tests/test_environs.py::TestValidation::test_can_add_marshmallow_validator", "tests/test_environs.py::TestValidation::test_validator_can_raise_enverror", "tests/test_environs.py::TestValidation::test_failed_vars_are_not_serialized", "tests/test_environs.py::TestCustomTypes::test_add_parser", "tests/test_environs.py::TestCustomTypes::test_cannot_override_built_in_parser", "tests/test_environs.py::TestCustomTypes::test_parser_for", "tests/test_environs.py::TestCustomTypes::test_parser_function_can_take_extra_arguments", "tests/test_environs.py::TestCustomTypes::test_add_parser_from_field", "tests/test_environs.py::TestDumping::test_dump", "tests/test_environs.py::TestDumping::test_env_with_custom_parser", "tests/test_environs.py::test_repr", "tests/test_environs.py::test_str", "tests/test_environs.py::test_env_isolation", "tests/test_environs.py::TestPrefix::test_prefixed", "tests/test_environs.py::TestPrefix::test_dump_with_prefixed", "tests/test_environs.py::TestPrefix::test_error_message_for_prefixed_var", "tests/test_environs.py::TestNestedPrefix::test_nested_prefixed", "tests/test_environs.py::TestNestedPrefix::test_dump_with_nested_prefixed", "tests/test_environs.py::TestFailedNestedPrefix::test_failed_nested_prefixed", "tests/test_environs.py::TestFailedNestedPrefix::test_failed_dump_with_nested_prefixed", "tests/test_environs.py::TestDjango::test_dj_db_url", "tests/test_environs.py::TestDjango::test_dj_db_url_passes_kwargs", "tests/test_environs.py::TestDjango::test_dj_email_url", "tests/test_environs.py::TestDjango::test_dj_cache_url", "tests/test_environs.py::TestDeferredValidation::test_valid", "tests/test_environs.py::TestDeferredValidation::test_validation", "tests/test_environs.py::TestDeferredValidation::test_deferred_required_validation", "tests/test_environs.py::TestDeferredValidation::test_cannot_add_after_seal", "tests/test_environs.py::TestDeferredValidation::test_custom_parser_not_called_after_seal", "tests/test_environs.py::TestDeferredValidation::test_dj_db_url_with_deferred_validation_missing", "tests/test_environs.py::TestDeferredValidation::test_dj_db_url_with_deferred_validation_invalid", "tests/test_environs.py::TestDeferredValidation::test_dj_email_url_with_deferred_validation_missing", "tests/test_environs.py::TestDeferredValidation::test_dj_cache_url_with_deferred_validation_missing", "tests/test_environs.py::TestDeferredValidation::test_dj_cache_url_with_deferred_validation_invalid", "tests/test_environs.py::TestDeferredValidation::test_custom_parser_with_deferred_validation_missing", "tests/test_environs.py::TestDeferredValidation::test_custom_parser_with_deferred_validation_invalid", "tests/test_environs.py::TestExpandVars::test_full_expand_vars", "tests/test_environs.py::TestExpandVars::test_multiple_expands", "tests/test_environs.py::TestExpandVars::test_recursive_expands", "tests/test_environs.py::TestExpandVars::test_default_expands", "tests/test_environs.py::TestExpandVars::test_escaped_expand", "tests/test_environs.py::TestExpandVars::test_composite_types" ]
{ "failed_lite_validators": [ "has_many_modified_files" ], "has_test_patch": true, "is_lite": false }
2024-01-11 16:08:00+00:00
mit
5,542
sloria__environs-77
diff --git a/README.md b/README.md index bc89ef1..a5d5a21 100644 --- a/README.md +++ b/README.md @@ -16,6 +16,7 @@ It allows you to store configuration separate from your code, as per * [Basic usage](#basic-usage) * [Supported types](#supported-types) * [Reading .env files](#reading-env-files) + * [Reading a specific file](#reading-a-specific-file) * [Handling prefixes](#handling-prefixes) * [Proxied variables](#proxied-variables) * [Validation](#validation) @@ -119,6 +120,27 @@ env.bool("DEBUG") # => True env.int("PORT") # => 4567 ``` +### Reading a specific file + +By default, `Env.read_env` will look for a `.env` file in current +directory and recurse upwards until a `.env` file is found. + +You can also read a specific file: + +```python +from environs import Env + +with open(".env.test", "w") as fobj: + fobj.write("A=foo\n") + fobj.write("B=123\n") + +env = Env() +env.read_env(".env.test", recurse=False) + +assert env("A") == "foo" +assert env.int("B") == 123 +``` + ## Handling prefixes ```python diff --git a/environs.py b/environs.py index 4b60270..bc9d22b 100644 --- a/environs.py +++ b/environs.py @@ -12,6 +12,12 @@ except ImportError: # Python 2 import urlparse +try: + from collections.abc import Mapping +except ImportError: + # Python 2 + from collections import Mapping + import marshmallow as ma from dotenv import load_dotenv from dotenv.main import _walk_to_root @@ -108,6 +114,9 @@ def _preprocess_list(value, **kwargs): def _preprocess_dict(value, **kwargs): + if isinstance(value, Mapping): + return value + subcast = kwargs.get("subcast") return { key.strip(): subcast(val.strip()) if subcast else val.strip()
sloria/environs
559d768a8b0f70969fe2ca0eb91e223575bac5bb
diff --git a/tests/test_environs.py b/tests/test_environs.py index 3d54638..5c5afe0 100644 --- a/tests/test_environs.py +++ b/tests/test_environs.py @@ -74,6 +74,12 @@ class TestCasting: set_env({"LIST": "1,2,3"}) assert env.list("LIST") == ["1", "2", "3"] + def test_list_with_default_from_string(self, set_env, env): + assert env.list("LIST", "1,2") == ["1", "2"] + + def test_list_with_default_from_list(self, set_env, env): + assert env.list("LIST", ["1"]) == ["1"] + def test_list_with_subcast(self, set_env, env): set_env({"LIST": "1,2,3"}) assert env.list("LIST", subcast=int) == [1, 2, 3] @@ -100,6 +106,12 @@ class TestCasting: set_env({"DICT": "key1=1,key2=2"}) assert env.dict("DICT", subcast=int) == {"key1": 1, "key2": 2} + def test_dict_with_default_from_string(self, set_env, env): + assert env.dict("DICT", "key1=1,key2=2") == {"key1": "1", "key2": "2"} + + def test_dict_with_default_from_dict(self, set_env, env): + assert env.dict("DICT", {"key1": "1"}) == {"key1": "1"} + def test_decimat_cast(self, set_env, env): set_env({"DECIMAL": "12.34"}) assert env.decimal("DECIMAL") == Decimal("12.34")
Allow opening other files, not just .env

Currently environs reads only hard-coded `.env` files, and only the path is available. Yet more and more tools allow custom `.env` files. I believe environs should add a parameter like `file='.env'` so the default can be overridden.
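The README portion of the patch above already answers this: `Env.read_env` accepts an explicit path. A sketch of that usage, adapted from the patched docs:

```python
from environs import Env

# write a non-default env file (illustrative)
with open(".env.test", "w") as fobj:
    fobj.write("A=foo\n")
    fobj.write("B=123\n")

env = Env()
env.read_env(".env.test", recurse=False)

assert env("A") == "foo"
assert env.int("B") == 123
```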
0.0
559d768a8b0f70969fe2ca0eb91e223575bac5bb
[ "tests/test_environs.py::TestCasting::test_dict_with_default_from_dict" ]
[ "tests/test_environs.py::TestCasting::test_call", "tests/test_environs.py::TestCasting::test_call_with_default", "tests/test_environs.py::TestCasting::test_basic", "tests/test_environs.py::TestCasting::test_empty_str", "tests/test_environs.py::TestCasting::test_int_cast", "tests/test_environs.py::TestCasting::test_invalid_int", "tests/test_environs.py::TestCasting::test_float_cast", "tests/test_environs.py::TestCasting::test_list_cast", "tests/test_environs.py::TestCasting::test_list_with_default_from_string", "tests/test_environs.py::TestCasting::test_list_with_default_from_list", "tests/test_environs.py::TestCasting::test_list_with_subcast", "tests/test_environs.py::TestCasting::test_bool", "tests/test_environs.py::TestCasting::test_list_with_spaces", "tests/test_environs.py::TestCasting::test_dict", "tests/test_environs.py::TestCasting::test_dict_with_subcast", "tests/test_environs.py::TestCasting::test_dict_with_default_from_string", "tests/test_environs.py::TestCasting::test_decimat_cast", "tests/test_environs.py::TestCasting::test_missing_raises_error", "tests/test_environs.py::TestCasting::test_default_set", "tests/test_environs.py::TestCasting::test_json_cast", "tests/test_environs.py::TestCasting::test_datetime_cast", "tests/test_environs.py::TestCasting::test_date_cast", "tests/test_environs.py::TestCasting::test_timedelta_cast", "tests/test_environs.py::TestCasting::test_uuid_cast", "tests/test_environs.py::TestCasting::test_url_cast", "tests/test_environs.py::TestCasting::test_invalid_url[foo]", "tests/test_environs.py::TestCasting::test_invalid_url[42]", "tests/test_environs.py::TestCasting::test_invalid_url[foo@bar]", "tests/test_environs.py::TestProxiedVariables::test_reading_proxied_variable", "tests/test_environs.py::TestProxiedVariables::test_reading_missing_proxied_variable", "tests/test_environs.py::TestProxiedVariables::test_reading_proxied_variable_in_prefix_scope", "tests/test_environs.py::TestEnvFileReading::test_read_env", "tests/test_environs.py::TestValidation::test_can_add_validator", "tests/test_environs.py::TestValidation::test_can_add_marshmallow_validator", "tests/test_environs.py::TestValidation::test_validator_can_raise_enverror", "tests/test_environs.py::TestValidation::test_failed_vars_are_not_serialized", "tests/test_environs.py::TestCustomTypes::test_add_parser", "tests/test_environs.py::TestCustomTypes::test_parser_for", "tests/test_environs.py::TestCustomTypes::test_parser_function_can_take_extra_arguments", "tests/test_environs.py::TestCustomTypes::test_add_parser_from_field", "tests/test_environs.py::TestDumping::test_dump", "tests/test_environs.py::TestDumping::test_env_with_custom_parser", "tests/test_environs.py::test_repr", "tests/test_environs.py::test_str", "tests/test_environs.py::test_env_isolation", "tests/test_environs.py::TestPrefix::test_prefixed", "tests/test_environs.py::TestPrefix::test_dump_with_prefixed", "tests/test_environs.py::TestNestedPrefix::test_nested_prefixed", "tests/test_environs.py::TestNestedPrefix::test_dump_with_nested_prefixed", "tests/test_environs.py::TestDjango::test_dj_db_url", "tests/test_environs.py::TestDjango::test_dj_db_url_passes_kwargs", "tests/test_environs.py::TestDjango::test_dj_email_url" ]
{ "failed_lite_validators": [ "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false }
2019-05-15 15:29:47+00:00
mit
5,543
sloria__sphinx-issues-131
diff --git a/README.rst b/README.rst index 654f972..1c875b6 100644 --- a/README.rst +++ b/README.rst @@ -152,6 +152,8 @@ Changelog 3.1.0 (unreleased) ------------------ +- Default to linking GH Sponsors for the :user: role `Issue #93 <https://github.com/sloria/sphinx-issues/issues/129>`_. + Thanks @webknjaz for the suggestion. - Support Python 3.8-3.12. Older versions are no longer supported. - Remove `__version__`, `__author__`, and `__license__` attributes. Use `importlib.metadata` to read this metadata instead. diff --git a/src/sphinx_issues/__init__.py b/src/sphinx_issues/__init__.py index b5c7bec..f63824a 100644 --- a/src/sphinx_issues/__init__.py +++ b/src/sphinx_issues/__init__.py @@ -369,7 +369,10 @@ def setup(app): # e.g. 'https://github.com/{user}' app.add_config_value( "issues_user_uri", - default="https://github.com/{user}", + # Default to sponsors URL. + # GitHub will automatically redirect to profile + # if Sponsors isn't set up. + default="https://github.com/sponsors/{user}", rebuild="html", types=[str], )
sloria/sphinx-issues
9d176de1b54abf813355823f55d5fc35a1c64f3b
diff --git a/tests/test_sphinx_issues.py b/tests/test_sphinx_issues.py index 8462d1d..84b9f52 100644 --- a/tests/test_sphinx_issues.py +++ b/tests/test_sphinx_issues.py @@ -86,13 +86,13 @@ def inliner(app): "#42", "https://github.com/marshmallow-code/marshmallow/pull/42", ), - (user_role, "user", "sloria", "@sloria", "https://github.com/sloria"), + (user_role, "user", "sloria", "@sloria", "https://github.com/sponsors/sloria"), ( user_role, "user", "Steven Loria <sloria>", "Steven Loria", - "https://github.com/sloria", + "https://github.com/sponsors/sloria", ), ( cve_role,
Default to linking GH Sponsors for the `:user:` role

So I've been using regular `extlinks` for this (having seen the idea in the tox repo years ago). But I improved it for many of the projects I contribute to by linking the GitHub Sponsors pages of people, since this role is advertised for use in bylines. Here are some examples: https://setuptools.pypa.io/en/latest/history.html#v67-7-0 / https://yarl.aio-libs.org/en/latest/changes/#released-versions / https://yarl.aio-libs.org/en/latest/contributing/guidelines/#alright-so-how-to-add-a-news-fragment.

The primary motivation is to give credit to people who contribute / send pull requests. Then, when somebody else reads that changelog and clicks on the usernames, they are hinted to tip. It's also worth noting that for accounts that don't have GH Sponsors set up, GitHub itself automatically redirects to their normal profile pages, so there's always a fallback.

This is basically what I propose:

```diff
- issues_user_uri = "https://github.com/{user}"
+ issues_user_uri = "https://github.com/sponsors/{user}"
```
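Since GitHub falls back to the profile page when Sponsors isn't configured, the new default is safe to ship. A sketch of how a project that still prefers plain profile links could override it in `conf.py` (the override itself is an assumption; the config name comes from the patch above):

```python
# conf.py
extensions = ["sphinx_issues"]

# opt out of the sponsors default and link plain profiles instead
issues_user_uri = "https://github.com/{user}"
```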
0.0
9d176de1b54abf813355823f55d5fc35a1c64f3b
[ "tests/test_sphinx_issues.py::test_roles[app0-role4-user-sloria-@sloria-https://github.com/sponsors/sloria]", "tests/test_sphinx_issues.py::test_roles[app0-role5-user-Steven", "tests/test_sphinx_issues.py::test_roles[app1-role4-user-sloria-@sloria-https://github.com/sponsors/sloria]", "tests/test_sphinx_issues.py::test_roles[app1-role5-user-Steven", "tests/test_sphinx_issues.py::test_roles[app2-role4-user-sloria-@sloria-https://github.com/sponsors/sloria]", "tests/test_sphinx_issues.py::test_roles[app2-role5-user-Steven" ]
[ "tests/test_sphinx_issues.py::test_roles[app0-role0-issue-42-#42-https://github.com/marshmallow-code/marshmallow/issues/42]", "tests/test_sphinx_issues.py::test_roles[app0-role1-issue-Hard", "tests/test_sphinx_issues.py::test_roles[app0-role2-issue-Not", "tests/test_sphinx_issues.py::test_roles[app0-role3-pr-42-#42-https://github.com/marshmallow-code/marshmallow/pull/42]", "tests/test_sphinx_issues.py::test_roles[app0-cve_role-cve-CVE-2018-17175-CVE-2018-17175-https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2018-17175]", "tests/test_sphinx_issues.py::test_roles[app0-cwe_role-cve-CWE-787-CWE-787-https://cwe.mitre.org/data/definitions/787.html]", "tests/test_sphinx_issues.py::test_roles[app0-role8-commit-123abc456def-@123abc4-https://github.com/marshmallow-code/marshmallow/commit/123abc456def]", "tests/test_sphinx_issues.py::test_roles[app0-role9-issue-sloria/webargs#42-sloria/webargs#42-https://github.com/sloria/webargs/issues/42]", "tests/test_sphinx_issues.py::test_roles[app0-role10-pr-sloria/webargs#42-sloria/webargs#42-https://github.com/sloria/webargs/pull/42]", "tests/test_sphinx_issues.py::test_roles[app0-role11-commit-sloria/webargs@abc123def456-sloria/webargs@abc123d-https://github.com/sloria/webargs/commit/abc123def456]", "tests/test_sphinx_issues.py::test_roles[app1-role0-issue-42-#42-https://github.com/marshmallow-code/marshmallow/issues/42]", "tests/test_sphinx_issues.py::test_roles[app1-role1-issue-Hard", "tests/test_sphinx_issues.py::test_roles[app1-role2-issue-Not", "tests/test_sphinx_issues.py::test_roles[app1-role3-pr-42-#42-https://github.com/marshmallow-code/marshmallow/pull/42]", "tests/test_sphinx_issues.py::test_roles[app1-cve_role-cve-CVE-2018-17175-CVE-2018-17175-https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2018-17175]", "tests/test_sphinx_issues.py::test_roles[app1-cwe_role-cve-CWE-787-CWE-787-https://cwe.mitre.org/data/definitions/787.html]", "tests/test_sphinx_issues.py::test_roles[app1-role8-commit-123abc456def-@123abc4-https://github.com/marshmallow-code/marshmallow/commit/123abc456def]", "tests/test_sphinx_issues.py::test_roles[app1-role9-issue-sloria/webargs#42-sloria/webargs#42-https://github.com/sloria/webargs/issues/42]", "tests/test_sphinx_issues.py::test_roles[app1-role10-pr-sloria/webargs#42-sloria/webargs#42-https://github.com/sloria/webargs/pull/42]", "tests/test_sphinx_issues.py::test_roles[app1-role11-commit-sloria/webargs@abc123def456-sloria/webargs@abc123d-https://github.com/sloria/webargs/commit/abc123def456]", "tests/test_sphinx_issues.py::test_roles[app2-role0-issue-42-#42-https://github.com/marshmallow-code/marshmallow/issues/42]", "tests/test_sphinx_issues.py::test_roles[app2-role1-issue-Hard", "tests/test_sphinx_issues.py::test_roles[app2-role2-issue-Not", "tests/test_sphinx_issues.py::test_roles[app2-role3-pr-42-#42-https://github.com/marshmallow-code/marshmallow/pull/42]", "tests/test_sphinx_issues.py::test_roles[app2-cve_role-cve-CVE-2018-17175-CVE-2018-17175-https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2018-17175]", "tests/test_sphinx_issues.py::test_roles[app2-cwe_role-cve-CWE-787-CWE-787-https://cwe.mitre.org/data/definitions/787.html]", "tests/test_sphinx_issues.py::test_roles[app2-role8-commit-123abc456def-@123abc4-https://github.com/marshmallow-code/marshmallow/commit/123abc456def]", "tests/test_sphinx_issues.py::test_roles[app2-role9-issue-sloria/webargs#42-sloria/webargs#42-https://github.com/sloria/webargs/issues/42]", 
"tests/test_sphinx_issues.py::test_roles[app2-role10-pr-sloria/webargs#42-sloria/webargs#42-https://github.com/sloria/webargs/pull/42]", "tests/test_sphinx_issues.py::test_roles[app2-role11-commit-sloria/webargs@abc123def456-sloria/webargs@abc123d-https://github.com/sloria/webargs/commit/abc123def456]", "tests/test_sphinx_issues.py::test_issue_role_multiple[app0]", "tests/test_sphinx_issues.py::test_issue_role_multiple[app1]", "tests/test_sphinx_issues.py::test_issue_role_multiple[app2]", "tests/test_sphinx_issues.py::test_issue_role_multiple_with_external[app0]", "tests/test_sphinx_issues.py::test_issue_role_multiple_with_external[app1]", "tests/test_sphinx_issues.py::test_issue_role_multiple_with_external[app2]", "tests/test_sphinx_issues.py::test_roles_custom_uri[role0-issue-42-#42-https://gitlab.company.com/myteam/super_great_project/-/issues/42]", "tests/test_sphinx_issues.py::test_roles_custom_uri[role1-issue-Hard", "tests/test_sphinx_issues.py::test_roles_custom_uri[role2-issue-Not", "tests/test_sphinx_issues.py::test_roles_custom_uri[role3-pr-42-!42-https://gitlab.company.com/myteam/super_great_project/-/merge_requests/42]", "tests/test_sphinx_issues.py::test_roles_custom_uri[role4-user-sloria-@sloria-https://gitlab.company.com/sloria]", "tests/test_sphinx_issues.py::test_roles_custom_uri[role5-user-Steven", "tests/test_sphinx_issues.py::test_roles_custom_uri[role6-commit-123abc456def-@123abc4-https://gitlab.company.com/myteam/super_great_project/-/commit/123abc456def]", "tests/test_sphinx_issues.py::test_roles_custom_uri[role7-issue-sloria/webargs#42-sloria/webargs#42-https://gitlab.company.com/sloria/webargs/-/issues/42]", "tests/test_sphinx_issues.py::test_roles_custom_uri[role8-pr-sloria/webargs#42-sloria/webargs!42-https://gitlab.company.com/sloria/webargs/-/merge_requests/42]", "tests/test_sphinx_issues.py::test_roles_custom_uri[role9-commit-sloria/webargs@abc123def456-sloria/webargs@abc123d-https://gitlab.company.com/sloria/webargs/-/commit/abc123def456]", "tests/test_sphinx_issues.py::test_sphinx_build_integration" ]
{ "failed_lite_validators": [ "has_hyperlinks", "has_many_modified_files" ], "has_test_patch": true, "is_lite": false }
2024-01-19 14:07:40+00:00
mit
5,544
sloria__sphinx-issues-29
diff --git a/README.rst b/README.rst index a7e7aad..efa129f 100644 --- a/README.rst +++ b/README.rst @@ -5,20 +5,22 @@ sphinx-issues .. image:: https://travis-ci.org/sloria/sphinx-issues.svg?branch=master :target: https://travis-ci.org/sloria/sphinx-issues -A Sphinx extension for linking to your project's issue tracker. Includes roles for linking to both issues and user profiles, with built-in support for GitHub (though this works with other services). +A Sphinx extension for linking to your project's issue tracker. Includes roles for linking to issues, pull requests, user profiles, with built-in support for GitHub (though this works with other services). Example ******* -For an example usage, check out `marshmallow's changelog <http://marshmallow.readthedocs.org/en/latest/changelog.html#changelog>`_, which makes use of the roles in this library. +For an example usage, check out `marshmallow's changelog <http://marshmallow.readthedocs.org/en/latest/changelog.html>`_, which makes use of the roles in this library. Installation and Configuration ****************************** -:: - $ pip install sphinx-issues +.. code-block:: console -Add ``sphinx_issues`` to ``extensions`` in your ``conf.py``. If your project is on Github, add the ``issues_github_path`` config variable. Otherwise, use ``issues_uri``. + pip install sphinx-issues + + +Add ``sphinx_issues`` to ``extensions`` in your ``conf.py``. If your project is on GitHub, add the ``issues_github_path`` config variable. Otherwise, use ``issues_uri`` and ``issues_pr_uri``. .. code-block:: python @@ -35,11 +37,12 @@ Add ``sphinx_issues`` to ``extensions`` in your ``conf.py``. If your project is # equivalent to issues_uri = 'https://github.com/sloria/marshmallow/issues/{issue}' + issues_pr_uri = 'https://github.com/sloria/marshmallow/pull/{pr}' Usage ***** -Use the ``:issue:`` role in your docs like so: +Use the ``:issue:`` and ``:pr:`` roles in your docs like so: .. code-block:: rst @@ -47,6 +50,8 @@ Use the ``:issue:`` role in your docs like so: See issues :issue:`12,13` + See PR :pr:`58` + Use the ``:user:`` role in your docs to link to user profiles (Github by default, but can be configured via the ``issues_user_uri`` config variable). @@ -77,6 +82,7 @@ Changelog 1.0.0 (unreleased) ------------------ +- Add ``:pr:`` role. Thanks @jnotham for the suggestion. - Drop support for Python 3.4. 
0.4.0 (2017-11-25) diff --git a/sphinx_issues.py b/sphinx_issues.py index 24b9386..532858e 100644 --- a/sphinx_issues.py +++ b/sphinx_issues.py @@ -41,59 +41,89 @@ def user_role(name, rawtext, text, lineno, link = nodes.reference(text=text, refuri=ref, **options) return [link], [] - -def _make_issue_node(issue_no, config, options=None): - options = options or {} - if issue_no not in ('-', '0'): - if config.issues_uri: - ref = config.issues_uri.format(issue=issue_no) - elif config.issues_github_path: - ref = 'https://github.com/{0}/issues/{1}'.format( - config.issues_github_path, issue_no - ) +class IssueRole(object): + def __init__(self, uri_config_option, format_kwarg, github_uri_template): + self.uri_config_option = uri_config_option + self.format_kwarg = format_kwarg + self.github_uri_template = github_uri_template + + def make_node(self, issue_no, config, options=None): + options = options or {} + if issue_no not in ('-', '0'): + uri_template = getattr(config, self.uri_config_option, None) + if uri_template: + ref = uri_template.format(**{self.format_kwarg: issue_no}) + elif config.issues_github_path: + ref = self.github_uri_template.format( + issues_github_path=config.issues_github_path, + n=issue_no, + ) + else: + raise ValueError( + 'Neither {} nor issues_github_path ' + 'is set'.format(self.uri_config_option) + ) + issue_text = '#{0}'.format(issue_no) + link = nodes.reference(text=issue_text, refuri=ref, **options) else: - raise ValueError('Neither issues_uri nor issues_github_path is set') - issue_text = '#{0}'.format(issue_no) - link = nodes.reference(text=issue_text, refuri=ref, **options) - else: - link = None - return link - - -def issue_role(name, rawtext, text, lineno, - inliner, options=None, content=None): - """Sphinx role for linking to an issue. Must have - `issues_uri` or `issues_github_path` configured in ``conf.py``. - - Examples: :: - - :issue:`123` - :issue:`42,45` - """ - options = options or {} - content = content or [] - issue_nos = [each.strip() for each in utils.unescape(text).split(',')] - config = inliner.document.settings.env.app.config - ret = [] - for i, issue_no in enumerate(issue_nos): - node = _make_issue_node(issue_no, config, options=options) - ret.append(node) - if i != len(issue_nos) - 1: - sep = nodes.raw(text=', ', format='html') - ret.append(sep) - return ret, [] + link = None + return link + + def __call__(self, name, rawtext, text, lineno, + inliner, options=None, content=None): + options = options or {} + content = content or [] + issue_nos = [each.strip() for each in utils.unescape(text).split(',')] + config = inliner.document.settings.env.app.config + ret = [] + for i, issue_no in enumerate(issue_nos): + node = self.make_node(issue_no, config, options=options) + ret.append(node) + if i != len(issue_nos) - 1: + sep = nodes.raw(text=', ', format='html') + ret.append(sep) + return ret, [] + + +"""Sphinx role for linking to an issue. Must have +`issues_uri` or `issues_github_path` configured in ``conf.py``. +Examples: :: + :issue:`123` + :issue:`42,45` +""" +issue_role = IssueRole( + uri_config_option='issues_uri', + format_kwarg='issue', + github_uri_template='https://github.com/{issues_github_path}/issues/{n}' +) + +"""Sphinx role for linking to a pull request. Must have +`issues_pr_uri` or `issues_github_path` configured in ``conf.py``. 
+Examples: :: + :pr:`123` + :pr:`42,45` +""" +pr_role = IssueRole( + uri_config_option='issues_pr_uri', + format_kwarg='pr', + github_uri_template='https://github.com/{issues_github_path}/pull/{n}' +) def setup(app): # Format template for issues URI # e.g. 'https://github.com/sloria/marshmallow/issues/{issue} app.add_config_value('issues_uri', default=None, rebuild='html') + # Format template for PR URI + # e.g. 'https://github.com/sloria/marshmallow/pull/{issue} + app.add_config_value('issues_pr_uri', default=None, rebuild='html') # Shortcut for Github, e.g. 'sloria/marshmallow' app.add_config_value('issues_github_path', default=None, rebuild='html') # Format template for user profile URI # e.g. 'https://github.com/{user}' app.add_config_value('issues_user_uri', default=None, rebuild='html') app.add_role('issue', issue_role) + app.add_role('pr', pr_role) app.add_role('user', user_role) return { 'version': __version__,
sloria/sphinx-issues
d82697e269a7290dcb186a257b249224883218f7
diff --git a/test_sphinx_issues.py b/test_sphinx_issues.py index c7f466c..c45ae3c 100644 --- a/test_sphinx_issues.py +++ b/test_sphinx_issues.py @@ -10,6 +10,7 @@ from sphinx.application import Sphinx from sphinx_issues import ( issue_role, user_role, + pr_role, setup as issues_setup ) @@ -18,8 +19,11 @@ import pytest @pytest.yield_fixture(params=[ # Parametrize config - {'issues_github_path': 'sloria/marshmallow'}, - {'issues_uri': 'https://github.com/sloria/marshmallow/issues/{issue}'} + {'issues_github_path': 'marshmallow-code/marshmallow'}, + { + 'issues_uri': 'https://github.com/marshmallow-code/marshmallow/issues/{issue}', + 'issues_pr_uri': 'https://github.com/marshmallow-code/marshmallow/pull/{pr}', + } ]) def app(request): src, doctree, confdir, outdir = [mkdtemp() for _ in range(4)] @@ -57,7 +61,7 @@ def test_issue_role(inliner): ) link = result[0][0] assert link.astext() == '#42' - issue_url = 'https://github.com/sloria/marshmallow/issues/42' + issue_url = 'https://github.com/marshmallow-code/marshmallow/issues/42' assert link.attributes['refuri'] == issue_url @@ -71,7 +75,7 @@ def test_issue_role_multiple(inliner): ) link1 = result[0][0] assert link1.astext() == '#42' - issue_url = 'https://github.com/sloria/marshmallow/issues/' + issue_url = 'https://github.com/marshmallow-code/marshmallow/issues/' assert link1.attributes['refuri'] == issue_url + '42' sep = result[0][1] @@ -106,3 +110,17 @@ def test_user_role_explicit_name(inliner): link = result[0][0] assert link.astext() == 'Steven Loria' assert link.attributes['refuri'] == 'https://github.com/sloria' + + +def test_pr_role(inliner): + result = pr_role( + name=None, + rawtext='', + text='42', + lineno=None, + inliner=inliner + ) + link = result[0][0] + assert link.astext() == '#42' + issue_url = 'https://github.com/marshmallow-code/marshmallow/pull/42' + assert link.attributes['refuri'] == issue_url
Add :pr: role as alias for :issue:?

Some contributors to scikit-learn have presumed that in the changelog `:issue:` strictly references an issue, while often we would rather reference PRs (this helps us check that everything is covered in the changelog). Perhaps (optionally) adding a `:pr:` alias would be a good idea.
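The patch above implements exactly this. A sketch of how the new role is configured and used, following the patched README (the changelog lines are illustrative):

```python
# conf.py -- with issues_github_path set, :pr: resolves to .../pull/{n}
extensions = ["sphinx_issues"]
issues_github_path = "sloria/marshmallow"

# in the changelog (reST):
#   See PR :pr:`58`
#   See issues :issue:`12,13` and PRs :pr:`42,45`
```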
0.0
d82697e269a7290dcb186a257b249224883218f7
[ "test_sphinx_issues.py::test_issue_role[app0]", "test_sphinx_issues.py::test_issue_role[app1]", "test_sphinx_issues.py::test_issue_role_multiple[app0]", "test_sphinx_issues.py::test_issue_role_multiple[app1]", "test_sphinx_issues.py::test_user_role[app0]", "test_sphinx_issues.py::test_user_role[app1]", "test_sphinx_issues.py::test_user_role_explicit_name[app0]", "test_sphinx_issues.py::test_user_role_explicit_name[app1]", "test_sphinx_issues.py::test_pr_role[app0]", "test_sphinx_issues.py::test_pr_role[app1]" ]
[]
{ "failed_lite_validators": [ "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false }
2018-07-14 22:32:55+00:00
mit
5,545
sloria__sphinx-issues-66
diff --git a/README.rst b/README.rst index 020b6bb..4e36f8b 100644 --- a/README.rst +++ b/README.rst @@ -38,6 +38,7 @@ Add ``sphinx_issues`` to ``extensions`` in your ``conf.py``. If your project is # equivalent to issues_uri = "https://github.com/sloria/marshmallow/issues/{issue}" issues_pr_uri = "https://github.com/sloria/marshmallow/pull/{pr}" + issues_commit_uri = "https://github.com/sloria/marshmallow/commit/{commit}" Usage ***** @@ -65,6 +66,13 @@ You can also use explicit names if you want to use a different name than the git This change is due to :user:`Andreas Mueller <amueller>`. + +Use the ``:commit:`` role to link to commits. + +.. code-block:: rst + + Fixed in :commit:`6bb9124d5e9dbb2f7b52864c3d8af7feb1b69403`. + Use the ``:cve:`` role to link to CVEs on https://cve.mitre.org. .. code-block:: rst @@ -88,6 +96,7 @@ Changelog 1.2.0 (unreleased) ------------------ +- Add ``:commit:`` role for linking to commits. - Test against Python 3.7. 1.1.0 (2018-09-18) diff --git a/sphinx_issues.py b/sphinx_issues.py index 6fe22c8..a665425 100644 --- a/sphinx_issues.py +++ b/sphinx_issues.py @@ -62,10 +62,17 @@ def cve_role(name, rawtext, text, lineno, inliner, options=None, content=None): class IssueRole(object): - def __init__(self, uri_config_option, format_kwarg, github_uri_template): + def __init__( + self, uri_config_option, format_kwarg, github_uri_template, format_text=None + ): self.uri_config_option = uri_config_option self.format_kwarg = format_kwarg self.github_uri_template = github_uri_template + self.format_text = format_text or self.default_format_text + + @staticmethod + def default_format_text(issue_no): + return "#{0}".format(issue_no) def make_node(self, issue_no, config, options=None): options = options or {} @@ -82,7 +89,7 @@ class IssueRole(object): "Neither {} nor issues_github_path " "is set".format(self.uri_config_option) ) - issue_text = "#{0}".format(issue_no) + issue_text = self.format_text(issue_no) link = nodes.reference(text=issue_text, refuri=ref, **options) else: link = None @@ -130,6 +137,23 @@ pr_role = IssueRole( ) +def format_commit_text(sha): + return sha[:7] + + +"""Sphinx role for linking to a commit. Must have +`issues_pr_uri` or `issues_github_path` configured in ``conf.py``. +Examples: :: + :commit:`123abc456def` +""" +commit_role = IssueRole( + uri_config_option="issues_commit_uri", + format_kwarg="commit", + github_uri_template="https://github.com/{issues_github_path}/commit/{n}", + format_text=format_commit_text, +) + + def setup(app): # Format template for issues URI # e.g. 'https://github.com/sloria/marshmallow/issues/{issue} @@ -137,6 +161,9 @@ def setup(app): # Format template for PR URI # e.g. 'https://github.com/sloria/marshmallow/pull/{issue} app.add_config_value("issues_pr_uri", default=None, rebuild="html") + # Format template for commit URI + # e.g. 'https://github.com/sloria/marshmallow/commits/{commit} + app.add_config_value("issues_commit_uri", default=None, rebuild="html") # Shortcut for Github, e.g. 'sloria/marshmallow' app.add_config_value("issues_github_path", default=None, rebuild="html") # Format template for user profile URI @@ -145,6 +172,7 @@ def setup(app): app.add_role("issue", issue_role) app.add_role("pr", pr_role) app.add_role("user", user_role) + app.add_role("commit", commit_role) app.add_role("cve", cve_role) return { "version": __version__,
sloria/sphinx-issues
7f4cd73f72f694dba3332afc4ac4c6d75106e613
diff --git a/test_sphinx_issues.py b/test_sphinx_issues.py index 3947870..46e4519 100644 --- a/test_sphinx_issues.py +++ b/test_sphinx_issues.py @@ -13,6 +13,7 @@ from sphinx_issues import ( user_role, pr_role, cve_role, + commit_role, setup as issues_setup, ) @@ -26,6 +27,7 @@ import pytest { "issues_uri": "https://github.com/marshmallow-code/marshmallow/issues/{issue}", "issues_pr_uri": "https://github.com/marshmallow-code/marshmallow/pull/{pr}", + "issues_commit_uri": "https://github.com/marshmallow-code/marshmallow/commit/{commit}", }, ] ) @@ -108,3 +110,12 @@ def test_cve_role(inliner): assert link.astext() == "CVE-2018-17175" issue_url = "https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2018-17175" assert link.attributes["refuri"] == issue_url + + +def test_commit_role(inliner): + sha = "123abc456def" + result = commit_role(name=None, rawtext="", text=sha, lineno=None, inliner=inliner) + link = result[0][0] + assert link.astext() == sha[:7] + url = "https://github.com/marshmallow-code/marshmallow/commit/{}".format(sha) + assert link.attributes["refuri"] == url
Support linking to commits

Example:

```rst
:commit:`123abc456def`
```
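For illustration, here is a minimal, Sphinx-free sketch of the link-text/URL logic a `:commit:` role needs. Only the 7-character SHA abbreviation and the URI template come from the patch above; the standalone constant and helper names are assumptions, not sphinx-issues API:

```python
# Minimal sketch of the :commit: role's text/URL logic (no Sphinx required).
GITHUB_COMMIT_URI = "https://github.com/{path}/commit/{sha}"


def format_commit_text(sha):
    # The role renders an abbreviated 7-character hash as the link text.
    return sha[:7]


def commit_link(github_path, sha):
    """Return (link_text, link_url) for a :commit:`<sha>` reference."""
    return format_commit_text(sha), GITHUB_COMMIT_URI.format(path=github_path, sha=sha)


print(commit_link("sloria/marshmallow", "123abc456def"))
# ('123abc4', 'https://github.com/sloria/marshmallow/commit/123abc456def')
```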
0.0
7f4cd73f72f694dba3332afc4ac4c6d75106e613
[ "test_sphinx_issues.py::test_issue_role[app0]", "test_sphinx_issues.py::test_issue_role[app1]", "test_sphinx_issues.py::test_issue_role_multiple[app0]", "test_sphinx_issues.py::test_issue_role_multiple[app1]", "test_sphinx_issues.py::test_user_role[app0]", "test_sphinx_issues.py::test_user_role[app1]", "test_sphinx_issues.py::test_user_role_explicit_name[app0]", "test_sphinx_issues.py::test_user_role_explicit_name[app1]", "test_sphinx_issues.py::test_pr_role[app0]", "test_sphinx_issues.py::test_pr_role[app1]", "test_sphinx_issues.py::test_cve_role[app0]", "test_sphinx_issues.py::test_cve_role[app1]", "test_sphinx_issues.py::test_commit_role[app0]", "test_sphinx_issues.py::test_commit_role[app1]" ]
[]
{ "failed_lite_validators": [ "has_short_problem_statement", "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false }
2018-12-26 14:58:52+00:00
mit
5,546
sloria__sphinx-issues-67
diff --git a/README.rst b/README.rst index 4e36f8b..035588a 100644 --- a/README.rst +++ b/README.rst @@ -51,6 +51,8 @@ Use the ``:issue:`` and ``:pr:`` roles in your docs like so: See issues :issue:`12,13` + See :issue:`sloria/konch#45`. + See PR :pr:`58` @@ -97,6 +99,7 @@ Changelog ------------------ - Add ``:commit:`` role for linking to commits. +- Add support for linking to external repos. - Test against Python 3.7. 1.1.0 (2018-09-18) diff --git a/sphinx_issues.py b/sphinx_issues.py index a665425..d359d1b 100644 --- a/sphinx_issues.py +++ b/sphinx_issues.py @@ -1,5 +1,7 @@ # -*- coding: utf-8 -*- """A Sphinx extension for linking to your project's issue tracker.""" +import re + from docutils import nodes, utils from sphinx.util.nodes import split_explicit_title @@ -62,6 +64,9 @@ def cve_role(name, rawtext, text, lineno, inliner, options=None, content=None): class IssueRole(object): + + EXTERNAL_REPO_REGEX = re.compile(r"^(\w+)/(.+)([#@])([\w]+)$") + def __init__( self, uri_config_option, format_kwarg, github_uri_template, format_text=None ): @@ -74,8 +79,25 @@ class IssueRole(object): def default_format_text(issue_no): return "#{0}".format(issue_no) - def make_node(self, issue_no, config, options=None): + def make_node(self, name, issue_no, config, options=None): + name_map = {"pr": "pull", "issue": "issues", "commit": "commit"} options = options or {} + repo_match = self.EXTERNAL_REPO_REGEX.match(issue_no) + if repo_match: # External repo + username, repo, symbol, issue = repo_match.groups() + if name not in name_map: + raise ValueError( + "External repo linking not supported for :{}:".format(name) + ) + path = name_map.get(name) + ref = "https://github.com/{issues_github_path}/{path}/{n}".format( + issues_github_path="{}/{}".format(username, repo), path=path, n=issue + ) + formatted_issue = self.format_text(issue).lstrip("#") + text = "{username}/{repo}{symbol}{formatted_issue}".format(**locals()) + link = nodes.reference(text=text, refuri=ref, **options) + return link + if issue_no not in ("-", "0"): uri_template = getattr(config, self.uri_config_option, None) if uri_template: @@ -104,7 +126,7 @@ class IssueRole(object): config = inliner.document.settings.env.app.config ret = [] for i, issue_no in enumerate(issue_nos): - node = self.make_node(issue_no, config, options=options) + node = self.make_node(name, issue_no, config, options=options) ret.append(node) if i != len(issue_nos) - 1: sep = nodes.raw(text=", ", format="html") @@ -117,6 +139,7 @@ class IssueRole(object): Examples: :: :issue:`123` :issue:`42,45` + :issue:`sloria/konch#123` """ issue_role = IssueRole( uri_config_option="issues_uri", @@ -129,6 +152,7 @@ issue_role = IssueRole( Examples: :: :pr:`123` :pr:`42,45` + :pr:`sloria/konch#43` """ pr_role = IssueRole( uri_config_option="issues_pr_uri", @@ -145,6 +169,7 @@ def format_commit_text(sha): `issues_pr_uri` or `issues_github_path` configured in ``conf.py``. Examples: :: :commit:`123abc456def` + :commit:`sloria/konch@123abc456def` """ commit_role = IssueRole( uri_config_option="issues_commit_uri",
sloria/sphinx-issues
5307e33d749e440afd52c9d4cd7c8a0bc6d70cb2
diff --git a/test_sphinx_issues.py b/test_sphinx_issues.py index bc5264b..684ebae 100644 --- a/test_sphinx_issues.py +++ b/test_sphinx_issues.py @@ -92,6 +92,30 @@ def inliner(app): "123abc4", "https://github.com/marshmallow-code/marshmallow/commit/123abc456def", ), + # External issue + ( + issue_role, + "issue", + "sloria/webargs#42", + "sloria/webargs#42", + "https://github.com/sloria/webargs/issues/42", + ), + # External PR + ( + pr_role, + "pr", + "sloria/webargs#42", + "sloria/webargs#42", + "https://github.com/sloria/webargs/pull/42", + ), + # External commit + ( + commit_role, + "commit", + "sloria/webargs@abc123def456", + "sloria/webargs@abc123d", + "https://github.com/sloria/webargs/commit/abc123def456", + ), ], ) def test_roles(inliner, role, role_name, text, expected_text, expected_url):
Support linking to other repos

It would be nice if you could do e.g. this:

```
:issue:`user/repo#123`
```

For example, you may fix a bug that was caused by another library and want to link to their issue instead of something in your own/main issue tracker.
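The patch above handles such references with a single regular expression; below is a standalone sketch of that matching logic. The regex is taken verbatim from the patch, while the loop and sample inputs are illustrative only:

```python
import re

# Regex from the patch: matches <user>/<repo>#<issue> and <user>/<repo>@<sha>.
EXTERNAL_REPO_REGEX = re.compile(r"^(\w+)/(.+)([#@])([\w]+)$")

for ref in ("sloria/webargs#42", "sloria/webargs@abc123def456", "42"):
    match = EXTERNAL_REPO_REGEX.match(ref)
    if match:
        username, repo, symbol, target = match.groups()
        print(ref, "->", (username, repo, symbol, target))
    else:
        print(ref, "-> plain local reference")
```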
0.0
5307e33d749e440afd52c9d4cd7c8a0bc6d70cb2
[ "test_sphinx_issues.py::test_roles[app0-role6-issue-sloria/webargs#42-sloria/webargs#42-https://github.com/sloria/webargs/issues/42]", "test_sphinx_issues.py::test_roles[app0-role7-pr-sloria/webargs#42-sloria/webargs#42-https://github.com/sloria/webargs/pull/42]", "test_sphinx_issues.py::test_roles[app0-role8-commit-sloria/webargs@abc123def456-sloria/webargs@abc123d-https://github.com/sloria/webargs/commit/abc123def456]", "test_sphinx_issues.py::test_roles[app1-role6-issue-sloria/webargs#42-sloria/webargs#42-https://github.com/sloria/webargs/issues/42]", "test_sphinx_issues.py::test_roles[app1-role7-pr-sloria/webargs#42-sloria/webargs#42-https://github.com/sloria/webargs/pull/42]", "test_sphinx_issues.py::test_roles[app1-role8-commit-sloria/webargs@abc123def456-sloria/webargs@abc123d-https://github.com/sloria/webargs/commit/abc123def456]" ]
[ "test_sphinx_issues.py::test_roles[app0-role0-issue-42-#42-https://github.com/marshmallow-code/marshmallow/issues/42]", "test_sphinx_issues.py::test_roles[app0-role1-pr-42-#42-https://github.com/marshmallow-code/marshmallow/pull/42]", "test_sphinx_issues.py::test_roles[app0-user_role-user-sloria-@sloria-https://github.com/sloria]", "test_sphinx_issues.py::test_roles[app0-user_role-user-Steven", "test_sphinx_issues.py::test_roles[app0-cve_role-cve-CVE-2018-17175-CVE-2018-17175-https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2018-17175]", "test_sphinx_issues.py::test_roles[app0-role5-commit-123abc456def-123abc4-https://github.com/marshmallow-code/marshmallow/commit/123abc456def]", "test_sphinx_issues.py::test_roles[app1-role0-issue-42-#42-https://github.com/marshmallow-code/marshmallow/issues/42]", "test_sphinx_issues.py::test_roles[app1-role1-pr-42-#42-https://github.com/marshmallow-code/marshmallow/pull/42]", "test_sphinx_issues.py::test_roles[app1-user_role-user-sloria-@sloria-https://github.com/sloria]", "test_sphinx_issues.py::test_roles[app1-user_role-user-Steven", "test_sphinx_issues.py::test_roles[app1-cve_role-cve-CVE-2018-17175-CVE-2018-17175-https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2018-17175]", "test_sphinx_issues.py::test_roles[app1-role5-commit-123abc456def-123abc4-https://github.com/marshmallow-code/marshmallow/commit/123abc456def]", "test_sphinx_issues.py::test_issue_role_multiple[app0]", "test_sphinx_issues.py::test_issue_role_multiple[app1]" ]
{ "failed_lite_validators": [ "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false }
2018-12-26 16:00:43+00:00
mit
5,547
smarie__mkdocs-gallery-46
diff --git a/src/mkdocs_gallery/gen_data_model.py b/src/mkdocs_gallery/gen_data_model.py index b12b51f..b3ead8f 100644 --- a/src/mkdocs_gallery/gen_data_model.py +++ b/src/mkdocs_gallery/gen_data_model.py @@ -22,7 +22,13 @@ from typing import List, Dict, Any, Tuple, Union, Iterable from pathlib import Path from .errors import ExtensionError -from .utils import _smart_copy_md5, get_md5sum, _replace_by_new_if_needed, _new_file, matches_filepath_pattern +from .utils import (_smart_copy_md5, + get_md5sum, + _replace_by_new_if_needed, + _new_file, + matches_filepath_pattern, + is_relative_to, + ) def _has_readme(folder: Path) -> bool: @@ -660,7 +666,7 @@ class Gallery(GalleryBase): self._attach(all_info=all_info) # Check that generated dir is inside docs dir - if not self.generated_dir.as_posix().startswith(self.all_info.mkdocs_docs_dir.as_posix()): + if not is_relative_to(self.all_info.mkdocs_docs_dir, self.generated_dir): raise ValueError("Generated gallery dirs can only be located as subfolders of the mkdocs 'docs_dir'.") def has_subsections(self) -> bool: diff --git a/src/mkdocs_gallery/plugin.py b/src/mkdocs_gallery/plugin.py index dcc674f..61d5f24 100644 --- a/src/mkdocs_gallery/plugin.py +++ b/src/mkdocs_gallery/plugin.py @@ -25,6 +25,7 @@ from . import glr_path_static from .binder import copy_binder_files # from .docs_resolv import embed_code_links from .gen_gallery import parse_config, generate_gallery_md, summarize_failing_examples, fill_mkdocs_nav +from .utils import is_relative_to class ConfigList(co.OptionallyRequired): @@ -348,8 +349,7 @@ markdown_extensions: def wrap_callback(original_callback): def _callback(event): for g in excluded_dirs: - # TODO maybe use fnmatch rather ? - if event.src_path.startswith(g): + if is_relative_to(g, Path(event.src_path)): # ignore this event: the file is in the gallery target dir. # log.info(f"Ignoring event: {event}") return diff --git a/src/mkdocs_gallery/utils.py b/src/mkdocs_gallery/utils.py index b5ddcf2..e388817 100644 --- a/src/mkdocs_gallery/utils.py +++ b/src/mkdocs_gallery/utils.py @@ -347,3 +347,32 @@ def matches_filepath_pattern(filepath: Path, pattern: str) -> bool: result = re.search(pattern, str(filepath)) return True if result is not None else False + + +def is_relative_to(parentpath: Path, subpath: Path) -> bool: + """ + Check if subpath is relative to parentpath + + Parameters + ---------- + parentpath + The (potential) parent path + + subpath + The (potential) subpath + + Returns + ------- + rc + A boolean indicating whether subpath is relative to parentpath + """ + + if not (isinstance(parentpath, Path) and isinstance(subpath, Path)): + raise TypeError("Arguments must both be pathlib objects") + + try: + subpath.relative_to(parentpath) + return True + + except ValueError: + return False
smarie/mkdocs-gallery
94689a05333f37a72caa863c0fbbad11eb45fab4
diff --git a/tests/test_utils.py b/tests/test_utils.py index 58d4c6a..879fba9 100644 --- a/tests/test_utils.py +++ b/tests/test_utils.py @@ -2,7 +2,7 @@ from pathlib import Path import re import os import pytest -from mkdocs_gallery.utils import matches_filepath_pattern +from mkdocs_gallery.utils import matches_filepath_pattern, is_relative_to class TestFilepathPatternMatch: @@ -42,3 +42,28 @@ class TestFilepathPatternMatch: with pytest.raises(AssertionError): matches_filepath_pattern(filepath, pattern) + + +class TestRelativePaths: + + @pytest.mark.parametrize( + "path1, path2, expected", [ + ("parent", "parent/sub", True), + ("notparent", "parent/sub", False), + ]) + def test_behavior(self, path1, path2, expected): + """Test that the function behaves as expected""" + + assert is_relative_to(Path(path1), Path(path2)) == expected + + @pytest.mark.parametrize( + "path1, path2", [ + ("parent", "parent/sub"), + (Path("parent"), "parent/sub"), + ("parent", Path("parent/sub")), + ]) + def test_not_paths_raises(self, path1, path2): + """Test that the function raises an exception when both arguments are not Path objects""" + + with pytest.raises(TypeError): + is_relative_to(path1, path2)
TypeError: startswith first arg must be str or a tuple of str, not WindowsPath

This happens when we use the `mkdocs serve` mode:

```
  File "C:\(...)\mkdocs_gallery\plugin.py", line 352, in _callback
    if event.src_path.startswith(g):
TypeError: startswith first arg must be str or a tuple of str, not WindowsPath
```

Probably related to the last changes with the pathlib paths everywhere.
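The fix replaces the string `startswith` check with a `pathlib`-based containment test. Here is the `is_relative_to` helper from the patch, shown self-contained with a small usage demo (the example paths are made up). The standard library only gained `Path.is_relative_to` in Python 3.9, which is why a hand-rolled helper is needed:

```python
from pathlib import Path

def is_relative_to(parentpath, subpath):
    """Return True if subpath is located under parentpath."""
    if not (isinstance(parentpath, Path) and isinstance(subpath, Path)):
        raise TypeError("Arguments must both be pathlib objects")
    try:
        # relative_to() raises ValueError when subpath is outside parentpath.
        subpath.relative_to(parentpath)
        return True
    except ValueError:
        return False

print(is_relative_to(Path("docs"), Path("docs/generated/gallery")))  # True
print(is_relative_to(Path("docs"), Path("site/gallery")))            # False
```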
0.0
94689a05333f37a72caa863c0fbbad11eb45fab4
[ "tests/test_utils.py::TestFilepathPatternMatch::test_ok[filename]", "tests/test_utils.py::TestFilepathPatternMatch::test_ok[filename\\\\.ext]", "tests/test_utils.py::TestFilepathPatternMatch::test_ok[\\\\.ext]", "tests/test_utils.py::TestFilepathPatternMatch::test_ok[/filename]", "tests/test_utils.py::TestFilepathPatternMatch::test_ok[directory]", "tests/test_utils.py::TestFilepathPatternMatch::test_ok[/directory]", "tests/test_utils.py::TestFilepathPatternMatch::test_fails[wrong_filename]", "tests/test_utils.py::TestFilepathPatternMatch::test_fails[wrong_filename\\\\.ext]", "tests/test_utils.py::TestFilepathPatternMatch::test_fails[\\\\.wrong_ext]", "tests/test_utils.py::TestFilepathPatternMatch::test_fails[/wrong_filename]", "tests/test_utils.py::TestFilepathPatternMatch::test_fails[wrong_directory]", "tests/test_utils.py::TestFilepathPatternMatch::test_fails[/wrong_directory]", "tests/test_utils.py::TestFilepathPatternMatch::test_not_path_raises", "tests/test_utils.py::TestRelativePaths::test_behavior[parent-parent/sub-True]", "tests/test_utils.py::TestRelativePaths::test_behavior[notparent-parent/sub-False]", "tests/test_utils.py::TestRelativePaths::test_not_paths_raises[parent-parent/sub]", "tests/test_utils.py::TestRelativePaths::test_not_paths_raises[path11-parent/sub]", "tests/test_utils.py::TestRelativePaths::test_not_paths_raises[parent-path22]" ]
[]
{ "failed_lite_validators": [ "has_many_modified_files", "has_many_hunks", "has_pytest_match_arg" ], "has_test_patch": true, "is_lite": false }
2022-04-11 21:17:33+00:00
bsd-3-clause
5,548
smarie__mkdocs-gallery-90
diff --git a/docs/changelog.md b/docs/changelog.md index 680bacd..b3d415e 100644 --- a/docs/changelog.md +++ b/docs/changelog.md @@ -1,5 +1,9 @@ # Changelog +### 0.10.0 - Support for asynchronous code + +- Gallery scripts now support top-level asynchronous code. PR [#90](https://github.com/smarie/mkdocs-gallery/pull/90) by [pmeier](https://github.com/pmeier) + ### 0.9.0 - Pyvista - Pyvista can now be used in gallery examples as in `sphinx-gallery`. PR [#91](https://github.com/smarie/mkdocs-gallery/pull/91) by [Louis-Pujol](https://github.com/Louis-Pujol) diff --git a/docs/examples/plot_12_async.py b/docs/examples/plot_12_async.py new file mode 100644 index 0000000..4aa6f3e --- /dev/null +++ b/docs/examples/plot_12_async.py @@ -0,0 +1,76 @@ +""" +# Support for asynchronous code + +[PEP 429](https://peps.python.org/pep-0492), which was first implemented in +[Python 3.5](https://docs.python.org/3/whatsnew/3.5.html#whatsnew-pep-492), added initial syntax for asynchronous +programming in Python: `async` and `await`. + +While this was a major improvement in particular for UX development, one major +downside is that it "poisons" the caller's code base. If you want to `await` a coroutine, you have to be inside a `async def` +context. Doing so turns the function into a coroutine function and thus forces the caller to also `await` its results. +Rinse and repeat until you reach the beginning of the stack. + +Since version `0.10.0`, `mkdocs-gallery` is now able to automatically detect code blocks using async programming, and to handle them nicely so that you don't have to wrap them. This feature is enabled by default and does not require any configuration option. Generated notebooks remain consistent with [`jupyter` notebooks](https://jupyter.org/), or rather the [`IPython` kernel](https://ipython.org/) running +the code inside of them, that is equipped with +[background handling to allow top-level asynchronous code](https://ipython.readthedocs.io/en/stable/interactive/autoawait.html). +""" + +import asyncio +import time + + +async def afn(): + start = time.time() + await asyncio.sleep(0.3) + stop = time.time() + return stop - start + + +f"I waited for {await afn():.1f} seconds!" + + +# %% +# Without any handling, the snippet above would trigger a `SyntaxError`, since we are using `await` outside of an +# asynchronous context. With the handling, it works just fine. +# +# The background handling will only be applied if it is actually needed. Meaning, you can still run your asynchronous +# code manually if required. + +asyncio.run(afn()) + + +# %% +# Apart from `await` all other asynchronous syntax is supported as well. 
+# +# ## Asynchronous Generators + + +async def agen(): + for chunk in "I'm an async iterator!".split(): + yield chunk + + +async for chunk in agen(): + print(chunk, end=" ") + + +# %% +# ## Asynchronous Comprehensions + +" ".join([chunk async for chunk in agen()]) + +# %% +# ## Asynchronous Context Managers + +import contextlib + + [email protected] +async def acm(): + print("Entering asynchronous context manager!") + yield + print("Exiting asynchronous context manager!") + + +async with acm(): + print("Inside the context!") diff --git a/src/mkdocs_gallery/gen_single.py b/src/mkdocs_gallery/gen_single.py index 55a64d7..3967e97 100644 --- a/src/mkdocs_gallery/gen_single.py +++ b/src/mkdocs_gallery/gen_single.py @@ -27,7 +27,7 @@ from functools import partial from io import StringIO from pathlib import Path from shutil import copyfile -from textwrap import indent +from textwrap import indent, dedent from time import time from typing import List, Set, Tuple @@ -739,6 +739,72 @@ def _reset_cwd_syspath(cwd, path_to_remove): os.chdir(cwd) +def _parse_code(bcontent, src_file, *, compiler_flags): + code_ast = compile(bcontent, src_file, "exec", compiler_flags | ast.PyCF_ONLY_AST, dont_inherit=1) + if _needs_async_handling(bcontent, src_file, compiler_flags=compiler_flags): + code_ast = _apply_async_handling(code_ast, compiler_flags=compiler_flags) + return code_ast + + +def _needs_async_handling(bcontent, src_file, *, compiler_flags) -> bool: + try: + compile(bcontent, src_file, "exec", compiler_flags, dont_inherit=1) + except SyntaxError as error: + # mkdocs-gallery supports top-level async code similar to jupyter notebooks. + # Without handling, this will raise a SyntaxError. In such a case, we apply a + # minimal async handling and try again. If the error persists, we bubble it up + # and let the caller handle it. + try: + compile( + f"async def __async_wrapper__():\n{indent(bcontent, ' ' * 4)}", + src_file, + "exec", + compiler_flags, + dont_inherit=1, + ) + except SyntaxError: + # Raise the original error to avoid leaking the internal async handling to + # generated output. + raise error from None + else: + return True + else: + return False + + +def _apply_async_handling(code_ast, *, compiler_flags): + async_handling = compile( + dedent( + """ + async def __async_wrapper__(): + # original AST goes here + return locals() + import asyncio as __asyncio__ + __async_wrapper_locals__ = __asyncio__.run(__async_wrapper__()) + __async_wrapper_result__ = __async_wrapper_locals__.pop("__async_wrapper_result__", None) + globals().update(__async_wrapper_locals__) + __async_wrapper_result__ + """ + ), + "<_apply_async_handling()>", + "exec", + compiler_flags | ast.PyCF_ONLY_AST, + dont_inherit=1, + ) + + *original_body, last_node = code_ast.body + if isinstance(last_node, ast.Expr): + last_node = ast.Assign( + targets=[ast.Name(id="__async_wrapper_result__", ctx=ast.Store())], value=last_node.value + ) + original_body.append(last_node) + + async_wrapper = async_handling.body[0] + async_wrapper.body = [*original_body, *async_wrapper.body] + + return ast.fix_missing_locations(async_handling) + + def execute_code_block(compiler, block, script: GalleryScript): """Execute the code block of the example file. 
@@ -788,9 +854,7 @@ def execute_code_block(compiler, block, script: GalleryScript): try: ast_Module = _ast_module() - code_ast = ast_Module([bcontent]) - flags = ast.PyCF_ONLY_AST | compiler.flags - code_ast = compile(bcontent, src_file, "exec", flags, dont_inherit=1) + code_ast = _parse_code(bcontent, src_file, compiler_flags=compiler.flags) ast.increment_lineno(code_ast, lineno - 1) is_last_expr, mem_max = _exec_and_get_memory(compiler, ast_Module, code_ast, script=script)
smarie/mkdocs-gallery
3322e6b8dac2b8d7f061a2d2ce7440ab91f40cb9
diff --git a/tests/test_gen_single.py b/tests/test_gen_single.py new file mode 100644 index 0000000..2702bf8 --- /dev/null +++ b/tests/test_gen_single.py @@ -0,0 +1,158 @@ +import ast +import codeop +import sys +from textwrap import dedent + +import pytest + +from mkdocs_gallery.gen_single import _needs_async_handling, _parse_code + +SRC_FILE = __file__ +COMPILER = codeop.Compile() +COMPILER_FLAGS = codeop.Compile().flags + + +needs_ast_unparse = pytest.mark.skipif( + sys.version_info < (3, 9), reason="ast.unparse is only available for Python >= 3.9" +) + + +def test_non_async_syntax_error(): + with pytest.raises(SyntaxError, match="unexpected indent"): + _parse_code("foo = None\n bar = None", src_file=SRC_FILE, compiler_flags=COMPILER_FLAGS) + + +@needs_ast_unparse [email protected]( + ("code", "needs"), + [ + pytest.param("None", False, id="no_async"), + pytest.param( + dedent( + """ + async def afn(): + return True + + import asyncio + assert asyncio.run(afn()) + """ + ), + False, + id="asyncio_run", + ), + pytest.param( + dedent( + """ + async def afn(): + return True + + assert await afn() + """ + ), + True, + id="await", + ), + pytest.param( + dedent( + """ + async def agen(): + yield True + + async for item in agen(): + assert item + """ + ), + True, + id="async_for", + ), + pytest.param( + dedent( + """ + async def agen(): + yield True + + assert [item async for item in agen()] == [True] + """ + ), + True, + id="async_comprehension", + ), + pytest.param( + dedent( + """ + import contextlib + + @contextlib.asynccontextmanager + async def acm(): + yield True + + async with acm() as ctx: + assert ctx + """ + ), + True, + id="async_context_manager", + ), + ], +) +def test_async_handling(code, needs): + assert _needs_async_handling(code, src_file=SRC_FILE, compiler_flags=COMPILER_FLAGS) is needs + + # Since AST objects are quite involved to compare, we unparse again and check that nothing has changed. Note that + # since we are dealing with AST and not CST here, all whitespace is eliminated in the process and this needs to be + # reflected in the input as well. + code_stripped = "\n".join(line for line in code.splitlines() if line) + code_unparsed = ast.unparse(_parse_code(code, src_file=SRC_FILE, compiler_flags=COMPILER_FLAGS)) + assert (code_unparsed == code_stripped) ^ needs + + if needs: + assert not _needs_async_handling(code_unparsed, src_file=SRC_FILE, compiler_flags=COMPILER_FLAGS) + + exec(COMPILER(code_unparsed, SRC_FILE, "exec"), {}) + + +@needs_ast_unparse +def test_async_handling_locals(): + sentinel = "sentinel" + code = dedent( + """ + async def afn(): + return True + + sentinel = {sentinel} + + assert await afn() + """.format( + sentinel=repr(sentinel) + ) + ) + code_unparsed = ast.unparse(_parse_code(code, src_file=SRC_FILE, compiler_flags=COMPILER_FLAGS)) + + locals = {} + exec(COMPILER(code_unparsed, SRC_FILE, "exec"), locals) + + assert "sentinel" in locals and locals["sentinel"] == sentinel + + +@needs_ast_unparse +def test_async_handling_last_expression(): + code = dedent( + """ + async def afn(): + return True + + result = await afn() + assert result + result + """ + ) + + code_unparsed_ast = _parse_code(code, src_file=SRC_FILE, compiler_flags=COMPILER_FLAGS) + code_unparsed = ast.unparse(code_unparsed_ast) + + last = code_unparsed_ast.body[-1] + assert isinstance(last, ast.Expr) + + locals = {} + exec(COMPILER(code_unparsed, SRC_FILE, "exec"), locals) + assert eval(ast.unparse(last.value), locals)
Support for async examples?

When I'm inside a `jupyter` notebook, the following is a valid code block:

```python
import asyncio
import time

start = time.time()
await asyncio.sleep(1)
stop = time.time()
f"I waited for {stop - start} seconds!"
```

However, if you put this inside a gallery script and try to run it, you'll hit

```
SyntaxError: 'await' outside function
```

This is expected, since we are currently not inside an `async` context. The difference from `jupyter` notebooks is that they have an `async` event loop running in the background. I would like to have the same convenience for example scripts as well. Without it, it is a pain to write examples that have `async` code in them, and it becomes nightmarish if one also wants to support the downloadable jupyter notebook at the same time.

If this is something that we want, I'm happy to help design and implement this feature.
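A minimal sketch of the detect-and-wrap idea the fix uses (`_needs_async_handling` in the patch above): try to compile the snippet as-is, and if that fails, retry inside an `async def`. The wrapper name and the final `exec`/`asyncio.run` demo are illustrative simplifications of the real AST-based handling:

```python
import asyncio
from textwrap import indent


def needs_async_handling(source):
    """Return True if `source` fails to compile only because it uses
    top-level async syntax (await / async for / async with)."""
    try:
        compile(source, "<example>", "exec")
        return False  # compiles as-is, nothing special to do
    except SyntaxError:
        # Retry inside an async function: if that compiles, the snippet just
        # needs an event loop; a genuine syntax error is re-raised here.
        compile("async def __wrapper__():\n" + indent(source, "    "),
                "<example>", "exec")
        return True


snippet = "import asyncio\nawait asyncio.sleep(0)\n"
print(needs_async_handling(snippet))  # True

# One way to actually run such a snippet: wrap it and drive it with asyncio.run().
ns = {}
exec(compile("async def __wrapper__():\n" + indent(snippet, "    "),
             "<example>", "exec"), ns)
asyncio.run(ns["__wrapper__"]())
```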
0.0
3322e6b8dac2b8d7f061a2d2ce7440ab91f40cb9
[ "tests/test_gen_single.py::test_non_async_syntax_error", "tests/test_gen_single.py::test_async_handling[no_async]", "tests/test_gen_single.py::test_async_handling[asyncio_run]", "tests/test_gen_single.py::test_async_handling[await]", "tests/test_gen_single.py::test_async_handling[async_for]", "tests/test_gen_single.py::test_async_handling[async_comprehension]", "tests/test_gen_single.py::test_async_handling[async_context_manager]", "tests/test_gen_single.py::test_async_handling_locals", "tests/test_gen_single.py::test_async_handling_last_expression" ]
[]
{ "failed_lite_validators": [ "has_added_files", "has_many_modified_files", "has_many_hunks", "has_pytest_match_arg" ], "has_test_patch": true, "is_lite": false }
2023-12-16 00:04:03+00:00
bsd-3-clause
5,549
smarie__python-makefun-80
diff --git a/src/makefun/main.py b/src/makefun/main.py index 40c366f..14c7b0e 100644 --- a/src/makefun/main.py +++ b/src/makefun/main.py @@ -11,9 +11,24 @@ import itertools from collections import OrderedDict from copy import copy from inspect import getsource +from keyword import iskeyword from textwrap import dedent from types import FunctionType + +if sys.version_info >= (3, 0): + is_identifier = str.isidentifier +else: + def is_identifier(string): + """ + Replacement for `str.isidentifier` when it is not available (e.g. on Python 2). + :param string: + :return: + """ + if len(string) == 0 or string[0].isdigit(): + return False + return all([s.isalnum() for s in string.split("_")]) + try: # python 3.3+ from inspect import signature, Signature, Parameter except ImportError: @@ -73,6 +88,7 @@ def create_wrapper(wrapped, add_impl=True, # type: bool doc=None, # type: str qualname=None, # type: str + co_name=None, # type: str module_name=None, # type: str **attrs ): @@ -84,7 +100,8 @@ def create_wrapper(wrapped, """ return wraps(wrapped, new_sig=new_sig, prepend_args=prepend_args, append_args=append_args, remove_args=remove_args, func_name=func_name, inject_as_first_arg=inject_as_first_arg, add_source=add_source, - add_impl=add_impl, doc=doc, qualname=qualname, module_name=module_name, **attrs)(wrapper) + add_impl=add_impl, doc=doc, qualname=qualname, module_name=module_name, co_name=co_name, + **attrs)(wrapper) def getattr_partial_aware(obj, att_name, *att_default): @@ -106,6 +123,7 @@ def create_function(func_signature, # type: Union[str, Signature] add_impl=True, # type: bool doc=None, # type: str qualname=None, # type: str + co_name=None, # type: str module_name=None, # type: str **attrs): """ @@ -130,6 +148,9 @@ def create_function(func_signature, # type: Union[str, Signature] - `__annotations__` attribute is created to match the annotations in the signature. - `__doc__` attribute is copied from `func_impl.__doc__` except if overridden using `doc` - `__module__` attribute is copied from `func_impl.__module__` except if overridden using `module_name` + - `__code__.co_name` (see above) defaults to the same value as the above `__name__` attribute, except when that + value is not a valid Python identifier, in which case it will be `<lambda>`. It can be overridden by providing + a `co_name` that is either a valid Python identifier or `<lambda>`. Finally two new attributes are optionally created @@ -138,6 +159,13 @@ def create_function(func_signature, # type: Union[str, Signature] - `__func_impl__` attribute: set if `add_impl` is `True` (default), this attribute contains a pointer to `func_impl` + A lambda function will be created in the following cases: + + - when `func_signature` is a `Signature` object and `func_impl` is itself a lambda function, + - when the function name, either derived from a `func_signature` string, or given explicitly with `func_name`, + is not a valid Python identifier, or + - when the provided `co_name` is `<lambda>`. + :param func_signature: either a string without 'def' such as "foo(a, b: int, *args, **kwargs)" or "(a, b: int)", or a `Signature` object, for example from the output of `inspect.signature` or from the `funcsigs.signature` backport. Note that these objects can be created manually too. If the signature is provided as a string and @@ -159,6 +187,9 @@ def create_function(func_signature, # type: Union[str, Signature] :param qualname: a string representing the qualified name to be used. 
If None (default), the `__qualname__` will default to the one of `func_impl` if `func_signature` is a `Signature`, or to the name defined in `func_signature` if `func_signature` is a `str` and contains a non-empty name. + :param co_name: a string representing the name to be used in the compiled code of the function. If None (default), + the `__code__.co_name` will default to the one of `func_impl` if `func_signature` is a `Signature`, or to the + name defined in `func_signature` if `func_signature` is a `str` and contains a non-empty name. :param module_name: the name of the module to be set on the function (under __module__ ). If None (default), `func_impl.__module__` will be used. :param attrs: other keyword attributes that should be set on the function. Note that `func_impl.__dict__` is not @@ -177,10 +208,24 @@ def create_function(func_signature, # type: Union[str, Signature] # name defaults user_provided_name = True if func_name is None: - # allow None for now, we'll raise a ValueError later if needed + # allow None, this will result in a lambda function being created func_name = getattr_partial_aware(func_impl, '__name__', None) user_provided_name = False + # co_name default + user_provided_co_name = co_name is not None + if not user_provided_co_name: + if func_name is None: + co_name = '<lambda>' + else: + co_name = func_name + else: + if not (_is_valid_func_def_name(co_name) + or _is_lambda_func_name(co_name)): + raise ValueError("Invalid co_name %r for created function. " + "It is not possible to declare a function " + "with the provided co_name." % co_name) + # qname default user_provided_qname = True if qualname is None: @@ -208,25 +253,28 @@ def create_function(func_signature, # type: Union[str, Signature] func_name = func_name_from_str if not user_provided_qname: qualname = func_name + if not user_provided_co_name: + co_name = func_name + create_lambda = not _is_valid_func_def_name(co_name) + + # if lambda, strip the name, parentheses and colon from the signature + if create_lambda: + name_len = len(func_name_from_str) if func_name_from_str else 0 + func_signature_str = func_signature_str[name_len + 1: -2] # fix the signature if needed - if func_name_from_str is None: - if func_name is None: - raise ValueError("Invalid signature for created function: `None` function name. This " - "probably happened because the decorated function %s has no __name__. You may " - "wish to pass an explicit `func_name` or to complete the signature string" - "with the name before the parenthesis." % func_impl) - func_signature_str = func_name + func_signature_str + elif func_name_from_str is None: + func_signature_str = co_name + func_signature_str elif isinstance(func_signature, Signature): # create the signature string - if func_name is None: - raise ValueError("Invalid signature for created function: `None` function name. This " - "probably happened because the decorated function %s has no __name__. 
You may " - "wish to pass an explicit `func_name` or to provide the new signature as a " - "string containing the name" % func_impl) - func_signature_str = get_signature_string(func_name, func_signature, evaldict) + create_lambda = not _is_valid_func_def_name(co_name) + if create_lambda: + # create signature string (or argument string in the case of a lambda function + func_signature_str = get_lambda_argument_string(func_signature, evaldict) + else: + func_signature_str = get_signature_string(co_name, func_signature, evaldict) else: raise TypeError("Invalid type for `func_signature`: %s" % type(func_signature)) @@ -255,6 +303,11 @@ def create_function(func_signature, # type: Union[str, Signature] body = get_legacy_py_generator_body_template() % (func_signature_str, params_str) elif isasyncgenfunction(func_impl): body = "async def %s\n async for y in _func_impl_(%s):\n yield y\n" % (func_signature_str, params_str) + elif create_lambda: + if func_signature_str: + body = "lambda_ = lambda %s: _func_impl_(%s)\n" % (func_signature_str, params_str) + else: + body = "lambda_ = lambda: _func_impl_(%s)\n" % (params_str) else: body = "def %s\n return _func_impl_(%s)\n" % (func_signature_str, params_str) @@ -264,7 +317,10 @@ def create_function(func_signature, # type: Union[str, Signature] # create the function by compiling code, mapping the `_func_impl_` symbol to `func_impl` protect_eval_dict(evaldict, func_name, params_names) evaldict['_func_impl_'] = func_impl - f = _make(func_name, params_names, body, evaldict) + if create_lambda: + f = _make("lambda_", params_names, body, evaldict) + else: + f = _make(co_name, params_names, body, evaldict) # add the source annotation if needed if add_source: @@ -297,6 +353,24 @@ def _is_generator_func(func_impl): return isgeneratorfunction(func_impl) +def _is_lambda_func_name(func_name): + """ + Return True if func_name is the name of a lambda + :param func_name: + :return: + """ + return func_name == (lambda: None).__code__.co_name + + +def _is_valid_func_def_name(func_name): + """ + Return True if func_name is valid in a function definition. + :param func_name: + :return: + """ + return is_identifier(func_name) and not iskeyword(func_name) + + class _SymbolRef: """ A class used to protect signature default values and type hints when the local context would not be able @@ -366,6 +440,17 @@ def get_signature_string(func_name, func_signature, evaldict): return "%s%s:" % (func_name, s) +def get_lambda_argument_string(func_signature, evaldict): + """ + Returns the string to be used as arguments in a lambda function definition. + If there is a non-native symbol in the defaults, it is created as a variable in the evaldict + :param func_name: + :param func_signature: + :return: + """ + return get_signature_string('', func_signature, evaldict)[1:-2] + + TYPES_WITH_SAFE_REPR = (int, str, bytes, bool) # IMPORTANT note: float is not in the above list because not all floats have a repr that is valid for the # compiler: float('nan'), float('-inf') and float('inf') or float('+inf') have an invalid repr. @@ -694,6 +779,7 @@ def wraps(wrapped_fun, append_args=None, # type: Union[str, Parameter, Iterable[Union[str, Parameter]]] remove_args=None, # type: Union[str, Iterable[str]] func_name=None, # type: str + co_name=None, # type: str inject_as_first_arg=False, # type: bool add_source=True, # type: bool add_impl=True, # type: bool @@ -774,17 +860,22 @@ def wraps(wrapped_fun, :param qualname: a string representing the qualified name to be used. 
If None (default), the `__qualname__` will default to the one of `wrapped_fun`, or the one in `new_sig` if `new_sig` is provided as a string with a non-empty function name. + :param co_name: a string representing the name to be used in the compiled code of the function. If None (default), + the `__code__.co_name` will default to the one of `func_impl` if `func_signature` is a `Signature`, or to the + name defined in `func_signature` if `func_signature` is a `str` and contains a non-empty name. :param module_name: the name of the module to be set on the function (under __module__ ). If None (default), the `__module__` attribute of `wrapped_fun` will be used. :param attrs: other keyword attributes that should be set on the function. Note that the full `__dict__` of `wrapped_fun` is automatically copied. :return: a decorator """ - func_name, func_sig, doc, qualname, module_name, all_attrs = _get_args_for_wrapping(wrapped_fun, new_sig, - remove_args, - prepend_args, append_args, - func_name, doc, - qualname, module_name, attrs) + func_name, func_sig, doc, qualname, co_name, module_name, all_attrs = _get_args_for_wrapping(wrapped_fun, new_sig, + remove_args, + prepend_args, + append_args, + func_name, doc, + qualname, co_name, + module_name, attrs) return with_signature(func_sig, func_name=func_name, @@ -792,12 +883,13 @@ def wraps(wrapped_fun, add_source=add_source, add_impl=add_impl, doc=doc, qualname=qualname, + co_name=co_name, module_name=module_name, **all_attrs) def _get_args_for_wrapping(wrapped, new_sig, remove_args, prepend_args, append_args, - func_name, doc, qualname, module_name, attrs): + func_name, doc, qualname, co_name, module_name, attrs): """ Internal method used by @wraps and create_wrapper @@ -809,6 +901,7 @@ def _get_args_for_wrapping(wrapped, new_sig, remove_args, prepend_args, append_a :param func_name: :param doc: :param qualname: + :param co_name: :param module_name: :param attrs: :return: @@ -860,6 +953,10 @@ def _get_args_for_wrapping(wrapped, new_sig, remove_args, prepend_args, append_a qualname = getattr_partial_aware(wrapped, '__qualname__', None) if module_name is None: module_name = getattr_partial_aware(wrapped, '__module__', None) + if co_name is None: + code = getattr_partial_aware(wrapped, '__code__', None) + if code is not None: + co_name = code.co_name # attributes: start from the wrapped dict, add '__wrapped__' if needed, and override with all attrs. all_attrs = copy(getattr_partial_aware(wrapped, '__dict__')) @@ -874,7 +971,7 @@ def _get_args_for_wrapping(wrapped, new_sig, remove_args, prepend_args, append_a all_attrs['__wrapped__'] = wrapped all_attrs.update(attrs) - return func_name, func_sig, doc, qualname, module_name, all_attrs + return func_name, func_sig, doc, qualname, co_name, module_name, all_attrs def with_signature(func_signature, # type: Union[str, Signature] @@ -884,6 +981,7 @@ def with_signature(func_signature, # type: Union[str, Signature] add_impl=True, # type: bool doc=None, # type: str qualname=None, # type: str + co_name=None, # type: str module_name=None, # type: str **attrs ): @@ -925,12 +1023,15 @@ def with_signature(func_signature, # type: Union[str, Signature] :param qualname: a string representing the qualified name to be used. If None (default), the `__qualname__` will default to the one of `func_impl` if `func_signature` is a `Signature`, or to the name defined in `func_signature` if `func_signature` is a `str` and contains a non-empty name. 
+ :param co_name: a string representing the name to be used in the compiled code of the function. If None (default), + the `__code__.co_name` will default to the one of `func_impl` if `func_signature` is a `Signature`, or to the + name defined in `func_signature` if `func_signature` is a `str` and contains a non-empty name. :param module_name: the name of the module to be set on the function (under __module__ ). If None (default), the `__module__` attribute of the decorated function will be used. :param attrs: other keyword attributes that should be set on the function. Note that the full `__dict__` of the decorated function is not automatically copied. """ - if func_signature is None: + if func_signature is None and co_name is None: # make sure that user does not provide non-default other args if inject_as_first_arg or not add_source or not add_impl: raise ValueError("If `func_signature=None` no new signature will be generated so only `func_name`, " @@ -959,6 +1060,7 @@ def with_signature(func_signature, # type: Union[str, Signature] add_impl=add_impl, doc=doc, qualname=qualname, + co_name=co_name, module_name=module_name, _with_sig_=True, # special trick to tell create_function that we're @with_signature **attrs
smarie/python-makefun
504f531cbc1ef41194908e2b5880505414a2b6f3
diff --git a/tests/test_advanced.py b/tests/test_advanced.py index f8a9ac7..c2c8141 100644 --- a/tests/test_advanced.py +++ b/tests/test_advanced.py @@ -5,7 +5,7 @@ import pytest from makefun.main import get_signature_from_string, with_signature -from makefun import wraps +from makefun import create_wrapper, wraps try: # python 3.3+ from inspect import signature, Signature, Parameter @@ -108,6 +108,96 @@ def tests_wraps_sigchange(): assert goo('hello') == 'hello' +def tests_wraps_lambda(): + """ Tests that `@wraps` can duplicate the signature of a lambda """ + foo = lambda a: a + + @wraps(foo) + def goo(*args, **kwargs): + return foo(*args, **kwargs) + + assert goo.__name__ == (lambda: None).__name__ + assert str(signature(goo)) == '(a)' + assert goo('hello') == 'hello' + + +def tests_wraps_renamed_lambda(): + """ Tests that `@wraps` can duplicate the signature of a lambda that has been renamed """ + foo = lambda a: a + foo.__name__ = 'bar' + + @wraps(foo) + def goo(*args, **kwargs): + return foo(*args, **kwargs) + + assert goo.__name__ == 'bar' + assert str(signature(goo)) == '(a)' + assert goo('hello') == 'hello' + + +def test_lambda_signature_str(): + """ Tests that `@with_signature` can create a lambda from a signature string """ + new_sig = '(a, b=5)' + + @with_signature(new_sig, func_name='<lambda>') + def foo(a, b): + return a + b + + assert foo.__name__ == '<lambda>' + assert foo.__code__.co_name == '<lambda>' + assert str(signature(foo)) == new_sig + assert foo(a=4) == 9 + + +def test_co_name(): + """ Tests that `@with_signature` can be used to change the __code__.co_name """ + @with_signature('()', co_name='bar') + def foo(): + return 'hello' + + assert foo.__name__ == 'foo' + assert foo.__code__.co_name == 'bar' + assert foo() == 'hello' + + +def test_with_signature_lambda(): + """ Tests that `@with_signature` can be used to change the __code__.co_name to `'<lambda>'` """ + @with_signature('()', co_name='<lambda>') + def foo(): + return 'hello' + + assert foo.__code__.co_name == '<lambda>' + assert foo() == 'hello' + + +def test_create_wrapper_lambda(): + """ Tests that `create_wrapper` returns a lambda function when given a lambda function to wrap""" + def foo(): + return 'hello' + bar = create_wrapper(lambda: None, foo) + + assert bar.__name__ == '<lambda>' + assert bar() == 'hello' + + +def test_invalid_co_name(): + """ Tests that `@with_signature` raises a `ValueError` when given an `co_name` that cannot be duplicated. """ + with pytest.raises(ValueError): + @with_signature('()', co_name='<invalid>') + def foo(): + return 'hello' + + +def test_invalid_func_name(): + """ Tests that `@with_signature` can duplicate a func_name that is invalid in a function definition. """ + @with_signature('()', func_name='<invalid>') + def foo(): + return 'hello' + + assert foo.__name__ == '<invalid>' + assert foo() == 'hello' + + @pytest.mark.skipif(sys.version_info < (3, 0), reason="requires python3 or higher") def test_qualname_when_nested(): """ Tests that qualname is correctly set when `@with_signature` is applied on nested functions """
wraps() fails when wrapped function is a lambda

Currently, it's not possible to do `@wraps(f)` when f is a lambda function:

```
from makefun import wraps

def call_and_multiply(f):
    @wraps(f)
    def wrapper(a):
        return f(a) * 2
    return wrapper

times_two = call_and_multiply(lambda a: a)
```

fails with:

```
Error in generated code:
def <lambda>(a):
    return _func_impl_(a=a)

Traceback (most recent call last):
  File "<stdin>", line 1, in <module>
  File "<stdin>", line 3, in call_and_multiply
  File "/home/andy/.local/lib/python3.10/site-packages/makefun/main.py", line 954, in replace_f
    return create_function(func_signature=func_signature,
  File "/home/andy/.local/lib/python3.10/site-packages/makefun/main.py", line 267, in create_function
    f = _make(func_name, params_names, body, evaldict)
  File "/home/andy/.local/lib/python3.10/site-packages/makefun/main.py", line 629, in _make
    code = compile(body, filename, 'single')
  File "<makefun-gen-0>", line 1
    def <lambda>(a):
        ^
SyntaxError: invalid syntax
```

Resolved by #80. If that's not the right way to go, then it would be helpful to throw an error explaining that lambda functions aren't allowed. Thanks.
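A small standalone demonstration of the underlying problem and of the workaround the fix adopts: `def <lambda>(...)` is not compilable source, so the generated body assigns a real lambda instead. The `_func_impl_` stand-in mirrors the generated code shown in the traceback; the rest of the scaffolding is illustrative:

```python
# `def <lambda>(...)` is not valid Python source, so compiling a wrapper
# under a lambda's __name__ fails:
try:
    compile("def <lambda>(a):\n    return a\n", "<gen>", "exec")
except SyntaxError as exc:
    print("as expected:", exc.msg)

# The fix compiles an assignment to an actual lambda instead:
ns = {"_func_impl_": lambda a: a * 2}
exec(compile("lambda_ = lambda a: _func_impl_(a=a)\n", "<gen>", "exec"), ns)
wrapper = ns["lambda_"]
print(wrapper.__name__, wrapper(21))  # <lambda> 42
```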
0.0
504f531cbc1ef41194908e2b5880505414a2b6f3
[ "tests/test_advanced.py::tests_wraps_lambda", "tests/test_advanced.py::test_lambda_signature_str", "tests/test_advanced.py::test_co_name", "tests/test_advanced.py::test_with_signature_lambda", "tests/test_advanced.py::test_create_wrapper_lambda", "tests/test_advanced.py::test_invalid_co_name", "tests/test_advanced.py::test_invalid_func_name" ]
[ "tests/test_advanced.py::test_non_representable_defaults", "tests/test_advanced.py::test_preserve_attributes", "tests/test_advanced.py::test_empty_name_in_string", "tests/test_advanced.py::test_same_than_wraps_basic", "tests/test_advanced.py::tests_wraps_sigchange", "tests/test_advanced.py::tests_wraps_renamed_lambda", "tests/test_advanced.py::test_qualname_when_nested", "tests/test_advanced.py::test_type_hint_error", "tests/test_advanced.py::test_type_hint_error2" ]
{ "failed_lite_validators": [ "has_many_hunks" ], "has_test_patch": true, "is_lite": false }
2022-04-05 02:50:15+00:00
bsd-3-clause
5,550
smarie__python-makefun-97
diff --git a/docs/changelog.md b/docs/changelog.md index 159307b..d870b3e 100644 --- a/docs/changelog.md +++ b/docs/changelog.md @@ -1,5 +1,10 @@ # Changelog +### 1.15.2 - bugfix + + - Fixed `SyntaxError` happening when the name of a native coroutine function to create contains `'return'`. + Fixes [#96](https://github.com/smarie/python-makefun/issues/96). + ### 1.15.1 - bugfixes - Fixed `ValueError: Invalid co_name` happening on python 2 when the name of a function to create starts or ends with diff --git a/src/makefun/main.py b/src/makefun/main.py index ffa389b..1e763e7 100644 --- a/src/makefun/main.py +++ b/src/makefun/main.py @@ -312,7 +312,7 @@ def create_function(func_signature, # type: Union[str, Signature] body = "def %s\n return _func_impl_(%s)\n" % (func_signature_str, params_str) if iscoroutinefunction(func_impl): - body = ("async " + body).replace('return', 'return await') + body = ("async " + body).replace('return _func_impl_', 'return await _func_impl_') # create the function by compiling code, mapping the `_func_impl_` symbol to `func_impl` protect_eval_dict(evaldict, func_name, params_names)
smarie/python-makefun
34077397ff05c4e00b2b9e523134e26f6d8efcea
diff --git a/tests/test_generators_coroutines.py b/tests/test_generators_coroutines.py index 7a9f13a..f9ec779 100644 --- a/tests/test_generators_coroutines.py +++ b/tests/test_generators_coroutines.py @@ -95,3 +95,25 @@ def test_native_coroutine(): from asyncio import get_event_loop out = get_event_loop().run_until_complete(dynamic_fun(0.1)) assert out == 0.1 + + [email protected](sys.version_info < (3, 5), reason="native coroutines with async/await require python3.6 or higher") +def test_issue_96(): + """Same as `test_native_coroutine` but tests that we can use 'return' in the coroutine name""" + + # define the handler that should be called + from tests._test_py35 import make_native_coroutine_handler + my_native_coroutine_handler = make_native_coroutine_handler() + + # create the dynamic function + dynamic_fun = create_function("foo_returns_bar(sleep_time=2)", my_native_coroutine_handler) + + # check that this is a coroutine for inspect and for asyncio + assert iscoroutinefunction(dynamic_fun) + from asyncio import iscoroutinefunction as is_native_co + assert is_native_co(dynamic_fun) + + # verify that the new function is a native coroutine and behaves correctly + from asyncio import get_event_loop + out = get_event_loop().run_until_complete(dynamic_fun(0.1)) + assert out == 0.1
`SyntaxError` when creating a native coroutine function with name containing 'return'

```python
from makefun import create_function

async def my_native_coroutine_handler(sleep_time):
    await sleep(sleep_time)
    return sleep_time

create_function("foo_returns_bar(sleep_time=2)", my_native_coroutine_handler)
```

raises

```
    filename = '<makefun-gen-%d>' % (next(_compile_count),)
    try:
>       code = compile(body, filename, 'single')
E         File "<makefun-gen-11>", line 1
E           async def foo_return awaits_bar(sleep_time=2):
E                                ^
E       SyntaxError: invalid syntax

..\src\makefun\main.py:714: SyntaxError
```
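The root cause is a blanket string replacement that also rewrites 'return' inside the function name. Below is a minimal reproduction of the broken rewrite next to the targeted one from the fix; the `body` string mimics what makefun generates:

```python
body = 'def foo_returns_bar(sleep_time=2):\n    return _func_impl_(sleep_time=sleep_time)\n'

# Naive rewrite: every occurrence of 'return' is touched, including the one
# inside the function name -> invalid source.
broken = ("async " + body).replace('return', 'return await')
print(broken.splitlines()[0])   # async def foo_return awaits_bar(sleep_time=2):

# Targeted rewrite from the fix: only the actual return statement changes.
fixed = ("async " + body).replace('return _func_impl_', 'return await _func_impl_')
print(fixed.splitlines()[0])    # async def foo_returns_bar(sleep_time=2):
compile(fixed, "<gen>", "exec")  # compiles cleanly
```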
0.0
34077397ff05c4e00b2b9e523134e26f6d8efcea
[ "tests/test_generators_coroutines.py::test_issue_96" ]
[ "tests/test_generators_coroutines.py::test_generator", "tests/test_generators_coroutines.py::test_generator_with_signature", "tests/test_generators_coroutines.py::test_generator_based_coroutine", "tests/test_generators_coroutines.py::test_native_coroutine" ]
{ "failed_lite_validators": [ "has_many_modified_files" ], "has_test_patch": true, "is_lite": false }
2023-11-09 21:34:37+00:00
bsd-3-clause
5,551
smarie__python-pytest-harvest-41
diff --git a/.gitignore b/.gitignore
index 0fbba57..4980c87 100644
--- a/.gitignore
+++ b/.gitignore
@@ -46,6 +46,7 @@ coverage.xml
 *.cover
 .hypothesis/
 .pytest_cache/
+pytest_harvest/_version.py
 
 # Translations
 *.mo
diff --git a/setup.cfg b/setup.cfg
index ab42292..4ef910e 100644
--- a/setup.cfg
+++ b/setup.cfg
@@ -18,5 +18,5 @@ description-file = README.md
 test=pytest
 
 [tool:pytest]
-addopts = --verbose
+addopts = --verbose --doctest-modules
 testpaths = pytest_harvest/tests
smarie/python-pytest-harvest
00a713c1759313d30db6848a0444c4d2c8c0e730
diff --git a/pytest_harvest/plugin.py b/pytest_harvest/plugin.py index 855d4e0..5e7f62a 100644 --- a/pytest_harvest/plugin.py +++ b/pytest_harvest/plugin.py @@ -48,7 +48,7 @@ def pytest_runtest_makereport(item, call): # ------------- To collect benchmark results ------------ FIXTURE_STORE = OrderedDict() -"""The default fixture store, that is also available through the `fixture_store` fixture. It is recommended to access +"""The default fixture store, that is also available through the `fixture_store` fixture. It is recommended to access it through `get_fixture_store(session)` so as to be xdist-compliant""" @@ -90,23 +90,23 @@ results_bag = create_results_bag_fixture('fixture_store', name='results_bag') A "results bag" fixture: a dictionary where you can store anything (results, context, etc.) during your tests execution. It offers a "much"-like api: you can access all entries using the object protocol such as in `results_bag.a = 1`. -This fixture has function-scope so a new, empty instance is injected in each test node. +This fixture has function-scope so a new, empty instance is injected in each test node. -There are several ways to gather all results after they have been stored. +There are several ways to gather all results after they have been stored. - * To get the raw stored results, use the `fixture_store` fixture: `fixture_store['results_bag']` will contain all + * To get the raw stored results, use the `fixture_store` fixture: `fixture_store['results_bag']` will contain all result bags for all tests. - - * If you are interested in both the stored results AND some stored fixture values (through `@saved_fixture`), you + + * If you are interested in both the stored results AND some stored fixture values (through `@saved_fixture`), you might rather wish to leverage the following helpers: - - use one of the `session_results_dct`, `module_results_dct`, `session_results_df` or `module_results_df` + - use one of the `session_results_dct`, `module_results_dct`, `session_results_df` or `module_results_df` fixtures. They contain all available information, in a nicely summarized way. - - - use the `get_session_synthesis_dct(session)` helper method to create a similar synthesis than the above with + + - use the `get_session_synthesis_dct(session)` helper method to create a similar synthesis than the above with more customization capabilities. -If you wish to create custom results bags similar to this one (for example to create several with different names), +If you wish to create custom results bags similar to this one (for example to create several with different names), use `create_results_bag_fixture`. 
""" diff --git a/pytest_harvest/results_session.py b/pytest_harvest/results_session.py index ca199ed..200ec99 100644 --- a/pytest_harvest/results_session.py +++ b/pytest_harvest/results_session.py @@ -9,6 +9,7 @@ except ImportError: pass from pytest_harvest.common import HARVEST_PREFIX +from _pytest.doctest import DoctestItem PYTEST_OBJ_NAME = 'pytest_obj' @@ -322,6 +323,9 @@ def _pytest_item_matches_filter(item, filterset): if item_obj in filterset: return True # support class methods: the item object can be a bound method while the filter is maybe not + elif item_obj is None: + # This can happen with DoctestItem + return False elif _is_unbound_present(item_obj, filterset): return True elif any(item_obj.__module__ == f for f in filterset): @@ -443,6 +447,9 @@ def get_pytest_params(item): if isinstance(item, _MinimalItem): # Our special _MinimalItem object - when xdist is used and worker states have been saved + restored return item.get_pytest_params() + elif isinstance(item, DoctestItem): + # No fixtures or parameters + return OrderedDict() else: param_dct = OrderedDict() for param_name in item.fixturenames: # note: item.funcargnames gives the exact same list diff --git a/pytest_harvest/tests_raw/test_get_session_results.py b/pytest_harvest/tests_raw/test_get_session_results.py index 732a055..4b2be66 100644 --- a/pytest_harvest/tests_raw/test_get_session_results.py +++ b/pytest_harvest/tests_raw/test_get_session_results.py @@ -1,5 +1,5 @@ # META -# {'passed': 16, 'skipped': 1, 'failed': 1} +# {'passed': 17, 'skipped': 1, 'failed': 1} # END META import os from itertools import product @@ -210,6 +210,26 @@ def test_synthesis_contains_everything(request): assert len(missing) == 0 +def doctestable(): + """Do nothing, but have a doctest. + + Examples + -------- + >>> 1 + 1 + 2 + """ + return + + +# For some reason, adding a monkeypatch will cause an extra failure for +# DoctestItem, possibly because it's a setup/teardown +def test_deal_with_doctest(dummy, request): + """ Tests that setup/teardown harvesting with DoctestItem works """ + synth_dct = get_session_synthesis_dct(request, filter_incomplete=False) + assert 'pytest_harvest/tests_raw/test_get_session_results.py::pytest_harvest.tests_raw.test_get_session_results.doctestable' \ + in synth_dct + + @yield_fixture(scope='session', autouse=True) def make_synthesis(request): """This checks that the session-scoped fixture teardown hook works as well"""
pytest-harvest fails when some doctests exist
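A simplified sketch of the guard the fix adds: doctest items carry no fixtures or parametrization, so parameter harvesting must short-circuit for them instead of touching attributes that only exist on regular test items. The function name here is hypothetical and the body is a reduced illustration, not the full `results_session.py` logic:

```python
from collections import OrderedDict

try:
    from _pytest.doctest import DoctestItem
except ImportError:
    DoctestItem = ()  # fallback so the isinstance() check below stays safe

def get_params_or_empty(item):
    """Collect fixture/parameter values for a pytest item."""
    if isinstance(item, DoctestItem):
        # A DoctestItem has no fixtures or parametrization: accessing
        # item.fixturenames / item.callspec would blow up, so return empty.
        return OrderedDict()
    params = OrderedDict()
    for name in getattr(item, "fixturenames", ()):
        callspec = getattr(item, "callspec", None)
        if callspec is not None and name in callspec.params:
            params[name] = callspec.params[name]
    return params
```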
0.0
00a713c1759313d30db6848a0444c4d2c8c0e730
[ "pytest_harvest/tests_raw/test_get_session_results.py::pytest_harvest.tests_raw.test_get_session_results.doctestable" ]
[ "pytest_harvest/tests_raw/test_get_session_results.py::test_foo[1-hello]", "pytest_harvest/tests_raw/test_get_session_results.py::test_foo[1-world]", "pytest_harvest/tests_raw/test_get_session_results.py::test_foo[2-hello]", "pytest_harvest/tests_raw/test_get_session_results.py::test_foo[2-world]", "pytest_harvest/tests_raw/test_get_session_results.py::test_synthesis_skipped", "pytest_harvest/tests_raw/test_get_session_results.py::test_synthesis_failed", "pytest_harvest/tests_raw/test_get_session_results.py::TestX::test_easy[True]", "pytest_harvest/tests_raw/test_get_session_results.py::TestX::test_easy[False]", "pytest_harvest/tests_raw/test_get_session_results.py::test_synthesis_id_formatting" ]
{ "failed_lite_validators": [ "has_short_problem_statement", "has_many_modified_files" ], "has_test_patch": true, "is_lite": false }
2020-09-11 11:57:03+00:00
bsd-3-clause
5,552
smarkets__marge-bot-256
diff --git a/README.md b/README.md index bbcb532..27ee749 100644 --- a/README.md +++ b/README.md @@ -87,6 +87,8 @@ optional arguments: [env var: MARGE_ADD_TESTED] (default: False) --batch Enable processing MRs in batches [env var: MARGE_BATCH] (default: False) + --use-no-ff-batches Disable fast forwarding when merging MR batches. + [env var: MARGE_USE_NO_FF_BATCHES] (default: False) --add-part-of Add "Part-of: <$MR_URL>" to each commit in MR. [env var: MARGE_ADD_PART_OF] (default: False) --add-reviewers Add "Reviewed-by: $approver" for each approver of MR to each commit in MR. diff --git a/marge/app.py b/marge/app.py index 0748643..a991811 100644 --- a/marge/app.py +++ b/marge/app.py @@ -208,6 +208,11 @@ def _parse_config(args): action='store_true', help='Debug logging (includes all HTTP requests etc).\n', ) + parser.add_argument( + '--use-no-ff-batches', + action='store_true', + help='Disable fast forwarding when merging MR batches' + ) config = parser.parse_args(args) if config.use_merge_strategy and config.batch: diff --git a/marge/batch_job.py b/marge/batch_job.py index b99fd87..2f4cae8 100644 --- a/marge/batch_job.py +++ b/marge/batch_job.py @@ -108,6 +108,19 @@ class BatchMergeJob(MergeJob): if getattr(changed_mr, attr) != getattr(merge_request, attr): raise CannotMerge(error_message.format(attr.replace('_', ' '))) + def merge_batch(self, target_branch, source_branch, no_ff=False): + if no_ff: + return self._repo.merge( + target_branch, + source_branch, + '--no-ff', + ) + + return self._repo.fast_forward( + target_branch, + source_branch, + ) + def accept_mr( self, merge_request, @@ -142,9 +155,10 @@ class BatchMergeJob(MergeJob): self.maybe_reapprove(merge_request, approvals) # This switches git to <target_branch> - final_sha = self._repo.fast_forward( + final_sha = self.merge_batch( merge_request.target_branch, merge_request.source_branch, + self._options.use_no_ff_batches, ) # Don't force push in case the remote has changed.
smarkets/marge-bot
5de1f8cc465630e97f5d419521c6b5becef52afe
diff --git a/tests/test_batch_job.py b/tests/test_batch_job.py index 109b905..4ac2c5b 100644 --- a/tests/test_batch_job.py +++ b/tests/test_batch_job.py @@ -138,6 +138,28 @@ class TestBatchJob: force=True, ) + def test_merge_batch(self, api, mocklab): + batch_merge_job = self.get_batch_merge_job(api, mocklab) + target_branch = 'master' + source_branch = mocklab.merge_request_info['source_branch'] + batch_merge_job.merge_batch(target_branch, source_branch, no_ff=False) + batch_merge_job._repo.fast_forward.assert_called_once_with( + target_branch, + source_branch, + ) + + def test_merge_batch_with_no_ff_enabled(self, api, mocklab): + batch_merge_job = self.get_batch_merge_job(api, mocklab) + target_branch = 'master' + source_branch = mocklab.merge_request_info['source_branch'] + batch_merge_job.merge_batch(target_branch, source_branch, no_ff=True) + batch_merge_job._repo.merge.assert_called_once_with( + target_branch, + source_branch, + '--no-ff' + ) + batch_merge_job._repo.fast_forward.assert_not_called() + def test_ensure_mr_not_changed(self, api, mocklab): with patch('marge.batch_job.MergeRequest') as mr_class: batch_merge_job = self.get_batch_merge_job(api, mocklab)
When merging batches, fast forwarding is enabled Fast-forward-only merging is used when merging batches. This is problematic because, when reverting a change included in a batch, every commit associated with the change must be reverted rather than just a single merge commit. There should be some configuration option so that fast-forwarding can be disabled when merging batches. https://github.com/smarkets/marge-bot/blob/master/marge/batch_job.py#L145
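For illustration, the shape of the fix in the patch above is a small dispatch between the two merge strategies. A minimal sketch, where `repo` is a stand-in for marge-bot's git wrapper and only the `--no-ff` flag is assumed from the patch:

```
def merge_batch(repo, target_branch, source_branch, no_ff=False):
    if no_ff:
        # Create an explicit merge commit, so reverting the batch later
        # only requires reverting that single commit.
        return repo.merge(target_branch, source_branch, '--no-ff')
    # Previous (and default) behaviour: plain fast-forward.
    return repo.fast_forward(target_branch, source_branch)
```

The trade-off is one extra merge commit per batch in exchange for single-step reverts.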
0.0
5de1f8cc465630e97f5d419521c6b5becef52afe
[ "tests/test_batch_job.py::TestBatchJob::test_merge_batch[True]", "tests/test_batch_job.py::TestBatchJob::test_merge_batch[False]", "tests/test_batch_job.py::TestBatchJob::test_merge_batch_with_no_ff_enabled[True]", "tests/test_batch_job.py::TestBatchJob::test_merge_batch_with_no_ff_enabled[False]" ]
[ "tests/test_batch_job.py::flake-8::FLAKE8", "tests/test_batch_job.py::TestBatchJob::test_remove_batch_branch[True]", "tests/test_batch_job.py::TestBatchJob::test_remove_batch_branch[False]", "tests/test_batch_job.py::TestBatchJob::test_close_batch_mr[True]", "tests/test_batch_job.py::TestBatchJob::test_close_batch_mr[False]", "tests/test_batch_job.py::TestBatchJob::test_create_batch_mr[True]", "tests/test_batch_job.py::TestBatchJob::test_create_batch_mr[False]", "tests/test_batch_job.py::TestBatchJob::test_get_mrs_with_common_target_branch[True]", "tests/test_batch_job.py::TestBatchJob::test_get_mrs_with_common_target_branch[False]", "tests/test_batch_job.py::TestBatchJob::test_ensure_mergeable_mr_ci_not_ok[True]", "tests/test_batch_job.py::TestBatchJob::test_ensure_mergeable_mr_ci_not_ok[False]", "tests/test_batch_job.py::TestBatchJob::test_push_batch[True]", "tests/test_batch_job.py::TestBatchJob::test_push_batch[False]", "tests/test_batch_job.py::TestBatchJob::test_ensure_mr_not_changed[True]", "tests/test_batch_job.py::TestBatchJob::test_ensure_mr_not_changed[False]", "tests/test_batch_job.py::TestBatchJob::test_fuse_mr_when_target_branch_was_moved[True]", "tests/test_batch_job.py::TestBatchJob::test_fuse_mr_when_target_branch_was_moved[False]", "tests/test_batch_job.py::TestBatchJob::test_fuse_mr_when_source_branch_was_moved[True]", "tests/test_batch_job.py::TestBatchJob::test_fuse_mr_when_source_branch_was_moved[False]" ]
{ "failed_lite_validators": [ "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false }
2020-04-26 04:37:47+00:00
bsd-3-clause
5,553
smarkets__marge-bot-29
diff --git a/CHANGELOG.md b/CHANGELOG.md index 0e2c68f..33fcdd7 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,2 +1,3 @@ +* 0.1.2: Fix parsing of gitlab versions #28 * 0.1.1: Fix failure to take into account group permissions #19. * 0.1.0: Initial release diff --git a/marge/gitlab.py b/marge/gitlab.py index 1af1363..7ee00c0 100644 --- a/marge/gitlab.py +++ b/marge/gitlab.py @@ -204,6 +204,11 @@ class Resource(object): class Version(namedtuple('Version', 'release edition')): @classmethod def parse(cls, string): - release_string, edition = string.split('-', maxsplit=1) + maybe_split_string = string.split('-', maxsplit=1) + if len(maybe_split_string) == 2: + release_string, edition = maybe_split_string + else: + release_string, edition = string, None + release = tuple(int(number) for number in release_string.split('.')) return cls(release=release, edition=edition) diff --git a/version b/version index 17e51c3..d917d3e 100644 --- a/version +++ b/version @@ -1,1 +1,1 @@ -0.1.1 +0.1.2
smarkets/marge-bot
d4c434269d59540389039f3a6cdebbc779175168
diff --git a/tests/test_gitlab.py b/tests/test_gitlab.py index 3d261a8..249a95a 100644 --- a/tests/test_gitlab.py +++ b/tests/test_gitlab.py @@ -4,3 +4,6 @@ import marge.gitlab as gitlab class TestVersion(object): def test_parse(self): assert gitlab.Version.parse('9.2.2-ee') == gitlab.Version(release=(9, 2, 2), edition='ee') + + def test_parse_no_edition(self): + assert gitlab.Version.parse('9.4.0') == gitlab.Version(release=(9, 4, 0), edition=None)
Errors parsing version of current gitlab versions Got an an error when parsing the version of newer gitlab versions. Looks like the "edition" part of the version is missing but expected. ``` 24.7.2017 23:49:212017-07-24 21:49:21,799 DEBUG http://gitlab:80 "GET /api/v4/version HTTP/1.1" 200 None 24.7.2017 23:49:212017-07-24 21:49:21,800 DEBUG RESPONSE CODE: 200 24.7.2017 23:49:212017-07-24 21:49:21,800 DEBUG RESPONSE BODY: {'version': '9.4.0', 'revision': '9bbe2ac'} 24.7.2017 23:49:212017-07-24 21:49:21,800 ERROR Unexpected Exception 24.7.2017 23:49:21Traceback (most recent call last): 24.7.2017 23:49:21 File "/nix/store/0yjvdhpw9cpmahw6ndfgby9n4pz8m79i-python3.6-marge-0.1.1/lib/python3.6/site-packages/marge/job.py", line 56, in execute 24.7.2017 23:49:21 approvals = merge_request.fetch_approvals() 24.7.2017 23:49:21 File "/nix/store/0yjvdhpw9cpmahw6ndfgby9n4pz8m79i-python3.6-marge-0.1.1/lib/python3.6/site-packages/marge/merge_request.py", line 121, in fetch_approvals 24.7.2017 23:49:21 approvals.refetch_info() 24.7.2017 23:49:21 File "/nix/store/0yjvdhpw9cpmahw6ndfgby9n4pz8m79i-python3.6-marge-0.1.1/lib/python3.6/site-packages/marge/approvals.py", line 10, in refetch_info 24.7.2017 23:49:21 if self._api.version().release >= (9, 2, 2): 24.7.2017 23:49:21 File "/nix/store/0yjvdhpw9cpmahw6ndfgby9n4pz8m79i-python3.6-marge-0.1.1/lib/python3.6/site-packages/marge/gitlab.py", line 72, in version 24.7.2017 23:49:21 return Version.parse(response['version']) 24.7.2017 23:49:21 File "/nix/store/0yjvdhpw9cpmahw6ndfgby9n4pz8m79i-python3.6-marge-0.1.1/lib/python3.6/site-packages/marge/gitlab.py", line 207, in parse 24.7.2017 23:49:21 release_string, edition = string.split('-', maxsplit=1) 24.7.2017 23:49:21ValueError: not enough values to unpack (expected 2, got 1) 24.7.2017 23:49:212017-07-24 21:49:21,800 DEBUG REQUEST: GET http://gitlab/api/v4/version {'PRIVATE-TOKEN': 'HW9c_BJgwgoK5cEmwsSr'} {'params': {}} 24.7.2017 23:49:212017-07-24 21:49:21,802 DEBUG Starting new HTTP connection (1): gitlab 24.7.2017 23:49:212017-07-24 21:49:21,810 DEBUG http://gitlab:80 "GET /api/v4/version HTTP/1.1" 200 None 24.7.2017 23:49:212017-07-24 21:49:21,811 DEBUG RESPONSE CODE: 200 24.7.2017 23:49:212017-07-24 21:49:21,811 DEBUG RESPONSE BODY: {'version': '9.4.0', 'revision': '9bbe2ac'} 24.7.2017 23:49:21Traceback (most recent call last): 24.7.2017 23:49:21 File "/nix/store/0yjvdhpw9cpmahw6ndfgby9n4pz8m79i-python3.6-marge-0.1.1/lib/python3.6/site-packages/marge/job.py", line 56, in execute 24.7.2017 23:49:21 approvals = merge_request.fetch_approvals() 24.7.2017 23:49:21 File "/nix/store/0yjvdhpw9cpmahw6ndfgby9n4pz8m79i-python3.6-marge-0.1.1/lib/python3.6/site-packages/marge/merge_request.py", line 121, in fetch_approvals 24.7.2017 23:49:21 approvals.refetch_info() 24.7.2017 23:49:21 File "/nix/store/0yjvdhpw9cpmahw6ndfgby9n4pz8m79i-python3.6-marge-0.1.1/lib/python3.6/site-packages/marge/approvals.py", line 10, in refetch_info 24.7.2017 23:49:21 if self._api.version().release >= (9, 2, 2): 24.7.2017 23:49:21 File "/nix/store/0yjvdhpw9cpmahw6ndfgby9n4pz8m79i-python3.6-marge-0.1.1/lib/python3.6/site-packages/marge/gitlab.py", line 72, in version 24.7.2017 23:49:21 return Version.parse(response['version']) 24.7.2017 23:49:21 File "/nix/store/0yjvdhpw9cpmahw6ndfgby9n4pz8m79i-python3.6-marge-0.1.1/lib/python3.6/site-packages/marge/gitlab.py", line 207, in parse 24.7.2017 23:49:21 release_string, edition = string.split('-', maxsplit=1) 24.7.2017 23:49:21ValueError: not enough values to unpack (expected 2, got 1) 24.7.2017 
23:49:21 24.7.2017 23:49:21During handling of the above exception, another exception occurred: 24.7.2017 23:49:21 24.7.2017 23:49:21Traceback (most recent call last): 24.7.2017 23:49:21 File "/nix/store/0yjvdhpw9cpmahw6ndfgby9n4pz8m79i-python3.6-marge-0.1.1/bin/.marge.app-wrapped", line 4, in <module> 24.7.2017 23:49:21 marge.app.main() 24.7.2017 23:49:21 File "/nix/store/0yjvdhpw9cpmahw6ndfgby9n4pz8m79i-python3.6-marge-0.1.1/lib/python3.6/site-packages/marge/app.py", line 114, in main 24.7.2017 23:49:21 marge_bot.start() 24.7.2017 23:49:21 File "/nix/store/0yjvdhpw9cpmahw6ndfgby9n4pz8m79i-python3.6-marge-0.1.1/lib/python3.6/site-packages/marge/bot.py", line 50, in start 24.7.2017 23:49:21 self._run(repo_manager) 24.7.2017 23:49:21 File "/nix/store/0yjvdhpw9cpmahw6ndfgby9n4pz8m79i-python3.6-marge-0.1.1/lib/python3.6/site-packages/marge/bot.py", line 92, in _run 24.7.2017 23:49:21 job.execute() 24.7.2017 23:49:21 File "/nix/store/0yjvdhpw9cpmahw6ndfgby9n4pz8m79i-python3.6-marge-0.1.1/lib/python3.6/site-packages/marge/job.py", line 70, in execute 24.7.2017 23:49:21 merge_request.comment("I'm broken on the inside, please somebody fix me... :cry:") 24.7.2017 23:49:21 File "/nix/store/0yjvdhpw9cpmahw6ndfgby9n4pz8m79i-python3.6-marge-0.1.1/lib/python3.6/site-packages/marge/merge_request.py", line 90, in comment 24.7.2017 23:49:21 if self._api.version().release >= (9, 2, 2): 24.7.2017 23:49:21 File "/nix/store/0yjvdhpw9cpmahw6ndfgby9n4pz8m79i-python3.6-marge-0.1.1/lib/python3.6/site-packages/marge/gitlab.py", line 72, in version 24.7.2017 23:49:21 return Version.parse(response['version']) 24.7.2017 23:49:21 File "/nix/store/0yjvdhpw9cpmahw6ndfgby9n4pz8m79i-python3.6-marge-0.1.1/lib/python3.6/site-packages/marge/gitlab.py", line 207, in parse 24.7.2017 23:49:21 release_string, edition = string.split('-', maxsplit=1) 24.7.2017 23:49:21ValueError: not enough values to unpack (expected 2, got 1) ``` **Versions** GitLab: 9.4.0 GitLab API: v4 marge-bot: 0.1.1
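The traceback pinpoints the bug: `'9.4.0'.split('-', maxsplit=1)` yields a one-element list, so unpacking it into two names raises `ValueError`. A self-contained sketch of the tolerant parsing that the patch introduces (mirroring `marge/gitlab.py`):

```
from collections import namedtuple

Version = namedtuple('Version', 'release edition')

def parse_version(string):
    # '9.2.2-ee' -> Version(release=(9, 2, 2), edition='ee')
    # '9.4.0'    -> Version(release=(9, 4, 0), edition=None)
    maybe_split = string.split('-', maxsplit=1)
    if len(maybe_split) == 2:
        release_string, edition = maybe_split
    else:
        release_string, edition = string, None
    release = tuple(int(number) for number in release_string.split('.'))
    return Version(release=release, edition=edition)

assert parse_version('9.4.0') == Version(release=(9, 4, 0), edition=None)
```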
0.0
d4c434269d59540389039f3a6cdebbc779175168
[ "tests/test_gitlab.py::TestVersion::test_parse_no_edition" ]
[ "tests/test_gitlab.py::TestVersion::test_parse" ]
{ "failed_lite_validators": [ "has_hyperlinks", "has_many_modified_files" ], "has_test_patch": true, "is_lite": false }
2017-07-25 07:57:29+00:00
bsd-3-clause
5,554
smarkets__marge-bot-318
diff --git a/marge/bot.py b/marge/bot.py index 0996962..465e960 100644 --- a/marge/bot.py +++ b/marge/bot.py @@ -139,7 +139,7 @@ class Bot: [mr.web_url for mr in source_filtered_mrs] ) source_filtered_out = set(filtered_mrs) - set(source_filtered_mrs) - if filtered_out: + if source_filtered_out: log.debug( 'MRs that do not match source_branch_regexp: %s', [mr.web_url for mr in source_filtered_out] diff --git a/marge/job.py b/marge/job.py index 696aefa..9b33aae 100644 --- a/marge/job.py +++ b/marge/job.py @@ -55,6 +55,9 @@ class MergeJob: '(have: {0.approver_usernames} missing: {0.approvals_left})'.format(approvals) ) + if not merge_request.blocking_discussions_resolved: + raise CannotMerge("Sorry, I can't merge requests which have unresolved discussions!") + state = merge_request.state if state not in ('opened', 'reopened', 'locked'): if state in ('merged', 'closed'): diff --git a/marge/merge_request.py b/marge/merge_request.py index 98a14eb..030b79b 100644 --- a/marge/merge_request.py +++ b/marge/merge_request.py @@ -151,6 +151,10 @@ class MergeRequest(gitlab.Resource): def web_url(self): return self.info['web_url'] + @property + def blocking_discussions_resolved(self): + return self.info['blocking_discussions_resolved'] + @property def force_remove_source_branch(self): return self.info['force_remove_source_branch']
smarkets/marge-bot
647f916da79c88bae641c0dd7d46e01eaafa2dea
diff --git a/tests/gitlab_api_mock.py b/tests/gitlab_api_mock.py index a682d52..012fb64 100644 --- a/tests/gitlab_api_mock.py +++ b/tests/gitlab_api_mock.py @@ -58,6 +58,7 @@ class MockLab: # pylint: disable=too-few-public-methods 'force_remove_source_branch': True, 'target_branch': 'master', 'work_in_progress': False, + 'blocking_discussions_resolved': True, 'web_url': 'http://git.example.com/group/project/merge_request/666', } if merge_request_options is not None: diff --git a/tests/test_job.py b/tests/test_job.py index 77f5961..4818b29 100644 --- a/tests/test_job.py +++ b/tests/test_job.py @@ -130,6 +130,20 @@ class TestJob: assert exc_info.value.reason == "Sorry, I can't merge requests marked as Work-In-Progress!" + def test_ensure_mergeable_mr_unresolved_discussion(self): + merge_job = self.get_merge_job() + merge_request = self._mock_merge_request( + assignee_ids=[merge_job._user.id], + state='opened', + work_in_progress=False, + blocking_discussions_resolved=False, + ) + merge_request.fetch_approvals.return_value.sufficient = True + with pytest.raises(CannotMerge) as exc_info: + merge_job.ensure_mergeable_mr(merge_request) + + assert exc_info.value.reason == "Sorry, I can't merge requests which have unresolved discussions!" + def test_ensure_mergeable_mr_squash_and_trailers(self): merge_job = self.get_merge_job(options=MergeJobOptions.default(add_reviewers=True)) merge_request = self._mock_merge_request(
Improve error message when assigned to MR with unresolved threads Currently, the message posted by marge when attempting to merge an MR with outstanding threads is only: > I couldn't merge this branch: Merge request was rejected by GitLab: 'Branch cannot be merged' and the log message is: >  | 2021-07-08 11:01:26,671 WARNING I couldn't merge this branch: Merge request was rejected by GitLab: 'Branch cannot be merged' This could be improved by extending `def ensure_mergeable_mr(self, merge_request):` to retrieve all notes in the MR and check that all notes that are resolvable are also resolved.
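Instead of walking individual notes as the report suggests, the merged patch leans on the `blocking_discussions_resolved` field that GitLab's merge request API already returns. A rough sketch of the resulting guard; `CannotMerge` here is a local stand-in for marge-bot's exception of the same name:

```
class CannotMerge(Exception):
    """Stand-in for marge-bot's CannotMerge error."""

def ensure_discussions_resolved(merge_request):
    # `blocking_discussions_resolved` comes straight from the GitLab
    # merge request payload, exposed as a property in the patch above.
    if not merge_request.blocking_discussions_resolved:
        raise CannotMerge(
            "Sorry, I can't merge requests which have unresolved discussions!"
        )
```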
0.0
647f916da79c88bae641c0dd7d46e01eaafa2dea
[ "tests/test_job.py::TestJob::test_ensure_mergeable_mr_unresolved_discussion" ]
[ "tests/gitlab_api_mock.py::flake-8::FLAKE8", "tests/test_job.py::PYLINT", "tests/test_job.py::flake-8::FLAKE8", "tests/test_job.py::TestJob::test_get_source_project_when_is_target_project", "tests/test_job.py::TestJob::test_get_source_project_when_is_fork", "tests/test_job.py::TestJob::test_get_mr_ci_status[9.4.0-ee-False]", "tests/test_job.py::TestJob::test_get_mr_ci_status[10.5.0-ee-True]", "tests/test_job.py::TestJob::test_ensure_mergeable_mr_not_assigned", "tests/test_job.py::TestJob::test_ensure_mergeable_mr_state_not_ok", "tests/test_job.py::TestJob::test_ensure_mergeable_mr_not_approved", "tests/test_job.py::TestJob::test_ensure_mergeable_mr_wip", "tests/test_job.py::TestJob::test_ensure_mergeable_mr_squash_and_trailers", "tests/test_job.py::TestJob::test_unassign_from_mr", "tests/test_job.py::TestJob::test_fuse_using_rebase", "tests/test_job.py::TestJob::test_fuse_using_merge", "tests/test_job.py::TestMergeJobOptions::test_default", "tests/test_job.py::TestMergeJobOptions::test_default_ci_time" ]
{ "failed_lite_validators": [ "has_many_modified_files" ], "has_test_patch": true, "is_lite": false }
2021-07-13 19:40:48+00:00
bsd-3-clause
5,555
smarkets__marge-bot-340
diff --git a/CHANGELOG.md b/CHANGELOG.md index 0233cba..6716818 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,3 +1,5 @@ + * 0.10.1: + - Feature: Guarantee pipeline before merging * 0.10.0: - Feature: implement HTTPS support for cloning (#225) #283 - Feature: Make CI work with GitHub Actions #308 diff --git a/marge/app.py b/marge/app.py index 49402d7..db40ce1 100644 --- a/marge/app.py +++ b/marge/app.py @@ -32,7 +32,7 @@ def time_interval(str_interval): ) from err -def _parse_config(args): +def _parse_config(args): # pylint: disable=too-many-statements def regexp(str_regex): try: @@ -237,6 +237,12 @@ def _parse_config(args): action='store_true', help='Run marge-bot as a single CLI command, not a service' ) + parser.add_argument( + '--guarantee-final-pipeline', + action='store_true', + help='Guaranteed final pipeline when assigned to marge-bot' + ) + config = parser.parse_args(args) if config.use_merge_strategy and config.batch: @@ -342,6 +348,7 @@ def main(args=None): use_no_ff_batches=options.use_no_ff_batches, use_merge_commit_batches=options.use_merge_commit_batches, skip_ci_batches=options.skip_ci_batches, + guarantee_final_pipeline=options.guarantee_final_pipeline, ), batch=options.batch, cli=options.cli, diff --git a/marge/job.py b/marge/job.py index 7c0cd44..c93bf55 100644 --- a/marge/job.py +++ b/marge/job.py @@ -460,6 +460,7 @@ JOB_OPTIONS = [ 'use_no_ff_batches', 'use_merge_commit_batches', 'skip_ci_batches', + 'guarantee_final_pipeline', ] @@ -476,6 +477,7 @@ class MergeJobOptions(namedtuple('MergeJobOptions', JOB_OPTIONS)): add_tested=False, add_part_of=False, add_reviewers=False, reapprove=False, approval_timeout=None, embargo=None, ci_timeout=None, fusion=Fusion.rebase, use_no_ff_batches=False, use_merge_commit_batches=False, skip_ci_batches=False, + guarantee_final_pipeline=False, ): approval_timeout = approval_timeout or timedelta(seconds=0) embargo = embargo or IntervalUnion.empty() @@ -492,6 +494,7 @@ class MergeJobOptions(namedtuple('MergeJobOptions', JOB_OPTIONS)): use_no_ff_batches=use_no_ff_batches, use_merge_commit_batches=use_merge_commit_batches, skip_ci_batches=skip_ci_batches, + guarantee_final_pipeline=guarantee_final_pipeline, ) diff --git a/marge/single_merge_job.py b/marge/single_merge_job.py index 7d3cb59..1f8155b 100644 --- a/marge/single_merge_job.py +++ b/marge/single_merge_job.py @@ -13,6 +13,7 @@ class SingleMergeJob(MergeJob): def __init__(self, *, api, user, project, repo, options, merge_request): super().__init__(api=api, user=user, project=project, repo=repo, options=options) self._merge_request = merge_request + self._options = options def execute(self): merge_request = self._merge_request @@ -61,7 +62,17 @@ class SingleMergeJob(MergeJob): merge_request.comment("Someone skipped the queue! Will have to try again...") continue - log.info('Commit id to merge %r (into: %r)', actual_sha, target_sha) + if _updated_sha == actual_sha and self._options.guarantee_final_pipeline: + log.info('No commits on target branch to fuse, triggering pipeline...') + merge_request.comment("jenkins retry") + time.sleep(30) + + log.info( + 'Commit id to merge %r into: %r (updated sha: %r)', + actual_sha, + target_sha, + _updated_sha + ) time.sleep(5) sha_now = Commit.last_on_branch(source_project.id, merge_request.source_branch, api).id diff --git a/version b/version index 78bc1ab..5712157 100644 --- a/version +++ b/version @@ -1,1 +1,1 @@ -0.10.0 +0.10.1
smarkets/marge-bot
70ab3c8a03779691a5b825272c9b2d3e04f3c625
diff --git a/tests/test_job.py b/tests/test_job.py index 4818b29..22036d1 100644 --- a/tests/test_job.py +++ b/tests/test_job.py @@ -217,6 +217,7 @@ class TestMergeJobOptions: use_no_ff_batches=False, use_merge_commit_batches=False, skip_ci_batches=False, + guarantee_final_pipeline=False, ) def test_default_ci_time(self):
Guarantee final pipeline when assigned to marge-bot Requirement: guarantee one final pipeline when an MR is assigned to marge-bot. Working cases: 1. When multiple MRs are batched 2. When commits exist on master with a single MR (a pipeline is triggered due to the rebase) Missed case: when a single MR is being handled with no commits to rebase. This is helpful for running checks on approvals, compliance pipelines, and CI cost savings.
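The patch above covers the missed case by noticing that the rebase produced no new commit and then nudging CI through a comment; schematically (the "jenkins retry" trigger phrase and the 30-second pause are taken from the patch, everything else is a sketch):

```
import time

def maybe_trigger_final_pipeline(merge_request, updated_sha, actual_sha,
                                 guarantee_final_pipeline):
    # If fusing with the target branch rewrote nothing, no pipeline would
    # start on its own, so force one via a comment that the CI integration
    # reacts to.
    if updated_sha == actual_sha and guarantee_final_pipeline:
        merge_request.comment("jenkins retry")
        time.sleep(30)
```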
0.0
70ab3c8a03779691a5b825272c9b2d3e04f3c625
[ "tests/test_job.py::TestMergeJobOptions::test_default" ]
[ "tests/test_job.py::PYLINT", "tests/test_job.py::flake-8::FLAKE8", "tests/test_job.py::TestJob::test_get_source_project_when_is_target_project", "tests/test_job.py::TestJob::test_get_source_project_when_is_fork", "tests/test_job.py::TestJob::test_get_mr_ci_status[9.4.0-ee-False]", "tests/test_job.py::TestJob::test_get_mr_ci_status[10.5.0-ee-True]", "tests/test_job.py::TestJob::test_ensure_mergeable_mr_not_assigned", "tests/test_job.py::TestJob::test_ensure_mergeable_mr_state_not_ok", "tests/test_job.py::TestJob::test_ensure_mergeable_mr_not_approved", "tests/test_job.py::TestJob::test_ensure_mergeable_mr_wip", "tests/test_job.py::TestJob::test_ensure_mergeable_mr_unresolved_discussion", "tests/test_job.py::TestJob::test_ensure_mergeable_mr_squash_and_trailers", "tests/test_job.py::TestJob::test_unassign_from_mr", "tests/test_job.py::TestJob::test_fuse_using_rebase", "tests/test_job.py::TestJob::test_fuse_using_merge", "tests/test_job.py::TestMergeJobOptions::test_default_ci_time" ]
{ "failed_lite_validators": [ "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false }
2022-05-09 16:30:47+00:00
bsd-3-clause
5,556
smarkets__marge-bot-46
diff --git a/marge/job.py b/marge/job.py index 8a562d8..b2078d8 100644 --- a/marge/job.py +++ b/marge/job.py @@ -153,6 +153,38 @@ class MergeJob(object): except gitlab.Unauthorized: log.warning('Unauthorized!') raise CannotMerge('My user cannot accept merge requests!') + except gitlab.NotFound as e: + log.warning('Not Found!: %s', e) + merge_request.refetch_info() + if merge_request.state == 'merged': + # someone must have hit "merge when build succeeds" and we lost the race, + # the branch is gone and we got a 404. Anyway, our job here is done. + # (see #33) + rebased_into_up_to_date_target_branch = True + else: + log.warning('For the record, merge request state is %r', merge_request.state) + raise + except gitlab.MethodNotAllowed as e: + log.warning('Not Allowed!: %s', e) + merge_request.refetch_info() + if merge_request.work_in_progress: + raise CannotMerge( + 'The request was marked as WIP as I was processing it (maybe a WIP commit?)' + ) + elif merge_request.state == 'reopened': + raise CannotMerge( + 'GitLab refused to merge this branch. I suspect that a Push Rule or a git-hook ' + 'is rejecting my commits; maybe my email needs to be white-listed?' + ) + elif merge_request.state == 'closed': + raise CannotMerge('Someone closed the merge request while I was attempting to merge it.') + elif merge_request.state == 'merged': + # We are not covering any observed behaviour here, but if at this + # point the request is merged, our job is done, so no need to complain + log.info('Merge request is already merged, someone was faster!') + rebased_into_up_to_date_target_branch = True + else: + raise CannotMerge("Gitlab refused to merge this request and I don't know why!") except gitlab.ApiError: log.exception('Unanticipated ApiError from Gitlab on merge attempt') raise CannotMerge('had some issue with gitlab, check my logs...')
smarkets/marge-bot
e510b250a5c0b2caa9d64dfaa7a2ad2206c1b542
diff --git a/tests/test_job.py b/tests/test_job.py index 8e6bf77..984c9c4 100644 --- a/tests/test_job.py +++ b/tests/test_job.py @@ -246,6 +246,119 @@ class TestRebaseAndAccept(object): assert api.state == 'merged' assert api.notes == ["My job would be easier if people didn't jump the queue and pushed directly... *sigh*"] + def test_handles_races_for_merging(self, time_sleep): + api, mocklab = self.api, self.mocklab + rewritten_sha = mocklab.rewritten_sha + api.add_transition( + PUT( + '/projects/1234/merge_requests/54/merge', + dict(sha=rewritten_sha, should_remove_source_branch=True, merge_when_pipeline_succeeds=True), + ), + Error(marge.gitlab.NotFound(404, {'message': '404 Branch Not Found'})), + from_state='passed', to_state='someone_else_merged', + ) + api.add_merge_request( + dict(mocklab.merge_request_info, state='merged'), + from_state='someone_else_merged', + ) + with patch('marge.job.push_rebased_and_rewritten_version', side_effect=mocklab.push_rebased): + job = self.make_job() + job.execute() + assert api.state == 'someone_else_merged' + assert api.notes == [] + + def test_handles_request_becoming_wip_after_push(self, time_sleep): + api, mocklab = self.api, self.mocklab + rewritten_sha = mocklab.rewritten_sha + api.add_transition( + PUT( + '/projects/1234/merge_requests/54/merge', + dict(sha=rewritten_sha, should_remove_source_branch=True, merge_when_pipeline_succeeds=True), + ), + Error(marge.gitlab.MethodNotAllowed(405, {'message': '405 Method Not Allowed'})), + from_state='passed', to_state='now_is_wip', + ) + api.add_merge_request( + dict(mocklab.merge_request_info, work_in_progress=True), + from_state='now_is_wip', + ) + message = 'The request was marked as WIP as I was processing it (maybe a WIP commit?)' + with patch('marge.job.push_rebased_and_rewritten_version', side_effect=mocklab.push_rebased): + with mocklab.expected_failure(message): + job = self.make_job() + job.execute() + assert api.state == 'now_is_wip' + assert api.notes == ["I couldn't merge this branch: %s" % message] + + def test_guesses_git_hook_error_on_merge_refusal(self, time_sleep): + api, mocklab = self.api, self.mocklab + rewritten_sha = mocklab.rewritten_sha + api.add_transition( + PUT( + '/projects/1234/merge_requests/54/merge', + dict(sha=rewritten_sha, should_remove_source_branch=True, merge_when_pipeline_succeeds=True), + ), + Error(marge.gitlab.MethodNotAllowed(405, {'message': '405 Method Not Allowed'})), + from_state='passed', to_state='rejected_by_git_hook', + ) + api.add_merge_request( + dict(mocklab.merge_request_info, state='reopened'), + from_state='rejected_by_git_hook', + ) + message = ( + 'GitLab refused to merge this branch. I suspect that a Push Rule or a git-hook ' + 'is rejecting my commits; maybe my email needs to be white-listed?' 
+ ) + with patch('marge.job.push_rebased_and_rewritten_version', side_effect=mocklab.push_rebased): + with mocklab.expected_failure(message): + job = self.make_job() + job.execute() + assert api.state == 'rejected_by_git_hook' + assert api.notes == ["I couldn't merge this branch: %s" % message] + + def test_guesses_git_hook_error_on_merge_refusal(self, time_sleep): + api, mocklab = self.api, self.mocklab + rewritten_sha = mocklab.rewritten_sha + api.add_transition( + PUT( + '/projects/1234/merge_requests/54/merge', + dict(sha=rewritten_sha, should_remove_source_branch=True, merge_when_pipeline_succeeds=True), + ), + Error(marge.gitlab.MethodNotAllowed(405, {'message': '405 Method Not Allowed'})), + from_state='passed', to_state='oops_someone_closed_it', + ) + api.add_merge_request( + dict(mocklab.merge_request_info, state='closed'), + from_state='oops_someone_closed_it', + ) + message = 'Someone closed the merge request while I was attempting to merge it.' + with patch('marge.job.push_rebased_and_rewritten_version', side_effect=mocklab.push_rebased): + with mocklab.expected_failure(message): + job = self.make_job() + job.execute() + assert api.state == 'oops_someone_closed_it' + assert api.notes == ["I couldn't merge this branch: %s" % message] + + def test_tells_explicitly_that_gitlab_refused_to_merge(self, time_sleep): + api, mocklab = self.api, self.mocklab + rewritten_sha = mocklab.rewritten_sha + api.add_transition( + PUT( + '/projects/1234/merge_requests/54/merge', + dict(sha=rewritten_sha, should_remove_source_branch=True, merge_when_pipeline_succeeds=True), + ), + Error(marge.gitlab.MethodNotAllowed(405, {'message': '405 Method Not Allowed'})), + from_state='passed', to_state='rejected_for_misterious_reasons', + ) + message = "Gitlab refused to merge this request and I don't know why!" + with patch('marge.job.push_rebased_and_rewritten_version', side_effect=mocklab.push_rebased): + with mocklab.expected_failure(message): + job = self.make_job() + job.execute() + assert api.state == 'rejected_for_misterious_reasons' + assert api.notes == ["I couldn't merge this branch: %s" % message] + + def test_wont_merge_wip_stuff(self, time_sleep): api, mocklab = self.api, self.mocklab wip_merge_request = dict(mocklab.merge_request_info, work_in_progress=True)
Handle GitLab setting merge request as WIP after pushing Because of [this GitLab feature](https://gitlab.com/gitlab-org/gitlab-ce/merge_requests/8124), we can have the following interaction: 1. Someone creates a merge request containing a commit with WIP in its name. GitLab doesn't consider the merge request as WIP at this point. 1. The merge request is assigned to Marge; it checks that the merge request is not WIP and proceeds. 1. The branch is rebased and pushed again. 1. Because we have just pushed a commit that contains WIP (even if it was in one of the original commits), GitLab marks the merge request as WIP (it even reports "Marge Bot marked as Work In Progress"). 1. After CI passes, she tries to merge, but GitLab now refuses to merge and we fail with "had some issue with gitlab". We should test for WIP status before merging or, perhaps better, after the merge fails and we don't know why.
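The eventual fix (in the patch above) takes the second route: when GitLab rejects the merge with a 405, marge refetches the MR and diagnoses why. A compressed sketch of that branch; the two exception classes are local stand-ins for `marge.gitlab.MethodNotAllowed` and marge-bot's `CannotMerge`:

```
class MethodNotAllowed(Exception):
    """Stand-in for marge.gitlab.MethodNotAllowed (HTTP 405)."""

class CannotMerge(Exception):
    """Stand-in for marge-bot's CannotMerge error."""

def accept_with_diagnosis(merge_request):
    try:
        merge_request.accept()
    except MethodNotAllowed:
        # The 405 alone is uninformative; refetch the MR and inspect it.
        merge_request.refetch_info()
        if merge_request.work_in_progress:
            raise CannotMerge(
                'The request was marked as WIP as I was processing it '
                '(maybe a WIP commit?)'
            )
        raise CannotMerge(
            "Gitlab refused to merge this request and I don't know why!"
        )
```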
0.0
e510b250a5c0b2caa9d64dfaa7a2ad2206c1b542
[ "tests/test_job.py::TestRebaseAndAccept::test_handles_races_for_merging", "tests/test_job.py::TestRebaseAndAccept::test_handles_request_becoming_wip_after_push", "tests/test_job.py::TestRebaseAndAccept::test_guesses_git_hook_error_on_merge_refusal", "tests/test_job.py::TestRebaseAndAccept::test_tells_explicitly_that_gitlab_refused_to_merge" ]
[ "tests/test_job.py::TestRebaseAndAccept::test_succeeds_first_time", "tests/test_job.py::TestRebaseAndAccept::test_fails_on_not_acceptable_if_master_did_not_move", "tests/test_job.py::TestRebaseAndAccept::test_succeeds_second_time_if_master_moved", "tests/test_job.py::TestRebaseAndAccept::test_wont_merge_wip_stuff", "tests/test_job.py::TestRebaseAndAccept::test_wont_merge_branches_with_autosquash_if_rewriting", "tests/test_job.py::TestMergeJobOptions::test_default", "tests/test_job.py::TestMergeJobOptions::test_default_ci_time" ]
{ "failed_lite_validators": [ "has_hyperlinks" ], "has_test_patch": true, "is_lite": false }
2017-08-13 19:13:47+00:00
bsd-3-clause
5,557
smarkets__marge-bot-59
diff --git a/marge/app.py b/marge/app.py index 576ade9..29b524e 100644 --- a/marge/app.py +++ b/marge/app.py @@ -178,7 +178,7 @@ def main(args=sys.argv[1:]): add_reviewers=options.add_reviewers, reapprove=options.impersonate_approvers, embargo=options.embargo, - ci_timeout=timedelta(seconds=options.ci_timeout), + ci_timeout=options.ci_timeout, ) ) diff --git a/marge/job.py b/marge/job.py index b2d69fe..ae2b251 100644 --- a/marge/job.py +++ b/marge/job.py @@ -63,7 +63,7 @@ class MergeJob(object): log.exception('Unexpected Git error') merge_request.comment('Something seems broken on my local git repo; check my logs!') raise - except Exception: + except Exception as _ex: log.exception('Unexpected Exception') merge_request.comment("I'm broken on the inside, please somebody fix me... :cry:") self.unassign_from_mr(merge_request) @@ -119,11 +119,6 @@ class MergeJob(object): log.info('Commit id to merge %r (into: %r)', actual_sha, target_sha) time.sleep(5) - if source_project.only_allow_merge_if_pipeline_succeeds: - self.wait_for_ci_to_pass(source_project.id, actual_sha) - log.info('CI passed!') - time.sleep(2) - sha_now = Commit.last_on_branch(source_project.id, merge_request.source_branch, api).id # Make sure no-one managed to race and push to the branch in the # meantime, because we're about to impersonate the approvers, and @@ -133,13 +128,18 @@ class MergeJob(object): # Re-approve the merge request, in case us pushing it has removed # approvals. Note that there is a bit of a race; effectively # approval can't be withdrawn after we've pushed (resetting - # approvals) and CI runs. + # approvals) if self.opts.reapprove: # approving is not idempotent, so we need to check first that there are no approvals, # otherwise we'll get a failure on trying to re-instate the previous approvals current_approvals = merge_request.fetch_approvals() if not current_approvals.sufficient: approvals.reapprove() + + if source_project.only_allow_merge_if_pipeline_succeeds: + self.wait_for_ci_to_pass(source_project.id, actual_sha) + log.info('CI passed!') + time.sleep(2) try: merge_request.accept(remove_branch=True, sha=actual_sha) except gitlab.NotAcceptable as err:
smarkets/marge-bot
48d0576a978af8b71f4971926e345d7d1425a8c0
diff --git a/tests/test_app.py b/tests/test_app.py index d8a4705..ed8e64b 100644 --- a/tests/test_app.py +++ b/tests/test_app.py @@ -1,10 +1,141 @@ +import contextlib import datetime +import os +import re +import shlex +import unittest.mock as mock +from functools import wraps -from marge.app import time_interval +import pytest + +import marge.app as app +import marge.bot as bot +import marge.interval as interval +import marge.job as job + +import tests.gitlab_api_mock as gitlab_mock +from tests.test_user import INFO as user_info + + [email protected] +def env(**kwargs): + original = os.environ.copy() + + os.environ.clear() + for k, v in kwargs.items(): + os.environ[k] = v + + yield + + os.environ.clear() + for k, v in original.items(): + os.environ[k] = v + + [email protected] +def main(cmdline=''): + def api_mock(gitlab_url, auth_token): + assert gitlab_url == 'http://foo.com' + assert auth_token in ('NON-ADMIN-TOKEN', 'ADMIN-TOKEN') + api = gitlab_mock.Api(gitlab_url=gitlab_url, auth_token=auth_token, initial_state='initial') + user_info_for_token = dict(user_info, is_admin=auth_token == 'ADMIN-TOKEN') + api.add_user(user_info_for_token, is_current=True) + return api + + class DoNothingBot(bot.Bot): + instance = None + + def start(self): + assert self.__class__.instance is None + self.__class__.instance = self + + @property + def config(self): + return self._config + + with mock.patch('marge.bot.Bot', new=DoNothingBot), mock.patch('marge.gitlab.Api', new=api_mock): + app.main(args=shlex.split(cmdline)) + the_bot = DoNothingBot.instance + assert the_bot is not None + yield the_bot + + +def test_default_values(): + with env(MARGE_AUTH_TOKEN="NON-ADMIN-TOKEN", MARGE_SSH_KEY="KEY", MARGE_GITLAB_URL='http://foo.com'): + with main() as bot: + assert bot.user.info == user_info + assert bot.config.project_regexp == re.compile('.*') + assert bot.config.git_timeout == datetime.timedelta(seconds=120) + assert bot.config.merge_opts == job.MergeJobOptions.default() + +def test_embargo(): + with env(MARGE_AUTH_TOKEN="NON-ADMIN-TOKEN", MARGE_SSH_KEY="KEY", MARGE_GITLAB_URL='http://foo.com'): + with main('--embargo="Fri 1pm-Mon 7am"') as bot: + assert bot.config.merge_opts == job.MergeJobOptions.default( + embargo=interval.IntervalUnion.from_human('Fri 1pm-Mon 7am'), + ) + +def test_add_tested(): + with env(MARGE_AUTH_TOKEN="NON-ADMIN-TOKEN", MARGE_SSH_KEY="KEY", MARGE_GITLAB_URL='http://foo.com'): + with main('--add-tested') as bot: + assert bot.config.merge_opts != job.MergeJobOptions.default() + assert bot.config.merge_opts == job.MergeJobOptions.default(add_tested=True) + +def test_add_part_of(): + with env(MARGE_AUTH_TOKEN="NON-ADMIN-TOKEN", MARGE_SSH_KEY="KEY", MARGE_GITLAB_URL='http://foo.com'): + with main('--add-part-of') as bot: + assert bot.config.merge_opts != job.MergeJobOptions.default() + assert bot.config.merge_opts == job.MergeJobOptions.default(add_part_of=True) + +def test_add_reviewers(): + with env(MARGE_AUTH_TOKEN="NON-ADMIN-TOKEN", MARGE_SSH_KEY="KEY", MARGE_GITLAB_URL='http://foo.com'): + with pytest.raises(AssertionError): + with main('--add-reviewers') as bot: + pass + + with env(MARGE_AUTH_TOKEN="ADMIN-TOKEN", MARGE_SSH_KEY="KEY", MARGE_GITLAB_URL='http://foo.com'): + with main('--add-reviewers') as bot: + assert bot.config.merge_opts != job.MergeJobOptions.default() + assert bot.config.merge_opts == job.MergeJobOptions.default(add_reviewers=True) + + +def test_impersonate_approvers(): + with env(MARGE_AUTH_TOKEN="NON-ADMIN-TOKEN", MARGE_SSH_KEY="KEY", 
MARGE_GITLAB_URL='http://foo.com'): + with pytest.raises(AssertionError): + with main('--impersonate-approvers') as bot: + pass + + with env(MARGE_AUTH_TOKEN="ADMIN-TOKEN", MARGE_SSH_KEY="KEY", MARGE_GITLAB_URL='http://foo.com'): + with main('--impersonate-approvers') as bot: + assert bot.config.merge_opts != job.MergeJobOptions.default() + assert bot.config.merge_opts == job.MergeJobOptions.default(reapprove=True) + + +def test_project_regexp(): + with env(MARGE_AUTH_TOKEN="NON-ADMIN-TOKEN", MARGE_SSH_KEY="KEY", MARGE_GITLAB_URL='http://foo.com'): + with main("--project-regexp='foo.*bar'") as bot: + assert bot.config.project_regexp == re.compile('foo.*bar') + +def test_ci_timeout(): + with env(MARGE_AUTH_TOKEN="NON-ADMIN-TOKEN", MARGE_SSH_KEY="KEY", MARGE_GITLAB_URL='http://foo.com'): + with main("--ci-timeout 5m") as bot: + assert bot.config.merge_opts != job.MergeJobOptions.default() + assert bot.config.merge_opts == job.MergeJobOptions.default(ci_timeout=datetime.timedelta(seconds=5*60)) + +def test_deprecated_max_ci_time_in_minutes(): + with env(MARGE_AUTH_TOKEN="NON-ADMIN-TOKEN", MARGE_SSH_KEY="KEY", MARGE_GITLAB_URL='http://foo.com'): + with main("--max-ci-time-in-minutes=5") as bot: + assert bot.config.merge_opts != job.MergeJobOptions.default() + assert bot.config.merge_opts == job.MergeJobOptions.default(ci_timeout=datetime.timedelta(seconds=5*60)) + +def test_git_timeout(): + with env(MARGE_AUTH_TOKEN="NON-ADMIN-TOKEN", MARGE_SSH_KEY="KEY", MARGE_GITLAB_URL='http://foo.com'): + with main("--git-timeout '150 s'") as bot: + assert bot.config.git_timeout == datetime.timedelta(seconds=150) # FIXME: I'd reallly prefer this to be a doctest, but adding --doctest-modules # seems to seriously mess up the test run def test_time_interval(): _900s = datetime.timedelta(0, 900) - assert [time_interval(x) for x in ['15min', '15min', '.25h', '900s']] == [_900s] * 4 + assert [app.time_interval(x) for x in ['15min', '15m', '.25h', '900s']] == [_900s] * 4 diff --git a/tests/test_job.py b/tests/test_job.py index e9b0639..4e9031b 100644 --- a/tests/test_job.py +++ b/tests/test_job.py @@ -88,6 +88,11 @@ class MockLab(object): Ok(_commit(id=rewritten_sha, status='success')), from_state=['passed', 'merged'], ) + api.add_transition( + GET('/projects/1234/repository/branches/useless_new_feature'), + Ok({'commit': _commit(id=rewritten_sha, status='running')}), + from_state='pushed', + ) api.add_transition( GET('/projects/1234/repository/branches/useless_new_feature'), Ok({'commit': _commit(id=rewritten_sha, status='success')}), @@ -192,14 +197,14 @@ class TestRebaseAndAccept(object): api.add_transition( GET('/projects/1234/repository/branches/useless_new_feature'), Ok({'commit': _commit(id=new_branch_head_sha, status='success')}), - from_state='passed', to_state='passed_but_head_changed' + from_state='pushed', to_state='pushed_but_head_changed' ) with patch('marge.job.push_rebased_and_rewritten_version', side_effect=mocklab.push_rebased): with mocklab.expected_failure("Someone pushed to branch while we were trying to merge"): job = self.make_job(marge.job.MergeJobOptions.default(add_tested=True, add_reviewers=False)) job.execute() - assert api.state == 'passed_but_head_changed' + assert api.state == 'pushed_but_head_changed' assert api.notes == ["I couldn't merge this branch: Someone pushed to branch while we were trying to merge"] def test_succeeds_second_time_if_master_moved(self, time_sleep):
Re-approvals only applied after successful CI run There is a comment related to this within `marge/job.py`, included here for context: ``` # Re-approve the merge request, in case us pushing it has removed # approvals. Note that there is a bit of a race; effectively # approval can't be withdrawn after we've pushed (resetting # approvals) and CI runs. ``` Occasionally CI may fail due to transient network issues that are unrelated to the change made. In this case, Marge will error out and not bother attempting to reapply any approvals. GitLab doesn't remove approvals on CI failure, so it doesn't quite make sense that this happens with Marge. This also applies to any potential exception that might occur between the force push and applying approvals: we need to restart Marge and then manually approve again. I'm unaware as to whether there is a historical reason for why approvals are reapplied when they are, but could they not be applied immediately after the rebase?
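The patch above resolves this by reordering the steps: re-approval happens right after the push (which is what resets approvals), and only then does marge block on CI. A schematic sketch, with marge-bot's helpers passed in as callables since this is an outline rather than the real method:

```
def push_then_merge(merge_request, approvals, *, reapprove,
                    push_branch, wait_for_ci, ci_required):
    # 1. Push the rebased branch; on GitLab this resets approvals.
    actual_sha = push_branch(merge_request)

    # 2. Re-instate approvals immediately, so a transient CI failure no
    #    longer leaves the MR stripped of them. Approving is not
    #    idempotent, hence the sufficiency check first.
    if reapprove and not merge_request.fetch_approvals().sufficient:
        approvals.reapprove()

    # 3. Only now wait for CI, then merge.
    if ci_required:
        wait_for_ci(actual_sha)
    merge_request.accept(remove_branch=True, sha=actual_sha)
```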
0.0
48d0576a978af8b71f4971926e345d7d1425a8c0
[ "tests/test_app.py::test_default_values", "tests/test_app.py::test_embargo", "tests/test_app.py::test_add_tested", "tests/test_app.py::test_add_part_of", "tests/test_app.py::test_add_reviewers", "tests/test_app.py::test_impersonate_approvers", "tests/test_app.py::test_project_regexp", "tests/test_app.py::test_ci_timeout", "tests/test_app.py::test_deprecated_max_ci_time_in_minutes", "tests/test_app.py::test_git_timeout", "tests/test_job.py::TestRebaseAndAccept::test_fails_on_not_acceptable_if_master_did_not_move" ]
[ "tests/test_app.py::test_time_interval", "tests/test_job.py::TestRebaseAndAccept::test_succeeds_first_time", "tests/test_job.py::TestRebaseAndAccept::test_succeeds_second_time_if_master_moved", "tests/test_job.py::TestRebaseAndAccept::test_handles_races_for_merging", "tests/test_job.py::TestRebaseAndAccept::test_handles_request_becoming_wip_after_push", "tests/test_job.py::TestRebaseAndAccept::test_guesses_git_hook_error_on_merge_refusal", "tests/test_job.py::TestRebaseAndAccept::test_tells_explicitly_that_gitlab_refused_to_merge", "tests/test_job.py::TestRebaseAndAccept::test_wont_merge_wip_stuff", "tests/test_job.py::TestRebaseAndAccept::test_wont_merge_branches_with_autosquash_if_rewriting", "tests/test_job.py::TestMergeJobOptions::test_default", "tests/test_job.py::TestMergeJobOptions::test_default_ci_time" ]
{ "failed_lite_validators": [ "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false }
2017-10-15 18:24:53+00:00
bsd-3-clause
5,558
smarkets__marge-bot-80
diff --git a/marge/git.py b/marge/git.py index fa7c68a..2400001 100644 --- a/marge/git.py +++ b/marge/git.py @@ -84,7 +84,7 @@ class Repo(namedtuple('Repo', 'remote_url local_path ssh_key_file timeout')): def _fuse_branch(self, strategy, branch, target_branch, source_repo_url=None): assert source_repo_url or branch != target_branch, branch - self.git('fetch', 'origin') + self.git('fetch', '--prune', 'origin') if source_repo_url: # "upsert" remote 'source' and fetch it try: @@ -92,7 +92,7 @@ class Repo(namedtuple('Repo', 'remote_url local_path ssh_key_file timeout')): except GitError: pass self.git('remote', 'add', 'source', source_repo_url) - self.git('fetch', 'source') + self.git('fetch', '--prune', 'source') self.git('checkout', '-B', branch, 'source/' + branch, '--') else: self.git('checkout', '-B', branch, 'origin/' + branch, '--') diff --git a/marge/job.py b/marge/job.py index 82d80f2..b1022b5 100644 --- a/marge/job.py +++ b/marge/job.py @@ -37,7 +37,7 @@ class MergeJob(object): return state = merge_request.state - if state not in ('opened', 'reopened'): + if state not in ('opened', 'reopened', 'locked'): if state in ('merged', 'closed'): log.info('The merge request is already %s!', state) else: @@ -236,7 +236,7 @@ class MergeJob(object): return # success! if merge_request.state == 'closed': raise CannotMerge('someone closed the merge request while merging!') - assert merge_request.state in ('opened', 'reopened'), merge_request.state + assert merge_request.state in ('opened', 'reopened', 'locked'), merge_request.state log.info('Giving %s more secs for !%s to be merged...', waiting_time_in_secs, merge_request.iid) time.sleep(waiting_time_in_secs)
smarkets/marge-bot
600eaf337d143656bb44d710331119ea57c7a3e6
diff --git a/tests/test_git.py b/tests/test_git.py index 4569d03..77e5f31 100644 --- a/tests/test_git.py +++ b/tests/test_git.py @@ -38,7 +38,7 @@ class TestRepo(object): self.repo.rebase('feature_branch', 'master_of_the_universe') assert get_calls(mocked_run) == [ - 'git -C /tmp/local/path fetch origin', + 'git -C /tmp/local/path fetch --prune origin', 'git -C /tmp/local/path checkout -B feature_branch origin/feature_branch --', 'git -C /tmp/local/path rebase origin/master_of_the_universe', 'git -C /tmp/local/path rev-parse HEAD' @@ -48,7 +48,7 @@ class TestRepo(object): self.repo.merge('feature_branch', 'master_of_the_universe') assert get_calls(mocked_run) == [ - 'git -C /tmp/local/path fetch origin', + 'git -C /tmp/local/path fetch --prune origin', 'git -C /tmp/local/path checkout -B feature_branch origin/feature_branch --', 'git -C /tmp/local/path merge origin/master_of_the_universe', 'git -C /tmp/local/path rev-parse HEAD'
unexpected merge state locked ``` marge-bot_1 | 2018-02-05 12:39:47,026 INFO Commit id to merge 'bb052510c9a6357fe733b793f4c1f5b38a32e89e' (into: 'aa5d3b11e3768d34f74bde8de7095a629d76cba1') marge-bot_1 | 2018-02-05 12:39:53,476 INFO Giving 10 more secs for !125 to be merged... marge-bot_1 | 2018-02-05 12:40:03,757 INFO Giving 10 more secs for !125 to be merged... marge-bot_1 | 2018-02-05 12:40:14,265 INFO Giving 10 more secs for !125 to be merged... marge-bot_1 | 2018-02-05 12:40:24,748 INFO Giving 10 more secs for !125 to be merged... marge-bot_1 | 2018-02-05 12:40:35,059 INFO Giving 10 more secs for !125 to be merged... marge-bot_1 | 2018-02-05 12:40:45,633 INFO Giving 10 more secs for !125 to be merged... marge-bot_1 | 2018-02-05 12:40:55,966 INFO Giving 10 more secs for !125 to be merged... marge-bot_1 | 2018-02-05 12:41:06,263 INFO Giving 10 more secs for !125 to be merged... marge-bot_1 | 2018-02-05 12:41:16,685 INFO Giving 10 more secs for !125 to be merged... marge-bot_1 | 2018-02-05 12:41:27,039 INFO Giving 10 more secs for !125 to be merged... marge-bot_1 | 2018-02-05 12:41:37,310 INFO Giving 10 more secs for !125 to be merged... marge-bot_1 | 2018-02-05 12:41:47,509 INFO Giving 10 more secs for !125 to be merged... marge-bot_1 | 2018-02-05 12:41:57,879 ERROR Unexpected Exception marge-bot_1 | Traceback (most recent call last): marge-bot_1 | File "/nix/store/v4xzqxg6yvhvdqk6w453pig8v7r69v1c-python3.6-marge-0.5.1/lib/python3.6/site-packages/marge/job.py", line 55, in execute marge-bot_1 | self.rebase_and_accept(approvals) marge-bot_1 | File "/nix/store/v4xzqxg6yvhvdqk6w453pig8v7r69v1c-python3.6-marge-0.5.1/lib/python3.6/site-packages/marge/job.py", line 198, in rebase_and_accept marge-bot_1 | self.wait_for_branch_to_be_merged() marge-bot_1 | File "/nix/store/v4xzqxg6yvhvdqk6w453pig8v7r69v1c-python3.6-marge-0.5.1/lib/python3.6/site-packages/marge/job.py", line 238, in wait_for_branch_to_be_merged marge-bot_1 | assert merge_request.state in ('opened', 'reopened'), merge_request.state marge-bot_1 | AssertionError: locked marge-bot_1 | Traceback (most recent call last): marge-bot_1 | File "/nix/store/v4xzqxg6yvhvdqk6w453pig8v7r69v1c-python3.6-marge-0.5.1/bin/.marge.app-wrapped", line 4, in <module> marge-bot_1 | marge.app.main() marge-bot_1 | File "/nix/store/v4xzqxg6yvhvdqk6w453pig8v7r69v1c-python3.6-marge-0.5.1/lib/python3.6/site-packages/marge/app.py", line 221, in main marge-bot_1 | marge_bot.start() marge-bot_1 | File "/nix/store/v4xzqxg6yvhvdqk6w453pig8v7r69v1c-python3.6-marge-0.5.1/lib/python3.6/site-packages/marge/bot.py", line 39, in start marge-bot_1 | self._run(repo_manager) marge-bot_1 | File "/nix/store/v4xzqxg6yvhvdqk6w453pig8v7r69v1c-python3.6-marge-0.5.1/lib/python3.6/site-packages/marge/bot.py", line 108, in _run marge-bot_1 | merge_job.execute() marge-bot_1 | File "/nix/store/v4xzqxg6yvhvdqk6w453pig8v7r69v1c-python3.6-marge-0.5.1/lib/python3.6/site-packages/marge/job.py", line 55, in execute marge-bot_1 | self.rebase_and_accept(approvals) marge-bot_1 | File "/nix/store/v4xzqxg6yvhvdqk6w453pig8v7r69v1c-python3.6-marge-0.5.1/lib/python3.6/site-packages/marge/job.py", line 198, in rebase_and_accept marge-bot_1 | self.wait_for_branch_to_be_merged() marge-bot_1 | File "/nix/store/v4xzqxg6yvhvdqk6w453pig8v7r69v1c-python3.6-marge-0.5.1/lib/python3.6/site-packages/marge/job.py", line 238, in wait_for_branch_to_be_merged marge-bot_1 | assert merge_request.state in ('opened', 'reopened'), merge_request.state marge-bot_1 | AssertionError: locked ``` here's how it looked at 
the GitLab web UI: ![image](https://user-images.githubusercontent.com/199095/35805166-7aa99aa6-0a83-11e8-8b5b-7ffdd88c4c50.png)
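What the log shows is GitLab reporting the transient `locked` state while it performs the merge, which the assertion in `wait_for_branch_to_be_merged` did not anticipate. The patch simply widens the set of in-flight states; the essence, as a standalone sketch:

```
IN_PROGRESS_STATES = ('opened', 'reopened', 'locked')

def classify_merge_state(state):
    # 'locked' is transient: GitLab holds the MR locked while merging,
    # so keep polling instead of crashing on the assertion.
    if state == 'merged':
        return 'done'
    if state == 'closed':
        raise RuntimeError('someone closed the merge request while merging!')
    assert state in IN_PROGRESS_STATES, state
    return 'keep waiting'
```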
0.0
600eaf337d143656bb44d710331119ea57c7a3e6
[ "tests/test_git.py::TestRepo::test_rebase_success", "tests/test_git.py::TestRepo::test_merge_success" ]
[ "tests/test_git.py::PYLINT", "tests/test_git.py::TestRepo::test_clone", "tests/test_git.py::TestRepo::test_config_user_info", "tests/test_git.py::TestRepo::test_reviewer_tagging_success", "tests/test_git.py::TestRepo::test_reviewer_tagging_failure", "tests/test_git.py::TestRepo::test_rebase_same_branch", "tests/test_git.py::TestRepo::test_merge_same_branch", "tests/test_git.py::TestRepo::test_remove_branch", "tests/test_git.py::TestRepo::test_remove_master_branch_fails", "tests/test_git.py::TestRepo::test_push_force", "tests/test_git.py::TestRepo::test_push_force_fails_on_dirty", "tests/test_git.py::TestRepo::test_push_force_fails_on_untracked", "tests/test_git.py::TestRepo::test_get_commit_hash", "tests/test_git.py::TestRepo::test_passes_ssh_key", "tests/test_git.py::test_filter", "tests/test_git.py::test_filter_fails_on_empty_commit_messages", "tests/test_git.py::test_filter_fails_on_commit_messages_that_are_empty_apart_from_trailers", "tests/test_git.py::test_filter_treats_the_first_commit_line_not_as_a_trailer_unless_it_matches_the_trailer_name_passed_in" ]
{ "failed_lite_validators": [ "has_hyperlinks", "has_git_commit_hash", "has_media", "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false }
2018-02-08 11:30:02+00:00
bsd-3-clause
5,559
smarkets__marge-bot-89
diff --git a/marge/git.py b/marge/git.py index 29a7684..df8cfc5 100644 --- a/marge/git.py +++ b/marge/git.py @@ -107,9 +107,9 @@ class Repo(namedtuple('Repo', 'remote_url local_path ssh_key_file timeout')): raise return self.get_commit_hash() - def remove_branch(self, branch): - assert branch != 'master' - self.git('checkout', 'master', '--') + def remove_branch(self, branch, *, new_current_branch='master'): + assert branch != new_current_branch + self.git('checkout', new_current_branch, '--') self.git('branch', '-D', branch) def push_force(self, branch, source_repo_url=None): diff --git a/marge/job.py b/marge/job.py index 613a649..c7196c9 100644 --- a/marge/job.py +++ b/marge/job.py @@ -363,8 +363,8 @@ def update_from_target_branch_and_push( # A failure to clean up probably means something is fucked with the git repo # and likely explains any previous failure, so it will better to just # raise a GitError - if source_branch != 'master': - repo.remove_branch(source_branch) + if source_branch != target_branch: + repo.remove_branch(source_branch, new_current_branch=target_branch) else: assert source_repo_url is not None
smarkets/marge-bot
9986daf294673ad58a06c7ca19125bc20c144c96
diff --git a/tests/test_git.py b/tests/test_git.py index 9ceaa1a..2ebc79c 100644 --- a/tests/test_git.py +++ b/tests/test_git.py @@ -112,6 +112,13 @@ class TestRepo(object): assert get_calls(mocked_run) == [] def test_remove_branch(self, mocked_run): + self.repo.remove_branch('some_branch', new_current_branch='devel') + assert get_calls(mocked_run) == [ + 'git -C /tmp/local/path checkout devel --', + 'git -C /tmp/local/path branch -D some_branch', + ] + + def test_remove_branch_default(self, mocked_run): self.repo.remove_branch('some_branch') assert get_calls(mocked_run) == [ 'git -C /tmp/local/path checkout master --', @@ -120,7 +127,7 @@ class TestRepo(object): def test_remove_master_branch_fails(self, unused_mocked_run): with pytest.raises(AssertionError): - self.repo.remove_branch('master') + self.repo.remove_branch('meister', new_current_branch='meister') def test_push_force(self, mocked_run): mocked_run.return_value = mocked_stdout(b'')
Don't assume a branch called `master` exists when removing the merged branch A merge request should not always be merged into `master`. E.g., in some git workflows, an MR should be merged into a `develop` branch. I propose adding a configuration parameter `--branch-name-merge-into <string name>` for such situations.
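The merged patch takes a slightly different route than the proposed flag: `remove_branch` gains a keyword-only parameter so callers can name the branch to switch back to, defaulting to `master` for compatibility. Its essence, with `repo.git` standing in for the wrapper's command runner:

```
def remove_branch(repo, branch, *, new_current_branch='master'):
    # Never delete the branch we are about to check out.
    assert branch != new_current_branch
    repo.git('checkout', new_current_branch, '--')
    repo.git('branch', '-D', branch)
```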
0.0
9986daf294673ad58a06c7ca19125bc20c144c96
[ "tests/test_git.py::TestRepo::test_remove_branch", "tests/test_git.py::TestRepo::test_remove_master_branch_fails" ]
[ "tests/test_git.py::flake-8::FLAKE8", "tests/test_git.py::TestRepo::test_clone", "tests/test_git.py::TestRepo::test_config_user_info", "tests/test_git.py::TestRepo::test_rebase_success", "tests/test_git.py::TestRepo::test_merge_success", "tests/test_git.py::TestRepo::test_reviewer_tagging_success", "tests/test_git.py::TestRepo::test_reviewer_tagging_failure", "tests/test_git.py::TestRepo::test_rebase_same_branch", "tests/test_git.py::TestRepo::test_merge_same_branch", "tests/test_git.py::TestRepo::test_remove_branch_default", "tests/test_git.py::TestRepo::test_push_force", "tests/test_git.py::TestRepo::test_push_force_fails_on_dirty", "tests/test_git.py::TestRepo::test_push_force_fails_on_untracked", "tests/test_git.py::TestRepo::test_get_commit_hash", "tests/test_git.py::TestRepo::test_passes_ssh_key", "tests/test_git.py::test_filter", "tests/test_git.py::test_filter_fails_on_empty_commit_messages", "tests/test_git.py::test_filter_fails_on_commit_messages_that_are_empty_apart_from_trailers", "tests/test_git.py::test_filter_ignore_first_line_trailer_in_commit_message_if_not_set" ]
{ "failed_lite_validators": [ "has_many_modified_files" ], "has_test_patch": true, "is_lite": false }
2018-03-03 15:10:37+00:00
bsd-3-clause
5,560
smarr__ReBench-163
diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index e155817..9455a1d 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -54,7 +54,7 @@ jobs: run: | pip install pylint pylint rebench - if: startsWith(matrix.python-version, '3.') + if: matrix.python-version == '3.9' - name: Upload coverage results to Coveralls run: coveralls diff --git a/rebench/environment.py b/rebench/environment.py index 7c9775b..54f0b41 100644 --- a/rebench/environment.py +++ b/rebench/environment.py @@ -33,14 +33,17 @@ def _exec(cmd): _source = None -def determine_source_details(): +def determine_source_details(configurator): global _source # pylint: disable=global-statement if _source: return _source result = {} + git_cmd = ['git'] + if configurator and configurator.options and configurator.options.git_repo: + git_cmd += ['-C', configurator.options.git_repo] - is_git_repo = _exec(['git', 'rev-parse']) is not None + is_git_repo = _exec(git_cmd + ['rev-parse']) is not None if not is_git_repo: result['repoURL'] = None result['branchOrTag'] = None @@ -53,7 +56,7 @@ def determine_source_details(): _source = result return result - repo_url = _exec(['git', 'ls-remote', '--get-url']) if is_git_repo else None + repo_url = _exec(git_cmd + ['ls-remote', '--get-url']) if is_git_repo else None if repo_url is None: repo_url = '' @@ -64,13 +67,13 @@ def determine_source_details(): netloc="{}@{}".format(parsed.username, parsed.hostname)) result['repoURL'] = _encode_str(parsed.geturl()) - result['branchOrTag'] = _exec(['git', 'show', '-s', '--format=%D', 'HEAD']) - result['commitId'] = _exec(['git', 'rev-parse', 'HEAD']) - result['commitMsg'] = _exec(['git', 'show', '-s', '--format=%B', 'HEAD']) - result['authorName'] = _exec(['git', 'show', '-s', '--format=%aN', 'HEAD']) - result['committerName'] = _exec(['git', 'show', '-s', '--format=%cN', 'HEAD']) - result['authorEmail'] = _exec(['git', 'show', '-s', '--format=%aE', 'HEAD']) - result['committerEmail'] = _exec(['git', 'show', '-s', '--format=%cE', 'HEAD']) + result['branchOrTag'] = _exec(git_cmd + ['show', '-s', '--format=%D', 'HEAD']) + result['commitId'] = _exec(git_cmd + ['rev-parse', 'HEAD']) + result['commitMsg'] = _exec(git_cmd + ['show', '-s', '--format=%B', 'HEAD']) + result['authorName'] = _exec(git_cmd + ['show', '-s', '--format=%aN', 'HEAD']) + result['committerName'] = _exec(git_cmd + ['show', '-s', '--format=%cN', 'HEAD']) + result['authorEmail'] = _exec(git_cmd + ['show', '-s', '--format=%aE', 'HEAD']) + result['committerEmail'] = _exec(git_cmd + ['show', '-s', '--format=%cE', 'HEAD']) _source = result return result diff --git a/rebench/persistence.py b/rebench/persistence.py index e736e5a..bfb37b7 100644 --- a/rebench/persistence.py +++ b/rebench/persistence.py @@ -50,18 +50,23 @@ class DataStore(object): def get(self, filename, configurator): if filename not in self._files: - source = determine_source_details() + source = determine_source_details(configurator) + if configurator.use_rebench_db and source['commitId'] is None: + raise UIError("Reporting to ReBenchDB is enabled, " + + "but failed to obtain source details. " + + "If ReBench is run outside of the relevant repo " + + "set the path with --git-repo", None) if configurator.use_rebench_db and 'repo_url' in configurator.rebench_db: source['repoURL'] = configurator.rebench_db['repo_url'] if configurator.options and configurator.options.branch: source['branchOrTag'] = configurator.options.branch - p = _FilePersistence(filename, self, configurator.discard_old_data, self._ui) + p = _FilePersistence(filename, self, configurator, self._ui) self._ui.debug_output_info('ReBenchDB enabled: {e}\n', e=configurator.use_rebench_db) if configurator.use_rebench_db: - db = _ReBenchDB(configurator.get_rebench_db_connector(), self, self._ui) + db = _ReBenchDB(configurator, self, self._ui) p = _CompositePersistence(p, db) self._files[filename] = p @@ -165,7 +170,7 @@ class _CompositePersistence(_AbstractPersistence): class _FilePersistence(_ConcretePersistence): - def __init__(self, data_filename, data_store, discard_old_data, ui): + def __init__(self, data_filename, data_store, configurator, ui): super(_FilePersistence, self).__init__(data_store, ui) if not data_filename: raise ValueError("DataPointPersistence expects a filename " + @@ -173,13 +178,15 @@ class _FilePersistence(_ConcretePersistence): self._data_filename = data_filename self._file = None - if discard_old_data: + if configurator.discard_old_data: self._discard_old_data() self._lock = Lock() self._read_start_time() if not self._start_time: self._start_time = get_current_time() + self._configurator = configurator + def _discard_old_data(self): self._truncate_file(self._data_filename) @@ -294,7 +301,8 @@ class _FilePersistence(_ConcretePersistence): shebang_line = "#!%s\n" % (subprocess.list2cmdline(sys.argv)) shebang_line += _START_TIME_LINE + self._start_time + "\n" shebang_line += "# Environment: " + json.dumps(determine_environment()) + "\n" - shebang_line += "# Source: " + json.dumps(determine_source_details()) + "\n" + shebang_line += "# Source: " + json.dumps( determine_source_details(self._configurator)) + "\n" try: # pylint: disable-next=unspecified-encoding,consider-using-with @@ -339,10 +347,11 @@ class _FilePersistence(_ConcretePersistence): class _ReBenchDB(_ConcretePersistence): - def __init__(self, rebench_db, data_store, ui): + def __init__(self, configurator, data_store, ui): super(_ReBenchDB, self).__init__(data_store, ui) # TODO: extract common code, possibly - self._rebench_db = rebench_db + self._configurator = configurator + self._rebench_db = configurator.get_rebench_db_connector() self._lock = Lock() @@ -406,7 +415,7 @@ class _ReBenchDB(_ConcretePersistence): 'criteria': criteria_index, 'env': determine_environment(), 'startTime': self._start_time, - 'source': determine_source_details()}, num_measurements) + 'source': determine_source_details(self._configurator)}, num_measurements) def close(self): with self._lock: diff --git a/rebench/rebench.py b/rebench/rebench.py index 462e00e..0d7fcb9 100755 --- a/rebench/rebench.py +++ b/rebench/rebench.py @@ -178,6 +178,10 @@ Argument: default=True, help='Override configuration and ' 'disable any reporting to Codespeed and ReBenchDB.') + codespeed.add_argument('--git-repo', dest='git_repo', default=None, + help='Path to the git repository with the source for the ' + + 'experiment. This is useful when the experiment is run ' + + 'from a different location, for instance a RAM disk, or tmpfs.') rebench_db = parser.add_argument_group( 'Reporting to ReBenchDB',
smarr/ReBench
7473a6874807943bb368fa04dc4246d60b65a18f
diff --git a/rebench/tests/environment_test.py b/rebench/tests/environment_test.py index f960314..229176b 100644 --- a/rebench/tests/environment_test.py +++ b/rebench/tests/environment_test.py @@ -8,7 +8,7 @@ from ..ui import TestDummyUI class ReBenchTestCase(TestCase): def test_source_details(self): - details = determine_source_details() + details = determine_source_details(None) self.assertEqual(len(details['commitId']), 40) self.assertGreater(len(details['committerName']), 0) self.assertGreater(len(details['committerEmail']), 0)
ReBench with Reporting does not fail when source info can't be obtained For reporting, it seems important to have the source details reported to ReBenchDB, since it is one of the key criteria used there to track the origin of data. I think it's plausible to fail ReBench when it is supposed to report the results and doesn't find source info. At the same time, it is useful to add a flag for passing the location of the repository where the source info can be obtained. The original issue appeared when moving execution to the tmpfs on Linux. Desired changes: - [ ] fail ReBench when it is reporting to ReBenchDB but doesn't have source details - [ ] add a commandline argument to configure the location where to find sources
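For illustration, a minimal sketch of the behavior this issue asks for: query git in an explicitly supplied directory and fail hard when reporting needs the result. The helper names and the plain `RuntimeError` are placeholders; the actual patch threads a configurator through `determine_source_details` and raises ReBench's `UIError`.

```python
import subprocess

def get_commit_id(git_repo=None):
    # `git -C <path>` behaves as if git had been started in <path>;
    # without --git-repo we fall back to the current working directory.
    cmd = ['git']
    if git_repo:
        cmd += ['-C', git_repo]
    try:
        out = subprocess.check_output(cmd + ['rev-parse', 'HEAD'],
                                      stderr=subprocess.DEVNULL)
        return out.strip().decode('utf-8')
    except (subprocess.CalledProcessError, OSError):
        return None

def require_source_details(reporting_enabled, git_repo=None):
    commit_id = get_commit_id(git_repo)
    if reporting_enabled and commit_id is None:
        raise RuntimeError("reporting to ReBenchDB is enabled, but no "
                           "source details could be obtained; set --git-repo")
    return commit_id
```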
0.0
7473a6874807943bb368fa04dc4246d60b65a18f
[ "rebench/tests/environment_test.py::ReBenchTestCase::test_source_details" ]
[ "rebench/tests/environment_test.py::ReBenchTestCase::test_environment" ]
{ "failed_lite_validators": [ "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false }
2021-09-07 23:36:57+00:00
mit
5,561
smarr__ReBench-202
diff --git a/docs/extensions.md b/docs/extensions.md index 2642d2d..a0a17fa 100644 --- a/docs/extensions.md +++ b/docs/extensions.md @@ -16,6 +16,7 @@ ReBench currently provides builtin support for the following benchmark harnesses - `ValidationLog`: the format used by [SOMns](https://github.com/smarr/SOMns)'s ImpactHarness - `Time`: a harness that uses `/usr/bin/time` automatically + ### `PlainSecondsLog` This adapter attempts to read every line of program output as a millisecond @@ -35,6 +36,7 @@ Implementation Notes: - Python's `float()` function is used for parsing + ### `ReBenchLog` The ReBenchLog parser is the most commonly used and has most features. @@ -80,6 +82,24 @@ Implementation Notes: the following regular expression should match `r"^(?:.*: )?([^\s]+): ([^:]{1,30}):\s*([0-9]+)([a-zA-Z]+)")` + +## `Time` + +The `Time` adapter uses Unix's `/usr/bin/time` command. +On Linux, or more generally the platforms that support it, it will also use the +`-f` switch of the `time` command to record the maximum resident set size, +i.e., the maximum amount of memory the program used. + +Example configuration for a suite: + +```yaml + Suite: + gauge_adapter: Time + benchmarks: + - Bench1 +``` + + ## Supporting other Benchmark Harnesses To add support for your own harness, check the `rebench.interop` module. diff --git a/rebench/executor.py b/rebench/executor.py index 42d0a2b..58651e9 100644 --- a/rebench/executor.py +++ b/rebench/executor.py @@ -24,15 +24,13 @@ from collections import deque from math import floor from multiprocessing import cpu_count import os -import pkgutil import random import subprocess -import sys from threading import Thread, RLock from time import time from . import subprocess_with_timeout as subprocess_timeout -from .interop.adapter import ExecutionDeliveredNoResults +from .interop.adapter import ExecutionDeliveredNoResults, instantiate_adapter from .ui import escape_braces @@ -444,22 +442,7 @@ class Executor(object): return terminate def _get_gauge_adapter_instance(self, adapter_name): - adapter_name += "Adapter" - - root = sys.modules['rebench.interop'].__path__ - - for _, name, _ in pkgutil.walk_packages(root): - # depending on how ReBench was executed, name might one of the two - try: - mod = __import__("rebench.interop." + name, fromlist=adapter_name) - except ImportError: - try: - mod = __import__("interop." + name, fromlist=adapter_name) - except ImportError: - mod = None - if mod is not None and hasattr(mod, adapter_name): - return getattr(mod, adapter_name)(self._include_faulty, self) - return None + return instantiate_adapter(adapter_name, self._include_faulty, self) def _generate_data_point(self, cmdline, gauge_adapter, run_id, termination_check): diff --git a/rebench/interop/adapter.py b/rebench/interop/adapter.py index 8b54b54..c1097fa 100644 --- a/rebench/interop/adapter.py +++ b/rebench/interop/adapter.py @@ -18,6 +18,8 @@ # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN # THE SOFTWARE. import re +import pkgutil +import sys class GaugeAdapter(object): @@ -78,3 +80,24 @@ class OutputNotParseable(ExecutionDeliveredNoResults): class ResultsIndicatedAsInvalid(ExecutionDeliveredNoResults): pass + + +def instantiate_adapter(name, include_faulty, executor): + adapter_name = name + "Adapter" + root = sys.modules['rebench.interop'].__path__ + + for _, module_name, _ in pkgutil.walk_packages(root): + # depending on how ReBench was executed, name might one of the two + try: + mod = __import__("rebench.interop." + module_name, fromlist=adapter_name) + except ImportError: + try: + mod = __import__("interop." + module_name, fromlist=adapter_name) + except ImportError: + mod = None + if mod is not None: + for key in dir(mod): + if key.lower() == adapter_name.lower(): + return getattr(mod, key)(include_faulty, executor) + + return None diff --git a/rebench/interop/rebench_log_adapter.py b/rebench/interop/rebench_log_adapter.py index ef73ed1..d06e767 100644 --- a/rebench/interop/rebench_log_adapter.py +++ b/rebench/interop/rebench_log_adapter.py @@ -30,6 +30,8 @@ class RebenchLogAdapter(GaugeAdapter): """RebenchLogPerformance is the standard log parser of ReBench. It reads a simple log format, which includes the number of iterations of a benchmark and its runtime in microseconds. + + Note: regular expressions are documented in /docs/extensions.md """ re_log_line = re.compile( r"^(?:.*: )?([^\s]+)( [\w\.]+)?: iterations=([0-9]+) runtime: ([0-9]+)([mu])s")
smarr/ReBench
08c736b78808a67189925fd31484647926341d8c
diff --git a/rebench/tests/interop/adapter_test.py b/rebench/tests/interop/adapter_test.py new file mode 100644 index 0000000..81c4567 --- /dev/null +++ b/rebench/tests/interop/adapter_test.py @@ -0,0 +1,22 @@ +from unittest import TestCase +from ...interop.adapter import instantiate_adapter + + +class AdapterTest(TestCase): + def test_load_all_known_adapters(self): + self.assertIsNotNone(instantiate_adapter("JMH", False, None)) + self.assertIsNotNone(instantiate_adapter("Multivariate", False, None)) + self.assertIsNotNone(instantiate_adapter("Perf", False, None)) + self.assertIsNotNone(instantiate_adapter("PlainSecondsLog", False, None)) + self.assertIsNotNone(instantiate_adapter("RebenchLog", False, None)) + self.assertIsNotNone(instantiate_adapter("SavinaLog", False, None)) + self.assertIsNotNone(instantiate_adapter("Test", False, None)) + self.assertIsNotNone(instantiate_adapter("TestExecutor", False, None)) + self.assertIsNotNone(instantiate_adapter("Time", False, None)) + self.assertIsNotNone(instantiate_adapter("ValidationLog", False, None)) + + def test_case_insensitive_names(self): + self.assertIsNotNone(instantiate_adapter("RebenchLog", False, None)) + self.assertIsNotNone(instantiate_adapter("ReBenchLog", False, None)) + self.assertIsNotNone(instantiate_adapter("rebenchlog", False, None)) + self.assertIsNotNone(instantiate_adapter("REBENCHLOG", False, None))
`ReBenchLog` gauge adapter doesn't work on case-sensitive filesystems ReBench's custom benchmark log format is defined in [interop/rebench_log_adapter.py](https://github.com/smarr/ReBench/blob/08c736b78808a67189925fd31484647926341d8c/rebench/interop/rebench_log_adapter.py), in the class `RebenchLogAdapter`. The corresponding `benchmark_suites.${benchmark}.gauge_adapter` value is defined in [executor.py](https://github.com/smarr/ReBench/blob/08c736b78808a67189925fd31484647926341d8c/rebench/executor.py#L449): https://github.com/smarr/ReBench/blob/08c736b78808a67189925fd31484647926341d8c/rebench/executor.py#L446-L449 So the correct value is `RebenchLog`. However, the documentation and code reference `ReBenchLog` a number of times. On my Linux-based system, that fails to find the gauge adapter. I hazard that it would work on Windows (but probably not MacOS?). In particular, [`docs/extensions.md`](https://github.com/smarr/ReBench/blob/08c736b78808a67189925fd31484647926341d8c/docs/extensions.md) states the wrong value. https://github.com/smarr/ReBench/blob/08c736b78808a67189925fd31484647926341d8c/docs/extensions.md#L8-L17
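The fix boils down to a case-insensitive lookup: instead of `hasattr(mod, adapter_name)`, scan the module's attributes and compare names case-insensitively. A minimal sketch, where `mod` stands for any imported `rebench.interop` module (the real implementation is `instantiate_adapter` in the patch above, which also instantiates the class):

```python
def find_adapter_class(mod, name):
    # 'ReBenchLog', 'RebenchLog', and 'rebenchlog' all resolve the same class
    wanted = (name + 'Adapter').lower()
    for key in dir(mod):
        if key.lower() == wanted:
            return getattr(mod, key)
    return None
```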
0.0
08c736b78808a67189925fd31484647926341d8c
[ "rebench/tests/interop/adapter_test.py::AdapterTest::test_case_insensitive_names", "rebench/tests/interop/adapter_test.py::AdapterTest::test_load_all_known_adapters" ]
[]
{ "failed_lite_validators": [ "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false }
2023-01-28 14:54:04+00:00
mit
5,562
smarr__ReBench-227
diff --git a/rebench.conf b/rebench.conf index a43c60c..6052139 100644 --- a/rebench.conf +++ b/rebench.conf @@ -17,7 +17,7 @@ benchmark_suites: TestSuite1: gauge_adapter: Test # location: /Users/... - command: TestBenchMarks %(benchmark)s %(input)s %(variable)s + command: TestBenchMarks %(benchmark)s %(input)s %(variable)s something-else input_sizes: [1, 2, 10, 100, 1000] benchmarks: - Bench1 @@ -29,7 +29,7 @@ benchmark_suites: - val2 TestSuite2: gauge_adapter: Test - command: TestBenchMarks %(benchmark)s %(input)s %(variable)s + command: TestBenchMarks %(benchmark)s %(input)s %(variable)s another-thing input_sizes: [1, 2, 10, 100, 1000] cores: [7, 13, 55] benchmarks: diff --git a/rebench/executor.py b/rebench/executor.py index 2ac7163..5a16273 100644 --- a/rebench/executor.py +++ b/rebench/executor.py @@ -508,6 +508,8 @@ class Executor(object): + "{ind}{ind}Return code: %d\n" + "{ind}{ind}max_invocation_time: %s\n") % ( return_code, run_id.max_invocation_time) + elif return_code is None: + msg = "{ind}Run failed. Return code: None\n" else: msg = "{ind}Run failed. Return code: %d\n" % return_code diff --git a/rebench/persistence.py b/rebench/persistence.py index c71426d..081a441 100644 --- a/rebench/persistence.py +++ b/rebench/persistence.py @@ -317,12 +317,16 @@ class _FilePersistence(_ConcretePersistence): shebang_with_metadata += "# Environment: " + json.dumps(determine_environment()) + "\n" shebang_with_metadata += "# Source: " + json.dumps( determine_source_details(self._configurator)) + "\n" - shebang_with_metadata += self._SEP.join(Measurement.get_column_headers()) + "\n" + + csv_header = self._SEP.join(Measurement.get_column_headers()) + "\n" try: # pylint: disable-next=unspecified-encoding,consider-using-with data_file = open(self._data_filename, 'a+') + is_empty = data_file.tell() == 0 data_file.write(shebang_with_metadata) + if is_empty: + data_file.write(csv_header) data_file.flush() return data_file except Exception as err: # pylint: disable=broad-except
smarr/ReBench
cd302d5155c73575c2b158034afcde07d2403c6d
diff --git a/rebench/tests/persistency_test.py b/rebench/tests/persistency_test.py index ef981df..4cfe05a 100644 --- a/rebench/tests/persistency_test.py +++ b/rebench/tests/persistency_test.py @@ -172,27 +172,29 @@ class PersistencyTest(ReBenchTestCase): run.close_files() def test_check_file_lines(self): - ds = DataStore(self.ui) - cnf = Configurator(load_config(self._path + '/persistency.conf'), - ds, self.ui, data_file=self._tmp_file) - ds.load_data(None, False) - ex = Executor(cnf.get_runs(), False, self.ui) - ex.execute() + self._load_config_and_run() + with open(self._tmp_file, 'r') as file: # pylint: disable=unspecified-encoding lines = file.readlines() - command = self.get_line_after_char('#!', lines[0]) - self.assertEqual(command, subprocess.list2cmdline(sys.argv)) - time = self.get_line_after_char('Start:', lines[1]) - self.assertTrue(self.is_valid_time(time)) - json_code = self.get_line_after_char('Environment:', lines[2]) - self.assertTrue(self.is_valid_json(json_code)) - json_code = self.get_line_after_char('Source:', lines[3]) - self.assertTrue(self.is_valid_json(json_code)) - line = lines[4].split("\t") - line[-1] = line[-1].rstrip('\n') - words = Measurement.get_column_headers() - self.assertEqual(line, words) - self.assertEqual(len((lines[5]).split("\t")) ,len(line)) + + command = self.get_line_after_char('#!', lines[0]) + self.assertEqual(command, subprocess.list2cmdline(sys.argv)) + + time = self.get_line_after_char('Start:', lines[1]) + self.assertTrue(self.is_valid_time(time)) + + self.assertIsNotNone(json.loads(self.get_line_after_char('Environment:', lines[2]))) + self.assertIsNotNone(json.loads(self.get_line_after_char('Source:', lines[3]))) + + column_headers = lines[4].split("\t") + # remove the newline character from the last column header + column_headers[-1] = column_headers[-1].rstrip('\n') + + expected_headers = Measurement.get_column_headers() + self.assertEqual(column_headers, expected_headers) + + self.assertEqual(len((lines[5]).split("\t")), len(column_headers), + 'expected same number of column headers as data columns') def get_line_after_char(self, char, line): if char in line: @@ -207,9 +209,27 @@ class PersistencyTest(ReBenchTestCase): except ValueError: return False - def is_valid_json(self, json_str): - try: - json.loads(json_str) - return True - except json.JSONDecodeError: - return False + def _load_config_and_run(self, args=None): + ds = DataStore(self.ui) + cnf = Configurator(load_config(self._path + '/persistency.conf'), + ds, self.ui, args, data_file=self._tmp_file) + ds.load_data(None, False) + ex = Executor(cnf.get_runs(), False, self.ui) + ex.execute() + + def test_check_single_csv_header(self): + """Check that there is only one csv header in the file""" + # first run + self._load_config_and_run() + + # second run, requesting more invocations + opt_parser = ReBench().shell_options() + args = opt_parser.parse_args(['-in', '20', '-R', self._path + '/persistency.conf']) + self._load_config_and_run(args) + + with open(self._tmp_file, 'r') as file: # pylint: disable=unspecified-encoding + lines = file.readlines() + + # count the number of lines starting with 'invocation' + invocation_lines = [line for line in lines if line.startswith('invocation')] + self.assertEqual(len(invocation_lines), 1)
CSV Headers should be added only exactly once, to avoid being confused with data With #220 we are writing a line of headers into the file. We should only do that when the file is initially empty. Otherwise, the header line may be confused as data. @naomiGrew something for next week :)
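The core of the fix is a small check: a file opened in append mode is positioned at its end, so `tell() == 0` means the file is empty and the CSV header still has to be written. A generic sketch of that pattern (path and header are illustrative):

```python
def open_data_file(path, header_line):
    data_file = open(path, 'a+')
    if data_file.tell() == 0:  # append position 0 means the file was empty
        data_file.write(header_line + '\n')
    return data_file
```

On a second run against the same file, `tell()` is non-zero and the header is skipped, which is what the new `test_check_single_csv_header` verifies.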
0.0
cd302d5155c73575c2b158034afcde07d2403c6d
[ "rebench/tests/persistency_test.py::PersistencyTest::test_check_single_csv_header" ]
[ "rebench/tests/persistency_test.py::PersistencyTest::test_check_file_lines", "rebench/tests/persistency_test.py::PersistencyTest::test_data_discarding", "rebench/tests/persistency_test.py::PersistencyTest::test_de_serialization", "rebench/tests/persistency_test.py::PersistencyTest::test_disabled_rebench_db", "rebench/tests/persistency_test.py::PersistencyTest::test_iteration_invocation_semantics", "rebench/tests/persistency_test.py::PersistencyTest::test_rebench_db" ]
{ "failed_lite_validators": [ "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false }
2023-07-17 18:56:10+00:00
mit
5,563
smartcar__python-sdk-82
diff --git a/.releaserc.js b/.releaserc.js new file mode 100644 index 0000000..5c86c28 --- /dev/null +++ b/.releaserc.js @@ -0,0 +1,31 @@ +'use strict'; + +module.exports = { + branches: 'master', + plugins: [ + '@semantic-release/commit-analyzer', + [ + '@google/semantic-release-replace-plugin', + { + replacements: [ + { + files: ['smartcar/__init__.py'], + from: "__version__ = 'semantic-release'", + to: "__version__ = '${nextRelease.version}'", + results: [ + { + file: 'smartcar/__init__.py', + hasChanged: true, + numMatches: 1, + numReplacements: 1, + }, + ], + countMatches: true, + }, + ], + }, + ], + '@semantic-release/release-notes-generator', + '@semantic-release/github', + ], +}; diff --git a/.travis.yml b/.travis.yml index 0f47f2e..5d22b00 100644 --- a/.travis.yml +++ b/.travis.yml @@ -8,8 +8,7 @@ addons: apt: packages: firefox-geckodriver -language: - - python +language: python python: - '2.7' @@ -30,33 +29,21 @@ script: jobs: include: - - stage: tag - language: generic - # no install necessary for generic language - install: true - script: - - git config --global user.email "[email protected]" - - git config --global user.name "Travis CI User" - - export tag=$(cat smartcar/__init__.py | grep '^__version__' | sed "s/^__version__[[:blank:]]*=[[:blank:]]'\(.*\)'/\1/g") - - if [ "$TRAVIS_BRANCH" = "master" ]; then git tag -a v$tag -m "Travis Generated Tag"; fi - deploy: - provider: script - skip_cleanup: true - script: echo -e "machine github.com\n login $CI_USER_TOKEN" >> ~/.netrc && git push origin v$tag - on: - branch: master - - stage: publish - language: python - python: - - '3.8' - # use 'true' to noop the install and script stages - # which are required for - # the python language - install: true - script: true + python: '3.8' + services: [] + addons: + firefox: 'skip' + apt: [] + install: + - nvm install 14 + - npm install [email protected] @google/[email protected] + script: + - npx semantic-release + - head -1 smartcar/__init__.py deploy: provider: pypi - user: $PYPI_USERNAME + username: $PYPI_USERNAME password: $PYPI_PASSWORD on: branch: master diff --git a/smartcar/__init__.py b/smartcar/__init__.py index 8f9d826..1ce06b1 100644 --- a/smartcar/__init__.py +++ b/smartcar/__init__.py @@ -1,4 +1,4 @@ -__version__ = '4.3.3' +__version__ = 'semantic-release' from .smartcar import (AuthClient, is_expired, get_user_id, get_vehicle_ids) from .vehicle import Vehicle
smartcar/python-sdk
c4ff88a82df60f198742d8e06069ec33d4fc7900
diff --git a/tests/test_requester.py b/tests/test_requester.py index fe629f4..81bea65 100644 --- a/tests/test_requester.py +++ b/tests/test_requester.py @@ -28,7 +28,7 @@ class TestRequester(unittest.TestCase): smartcar.requester.call('GET', self.URL) self.assertRegexpMatches( responses.calls[0].request.headers['User-Agent'], - r'^Smartcar\/(\d+\.\d+\.\d+) \((\w+); (\w+)\) Python v(\d+\.\d+\.\d+)$') + r'^Smartcar\/semantic-release \((\w+); (\w+)\) Python v(\d+\.\d+\.\d+)$') @responses.activate def test_oauth_error(self):
Changelog Please keep a changelog summarizing user-facing issues so upgrades are easy to understand: https://keepachangelog.com/en/1.0.0/ Thanks :)
0.0
c4ff88a82df60f198742d8e06069ec33d4fc7900
[ "tests/test_requester.py::TestRequester::test_user_agent" ]
[ "tests/test_requester.py::TestRequester::test_400", "tests/test_requester.py::TestRequester::test_401", "tests/test_requester.py::TestRequester::test_403", "tests/test_requester.py::TestRequester::test_404", "tests/test_requester.py::TestRequester::test_409", "tests/test_requester.py::TestRequester::test_429", "tests/test_requester.py::TestRequester::test_430", "tests/test_requester.py::TestRequester::test_500", "tests/test_requester.py::TestRequester::test_504", "tests/test_requester.py::TestRequester::test_oauth_error", "tests/test_requester.py::TestRequester::test_other", "tests/test_requester.py::TestRequester::test_smartcar_not_capable_error", "tests/test_requester.py::TestRequester::test_unknown_error", "tests/test_requester.py::TestRequester::test_vehicle_not_capable_error" ]
{ "failed_lite_validators": [ "has_short_problem_statement", "has_hyperlinks", "has_added_files", "has_many_modified_files" ], "has_test_patch": true, "is_lite": false }
2020-10-02 00:57:05+00:00
mit
5,564
smarter-travel-media__warthog-14
diff --git a/warthog/config.py b/warthog/config.py index 3dfab6f..9c72399 100644 --- a/warthog/config.py +++ b/warthog/config.py @@ -15,15 +15,14 @@ Load and parse configuration for a client from an INI-style file. """ import collections -import threading -import ssl import sys - +import threading import codecs import os.path + import warthog.exceptions +import warthog.ssl from .packages import six -# pylint: disable=import-error from .packages.six.moves import configparser # List of locations (from most preferred to least preferred) that will @@ -38,12 +37,10 @@ DEFAULT_CONFIG_LOCATIONS = [ os.path.join(os.getcwd(), 'warthog.ini') ] - # By default, we assume that the configuration file is in UTF-8 unless # the caller indicates it is in some other encoding. DEFAULT_CONFIG_ENCODING = 'utf-8' - # Simple immutable struct to hold configuration information for a WarthogClient WarthogConfigSettings = collections.namedtuple( 'WarthogConfigSettings', ['scheme_host', 'username', 'password', 'verify', 'ssl_version']) @@ -163,10 +160,14 @@ class WarthogConfigLoader(object): def parse_ssl_version(version_str, ssl_module=None): - """Get the :mod:`ssl` protocol constant that represents the given version + """Get the :mod:`warthog.ssl` protocol constant that represents the given version string if it exists, raising an error if the version string is malformed or does not correspond to a supported protocol. + Note that the :mod:`warthog.ssl` protocol constants should match the Python + :mod:`ssl` module exactly. The difference is that our SSL module has all + potential versions while older Python modules did not. + :param unicode version_str: Version string to resolve to a protocol :param module ssl_module: SSL module to get the protocol constant from :return: The ssl module protocol constant or ``None`` @@ -180,7 +181,7 @@ def parse_ssl_version(version_str, ssl_module=None): if not version_str: return None - ssl_module = ssl_module if ssl_module is not None else ssl + ssl_module = ssl_module if ssl_module is not None else warthog.ssl # Get a list of all the 'PROTOCOL' constants in the SSL module, and # strip the 'PROTOCOL_' prefix. This is the set of supported SSL or diff --git a/warthog/ssl.py b/warthog/ssl.py new file mode 100644 index 0000000..dea969f --- /dev/null +++ b/warthog/ssl.py @@ -0,0 +1,31 @@ +# -*- coding: utf-8 -*- +# +# Warthog - Simple client for A10 load balancers +# +# Copyright 2014-2016 Smarter Travel +# +# Available under the MIT license. See LICENSE for details. +# + +""" +warthog.ssl +~~~~~~~~~~~ + +SSL related constants used by Warthog +""" + +# Define our own versions of expected constants in the Python ssl +# module since older Python versions didn't define all of them. For +# example Python 2.6 and Python 3.3 don't include TLSv1.1 or TLSv1.2 +# and we need to support the combination of those Python versions +# and TLS versions. Kinda hacky but required. Such is life. + +PROTOCOL_SSLv3 = 1 + +PROTOCOL_SSLv23 = 2 + +PROTOCOL_TLSv1 = 3 + +PROTOCOL_TLSv1_1 = 4 + +PROTOCOL_TLSv1_2 = 5 diff --git a/warthog/transport.py b/warthog/transport.py index 836a526..776d2c8 100644 --- a/warthog/transport.py +++ b/warthog/transport.py @@ -26,17 +26,12 @@ from requests.adapters import ( from requests.packages.urllib3.exceptions import InsecureRequestWarning from requests.packages.urllib3.poolmanager import PoolManager -# HACK: We need to default to TLSv1.2 to work with the new load balancer -# but Python 2.6 and Python 3.3 don't have the TLSv1.2 constant. BUT, TLS -# version 1.2 will work with the version of requests we use on Python 2.6 -# so we hack in the constant here for the sake of a default. -# pylint: disable=invalid-name -_PROTOCOL_TLSv1_2 = 5 +import warthog.ssl # Default to using the SSL/TLS version that the A10 requires instead of # the default that the requests/urllib3 library picks. Or, maybe the A10 # just doesn't allow the client to negotiate. Either way, we use TLSv1.2. -DEFAULT_SSL_VERSION = _PROTOCOL_TLSv1_2 +DEFAULT_SSL_VERSION = warthog.ssl.PROTOCOL_TLSv1_2 # Default to verifying SSL/TLS certs because "safe by default" is a good idea. DEFAULT_CERT_VERIFY = True
smarter-travel-media/warthog
7e2c15be2747b71b2b6ef18c3191355489eaceb6
diff --git a/test/test_ssl.py b/test/test_ssl.py new file mode 100644 index 0000000..9165683 --- /dev/null +++ b/test/test_ssl.py @@ -0,0 +1,43 @@ +# -*- coding: utf-8 -*- + +import ssl + +import warthog.ssl + + +# Test our hacky constants to make sure we haven't shot ourselves in the +# foot in a completely obvious and predictable way. + + +def test_ssl3_matches(): + assert ssl.PROTOCOL_SSLv3 == warthog.ssl.PROTOCOL_SSLv3 + + +def test_ssl23_matches(): + assert ssl.PROTOCOL_SSLv23 == warthog.ssl.PROTOCOL_SSLv23 + + +def test_tls1_matches(): + assert ssl.PROTOCOL_TLSv1 == warthog.ssl.PROTOCOL_TLSv1 + + +def test_tls1_1_matches(): + try: + # It's possible that we're running under an old version of Python + # and this constant doesn't exist (hence why warthog.ssl exists). + module_const = ssl.PROTOCOL_TLSv1_1 + except AttributeError: + return + + assert module_const == warthog.ssl.PROTOCOL_TLSv1_1 + + +def test_tls1_2_matches(): + try: + # It's possible that we're running under an old version of Python + # and this constant doesn't exist (hence why warthog.ssl exists). + module_const = ssl.PROTOCOL_TLSv1_2 + except AttributeError: + return + + assert module_const == warthog.ssl.PROTOCOL_TLSv1_2 diff --git a/test/test_transport.py b/test/test_transport.py index fd5d81a..3d399e6 100644 --- a/test/test_transport.py +++ b/test/test_transport.py @@ -1,7 +1,6 @@ # -*- coding: utf-8 -*- -import ssl - +import warthog.ssl import warthog.transport @@ -16,11 +15,11 @@ def test_get_transport_factory_no_verify(): def test_get_transport_factory_alternate_ssl_version(): - factory = warthog.transport.get_transport_factory(ssl_version=ssl.PROTOCOL_SSLv3) + factory = warthog.transport.get_transport_factory(ssl_version=warthog.ssl.PROTOCOL_TLSv1_1) session = factory() adapter = session.get_adapter('https://lb.example.com') - assert ssl.PROTOCOL_SSLv3 == adapter.ssl_version, 'Did not get expected SSL version' + assert warthog.ssl.PROTOCOL_TLSv1_1 == adapter.ssl_version, 'Did not get expected SSL version' def test_get_transport_factory_with_defaults(): @@ -31,18 +30,3 @@ def test_get_transport_factory_with_defaults(): assert warthog.transport.DEFAULT_SSL_VERSION == adapter.ssl_version, 'Did not get default TLS version' assert warthog.transport.DEFAULT_CERT_VERIFY == session.verify, 'Did not get default verify setting' - -def test_default_tls_version_matches_ssl_module(): - try: - import ssl - module_version = ssl.PROTOCOL_TLSv1_2 - except AttributeError: - # Running an old version of Python that doesn't have the version - # constant. This is the reason we need to use our own and we can't - # verify that it's right here so just end. - return - - # Make sure that our default version matches the actual constant in the - # ssl module. This is really just a sanity check to make sure this hack - # doesn't blow up in our face - assert module_version == warthog.transport.DEFAULT_SSL_VERSION
Remove dependency on `ssl` module protocol constants in warthog.confg Like the issue in https://github.com/smarter-travel-media/warthog/commit/a3c6ed378ff3c83133ca18898644eb356d203067 we need to remove dependence on the `ssl` module in `warthog.confg` to allow use on Python 2.6
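Besides vendoring the constants in a standalone `warthog.ssl` module as the patch does, a common alternative is to fall back to the known numeric values only when the interpreter's `ssl` module lacks them; the values below match CPython's, as the new `test_ssl.py` asserts. A hedged sketch of that alternative, not what the patch itself implements:

```python
import ssl

# Python 2.6/3.3 do not define these constants; the numeric values are the
# ones CPython uses on versions that do define them.
PROTOCOL_TLSv1_1 = getattr(ssl, 'PROTOCOL_TLSv1_1', 4)
PROTOCOL_TLSv1_2 = getattr(ssl, 'PROTOCOL_TLSv1_2', 5)
```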
0.0
7e2c15be2747b71b2b6ef18c3191355489eaceb6
[ "test/test_ssl.py::test_ssl23_matches", "test/test_ssl.py::test_tls1_matches", "test/test_ssl.py::test_tls1_1_matches", "test/test_ssl.py::test_tls1_2_matches", "test/test_transport.py::test_get_transport_factory_no_verify", "test/test_transport.py::test_get_transport_factory_alternate_ssl_version", "test/test_transport.py::test_get_transport_factory_with_defaults" ]
[]
{ "failed_lite_validators": [ "has_short_problem_statement", "has_added_files", "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false }
2017-04-04 21:30:36+00:00
mit
5,565
smartystreets__smartystreets-python-sdk-28
diff --git a/smartystreets_python_sdk/us_street/analysis.py b/smartystreets_python_sdk/us_street/analysis.py index 841ebd4..e442a2e 100644 --- a/smartystreets_python_sdk/us_street/analysis.py +++ b/smartystreets_python_sdk/us_street/analysis.py @@ -8,6 +8,7 @@ class Analysis: self.cmra = obj.get('dpv_cmra', None) self.vacant = obj.get('dpv_vacant', None) self.active = obj.get('active', None) + self.dpv_no_stat = obj.get('dpv_no_stat', None) self.is_ews_match = obj.get('ews_match', DeprecationWarning) self.footnotes = obj.get('footnotes', None) self.lacs_link_code = obj.get('lacslink_code', None)
smartystreets/smartystreets-python-sdk
e6a132c8e884842e13fead16063c2ea330f39cd5
diff --git a/test/us_street/client_test.py b/test/us_street/client_test.py index 41ca54f..46095c0 100644 --- a/test/us_street/client_test.py +++ b/test/us_street/client_test.py @@ -156,6 +156,7 @@ class TestClient(unittest.TestCase): "dpv_cmra": "Y", "dpv_vacant": "N", "active": "Y", + "dpv_no_stat": "N", "footnotes": "footnotes", "lacslink_code": "lacslink_code", "lacslink_indicator": "lacslink_indicator", @@ -214,6 +215,7 @@ class TestClient(unittest.TestCase): self.assertEqual(actual_candidate.analysis.cmra, "Y") self.assertEqual(actual_candidate.analysis.vacant, "N") self.assertEqual(actual_candidate.analysis.active, "Y") + self.assertEqual(actual_candidate.analysis.dpv_no_stat, "N") self.assertEqual(actual_candidate.analysis.footnotes, "footnotes") self.assertEqual(actual_candidate.analysis.lacs_link_code, "lacslink_code") self.assertEqual(actual_candidate.analysis.lacs_link_indicator, "lacslink_indicator")
Handle the new `dpv_no_stat` in the analysis object SmartyStreets has an `active` field. It recently started reporting all addresses as "active" due to problems with the underlying USPS data and now reports that information in the `dpv_no_stat` field. (See [here](https://smartystreets.com/articles/what-is-an-inactive-no-stat-address) for more.) This package should provide the new `dpv_no_stat` field for interested users.
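A hedged usage sketch: prefer the new field and fall back to the deprecated one. Only the attribute names `dpv_no_stat` and `active` come from the patch; the `is_deliverable` helper and its fallback logic are illustrative, not part of the SDK.

```python
def is_deliverable(analysis):
    if analysis.dpv_no_stat is not None:
        return analysis.dpv_no_stat == 'N'  # 'N' means not flagged no-stat
    return analysis.active == 'Y'           # legacy field, now always 'Y'
```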
0.0
e6a132c8e884842e13fead16063c2ea330f39cd5
[ "test/us_street/client_test.py::TestClient::test_full_json_response_deserialization" ]
[ "test/us_street/client_test.py::TestClient::test_candidates_correctly_assigned_to_corresponding_lookup", "test/us_street/client_test.py::TestClient::test_deserialize_called_with_response_body", "test/us_street/client_test.py::TestClient::test_empty_batch_not_sent", "test/us_street/client_test.py::TestClient::test_freeform_assigned_to_street_field", "test/us_street/client_test.py::TestClient::test_raises_exception_when_response_has_error", "test/us_street/client_test.py::TestClient::test_single_lookup_values_correctly_assigned_to_parameters", "test/us_street/client_test.py::TestClient::test_successfully_sends_batch" ]
{ "failed_lite_validators": [ "has_hyperlinks" ], "has_test_patch": true, "is_lite": false }
2021-02-19 17:55:26+00:00
apache-2.0
5,566
smok-serwis__coolamqp-25
diff --git a/coolamqp/framing/base.py b/coolamqp/framing/base.py index 5f7ccf2..31753c9 100644 --- a/coolamqp/framing/base.py +++ b/coolamqp/framing/base.py @@ -92,6 +92,15 @@ class AMQPContentPropertyList(object): # todo they are immutable, so they could just serialize themselves... + def get(self, property_name, default=None): + """ + Return a particular property, or default if not defined + :param property_name: property name, unicode + :param default: default value + :return: memoryview or bytes + """ + return getattr(self, property_name, default) + @staticmethod def zero_property_flags(property_flags): """ diff --git a/coolamqp/framing/compilation/compile_definitions.py b/coolamqp/framing/compilation/compile_definitions.py index 0109147..62a957d 100644 --- a/coolamqp/framing/compilation/compile_definitions.py +++ b/coolamqp/framing/compilation/compile_definitions.py @@ -55,7 +55,7 @@ binary string? It's a memoryview all right. Only thing that isn't are field names in tables. """ -import struct, collections, warnings, logging, six +import struct, collections, logging, six from coolamqp.framing.base import AMQPClass, AMQPMethodPayload, AMQPContentPropertyList from coolamqp.framing.field_table import enframe_table, deframe_table, frame_table_size @@ -230,7 +230,7 @@ Field = collections.namedtuple('Field', ('name', 'type', 'basic_type', 'reserved # # If you do not know in advance what properties you will be using, it is correct to use # this constructor. - + if zpf in BasicContentPropertyList.PARTICULAR_CLASSES: return %s.PARTICULAR_CLASSES[zpf](**kwargs) else: logger.debug('Property field (%s:%d) not seen yet, compiling', repr(zpf)) diff --git a/coolamqp/framing/definitions.py b/coolamqp/framing/definitions.py index cab1e07..e472c3d 100644 --- a/coolamqp/framing/definitions.py +++ b/coolamqp/framing/definitions.py @@ -23,7 +23,7 @@ binary string? It's a memoryview all right. Only thing that isn't are field names in tables. """ -import struct, collections, warnings, logging, six +import struct, collections, logging, six from coolamqp.framing.base import AMQPClass, AMQPMethodPayload, AMQPContentPropertyList from coolamqp.framing.field_table import enframe_table, deframe_table, frame_table_size @@ -2359,6 +2359,23 @@ class BasicContentPropertyList(AMQPContentPropertyList): ]) zpf = six.binary_type(zpf) +# If you know in advance what properties you will be using, use typized constructors like +# +# runs once +# my_type = BasicContentPropertyList.typize('content_type', 'content_encoding') +# +# runs many times +# props = my_type('text/plain', 'utf8') +# +# instead of +# +# # runs many times +# props = BasicContentPropertyList(content_type='text/plain', content_encoding='utf8') +# +# This way you will be faster. +# +# If you do not know in advance what properties you will be using, it is correct to use +# this constructor. if zpf in BasicContentPropertyList.PARTICULAR_CLASSES: return BasicContentPropertyList.PARTICULAR_CLASSES[zpf](**kwargs) else:
smok-serwis/coolamqp
a379f33e87f4285a931031f4f69c70a5f30b46b7
diff --git a/tests/test_objects.py b/tests/test_objects.py index 6a4c0c6..e3a109d 100644 --- a/tests/test_objects.py +++ b/tests/test_objects.py @@ -5,9 +5,7 @@ It sounds like a melody from __future__ import print_function, absolute_import, division import six import unittest - - -from coolamqp.objects import NodeDefinition +from coolamqp.objects import NodeDefinition, MessageProperties class TestObjects(unittest.TestCase): @@ -23,3 +21,10 @@ class TestObjects(unittest.TestCase): n1 = NodeDefinition(u'amqp://ala:ma@kota/') self.assertEquals(n1.virtual_host, u'/') + + def test_get_message_properties(self): + empty_p_msg = MessageProperties() + ce_p_msg = MessageProperties(content_encoding=b'wtf') + + self.assertIsNone(empty_p_msg.get('content_encoding'), None) + self.assertEquals(ce_p_msg.get('content_encoding', b'wtf'), b'wtf')
Add some kind of .get() for attributes Because doing ```python try: mode = message.properties.content_type.tobytes() except AttributeError: mode = b'application/x-pickle' ``` all over sucks
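The patch answers this with a `getattr`-based `get()` on `AMQPContentPropertyList`, mirroring `dict.get` semantics: it returns the default instead of raising when the property was never set. The snippet above then collapses to one line (when the property is set you still receive the raw memoryview, so a `.tobytes()` may still apply):

```python
mode = message.properties.get('content_type', b'application/x-pickle')
```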
0.0
a379f33e87f4285a931031f4f69c70a5f30b46b7
[ "tests/test_objects.py::TestObjects::test_get_message_properties" ]
[ "tests/test_objects.py::TestObjects::test_node_definition_from_amqp" ]
{ "failed_lite_validators": [ "has_short_problem_statement", "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false }
2017-06-04 21:19:17+00:00
mit
5,567
smok-serwis__coolamqp-28
diff --git a/coolamqp/attaches/consumer.py b/coolamqp/attaches/consumer.py index 98da3c0..32085bd 100644 --- a/coolamqp/attaches/consumer.py +++ b/coolamqp/attaches/consumer.py @@ -87,8 +87,10 @@ class Consumer(Channeler): :param on_message: callable that will process incoming messages :type on_message: callable(ReceivedMessage instance) :param no_ack: Will this consumer require acknowledges from messages? - :param qos: a tuple of (prefetch size, prefetch window) for this consumer - :type qos: tuple(int, int) or tuple(None, int) + :param qos: a tuple of (prefetch size, prefetch window) for this consumer, or an int (prefetch window only) + If an int is passed, prefetch size will be set to 0 (which means undefined), and this int + will be used for prefetch window + :type qos: tuple(int, int) or tuple(None, int) or int :param cancel_on_failure: Consumer will cancel itself when link goes down :type cancel_on_failure: bool :param future_to_notify: Future to succeed when this consumer goes online for the first time. @@ -119,7 +121,9 @@ class Consumer(Channeler): # if this is not None, then it has an attribute # on_cancel_customer(Consumer instance) if qos is not None: - if qos[0] is None: + if isinstance(qos, int): + qos = 0, qos + elif qos[0] is None: qos = 0, qos[1] # prefetch_size=0=undefined self.qos = qos self.qos_update_sent = False # QoS was not sent to server
smok-serwis/coolamqp
1b7c1619d9a65eabc4bb2502b098930e3d48a959
diff --git a/tests/test_attaches/__init__.py b/tests/test_attaches/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/tests/test_attaches/test_consumer.py b/tests/test_attaches/test_consumer.py new file mode 100644 index 0000000..3ddf662 --- /dev/null +++ b/tests/test_attaches/test_consumer.py @@ -0,0 +1,13 @@ +# coding=UTF-8 +from __future__ import print_function, absolute_import, division +import six +import unittest +from coolamqp.attaches import Consumer +from coolamqp.objects import Queue + + +class TestConsumer(unittest.TestCase): + def test_issue_26(self): + """Support for passing qos as int""" + cons = Consumer(Queue('wtf'), lambda msg: None, qos=25) + self.assertEquals(cons.qos, (0, 25))
Support passing qos as int A bare int should then be understood as (None, int), i.e. a prefetch window only.
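The normalization the patch performs in `Consumer.__init__`, pulled out as a standalone sketch; the `(prefetch_size, prefetch_window)` tuple layout and the 0-means-undefined convention come from the patch, while the helper name is hypothetical:

```python
def normalize_qos(qos):
    if qos is None:
        return None
    if isinstance(qos, int):
        qos = None, qos          # a bare int means "prefetch window only"
    size, window = qos
    return (0 if size is None else size), window
```

So `normalize_qos(25)` yields `(0, 25)`, matching the `test_issue_26` assertion.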
0.0
1b7c1619d9a65eabc4bb2502b098930e3d48a959
[ "tests/test_attaches/test_consumer.py::TestConsumer::test_issue_26" ]
[]
{ "failed_lite_validators": [ "has_short_problem_statement" ], "has_test_patch": true, "is_lite": false }
2017-08-01 11:30:52+00:00
mit
5,568
smok-serwis__firanka-2
diff --git a/README.md b/README.md index c71f28c..f14d95a 100644 --- a/README.md +++ b/README.md @@ -98,6 +98,17 @@ By definition, _ModuloSeries_ has the domain of all real numbers. Note that someOtherSeries's domain length must be non-zero and finite. Otherwise _ValueError_ will be thrown. +## LinearInterpolationSeries + +These are discretes, but allow you to define an operator that will +take its neighbours into account and let you return a custom value. + +By default, it will assumes that values can be added, subbed, multed and dived, +and will do classical linear interpolation. + +They can either utilize an existing discrete series, or be created just as +any other discrete series would be. + ## Ranges Can be imported from _sai.ranges_. @@ -111,9 +122,7 @@ You can create Ranges as follows: Range(-5, 5, True, False) == Range('<-5;5)') ``` -First boolean argument signifies whether the interval is left-closed, -and second whether it is right-closed. - +For more information [use the source](firanka/ranges.py#L33) Range's are immutable and hashable. They can be sliced: ```python @@ -134,3 +143,8 @@ Or you can check for strict inclusion Range('<-1;1>') in Range('<-2;2>') ``` +## TimeProviders + +**EXPERIMENTAL** + +Can be imported from _sai.timeproviders_. diff --git a/firanka/ranges.py b/firanka/ranges.py index a2b2ae1..3e06c1e 100644 --- a/firanka/ranges.py +++ b/firanka/ranges.py @@ -1,9 +1,11 @@ # coding=UTF-8 from __future__ import print_function, absolute_import, division -import six + import functools import math +import six + __all__ = [ 'Range', 'REAL_SET', @@ -31,6 +33,16 @@ class Range(object): self.right_inc) def __init__(self, *args): + """ + Create like: + + * Range('<a;b>') + * Range(a, b, is_left_closed_, is_right_closed) + * Range(a, b) - will have both sides closed, unless one is inf + * Range(slice(a, b)) - will have both sides closed, unless one is None + + :param args: + """ if len(args) == 1: rs, = args if isinstance(rs, type(self)): @@ -48,6 +60,10 @@ class Range(object): start, stop = rs[1:-1].split(';') args = float(start), float(stop), rs[0] == '<', rs[-1] == '>' + elif len(args) == 2: + args = args[0], args[1], not math.isinf(args[0]), not math.isinf( args[1]) + q = lambda a, b, args: args[a] and math.isinf(args[b]) if q(2, 0, args) or q(3, 1, args): @@ -65,7 +81,7 @@ class Range(object): if isinstance(x, Range): if ((x.start == self.start) and (x.left_inc ^ self.left_inc)) \ or ((x.stop == self.stop) and ( - x.right_inc ^ self.right_inc)): + x.right_inc ^ self.right_inc)): return False return (x.start >= self.start) and (x.stop <= self.stop) @@ -80,15 +96,15 @@ class Range(object): def is_empty(self): return (self.start == self.stop) and not ( - self.left_inc or self.right_inc) + self.left_inc or self.right_inc) def length(self): return self.stop - self.start def __repr__(self): return 'Range(%s, %s, %s, %s)' % ( - repr(self.start), repr(self.stop), repr(self.left_inc), - repr(self.right_inc)) + repr(self.start), repr(self.stop), repr(self.left_inc), + repr(self.right_inc)) def __getitem__(self, item): if not isinstance(item, slice): diff --git a/firanka/series/__init__.py b/firanka/series/__init__.py new file mode 100644 index 0000000..f738011 --- /dev/null +++ b/firanka/series/__init__.py @@ -0,0 +1,16 @@ +# coding=UTF-8 +from __future__ import absolute_import + +from .base import FunctionSeries, DiscreteSeries, Series +from .interpolations import LinearInterpolationSeries, \ SCALAR_LINEAR_INTERPOLATOR +from .modulo import ModuloSeries + +__all__ = [ + 'FunctionSeries', + 'DiscreteSeries', + 'ModuloSeries', + 'Series', + 'SCALAR_LINEAR_INTERPOLATOR', + 'LinearInterpolationSeries', +] diff --git a/firanka/series.py b/firanka/series/base.py similarity index 88% rename from firanka/series.py rename to firanka/series/base.py index c1bb0cc..6182ce0 100644 --- a/firanka/series.py +++ b/firanka/series/base.py @@ -1,19 +1,10 @@ # coding=UTF-8 from __future__ import print_function, absolute_import, division -import math - import six from firanka.exceptions import NotInDomainError -from firanka.ranges import Range, REAL_SET, EMPTY_SET - -__all__ = [ - 'FunctionSeries', - 'DiscreteSeries', - 'ModuloSeries', - 'Series', -] +from firanka.ranges import Range, EMPTY_SET class Series(object): @@ -286,31 +277,3 @@ class JoinedSeries(Series): def _get_for(self, item): return self.op(self.ser1._get_for(item), self.ser2._get_for(item)) - - -class ModuloSeries(Series): - def __init__(self, series, *args, **kwargs): - """ - Construct a modulo series - :param series: base series to use - :raise ValueError: invalid domain length - """ - super(ModuloSeries, self).__init__(REAL_SET, *args, **kwargs) - - self.series = series - self.period = self.series.domain.length() - - if self.period == 0: - raise ValueError('Modulo series cannot have a period of 0') - elif math.isinf(self.period): - raise ValueError('Modulo series cannot have an infinite period') - - def _get_for(self, item): - if item < 0: - item = -(item // self.period) * self.period + item - elif item > self.period: - item = item - (item // self.period) * self.period - elif item == self.period: - item = 0 - - return self.series._get_for(self.series.domain.start + item) diff --git a/firanka/series/interpolations.py b/firanka/series/interpolations.py new file mode 100644 index 0000000..178c0ff --- /dev/null +++ b/firanka/series/interpolations.py @@ -0,0 +1,49 @@ +# coding=UTF-8 +from __future__ import print_function, absolute_import, division + +import six + +from .base import DiscreteSeries, Series + + +def SCALAR_LINEAR_INTERPOLATOR(t0, v0, t1, v1, tt): + """ + Good intepolator if our values can be added, subtracted, multiplied and divided + """ + return v0 + (tt - t0) * (t1 - t0) / (v1 - v0) + + +class LinearInterpolationSeries(DiscreteSeries): + def __init__(self, data, domain=None, + interpolator=SCALAR_LINEAR_INTERPOLATOR, + *args, **kwargs): + """ + :param interpolator: callable(t0: float, v0: any, t1: float, v1: any, tt: float) -> any + This, given intepolation points (t0, v0) and (t1, v1) such that t0 <= tt <= t1, + return a value for index tt + :raise TypeError: a non-discrete series was passed as data + """ + self.interpolator = interpolator + if isinstance(data, DiscreteSeries): + data, domain = data.data, data.domain + elif isinstance(data, Series): + raise TypeError('non-discrete series not supported!') + + super(LinearInterpolationSeries, self).__init__(data, domain, *args, + **kwargs) + + def _get_for(self, item): + if item == self.domain.start: + return self.data[0][1] + + if len(self.data) == 1: + return super(LinearInterpolationSeries, self).__getitem__(item) + + for i in six.moves.range(0, len(self.data) - 1): + cur_i, cur_v = self.data[i] + next_i, next_v = self.data[i + 1] + + if cur_i <= item <= next_i: + return self.interpolator(cur_i, cur_v, next_i, next_v, item) + + return self.data[-1][1] diff --git a/firanka/series/modulo.py b/firanka/series/modulo.py new file mode 100644 index 0000000..ed72808 --- /dev/null +++ b/firanka/series/modulo.py @@ -0,0 +1,35 @@ +# coding=UTF-8 +from __future__ import print_function, absolute_import, division + +import math + +from .base import Series +from ..ranges import REAL_SET + + +class ModuloSeries(Series): + def __init__(self, series, *args, **kwargs): + """ + Construct a modulo series + :param series: base series to use + :raise ValueError: invalid domain length + """ + super(ModuloSeries, self).__init__(REAL_SET, *args, **kwargs) + + self.series = series + self.period = self.series.domain.length() + + if self.period == 0: + raise ValueError('Modulo series cannot have a period of 0') + elif math.isinf(self.period): + raise ValueError('Modulo series cannot have an infinite period') + + def _get_for(self, item): + if item < 0: + item = -(item // self.period) * self.period + item + elif item > self.period: + item = item - (item // self.period) * self.period + elif item == self.period: + item = 0 + + return self.series._get_for(self.series.domain.start + item) diff --git a/firanka/timeproviders.py b/firanka/timeproviders.py index 2cca5f4..47dbd0c 100644 --- a/firanka/timeproviders.py +++ b/firanka/timeproviders.py @@ -1,10 +1,8 @@ # coding=UTF-8 from __future__ import print_function, absolute_import, division -import six -import logging -from .series import Series from .ranges import Range +from .series import Series class BijectionMapping(object):
smok-serwis/firanka
5888250487fd93c5251a0dfafd6173895e599550
diff --git a/tests/test_range.py b/tests/test_range.py index 5463f90..c8bc935 100644 --- a/tests/test_range.py +++ b/tests/test_range.py @@ -1,6 +1,8 @@ # coding=UTF-8 from __future__ import print_function, absolute_import, division + import unittest + from firanka.ranges import Range @@ -38,7 +40,7 @@ class TestRange(unittest.TestCase): def test_str_and_repr_and_bool(self): p = Range(-1, 1, True, True) self.assertEqual(eval(repr(p)), p) - self.assertEqual(str(Range(-1, 1, True, True)), '<-1;1>') + self.assertEqual(str(Range(-1, 1)), '<-1;1>') def test_constructor(self): self.assertRaises(ValueError, lambda: Range('#2;3>')) diff --git a/tests/test_series.py b/tests/test_series.py index 8b8a022..94d73bc 100644 --- a/tests/test_series.py +++ b/tests/test_series.py @@ -1,11 +1,13 @@ # coding=UTF-8 from __future__ import print_function, absolute_import, division -import six + import math import unittest -from firanka.series import DiscreteSeries, FunctionSeries, ModuloSeries -from firanka.ranges import Range + from firanka.exceptions import NotInDomainError +from firanka.ranges import Range +from firanka.series import DiscreteSeries, FunctionSeries, ModuloSeries, \ + LinearInterpolationSeries NOOP = lambda x: x @@ -174,3 +176,18 @@ class TestModuloSeries(unittest.TestCase): ser2 = FunctionSeries(NOOP, '<0;3)') ser3 = ser1.join(ser2, lambda x, y: x * y) + + +class TestLinearInterpolation(unittest.TestCase): + def test_lin(self): + series = LinearInterpolationSeries( + DiscreteSeries([(0, 1), (1, 2), (2, 3)], '<0;3)')) + + self.assertEqual(series[0], 1) + self.assertEqual(series[0.5], 1.5) + self.assertEqual(series[1], 2) + self.assertEqual(series[2.3], 3) + + def test_conf(self): + self.assertRaises(TypeError, lambda: LinearInterpolationSeries( + FunctionSeries(NOOP, '<0;3)'))) diff --git a/tests/test_timeproviders.py b/tests/test_timeproviders.py index d6d0d78..87bc595 100644 --- a/tests/test_timeproviders.py +++ b/tests/test_timeproviders.py @@ -1,6 +1,6 @@ # coding=UTF-8 from __future__ import print_function, absolute_import, division -import six + import unittest from firanka.series import DiscreteSeries
Add a linear interpolation series Much like `DiscreteSeries` but can perform linear interpolation
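For reference, textbook linear interpolation between two samples `(t0, v0)` and `(t1, v1)` evaluated at `t0 <= tt <= t1`; a generic sketch independent of the firanka API:

```python
def lerp(t0, v0, t1, v1, tt):
    # fraction of the way from t0 to t1, scaled onto the value range
    return v0 + (tt - t0) * (v1 - v0) / (t1 - t0)
```

For example, `lerp(0, 1, 1, 2, 0.5)` gives `1.5`, the value the new `test_lin` expects for `series[0.5]`.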
0.0
5888250487fd93c5251a0dfafd6173895e599550
[ "tests/test_range.py::TestRange::test_constructor", "tests/test_range.py::TestRange::test_contains", "tests/test_range.py::TestRange::test_intersection", "tests/test_range.py::TestRange::test_isempty", "tests/test_range.py::TestRange::test_slicing", "tests/test_range.py::TestRange::test_str_and_repr_and_bool", "tests/test_series.py::TestDiscreteSeries::test_apply", "tests/test_series.py::TestDiscreteSeries::test_base", "tests/test_series.py::TestDiscreteSeries::test_discretize", "tests/test_series.py::TestDiscreteSeries::test_eval", "tests/test_series.py::TestDiscreteSeries::test_eval2", "tests/test_series.py::TestDiscreteSeries::test_eval3", "tests/test_series.py::TestDiscreteSeries::test_slice", "tests/test_series.py::TestDiscreteSeries::test_slice_outdomain", "tests/test_series.py::TestDiscreteSeries::test_translation", "tests/test_series.py::TestDiscreteSeries::test_uncov", "tests/test_series.py::TestFunctionSeries::test_apply", "tests/test_series.py::TestFunctionSeries::test_domain_sensitivity", "tests/test_series.py::TestFunctionSeries::test_slice", "tests/test_series.py::TestModuloSeries::test_base", "tests/test_series.py::TestModuloSeries::test_comp_discrete", "tests/test_series.py::TestModuloSeries::test_exceptions", "tests/test_series.py::TestLinearInterpolation::test_conf", "tests/test_series.py::TestLinearInterpolation::test_lin", "tests/test_timeproviders.py::TestTimeproviders::test_base" ]
[]
{ "failed_lite_validators": [ "has_short_problem_statement", "has_added_files", "has_many_modified_files", "has_many_hunks", "has_pytest_match_arg" ], "has_test_patch": true, "is_lite": false }
2017-12-09 07:47:03+00:00
mit
5,569
smok-serwis__firanka-5
diff --git a/README.md b/README.md index f14d95a..a660d10 100644 --- a/README.md +++ b/README.md @@ -109,6 +109,13 @@ and will do classical linear interpolation. They can either utilize an existing discrete series, or be created just as any other discrete series would be. +## Builders + +## DiscreteSeriesBuilder + +Sometimes you just need to update a DiscreteSeries, or to blang a brand new one. This little fella +will help you out. + ## Ranges Can be imported from _sai.ranges_. diff --git a/firanka/builder.py b/firanka/builders.py similarity index 95% rename from firanka/builder.py rename to firanka/builders.py index b9d499a..7691352 100644 --- a/firanka/builder.py +++ b/firanka/builders.py @@ -11,10 +11,10 @@ Update knowledge of current discrete series """ __all__ = [ - 'DiscreteKnowledgeBuilder', + 'DiscreteSeriesBuilder', ] -class DiscreteKnowledgeBuilder(object): +class DiscreteSeriesBuilder(object): def __init__(self, series=None): if series is None: diff --git a/firanka/series/base.py b/firanka/series/base.py index 79572ef..2855e20 100644 --- a/firanka/series/base.py +++ b/firanka/series/base.py @@ -99,6 +99,10 @@ class Series(object): class DiscreteSeries(Series): + """ + A series with lots of small rectangles interpolating something + """ + def __init__(self, data, domain=None, *args, **kwargs): data = SortedList(data) diff --git a/firanka/series/modulo.py b/firanka/series/modulo.py index ed72808..e0063c6 100644 --- a/firanka/series/modulo.py +++ b/firanka/series/modulo.py @@ -24,7 +24,12 @@ class ModuloSeries(Series): elif math.isinf(self.period): raise ValueError('Modulo series cannot have an infinite period') + # We internally translate the start of the series' domain to be at 0, because it simpler for us :D + self.intertrans = -self.series.domain.start + def _get_for(self, item): + item += self.intertrans + if item < 0: item = -(item // self.period) * self.period + item elif item > self.period:
smok-serwis/firanka
63a4a02b781c64a12865b320d9427d8eda28f1df
diff --git a/tests/test_builder.py b/tests/test_builder.py index 51c816a..9188215 100644 --- a/tests/test_builder.py +++ b/tests/test_builder.py @@ -3,7 +3,7 @@ from __future__ import print_function, absolute_import, division import six import unittest -from firanka.builder import DiscreteKnowledgeBuilder +from firanka.builders import DiscreteSeriesBuilder from firanka.series import DiscreteSeries @@ -12,7 +12,7 @@ class TestBuilder(unittest.TestCase): ser = DiscreteSeries([(0,1), (1,2)]) - kb = DiscreteKnowledgeBuilder(ser) + kb = DiscreteSeriesBuilder(ser) kb.put(3, 4) kb.put(-1, 5) @@ -23,3 +23,15 @@ class TestBuilder(unittest.TestCase): self.assertTrue(s2.domain, '<-1;3>') self.assertEqual(s2.data,[(-1,6), (0,2), (1,2), (3,4)]) + + def test_exnihilo(self): + kb = DiscreteSeriesBuilder() + + kb.put(0, 0) + kb.put(1, 1) + + s = kb.as_series() + + self.assertEqual(s[0],0) + self.assertEqual(s[1],1) + self.assertEqual(s.domain, '<0;1>')
ModuloSeries behaves incorrectly. See the test in test_series.py:TestModuloSeries:test_advanced
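The fix amounts to translating the lookup index so the series' domain starts at zero before wrapping it into the period. A generic sketch of that wrapping, separate from firanka's actual `intertrans` bookkeeping (Python's `%` already returns a value in `[0, period)` for a positive period):

```python
def wrap_into_domain(item, start, period):
    # map any real-valued index onto [start, start + period)
    return start + (item - start) % period
```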
0.0
63a4a02b781c64a12865b320d9427d8eda28f1df
[ "tests/test_builder.py::TestBuilder::test_exnihilo", "tests/test_builder.py::TestBuilder::test_t1" ]
[]
{ "failed_lite_validators": [ "has_short_problem_statement", "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false }
2017-12-09 20:21:12+00:00
mit
5,570
snakemake__snakemake-127
diff --git a/docs/project_info/faq.rst b/docs/project_info/faq.rst index 91321e44..e1ef4d2a 100644 --- a/docs/project_info/faq.rst +++ b/docs/project_info/faq.rst @@ -100,6 +100,25 @@ In order to infer the IDs from present files, Snakemake provides the ``glob_wildcards`` function. The function matches the given pattern against the files present in the filesystem and thereby infers the values for all wildcards in the pattern. A named tuple that contains a list of values for each wildcard is returned. Here, this named tuple has only one item, that is the list of values for the wildcard ``{id}``. +I don't want expand to use the product of every wildcard, what can I do? +------------------------------------------------------------------------ + +By default the expand function uses ``itertools.product`` to create every combination of the supplied wildcards. +Expand takes an optional, second positional argument which can customize how wildcards are combined. +To create the list ``["a_1.txt", "b_2.txt", "c_3.txt"]``, invoke expand as: +``expand("{sample}_{id}.txt", zip, sample=["a", "b", "c"], id=["1", "2", "3"])`` + +I don't want expand to use every wildcard, what can I do? +--------------------------------------------------------- + +Sometimes partially expanding wildcards is useful to define inputs which still depend on some wildcards. +Expand takes an optional keyword argument, allow_missing=True, that will format only wildcards which are supplied, leaving others as is. +To create the list ``["{sample}_1.txt", "{sample}_2.txt"]``, invoke expand as: +``expand("{sample}_{id}.txt", id=["1", "2"], allow_missing=True)`` +If the filename contains the wildcard ``allow_missing``, it will be formatted normally: +``expand("{allow_missing}.txt", allow_missing=True)`` returns ``["True.txt"]``. + + Snakemake complains about a cyclic dependency or a PeriodicWildcardError. What can I do? ---------------------------------------------------------------------------------------- diff --git a/snakemake/io.py b/snakemake/io.py index fd7d5dda..a976a9e9 100755 --- a/snakemake/io.py +++ b/snakemake/io.py @@ -17,6 +17,7 @@ import functools import subprocess as sp from itertools import product, chain from contextlib import contextmanager +import string import collections import yaml @@ -892,7 +893,8 @@ def expand(*args, **wildcards): second arg (optional): a function to combine wildcard values (itertools.product per default) **wildcards -- the wildcards as keyword arguments - with their values as lists + with their values as lists. If allow_missing=True is included + wildcards in filepattern without values will stay unformatted. """ filepatterns = args[0] if len(args) == 1: @@ -916,12 +918,27 @@ def expand(*args, **wildcards): "of expand (e.g. 'temp(expand(\"plots/{sample}.pdf\", sample=SAMPLES))')." ) + # check if remove missing is provided + format_dict = dict + if "allow_missing" in wildcards and wildcards["allow_missing"] is True: + + class FormatDict(dict): + def __missing__(self, key): + return "{" + key + "}" + + format_dict = FormatDict + # check that remove missing is not a wildcard in the filepatterns + for filepattern in filepatterns: + if "allow_missing" in re.findall(r"{([^}\.[!:]+)", filepattern): + format_dict = dict + break + # remove unused wildcards to avoid duplicate filepatterns wildcards = { filepattern: { k: v for k, v in wildcards.items() - if k in re.findall("{([^}\.[!:]+)", filepattern) + if k in re.findall(r"{([^}\.[!:]+)", filepattern) } for filepattern in filepatterns } @@ -934,11 +951,12 @@ def expand(*args, **wildcards): values = [values] yield [(wildcard, value) for value in values] + formatter = string.Formatter() try: return [ - filepattern.format(**comb) + formatter.vformat(filepattern, (), comb) for filepattern in filepatterns - for comb in map(dict, combinator(*flatten(wildcards[filepattern]))) + for comb in map(format_dict, combinator(*flatten(wildcards[filepattern]))) ] except KeyError as e: raise WildcardError("No values given for wildcard {}.".format(e)) @@ -1050,7 +1068,7 @@ def update_wildcard_constraints( def split_git_path(path): - file_sub = re.sub("^git\+file:/+", "/", path) + file_sub = re.sub(r"^git\+file:/+", "/", path) (file_path, version) = file_sub.split("@") file_path = os.path.realpath(file_path) root_path = get_git_root(file_path)
snakemake/snakemake
0607695047290effb44367cd004523e5e3398171
diff --git a/tests/test_expand.py b/tests/test_expand.py new file mode 100644 index 00000000..8094a38b --- /dev/null +++ b/tests/test_expand.py @@ -0,0 +1,67 @@ +from snakemake.io import expand +from snakemake.exceptions import WildcardError +import pytest + + +def test_simple_expand(): + # single filepattern + assert expand("{a}.out", a="test") == ["test.out"] + # multiple filepatterns + assert expand(["{a}.out", "{b}.out"], a="a", b="b") == ["a.out", "b.out"] + # multiple wildcards + assert expand("{a}.out", a=["1", "2", "3"]) == ["1.out", "2.out", "3.out"] + # multiple wildcards and patterns + assert expand(["{a}_{b}.ab", "{b}.b"], a="1 2".split(), b="3 4".split()) == [ + "1_3.ab", + "1_4.ab", + "2_3.ab", + "2_4.ab", + "3.b", + "4.b", + ] + # replace product + assert expand(["{a}_{b}.ab", "{b}.b"], zip, a="1 2".split(), b="3 4".split()) == [ + "1_3.ab", + "2_4.ab", + "3.b", + "4.b", + ] + + +def test_allow_missing(): + # single filepattern + assert expand("{a}_{b}.out", allow_missing=True) == ["{a}_{b}.out"] + assert expand("{a}_{b}.out", a="test", allow_missing=True) == ["test_{b}.out"] + # none missing + assert expand("{a}.out", a="test", allow_missing=True) == ["test.out"] + # wildcard is allow_missing + assert expand("{allow_missing}.out", allow_missing=True) == ["True.out"] + # allow_missing not True + assert expand("{a}.out", a="test", allow_missing="test2") == ["test.out"] + with pytest.raises(WildcardError) as e: + expand("{a}.out", allow_missing="test2") + assert str(e.value) == "No values given for wildcard 'a'." + + # multiple filepatterns + assert expand(["{a}.out", "{b}.out"], allow_missing=True) == ["{a}.out", "{b}.out"] + # multiple wildcards + assert expand("{a}_{b}.out", a=["1", "2", "3"], allow_missing=True) == [ + "1_{b}.out", + "2_{b}.out", + "3_{b}.out", + ] + # multiple wildcards and patterns + assert expand( + ["{a}_{b}_{C}.ab", "{b}_{c}.b"], + a="1 2".split(), + b="3 4".split(), + allow_missing=True, + ) == ["1_3_{C}.ab", "1_4_{C}.ab", "2_3_{C}.ab", "2_4_{C}.ab", "3_{c}.b", "4_{c}.b"] + # replace product + assert expand( + ["{a}_{b}_{C}.ab", "{b}_{c}.b"], + zip, + a="1 2".split(), + b="3 4".split(), + allow_missing=True, + ) == ["1_3_{C}.ab", "2_4_{C}.ab", "3_{c}.b", "4_{c}.b"]
A utility function to partially format strings with wildcards **Is your feature request related to a problem? Please describe.** In the workflow, we usually need to deal with multiple wildcards. It would be nice if there were a function to partially format the input string with multiple wildcards. Maybe this function already exists and I'm just not aware of it. For example: ``` foo = "{a}_{b}.txt" result = format_cards(foo, a="foo") print(result) # result = "foo_{b}.txt" ``` **Describe the solution you'd like** Here is a [solution](https://stackoverflow.com/questions/11283961/partial-string-formatting) I found on StackOverflow. ``` def format_cards(template, **kwargs): import string class FormatDict(dict): def __missing__(self, key): return "{" + key + "}" formatter = string.Formatter() mapping = FormatDict(**kwargs) return formatter.vformat(template, (), mapping) ```
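The helper quoted in the report, restated as a self-contained runnable sketch (with the `**kwarg` typo corrected); note the literal `{key}` placeholder only round-trips cleanly for fields without a format spec:

```python
import string

class FormatDict(dict):
    # Unknown keys come back as literal "{key}" placeholders.
    def __missing__(self, key):
        return "{" + key + "}"

def format_cards(template, **kwargs):
    # vformat with a defaulting mapping acts as a partial str.format
    return string.Formatter().vformat(template, (), FormatDict(**kwargs))

assert format_cards("{a}_{b}.txt", a="foo") == "foo_{b}.txt"
assert format_cards("{a}_{b}.txt", a="x", b="y") == "x_y.txt"
```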
0.0
0607695047290effb44367cd004523e5e3398171
[ "tests/test_expand.py::test_allow_missing" ]
[ "tests/test_expand.py::test_simple_expand" ]
{ "failed_lite_validators": [ "has_hyperlinks", "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false }
2019-11-26 17:52:05+00:00
mit
5,571
snakemake__snakemake-469
diff --git a/snakemake/io.py b/snakemake/io.py index 1d5bf24b..d3fa5d6a 100755 --- a/snakemake/io.py +++ b/snakemake/io.py @@ -955,7 +955,7 @@ def expand(*args, **wildcards): combinator = product elif len(args) == 2: combinator = args[1] - if isinstance(filepatterns, str): + if isinstance(filepatterns, str) or isinstance(filepatterns, Path): filepatterns = [filepatterns] def path_to_str(f): diff --git a/snakemake/workflow.py b/snakemake/workflow.py index 5dc4f148..ccf603d3 100644 --- a/snakemake/workflow.py +++ b/snakemake/workflow.py @@ -952,7 +952,7 @@ class Workflow: success = scheduler.schedule() - if not immediate_submit: + if not immediate_submit and not dryrun: dag.cleanup_workdir() if success:
snakemake/snakemake
d550d20ce9520fd328e166938df253b2ca63fc8a
diff --git a/tests/test_io.py b/tests/test_io.py index bbad3b13..a53bad2f 100644 --- a/tests/test_io.py +++ b/tests/test_io.py @@ -1,3 +1,5 @@ +from pathlib import PosixPath + from snakemake.io import _wildcard_regex, expand from snakemake.exceptions import WildcardError @@ -100,3 +102,6 @@ def test_expand(): assert sorted( expand(["a: {a} + b: {b}", "c: {c}"], a="aa", b=["b", "bb"], c=["c", "cc"]) ) == sorted(["a: aa + b: b", "a: aa + b: bb", "c: c", "c: cc"]) + + # expand on pathlib.Path objects + assert expand(PosixPath() / "{x}" / "{y}", x="Hello", y="world",) == ["Hello/world"]
Support pathlib in expand() **Is your feature request related to a problem? Please describe.** Pathlib works most places in snakemake, which is great. However, in calls to expand(), pathlib paths must be converted to `str`, otherwise the following error occurs: ```sh TypeError in line 29 of /Users/.../Snakefile: 'PosixPath' object is not iterable File "/Users/.../Snakefile", line 29, in <module> ``` **Describe the solution you'd like** Update the `expand()` function to check whether the first argument is a `Path` object, and convert it to a string if so.
0.0
d550d20ce9520fd328e166938df253b2ca63fc8a
[ "tests/test_io.py::test_expand" ]
[ "tests/test_io.py::test_wildcard_regex" ]
{ "failed_lite_validators": [ "has_many_modified_files" ], "has_test_patch": true, "is_lite": false }
2020-06-30 16:51:59+00:00
mit
5,572
snowblink14__smatch-30
diff --git a/.travis.yml b/.travis.yml index 50980a8..1aeed33 100644 --- a/.travis.yml +++ b/.travis.yml @@ -9,7 +9,7 @@ deploy: provider: pypi user: __token__ password: - secure: CVA8RIEx1IdOHUBMjF/MPd6FFE3wu0sAzJkowu0PzZ4VwbOlqSYXuYWjkNUV8plCVm8mgovMXBDjTC8q9AGYTJi8B5f92AY6YRfLVjJCpdMd8EH6VNlymxJTYg0t5W5RKpzdAyOLj5GyAPhVqY805TIE+ao2XKQ+UwlwRUq/SUhxi6gcvLsamuabfKg2OZxijKp6dPk+tqw33K+DYJVV3WOUqyI169Z3iXRhljghmLWiV6xdIs1/V34XsbdgRJDRs/kOsrhE19khOIUqZ/+to++qRFwLpgAsF71n23vGs/FwvP+ab1oMYtUNC8DHI2gmiGHO/ipE/FXqsYdYuhULnzs7nfx04YQoaZliD7Hbvze0zEczQZxHQpuHbykNND2WU20NOnckBzDgqhqzPIVjHwQTGFKMZqg7nZ8w81NrGSeGTXagtoBPtq6/RPU1alppFE4EU7/fZrAN/fXgiemcxmsK5Nl9Ps6zWoB2eFhCaLDeUI9BktIrp5nLplLQJmLfF+RfLaIYULD5hdwFzgcUTjG/nPr41XNMvwYvsM+rCGxlxBtL2Gc2xf7Kfv9T6vUr0eb0Yxp+oKNv7XN3j1rZ+0PIqMbi1dpMJu7OqxIR5lQB6bYwDcCeKxrFpnP/9drtXzjqGZnzCt/rdGnMUd9PrkZTHFXOrsq0ZtmFSNK5/m4= + secure: jXgeZ3fjIODIltATnt487VOBf6TIOp9bpd79r/wUG373pa6GVrWKEbX1LBAprmSW7dI+pYccQpeNF5OIsUJFFNjy59iLlKtfY+9N8ugD7rvuQL4pP+jwiVvuKB9SLyLwzJWrArsmVOju9JQUogtKXPJiflY1PE4fGV/Zjx+FNl6816HO8i2uHaLOBzDbNXtOCnAmioaM72cQ/qvtbKdkxF7u7xtAwqA8TEBeaf6PYR2b1mn8Vcgo7pG/uJGZtICPVINxv914jDpu0CDcWl3rmtVM1Q44XxKLV3c+6QUeonTy5s+35plDiWnI0F0HJiGfS51NYvUUaZZQKuNbIKg27dJcfhuzZDarD4C/P6fjN8NSdmB/B4P058pBkUTmmaeaLBDENBOlJAwRwHupRvsnWZ9xazw1edEVo2tnFPhAZJ+BBDv9lISiNg2kiQx6+5/BbnecjS4lyz3Kje+QljocDXHwshsSl8FxnmJ6TWI21yJJauKM4LGxHGyCrz0W1AsYw+S/cwppC1/BzS9ZLW5DdUeVzULGKzY1MqZJ7ABt70MLBMmrV5VjpUjtTS9dqVBMSvCxshR2hOM47x4wEzG9yQec4aDvHImEPOMXKHkzk6fVhm5qO9IxJTPldYrLcR/UFpcFdHKelIReqTQsjzaEI7CMCicux/DDpRt9msrCeQY= on: tags: true skip_existing: true diff --git a/CHANGELOG.md b/CHANGELOG.md index 96c99a1..24cd58a 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -7,6 +7,9 @@ * Add CI/CD configuration (see PR [#23][]) * Better handle deinversion of special roles ([#10][]) * Get `smatch-table.py` working again (part of PR [#27][]) +* Add `tests/` subdirectory and `test_top.py` ([#25]) +* Make TOP relation's value a constant string and not the top node's + concept to avoid double-penalizing different top concepts ([#25]) ## [1.0.2][] @@ -166,5 +169,6 @@ The following are taken from an old `update_log` file: [#19]: https://github.com/snowblink14/smatch/issues/19 [#22]: https://github.com/snowblink14/smatch/issues/22 [#23]: https://github.com/snowblink14/smatch/pull/23 +[#25]: https://github.com/snowblink14/smatch/pull/25 [#27]: https://github.com/snowblink14/smatch/pull/27 diff --git a/amr.py b/amr.py index 020f52e..182b6b5 100755 --- a/amr.py +++ b/amr.py @@ -420,8 +420,8 @@ class AMR(object): # each node has a relation list and attribute list relation_list.append(node_rel_list) attribute_list.append(node_attr_list) - # add TOP as an attribute. The attribute value is the top node value - attribute_list[0].append(["TOP", node_value_list[0]]) + # add TOP as an attribute. The attribute value just needs to be constant + attribute_list[0].append(["TOP", 'top']) result_amr = AMR(node_name_list, node_value_list, relation_list, attribute_list) return result_amr
snowblink14/smatch
a4f2e28a16666860bf70c4274803a60c67bac2cb
diff --git a/tests/README.md b/tests/README.md new file mode 100644 index 0000000..f8c1899 --- /dev/null +++ b/tests/README.md @@ -0,0 +1,18 @@ + +# smatch tests + +This directory contains test cases to verify the correct behavior of +smatch. Run the tests with [pytest](https://pytest.org/). This will +require you to install both pytest and smatch: + +```console +$ pip install pytest +$ pip install -e . # current directory is smatch +$ pytest +``` + +**Note:** As smatch is inherently non-deterministic due to its +hill-climbing implementation, the tests can be "flaky" (i.e., +sometimes pass, sometimes fail). To mitigate the possibility of flaky +tests, test cases should use *minimal* AMRs so it becomes trivial for +smatch to get the optimal solution. diff --git a/tests/test_top.py b/tests/test_top.py new file mode 100644 index 0000000..a9bf4c4 --- /dev/null +++ b/tests/test_top.py @@ -0,0 +1,57 @@ + +""" +The top node in an AMR determines the focus of the encoded +meaning. For example, the following have the same dependencies but a +different node at the top: + + (w / white-03 + :ARG1 (m / marble)) + +The above AMR means "the marble is white" or "the whiteness of the +marble". + + (m / marble + :ARG1-of (w / white-03)) + +This AMR means "the marble that is white" or "the white marble". + +For this reason, AMRs that differ only in which node is the top will +get smatch scores less than 1.0. + +For more information see: + + https://github.com/amrisi/amr-guidelines/blob/master/amr.md#focus +""" + +import smatch + +a = '(a / alpha :ARG0 (b / beta))' +b = '(a / alternative :ARG0 (b / beta))' +c = '(a / alpha :ARG0 (b / b-side))' +d = '(b / beta :ARG0-of (a / alpha))' + + +def get_amr_match(amr1, amr2): + vals = smatch.get_amr_match(amr1, amr2) + smatch.match_triple_dict.clear() + return vals + + +def test_same(): + assert get_amr_match(a, a) == (4, 4, 4) + smatch.match_triple_dict.clear() + + +def test_same_top_different_top_concept(): + assert get_amr_match(a, b) == (3, 4, 4) + smatch.match_triple_dict.clear() + + +def test_same_top_different_dependent_concept(): + assert get_amr_match(a, c) == (3, 4, 4) + smatch.match_triple_dict.clear() + + +def test_same_different_top(): + assert get_amr_match(a, d) == (3, 4, 4) + smatch.match_triple_dict.clear()
interpretation of TOP property when comparing SMATCH results with the scorer from the [2019 CoNLL Shared Task](http://mrp.nlpl.eu) on Cross-Framework Meaning Representation Parsing (MRP), we discovered that SMATCH will only consider the TOP property correct if the node labels also match. this appears to double-penalize for label mismatches and is maybe not the intended behavior? for more technical detail and a minimal test case, please see the MRP [`mtool` issue](https://github.com/cfmrp/mtool/issues/12#issue-456527664).
0.0
a4f2e28a16666860bf70c4274803a60c67bac2cb
[ "tests/test_top.py::test_same_top_different_top_concept" ]
[ "tests/test_top.py::test_same", "tests/test_top.py::test_same_top_different_dependent_concept", "tests/test_top.py::test_same_different_top" ]
{ "failed_lite_validators": [ "has_hyperlinks", "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false }
2020-05-29 09:58:14+00:00
mit
5,573
snowflakedb__snowflake-connector-python-1094
diff --git a/src/snowflake/connector/connection.py b/src/snowflake/connector/connection.py index 6387cdb0..f6f07031 100644 --- a/src/snowflake/connector/connection.py +++ b/src/snowflake/connector/connection.py @@ -268,18 +268,13 @@ class SnowflakeConnection: self.heartbeat_thread = None + if "application" not in kwargs and ENV_VAR_PARTNER in os.environ.keys(): + kwargs["application"] = os.environ[ENV_VAR_PARTNER] + self.converter = None self.__set_error_attributes() self.connect(**kwargs) self._telemetry = TelemetryClient(self._rest) - # Some configuration files need to be updated here to make them testable - # E.g.: if DEFAULT_CONFIGURATION pulled in env variables these would be not testable - if ( - self.application - == DEFAULT_CONFIGURATION["application"][0] # still default value - and ENV_VAR_PARTNER in os.environ.keys() # is defined as an env variable - ): - self._application = os.environ[ENV_VAR_PARTNER] def __del__(self): # pragma: no cover try:
snowflakedb/snowflake-connector-python
7bb60da3b48edede5b636d449e5291acdc768534
diff --git a/test/unit/test_connection.py b/test/unit/test_connection.py index 792529a0..6be22ce0 100644 --- a/test/unit/test_connection.py +++ b/test/unit/test_connection.py @@ -4,6 +4,7 @@ # from __future__ import annotations +import json import os from unittest.mock import patch @@ -136,12 +137,38 @@ def test_is_still_running(): @pytest.mark.skipolddriver -def test_partner_env_var(): - with patch.dict(os.environ, {ENV_VAR_PARTNER: "Amanda"}): - with patch("snowflake.connector.network.SnowflakeRestful.fetch"): - with snowflake.connector.connect( - user="user", - account="account", - password="password123", - ) as conn: - assert conn.application == "Amanda" +@patch("snowflake.connector.network.SnowflakeRestful._post_request") +def test_partner_env_var(mockSnowflakeRestfulPostRequest): + PARTNER_NAME = "Amanda" + + request_body = {} + + def mock_post_request(url, headers, json_body, **kwargs): + nonlocal request_body + request_body = json.loads(json_body) + return { + "success": True, + "message": None, + "data": { + "token": "TOKEN", + "masterToken": "MASTER_TOKEN", + "idToken": None, + "parameters": [{"name": "SERVICE_NAME", "value": "FAKE_SERVICE_NAME"}], + }, + } + + # POST requests mock + mockSnowflakeRestfulPostRequest.side_effect = mock_post_request + + with patch.dict(os.environ, {ENV_VAR_PARTNER: PARTNER_NAME}): + # connection + con = snowflake.connector.connect( + user="user", + account="account", + password="testpassword", + database="TESTDB", + warehouse="TESTWH", + ) + assert con.application == PARTNER_NAME + + assert request_body["data"]["CLIENT_ENVIRONMENT"]["APPLICATION"] == PARTNER_NAME
SNOW-576084: Setting environment variable SF_PARTNER doesn't send overridden `application` value in requests Please answer these questions before submitting your issue. Thanks! 1. What version of Python are you using? ``` Python 3.9.11 (main, Apr 5 2022, 09:45:25) [Clang 13.1.6 (clang-1316.0.21.2)] ``` 2. What operating system and processor architecture are you using? ``` macOS-12.3.1-arm64-arm-64bit ``` 3. What are the component versions in the environment (`pip freeze`)? Testing locally ``` -e git+ssh://[email protected]/sfc-gh-zblackwood/snowflake-connector-python.git@0a3ad2de0fda656e8d5871c3eabf9ef6d618e190#egg=snowflake_connector_python ``` 4. What did you do? Here is the issue written as a test (note that the first assert succeeds, and the last fails) ``` @pytest.mark.skipolddriver @patch("snowflake.connector.network.SnowflakeRestful._post_request") def test_partner_env_var(mockSnowflakeRestfulPostRequest, capsys): PARTNER_NAME = "Amanda" def mock_post_request(url, headers, json_body, **kwargs): global mock_cnt print(json_body) ret = None if mock_cnt == 0: # return from /v1/login-request ret = { "success": True, "message": None, "data": { "token": "TOKEN", "masterToken": "MASTER_TOKEN", "idToken": None, "parameters": [ {"name": "SERVICE_NAME", "value": "FAKE_SERVICE_NAME"} ], }, } return ret # POST requests mock mockSnowflakeRestfulPostRequest.side_effect = mock_post_request global mock_cnt mock_cnt = 0 with patch.dict(os.environ, {ENV_VAR_PARTNER: PARTNER_NAME}): # connection con = snowflake.connector.connect( user="user", account="account", password="testpassword", database="TESTDB", warehouse="TESTWH", ) assert con.application == PARTNER_NAME # Check that the json body of the request that is made contains the new # APPLICATION name, instead of the default value captured = capsys.readouterr() assert f'"APPLICATION": "{PARTNER_NAME}"' in captured.out ``` 5. What did you expect to see? I expected that setting ENV_VAR_PARTNER environment variable would result in APPLICATION being overwritten in the requests, but it was not. 6. Can you set logging to DEBUG and collect the logs? ``` import logging import os for logger_name in ['snowflake.sqlalchemy', 'snowflake.connector', 'botocore']: logger = logging.getLogger(logger_name) logger.setLevel(logging.DEBUG) ch = logging.StreamHandler() ch.setLevel(logging.DEBUG) ch.setFormatter(logging.Formatter('%(asctime)s - %(threadName)s %(filename)s:%(lineno)d - %(funcName)s() - %(levelname)s - %(message)s')) logger.addHandler(ch) ``` <!-- If you need urgent assistance reach out to support for escalated issue processing https://community.snowflake.com/s/article/How-To-Submit-a-Support-Case-in-Snowflake-Lodge -->
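The fix that landed moves the override ahead of `connect()`, so the value is already present when the login request body is built. A sketch of that core idea (helper name illustrative):

```python
import os

ENV_VAR_PARTNER = "SF_PARTNER"  # name used by the connector

def resolve_application(kwargs):
    # An explicit application argument always wins over the env var.
    if "application" not in kwargs and ENV_VAR_PARTNER in os.environ:
        kwargs["application"] = os.environ[ENV_VAR_PARTNER]
    return kwargs

os.environ[ENV_VAR_PARTNER] = "Amanda"
assert resolve_application({})["application"] == "Amanda"
assert resolve_application({"application": "X"})["application"] == "X"
```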
0.0
7bb60da3b48edede5b636d449e5291acdc768534
[ "test/unit/test_connection.py::test_partner_env_var" ]
[ "test/unit/test_connection.py::test_connect_with_service_name", "test/unit/test_connection.py::test_is_still_running" ]
{ "failed_lite_validators": [ "has_hyperlinks", "has_git_commit_hash" ], "has_test_patch": true, "is_lite": false }
2022-04-14 18:26:26+00:00
apache-2.0
5,574
snowflakedb__snowflake-connector-python-1465
diff --git a/src/snowflake/connector/cursor.py b/src/snowflake/connector/cursor.py index 16308163..92a574a1 100644 --- a/src/snowflake/connector/cursor.py +++ b/src/snowflake/connector/cursor.py @@ -1205,7 +1205,7 @@ class SnowflakeCursor: except StopIteration: return None - def fetchmany(self, size: int | None = None) -> list[dict | tuple | None]: + def fetchmany(self, size: int | None = None) -> list[tuple] | list[dict]: """Fetches the number of specified rows.""" if size is None: size = self.arraysize @@ -1231,7 +1231,7 @@ class SnowflakeCursor: return ret - def fetchall(self) -> list[dict | tuple | None]: + def fetchall(self) -> list[tuple] | list[dict]: """Fetches all of the results.""" ret = [] while True: diff --git a/src/snowflake/connector/network.py b/src/snowflake/connector/network.py index e6814579..6720d237 100644 --- a/src/snowflake/connector/network.py +++ b/src/snowflake/connector/network.py @@ -20,6 +20,7 @@ from typing import TYPE_CHECKING, Any import OpenSSL.SSL +from snowflake.connector.secret_detector import SecretDetector from snowflake.connector.vendored.requests.models import PreparedRequest from snowflake.connector.vendored.urllib3.connectionpool import ( HTTPConnectionPool, @@ -980,14 +981,9 @@ class SnowflakeRestful: def _handle_unknown_error(self, method, full_url, headers, data, conn) -> None: """Handles unknown errors.""" if data: - try: - decoded_data = json.loads(data) - if decoded_data.get("data") and decoded_data["data"].get("PASSWORD"): - # masking the password - decoded_data["data"]["PASSWORD"] = "********" - data = json.dumps(decoded_data) - except Exception: - logger.info("data is not JSON") + _, masked_data, err_str = SecretDetector.mask_secrets(data) + if err_str is None: + data = masked_data logger.error( f"Failed to get the response. Hanging? " f"method: {method}, url: {full_url}, headers:{headers}, "
snowflakedb/snowflake-connector-python
ec95c563ded4694f69e8bde4eb2f010f92681e58
diff --git a/test/unit/test_retry_network.py b/test/unit/test_retry_network.py index ba983bdf..7f7e69b5 100644 --- a/test/unit/test_retry_network.py +++ b/test/unit/test_retry_network.py @@ -6,6 +6,7 @@ from __future__ import annotations import errno +import logging import os import time from unittest.mock import MagicMock, Mock, PropertyMock @@ -217,3 +218,40 @@ def test_fetch(): assert ret == {} assert cnt.c == 1 # failed on first call - did not retry assert rest._connection.errorhandler.called # error + + +def test_secret_masking(caplog): + connection = MagicMock() + connection.errorhandler = Mock(return_value=None) + + rest = SnowflakeRestful( + host="testaccount.snowflakecomputing.com", port=443, connection=connection + ) + + data = ( + '{"code": 12345,' + ' "data": {"TOKEN": "_Y1ZNETTn5/qfUWj3Jedb", "PASSWORD": "dummy_pass"}' + "}" + ) + default_parameters = { + "method": "POST", + "full_url": "https://testaccount.snowflakecomputing.com/", + "headers": {}, + "data": data, + } + + class NotRetryableException(Exception): + pass + + def fake_request_exec(**kwargs): + return None + + # inject a fake method + rest._request_exec = fake_request_exec + + # first two attempts will fail but third will success + with caplog.at_level(logging.ERROR): + ret = rest.fetch(timeout=10, **default_parameters) + assert '"TOKEN": "****' in caplog.text + assert '"PASSWORD": "****' in caplog.text + assert ret == {}
SNOW-752200: Invalid None values in fetchmany and fetchall introduced in 3.0.1 1. What version of Python are you using? ``` > python --version --version Python 3.11.2 (main, Feb 13 2023, 17:10:22) [Clang 14.0.0 (clang-1400.0.29.202)] ``` 2. What operating system and processor architecture are you using? ``` > python -c 'import platform; print(platform.platform())' macOS-13.2.1-arm64-arm-64bit ``` 3. What are the component versions in the environment (`pip freeze`)? ``` > python -m pip freeze | grep -E 'mypy=|snowflake' mypy==1.0.1 snowflake-connector-python==3.0.1 ``` 4. What did you do? Ran mypy against our code using snowflake-connector-python 5. What did you expect to see? No error 6. Can you set logging to DEBUG and collect the logs? N/A In the 3.0.1 release the `fetchmany` and `fetchall` methods got their return types updated to `list[dict | tuple | None]`. This is actually incorrect since they both contain logic to exclude row values that are `None`. Our own code is expecting lists of dicts or tuples explicitly and the possibility of getting a None row is breaking mypy. The return types should be `list[dict | tuple]`.
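A sketch of why the narrower annotation is correct: `None` only appears as an internal marker that the fetch loop skips, so callers never see it in the returned list. The function below is a stand-in, not the connector's implementation:

```python
from __future__ import annotations

def fetchall(rows: list[tuple | None]) -> list[tuple]:
    out: list[tuple] = []
    for row in rows:
        if row is None:   # internal marker, never surfaced to callers
            continue
        out.append(row)
    return out

assert fetchall([(1,), None, (2,)]) == [(1,), (2,)]
```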
0.0
ec95c563ded4694f69e8bde4eb2f010f92681e58
[ "test/unit/test_retry_network.py::test_secret_masking" ]
[ "test/unit/test_retry_network.py::test_request_exec", "test/unit/test_retry_network.py::test_fetch" ]
{ "failed_lite_validators": [ "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false }
2023-03-02 00:51:37+00:00
apache-2.0
5,575
snowflakedb__snowflake-connector-python-1856
diff --git a/DESCRIPTION.md b/DESCRIPTION.md index a13a3feb..1aa2982a 100644 --- a/DESCRIPTION.md +++ b/DESCRIPTION.md @@ -18,6 +18,7 @@ Source code is also available at: https://github.com/snowflakedb/snowflake-conne - Added support for connecting using an existing connection via the session and master token. - Added support for connecting to Snowflake by authenticating with multiple SAML IDP using external browser. - Fixed compilation issue due to missing cstdint header on gcc13. + - Improved config permissions warning message. - v3.6.0(December 09,2023) diff --git a/src/snowflake/connector/config_manager.py b/src/snowflake/connector/config_manager.py index d9ecf80a..e981a40c 100644 --- a/src/snowflake/connector/config_manager.py +++ b/src/snowflake/connector/config_manager.py @@ -336,7 +336,7 @@ class ConfigManager: ): # for non-Windows, suggest change to 0600 permissions. chmod_message = ( - f". To change owner, run `chown $USER {str(filep)}`. To restrict permissions, run `chmod 0600 {str(filep)}`." + f'.\n * To change owner, run `chown $USER "{str(filep)}"`.\n * To restrict permissions, run `chmod 0600 "{str(filep)}"`.\n' if not IS_WINDOWS else "" )
snowflakedb/snowflake-connector-python
4861887fe94431e5d13db6f29932b668299de1ad
diff --git a/test/unit/test_configmanager.py b/test/unit/test_configmanager.py index 2b42a900..b4a44ebb 100644 --- a/test/unit/test_configmanager.py +++ b/test/unit/test_configmanager.py @@ -561,7 +561,7 @@ def test_warn_config_file_owner(tmp_path, monkeypatch): assert ( str(c[0].message) == f"Bad owner or permissions on {str(c_file)}" - + f". To change owner, run `chown $USER {str(c_file)}`. To restrict permissions, run `chmod 0600 {str(c_file)}`." + + f'.\n * To change owner, run `chown $USER "{str(c_file)}"`.\n * To restrict permissions, run `chmod 0600 "{str(c_file)}"`.\n' ) @@ -581,7 +581,7 @@ def test_warn_config_file_permissions(tmp_path): assert c1["b"] is True assert len(c) == 1 chmod_message = ( - f". To change owner, run `chown $USER {str(c_file)}`. To restrict permissions, run `chmod 0600 {str(c_file)}`." + f'.\n * To change owner, run `chown $USER "{str(c_file)}"`.\n * To restrict permissions, run `chmod 0600 "{str(c_file)}"`.\n' if not IS_WINDOWS else "" )
SNOW-1013234: Fix config permissions warning message ### What is the current behavior? If the permissions / owner of the config file are incorrect, the command outputs a long one-liner containing two commands (`chmod` and `chown`) to call to fix the issue. 1. Many users tend to overlook that there are two commands and give up after one does not fix the issue 2. In Snowflake CLI on macOS the default config path is inside the "Application Support" directory - the space inside the path turned out to be problematic for some users ``` /Users/pczajka/Snowflake/venv3.12/lib/python3.12/site-packages/snowflake/connector/config_manager.py:344: UserWarning: Bad owner or permissions on /Users/pczajka/Library/Application Support/snowflake/config.toml. To change owner, run `chown $USER /Users/pczajka/Library/Application Support/snowflake/config.toml`. To restrict permissions, run `chmod 0600 /Users/pczajka/Library/Application Support/snowflake/config.toml`. warn(f"Bad owner or permissions on {str(filep)}{chmod_message}") ``` ### What is the desired behavior? Reformat the message to highlight that there are two commands to call (fix 1) and add quotes around filenames (fix 2). For example: ``` /Users/pczajka/Snowflake/venv3.12/lib/python3.12/site-packages/snowflake/connector/config_manager.py:344: UserWarning: Bad owner or permissions on /Users/pczajka/Library/Application Support/snowflake/config.toml. * To change owner, run `chown $USER "/Users/pczajka/Library/Application Support/snowflake/config.toml"`. * To restrict permissions, run `chmod 0600 "/Users/pczajka/Library/Application Support/snowflake/config.toml"`. warn(f"Bad owner or permissions on {str(filep)}{chmod_message}") ``` ### How would this improve `snowflake-connector-python`? Improved user experience (it also improves the user experience of Snowflake CLI) ### References and other background _No response_
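A sketch of the reworked message, mirroring the string built in the patch: one bullet per command, and the path wrapped in double quotes so it survives spaces such as `Application Support` (helper name illustrative):

```python
from pathlib import Path

def permissions_hint(filep: Path) -> str:
    return (
        f'.\n * To change owner, run `chown $USER "{filep}"`.'
        f'\n * To restrict permissions, run `chmod 0600 "{filep}"`.\n'
    )

p = Path("/Users/me/Library/Application Support/snowflake/config.toml")
print(f"Bad owner or permissions on {p}{permissions_hint(p)}")
```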
0.0
4861887fe94431e5d13db6f29932b668299de1ad
[ "test/unit/test_configmanager.py::test_warn_config_file_permissions" ]
[ "test/unit/test_configmanager.py::test_incorrect_config_read", "test/unit/test_configmanager.py::test_simple_config_read", "test/unit/test_configmanager.py::test_simple_config_read_sliced", "test/unit/test_configmanager.py::test_missing_value", "test/unit/test_configmanager.py::test_missing_value_sliced", "test/unit/test_configmanager.py::test_only_in_slice", "test/unit/test_configmanager.py::test_simple_nesting", "test/unit/test_configmanager.py::test_complicated_nesting", "test/unit/test_configmanager.py::test_error_missing_file_path", "test/unit/test_configmanager.py::test_error_invalid_toml", "test/unit/test_configmanager.py::test_error_child_conflict", "test/unit/test_configmanager.py::test_explicit_env_name", "test/unit/test_configmanager.py::test_error_contains", "test/unit/test_configmanager.py::test_error_missing_item", "test/unit/test_configmanager.py::test_error_missing_fp", "test/unit/test_configmanager.py::test_missing_config_file", "test/unit/test_configmanager.py::test_missing_config_files_sliced", "test/unit/test_configmanager.py::test_error_missing_fp_retrieve", "test/unit/test_configmanager.py::test_sf_dirs[None]", "test/unit/test_configmanager.py::test_sf_dirs[1]", "test/unit/test_configmanager.py::test_config_file_resolution_sfdirs_default", "test/unit/test_configmanager.py::test_config_file_resolution_sfdirs_nondefault", "test/unit/test_configmanager.py::test_config_file_resolution_non_sfdirs", "test/unit/test_configmanager.py::test_configoption_missing_root_manager", "test/unit/test_configmanager.py::test_configoption_missing_nest_path", "test/unit/test_configmanager.py::test_configoption_default_value", "test/unit/test_configmanager.py::test_defaultconnectionname" ]
{ "failed_lite_validators": [ "has_many_modified_files" ], "has_test_patch": true, "is_lite": false }
2024-01-19 11:04:38+00:00
apache-2.0
5,576
snowplow__snowplow-python-tracker-348
diff --git a/examples/tracker_api_example/app.py b/examples/tracker_api_example/app.py index 3777a59..41f520c 100644 --- a/examples/tracker_api_example/app.py +++ b/examples/tracker_api_example/app.py @@ -31,10 +31,13 @@ def main(): print("Sending events to " + e.endpoint) + event_subject = Subject() + event_subject.set_color_depth(10) + page_view = PageView( page_url="https://www.snowplow.io", page_title="Homepage", - event_subject=t.subject, + event_subject=event_subject, ) t.track(page_view) diff --git a/snowplow_tracker/events/event.py b/snowplow_tracker/events/event.py index 7f510ba..c9d9b82 100644 --- a/snowplow_tracker/events/event.py +++ b/snowplow_tracker/events/event.py @@ -94,10 +94,13 @@ class Event(object): ): self.payload.add("ttm", int(self.true_timestamp)) - fin_subject = self.event_subject if self.event_subject is not None else subject + if self.event_subject is not None: + fin_payload_dict = self.event_subject.combine_subject(subject) + else: + fin_payload_dict = None if subject is None else subject.standard_nv_pairs - if fin_subject is not None: - self.payload.add_dict(fin_subject.standard_nv_pairs) + if fin_payload_dict is not None: + self.payload.add_dict(fin_payload_dict) return self.payload @property diff --git a/snowplow_tracker/subject.py b/snowplow_tracker/subject.py index 10bcbe2..c3165d3 100644 --- a/snowplow_tracker/subject.py +++ b/snowplow_tracker/subject.py @@ -15,8 +15,9 @@ # language governing permissions and limitations there under. # """ +from typing import Optional from snowplow_tracker.contracts import one_of, greater_than -from snowplow_tracker.typing import SupportedPlatform, SUPPORTED_PLATFORMS +from snowplow_tracker.typing import SupportedPlatform, SUPPORTED_PLATFORMS, PayloadDict DEFAULT_PLATFORM = "pc" @@ -29,7 +30,6 @@ class Subject(object): """ def __init__(self) -> None: - self.standard_nv_pairs = {"p": DEFAULT_PLATFORM} def set_platform(self, value: SupportedPlatform) -> "Subject": @@ -173,3 +173,16 @@ class Subject(object): """ self.standard_nv_pairs["tnuid"] = nuid return self + + def combine_subject(self, subject: Optional["Subject"]) -> PayloadDict: + """ + Merges another instance of Subject, with self taking priority + :param subject Subject to update + :type subject subject + :rtype PayloadDict + + """ + if subject is not None: + return {**subject.standard_nv_pairs, **self.standard_nv_pairs} + + return self.standard_nv_pairs
snowplow/snowplow-python-tracker
b55c30205399b415e8dae7cb748d66ddb96c2ac7
diff --git a/snowplow_tracker/test/unit/test_subject.py b/snowplow_tracker/test/unit/test_subject.py index 7645781..953a0a7 100644 --- a/snowplow_tracker/test/unit/test_subject.py +++ b/snowplow_tracker/test/unit/test_subject.py @@ -86,3 +86,31 @@ class TestSubject(unittest.TestCase): s.standard_nv_pairs["vid"] with pytest.raises(KeyError): s.standard_nv_pairs["tnuid"] + + def test_combine_subject(self) -> None: + s = _subject.Subject() + s.set_color_depth(10) + s.set_domain_session_id("domain_session_id") + + s2 = _subject.Subject() + s2.set_domain_user_id("domain_user_id") + s2.set_lang("en") + + fin_payload_dict = s.combine_subject(s2) + + expected_fin_payload_dict = { + "p": "pc", + "cd": 10, + "sid": "domain_session_id", + "duid": "domain_user_id", + "lang": "en", + } + + expected_subject = { + "p": "pc", + "cd": 10, + "sid": "domain_session_id", + } + + self.assertDictEqual(fin_payload_dict, expected_fin_payload_dict) + self.assertDictEqual(s.standard_nv_pairs, expected_subject)
Update payload builder to combine event subjects Currently only one event subject is added to the payload; subjects should be combined to prevent loss of information, in line with other SS trackers.
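The merge rule added by the patch, in isolation: plain dict unpacking where the later (event-level) pairs win on conflicting keys.

```python
def combine(event_pairs, tracker_pairs):
    # Event-level subject takes priority over the tracker-level one.
    if tracker_pairs is None:
        return event_pairs
    return {**tracker_pairs, **event_pairs}  # later unpacking wins

tracker = {"p": "pc", "duid": "domain_user_id", "lang": "en"}
event = {"p": "pc", "cd": 10, "sid": "domain_session_id"}
assert combine(event, tracker) == {"p": "pc", "cd": 10,
                                   "sid": "domain_session_id",
                                   "duid": "domain_user_id", "lang": "en"}
```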
0.0
b55c30205399b415e8dae7cb748d66ddb96c2ac7
[ "snowplow_tracker/test/unit/test_subject.py::TestSubject::test_combine_subject" ]
[ "snowplow_tracker/test/unit/test_subject.py::TestSubject::test_subject_0", "snowplow_tracker/test/unit/test_subject.py::TestSubject::test_subject_1" ]
{ "failed_lite_validators": [ "has_short_problem_statement", "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false }
2023-06-13 17:35:00+00:00
apache-2.0
5,577
snudler6__time-travel-63
diff --git a/.appveyor.yml b/.appveyor.yml index bf9a1ab..92262e2 100644 --- a/.appveyor.yml +++ b/.appveyor.yml @@ -3,7 +3,6 @@ environment: # For Python versions available on Appveyor, see # http://www.appveyor.com/docs/installed-software#python - TOXENV: "py27" - - TOXENV: "py34" - TOXENV: "py35" - TOXENV: "py36" - TOXENV: "pypy" @@ -13,6 +12,7 @@ install: - if "%TOXENV%"=="pypy" choco install python.pypy - if "%TOXENV%"=="pypy" set PATH=C:\tools\pypy\pypy;%PATH% # so tox can find pypy + - pip install --upgrade virtualenv - pip install tox build: off diff --git a/.travis.yml b/.travis.yml index e0d6151..997eb4e 100644 --- a/.travis.yml +++ b/.travis.yml @@ -6,8 +6,6 @@ matrix: include: - python: 2.7 env: TOXENV=py27 - - python: 3.4 - env: TOXENV=py34 - python: 3.5 env: TOXENV=py35 - python: 3.6 diff --git a/setup.py b/setup.py index 505b7c2..4e035d1 100644 --- a/setup.py +++ b/setup.py @@ -14,6 +14,8 @@ patchers = [ if hasattr(select, 'poll'): patchers.append('poll_patcher = time_travel.patchers.poll_patcher:PollPatcher') +if hasattr(select, 'epoll'): + patchers.append('epoll_patcher = time_travel.patchers.epoll_patcher:EpollPatcher') setup( diff --git a/src/time_travel/patchers/epoll_patcher.py b/src/time_travel/patchers/epoll_patcher.py new file mode 100644 index 0000000..5bf2e54 --- /dev/null +++ b/src/time_travel/patchers/epoll_patcher.py @@ -0,0 +1,13 @@ +"""A patch to the select.epoll object.""" + +from .poll_patcher import PollPatcher + +import select as select_lib + + +class EpollPatcher(PollPatcher): + """Patcher for select.epoll.""" + + def get_patch_actions(self): + """Return generator containing all patches to do.""" + return [('epoll', select_lib.epoll, self._mock_poll)] diff --git a/tox.ini b/tox.ini index be16386..ab1cddf 100644 --- a/tox.ini +++ b/tox.ini @@ -1,13 +1,13 @@ [tox] -envlist = py27, py34, py35, py36, pypy, docs +envlist = py27, py35, py36, pypy, docs [testenv] -setenv = +setenv = PYTHONPATH={toxinidir}/src/ deps = -rrequirements/runtime.txt -rrequirements/tests.txt -commands = +commands = pycodestyle src/ pydocstyle -v src/ py.test --cache-clear -vv src/
snudler6/time-travel
99044a01d3cfb4ea4950432c2f044683ff3ece40
diff --git a/src/tests/test_poll.py b/src/tests/test_poll.py index 906c4e8..42b89c0 100644 --- a/src/tests/test_poll.py +++ b/src/tests/test_poll.py @@ -1,4 +1,5 @@ from time_travel.patchers.poll_patcher import PollPatcher +from time_travel.patchers.epoll_patcher import EpollPatcher from time_travel.time_machine_clock import TimeMachineClock from time_travel.event_pool import EventPool from .utils import _t @@ -21,12 +22,18 @@ class TestPollPatcher(object): """Start a poll patcher""" self.event_pool = EventPool() self.clock = TimeMachineClock(clock_listeners=[self.event_pool]) - self.patcher = PollPatcher(self.clock, - self.event_pool, - modules_to_patch=__name__) + self.patcher = self.get_patcher(self.clock, + self.event_pool, + modules_to_patch=__name__) self.patcher.start() - self.poll = select.poll() + self.poll = self.get_poll() + + def get_poll(self): + return select.poll() + + def get_patcher(self, *args, **kwargs): + return PollPatcher(*args, **kwargs) def teardown_method(self, method): """Stop the poll patcher""" @@ -177,3 +184,13 @@ class TestPollPatcher(object): assert self.poll.poll() == [(sock, select.POLLOUT)] assert self.clock.time == _t(10) + + [email protected](not hasattr(select, 'epoll'), + reason='select.epoll is not supported in this platform') +class TestEpollPatcher(TestPollPatcher): + def get_poll(self): + return select.epoll() + + def get_patcher(self, *args, **kwargs): + return EpollPatcher(*args, **kwargs) diff --git a/src/tests/test_time_travel.py b/src/tests/test_time_travel.py index 8e5378c..8f51e84 100644 --- a/src/tests/test_time_travel.py +++ b/src/tests/test_time_travel.py @@ -125,3 +125,18 @@ def test_poll(): now = t.clock.time assert poll.poll() == [(sock, select.POLLIN)] assert time.time() == now + 2 + + [email protected](not hasattr(select, 'epoll'), + reason='select.epoll is not supported in this platform') +def test_epoll(): + with TimeTravel(modules_to_patch=__name__) as t: + sock = socket.socket() + t.add_future_event(2, sock, select.POLLIN) + + poll = select.epoll() + poll.register(sock, select.POLLIN | select.POLLOUT) + + now = t.clock.time + assert poll.poll() == [(sock, select.POLLIN)] + assert time.time() == now + 2
Add a patch for select.epoll. Patch poll, epoll, ...
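The shape of the fix, reduced to a sketch: epoll reuses poll's mock entirely, so the epoll patcher only overrides which selector gets replaced. The base class here is a stand-in for time_travel's `PollPatcher`:

```python
import select

class PollPatcherSketch:
    def __init__(self, mock_poll):
        self._mock_poll = mock_poll

    def get_patch_actions(self):
        return [("poll", select.poll, self._mock_poll)]

class EpollPatcherSketch(PollPatcherSketch):
    # Everything else is inherited; only the patched name differs.
    def get_patch_actions(self):
        return [("epoll", select.epoll, self._mock_poll)]

if hasattr(select, "epoll"):  # epoll is Linux-only
    name, real, fake = EpollPatcherSketch(object()).get_patch_actions()[0]
    assert name == "epoll" and real is select.epoll
```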
0.0
99044a01d3cfb4ea4950432c2f044683ff3ece40
[ "src/tests/test_poll.py::TestPollPatcher::test_empty_with_timeout", "src/tests/test_poll.py::TestPollPatcher::test_empty_without_timeout", "src/tests/test_poll.py::TestPollPatcher::test_one_socket", "src/tests/test_poll.py::TestPollPatcher::test_timeout_before_event", "src/tests/test_poll.py::TestPollPatcher::test_unregistered_events", "src/tests/test_poll.py::TestPollPatcher::test_multiple_sockets_same_time", "src/tests/test_poll.py::TestPollPatcher::test_same_socket_multiple_events", "src/tests/test_poll.py::TestPollPatcher::test_event_not_in_mask", "src/tests/test_poll.py::TestPollPatcher::test_event_not_returned_twice", "src/tests/test_poll.py::TestPollPatcher::test_same_event_multiple_timestamps", "src/tests/test_poll.py::TestPollPatcher::test_unregister", "src/tests/test_poll.py::TestPollPatcher::test_modify", "src/tests/test_poll.py::TestEpollPatcher::test_empty_with_timeout", "src/tests/test_poll.py::TestEpollPatcher::test_empty_without_timeout", "src/tests/test_poll.py::TestEpollPatcher::test_one_socket", "src/tests/test_poll.py::TestEpollPatcher::test_timeout_before_event", "src/tests/test_poll.py::TestEpollPatcher::test_unregistered_events", "src/tests/test_poll.py::TestEpollPatcher::test_multiple_sockets_same_time", "src/tests/test_poll.py::TestEpollPatcher::test_same_socket_multiple_events", "src/tests/test_poll.py::TestEpollPatcher::test_event_not_in_mask", "src/tests/test_poll.py::TestEpollPatcher::test_event_not_returned_twice", "src/tests/test_poll.py::TestEpollPatcher::test_same_event_multiple_timestamps", "src/tests/test_poll.py::TestEpollPatcher::test_unregister", "src/tests/test_poll.py::TestEpollPatcher::test_modify", "src/tests/test_time_travel.py::test_time_patch_set_time", "src/tests/test_time_travel.py::test_sleep_patch_sleep", "src/tests/test_time_travel.py::test_datetime_patch_set_time", "src/tests/test_time_travel.py::test_patch_without_module_name", "src/tests/test_time_travel.py::test_patch_stop_afer_scope_end", "src/tests/test_time_travel.py::test_inner_importing_of_datetime", "src/tests/test_time_travel.py::test_no_renaming_patching", "src/tests/test_time_travel.py::test_sleep_changing_datetime_now", "src/tests/test_time_travel.py::test_select_no_timeout", "src/tests/test_time_travel.py::test_select_with_timeout", "src/tests/test_time_travel.py::test_select_timeout_occurring", "src/tests/test_time_travel.py::test_poll" ]
[]
{ "failed_lite_validators": [ "has_short_problem_statement", "has_added_files", "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false }
2019-10-23 14:16:30+00:00
mit
5,578
soar-telescope__goodman_focus-16
diff --git a/CHANGES.rst b/CHANGES.rst index f3b2832..b896220 100644 --- a/CHANGES.rst +++ b/CHANGES.rst @@ -1,5 +1,5 @@ -0.1.4 (Not released yet) -======================== +0.2.0 +===== - Added messages when no file matches the `--obstype` value, by default is `FOCUS` [#9] @@ -11,6 +11,9 @@ several issues. For instance `--debug` was unusable, and also there were duplicated log entries for the file handler when used as a library in other application. [#10] +- Replaced the use of the function `get_args` by using arguments on class + instantiation instead +- Created name for modes [#11] 0.1.3 ===== diff --git a/goodman_focus/goodman_focus.py b/goodman_focus/goodman_focus.py index f8b1316..25853ce 100644 --- a/goodman_focus/goodman_focus.py +++ b/goodman_focus/goodman_focus.py @@ -4,6 +4,7 @@ import matplotlib.pyplot as plt import numpy as np import os import pandas +import re import sys from astropy.stats import sigma_clip @@ -18,6 +19,7 @@ import logging.config LOG_FORMAT = '[%(asctime)s][%(levelname)s]: %(message)s' LOG_LEVEL = logging.INFO +# LOG_LEVEL = logging.CRITICAL DATE_FORMAT = '%H:%M:%S' @@ -65,6 +67,12 @@ def get_args(arguments=None): help='Model to use in fitting the features in order to' 'obtain the FWHM for each of them') + parser.add_argument('--plot-results', + action='store_true', + dest='plot_results', + help='Show a plot when it finishes the focus ' + 'calculation') + parser.add_argument('--debug', action='store_true', dest='debug', @@ -72,14 +80,6 @@ def get_args(arguments=None): args = parser.parse_args(args=arguments) - if not os.path.isdir(args.data_path): - log.error("Data location {} does not exist".format(args.data_path)) - sys.exit(0) - elif len(glob.glob(os.path.join(args.data_path, args.file_pattern))) == 0: - log.error("There are no files matching \"{}\" in the folder \"{}\"" - "".format(args.file_pattern, args.data_path)) - sys.exit(0) - return args @@ -271,15 +271,28 @@ class GoodmanFocus(object): 'OBSTYPE', 'ROI'] - def __init__(self, arguments=None): - self.args = get_args(arguments=arguments) + def __init__(self, + data_path=os.getcwd(), + file_pattern="*.fits", + obstype="FOCUS", + features_model='gaussian', + plot_results=False, + debug=False): + + self.data_path = data_path + self.file_pattern = file_pattern + self.obstype = obstype + self.features_model = features_model + self.plot_results = plot_results + self.debug = debug + self.log = logging.getLogger(__name__) - if self.args.debug: + if self.debug: self.log.setLevel(level=logging.DEBUG) - if self.args.features_model == 'gaussian': + if self.features_model == 'gaussian': self.feature_model = models.Gaussian1D() - elif self.args.features_model == 'moffat': + elif self.features_model == 'moffat': self.feature_model = models.Moffat1D() self.__ccd = None @@ -291,16 +304,26 @@ class GoodmanFocus(object): self.fitter = fitting.LevMarLSQFitter() self.linear_fitter = fitting.LinearLSQFitter() - if os.path.isdir(self.args.data_path): - self.full_path = self.args.data_path + if os.path.isdir(self.data_path): + self.full_path = self.data_path + if not os.listdir(self.full_path): + self.log.critical("Directory is empty") + sys.exit(0) + # print(glob.glob(os.path.join(self.full_path, self.file_pattern))) + elif not glob.glob(os.path.join(self.full_path, self.file_pattern)): + self.log.critical('Directory {} does not containe files ' + 'matching the pattern {}' + ''.format(self.full_path, self.file_pattern)) + sys.exit(0) else: - sys.exit("No such directory") + self.log.critical("No such directory") + sys.exit(0) _ifc = ImageFileCollection(self.full_path, keywords=self.keywords) self.ifc = _ifc.summary.to_pandas() self.log.debug("Found {} FITS files".format(self.ifc.shape[0])) - self.ifc = self.ifc[(self.ifc['OBSTYPE'] == self.args.obstype)] + self.ifc = self.ifc[(self.ifc['OBSTYPE'] == self.obstype)] if self.ifc.shape[0] != 0: self.log.debug("Found {} FITS files with OBSTYPE = FOCUS".format( self.ifc.shape[0])) @@ -348,22 +371,24 @@ class GoodmanFocus(object): if value is not None: self._fwhm = value - def __call__(self, *args, **kwargs): for focus_group in self.focus_groups: - # print(focus_group) + mode_name = self._get_mode_name(focus_group) focus_dataframe = self.get_focus_data(group=focus_group) self._fit(df=focus_dataframe) - self.log.info("Best Focus for {} is {}".format(self.file_name, - self.__best_focus)) - if True: # pragma: no cover + self.log.info("Best Focus for mode {} is {}".format( + mode_name, + self.__best_focus)) + if self.plot_results: # pragma: no cover + # TODO (simon): Do properly using matplotlib or pandas alone # fig = plt.subplots() focus_dataframe.plot(x='focus', y='fwhm', marker='x') - plt.axvline(self.__best_focus) - plt.title("Best Focus: {}".format(self.__best_focus)) + plt.axvline(self.__best_focus, color='k', label='Best Focus') + plt.title("Best Focus:\n{} {:.3f}".format( + mode_name, + self.__best_focus)) focus_list = focus_dataframe['focus'].tolist() new_x_axis = np.linspace(focus_list[0], focus_list[-1], 1000) plt.plot(new_x_axis, @@ -391,6 +416,24 @@ class GoodmanFocus(object): return self.__best_focus + @staticmethod + def _get_mode_name(group): + unique_values = group.drop_duplicates( + subset=['INSTCONF', 'FILTER', 'FILTER2', 'WAVMODE'], keep='first') + + if unique_values['WAVMODE'].values == ['Imaging']: + mode_name = 'IM_{}_{}'.format( + unique_values['INSTCONF'].values[0], + unique_values['FILTER'].values[0]) + else: + mode_name = 'SP_{}_{}_{}'.format( + unique_values['INSTCONF'].values[0], + unique_values['WAVMODE'].values[0], + unique_values['FILTER2'].values[0]) + mode_name = re.sub('[<> ]', '', mode_name) + # mode_name = re.sub('[- ]', '_', mode_name) + return mode_name + def get_focus_data(self, group): """Collects all the relevant data for finding best focus @@ -416,7 +459,7 @@ class GoodmanFocus(object): peaks, values, x_axis, profile = get_peaks( ccd=self.__ccd, file_name=self.file_name, - plots=self.args.debug) + plots=self.debug) self.fwhm = get_fwhm(peaks=peaks, values=values, @@ -450,13 +493,16 @@ def run_goodman_focus(args=None): # pragma: no cover args (list): (optional) a list of arguments and respective values. """ - - goodman_focus = GoodmanFocus(arguments=args) + args = get_args(arguments=args) + goodman_focus = GoodmanFocus(data_path=args.data_path, + file_pattern=args.file_pattern, + obstype=args.obstype, + features_model=args.features_model, + plot_results=args.plot_results, + debug=args.debug) goodman_focus() if __name__ == '__main__': # pragma: no cover # full_path = '/user/simon/data/soar/work/focus2' - get_focus = GoodmanFocus() - get_focus() - + run_goodman_focus() diff --git a/goodman_focus/version.py b/goodman_focus/version.py index 0e285e2..4e02b70 100644 --- a/goodman_focus/version.py +++ b/goodman_focus/version.py @@ -1,2 +1,2 @@ # This is an automatic generated file please do not edit -__version__ = '0.1.4.dev1' \ No newline at end of file +__version__ = '0.2.0' \ No newline at end of file diff --git a/setup.cfg b/setup.cfg index e364ba8..b01f1cd 100644 --- a/setup.cfg +++ b/setup.cfg @@ -32,4 +32,4 @@ install_requires = sphinx>=2.1.2 # version should be PEP440 compatible (http://www.python.org/dev/peps/pep-0440) -version = 0.1.4.dev1 +version = 0.2.0
soar-telescope/goodman_focus
457f0d1169b8a1aea7b1e7944e55e54f4ff1c6ba
diff --git a/goodman_focus/tests/test_goodman_focus.py b/goodman_focus/tests/test_goodman_focus.py index 73c162b..a611a7a 100644 --- a/goodman_focus/tests/test_goodman_focus.py +++ b/goodman_focus/tests/test_goodman_focus.py @@ -9,12 +9,34 @@ from unittest import TestCase, skip from ccdproc import CCDData from ..goodman_focus import GoodmanFocus -from ..goodman_focus import get_peaks, get_fwhm +from ..goodman_focus import get_args, get_peaks, get_fwhm import matplotlib.pyplot as plt logging.disable(logging.CRITICAL) + +class ArgumentTests(TestCase): + + def setUp(self): + self.arg_list = ['--data-path', os.getcwd(), + '--file-pattern', '*.myfile', + '--obstype', 'ANY', + '--features-model', 'moffat', + '--plot-results', + '--debug'] + + def test_get_args_default(self): + args = get_args(arguments=self.arg_list) + self.assertEqual(args.__class__.__name__, 'Namespace') + self.assertEqual(args.data_path, os.getcwd()) + self.assertEqual(args.file_pattern, '*.myfile') + self.assertEqual(args.obstype, 'ANY') + self.assertEqual(args.features_model, 'moffat') + self.assertTrue(args.plot_results) + self.assertTrue(args.debug) + + class GetPeaksTest(TestCase): def setUp(self): @@ -143,7 +165,7 @@ class GoodmanFocusTests(TestCase): focus_data, columns=['file', 'fwhm', 'focus']) - self.goodman_focus = GoodmanFocus(arguments=arguments) + self.goodman_focus = GoodmanFocus() def test_get_focus_data(self): @@ -163,21 +185,49 @@ class GoodmanFocusTests(TestCase): self.goodman_focus() self.assertIsNotNone(self.goodman_focus.fwhm) + def test__call__Moffat1D(self): + self.goodman_focus = GoodmanFocus(features_model='moffat') + self.goodman_focus() + self.assertIsNotNone(self.goodman_focus.fwhm) + def tearDown(self): for _file in self.file_list: os.unlink(_file) -class DirectoryAndFilesTest(TestCase): +class SpectroscopicModeNameTests(TestCase): def setUp(self): - self.arguments = [ - '--data-path', os.path.join(os.getcwd(), 'nonexisting'), - '--file-pattern', '*.fits', - '--obstype', 'FOCUS', - '--features-model', 'gaussian'] + self.data = {'file': ['file_{}.fits'.format(i + 1) for i in range(5)], + 'INSTCONF': ['Blue'] * 5, + 'FILTER': ['FILTER-X'] * 5, + 'FILTER2': ['NO FILTER'] * 5, + 'WAVMODE': ['Imaging'] * 5} + + + def test_imaging_mode(self): + df = pandas.DataFrame(self.data) + expected_name = 'IM_Blue_FILTER-X' + mode_name = GoodmanFocus._get_mode_name(group=df) + self.assertEqual(mode_name, expected_name) + + def test_spectroscopy_mode(self): + self.data['WAVMODE'] = ['400 z1'] * 5 + df = pandas.DataFrame(self.data) + + expected_name = 'SP_Blue_400z1_NOFILTER' + + mode_name = GoodmanFocus._get_mode_name(group=df) + + self.assertEqual(mode_name, expected_name) + + +class DirectoryAndFilesTest(TestCase): + + def setUp(self): os.mkdir(os.path.join(os.getcwd(), 'test_dir_empty')) + os.mkdir(os.path.join(os.getcwd(), 'test_dir_no_matching_files')) os.mkdir(os.path.join(os.getcwd(), 'test_dir_no_focus')) for i in range(3): ccd = CCDData(data=np.ones((100, 100)), @@ -203,19 +253,37 @@ class DirectoryAndFilesTest(TestCase): def test_directory_does_not_exists(self): # goodman_focus = GoodmanFocus(arguments=arguments) - self.assertRaises(SystemExit, GoodmanFocus, self.arguments) + path_non_existing = os.path.join(os.getcwd(), 'non-existing') + self.assertRaises(SystemExit, GoodmanFocus, path_non_existing) def test_directory_exists_but_empty(self): - self.arguments[1] = os.path.join(os.getcwd(), 'test_dir_empty') - self.assertRaises(SystemExit, GoodmanFocus, self.arguments) + empty_path = os.path.join(os.getcwd(), 'test_dir_empty') + self.assertRaises(SystemExit, GoodmanFocus, empty_path) - def test_no_focus_files(self): - self.arguments[1] = os.path.join(os.getcwd(), 'test_dir_no_focus') - self.assertRaises(SystemExit, GoodmanFocus, self.arguments) + def test_directory_not_empty_and_no_matching_files(self): + path = os.path.join(os.getcwd(), 'test_dir_no_matching_files') + open(os.path.join(path, 'sample_file.txt'), 'a').close() + + self.assertRaises(SystemExit, GoodmanFocus, path) + + def test_no_focus_files(self): + path_no_focus_files = os.path.join(os.getcwd(), 'test_dir_no_focus') + self.assertRaises(SystemExit, GoodmanFocus, path_no_focus_files) def tearDown(self): - os.rmdir(os.path.join(os.getcwd(), 'test_dir_empty')) + for _file in os.listdir(os.path.join(os.getcwd(), 'test_dir_no_focus')): os.unlink(os.path.join(os.getcwd(), 'test_dir_no_focus', _file)) + + for _file in os.listdir(os.path.join( + os.getcwd(), + 'test_dir_no_matching_files')): + + os.unlink(os.path.join(os.getcwd(), + 'test_dir_no_matching_files', + _file)) + + os.rmdir(os.path.join(os.getcwd(), 'test_dir_empty')) os.rmdir(os.path.join(os.getcwd(), 'test_dir_no_focus')) + os.rmdir(os.path.join(os.getcwd(), 'test_dir_no_matching_files'))
disable plot by default Plotting should be optional, since not everyone wants it.
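The test patch above drives a `get_args` helper and checks that `--plot-results` and `--debug` come back truthy only when passed. Below is a minimal sketch of how such an opt-in flag could be wired with argparse; only the flag names are taken from the test patch, the defaults and choices are assumptions:

```python
# Hypothetical sketch of get_args(); flag names come from the test patch
# above, everything else (defaults, choices) is assumed.
import argparse
import os


def get_args(arguments=None):
    parser = argparse.ArgumentParser(description="Goodman focus finder")
    parser.add_argument('--data-path', default=os.getcwd())
    parser.add_argument('--file-pattern', default='*.fits')
    parser.add_argument('--obstype', default='FOCUS')
    parser.add_argument('--features-model', default='gaussian',
                        choices=['gaussian', 'moffat'])
    # store_true makes the attribute default to False, so nothing is
    # plotted unless --plot-results is passed explicitly.
    parser.add_argument('--plot-results', action='store_true')
    parser.add_argument('--debug', action='store_true')
    return parser.parse_args(args=arguments)
```

With `action='store_true'`, omitting the flag leaves `args.plot_results` at `False`, which is exactly the disabled-by-default behavior requested.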
0.0
457f0d1169b8a1aea7b1e7944e55e54f4ff1c6ba
[ "goodman_focus/tests/test_goodman_focus.py::ArgumentTests::test_get_args_default", "goodman_focus/tests/test_goodman_focus.py::GoodmanFocusTests::test__fit", "goodman_focus/tests/test_goodman_focus.py::SpectroscopicModeNameTests::test_imaging_mode", "goodman_focus/tests/test_goodman_focus.py::SpectroscopicModeNameTests::test_spectroscopy_mode" ]
[ "goodman_focus/tests/test_goodman_focus.py::DirectoryAndFilesTest::test_directory_does_not_exists", "goodman_focus/tests/test_goodman_focus.py::DirectoryAndFilesTest::test_directory_exists_but_empty", "goodman_focus/tests/test_goodman_focus.py::DirectoryAndFilesTest::test_directory_not_empty_and_no_matching_files", "goodman_focus/tests/test_goodman_focus.py::DirectoryAndFilesTest::test_no_focus_files" ]
{ "failed_lite_validators": [ "has_short_problem_statement", "has_many_modified_files", "has_many_hunks", "has_pytest_match_arg" ], "has_test_patch": true, "is_lite": false }
2019-07-09 20:28:27+00:00
bsd-3-clause
5,579
sodafoundation__delfin-479
diff --git a/delfin/drivers/dell_emc/vmax/client.py b/delfin/drivers/dell_emc/vmax/client.py index 62974b7..ed24ecc 100644 --- a/delfin/drivers/dell_emc/vmax/client.py +++ b/delfin/drivers/dell_emc/vmax/client.py @@ -112,7 +112,44 @@ class VMAXClient(object): try: storage_info = self.rest.get_system_capacity( self.array_id, self.uni_version) - return storage_info + + total_capacity = 0 + used_capacity = 0 + free_capacity = 0 + raw_capacity = 0 + subscribed_capacity = 0 + if int(self.uni_version) < 90: + physical_capacity = storage_info.get('physicalCapacity') + total_cap = storage_info.get('total_usable_cap_gb') + used_cap = storage_info.get('total_allocated_cap_gb') + subscribed_cap = storage_info.get('total_subscribed_cap_gb') + total_raw = physical_capacity.get('total_capacity_gb') + free_cap = total_cap - used_cap + + total_capacity = int(total_cap * units.Gi) + used_capacity = int(used_cap * units.Gi) + free_capacity = int(free_cap * units.Gi) + raw_capacity = int(total_raw * units.Gi) + subscribed_capacity = int(subscribed_cap * units.Gi) + + else: + system_capacity = storage_info['system_capacity'] + physical_capacity = storage_info.get('physicalCapacity') + total_cap = system_capacity.get('usable_total_tb') + used_cap = system_capacity.get('usable_used_tb') + subscribed_cap = system_capacity.get('subscribed_total_tb') + total_raw = physical_capacity.get('total_capacity_gb') + free_cap = total_cap - used_cap + + total_capacity = int(total_cap * units.Ti) + used_capacity = int(used_cap * units.Ti) + free_capacity = int(free_cap * units.Ti) + raw_capacity = int(total_raw * units.Gi) + subscribed_capacity = int(subscribed_cap * units.Ti) + + return total_capacity, used_capacity, free_capacity,\ + raw_capacity, subscribed_capacity + except exception.SSLCertificateFailed: LOG.error('SSL certificate failed when ' 'get storage capacity for VMax') @@ -134,10 +171,20 @@ class VMAXClient(object): pool_info = self.rest.get_srp_by_name( self.array_id, self.uni_version, srp=pool) - srp_cap = pool_info['srp_capacity'] - total_cap = srp_cap['usable_total_tb'] * units.Ti - used_cap = srp_cap['usable_used_tb'] * units.Ti - subscribed_cap = srp_cap['subscribed_total_tb'] * units.Ti + total_cap = 0 + used_cap = 0 + subscribed_cap = 0 + if int(self.uni_version) < 90: + total_cap = pool_info['total_usable_cap_gb'] * units.Gi + used_cap = pool_info['total_allocated_cap_gb'] * units.Gi + subscribed_cap =\ + pool_info['total_subscribed_cap_gb'] * units.Gi + else: + srp_cap = pool_info['srp_capacity'] + total_cap = srp_cap['usable_total_tb'] * units.Ti + used_cap = srp_cap['usable_used_tb'] * units.Ti + subscribed_cap = srp_cap['subscribed_total_tb'] * units.Ti + p = { "name": pool, "storage_id": storage_id, @@ -150,6 +197,7 @@ class VMAXClient(object): "free_capacity": int(total_cap - used_cap), "subscribed_capacity": int(subscribed_cap), } + pool_list.append(p) return pool_list @@ -221,8 +269,9 @@ class VMAXClient(object): sg = vol['storageGroupId'][0] sg_info = self.rest.get_storage_group( self.array_id, self.uni_version, sg) - v['native_storage_pool_id'] = sg_info['srp'] - v['compressed'] = sg_info['compression'] + v['native_storage_pool_id'] =\ + sg_info.get('srp', default_srps[emulation_type]) + v['compressed'] = sg_info.get('compression', False) else: v['native_storage_pool_id'] = default_srps[emulation_type] diff --git a/delfin/drivers/dell_emc/vmax/vmax.py b/delfin/drivers/dell_emc/vmax/vmax.py index cd3ac40..f58e974 100644 --- a/delfin/drivers/dell_emc/vmax/vmax.py +++ 
b/delfin/drivers/dell_emc/vmax/vmax.py @@ -13,7 +13,6 @@ # limitations under the License. from oslo_log import log -from oslo_utils import units from delfin.common import constants from delfin.drivers.dell_emc.vmax.alert_handler import snmp_alerts from delfin.drivers.dell_emc.vmax.alert_handler import unisphere_alerts @@ -43,14 +42,9 @@ class VMAXStorageDriver(driver.StorageDriver): display_name = array_details['display_name'] # Get Storage details for capacity info - storg_info = self.client.get_storage_capacity() - system_capacity = storg_info['system_capacity'] - physical_capacity = storg_info['physicalCapacity'] - total_cap = system_capacity.get('usable_total_tb') - used_cap = system_capacity.get('usable_used_tb') - subscribed_cap = system_capacity.get('subscribed_total_tb') - total_raw = physical_capacity.get('total_capacity_gb') - free_cap = total_cap - used_cap + total_capacity, used_capacity, free_capacity,\ + raw_capacity, subscribed_capacity = \ + self.client.get_storage_capacity() storage = { # Unisphere Rest API do not provide Array name . @@ -63,11 +57,11 @@ class VMAXStorageDriver(driver.StorageDriver): 'status': constants.StorageStatus.NORMAL, 'serial_number': self.client.array_id, 'location': '', - 'total_capacity': int(total_cap * units.Ti), - 'used_capacity': int(used_cap * units.Ti), - 'free_capacity': int(free_cap * units.Ti), - 'raw_capacity': int(total_raw * units.Gi), - 'subscribed_capacity': int(subscribed_cap * units.Ti) + 'total_capacity': total_capacity, + 'used_capacity': used_capacity, + 'free_capacity': free_capacity, + 'raw_capacity': raw_capacity, + 'subscribed_capacity': subscribed_capacity } LOG.info("get_storage(), successfully retrieved storage details") return storage
sodafoundation/delfin
c5c0dd4b94c149dae5295a99174b3a6e6590da41
diff --git a/delfin/tests/unit/drivers/dell_emc/vmax/test_vmax.py b/delfin/tests/unit/drivers/dell_emc/vmax/test_vmax.py index 53c6713..0284ec8 100644 --- a/delfin/tests/unit/drivers/dell_emc/vmax/test_vmax.py +++ b/delfin/tests/unit/drivers/dell_emc/vmax/test_vmax.py @@ -109,6 +109,14 @@ class TestVMAXStorageDriver(TestCase): } } + system_capacity_84 = { + 'total_usable_cap_gb': 100 * 1024, + 'total_allocated_cap_gb': 75 * 1024, + 'total_subscribed_cap_gb': 200 * 1024, + 'physicalCapacity': { + 'total_capacity_gb': 1500 + } + } kwargs = VMAX_STORAGE_CONF mock_version.return_value = ['V9.0.2.7', '90'] @@ -128,6 +136,11 @@ class TestVMAXStorageDriver(TestCase): ret = driver.get_storage(context) self.assertDictEqual(ret, expected) + driver.client.uni_version = '84' + mock_capacity.return_value = system_capacity_84 + ret = driver.get_storage(context) + self.assertDictEqual(ret, expected) + mock_array_details.side_effect = exception.StorageBackendException with self.assertRaises(Exception) as exc: driver.get_storage(context)
Add VMAX driver support for Unisphere 8.4 **Issue/Feature Description:** Add VMAX driver support for Unisphere 8.4 **Why this issue needs to be fixed / why the feature is needed (give scenarios or use cases):** **How to reproduce, in case of a bug:** **Other Notes / Environment Information: (Please give the env information, log link or any useful information for this issue)**
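For context, the patch above splits capacity parsing on the Unisphere REST version: 8.4 (version < 90) reports GB-denominated fields at the top level of the response, while 9.x nests TB-denominated fields under `system_capacity`. A stripped-down sketch of that split, with field names taken from the patch and plain constants standing in for `oslo_utils.units`:

```python
# Minimal sketch of the version-dependent parsing the fix introduces.
# Field names come from the patch; GI/TI match oslo_utils.units.Gi/Ti.
GI = 2 ** 30  # bytes per GiB
TI = 2 ** 40  # bytes per TiB


def parse_capacity(storage_info, uni_version):
    if int(uni_version) < 90:  # Unisphere 8.4: flat, GB-denominated fields
        total = storage_info['total_usable_cap_gb'] * GI
        used = storage_info['total_allocated_cap_gb'] * GI
    else:  # Unisphere 9.x: nested, TB-denominated fields
        cap = storage_info['system_capacity']
        total = cap['usable_total_tb'] * TI
        used = cap['usable_used_tb'] * TI
    return int(total), int(used), int(total - used)
```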
0.0
c5c0dd4b94c149dae5295a99174b3a6e6590da41
[ "delfin/tests/unit/drivers/dell_emc/vmax/test_vmax.py::TestVMAXStorageDriver::test_get_storage" ]
[ "delfin/tests/unit/drivers/dell_emc/vmax/test_vmax.py::TestVMAXStorageDriver::test_get_storage_performance", "delfin/tests/unit/drivers/dell_emc/vmax/test_vmax.py::TestVMAXStorageDriver::test_init", "delfin/tests/unit/drivers/dell_emc/vmax/test_vmax.py::TestVMAXStorageDriver::test_list_storage_pools", "delfin/tests/unit/drivers/dell_emc/vmax/test_vmax.py::TestVMAXStorageDriver::test_list_volumes", "delfin/tests/unit/drivers/dell_emc/vmax/test_vmax.py::TestVMAXStorageDriver::test_rest" ]
{ "failed_lite_validators": [ "has_many_modified_files", "has_many_hunks", "has_pytest_match_arg" ], "has_test_patch": true, "is_lite": false }
2021-02-07 07:51:57+00:00
apache-2.0
5,580
softlayer__softlayer-python-1679
diff --git a/SoftLayer/CLI/order/item_list.py b/SoftLayer/CLI/order/item_list.py index eef805d2..173e30a2 100644 --- a/SoftLayer/CLI/order/item_list.py +++ b/SoftLayer/CLI/order/item_list.py @@ -8,18 +8,19 @@ from SoftLayer.managers import ordering from SoftLayer.utils import lookup -COLUMNS = ['category', 'keyName', 'description', 'priceId'] -COLUMNS_ITEM_PRICES = ['keyName', 'priceId', 'Hourly', 'Monthly', 'Restriction'] -COLUMNS_ITEM_PRICES_LOCATION = ['keyName', 'priceId', 'Hourly', 'Monthly', 'Restriction'] +COLUMNS = ['Category', 'KeyName', 'Description', 'Price Id'] +COLUMNS_ITEM_PRICES = ['KeyName', 'Price Id', 'Hourly', 'Monthly', 'Restriction'] +COLUMNS_ITEM_PRICES_LOCATION = ['KeyName', 'Price Id', 'Hourly', 'Monthly', 'Restriction'] @click.command(cls=SLCommand) @click.argument('package_keyname') @click.option('--keyword', '-k', help="A word (or string) used to filter item names.") @click.option('--category', '-c', help="Category code to filter items by") [email protected]('--prices', '-p', is_flag=True, help='Use --prices to list the server item prices, and to list the ' - 'Item Prices by location, add it to the --prices option using ' - 'location KeyName, e.g. --prices AMSTERDAM02') [email protected]('--prices', '-p', is_flag=True, + help='Use --prices to list the server item prices, and to list the ' + 'Item Prices by location, add it to the --prices option using ' + 'location KeyName, e.g. --prices AMSTERDAM02') @click.argument('location', required=False) @environment.pass_env def cli(env, package_keyname, keyword, category, prices, location=None): @@ -66,7 +67,7 @@ def cli(env, package_keyname, keyword, category, prices, location=None): for item in sorted_items[category_name]: table_items_detail.add_row([category_name, item['keyName'], item['description'], get_price(item)]) tables.append(table_items_detail) - env.fout(formatting.listing(tables, separator='\n')) + env.fout(tables) def sort_items(items):
softlayer/softlayer-python
950a54b48b32a00865d4e80efdeaed1102f01a04
diff --git a/tests/CLI/modules/order_tests.py b/tests/CLI/modules/order_tests.py index d493b67f..c1541594 100644 --- a/tests/CLI/modules/order_tests.py +++ b/tests/CLI/modules/order_tests.py @@ -50,9 +50,9 @@ def test_item_list_prices(self): self.assert_no_fail(result) output = json.loads(result.output) - self.assertEqual(output[0][0]['priceId'], 1007) + self.assertEqual(output[0][0]['Price Id'], 1007) self.assertEqual(output[0][1]['Restriction'], '- - - -') - self.assertEqual(output[0][1]['keyName'], 'KeyName015') + self.assertEqual(output[0][1]['KeyName'], 'KeyName015') self.assert_called_with('SoftLayer_Product_Package', 'getItems') def test_item_list_location_keyname(self): @@ -61,8 +61,8 @@ def test_item_list_location_keyname(self): self.assert_no_fail(result) output = json.loads(result.output) self.assertEqual(output[0][0]['Hourly'], 0.0) - self.assertEqual(output[0][1]['keyName'], 'KeyName015') - self.assertEqual(output[0][1]['priceId'], 1144) + self.assertEqual(output[0][1]['KeyName'], 'KeyName015') + self.assertEqual(output[0][1]['Price Id'], 1144) self.assert_called_with('SoftLayer_Product_Package', 'getItemPrices') def test_item_list_location_name(self): @@ -71,8 +71,8 @@ def test_item_list_location_name(self): self.assert_no_fail(result) output = json.loads(result.output) self.assertEqual(output[0][0]['Hourly'], 0.0) - self.assertEqual(output[0][1]['keyName'], 'KeyName015') - self.assertEqual(output[0][1]['priceId'], 1144) + self.assertEqual(output[0][1]['KeyName'], 'KeyName015') + self.assertEqual(output[0][1]['Price Id'], 1144) self.assert_called_with('SoftLayer_Product_Package', 'getItemPrices') def test_item_list_category_keyword(self): @@ -81,8 +81,8 @@ def test_item_list_category_keyword(self): self.assert_no_fail(result) output = json.loads(result.output) self.assertEqual(output[0][0]['Hourly'], 0.0) - self.assertEqual(output[0][1]['keyName'], 'KeyName015') - self.assertEqual(output[0][1]['priceId'], 1144) + self.assertEqual(output[0][1]['KeyName'], 'KeyName015') + self.assertEqual(output[0][1]['Price Id'], 1144) self.assert_called_with('SoftLayer_Product_Package', 'getItemPrices') def test_package_list(self):
Rich table doesn't work for item-list ``` $ slcli order item-list STORAGE_AS_A_SERVICE_STAAS CASTSDFSDFSDFSDFSDF <rich.table.Table object at 0x0000023B30C75810> ```
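The stray repr happens because the old code joined the tables with `formatting.listing`, which stringifies each element, and rich's `Table` renders through a `Console` rather than defining a useful `__str__`. A minimal reproduction of the symptom, assuming the `rich` package is installed:

```python
from rich.console import Console
from rich.table import Table

table = Table("KeyName", "Price Id")
table.add_row("KeyName015", "1144")

# str() falls back to object.__repr__, which is what the CLI printed:
print("\n".join(str(t) for t in [table]))  # <rich.table.Table object at 0x...>

# Rendering through a Console draws the actual grid, which is presumably
# what env.fout(tables) does after the fix:
Console().print(table)
```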
0.0
950a54b48b32a00865d4e80efdeaed1102f01a04
[ "tests/CLI/modules/order_tests.py::OrderTests::test_item_list_category_keyword", "tests/CLI/modules/order_tests.py::OrderTests::test_item_list_location_keyname", "tests/CLI/modules/order_tests.py::OrderTests::test_item_list_location_name", "tests/CLI/modules/order_tests.py::OrderTests::test_item_list_prices" ]
[ "tests/CLI/modules/order_tests.py::OrderTests::test_category_list", "tests/CLI/modules/order_tests.py::OrderTests::test_item_list", "tests/CLI/modules/order_tests.py::OrderTests::test_location_list", "tests/CLI/modules/order_tests.py::OrderTests::test_order_lookup", "tests/CLI/modules/order_tests.py::OrderTests::test_package_list", "tests/CLI/modules/order_tests.py::OrderTests::test_package_list_keyword", "tests/CLI/modules/order_tests.py::OrderTests::test_package_list_type", "tests/CLI/modules/order_tests.py::OrderTests::test_place", "tests/CLI/modules/order_tests.py::OrderTests::test_place_extras_parameter_fail", "tests/CLI/modules/order_tests.py::OrderTests::test_place_quote", "tests/CLI/modules/order_tests.py::OrderTests::test_place_quote_extras_parameter_fail", "tests/CLI/modules/order_tests.py::OrderTests::test_place_with_quantity", "tests/CLI/modules/order_tests.py::OrderTests::test_preset_list", "tests/CLI/modules/order_tests.py::OrderTests::test_preset_list_keywork", "tests/CLI/modules/order_tests.py::OrderTests::test_preset_list_prices", "tests/CLI/modules/order_tests.py::OrderTests::test_quote_detail", "tests/CLI/modules/order_tests.py::OrderTests::test_quote_list", "tests/CLI/modules/order_tests.py::OrderTests::test_quote_place", "tests/CLI/modules/order_tests.py::OrderTests::test_quote_save", "tests/CLI/modules/order_tests.py::OrderTests::test_quote_verify", "tests/CLI/modules/order_tests.py::OrderTests::test_quote_verify_image", "tests/CLI/modules/order_tests.py::OrderTests::test_quote_verify_image_guid", "tests/CLI/modules/order_tests.py::OrderTests::test_quote_verify_postinstall_others", "tests/CLI/modules/order_tests.py::OrderTests::test_quote_verify_sshkey", "tests/CLI/modules/order_tests.py::OrderTests::test_quote_verify_userdata", "tests/CLI/modules/order_tests.py::OrderTests::test_quote_verify_userdata_file", "tests/CLI/modules/order_tests.py::OrderTests::test_verify_hourly", "tests/CLI/modules/order_tests.py::OrderTests::test_verify_monthly" ]
{ "failed_lite_validators": [ "has_short_problem_statement" ], "has_test_patch": true, "is_lite": false }
2022-07-01 15:30:28+00:00
mit
5,581
softlayer__softlayer-python-1680
diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index b009483c..e322a5b4 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -46,6 +46,7 @@ jobs: - name: Publish 📦 to Test PyPI uses: pypa/gh-action-pypi-publish@master with: - password: ${{ secrets.CGALLO_TEST_PYPI }} - repository_url: https://test.pypi.org/legacy/ + user: __token__ + password: ${{ secrets.CGALLO_PYPI }} + repository_url: https://pypi.org/legacy/ diff --git a/SoftLayer/CLI/order/item_list.py b/SoftLayer/CLI/order/item_list.py index eef805d2..173e30a2 100644 --- a/SoftLayer/CLI/order/item_list.py +++ b/SoftLayer/CLI/order/item_list.py @@ -8,18 +8,19 @@ from SoftLayer.managers import ordering from SoftLayer.utils import lookup -COLUMNS = ['category', 'keyName', 'description', 'priceId'] -COLUMNS_ITEM_PRICES = ['keyName', 'priceId', 'Hourly', 'Monthly', 'Restriction'] -COLUMNS_ITEM_PRICES_LOCATION = ['keyName', 'priceId', 'Hourly', 'Monthly', 'Restriction'] +COLUMNS = ['Category', 'KeyName', 'Description', 'Price Id'] +COLUMNS_ITEM_PRICES = ['KeyName', 'Price Id', 'Hourly', 'Monthly', 'Restriction'] +COLUMNS_ITEM_PRICES_LOCATION = ['KeyName', 'Price Id', 'Hourly', 'Monthly', 'Restriction'] @click.command(cls=SLCommand) @click.argument('package_keyname') @click.option('--keyword', '-k', help="A word (or string) used to filter item names.") @click.option('--category', '-c', help="Category code to filter items by") [email protected]('--prices', '-p', is_flag=True, help='Use --prices to list the server item prices, and to list the ' - 'Item Prices by location, add it to the --prices option using ' - 'location KeyName, e.g. --prices AMSTERDAM02') [email protected]('--prices', '-p', is_flag=True, + help='Use --prices to list the server item prices, and to list the ' + 'Item Prices by location, add it to the --prices option using ' + 'location KeyName, e.g. --prices AMSTERDAM02') @click.argument('location', required=False) @environment.pass_env def cli(env, package_keyname, keyword, category, prices, location=None): @@ -66,7 +67,7 @@ def cli(env, package_keyname, keyword, category, prices, location=None): for item in sorted_items[category_name]: table_items_detail.add_row([category_name, item['keyName'], item['description'], get_price(item)]) tables.append(table_items_detail) - env.fout(formatting.listing(tables, separator='\n')) + env.fout(tables) def sort_items(items):
softlayer/softlayer-python
950a54b48b32a00865d4e80efdeaed1102f01a04
diff --git a/tests/CLI/modules/order_tests.py b/tests/CLI/modules/order_tests.py index d493b67f..c1541594 100644 --- a/tests/CLI/modules/order_tests.py +++ b/tests/CLI/modules/order_tests.py @@ -50,9 +50,9 @@ def test_item_list_prices(self): self.assert_no_fail(result) output = json.loads(result.output) - self.assertEqual(output[0][0]['priceId'], 1007) + self.assertEqual(output[0][0]['Price Id'], 1007) self.assertEqual(output[0][1]['Restriction'], '- - - -') - self.assertEqual(output[0][1]['keyName'], 'KeyName015') + self.assertEqual(output[0][1]['KeyName'], 'KeyName015') self.assert_called_with('SoftLayer_Product_Package', 'getItems') def test_item_list_location_keyname(self): @@ -61,8 +61,8 @@ def test_item_list_location_keyname(self): self.assert_no_fail(result) output = json.loads(result.output) self.assertEqual(output[0][0]['Hourly'], 0.0) - self.assertEqual(output[0][1]['keyName'], 'KeyName015') - self.assertEqual(output[0][1]['priceId'], 1144) + self.assertEqual(output[0][1]['KeyName'], 'KeyName015') + self.assertEqual(output[0][1]['Price Id'], 1144) self.assert_called_with('SoftLayer_Product_Package', 'getItemPrices') def test_item_list_location_name(self): @@ -71,8 +71,8 @@ def test_item_list_location_name(self): self.assert_no_fail(result) output = json.loads(result.output) self.assertEqual(output[0][0]['Hourly'], 0.0) - self.assertEqual(output[0][1]['keyName'], 'KeyName015') - self.assertEqual(output[0][1]['priceId'], 1144) + self.assertEqual(output[0][1]['KeyName'], 'KeyName015') + self.assertEqual(output[0][1]['Price Id'], 1144) self.assert_called_with('SoftLayer_Product_Package', 'getItemPrices') def test_item_list_category_keyword(self): @@ -81,8 +81,8 @@ def test_item_list_category_keyword(self): self.assert_no_fail(result) output = json.loads(result.output) self.assertEqual(output[0][0]['Hourly'], 0.0) - self.assertEqual(output[0][1]['keyName'], 'KeyName015') - self.assertEqual(output[0][1]['priceId'], 1144) + self.assertEqual(output[0][1]['KeyName'], 'KeyName015') + self.assertEqual(output[0][1]['Price Id'], 1144) self.assert_called_with('SoftLayer_Product_Package', 'getItemPrices') def test_package_list(self):
fix snapcraft build
```
Run VERSION=`snapcraft list-revisions slcli --arch armhf | grep "edge\*" | awk '{print $1}'`
craft-store error: No keyring found to store or retrieve credentials from.
Recommended resolution: Ensure the keyring is working or SNAPCRAFT_STORE_CREDENTIALS is correctly exported into the environment
For more information, check out: https://snapcraft.io/docs/snapcraft-authentication
Full execution log: '/home/runner/.cache/snapcraft/log/snapcraft-20220630-192939.865889.log'
Publishing on armhf
Usage: snapcraft [OPTIONS] COMMAND [ARGS]...
Try 'snapcraft -h' for help.
Error: No such command 'release'.
Error: Process completed with exit code 2.
```
0.0
950a54b48b32a00865d4e80efdeaed1102f01a04
[ "tests/CLI/modules/order_tests.py::OrderTests::test_item_list_category_keyword", "tests/CLI/modules/order_tests.py::OrderTests::test_item_list_location_keyname", "tests/CLI/modules/order_tests.py::OrderTests::test_item_list_location_name", "tests/CLI/modules/order_tests.py::OrderTests::test_item_list_prices" ]
[ "tests/CLI/modules/order_tests.py::OrderTests::test_category_list", "tests/CLI/modules/order_tests.py::OrderTests::test_item_list", "tests/CLI/modules/order_tests.py::OrderTests::test_location_list", "tests/CLI/modules/order_tests.py::OrderTests::test_order_lookup", "tests/CLI/modules/order_tests.py::OrderTests::test_package_list", "tests/CLI/modules/order_tests.py::OrderTests::test_package_list_keyword", "tests/CLI/modules/order_tests.py::OrderTests::test_package_list_type", "tests/CLI/modules/order_tests.py::OrderTests::test_place", "tests/CLI/modules/order_tests.py::OrderTests::test_place_extras_parameter_fail", "tests/CLI/modules/order_tests.py::OrderTests::test_place_quote", "tests/CLI/modules/order_tests.py::OrderTests::test_place_quote_extras_parameter_fail", "tests/CLI/modules/order_tests.py::OrderTests::test_place_with_quantity", "tests/CLI/modules/order_tests.py::OrderTests::test_preset_list", "tests/CLI/modules/order_tests.py::OrderTests::test_preset_list_keywork", "tests/CLI/modules/order_tests.py::OrderTests::test_preset_list_prices", "tests/CLI/modules/order_tests.py::OrderTests::test_quote_detail", "tests/CLI/modules/order_tests.py::OrderTests::test_quote_list", "tests/CLI/modules/order_tests.py::OrderTests::test_quote_place", "tests/CLI/modules/order_tests.py::OrderTests::test_quote_save", "tests/CLI/modules/order_tests.py::OrderTests::test_quote_verify", "tests/CLI/modules/order_tests.py::OrderTests::test_quote_verify_image", "tests/CLI/modules/order_tests.py::OrderTests::test_quote_verify_image_guid", "tests/CLI/modules/order_tests.py::OrderTests::test_quote_verify_postinstall_others", "tests/CLI/modules/order_tests.py::OrderTests::test_quote_verify_sshkey", "tests/CLI/modules/order_tests.py::OrderTests::test_quote_verify_userdata", "tests/CLI/modules/order_tests.py::OrderTests::test_quote_verify_userdata_file", "tests/CLI/modules/order_tests.py::OrderTests::test_verify_hourly", "tests/CLI/modules/order_tests.py::OrderTests::test_verify_monthly" ]
{ "failed_lite_validators": [ "has_hyperlinks", "has_many_modified_files" ], "has_test_patch": true, "is_lite": false }
2022-07-01 16:13:46+00:00
mit
5,582
softlayer__softlayer-python-1765
diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index e322a5b4..5d0d1277 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -48,5 +48,5 @@ jobs: with: user: __token__ password: ${{ secrets.CGALLO_PYPI }} - repository_url: https://pypi.org/legacy/ + repository_url: https://upload.pypi.org/legacy/ diff --git a/SoftLayer/CLI/cdn/detail.py b/SoftLayer/CLI/cdn/detail.py index a9bdce60..973b1acc 100644 --- a/SoftLayer/CLI/cdn/detail.py +++ b/SoftLayer/CLI/cdn/detail.py @@ -41,6 +41,6 @@ def cli(env, unique_id, history): table.add_row(['status', cdn_mapping['status']]) table.add_row(['total_bandwidth', total_bandwidth]) table.add_row(['total_hits', total_hits]) - table.add_row(['hit_radio', hit_ratio]) + table.add_row(['hit_ratio', hit_ratio]) env.fout(table) diff --git a/SoftLayer/fixtures/SoftLayer_Network_CdnMarketplace_Configuration_Mapping.py b/SoftLayer/fixtures/SoftLayer_Network_CdnMarketplace_Configuration_Mapping.py index dc3ca178..e080c40e 100644 --- a/SoftLayer/fixtures/SoftLayer_Network_CdnMarketplace_Configuration_Mapping.py +++ b/SoftLayer/fixtures/SoftLayer_Network_CdnMarketplace_Configuration_Mapping.py @@ -11,7 +11,7 @@ "path": "/", "protocol": "HTTP", "status": "CNAME_CONFIGURATION", - "uniqueId": "9934111111111", + "uniqueId": "11223344", "vendorName": "akamai" } ] @@ -28,7 +28,7 @@ "path": "/", "protocol": "HTTP", "status": "CNAME_CONFIGURATION", - "uniqueId": "9934111111111", + "uniqueId": "11223344", "vendorName": "akamai" } ] @@ -41,7 +41,7 @@ "performanceConfiguration": "Large file optimization", "protocol": "HTTP", "respectHeaders": True, - "uniqueId": "424406419091111", + "uniqueId": "11223344", "vendorName": "akamai", "header": "www.test.com", "httpPort": 83, diff --git a/SoftLayer/fixtures/SoftLayer_Network_CdnMarketplace_Metrics.py b/SoftLayer/fixtures/SoftLayer_Network_CdnMarketplace_Metrics.py index 6b6aab5b..2e4bc333 100644 --- a/SoftLayer/fixtures/SoftLayer_Network_CdnMarketplace_Metrics.py +++ b/SoftLayer/fixtures/SoftLayer_Network_CdnMarketplace_Metrics.py @@ -6,9 +6,9 @@ "HitRatio" ], "totals": [ - "0.0", - "0", - "0.0" + 1.0, + 3, + 2.0 ], "type": "TOTALS" }
softlayer/softlayer-python
7a672a49d77856cc10da5ad692a315fa5e98372f
diff --git a/tests/CLI/modules/cdn_tests.py b/tests/CLI/modules/cdn_tests.py index c0a96fee..a7dfa340 100644 --- a/tests/CLI/modules/cdn_tests.py +++ b/tests/CLI/modules/cdn_tests.py @@ -4,7 +4,9 @@ :license: MIT, see LICENSE for more details. """ +import datetime import json +from unittest import mock as mock from SoftLayer.CLI import exceptions from SoftLayer import testing @@ -21,27 +23,22 @@ def test_list_accounts(self): 'domain': 'test.example.com', 'origin': '1.1.1.1', 'status': 'CNAME_CONFIGURATION', - 'unique_id': '9934111111111', + 'unique_id': '11223344', 'vendor': 'akamai'}] ) - def test_detail_account(self): + @mock.patch('SoftLayer.utils.days_to_datetime') + def test_detail_account(self, mock_now): + mock_now.return_value = datetime.datetime(2020, 1, 1) result = self.run_command(['cdn', 'detail', '--history=30', '1245']) self.assert_no_fail(result) - self.assertEqual(json.loads(result.output), - {'hit_radio': '0.0 %', - 'hostname': 'test.example.com', - 'origin': '1.1.1.1', - 'origin_type': 'HOST_SERVER', - 'path': '/', - 'protocol': 'HTTP', - 'provider': 'akamai', - 'status': 'CNAME_CONFIGURATION', - 'total_bandwidth': '0.0 GB', - 'total_hits': '0', - 'unique_id': '9934111111111'} - ) + api_results = json.loads(result.output) + self.assertEqual(api_results['hit_ratio'], '2.0 %') + self.assertEqual(api_results['total_bandwidth'], '1.0 GB') + self.assertEqual(api_results['total_hits'], 3) + self.assertEqual(api_results['hostname'], 'test.example.com') + self.assertEqual(api_results['protocol'], 'HTTP') def test_purge_content(self): result = self.run_command(['cdn', 'purge', '1234', @@ -122,7 +119,7 @@ def test_edit_cache(self): self.assertEqual('include: test', header_result['Cache key optimization']) def test_edit_cache_by_uniqueId(self): - result = self.run_command(['cdn', 'edit', '9934111111111', '--cache', 'include-specified', '--cache', 'test']) + result = self.run_command(['cdn', 'edit', '11223344', '--cache', 'include-specified', '--cache', 'test']) self.assert_no_fail(result) header_result = json.loads(result.output) self.assertEqual('include: test', header_result['Cache key optimization']) diff --git a/tests/managers/cdn_tests.py b/tests/managers/cdn_tests.py index 7e56f81a..b0e641c6 100644 --- a/tests/managers/cdn_tests.py +++ b/tests/managers/cdn_tests.py @@ -4,6 +4,8 @@ :license: MIT, see LICENSE for more details. """ +import datetime +from unittest import mock as mock from SoftLayer import fixtures from SoftLayer.managers import cdn @@ -28,7 +30,9 @@ def test_detail_cdn(self): 'listDomainMappingByUniqueId', args=args) - def test_detail_usage_metric(self): + @mock.patch('SoftLayer.utils.days_to_datetime') + def test_detail_usage_metric(self, mock_now): + mock_now.return_value = datetime.datetime(2020, 1, 1) self.cdn_client.get_usage_metrics(12345, history=30, frequency="aggregate") args = (12345, @@ -39,6 +43,15 @@ def test_detail_usage_metric(self): 'getMappingUsageMetrics', args=args) + # Does this still work in 2038 ? 
https://github.com/softlayer/softlayer-python/issues/1764 for context + @mock.patch('SoftLayer.utils.days_to_datetime') + def test_detail_usage_metric_future(self, mock_now): + mock_now.return_value = datetime.datetime(2040, 1, 1) + self.assertRaises( + OverflowError, + self.cdn_client.get_usage_metrics, 12345, history=30, frequency="aggregate" + ) + def test_get_origins(self): self.cdn_client.get_origins("12345") self.assert_called_with('SoftLayer_Network_CdnMarketplace_Configuration_Mapping_Path', @@ -105,7 +118,7 @@ def test_purge_content(self): args=args) def test_cdn_edit(self): - identifier = '9934111111111' + identifier = '11223344' header = 'www.test.com' result = self.cdn_client.edit(identifier, header=header) @@ -116,7 +129,7 @@ def test_cdn_edit(self): 'SoftLayer_Network_CdnMarketplace_Configuration_Mapping', 'updateDomainMapping', args=({ - 'uniqueId': '9934111111111', + 'uniqueId': '11223344', 'originType': 'HOST_SERVER', 'protocol': 'HTTP', 'path': '/',
xmlrpc y2038 problem (originally filed in [the openSUSE Bugzilla](https://bugzilla.suse.com/show_bug.cgi?id=1203311)):
While working on reproducible builds for openSUSE, I found that our python-softlayer package failed tests in 2038 with
```
self = <xmlrpc.client.Marshaller object at 0x7fa5770e91f0>, value = 2167459529
write = <built-in method append of list object at 0x7fa577194740>

    def dump_long(self, value, write):
        if value > MAXINT or value < MININT:
>           raise OverflowError("int exceeds XML-RPC limits")
E           OverflowError: int exceeds XML-RPC limits

/usr/lib64/python3.8/xmlrpc/client.py:539: OverflowError
=========================== short test summary info ============================
FAILED tests/CLI/modules/cdn_tests.py::CdnTests::test_detail_account - Overfl...
FAILED tests/managers/cdn_tests.py::CDNTests::test_detail_usage_metric - Over...
=========== 2 failed, 1831 passed, 6 skipped, 5 deselected in 38.01s ===========
```
To Reproduce:
```
osc co openSUSE:Factory/python-softlayer && cd $_
osc build --noservice --trust-all-projects --vm-type=kvm \
    --alternative-project=home:bmwiedemann:reproducible \
    --build-opt=--vm-custom-opt="-rtc base=2038-01-20T00:00:00" \
    openSUSE_Tumbleweed x86_64
```
(basically run with the machine hardware clock set to post-2038)

**Expected behavior**
Test suite should pass.

**Version**
6.1.2 from tarball.
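The overflow is easy to demonstrate in isolation: the XML-RPC wire format caps `<int>` at signed 32 bits, so marshalling any epoch-style value past 2**31 - 1 (any date after 2038-01-19) trips `dump_long`. Standard library only:

```python
import xmlrpc.client

# Fits in a signed 32-bit int, marshals fine:
xmlrpc.client.dumps((2 ** 31 - 1,))

# The value from the failing test (a post-2038 timestamp) does not:
try:
    xmlrpc.client.dumps((2167459529,))
except OverflowError as err:
    print(err)  # int exceeds XML-RPC limits
```

Any fix either has to keep timestamps sent over XML-RPC below that bound or surface the limit explicitly; the accompanying `test_detail_usage_metric_future` simply asserts the `OverflowError` for a 2040 clock.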
0.0
7a672a49d77856cc10da5ad692a315fa5e98372f
[ "tests/CLI/modules/cdn_tests.py::CdnTests::test_detail_account", "tests/CLI/modules/cdn_tests.py::CdnTests::test_list_accounts", "tests/managers/cdn_tests.py::CDNTests::test_cdn_edit" ]
[ "tests/CLI/modules/cdn_tests.py::CdnTests::test_add_origin_server", "tests/CLI/modules/cdn_tests.py::CdnTests::test_add_origin_storage", "tests/CLI/modules/cdn_tests.py::CdnTests::test_add_origin_storage_with_file_extensions", "tests/CLI/modules/cdn_tests.py::CdnTests::test_add_origin_without_storage", "tests/CLI/modules/cdn_tests.py::CdnTests::test_edit_cache", "tests/CLI/modules/cdn_tests.py::CdnTests::test_edit_cache_by_uniqueId", "tests/CLI/modules/cdn_tests.py::CdnTests::test_edit_header", "tests/CLI/modules/cdn_tests.py::CdnTests::test_edit_http_port", "tests/CLI/modules/cdn_tests.py::CdnTests::test_edit_respect_headers", "tests/CLI/modules/cdn_tests.py::CdnTests::test_list_origins", "tests/CLI/modules/cdn_tests.py::CdnTests::test_purge_content", "tests/CLI/modules/cdn_tests.py::CdnTests::test_remove_origin", "tests/managers/cdn_tests.py::CDNTests::test_add_origin", "tests/managers/cdn_tests.py::CDNTests::test_add_origin_with_bucket_and_file_extension", "tests/managers/cdn_tests.py::CDNTests::test_cdn_instance_by_hostname", "tests/managers/cdn_tests.py::CDNTests::test_detail_cdn", "tests/managers/cdn_tests.py::CDNTests::test_detail_usage_metric", "tests/managers/cdn_tests.py::CDNTests::test_detail_usage_metric_future", "tests/managers/cdn_tests.py::CDNTests::test_get_origins", "tests/managers/cdn_tests.py::CDNTests::test_list_accounts", "tests/managers/cdn_tests.py::CDNTests::test_purge_content", "tests/managers/cdn_tests.py::CDNTests::test_remove_origin" ]
{ "failed_lite_validators": [ "has_hyperlinks", "has_many_modified_files", "has_many_hunks", "has_pytest_match_arg" ], "has_test_patch": true, "is_lite": false }
2022-10-06 18:06:48+00:00
mit
5,583
softlayer__softlayer-python-1868
diff --git a/SoftLayer/CLI/virt/list.py b/SoftLayer/CLI/virt/list.py index a16a1330..ee525ede 100644 --- a/SoftLayer/CLI/virt/list.py +++ b/SoftLayer/CLI/virt/list.py @@ -9,7 +9,6 @@ from SoftLayer.CLI import environment from SoftLayer.CLI import formatting from SoftLayer.CLI import helpers -from SoftLayer import utils # pylint: disable=unnecessary-lambda @@ -56,67 +55,41 @@ @click.option('--hourly', is_flag=True, help='Show only hourly instances') @click.option('--monthly', is_flag=True, help='Show only monthly instances') @click.option('--transient', help='Filter by transient instances', type=click.BOOL) [email protected]('--hardware', is_flag=True, default=False, help='Show the all VSI related to hardware') [email protected]('--all-guests', is_flag=True, default=False, help='Show the all VSI and hardware VSIs') [email protected]('--search', is_flag=False, flag_value="", default=None, + help="Use the more flexible Search API to list instances. See `slcli search --types` for list " + + "of searchable fields.") @helpers.multi_option('--tag', help='Filter by tags') [email protected]('--sortby', - help='Column to sort by', - default='hostname', - show_default=True) [email protected]('--sortby', default='hostname', show_default=True, help='Column to sort by') @click.option('--columns', callback=column_helper.get_formatter(COLUMNS), help='Columns to display. [options: %s]' % ', '.join(column.name for column in COLUMNS), default=','.join(DEFAULT_COLUMNS), show_default=True) [email protected]('--limit', '-l', - help='How many results to get in one api call, default is 100', - default=100, - show_default=True) [email protected]('--limit', '-l', default=100, show_default=True, + help='How many results to get in one api call, default is 100') @environment.pass_env def cli(env, sortby, cpu, domain, datacenter, hostname, memory, network, - hourly, monthly, tag, columns, limit, transient, hardware, all_guests): + hourly, monthly, tag, columns, limit, transient, search): """List virtual servers.""" - vsi = SoftLayer.VSManager(env.client) - guests = vsi.list_instances(hourly=hourly, - monthly=monthly, - hostname=hostname, - domain=domain, - cpus=cpu, - memory=memory, - datacenter=datacenter, - nic_speed=network, - transient=transient, - tags=tag, - mask=columns.mask(), - limit=limit) + guests = [] + if search is not None: + object_mask = f"mask[resource(SoftLayer_Virtual_Guest)[{columns.mask()}]]" + search_manager = SoftLayer.SearchManager(env.client) + guests = search_manager.search_instances(hostname=hostname, domain=domain, datacenter=datacenter, + tags=tag, search_string=search, mask=object_mask) + else: + vsi = SoftLayer.VSManager(env.client) + guests = vsi.list_instances(hourly=hourly, monthly=monthly, hostname=hostname, domain=domain, + cpus=cpu, memory=memory, datacenter=datacenter, nic_speed=network, + transient=transient, tags=tag, mask=columns.mask(), limit=limit) table = formatting.Table(columns.columns) table.sortby = sortby - if not hardware or all_guests: - for guest in guests: - table.add_row([value or formatting.blank() - for value in columns.row(guest)]) - env.fout(table) + for guest in guests: + table.add_row([value or formatting.blank() + for value in columns.row(guest)]) - if hardware or all_guests: - hardware_guests = vsi.get_hardware_guests() - for hd_guest in hardware_guests: - if hd_guest['virtualHost']['guests']: - title = "Hardware(id = {hardwareId}) guests associated".format(hardwareId=hd_guest['id']) - table_hardware_guest = formatting.Table(['id', 'hostname', 'CPU', 
'Memory', 'Start Date', 'Status', - 'powerState'], title=title) - table_hardware_guest.sortby = 'hostname' - for guest in hd_guest['virtualHost']['guests']: - table_hardware_guest.add_row([ - guest['id'], - guest['hostname'], - '%i %s' % (guest['maxCpu'], guest['maxCpuUnits']), - guest['maxMemory'], - utils.clean_time(guest['createDate']), - guest['status']['keyName'], - guest['powerState']['keyName'] - ]) - env.fout(table_hardware_guest) + env.fout(table) diff --git a/SoftLayer/managers/__init__.py b/SoftLayer/managers/__init__.py index 2a895540..7fadd12e 100644 --- a/SoftLayer/managers/__init__.py +++ b/SoftLayer/managers/__init__.py @@ -24,6 +24,7 @@ from SoftLayer.managers.network import NetworkManager from SoftLayer.managers.object_storage import ObjectStorageManager from SoftLayer.managers.ordering import OrderingManager +from SoftLayer.managers.search import SearchManager from SoftLayer.managers.sshkey import SshKeyManager from SoftLayer.managers.ssl import SSLManager from SoftLayer.managers.tags import TagManager @@ -53,6 +54,7 @@ 'ObjectStorageManager', 'OrderingManager', 'PlacementManager', + 'SearchManager', 'SshKeyManager', 'SSLManager', 'TagManager', diff --git a/SoftLayer/managers/search.py b/SoftLayer/managers/search.py index 5f632df2..a0ae8c33 100644 --- a/SoftLayer/managers/search.py +++ b/SoftLayer/managers/search.py @@ -34,3 +34,30 @@ def advanced(self, search_string): """ return self.search_manager.advancedSearch(search_string) + + def search_instances(self, search_string, mask=None, **kwargs): + """Lists VSIs based in the search_string. + + Also takes in a few search terms as **kwargs. such as hostname, datacenter, domain and tags + """ + + # This forces the Search API to do a fuzzy search on our term, kinda. Not sure why the ** are + # Required but it will do an exact search without them. + if search_string: + search_string = f"*{search_string}*" + search_string = f"_objectType:SoftLayer_Virtual_Guest {search_string}" + if kwargs.get('hostname'): + search_string = f"{search_string} hostname: *{kwargs.get('hostname')}*" + if kwargs.get('domain'): + search_string = f"{search_string} domain: *{kwargs.get('domain')}*" + if kwargs.get('datacenter'): + search_string = f"{search_string} datacenter.longName: *{kwargs.get('datacenter')}*" + if kwargs.get('tags'): + tags = " ".join(kwargs.get("tags", [])) + search_string = f"{search_string} internalTagReferences.tag.name: {tags}" + result = self.search_manager.advancedSearch(search_string, mask=mask) + guests = [] + for resource in result: + guests.append(resource.get('resource')) + + return guests diff --git a/SoftLayer/managers/vs.py b/SoftLayer/managers/vs.py index f6520849..ef51be94 100644 --- a/SoftLayer/managers/vs.py +++ b/SoftLayer/managers/vs.py @@ -1441,15 +1441,6 @@ def migrate_dedicated(self, instance_id, host_id): """ return self.guest.migrateDedicatedHost(host_id, id=instance_id) - def get_hardware_guests(self): - """Returns all virtualHost capable hardware objects and their guests. - - :return SoftLayer_Hardware[]. - """ - object_filter = {"hardware": {"virtualHost": {"id": {"operation": "not null"}}}} - mask = "mask[virtualHost[guests[powerState]]]" - return self.client.call('SoftLayer_Account', 'getHardware', mask=mask, filter=object_filter) - def authorize_storage(self, vs_id, username_storage): """Authorize File or Block Storage to a Virtual Server.
softlayer/softlayer-python
18ecf5809c66b45922f0c947e3d268167f5153d9
diff --git a/tests/CLI/modules/vs/vs_tests.py b/tests/CLI/modules/vs/vs_tests.py index 8562f446..fc0807fd 100644 --- a/tests/CLI/modules/vs/vs_tests.py +++ b/tests/CLI/modules/vs/vs_tests.py @@ -145,6 +145,23 @@ def test_list_vs(self): self.assert_no_fail(result) + def test_list_vs_search_noargs(self): + result = self.run_command(['vs', 'list', '--search']) + self.assert_no_fail(result) + self.assert_called_with('SoftLayer_Search', 'advancedSearch', args=('_objectType:SoftLayer_Virtual_Guest ',)) + + def test_list_vs_search_noargs_domain(self): + result = self.run_command(['vs', 'list', '--search', '-Dtest']) + self.assert_no_fail(result) + self.assert_called_with('SoftLayer_Search', 'advancedSearch', + args=('_objectType:SoftLayer_Virtual_Guest domain: *test*',)) + + def test_list_vs_search_args(self): + result = self.run_command(['vs', 'list', '--search=thisTerm']) + self.assert_no_fail(result) + self.assert_called_with('SoftLayer_Search', 'advancedSearch', + args=('_objectType:SoftLayer_Virtual_Guest *thisTerm*',)) + @mock.patch('SoftLayer.utils.lookup') def test_detail_vs_empty_billing(self, mock_lookup): def mock_lookup_func(dic, key, *keys): @@ -919,10 +936,6 @@ def test_vs_migrate_exception(self): self.assert_called_with('SoftLayer_Virtual_Guest', 'migrate', identifier=100) self.assert_not_called_with('SoftLayer_Virtual_Guest', 'migrateDedicatedHost', args=(999), identifier=100) - def test_list_vsi(self): - result = self.run_command(['vs', 'list', '--hardware']) - self.assert_no_fail(result) - def test_credentail(self): result = self.run_command(['vs', 'credentials', '100']) self.assert_no_fail(result) diff --git a/tests/managers/search_tests.py b/tests/managers/search_tests.py index b82fdb65..54dfe67b 100644 --- a/tests/managers/search_tests.py +++ b/tests/managers/search_tests.py @@ -25,3 +25,22 @@ def test_search(self): def test_search_advanced(self): self.search.advanced('SoftLayer_Hardware') self.assert_called_with('SoftLayer_Search', 'advancedSearch') + + def test_search_instances_basic(self): + search_string = "TEST_STRING" + expected = f"_objectType:SoftLayer_Virtual_Guest *{search_string}*" + self.search.search_instances(search_string) + self.assert_called_with('SoftLayer_Search', 'advancedSearch', + args=(expected,)) + self.search.search_instances(search_string, hostname="thisHostname") + self.assert_called_with('SoftLayer_Search', 'advancedSearch', + args=(f"{expected} hostname: *thisHostname*",)) + self.search.search_instances(search_string, domain="thisDomain") + self.assert_called_with('SoftLayer_Search', 'advancedSearch', + args=(f"{expected} domain: *thisDomain*",)) + self.search.search_instances(search_string, datacenter="dal13") + self.assert_called_with('SoftLayer_Search', 'advancedSearch', + args=(f"{expected} datacenter.longName: *dal13*",)) + self.search.search_instances(search_string, tags=["thisTag"]) + self.assert_called_with('SoftLayer_Search', 'advancedSearch', + args=(f"{expected} internalTagReferences.tag.name: thisTag",)) diff --git a/tests/managers/vs/vs_tests.py b/tests/managers/vs/vs_tests.py index 4c6d18b1..a0ac6dae 100644 --- a/tests/managers/vs/vs_tests.py +++ b/tests/managers/vs/vs_tests.py @@ -1278,38 +1278,6 @@ def test_migrate_dedicated(self): self.assertTrue(result) self.assert_called_with('SoftLayer_Virtual_Guest', 'migrateDedicatedHost', args=(5555,), identifier=1234) - def test_get_hardware_guests(self): - mock = self.set_mock('SoftLayer_Account', 'getHardware') - mock.return_value = [{ - "accountId": 11111, - "domain": 
"vmware.chechu.com", - "hostname": "host14", - "id": 22222, - "virtualHost": { - "accountId": 11111, - "id": 33333, - "name": "host14.vmware.chechu.com", - "guests": [ - { - "accountId": 11111, - "hostname": "NSX-T Manager", - "id": 44444, - "maxCpu": 16, - "maxCpuUnits": "CORE", - "maxMemory": 49152, - "powerState": { - "keyName": "RUNNING", - "name": "Running" - }, - "status": { - "keyName": "ACTIVE", - "name": "Active" - } - }]}}] - - result = self.vs.get_hardware_guests() - self.assertEqual("NSX-T Manager", result[0]['virtualHost']['guests'][0]['hostname']) - def test_authorize_storage(self): options = self.vs.authorize_storage(1234, "SL01SEL301234-11")
slcli vs list --search "query" Add ability to list VSI based on a query
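The manager added in the patch above translates this into a `SoftLayer_Search::advancedSearch` query string. A stand-alone sketch of the construction, with the behavior copied from `search_instances`:

```python
# Query-string construction mirroring the patch: the Search API needs an
# _objectType prefix, and wrapping a term in asterisks makes the match fuzzy.
def build_query(search_string, hostname=None, domain=None):
    if search_string:
        search_string = f"*{search_string}*"
    query = f"_objectType:SoftLayer_Virtual_Guest {search_string}"
    if hostname:
        query = f"{query} hostname: *{hostname}*"
    if domain:
        query = f"{query} domain: *{domain}*"
    return query


print(build_query("thisTerm"))
# _objectType:SoftLayer_Virtual_Guest *thisTerm*
```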
0.0
18ecf5809c66b45922f0c947e3d268167f5153d9
[ "tests/CLI/modules/vs/vs_tests.py::VirtTests::test_list_vs_search_args", "tests/CLI/modules/vs/vs_tests.py::VirtTests::test_list_vs_search_noargs", "tests/CLI/modules/vs/vs_tests.py::VirtTests::test_list_vs_search_noargs_domain", "tests/managers/search_tests.py::SearchTests::test_search_instances_basic" ]
[ "tests/CLI/modules/vs/vs_tests.py::VirtTests::test_add_notification", "tests/CLI/modules/vs/vs_tests.py::VirtTests::test_authorize_portable_storage_vs", "tests/CLI/modules/vs/vs_tests.py::VirtTests::test_authorize_storage_vs", "tests/CLI/modules/vs/vs_tests.py::VirtTests::test_authorize_storage_vs_no_confirm", "tests/CLI/modules/vs/vs_tests.py::VirtTests::test_authorize_volume_and_portable_storage_vs", "tests/CLI/modules/vs/vs_tests.py::VirtTests::test_authorize_vs_empty", "tests/CLI/modules/vs/vs_tests.py::VirtTests::test_billing", "tests/CLI/modules/vs/vs_tests.py::VirtTests::test_cancel", "tests/CLI/modules/vs/vs_tests.py::VirtTests::test_cancel_no_confirm", "tests/CLI/modules/vs/vs_tests.py::VirtTests::test_create_options", "tests/CLI/modules/vs/vs_tests.py::VirtTests::test_create_options_prices", "tests/CLI/modules/vs/vs_tests.py::VirtTests::test_create_options_prices_location", "tests/CLI/modules/vs/vs_tests.py::VirtTests::test_credentail", "tests/CLI/modules/vs/vs_tests.py::VirtTests::test_detail_drives_swap", "tests/CLI/modules/vs/vs_tests.py::VirtTests::test_detail_drives_system", "tests/CLI/modules/vs/vs_tests.py::VirtTests::test_detail_vs", "tests/CLI/modules/vs/vs_tests.py::VirtTests::test_detail_vs_dedicated_host_not_found", "tests/CLI/modules/vs/vs_tests.py::VirtTests::test_detail_vs_empty_allotment", "tests/CLI/modules/vs/vs_tests.py::VirtTests::test_detail_vs_empty_billing", "tests/CLI/modules/vs/vs_tests.py::VirtTests::test_detail_vs_empty_tag", "tests/CLI/modules/vs/vs_tests.py::VirtTests::test_detail_vs_no_dedicated_host_hostname", "tests/CLI/modules/vs/vs_tests.py::VirtTests::test_detail_vs_ptr_error", "tests/CLI/modules/vs/vs_tests.py::VirtTests::test_detail_vs_security_group", "tests/CLI/modules/vs/vs_tests.py::VirtTests::test_dns_sync_both", "tests/CLI/modules/vs/vs_tests.py::VirtTests::test_dns_sync_edit_a", "tests/CLI/modules/vs/vs_tests.py::VirtTests::test_dns_sync_edit_ptr", "tests/CLI/modules/vs/vs_tests.py::VirtTests::test_dns_sync_misc_exception", "tests/CLI/modules/vs/vs_tests.py::VirtTests::test_dns_sync_v6", "tests/CLI/modules/vs/vs_tests.py::VirtTests::test_edit", "tests/CLI/modules/vs/vs_tests.py::VirtTests::test_going_ready", "tests/CLI/modules/vs/vs_tests.py::VirtTests::test_last_transaction_empty", "tests/CLI/modules/vs/vs_tests.py::VirtTests::test_list_vs", "tests/CLI/modules/vs/vs_tests.py::VirtTests::test_monitoring_vs", "tests/CLI/modules/vs/vs_tests.py::VirtTests::test_not_ready", "tests/CLI/modules/vs/vs_tests.py::VirtTests::test_notification_delete", "tests/CLI/modules/vs/vs_tests.py::VirtTests::test_notifications", "tests/CLI/modules/vs/vs_tests.py::VirtTests::test_os_available", "tests/CLI/modules/vs/vs_tests.py::VirtTests::test_pause_vs", "tests/CLI/modules/vs/vs_tests.py::VirtTests::test_pause_vs_no_confirm", "tests/CLI/modules/vs/vs_tests.py::VirtTests::test_power_off_vs_hard", "tests/CLI/modules/vs/vs_tests.py::VirtTests::test_power_off_vs_no_confirm", "tests/CLI/modules/vs/vs_tests.py::VirtTests::test_power_on_vs", "tests/CLI/modules/vs/vs_tests.py::VirtTests::test_power_vs_off_soft", "tests/CLI/modules/vs/vs_tests.py::VirtTests::test_ready", "tests/CLI/modules/vs/vs_tests.py::VirtTests::test_reboot_vs_default", "tests/CLI/modules/vs/vs_tests.py::VirtTests::test_reboot_vs_hard", "tests/CLI/modules/vs/vs_tests.py::VirtTests::test_reboot_vs_no_confirm", "tests/CLI/modules/vs/vs_tests.py::VirtTests::test_reboot_vs_soft", "tests/CLI/modules/vs/vs_tests.py::VirtTests::test_reload", 
"tests/CLI/modules/vs/vs_tests.py::VirtTests::test_reload_no_confirm", "tests/CLI/modules/vs/vs_tests.py::VirtTests::test_rescue_vs", "tests/CLI/modules/vs/vs_tests.py::VirtTests::test_rescue_vs_no_confirm", "tests/CLI/modules/vs/vs_tests.py::VirtTests::test_resume_vs", "tests/CLI/modules/vs/vs_tests.py::VirtTests::test_upgrade", "tests/CLI/modules/vs/vs_tests.py::VirtTests::test_upgrade_aborted", "tests/CLI/modules/vs/vs_tests.py::VirtTests::test_upgrade_disk", "tests/CLI/modules/vs/vs_tests.py::VirtTests::test_upgrade_disk_error", "tests/CLI/modules/vs/vs_tests.py::VirtTests::test_upgrade_no_options", "tests/CLI/modules/vs/vs_tests.py::VirtTests::test_upgrade_private_no_cpu", "tests/CLI/modules/vs/vs_tests.py::VirtTests::test_upgrade_with_add_disk", "tests/CLI/modules/vs/vs_tests.py::VirtTests::test_upgrade_with_cpu_memory_and_flavor", "tests/CLI/modules/vs/vs_tests.py::VirtTests::test_upgrade_with_flavor", "tests/CLI/modules/vs/vs_tests.py::VirtTests::test_usage_metric_data_empty", "tests/CLI/modules/vs/vs_tests.py::VirtTests::test_usage_no_confirm", "tests/CLI/modules/vs/vs_tests.py::VirtTests::test_usage_vs", "tests/CLI/modules/vs/vs_tests.py::VirtTests::test_usage_vs_cpu", "tests/CLI/modules/vs/vs_tests.py::VirtTests::test_usage_vs_cpu_lower_case", "tests/CLI/modules/vs/vs_tests.py::VirtTests::test_usage_vs_memory", "tests/CLI/modules/vs/vs_tests.py::VirtTests::test_user_access", "tests/CLI/modules/vs/vs_tests.py::VirtTests::test_vs_capture", "tests/CLI/modules/vs/vs_tests.py::VirtTests::test_vs_detail_csv_output_format_with_nested_tables", "tests/CLI/modules/vs/vs_tests.py::VirtTests::test_vs_migrate_all", "tests/CLI/modules/vs/vs_tests.py::VirtTests::test_vs_migrate_all_empty", "tests/CLI/modules/vs/vs_tests.py::VirtTests::test_vs_migrate_dedicated", "tests/CLI/modules/vs/vs_tests.py::VirtTests::test_vs_migrate_exception", "tests/CLI/modules/vs/vs_tests.py::VirtTests::test_vs_migrate_guest", "tests/CLI/modules/vs/vs_tests.py::VirtTests::test_vs_migrate_list", "tests/CLI/modules/vs/vs_tests.py::VirtTests::test_vs_migrate_list_empty", "tests/CLI/modules/vs/vs_tests.py::VirtTests::test_vs_storage", "tests/managers/search_tests.py::SearchTests::test_search", "tests/managers/search_tests.py::SearchTests::test_search_advanced", "tests/managers/search_tests.py::SearchTests::test_search_type", "tests/managers/vs/vs_tests.py::VSTests::test_add_notification", "tests/managers/vs/vs_tests.py::VSTests::test_authorize_portable_storage", "tests/managers/vs/vs_tests.py::VSTests::test_authorize_storage", "tests/managers/vs/vs_tests.py::VSTests::test_authorize_storage_empty", "tests/managers/vs/vs_tests.py::VSTests::test_browser_access_log", "tests/managers/vs/vs_tests.py::VSTests::test_cancel_instance", "tests/managers/vs/vs_tests.py::VSTests::test_capture_additional_disks", "tests/managers/vs/vs_tests.py::VSTests::test_captures", "tests/managers/vs/vs_tests.py::VSTests::test_change_port_speed_private", "tests/managers/vs/vs_tests.py::VSTests::test_change_port_speed_public", "tests/managers/vs/vs_tests.py::VSTests::test_create_instance", "tests/managers/vs/vs_tests.py::VSTests::test_create_instances", "tests/managers/vs/vs_tests.py::VSTests::test_create_network_components_by_routers", "tests/managers/vs/vs_tests.py::VSTests::test_create_network_components_by_routers_and_vlan", "tests/managers/vs/vs_tests.py::VSTests::test_create_network_components_private_subnet", "tests/managers/vs/vs_tests.py::VSTests::test_create_network_components_public_subnet", 
"tests/managers/vs/vs_tests.py::VSTests::test_create_network_components_vlan_subnet_private", "tests/managers/vs/vs_tests.py::VSTests::test_create_network_components_vlan_subnet_private_vlan_subnet_public", "tests/managers/vs/vs_tests.py::VSTests::test_create_network_components_vlan_subnet_public", "tests/managers/vs/vs_tests.py::VSTests::test_edit_blank", "tests/managers/vs/vs_tests.py::VSTests::test_edit_full", "tests/managers/vs/vs_tests.py::VSTests::test_edit_metadata", "tests/managers/vs/vs_tests.py::VSTests::test_edit_tags", "tests/managers/vs/vs_tests.py::VSTests::test_edit_tags_blank", "tests/managers/vs/vs_tests.py::VSTests::test_generate_basic", "tests/managers/vs/vs_tests.py::VSTests::test_generate_boot_mode", "tests/managers/vs/vs_tests.py::VSTests::test_generate_by_router_and_subnet", "tests/managers/vs/vs_tests.py::VSTests::test_generate_by_router_and_vlan", "tests/managers/vs/vs_tests.py::VSTests::test_generate_datacenter", "tests/managers/vs/vs_tests.py::VSTests::test_generate_dedicated", "tests/managers/vs/vs_tests.py::VSTests::test_generate_image_id", "tests/managers/vs/vs_tests.py::VSTests::test_generate_missing", "tests/managers/vs/vs_tests.py::VSTests::test_generate_monthly", "tests/managers/vs/vs_tests.py::VSTests::test_generate_multi_disk", "tests/managers/vs/vs_tests.py::VSTests::test_generate_network", "tests/managers/vs/vs_tests.py::VSTests::test_generate_no_disks", "tests/managers/vs/vs_tests.py::VSTests::test_generate_os_and_image", "tests/managers/vs/vs_tests.py::VSTests::test_generate_post_uri", "tests/managers/vs/vs_tests.py::VSTests::test_generate_private_network_only", "tests/managers/vs/vs_tests.py::VSTests::test_generate_private_subnet", "tests/managers/vs/vs_tests.py::VSTests::test_generate_private_vlan", "tests/managers/vs/vs_tests.py::VSTests::test_generate_private_vlan_subnet_public_vlan_subnet", "tests/managers/vs/vs_tests.py::VSTests::test_generate_private_vlan_with_private_subnet", "tests/managers/vs/vs_tests.py::VSTests::test_generate_public_subnet", "tests/managers/vs/vs_tests.py::VSTests::test_generate_public_vlan", "tests/managers/vs/vs_tests.py::VSTests::test_generate_public_vlan_with_public_subnet", "tests/managers/vs/vs_tests.py::VSTests::test_generate_sec_group", "tests/managers/vs/vs_tests.py::VSTests::test_generate_single_disk", "tests/managers/vs/vs_tests.py::VSTests::test_generate_sshkey", "tests/managers/vs/vs_tests.py::VSTests::test_generate_userdata", "tests/managers/vs/vs_tests.py::VSTests::test_get_bandwidth_allocation", "tests/managers/vs/vs_tests.py::VSTests::test_get_bandwidth_allocation_no_allotment", "tests/managers/vs/vs_tests.py::VSTests::test_get_bandwidth_allocation_with_allotment", "tests/managers/vs/vs_tests.py::VSTests::test_get_bandwidth_data", "tests/managers/vs/vs_tests.py::VSTests::test_get_create_options", "tests/managers/vs/vs_tests.py::VSTests::test_get_create_options_prices_by_location", "tests/managers/vs/vs_tests.py::VSTests::test_get_instance", "tests/managers/vs/vs_tests.py::VSTests::test_get_local_disks_empty", "tests/managers/vs/vs_tests.py::VSTests::test_get_local_disks_swap", "tests/managers/vs/vs_tests.py::VSTests::test_get_local_disks_system", "tests/managers/vs/vs_tests.py::VSTests::test_get_none_storage_credentials", "tests/managers/vs/vs_tests.py::VSTests::test_get_portable_storage", "tests/managers/vs/vs_tests.py::VSTests::test_get_portable_storage_empty", "tests/managers/vs/vs_tests.py::VSTests::test_get_storage_credentials", 
"tests/managers/vs/vs_tests.py::VSTests::test_get_storage_iscsi_details", "tests/managers/vs/vs_tests.py::VSTests::test_get_storage_iscsi_empty_details", "tests/managers/vs/vs_tests.py::VSTests::test_get_storage_nas_details", "tests/managers/vs/vs_tests.py::VSTests::test_get_storage_nas_empty_details", "tests/managers/vs/vs_tests.py::VSTests::test_get_tracking_id", "tests/managers/vs/vs_tests.py::VSTests::test_list_instances", "tests/managers/vs/vs_tests.py::VSTests::test_list_instances_hourly", "tests/managers/vs/vs_tests.py::VSTests::test_list_instances_monthly", "tests/managers/vs/vs_tests.py::VSTests::test_list_instances_neither", "tests/managers/vs/vs_tests.py::VSTests::test_list_instances_with_filters", "tests/managers/vs/vs_tests.py::VSTests::test_migrate", "tests/managers/vs/vs_tests.py::VSTests::test_migrate_dedicated", "tests/managers/vs/vs_tests.py::VSTests::test_notification", "tests/managers/vs/vs_tests.py::VSTests::test_notification_del", "tests/managers/vs/vs_tests.py::VSTests::test_reload_instance", "tests/managers/vs/vs_tests.py::VSTests::test_reload_instance_posturi_sshkeys", "tests/managers/vs/vs_tests.py::VSTests::test_reload_instance_with_new_os", "tests/managers/vs/vs_tests.py::VSTests::test_rescue", "tests/managers/vs/vs_tests.py::VSTests::test_resolve_ids_hostname", "tests/managers/vs/vs_tests.py::VSTests::test_resolve_ids_ip", "tests/managers/vs/vs_tests.py::VSTests::test_resolve_ids_ip_invalid", "tests/managers/vs/vs_tests.py::VSTests::test_resolve_ids_ip_private", "tests/managers/vs/vs_tests.py::VSTests::test_usage_vs_cpu", "tests/managers/vs/vs_tests.py::VSTests::test_usage_vs_memory" ]
{ "failed_lite_validators": [ "has_short_problem_statement", "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false }
2023-03-03 19:34:49+00:00
mit
5,584
softlayer__softlayer-python-2089
diff --git a/.github/workflows/codeql-analysis.yml b/.github/workflows/codeql-analysis.yml index 19d4bd81..adc5408b 100644 --- a/.github/workflows/codeql-analysis.yml +++ b/.github/workflows/codeql-analysis.yml @@ -39,11 +39,11 @@ jobs: steps: - name: Checkout repository - uses: actions/checkout@v2 + uses: actions/checkout@v4 # Initializes the CodeQL tools for scanning. - name: Initialize CodeQL - uses: github/codeql-action/init@v1 + uses: github/codeql-action/init@v2 with: languages: ${{ matrix.language }} # If you wish to specify custom queries, you can do so here or in a config file. @@ -54,7 +54,7 @@ jobs: # Autobuild attempts to build any compiled languages (C/C++, C#, or Java). # If this step fails, then you should remove it and run the build manually (see below) - name: Autobuild - uses: github/codeql-action/autobuild@v1 + uses: github/codeql-action/autobuild@v2 # ℹ️ Command-line programs to run using the OS shell. # 📚 https://git.io/JvXDl @@ -68,4 +68,4 @@ jobs: # make release - name: Perform CodeQL Analysis - uses: github/codeql-action/analyze@v1 + uses: github/codeql-action/analyze@v2 diff --git a/.github/workflows/documentation.yml b/.github/workflows/documentation.yml index d307fd93..f0935473 100644 --- a/.github/workflows/documentation.yml +++ b/.github/workflows/documentation.yml @@ -10,12 +10,12 @@ jobs: runs-on: ubuntu-latest strategy: matrix: - python-version: [3.8] + python-version: [3.11] steps: - - uses: actions/checkout@v2 + - uses: actions/checkout@v4 - name: Set up Python ${{ matrix.python-version }} - uses: actions/setup-python@v1 + uses: actions/setup-python@v4 with: python-version: ${{ matrix.python-version }} - name: Install dependencies diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index f307200d..95afeced 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -1,3 +1,6 @@ +# https://packaging.python.org/guides/publishing-package-distribution-releases-using-github-actions-ci-cd-workflows/ +# Trusted Publisher stuff: https://docs.pypi.org/trusted-publishers/adding-a-publisher/ + name: Release to PyPi on: @@ -8,12 +11,17 @@ jobs: build-n-publish: name: Build and publish Python 🐍 distributions 📦 to PyPI and TestPyPI runs-on: ubuntu-latest + environment: + name: pypi + url: https://pypi.org/project/SoftLayer/ + permissions: + id-token: write steps: - - uses: actions/checkout@master - - name: Set up Python 3.8 - uses: actions/setup-python@v2 + - uses: actions/checkout@v4 + - name: Set up Python 3.11 + uses: actions/setup-python@v4 with: - python-version: 3.8 + python-version: 3.11 - name: Install pypa/build run: >- python -m @@ -28,10 +36,6 @@ jobs: --wheel --outdir dist/ . - - name: Publish 📦 to Test PyPI + - name: 📦 to PyPI uses: pypa/gh-action-pypi-publish@release/v1 - with: - user: __token__ - password: ${{ secrets.CGALLO_PYPI }} - repository_url: https://upload.pypi.org/legacy/ diff --git a/SoftLayer/CLI/file/cancel.py b/SoftLayer/CLI/file/cancel.py index 34077fbb..9837f9fa 100644 --- a/SoftLayer/CLI/file/cancel.py +++ b/SoftLayer/CLI/file/cancel.py @@ -16,14 +16,21 @@ is_flag=True, help="Cancels the file storage volume immediately instead " "of on the billing anniversary") [email protected]('--force', default=False, is_flag=True, help="Force modify") @environment.pass_env -def cli(env, volume_id, reason, immediate): - """Cancel an existing file storage volume.""" +def cli(env, volume_id, reason, immediate, force): + """Cancel an existing file storage volume. 
+ + EXAMPLE:: + slcli file volume-cancel 12345678 --immediate --force + This command cancels volume with ID 12345678 immediately and without asking for confirmation. + """ file_storage_manager = SoftLayer.FileStorageManager(env.client) - if not (env.skip_confirmations or formatting.no_going_back(volume_id)): - raise exceptions.CLIAbort('Aborted') + if not force: + if not (env.skip_confirmations or formatting.no_going_back(volume_id)): + raise exceptions.CLIAbort('Aborted.') cancelled = file_storage_manager.cancel_file_volume(volume_id, reason, immediate) diff --git a/SoftLayer/CLI/file/duplicate.py b/SoftLayer/CLI/file/duplicate.py index b8b5983b..50c7c6ea 100644 --- a/SoftLayer/CLI/file/duplicate.py +++ b/SoftLayer/CLI/file/duplicate.py @@ -5,6 +5,7 @@ import SoftLayer from SoftLayer.CLI import environment from SoftLayer.CLI import exceptions +from SoftLayer.CLI import formatting CONTEXT_SETTINGS = {'token_normalize_func': lambda x: x.upper()} @@ -58,11 +59,17 @@ show_default=True, help='Whether or not this duplicate will be a dependent duplicate' 'of the origin volume.') +@click.option('--force', default=False, is_flag=True, help="Force modify") @environment.pass_env def cli(env, origin_volume_id, origin_snapshot_id, duplicate_size, duplicate_iops, duplicate_tier, duplicate_snapshot_size, billing, - dependent_duplicate): - """Order a duplicate file storage volume.""" + dependent_duplicate, force): + """Order a duplicate file storage volume. + + EXAMPLE:: + slcli file volume-duplicate 12345678 + This command orders a new volume by duplicating the volume with ID 12345678. + """ file_manager = SoftLayer.FileStorageManager(env.client) hourly_billing_flag = False @@ -72,6 +79,11 @@ def cli(env, origin_volume_id, origin_snapshot_id, duplicate_size, if duplicate_tier is not None: duplicate_tier = float(duplicate_tier) + if not force: + if not (env.skip_confirmations or formatting.confirm("This action will incur charges on your account. " + "Continue?")): + raise exceptions.CLIAbort('Aborted.') + try: order = file_manager.order_duplicate_volume( origin_volume_id, diff --git a/SoftLayer/CLI/file/limit.py b/SoftLayer/CLI/file/limit.py index e0243ddc..1437578c 100644 --- a/SoftLayer/CLI/file/limit.py +++ b/SoftLayer/CLI/file/limit.py @@ -18,7 +18,12 @@ @click.option('--datacenter', '-d', help='Filter by datacenter') @environment.pass_env def cli(env, sortby, datacenter): - """List number of block storage volumes limit per datacenter.""" + """List number of file storage volumes limit per datacenter. + + EXAMPLE:: + slcli file volume-limits + This command lists the storage limits per datacenter for this account. + """ file_manager = SoftLayer.FileStorageManager(env.client) file_volumes = file_manager.list_file_volume_limit()
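For orientation, a minimal sketch of what these CLI commands ultimately call once past the new `--force` guard, using the manager API referenced in the patch; the client setup, volume ID, and reason are illustrative placeholders, not part of the patch:

```python
# Sketch only: assumes SoftLayer credentials are configured in the
# environment; the volume ID and reason are placeholders.
import SoftLayer

client = SoftLayer.create_client_from_env()
file_manager = SoftLayer.FileStorageManager(client)

# Equivalent of `slcli file volume-cancel 12345678 --immediate --force`,
# minus the interactive confirmation the CLI layer now skips with --force.
cancelled = file_manager.cancel_file_volume(
    12345678, reason="No longer needed", immediate=True
)
print("cancelled" if cancelled else "not cancelled")
```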
softlayer/softlayer-python
1e52a0267d44fd83033c64af738fb3b85e61bc26
diff --git a/.github/workflows/test_pypi_release.yml b/.github/workflows/test_pypi_release.yml index 12443d25..70245307 100644 --- a/.github/workflows/test_pypi_release.yml +++ b/.github/workflows/test_pypi_release.yml @@ -1,21 +1,27 @@ # https://packaging.python.org/guides/publishing-package-distribution-releases-using-github-actions-ci-cd-workflows/ +# Trusted Publisher stuff: https://docs.pypi.org/trusted-publishers/adding-a-publisher/ -name: Publish 📦 to TestPyPI +name: TEST Publish 📦 to TestPyPI on: push: - branches: [test-pypi ] + branches: [test-pypi] jobs: build-n-publish: - name: Build and publish Python 🐍 distributions 📦 to PyPI and TestPyPI + name: TEST Build and publish Python 🐍 distributions 📦 to PyPI and TestPyPI runs-on: ubuntu-latest + environment: + name: pypi-test + url: https://test.pypi.org/project/SoftLayer/ + permissions: + id-token: write steps: - uses: actions/checkout@master - - name: Set up Python 3.8 - uses: actions/setup-python@v2 + - name: Set up Python 3.11 + uses: actions/setup-python@v4 with: - python-version: 3.8 + python-version: 3.11 - name: Install pypa/build run: >- python -m @@ -31,7 +37,6 @@ jobs: --outdir dist/ . - name: Publish 📦 to Test PyPI - uses: pypa/gh-action-pypi-publish@master + uses: pypa/gh-action-pypi-publish@release/v1 with: - password: ${{ secrets.CGALLO_TEST_PYPI }} - repository_url: https://test.pypi.org/legacy/ \ No newline at end of file + repository-url: https://test.pypi.org/legacy/ diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml index a9827899..9b079350 100644 --- a/.github/workflows/tests.yml +++ b/.github/workflows/tests.yml @@ -13,9 +13,9 @@ jobs: python-version: [3.7,3.8,3.9,'3.10',3.11] steps: - - uses: actions/checkout@v2 + - uses: actions/checkout@v4 - name: Set up Python ${{ matrix.python-version }} - uses: actions/setup-python@v1 + uses: actions/setup-python@v4 with: python-version: ${{ matrix.python-version }} - name: Install dependencies @@ -27,9 +27,9 @@ jobs: coverage: runs-on: ubuntu-latest steps: - - uses: actions/checkout@v2 + - uses: actions/checkout@v4 - name: Set up Python - uses: actions/setup-python@v1 + uses: actions/setup-python@v4 with: python-version: 3.9 - name: Install dependencies @@ -41,9 +41,9 @@ jobs: analysis: runs-on: ubuntu-latest steps: - - uses: actions/checkout@v2 + - uses: actions/checkout@v4 - name: Set up Python - uses: actions/setup-python@v1 + uses: actions/setup-python@v4 with: python-version: 3.9 - name: Install dependencies diff --git a/tests/CLI/modules/file_tests.py b/tests/CLI/modules/file_tests.py index a1749e7f..5aafbbee 100644 --- a/tests/CLI/modules/file_tests.py +++ b/tests/CLI/modules/file_tests.py @@ -850,3 +850,17 @@ def test_file_snapshot_cancel_force(self, confirm_mock): result = self.run_command(['file', 'snapshot-cancel', '4917309']) self.assertEqual(2, result.exit_code) self.assertEqual('Aborted.', result.exception.message) + + @mock.patch('SoftLayer.CLI.formatting.confirm') + def test_file_volume_cancel_force(self, confirm_mock): + confirm_mock.return_value = False + result = self.run_command(['file', 'volume-cancel', '1234']) + self.assertEqual(2, result.exit_code) + self.assertEqual('Aborted.', result.exception.message) + + @mock.patch('SoftLayer.CLI.formatting.confirm') + def test_file_volume_duplicate_force(self, confirm_mock): + confirm_mock.return_value = False + result = self.run_command(['file', 'volume-duplicate', '100']) + self.assertEqual(2, result.exit_code) + self.assertEqual('Aborted.', result.exception.message)
update Release build. The release build seems to be using old/deprecated GitHub Actions versions. https://github.com/softlayer/softlayer-python/actions/runs/6165335378 ![image](https://github.com/softlayer/softlayer-python/assets/7408017/2841f4c1-8182-4736-8347-6495f0011066)
0.0
1e52a0267d44fd83033c64af738fb3b85e61bc26
[ "tests/CLI/modules/file_tests.py::FileTests::test_file_volume_cancel_force", "tests/CLI/modules/file_tests.py::FileTests::test_file_volume_duplicate_force" ]
[ "tests/CLI/modules/file_tests.py::FileTests::test_access_list", "tests/CLI/modules/file_tests.py::FileTests::test_authorize_host_to_volume", "tests/CLI/modules/file_tests.py::FileTests::test_create_snapshot", "tests/CLI/modules/file_tests.py::FileTests::test_create_snapshot_unsuccessful", "tests/CLI/modules/file_tests.py::FileTests::test_deauthorize_host_to_volume", "tests/CLI/modules/file_tests.py::FileTests::test_delete_snapshot", "tests/CLI/modules/file_tests.py::FileTests::test_dep_dupe_convert", "tests/CLI/modules/file_tests.py::FileTests::test_disable_snapshots", "tests/CLI/modules/file_tests.py::FileTests::test_disaster_recovery_failover", "tests/CLI/modules/file_tests.py::FileTests::test_disaster_recovery_failover_aborted", "tests/CLI/modules/file_tests.py::FileTests::test_dupe_refresh", "tests/CLI/modules/file_tests.py::FileTests::test_enable_snapshots", "tests/CLI/modules/file_tests.py::FileTests::test_file_replica_order_force", "tests/CLI/modules/file_tests.py::FileTests::test_file_replica_order_iops", "tests/CLI/modules/file_tests.py::FileTests::test_file_snapshot_cancel_force", "tests/CLI/modules/file_tests.py::FileTests::test_list_volume_schedules", "tests/CLI/modules/file_tests.py::FileTests::test_modify_order_no_force", "tests/CLI/modules/file_tests.py::FileTests::test_replicant_failback", "tests/CLI/modules/file_tests.py::FileTests::test_replicant_failback_unsuccessful", "tests/CLI/modules/file_tests.py::FileTests::test_replicant_failover", "tests/CLI/modules/file_tests.py::FileTests::test_replicant_failover_unsuccessful", "tests/CLI/modules/file_tests.py::FileTests::test_replication_locations", "tests/CLI/modules/file_tests.py::FileTests::test_replication_locations_unsuccessful", "tests/CLI/modules/file_tests.py::FileTests::test_replication_partners", "tests/CLI/modules/file_tests.py::FileTests::test_replication_partners_unsuccessful", "tests/CLI/modules/file_tests.py::FileTests::test_snapshot_cancel", "tests/CLI/modules/file_tests.py::FileTests::test_snapshot_get_notification_status", "tests/CLI/modules/file_tests.py::FileTests::test_snapshot_list", "tests/CLI/modules/file_tests.py::FileTests::test_snapshot_order", "tests/CLI/modules/file_tests.py::FileTests::test_snapshot_order_order_not_placed", "tests/CLI/modules/file_tests.py::FileTests::test_snapshot_order_performance_manager_error", "tests/CLI/modules/file_tests.py::FileTests::test_snapshot_restore", "tests/CLI/modules/file_tests.py::FileTests::test_volume_cancel", "tests/CLI/modules/file_tests.py::FileTests::test_volume_cancel_with_billing_item", "tests/CLI/modules/file_tests.py::FileTests::test_volume_cancel_without_billing_item", "tests/CLI/modules/file_tests.py::FileTests::test_volume_count", "tests/CLI/modules/file_tests.py::FileTests::test_volume_detail", "tests/CLI/modules/file_tests.py::FileTests::test_volume_detail_name_identifier", "tests/CLI/modules/file_tests.py::FileTests::test_volume_limit", "tests/CLI/modules/file_tests.py::FileTests::test_volume_limit_datacenter", "tests/CLI/modules/file_tests.py::FileTests::test_volume_limit_empty_datacenter", "tests/CLI/modules/file_tests.py::FileTests::test_volume_list", "tests/CLI/modules/file_tests.py::FileTests::test_volume_list_notes_format_output_json", "tests/CLI/modules/file_tests.py::FileTests::test_volume_list_order", "tests/CLI/modules/file_tests.py::FileTests::test_volume_list_reduced_notes_format_output_table", "tests/CLI/modules/file_tests.py::FileTests::test_volume_not_set_note", "tests/CLI/modules/file_tests.py::FileTests::test_volume_options", 
"tests/CLI/modules/file_tests.py::FileTests::test_volume_order_endurance_tier_not_given", "tests/CLI/modules/file_tests.py::FileTests::test_volume_order_hourly_billing_not_available", "tests/CLI/modules/file_tests.py::FileTests::test_volume_order_performance_iops_not_given", "tests/CLI/modules/file_tests.py::FileTests::test_volume_order_performance_snapshot_error", "tests/CLI/modules/file_tests.py::FileTests::test_volume_set_note" ]
{ "failed_lite_validators": [ "has_short_problem_statement", "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false }
2023-09-13 21:07:50+00:00
mit
5,585
softlayer__softlayer-python-2126
diff --git a/SoftLayer/CLI/formatting.py b/SoftLayer/CLI/formatting.py index 9c32318d..fb91ded6 100644 --- a/SoftLayer/CLI/formatting.py +++ b/SoftLayer/CLI/formatting.py @@ -259,8 +259,7 @@ def no_going_back(confirmation): if not confirmation: confirmation = 'yes' - prompt = ('This action cannot be undone! Type "%s" or press Enter ' - 'to abort' % confirmation) + prompt = f"This action cannot be undone! Type '{confirmation}' or press Enter to abort" ans = click.prompt(prompt, default='', show_default=False) if ans.lower() == str(confirmation): diff --git a/SoftLayer/CLI/hardware/update_firmware.py b/SoftLayer/CLI/hardware/update_firmware.py index 6b634376..96e82b9b 100644 --- a/SoftLayer/CLI/hardware/update_firmware.py +++ b/SoftLayer/CLI/hardware/update_firmware.py @@ -12,15 +12,23 @@ @click.command(cls=SoftLayer.CLI.command.SLCommand, ) @click.argument('identifier') [email protected]('-i', '--ipmi', is_flag=True, help="Update IPMI firmware") [email protected]('-r', '--raid', is_flag=True, help="Update RAID firmware") [email protected]('-b', '--bios', is_flag=True, help="Update BIOS firmware") [email protected]('-d', '--harddrive', is_flag=True, help="Update Hard Drives firmware") [email protected]('-n', '--network', is_flag=True, help="Update Network Card firmware") @environment.pass_env -def cli(env, identifier): - """Update server firmware.""" +def cli(env, identifier, ipmi, raid, bios, harddrive, network): + """Update server firmware. By default will update all available server components.""" mgr = SoftLayer.HardwareManager(env.client) hw_id = helpers.resolve_id(mgr.resolve_ids, identifier, 'hardware') - if not (env.skip_confirmations or - formatting.confirm('This will power off the server with id %s and ' - 'update device firmware. Continue?' % hw_id)): + confirm_message = f"This will power off the server with id {hw_id} and update device firmware. Continue?" + if not (env.skip_confirmations or formatting.confirm(confirm_message)): raise exceptions.CLIAbort('Aborted.') - mgr.update_firmware(hw_id) + # If no options were specified, set them all to enabled. + if not any([ipmi, raid, bios, harddrive, network]): + ipmi = raid = bios = harddrive = network = 1 + mgr.update_firmware(hw_id, ipmi, raid, bios, harddrive, network) + env.fout(f"[green]Firmware update for {identifier} started") diff --git a/SoftLayer/managers/hardware.py b/SoftLayer/managers/hardware.py index a59e7244..69add1cf 100644 --- a/SoftLayer/managers/hardware.py +++ b/SoftLayer/managers/hardware.py @@ -723,22 +723,23 @@ def edit(self, hardware_id, userdata=None, hostname=None, domain=None, return self.hardware.editObject(obj, id=hardware_id) - def update_firmware(self, - hardware_id, - ipmi=True, - raid_controller=True, - bios=True, - hard_drive=True): + def update_firmware(self, hardware_id: int, + ipmi: bool = True, + raid_controller: bool = True, + bios: bool = True, + hard_drive: bool = True, + network: bool = True): """Update hardware firmware. This will cause the server to be unavailable for ~20 minutes. + https://sldn.softlayer.com/reference/services/SoftLayer_Hardware_Server/createFirmwareUpdateTransaction/ - :param int hardware_id: The ID of the hardware to have its firmware - updated. + :param int hardware_id: The ID of the hardware to have its firmware updated. :param bool ipmi: Update the ipmi firmware. :param bool raid_controller: Update the raid controller firmware. :param bool bios: Update the bios firmware. :param bool hard_drive: Update the hard drive firmware. 
+ :param bool network: Update the network card firmware Example:: @@ -746,21 +747,22 @@ def update_firmware(self, result = mgr.update_firmware(hardware_id=1234) """ - return self.hardware.createFirmwareUpdateTransaction( - bool(ipmi), bool(raid_controller), bool(bios), bool(hard_drive), id=hardware_id) + return self.client.call( + 'SoftLayer_Hardware_Server', 'createFirmwareUpdateTransaction', + bool(ipmi), bool(raid_controller), bool(bios), bool(hard_drive), bool(network), id=hardware_id + ) - def reflash_firmware(self, - hardware_id, - ipmi=True, - raid_controller=True, - bios=True): + def reflash_firmware(self, hardware_id: int, + ipmi: bool = True, + raid_controller: bool = True, + bios: bool = True,): """Reflash hardware firmware. This will cause the server to be unavailable for ~60 minutes. The firmware will not be upgraded but rather reflashed to the version installed. + https://sldn.softlayer.com/reference/services/SoftLayer_Hardware_Server/createFirmwareReflashTransaction/ - :param int hardware_id: The ID of the hardware to have its firmware - reflashed. + :param int hardware_id: The ID of the hardware to have its firmware reflashed. :param bool ipmi: Reflash the ipmi firmware. :param bool raid_controller: Reflash the raid controller firmware. :param bool bios: Reflash the bios firmware.
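For reference, a short usage sketch of the updated manager method, mirroring the selective-flag combination the tests below exercise; the client setup and server ID are illustrative placeholders:

```python
# Sketch only: assumes SoftLayer credentials are configured in the
# environment; server ID 1234 is a placeholder.
import SoftLayer

client = SoftLayer.create_client_from_env()
mgr = SoftLayer.HardwareManager(client)

# Update only the RAID controller, BIOS, and network card firmware;
# per the patch this maps to createFirmwareUpdateTransaction(0, 1, 1, 0, 1).
mgr.update_firmware(1234, ipmi=False, raid_controller=True,
                    bios=True, hard_drive=False, network=True)
```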
softlayer/softlayer-python
16d18f22e1266410ea2c8a99658fca348a6446e8
diff --git a/tests/CLI/modules/hardware/hardware_basic_tests.py b/tests/CLI/modules/hardware/hardware_basic_tests.py index 61135fa4..d7c2ca9b 100644 --- a/tests/CLI/modules/hardware/hardware_basic_tests.py +++ b/tests/CLI/modules/hardware/hardware_basic_tests.py @@ -498,26 +498,6 @@ def test_edit_server_userfile(self): self.assert_called_with('SoftLayer_Hardware_Server', 'setUserMetadata', args=(['some data'],), identifier=1000) - @mock.patch('SoftLayer.CLI.formatting.confirm') - def test_update_firmware(self, confirm_mock): - confirm_mock.return_value = True - result = self.run_command(['server', 'update-firmware', '1000']) - - self.assert_no_fail(result) - self.assertEqual(result.output, "") - self.assert_called_with('SoftLayer_Hardware_Server', 'createFirmwareUpdateTransaction', - args=((1, 1, 1, 1)), identifier=1000) - - @mock.patch('SoftLayer.CLI.formatting.confirm') - def test_reflash_firmware(self, confirm_mock): - confirm_mock.return_value = True - result = self.run_command(['server', 'reflash-firmware', '1000']) - - self.assert_no_fail(result) - self.assertEqual(result.output, 'Successfully device firmware reflashed\n') - self.assert_called_with('SoftLayer_Hardware_Server', 'createFirmwareReflashTransaction', - args=((1, 1, 1)), identifier=1000) - def test_edit(self): result = self.run_command(['server', 'edit', '--domain=example.com', diff --git a/tests/CLI/modules/hardware/hardware_firmware_tests.py b/tests/CLI/modules/hardware/hardware_firmware_tests.py new file mode 100644 index 00000000..cc71c488 --- /dev/null +++ b/tests/CLI/modules/hardware/hardware_firmware_tests.py @@ -0,0 +1,109 @@ +""" + SoftLayer.tests.CLI.modules.hardware.hardware_firmware_tests + ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + + This suite is for the firmware related tests. + + :license: MIT, see LICENSE for more details. 
+""" +from SoftLayer.CLI import exceptions +from SoftLayer import testing +from unittest import mock as mock + + +class HardwareFirmwareTests(testing.TestCase): + + @mock.patch('SoftLayer.CLI.formatting.confirm') + def test_update_firmware(self, confirm_mock): + confirm_mock.return_value = True + result = self.run_command(['server', 'update-firmware', '1000']) + self.assert_no_fail(result) + self.assertIn("Firmware update for 1000 started", result.output) + self.assert_called_with('SoftLayer_Hardware_Server', 'createFirmwareUpdateTransaction', + args=((1, 1, 1, 1, 1)), identifier=1000) + + @mock.patch('SoftLayer.CLI.formatting.confirm') + def test_update_firmware_just_ipmi(self, confirm_mock): + confirm_mock.return_value = True + result = self.run_command(['server', 'update-firmware', '1000', '-i']) + + self.assert_no_fail(result) + self.assertIn("Firmware update for 1000 started", result.output) + self.assert_called_with('SoftLayer_Hardware_Server', 'createFirmwareUpdateTransaction', + args=((1, 0, 0, 0, 0)), identifier=1000) + + @mock.patch('SoftLayer.CLI.formatting.confirm') + def test_update_firmware_just_raid(self, confirm_mock): + confirm_mock.return_value = True + result = self.run_command(['server', 'update-firmware', '1000', '-r']) + + self.assert_no_fail(result) + self.assertIn("Firmware update for 1000 started", result.output) + self.assert_called_with('SoftLayer_Hardware_Server', 'createFirmwareUpdateTransaction', + args=((0, 1, 0, 0, 0)), identifier=1000) + + @mock.patch('SoftLayer.CLI.formatting.confirm') + def test_update_firmware_just_bios(self, confirm_mock): + confirm_mock.return_value = True + result = self.run_command(['server', 'update-firmware', '1000', '-b']) + + self.assert_no_fail(result) + self.assertIn("Firmware update for 1000 started", result.output) + self.assert_called_with('SoftLayer_Hardware_Server', 'createFirmwareUpdateTransaction', + args=((0, 0, 1, 0, 0)), identifier=1000) + + @mock.patch('SoftLayer.CLI.formatting.confirm') + def test_update_firmware_just_disk(self, confirm_mock): + confirm_mock.return_value = True + result = self.run_command(['server', 'update-firmware', '1000', '-d']) + + self.assert_no_fail(result) + self.assertIn("Firmware update for 1000 started", result.output) + self.assert_called_with('SoftLayer_Hardware_Server', 'createFirmwareUpdateTransaction', + args=((0, 0, 0, 1, 0)), identifier=1000) + + @mock.patch('SoftLayer.CLI.formatting.confirm') + def test_update_firmware_just_nic(self, confirm_mock): + confirm_mock.return_value = True + result = self.run_command(['server', 'update-firmware', '1000', '-n']) + + self.assert_no_fail(result) + self.assertIn("Firmware update for 1000 started", result.output) + self.assert_called_with('SoftLayer_Hardware_Server', 'createFirmwareUpdateTransaction', + args=((0, 0, 0, 0, 1)), identifier=1000) + + @mock.patch('SoftLayer.CLI.formatting.confirm') + def test_update_firmware_just_all(self, confirm_mock): + confirm_mock.return_value = True + result = self.run_command(['server', 'update-firmware', '1000', '-i', '-r', '-b', '-d', '-n']) + + self.assert_no_fail(result) + self.assertIn("Firmware update for 1000 started", result.output) + self.assert_called_with('SoftLayer_Hardware_Server', 'createFirmwareUpdateTransaction', + args=((1, 1, 1, 1, 1)), identifier=1000) + + @mock.patch('SoftLayer.CLI.formatting.confirm') + def test_update_firmware_no_confirm(self, confirm_mock): + confirm_mock.return_value = False + + result = self.run_command(['server', 'update-firmware', '1000']) + 
self.assertEqual(result.exit_code, 2) + self.assertIsInstance(result.exception, exceptions.CLIAbort) + + @mock.patch('SoftLayer.CLI.formatting.confirm') + def test_reflash_firmware(self, confirm_mock): + confirm_mock.return_value = True + result = self.run_command(['server', 'reflash-firmware', '1000']) + + self.assert_no_fail(result) + self.assertEqual(result.output, 'Successfully device firmware reflashed\n') + self.assert_called_with('SoftLayer_Hardware_Server', 'createFirmwareReflashTransaction', + args=((1, 1, 1)), identifier=1000) + + @mock.patch('SoftLayer.CLI.formatting.confirm') + def test_reflash_firmware_no_confirm(self, confirm_mock): + confirm_mock.return_value = False + + result = self.run_command(['server', 'reflash-firmware', '1000']) + self.assertEqual(result.exit_code, 2) + self.assertIsInstance(result.exception, exceptions.CLIAbort) diff --git a/tests/managers/hardware_tests.py b/tests/managers/hardware_tests.py index c6175d07..c067dd08 100644 --- a/tests/managers/hardware_tests.py +++ b/tests/managers/hardware_tests.py @@ -543,14 +543,14 @@ def test_update_firmware(self): self.assertEqual(result, True) self.assert_called_with('SoftLayer_Hardware_Server', 'createFirmwareUpdateTransaction', - identifier=100, args=(1, 1, 1, 1)) + identifier=100, args=(1, 1, 1, 1, 1)) def test_update_firmware_selective(self): result = self.hardware.update_firmware(100, ipmi=False, hard_drive=False) self.assertEqual(result, True) self.assert_called_with('SoftLayer_Hardware_Server', 'createFirmwareUpdateTransaction', - identifier=100, args=(0, 1, 1, 0)) + identifier=100, args=(0, 1, 1, 0, 1)) def test_reflash_firmware(self): result = self.hardware.reflash_firmware(100)
Slcli hardware update-firmware should do network cards too now. https://sldn.softlayer.com/reference/services/SoftLayer_Hardware_Server/createFirmwareUpdateTransaction/ now has an option for networkCards; we need to support that.
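A hedged sketch of the raw API call with the extra network-card flag the issue asks for, mirroring how the patch above invokes it through the manager; the server ID and flag values are illustrative:

```python
# Sketch only: raw API call; server ID 1000 and the flag values are
# placeholders. Positional booleans follow the patch's call order:
# ipmi, raid_controller, bios, hard_drive, network.
import SoftLayer

client = SoftLayer.create_client_from_env()
client.call('SoftLayer_Hardware_Server', 'createFirmwareUpdateTransaction',
            False, False, False, False, True, id=1000)
```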
0.0
16d18f22e1266410ea2c8a99658fca348a6446e8
[ "tests/CLI/modules/hardware/hardware_firmware_tests.py::HardwareFirmwareTests::test_update_firmware", "tests/CLI/modules/hardware/hardware_firmware_tests.py::HardwareFirmwareTests::test_update_firmware_just_all", "tests/CLI/modules/hardware/hardware_firmware_tests.py::HardwareFirmwareTests::test_update_firmware_just_bios", "tests/CLI/modules/hardware/hardware_firmware_tests.py::HardwareFirmwareTests::test_update_firmware_just_disk", "tests/CLI/modules/hardware/hardware_firmware_tests.py::HardwareFirmwareTests::test_update_firmware_just_ipmi", "tests/CLI/modules/hardware/hardware_firmware_tests.py::HardwareFirmwareTests::test_update_firmware_just_nic", "tests/CLI/modules/hardware/hardware_firmware_tests.py::HardwareFirmwareTests::test_update_firmware_just_raid", "tests/managers/hardware_tests.py::HardwareTests::test_update_firmware", "tests/managers/hardware_tests.py::HardwareTests::test_update_firmware_selective" ]
[ "tests/CLI/modules/hardware/hardware_basic_tests.py::HardwareCLITests::test_add_notification", "tests/CLI/modules/hardware/hardware_basic_tests.py::HardwareCLITests::test_authorize_hw", "tests/CLI/modules/hardware/hardware_basic_tests.py::HardwareCLITests::test_authorize_hw_empty", "tests/CLI/modules/hardware/hardware_basic_tests.py::HardwareCLITests::test_authorize_hw_no_confirm", "tests/CLI/modules/hardware/hardware_basic_tests.py::HardwareCLITests::test_bandwidth_hw", "tests/CLI/modules/hardware/hardware_basic_tests.py::HardwareCLITests::test_bandwidth_hw_quite", "tests/CLI/modules/hardware/hardware_basic_tests.py::HardwareCLITests::test_billing", "tests/CLI/modules/hardware/hardware_basic_tests.py::HardwareCLITests::test_cancel_server", "tests/CLI/modules/hardware/hardware_basic_tests.py::HardwareCLITests::test_check_for_closing", "tests/CLI/modules/hardware/hardware_basic_tests.py::HardwareCLITests::test_components", "tests/CLI/modules/hardware/hardware_basic_tests.py::HardwareCLITests::test_create_credential", "tests/CLI/modules/hardware/hardware_basic_tests.py::HardwareCLITests::test_create_hw_no_confirm", "tests/CLI/modules/hardware/hardware_basic_tests.py::HardwareCLITests::test_create_options", "tests/CLI/modules/hardware/hardware_basic_tests.py::HardwareCLITests::test_create_options_location", "tests/CLI/modules/hardware/hardware_basic_tests.py::HardwareCLITests::test_create_options_prices", "tests/CLI/modules/hardware/hardware_basic_tests.py::HardwareCLITests::test_create_server", "tests/CLI/modules/hardware/hardware_basic_tests.py::HardwareCLITests::test_create_server_test_flag", "tests/CLI/modules/hardware/hardware_basic_tests.py::HardwareCLITests::test_create_server_with_export", "tests/CLI/modules/hardware/hardware_basic_tests.py::HardwareCLITests::test_create_server_with_router", "tests/CLI/modules/hardware/hardware_basic_tests.py::HardwareCLITests::test_detail_drives", "tests/CLI/modules/hardware/hardware_basic_tests.py::HardwareCLITests::test_detail_empty_allotment", "tests/CLI/modules/hardware/hardware_basic_tests.py::HardwareCLITests::test_detail_vs_empty_tag", "tests/CLI/modules/hardware/hardware_basic_tests.py::HardwareCLITests::test_dns_sync_both", "tests/CLI/modules/hardware/hardware_basic_tests.py::HardwareCLITests::test_dns_sync_edit_a", "tests/CLI/modules/hardware/hardware_basic_tests.py::HardwareCLITests::test_dns_sync_edit_ptr", "tests/CLI/modules/hardware/hardware_basic_tests.py::HardwareCLITests::test_dns_sync_misc_exception", "tests/CLI/modules/hardware/hardware_basic_tests.py::HardwareCLITests::test_dns_sync_v6", "tests/CLI/modules/hardware/hardware_basic_tests.py::HardwareCLITests::test_edit", "tests/CLI/modules/hardware/hardware_basic_tests.py::HardwareCLITests::test_edit_server_failed", "tests/CLI/modules/hardware/hardware_basic_tests.py::HardwareCLITests::test_edit_server_userdata", "tests/CLI/modules/hardware/hardware_basic_tests.py::HardwareCLITests::test_edit_server_userdata_and_file", "tests/CLI/modules/hardware/hardware_basic_tests.py::HardwareCLITests::test_edit_server_userfile", "tests/CLI/modules/hardware/hardware_basic_tests.py::HardwareCLITests::test_going_ready", "tests/CLI/modules/hardware/hardware_basic_tests.py::HardwareCLITests::test_hardware_cancel_no_force", "tests/CLI/modules/hardware/hardware_basic_tests.py::HardwareCLITests::test_hardware_storage", "tests/CLI/modules/hardware/hardware_basic_tests.py::HardwareCLITests::test_harware_power_off_force", 
"tests/CLI/modules/hardware/hardware_basic_tests.py::HardwareCLITests::test_harware_power_on_force", "tests/CLI/modules/hardware/hardware_basic_tests.py::HardwareCLITests::test_list_hw_search_noargs", "tests/CLI/modules/hardware/hardware_basic_tests.py::HardwareCLITests::test_list_hw_search_noargs_domain", "tests/CLI/modules/hardware/hardware_basic_tests.py::HardwareCLITests::test_list_servers", "tests/CLI/modules/hardware/hardware_basic_tests.py::HardwareCLITests::test_monitoring", "tests/CLI/modules/hardware/hardware_basic_tests.py::HardwareCLITests::test_not_ready", "tests/CLI/modules/hardware/hardware_basic_tests.py::HardwareCLITests::test_notification_delete", "tests/CLI/modules/hardware/hardware_basic_tests.py::HardwareCLITests::test_notifications", "tests/CLI/modules/hardware/hardware_basic_tests.py::HardwareCLITests::test_ready", "tests/CLI/modules/hardware/hardware_basic_tests.py::HardwareCLITests::test_rescue", "tests/CLI/modules/hardware/hardware_basic_tests.py::HardwareCLITests::test_sensor", "tests/CLI/modules/hardware/hardware_basic_tests.py::HardwareCLITests::test_sensor_discrete", "tests/CLI/modules/hardware/hardware_basic_tests.py::HardwareCLITests::test_server_cancel_reasons", "tests/CLI/modules/hardware/hardware_basic_tests.py::HardwareCLITests::test_server_credentials", "tests/CLI/modules/hardware/hardware_basic_tests.py::HardwareCLITests::test_server_credentials_exception_password_not_found", "tests/CLI/modules/hardware/hardware_basic_tests.py::HardwareCLITests::test_server_credentials_exception_passwords_not_found", "tests/CLI/modules/hardware/hardware_basic_tests.py::HardwareCLITests::test_server_details", "tests/CLI/modules/hardware/hardware_basic_tests.py::HardwareCLITests::test_server_power_cycle", "tests/CLI/modules/hardware/hardware_basic_tests.py::HardwareCLITests::test_server_power_cycle_negative", "tests/CLI/modules/hardware/hardware_basic_tests.py::HardwareCLITests::test_server_power_off", "tests/CLI/modules/hardware/hardware_basic_tests.py::HardwareCLITests::test_server_power_on", "tests/CLI/modules/hardware/hardware_basic_tests.py::HardwareCLITests::test_server_reboot_default", "tests/CLI/modules/hardware/hardware_basic_tests.py::HardwareCLITests::test_server_reboot_hard", "tests/CLI/modules/hardware/hardware_basic_tests.py::HardwareCLITests::test_server_reboot_negative", "tests/CLI/modules/hardware/hardware_basic_tests.py::HardwareCLITests::test_server_reboot_soft", "tests/CLI/modules/hardware/hardware_basic_tests.py::HardwareCLITests::test_server_reload", "tests/CLI/modules/hardware/hardware_basic_tests.py::HardwareCLITests::test_server_rescue_negative", "tests/CLI/modules/hardware/hardware_basic_tests.py::HardwareCLITests::test_toggle_ipmi_off", "tests/CLI/modules/hardware/hardware_basic_tests.py::HardwareCLITests::test_toggle_ipmi_on", "tests/CLI/modules/hardware/hardware_basic_tests.py::HardwareCLITests::test_upgrade", "tests/CLI/modules/hardware/hardware_basic_tests.py::HardwareCLITests::test_upgrade_aborted", "tests/CLI/modules/hardware/hardware_basic_tests.py::HardwareCLITests::test_upgrade_add_disk", "tests/CLI/modules/hardware/hardware_basic_tests.py::HardwareCLITests::test_upgrade_disk_already_exist", "tests/CLI/modules/hardware/hardware_basic_tests.py::HardwareCLITests::test_upgrade_disk_does_not_exist", "tests/CLI/modules/hardware/hardware_basic_tests.py::HardwareCLITests::test_upgrade_disk_not_price_found", "tests/CLI/modules/hardware/hardware_basic_tests.py::HardwareCLITests::test_upgrade_no_options", 
"tests/CLI/modules/hardware/hardware_basic_tests.py::HardwareCLITests::test_upgrade_resize_disk", "tests/CLI/modules/hardware/hardware_basic_tests.py::HardwareCLITests::test_upgrade_test", "tests/CLI/modules/hardware/hardware_firmware_tests.py::HardwareFirmwareTests::test_reflash_firmware", "tests/CLI/modules/hardware/hardware_firmware_tests.py::HardwareFirmwareTests::test_reflash_firmware_no_confirm", "tests/CLI/modules/hardware/hardware_firmware_tests.py::HardwareFirmwareTests::test_update_firmware_no_confirm", "tests/managers/hardware_tests.py::HardwareTests::test_add_notification", "tests/managers/hardware_tests.py::HardwareTests::test_authorize_storage", "tests/managers/hardware_tests.py::HardwareTests::test_authorize_storage_empty", "tests/managers/hardware_tests.py::HardwareTests::test_cancel_hardware", "tests/managers/hardware_tests.py::HardwareTests::test_cancel_hardware_monthly_now", "tests/managers/hardware_tests.py::HardwareTests::test_cancel_hardware_monthly_whenever", "tests/managers/hardware_tests.py::HardwareTests::test_cancel_hardware_no_billing_item", "tests/managers/hardware_tests.py::HardwareTests::test_cancel_hardware_with_reason_and_comment", "tests/managers/hardware_tests.py::HardwareTests::test_cancel_hardware_without_reason", "tests/managers/hardware_tests.py::HardwareTests::test_cancel_hardwareno_billing_item_or_ticket", "tests/managers/hardware_tests.py::HardwareTests::test_cancel_running_transaction", "tests/managers/hardware_tests.py::HardwareTests::test_change_port_speed_private", "tests/managers/hardware_tests.py::HardwareTests::test_change_port_speed_public", "tests/managers/hardware_tests.py::HardwareTests::test_clear_vlan", "tests/managers/hardware_tests.py::HardwareTests::test_create_credential", "tests/managers/hardware_tests.py::HardwareTests::test_edit", "tests/managers/hardware_tests.py::HardwareTests::test_edit_blank", "tests/managers/hardware_tests.py::HardwareTests::test_edit_meta", "tests/managers/hardware_tests.py::HardwareTests::test_generate_create_dict", "tests/managers/hardware_tests.py::HardwareTests::test_generate_create_dict_by_router_network_component", "tests/managers/hardware_tests.py::HardwareTests::test_generate_create_dict_network_key", "tests/managers/hardware_tests.py::HardwareTests::test_generate_create_dict_no_regions", "tests/managers/hardware_tests.py::HardwareTests::test_get_bandwidth_allocation", "tests/managers/hardware_tests.py::HardwareTests::test_get_bandwidth_allocation_no_allotment", "tests/managers/hardware_tests.py::HardwareTests::test_get_bandwidth_allocation_with_allotment", "tests/managers/hardware_tests.py::HardwareTests::test_get_bandwidth_data", "tests/managers/hardware_tests.py::HardwareTests::test_get_components", "tests/managers/hardware_tests.py::HardwareTests::test_get_create_options", "tests/managers/hardware_tests.py::HardwareTests::test_get_create_options_package_missing", "tests/managers/hardware_tests.py::HardwareTests::test_get_create_options_prices", "tests/managers/hardware_tests.py::HardwareTests::test_get_create_options_prices_by_location", "tests/managers/hardware_tests.py::HardwareTests::test_get_hard_drive_empty", "tests/managers/hardware_tests.py::HardwareTests::test_get_hard_drives", "tests/managers/hardware_tests.py::HardwareTests::test_get_hardware", "tests/managers/hardware_tests.py::HardwareTests::test_get_hardware_fast", "tests/managers/hardware_tests.py::HardwareTests::test_get_hardware_item_prices", "tests/managers/hardware_tests.py::HardwareTests::test_get_none_storage_credentials", 
"tests/managers/hardware_tests.py::HardwareTests::test_get_price_id_disk_capacity", "tests/managers/hardware_tests.py::HardwareTests::test_get_price_id_memory_capacity", "tests/managers/hardware_tests.py::HardwareTests::test_get_price_id_mismatch_capacity", "tests/managers/hardware_tests.py::HardwareTests::test_get_software_component", "tests/managers/hardware_tests.py::HardwareTests::test_get_storage_credentials", "tests/managers/hardware_tests.py::HardwareTests::test_get_storage_iscsi_details", "tests/managers/hardware_tests.py::HardwareTests::test_get_storage_iscsi_empty_details", "tests/managers/hardware_tests.py::HardwareTests::test_get_storage_nas_details", "tests/managers/hardware_tests.py::HardwareTests::test_get_storage_nas_empty_details", "tests/managers/hardware_tests.py::HardwareTests::test_get_tracking_id", "tests/managers/hardware_tests.py::HardwareTests::test_init_with_ordering_manager", "tests/managers/hardware_tests.py::HardwareTests::test_list_hardware", "tests/managers/hardware_tests.py::HardwareTests::test_list_hardware_with_filters", "tests/managers/hardware_tests.py::HardwareTests::test_notification", "tests/managers/hardware_tests.py::HardwareTests::test_notification_del", "tests/managers/hardware_tests.py::HardwareTests::test_place_order", "tests/managers/hardware_tests.py::HardwareTests::test_reflash_firmware", "tests/managers/hardware_tests.py::HardwareTests::test_reflash_firmware_selective", "tests/managers/hardware_tests.py::HardwareTests::test_reload", "tests/managers/hardware_tests.py::HardwareTests::test_remove_vlan", "tests/managers/hardware_tests.py::HardwareTests::test_rescue", "tests/managers/hardware_tests.py::HardwareTests::test_resolve_ids_hostname", "tests/managers/hardware_tests.py::HardwareTests::test_resolve_ids_ip", "tests/managers/hardware_tests.py::HardwareTests::test_sensor", "tests/managers/hardware_tests.py::HardwareTests::test_trunk_vlan", "tests/managers/hardware_tests.py::HardwareTests::test_upgrade", "tests/managers/hardware_tests.py::HardwareTests::test_upgrade_add_disk", "tests/managers/hardware_tests.py::HardwareTests::test_upgrade_blank", "tests/managers/hardware_tests.py::HardwareTests::test_upgrade_full", "tests/managers/hardware_tests.py::HardwareTests::test_upgrade_resize_disk", "tests/managers/hardware_tests.py::HardwareTests::test_verify_order", "tests/managers/hardware_tests.py::HardwareHelperTests::test_bandwidth_key", "tests/managers/hardware_tests.py::HardwareHelperTests::test_is_bonded", "tests/managers/hardware_tests.py::HardwareHelperTests::test_is_private", "tests/managers/hardware_tests.py::HardwareHelperTests::test_matches_location", "tests/managers/hardware_tests.py::HardwareHelperTests::test_port_speed_key", "tests/managers/hardware_tests.py::HardwareHelperTests::test_port_speed_key_exception" ]
{ "failed_lite_validators": [ "has_short_problem_statement", "has_hyperlinks", "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false }
2024-02-19 22:44:36+00:00
mit
5,586
solegalli__feature_engine-175
diff --git a/docs/index.rst b/docs/index.rst index 7601d5b..0d95b18 100644 --- a/docs/index.rst +++ b/docs/index.rst @@ -176,9 +176,9 @@ Feature Selection: - :doc:`selection/DropDuplicateFeatures`: drops duplicated variables from a dataframe - :doc:`selection/DropCorrelatedFeatures`: drops correlated variables from a dataframe - :doc:`selection/ShuffleFeaturesSelector`: selects features by evaluating model performance after feature shuffling +- :doc:`selection/SelectBySingleFeaturePerformance`: selects features based on their performance on univariate estimators - :doc:`selection/RecursiveFeatureElimination`: selects features recursively, by evaluating model performance - Getting Help ------------ diff --git a/docs/selection/SelectBySingleFeaturePerformance.rst b/docs/selection/SelectBySingleFeaturePerformance.rst new file mode 100644 index 0000000..5b3c632 --- /dev/null +++ b/docs/selection/SelectBySingleFeaturePerformance.rst @@ -0,0 +1,56 @@ +SelectBySingleFeaturePerformance +================================ + +The SelectBySingleFeaturePerformance() selects features based on the performance of +machine learning models trained using individual features. In other words, it selects +features based on their individual performance, as returned by estimators trained on +only that particular feature. + +.. code:: python + + import pandas as pd + from sklearn.datasets import load_diabetes + from sklearn.linear_model import LinearRegression + from feature_engine.selection import SelectBySingleFeaturePerformance + + # load dataset + diabetes_X, diabetes_y = load_diabetes(return_X_y=True) + X = pd.DataFrame(diabetes_X) + y = pd.DataFrame(diabetes_y) + + # initialize feature selector + sel = SelectBySingleFeaturePerformance( + estimator=LinearRegression(), scoring="r2", cv=3, threshold=0.01) + + # fit transformer + sel.fit(X, y) + + sel.selected_features_ + +.. code:: python + + [0, 2, 3, 4, 5, 6, 7, 8, 9] + +.. code:: python + + sel.feature_performance_ + +.. code:: python + + {0: 0.029231969375784466, + 1: -0.003738551760264386, + 2: 0.336620809987693, + 3: 0.19219056680145055, + 4: 0.037115559827549806, + 5: 0.017854228256932614, + 6: 0.15153886177526896, + 7: 0.17721609966501747, + 8: 0.3149462084418813, + 9: 0.13876602125792703} + + +API Reference +------------- + +.. autoclass:: feature_engine.selection.SelectBySingleFeaturePerformance + :members: \ No newline at end of file diff --git a/docs/selection/index.rst b/docs/selection/index.rst index ec698bc..bbf6b46 100644 --- a/docs/selection/index.rst +++ b/docs/selection/index.rst @@ -14,4 +14,5 @@ Or in other words to select subsets of variables.
DropDuplicateFeatures DropCorrelatedFeatures ShuffleFeaturesSelector - RecursiveFeatureElimination \ No newline at end of file + SelectBySingleFeaturePerformance + RecursiveFeatureElimination diff --git a/feature_engine/selection/__init__.py b/feature_engine/selection/__init__.py index 4d17261..deaf2e7 100644 --- a/feature_engine/selection/__init__.py +++ b/feature_engine/selection/__init__.py @@ -5,8 +5,9 @@ from .drop_features import DropFeatures from .drop_constant_features import DropConstantFeatures from .drop_duplicate_features import DropDuplicateFeatures from .drop_correlated_features import DropCorrelatedFeatures -from .recursive_feature_elimination import RecursiveFeatureElimination from .shuffle_features import ShuffleFeaturesSelector +from .single_feature_performance_selection import SelectBySingleFeaturePerformance +from .recursive_feature_elimination import RecursiveFeatureElimination __all__ = [ "DropFeatures", @@ -14,5 +15,6 @@ __all__ = [ "DropDuplicateFeatures", "DropCorrelatedFeatures", "ShuffleFeaturesSelector", - "RecursiveFeatureElimination" + "SelectBySingleFeaturePerformance", + "RecursiveFeatureElimination", ] diff --git a/feature_engine/selection/single_feature_performance_selection.py b/feature_engine/selection/single_feature_performance_selection.py new file mode 100644 index 0000000..953bece --- /dev/null +++ b/feature_engine/selection/single_feature_performance_selection.py @@ -0,0 +1,175 @@ +from sklearn.base import BaseEstimator, TransformerMixin +from sklearn.ensemble import RandomForestClassifier +from sklearn.model_selection import cross_validate +from sklearn.utils.validation import check_is_fitted + +from feature_engine.dataframe_checks import ( + _is_dataframe, + _check_input_matches_training_df, +) +from feature_engine.variable_manipulation import ( + _define_variables, + _find_numerical_variables, +) + + +class SelectBySingleFeaturePerformance(BaseEstimator, TransformerMixin): + """ + + SelectBySingleFeaturePerformance selects features based on the performance obtained + from a machine learning model trained utilising a single feature. In other words, + it trains a machine learning model for every single feature, utilising that + individual feature, then determines each model's performance. If the performance of + the model based on the single feature is greater than a user-specified threshold, + then the feature is retained, otherwise removed. + + The models trained on the individual features are trained using cross-validation. + The performance metric to evaluate and the machine learning model to train are + specified by the user. + + Parameters + ---------- + + variables : str or list, default=None + The list of variable(s) to be evaluated. + If None, the transformer will evaluate all numerical variables in the dataset. + + estimator: object, default = RandomForestClassifier() + A Scikit-learn estimator for regression or classification. + + scoring: str, default='roc_auc' + Desired metric to optimise the performance of the estimator. Comes from + sklearn.metrics. See the model evaluation documentation for more options: + https://scikit-learn.org/stable/modules/model_evaluation.html + + threshold: float, int, default = 0.5 + The value that defines if a feature will be kept or removed. Note that for + metrics like roc-auc, r2_score and accuracy, the thresholds will be floats + between 0 and 1. For metrics like the mean_square_error and the + root_mean_square_error the threshold will be a big number. + The threshold must be defined by the user.
+ + cv : int, default=3 + Desired number of cross-validation folds to be used to fit the estimator. + + Attributes + ---------- + + selected_features_: list + The selected features. + + feature_performance_: dict + A dictionary containing the feature name as key and the performance of the + model trained on each feature as value. + + Methods + ------- + + fit: finds important features + + transform: removes non-important / non-selected features + + fit_transform: finds and removes non-important features + + """ + + def __init__( + self, + estimator=RandomForestClassifier(), + scoring="roc_auc", + cv=3, + threshold=0.5, + variables=None, + ): + + if not isinstance(cv, int) or cv < 1: + raise ValueError("cv can only take positive integers") + + if not isinstance(threshold, (int, float)): + raise ValueError("threshold can only be integer or float") + + self.variables = _define_variables(variables) + self.estimator = estimator + self.scoring = scoring + self.threshold = threshold + self.cv = cv + + def fit(self, X, y): + """ + + Args + ---- + + X: pandas dataframe of shape = [n_samples, n_features] + The input dataframe + + y: array-like of shape (n_samples) + Target variable. Required to train the estimator. + + + Returns + ------- + + self + """ + + # check input dataframe + X = _is_dataframe(X) + + # find numerical variables or check variables entered by user + self.variables = _find_numerical_variables(X, self.variables) + + # list to collect selected features + self.selected_features_ = [] + + self.feature_performance_ = {} + + # train a model for every feature + for feature in self.variables: + model = cross_validate( + self.estimator, + X[feature].to_frame(), + y, + cv=self.cv, + return_estimator=False, + scoring=self.scoring, + ) + + if model["test_score"].mean() > self.threshold: + self.selected_features_.append(feature) + + self.feature_performance_[feature] = model["test_score"].mean() + + self.input_shape_ = X.shape + + return self + + def transform(self, X): + """ + Removes non-selected features. + + Args + ---- + + X: pandas dataframe of shape = [n_samples, n_features]. + The input dataframe from which non-selected features will be removed. + + + Returns + ------- + + X_transformed: pandas dataframe + of shape = [n_samples, selected_features] + Pandas dataframe with the selected features. + """ + + # check if fit is performed prior to transform + check_is_fitted(self) + + # check if input is a dataframe + X = _is_dataframe(X) + + # check if number of columns in test dataset matches to train dataset + _check_input_matches_training_df(X, self.input_shape_[1]) + + return X[self.selected_features_]
solegalli/feature_engine
643377419820de55d4001d977fac696f3af2eb0f
diff --git a/tests/test_selection/test_single_feature_performance_selection.py b/tests/test_selection/test_single_feature_performance_selection.py new file mode 100644 index 0000000..731834f --- /dev/null +++ b/tests/test_selection/test_single_feature_performance_selection.py @@ -0,0 +1,168 @@ +import numpy as np +import pandas as pd +import pytest +from sklearn.ensemble import RandomForestClassifier +from sklearn.exceptions import NotFittedError +from sklearn.linear_model import LinearRegression +from sklearn.tree import DecisionTreeRegressor + +from feature_engine.selection import SelectBySingleFeaturePerformance + + +def test_default_parameters(df_test): + X, y = df_test + sel = SelectBySingleFeaturePerformance(RandomForestClassifier(random_state=1)) + sel.fit(X, y) + + # expected result + Xtransformed = X.copy() + Xtransformed.drop("var_3", 1, inplace=True) + Xtransformed.drop("var_10", 1, inplace=True) + + # test init params + assert sel.variables == [ + "var_0", + "var_1", + "var_2", + "var_3", + "var_4", + "var_5", + "var_6", + "var_7", + "var_8", + "var_9", + "var_10", + "var_11", + ] + assert sel.threshold == 0.5 + assert sel.cv == 3 + assert sel.scoring == "roc_auc" + # test fit attrs + assert sel.selected_features_ == [ + "var_0", + "var_1", + "var_2", + "var_4", + "var_5", + "var_6", + "var_7", + "var_8", + "var_9", + "var_11", + ] + assert sel.feature_performance_ == { + "var_0": 0.5957642619540211, + "var_1": 0.5365534287221033, + "var_2": 0.5001855546283257, + "var_3": 0.4752954458526748, + "var_4": 0.9780875304971691, + "var_5": 0.5065441419357082, + "var_6": 0.9758243290622809, + "var_7": 0.994571685008432, + "var_8": 0.5164434795458892, + "var_9": 0.9543427678969847, + "var_10": 0.47404183834906727, + "var_11": 0.5227164067525513, + } + # test transform output + pd.testing.assert_frame_equal(sel.transform(X), Xtransformed) + + +def test_regression_cv_3_and_r2(load_diabetes_dataset): + # test for regression using cv=3, and the r2 as metric. + X, y = load_diabetes_dataset + sel = SelectBySingleFeaturePerformance( + estimator=LinearRegression(), scoring="r2", cv=3, threshold=0.01 + ) + sel.fit(X, y) + + # expected output + Xtransformed = pd.DataFrame(X[[0, 2, 3, 4, 5, 6, 7, 8, 9]].copy()) + + performance_dict = { + 0: 0.029, + 1: -0.004, + 2: 0.337, + 3: 0.192, + 4: 0.037, + 5: 0.018, + 6: 0.152, + 7: 0.177, + 8: 0.315, + 9: 0.139, + } + + # test init params + assert sel.cv == 3 + assert sel.variables == list(X.columns) + assert sel.scoring == "r2" + assert sel.threshold == 0.01 + # fit params + assert sel.selected_features_ == [0, 2, 3, 4, 5, 6, 7, 8, 9] + assert all( + np.round(sel.feature_performance_[f], 3) == performance_dict[f] + for f in sel.feature_performance_.keys() + ) + # test transform output + pd.testing.assert_frame_equal(sel.transform(X), Xtransformed) + + +def test_regression_cv_2_and_mse(load_diabetes_dataset): + # test for regression using cv=2, and the neg_mean_squared_error as metric. 
+ # add suitable threshold for regression mse + + X, y = load_diabetes_dataset + sel = SelectBySingleFeaturePerformance( + estimator=DecisionTreeRegressor(random_state=0), + scoring="neg_mean_squared_error", + cv=2, + threshold=10, + ) + # fit transformer + sel.fit(X, y) + + # expected output + Xtransformed = X.copy() + Xtransformed.drop( + Xtransformed.columns[[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]], axis=1, inplace=True + ) + + # test init params + assert sel.cv == 2 + assert sel.variables == list(X.columns) + assert sel.scoring == "neg_mean_squared_error" + assert sel.threshold == 10 + # fit params + assert sel.selected_features_ == [] + assert sel.feature_performance_ == { + 0: -7657.154138192973, + 1: -5966.662211695372, + 2: -6613.779604700854, + 3: -6502.621725718592, + 4: -9415.586278197177, + 5: -11760.999622926094, + 6: -6592.584431571728, + 7: -5270.563893676307, + 8: -7641.414795123177, + 9: -6287.557824391035, + } + # test transform output + print(sel.transform(X)) + pd.testing.assert_frame_equal(sel.transform(X), Xtransformed) + + +def test_non_fitted_error(df_test): + # when fit is not called prior to transform + with pytest.raises(NotFittedError): + sel = SelectBySingleFeaturePerformance() + sel.transform(df_test) + + +def test_raises_cv_error(): + with pytest.raises(ValueError): + SelectBySingleFeaturePerformance(cv=0) + + +def test_raises_threshold_error(): + with pytest.raises(ValueError): + SelectBySingleFeaturePerformance(threshold=None)
feature_selection: select by individual feature performance with an ML algo. A transformer that implements this: https://github.com/solegalli/feature-selection-for-machine-learning/blob/master/06-Filter-other-metrics/06.1-Univariate-roc-auc.ipynb It should be able to take pretty much any ML algorithm.
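A hedged sketch of how such a transformer can be used with the API the accompanying patch introduces; the dataset, classifier, and threshold are illustrative choices, not part of the original issue:

```python
# Sketch only: dataset, estimator and threshold are illustrative.
from sklearn.datasets import load_breast_cancer
from sklearn.linear_model import LogisticRegression
from feature_engine.selection import SelectBySingleFeaturePerformance

X, y = load_breast_cancer(return_X_y=True, as_frame=True)

# One model per feature, evaluated with cross-validation; keep features
# whose single-feature roc-auc exceeds the threshold.
sel = SelectBySingleFeaturePerformance(
    estimator=LogisticRegression(max_iter=1000),
    scoring="roc_auc",
    cv=3,
    threshold=0.6,
)
X_reduced = sel.fit_transform(X, y)
print(sel.feature_performance_)  # per-feature cross-validated scores
```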
0.0
643377419820de55d4001d977fac696f3af2eb0f
[ "tests/test_selection/test_single_feature_performance_selection.py::test_regression_cv_3_and_r2", "tests/test_selection/test_single_feature_performance_selection.py::test_non_fitted_error", "tests/test_selection/test_single_feature_performance_selection.py::test_raises_cv_error", "tests/test_selection/test_single_feature_performance_selection.py::test_raises_threshold_error" ]
[]
{ "failed_lite_validators": [ "has_short_problem_statement", "has_hyperlinks", "has_added_files", "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false }
2020-11-13 19:37:56+00:00
bsd-3-clause
5,587
solegalli__feature_engine-249
diff --git a/feature_engine/creation/mathematical_combination.py b/feature_engine/creation/mathematical_combination.py index fda42a6..e762d70 100644 --- a/feature_engine/creation/mathematical_combination.py +++ b/feature_engine/creation/mathematical_combination.py @@ -49,6 +49,11 @@ class MathematicalCombination(BaseEstimator, TransformerMixin): **total_number_payments** and **mean_number_payments**, plus the original set of variables. + Attention, if some of the variables to combine have missing data and + missing_values = 'ignore', the value will be ignored in the computation. The + computation will then be performed using the remaining variables to combine, for + observations with NA. + Parameters ---------- @@ -87,6 +92,12 @@ class MathematicalCombination(BaseEstimator, TransformerMixin): to the newly created features starting by the name of the mathematical operation, followed by the variables combined separated by -. + missing_values : string, default='raise' + Indicates whether missing values should be ignored or should raise an error. If + `missing_values='raise'` the transformer will raise an error if the + datasets to fit or transform contain missing values. Alternatively, use + 'ignore'. + Attributes ---------- combination_dict_ : @@ -125,6 +136,7 @@ class MathematicalCombination(BaseEstimator, TransformerMixin): variables_to_combine: List[Union[str, int]], math_operations: Optional[List[str]] = None, new_variables_names: Optional[List[str]] = None, + missing_values: str = "raise", ) -> None: # check input types @@ -160,6 +172,9 @@ class MathematicalCombination(BaseEstimator, TransformerMixin): "Choose one or more of ['sum', 'prod', 'mean', 'std', 'max', 'min']" ) + if missing_values not in ["raise", "ignore"]: + raise ValueError("missing_values takes only values 'raise' or 'ignore'") + # check input logic if len(variables_to_combine) <= 1: raise KeyError( @@ -180,6 +195,7 @@ class MathematicalCombination(BaseEstimator, TransformerMixin): self.variables_to_combine = variables_to_combine self.new_variables_names = new_variables_names self.math_operations = math_operations + self.missing_values = missing_values def fit(self, X: pd.DataFrame, y: Optional[pd.Series] = None): """ @@ -203,7 +219,7 @@ class MathematicalCombination(BaseEstimator, TransformerMixin): - If the input is not a Pandas DataFrame - If any user provided variables in variables_to_combine are not numerical ValueError - If the variable(s) contain null values + If the variable(s) contain null values when missing_values = raise Returns ------- @@ -219,7 +235,8 @@ class MathematicalCombination(BaseEstimator, TransformerMixin): ) # check if dataset contains na - _check_contains_na(X, self.variables_to_combine) + if self.missing_values == "raise": + _check_contains_na(X, self.variables_to_combine) if self.math_operations is None: self.math_operations_ = ["sum", "prod", "mean", "std", "max", "min"] @@ -260,7 +277,7 @@ class MathematicalCombination(BaseEstimator, TransformerMixin): TypeError If the input is not a Pandas DataFrame ValueError - - If the variable(s) contain null values + - If the variable(s) contain null values when missing_values = raise - If the dataframe is not of the same size as that used in fit() Returns @@ -276,7 +293,8 @@ class MathematicalCombination(BaseEstimator, TransformerMixin): X = _is_dataframe(X) # check if dataset contains na - _check_contains_na(X, self.variables_to_combine) + if self.missing_values == "raise": + _check_contains_na(X, self.variables_to_combine) # Check if input data contains same number
of columns as dataframe used to fit. _check_input_matches_training_df(X, self.input_shape_[1])
solegalli/feature_engine
24b8cbdc0aea1e8c266b805947f38e9f02369d69
diff --git a/tests/test_creation/test_mathematical_combination.py b/tests/test_creation/test_mathematical_combination.py index 2640789..19cde4d 100644 --- a/tests/test_creation/test_mathematical_combination.py +++ b/tests/test_creation/test_mathematical_combination.py @@ -1,3 +1,4 @@ +import numpy as np import pandas as pd import pytest @@ -271,3 +272,55 @@ def test_variable_names_when_df_cols_are_integers(df_numeric_columns): } # transform params pd.testing.assert_frame_equal(X, ref) + + +def test_error_when_null_values_in_variable(df_vartypes): + + df_na = df_vartypes.copy() + df_na.loc[1, 'Age'] = np.nan + + with pytest.raises(ValueError): + math_combinator = MathematicalCombination( + variables_to_combine=["Age", "Marks"], + math_operations=["sum", "mean"], + missing_values="raise", + ) + math_combinator.fit(df_na) + + with pytest.raises(ValueError): + + math_combinator = MathematicalCombination( + variables_to_combine=["Age", "Marks"], + math_operations=["sum", "mean"], + missing_values="raise", + ) + math_combinator.fit(df_vartypes) + math_combinator.transform(df_na) + + +def test_no_error_when_null_values_in_variable(df_vartypes): + + df_na = df_vartypes.copy() + df_na.loc[1, 'Age'] = np.nan + + transformer = MathematicalCombination( + variables_to_combine=["Age", "Marks"], + math_operations=["sum", "mean"], + missing_values="ignore", + ) + + X = transformer.fit_transform(df_na) + + ref = pd.DataFrame.from_dict( + { + "Name": ["tom", "nick", "krish", "jack"], + "City": ["London", "Manchester", "Liverpool", "Bristol"], + "Age": [20, np.nan, 19, 18], + "Marks": [0.9, 0.8, 0.7, 0.6], + "dob": pd.date_range("2020-02-24", periods=4, freq="T"), + "sum(Age-Marks)": [20.9, 0.8, 19.7, 18.6], + "mean(Age-Marks)": [10.45, 0.8, 9.85, 9.3], + } + ) + # transform params + pd.testing.assert_frame_equal(X, ref)
allow MathematicalCombination to take in null values Introduce a parameter that lets the user select whether missing values should be ignored or should raise an error.
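A minimal usage sketch of the behaviour described here. The import path follows the patched file's location in this record, and the column names and data are illustrative only:

```python
import numpy as np
import pandas as pd

from feature_engine.creation import MathematicalCombination

df = pd.DataFrame({"Age": [20, np.nan, 19], "Marks": [0.9, 0.8, 0.7]})

# With missing_values="ignore", NaNs are skipped and the combination is
# computed from the remaining variables (row 1 uses Marks alone).
combinator = MathematicalCombination(
    variables_to_combine=["Age", "Marks"],
    math_operations=["sum"],
    missing_values="ignore",
)
print(combinator.fit_transform(df))

# missing_values="raise" (the default) raises a ValueError on the same data.
```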
0.0
24b8cbdc0aea1e8c266b805947f38e9f02369d69
[ "tests/test_creation/test_mathematical_combination.py::test_error_when_null_values_in_variable", "tests/test_creation/test_mathematical_combination.py::test_no_error_when_null_values_in_variable" ]
[ "tests/test_creation/test_mathematical_combination.py::test_error_when_param_variables_not_entered", "tests/test_creation/test_mathematical_combination.py::test_error_when_user_enters_one_variable", "tests/test_creation/test_mathematical_combination.py::test_error_when_variables_to_combine_wrong_type", "tests/test_creation/test_mathematical_combination.py::test_error_if_operation_not_supported", "tests/test_creation/test_mathematical_combination.py::test_error_if_operation_is_wrong_type", "tests/test_creation/test_mathematical_combination.py::test_error_if_new_variable_names_of_wrong_type", "tests/test_creation/test_mathematical_combination.py::test_error_if_variable_names_and_operations_list_length_not_equal", "tests/test_creation/test_mathematical_combination.py::test_default_parameters", "tests/test_creation/test_mathematical_combination.py::test_error_when_variables_to_combine_not_numeric", "tests/test_creation/test_mathematical_combination.py::test_error_when_entered_variables_not_in_df", "tests/test_creation/test_mathematical_combination.py::test_user_enters_two_operations", "tests/test_creation/test_mathematical_combination.py::test_user_enters_output_variable_names", "tests/test_creation/test_mathematical_combination.py::test_one_mathematical_operation", "tests/test_creation/test_mathematical_combination.py::test_variable_names_when_df_cols_are_integers" ]
{ "failed_lite_validators": [ "has_short_problem_statement", "has_many_hunks" ], "has_test_patch": true, "is_lite": false }
2021-04-21 10:25:38+00:00
bsd-3-clause
5,588
solegalli__feature_engine-266
diff --git a/feature_engine/encoding/one_hot.py b/feature_engine/encoding/one_hot.py index c952a80..a2e2736 100644 --- a/feature_engine/encoding/one_hot.py +++ b/feature_engine/encoding/one_hot.py @@ -77,7 +77,14 @@ class OneHotEncoder(BaseCategoricalTransformer): drop_last: boolean, default=False Only used if `top_categories = None`. It indicates whether to create dummy variables for all the categories (k dummies), or if set to `True`, it will - ignore the last binary variable of the list (k-1 dummies). + ignore the last binary variable and return k-1 dummies. + + drop_last_binary: boolean, default=False + Whether to return 1 or 2 dummy variables for binary categorical variables. When + a categorical variable has only 2 categories, then the second dummy variable + created by one hot encoding can be completely redundant. Setting this parameter + to `True`, will ensure that for every binary variable in the dataset, only 1 + dummy is created. variables: list, default=None The list of categorical variables that will be encoded. If None, the @@ -100,6 +107,10 @@ class OneHotEncoder(BaseCategoricalTransformer): variables_: The group of variables that will be transformed. + variables_binary_: + A list with binary variables identified from the data. That is, variables with + only 2 categories. + n_features_in_: The number of features in the train set used in fit. @@ -135,6 +146,7 @@ class OneHotEncoder(BaseCategoricalTransformer): self, top_categories: Optional[int] = None, drop_last: bool = False, + drop_last_binary: bool = False, variables: Union[None, int, str, List[Union[str, int]]] = None, ignore_format: bool = False, ) -> None: @@ -145,11 +157,15 @@ class OneHotEncoder(BaseCategoricalTransformer): if not isinstance(drop_last, bool): raise ValueError("drop_last takes only True or False") + if not isinstance(drop_last_binary, bool): + raise ValueError("drop_last_binary takes only True or False") + if not isinstance(ignore_format, bool): raise ValueError("ignore_format takes only booleans True and False") self.top_categories = top_categories self.drop_last = drop_last + self.drop_last_binary = drop_last_binary self.variables = _check_input_parameter_variables(variables) self.ignore_format = ignore_format @@ -188,15 +204,9 @@ class OneHotEncoder(BaseCategoricalTransformer): self.encoder_dict_ = {} - for var in self.variables_: - if not self.top_categories: - if self.drop_last: - category_ls = [x for x in X[var].unique()] - self.encoder_dict_[var] = category_ls[:-1] - else: - self.encoder_dict_[var] = X[var].unique() - - else: + # make dummies only for the most popular categories + if self.top_categories: + for var in self.variables_: self.encoder_dict_[var] = [ x for x in X[var] @@ -206,6 +216,27 @@ class OneHotEncoder(BaseCategoricalTransformer): .index ] + else: + # return k-1 dummies + if self.drop_last: + for var in self.variables_: + category_ls = [x for x in X[var].unique()] + self.encoder_dict_[var] = category_ls[:-1] + + # return k dummies + else: + for var in self.variables_: + self.encoder_dict_[var] = X[var].unique() + + self.variables_binary_ = [ + var for var in self.variables_ if X[var].nunique() == 2 + ] + + # automatically encode binary variables as 1 dummy + if self.drop_last_binary: + for var in self.variables_binary_: + self.encoder_dict_[var] = X[var].unique()[0] + self._check_encoding_dictionary() self.n_features_in_ = X.shape[1]
solegalli/feature_engine
627519dd888bf3e2c8dd8a01e20fa83988cac7b5
diff --git a/tests/test_encoding/test_onehot_encoder.py b/tests/test_encoding/test_onehot_encoder.py index 4f6b50f..d2eb5b7 100644 --- a/tests/test_encoding/test_onehot_encoder.py +++ b/tests/test_encoding/test_onehot_encoder.py @@ -141,7 +141,6 @@ def test_transform_raises_error_if_df_contains_na(df_enc_big, df_enc_big_na): def test_encode_numerical_variables(df_enc_numeric): - encoder = OneHotEncoder( top_categories=None, variables=None, @@ -171,7 +170,6 @@ def test_encode_numerical_variables(df_enc_numeric): def test_variables_cast_as_category(df_enc_numeric): - encoder = OneHotEncoder( top_categories=None, variables=None, @@ -201,3 +199,121 @@ def test_variables_cast_as_category(df_enc_numeric): assert encoder.n_features_in_ == 2 # test transform output pd.testing.assert_frame_equal(X, transf) + + [email protected](scope="module") +def df_enc_binary(): + df = { + "var_A": ["A"] * 6 + ["B"] * 10 + ["C"] * 4, + "var_B": ["A"] * 10 + ["B"] * 6 + ["C"] * 4, + "var_C": ["A"] * 10 + ["B"] * 10, + "target": [1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 1, 1, 0, 0], + } + df = pd.DataFrame(df) + + return df + + +def test_encode_into_k_binary_plus_drop_binary(df_enc_binary): + encoder = OneHotEncoder( + top_categories=None, variables=None, drop_last=False, drop_last_binary=True + ) + X = encoder.fit_transform(df_enc_binary) + + # test fit attr + transf = { + "target": [1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 1, 1, 0, 0], + "var_A_A": [1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], + "var_A_B": [0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0], + "var_A_C": [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1], + "var_B_A": [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], + "var_B_B": [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0], + "var_B_C": [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1], + "var_C_A": [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], + } + + transf = pd.DataFrame(transf).astype("int32") + X = pd.DataFrame(X).astype("int32") + + assert encoder.variables_ == ["var_A", "var_B", "var_C"] + assert encoder.n_features_in_ == 4 + # test transform output + pd.testing.assert_frame_equal(X, transf) + assert "var_C_B" not in X.columns + + +def test_encode_into_kminus1_binary_plus_drop_binary(df_enc_binary): + encoder = OneHotEncoder( + top_categories=None, variables=None, drop_last=True, drop_last_binary=True + ) + X = encoder.fit_transform(df_enc_binary) + + # test fit attr + transf = { + "target": [1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 1, 1, 0, 0], + "var_A_A": [1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], + "var_A_B": [0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0], + "var_B_A": [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], + "var_B_B": [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0], + "var_C_A": [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], + } + + transf = pd.DataFrame(transf).astype("int32") + X = pd.DataFrame(X).astype("int32") + + assert encoder.variables_ == ["var_A", "var_B", "var_C"] + assert encoder.n_features_in_ == 4 + # test transform output + pd.testing.assert_frame_equal(X, transf) + assert "var_C_B" not in X.columns + + +def test_encode_into_top_categories_plus_drop_binary(df_enc_binary): + + # top_categories = 1 + encoder = OneHotEncoder( + top_categories=1, variables=None, drop_last=False, drop_last_binary=True + ) + X = encoder.fit_transform(df_enc_binary) + + # test fit attr + transf = { 
+ "target": [1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 1, 1, 0, 0], + "var_A_B": [0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0], + "var_B_A": [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], + "var_C_A": [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], + } + + transf = pd.DataFrame(transf).astype("int32") + X = pd.DataFrame(X).astype("int32") + + assert encoder.variables_ == ["var_A", "var_B", "var_C"] + assert encoder.n_features_in_ == 4 + # test transform output + pd.testing.assert_frame_equal(X, transf) + assert "var_C_B" not in X.columns + + # top_categories = 2 + encoder = OneHotEncoder( + top_categories=2, variables=None, drop_last=False, drop_last_binary=True + ) + X = encoder.fit_transform(df_enc_binary) + + # test fit attr + transf = { + "target": [1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 1, 1, 0, 0], + "var_A_B": [0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0], + "var_A_A": [1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], + "var_B_A": [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], + "var_B_B": [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0], + "var_C_A": [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], + } + + transf = pd.DataFrame(transf).astype("int32") + X = pd.DataFrame(X).astype("int32") + + assert encoder.variables_ == ["var_A", "var_B", "var_C"] + assert encoder.n_features_in_ == 4 + # test transform output + pd.testing.assert_frame_equal(X, transf) + assert "var_C_B" not in X.columns
one hot encoder has the option to automatically drop the second binary variable for binary categorical variables We need to add one parameter to the init method of the one hot encoder, called for example automate_drop_last_binary, that when True will automatically return 1 binary variable if the original variable was binary. Details: - automate_drop_last_binary should default to False so as not to break backwards compatibility - automate_drop_last_binary would only work for binary variables - automate_drop_last_binary would only work if drop_last=False When automate_drop_last_binary=True, binary variables would result in the addition of just one dummy instead of 2. So for the variable colour, with categories black and white, instead of returning the two binary variables colour_black and colour_white, which are redundant, it would only return colour_black. In addition, we should add an attribute in the fit method called for example binary_variables_ where the transformer returns a list with the names of the binary variables. To finish off this PR, we need to add 1-2 tests to make sure that the behaviour is as expected
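A hedged sketch of the requested behaviour. Note that the merged patch in this record settled on the names drop_last_binary and variables_binary_ rather than the automate_drop_last_binary / binary_variables_ proposed in the issue; the data below mirrors the test fixture:

```python
import pandas as pd

from feature_engine.encoding import OneHotEncoder

df = pd.DataFrame({"var_C": ["A", "A", "B", "B"]})

encoder = OneHotEncoder(drop_last=False, drop_last_binary=True)
X = encoder.fit_transform(df)

# var_C has only 2 categories, so a single dummy is kept and the
# redundant second dummy (var_C_B) is never created.
print(X.columns.tolist())          # ['var_C_A']
print(encoder.variables_binary_)   # ['var_C']
```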
0.0
627519dd888bf3e2c8dd8a01e20fa83988cac7b5
[ "tests/test_encoding/test_onehot_encoder.py::test_encode_into_k_binary_plus_drop_binary", "tests/test_encoding/test_onehot_encoder.py::test_encode_into_kminus1_binary_plus_drop_binary", "tests/test_encoding/test_onehot_encoder.py::test_encode_into_top_categories_plus_drop_binary" ]
[ "tests/test_encoding/test_onehot_encoder.py::test_encode_categories_in_k_binary_plus_select_vars_automatically", "tests/test_encoding/test_onehot_encoder.py::test_encode_categories_in_k_minus_1_binary_plus_list_of_variables", "tests/test_encoding/test_onehot_encoder.py::test_encode_top_categories", "tests/test_encoding/test_onehot_encoder.py::test_error_if_top_categories_not_integer", "tests/test_encoding/test_onehot_encoder.py::test_error_if_drop_last_not_bool", "tests/test_encoding/test_onehot_encoder.py::test_fit_raises_error_if_df_contains_na", "tests/test_encoding/test_onehot_encoder.py::test_transform_raises_error_if_df_contains_na", "tests/test_encoding/test_onehot_encoder.py::test_encode_numerical_variables", "tests/test_encoding/test_onehot_encoder.py::test_variables_cast_as_category" ]
{ "failed_lite_validators": [ "has_many_hunks" ], "has_test_patch": true, "is_lite": false }
2021-06-10 12:56:37+00:00
bsd-3-clause
5,589
somm15__PyViCare-122
diff --git a/PyViCare/PyViCareDevice.py b/PyViCare/PyViCareDevice.py index c24a412..db3414f 100644 --- a/PyViCare/PyViCareDevice.py +++ b/PyViCare/PyViCareDevice.py @@ -210,6 +210,10 @@ class Device: def getDomesticHotWaterStorageTemperature(self): return self.service.getProperty("heating.dhw.sensors.temperature.hotWaterStorage")["properties"]["value"]["value"] + @handleNotSupported + def getDomesticHotWaterOutletTemperature(self): + return self.service.getProperty("heating.dhw.sensors.temperature.outlet")["properties"]["value"]["value"] + @handleNotSupported def getDomesticHotWaterPumpActive(self): status = self.service.getProperty("heating.dhw.pumps.primary")["properties"]["status"]["value"]
somm15/PyViCare
9cff0d4c1cb09389adac0cc1fcb213b65eca44f6
diff --git a/tests/test_Vitodens200W.py b/tests/test_Vitodens200W.py index ec46fd0..1065406 100644 --- a/tests/test_Vitodens200W.py +++ b/tests/test_Vitodens200W.py @@ -38,3 +38,6 @@ class Vitodens200W(unittest.TestCase): def test_getDomesticHotWaterCirculationPumpActive(self): self.assertEqual(self.device.getDomesticHotWaterCirculationPumpActive(), True) + + def test_getDomesticHotWaterOutletTemperature(self): + self.assertEqual(self.device.getDomesticHotWaterOutletTemperature(), 58)
Outlet temperature Hello, I've just noticed that we are missing the dhw outlet temperature. It would be something like: `return self.service.getProperty("heating.dhw.sensors.temperature.outlet")["properties"]["value"]["value"]` PS. Thank you for your great work!
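A minimal sketch of reading the new data point, assuming (as in the library's unit tests) that Device only needs a service object satisfying the ViCareService interface; real use would go through the library's OAuth setup, which is elided here:

```python
from unittest.mock import Mock

from PyViCare.PyViCareDevice import Device

# Stub service replaying a minimal API response for the new data point.
service = Mock()
service.getProperty.return_value = {"properties": {"value": {"value": 58}}}

device = Device(service)
print(device.getDomesticHotWaterOutletTemperature())  # 58
```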
0.0
9cff0d4c1cb09389adac0cc1fcb213b65eca44f6
[ "tests/test_Vitodens200W.py::Vitodens200W::test_getDomesticHotWaterOutletTemperature" ]
[ "tests/test_Vitodens200W.py::Vitodens200W::test_getBurnerActive", "tests/test_Vitodens200W.py::Vitodens200W::test_getBurnerHours", "tests/test_Vitodens200W.py::Vitodens200W::test_getBurnerModulation", "tests/test_Vitodens200W.py::Vitodens200W::test_getBurnerStarts", "tests/test_Vitodens200W.py::Vitodens200W::test_getDomesticHotWaterCirculationPumpActive", "tests/test_Vitodens200W.py::Vitodens200W::test_getFrostProtectionActive", "tests/test_Vitodens200W.py::Vitodens200W::test_getModes", "tests/test_Vitodens200W.py::Vitodens200W::test_getPowerConsumptionDays", "tests/test_Vitodens200W.py::Vitodens200W::test_getPrograms" ]
{ "failed_lite_validators": [ "has_short_problem_statement" ], "has_test_patch": true, "is_lite": false }
2021-08-01 12:29:17+00:00
apache-2.0
5,590
somm15__PyViCare-124
diff --git a/PyViCare/PyViCareDevice.py b/PyViCare/PyViCareDevice.py index 86a9898..f7254d6 100644 --- a/PyViCare/PyViCareDevice.py +++ b/PyViCare/PyViCareDevice.py @@ -339,6 +339,13 @@ class Device: def activateOneTimeCharge(self): return self.service.setProperty("heating.dhw.oneTimeCharge","activate","{}") + def setDomesticHotWaterCirculationSchedule(self,schedule): + return self.service.setProperty("heating.dhw.pumps.circulation.schedule", "setSchedule","{\"newSchedule\":"+str(schedule)+"}") + + @handleNotSupported + def getDomesticHotWaterCirculationSchedule(self): + return self.service.getProperty("heating.dhw.pumps.circulation.schedule")["commands"]["setSchedule"]["params"]["newSchedule"]["constraints"]["modes"] + @handleNotSupported def getAvailableCircuits(self): return self.service.getProperty("heating.circuits")["properties"]["enabled"]["value"]
somm15/PyViCare
23a97c1ec3ebe89dddaa9a44245fcc595ad3331b
diff --git a/tests/test_Vitodens200W.py b/tests/test_Vitodens200W.py index 3a358c0..ffd3857 100644 --- a/tests/test_Vitodens200W.py +++ b/tests/test_Vitodens200W.py @@ -41,3 +41,8 @@ class Vitodens200W(unittest.TestCase): def test_getDomesticHotWaterOutletTemperature(self): self.assertEqual(self.device.getDomesticHotWaterOutletTemperature(), 58) + + def test_getDomesticHotWaterCirculationSchedule(self): + self.assertEqual(self.device.getDomesticHotWaterCirculationSchedule(), ['on']) + +
Possibility to read/set dhw circulation Hey everyone, is the above-mentioned function working? I set up PyViCare a few months ago, but this parameter shows 'True' all the time. My circulation is configured to be active only 5am-10pm. By the way, is there also a way to influence the circulation pump? I would like to activate it only if someone is at home (and deactivate it when no one is in the house). I use a Viessmann heat pump Vitocal 200-S with Vitoconnect (in case that matters). The other readings (e.g. temperature, compressor, ...) are fine. Regards, Anduril
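For illustration, a minimal sketch of the two calls added by this record's patch, run against a stub service; the nested response shape mirrors the Viessmann API feature documents, and the schedule payload below is an assumption for illustration:

```python
import json
from unittest.mock import Mock

from PyViCare.PyViCareDevice import Device

# Stub service replaying the relevant slice of a recorded API response.
service = Mock()
service.getProperty.return_value = {
    "commands": {
        "setSchedule": {
            "params": {"newSchedule": {"constraints": {"modes": ["on"]}}}
        }
    }
}

device = Device(service)
print(device.getDomesticHotWaterCirculationSchedule())  # ['on']

# Passing a pre-serialised JSON string keeps the payload valid, since the
# setter concatenates str(schedule) into the request body.
new_schedule = json.dumps(
    {"mon": [{"start": "05:00", "end": "22:00", "mode": "on", "position": 0}]}
)
device.setDomesticHotWaterCirculationSchedule(new_schedule)
```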
0.0
23a97c1ec3ebe89dddaa9a44245fcc595ad3331b
[ "tests/test_Vitodens200W.py::Vitodens200W::test_getDomesticHotWaterCirculationSchedule" ]
[ "tests/test_Vitodens200W.py::Vitodens200W::test_getBurnerActive", "tests/test_Vitodens200W.py::Vitodens200W::test_getBurnerHours", "tests/test_Vitodens200W.py::Vitodens200W::test_getBurnerModulation", "tests/test_Vitodens200W.py::Vitodens200W::test_getBurnerStarts", "tests/test_Vitodens200W.py::Vitodens200W::test_getDomesticHotWaterCirculationPumpActive", "tests/test_Vitodens200W.py::Vitodens200W::test_getDomesticHotWaterOutletTemperature", "tests/test_Vitodens200W.py::Vitodens200W::test_getFrostProtectionActive", "tests/test_Vitodens200W.py::Vitodens200W::test_getModes", "tests/test_Vitodens200W.py::Vitodens200W::test_getPowerConsumptionDays", "tests/test_Vitodens200W.py::Vitodens200W::test_getPrograms" ]
{ "failed_lite_validators": [], "has_test_patch": true, "is_lite": true }
2021-08-01 13:13:11+00:00
apache-2.0
5,591
somm15__PyViCare-132
diff --git a/PyViCare/PyViCareService.py b/PyViCare/PyViCareService.py index bd4d1fe..f280fde 100644 --- a/PyViCare/PyViCareService.py +++ b/PyViCare/PyViCareService.py @@ -45,5 +45,5 @@ class ViCareService: url = buildSetPropertyUrl( self.accessor, property_name, action) - post_data = data if isinstance(data, str) else json.dump(data) + post_data = data if isinstance(data, str) else json.dumps(data) return self.oauth_manager.post(url, post_data) \ No newline at end of file
somm15/PyViCare
97d953fb0752de5d25776a599086d0f2d5fb9fba
diff --git a/tests/test_PyViCareService.py b/tests/test_PyViCareService.py new file mode 100644 index 0000000..0fc4abe --- /dev/null +++ b/tests/test_PyViCareService.py @@ -0,0 +1,27 @@ +import unittest +from PyViCare.PyViCareService import ViCareDeviceAccessor, ViCareService +from PyViCare.PyViCareOAuthManager import AbstractViCareOAuthManager +from unittest.mock import Mock + + + + +class GenericDevice(unittest.TestCase): + + def setUp(self): + self.oauth_mock = Mock() + accessor = ViCareDeviceAccessor("[id]", "[serial]", "[device]") + self.service = ViCareService(self.oauth_mock, accessor) + + def test_getProperty(self): + self.service.getProperty("someprop") + self.oauth_mock.get.assert_called_once_with('/equipment/installations/[id]/gateways/[serial]/devices/[device]/features/someprop') + + def test_setProperty_object(self): + self.service.setProperty("someprop", "doaction", {'name': 'abc'}) + self.oauth_mock.post.assert_called_once_with('/equipment/installations/[id]/gateways/[serial]/devices/[device]/features/someprop/doaction', '{"name": "abc"}') + + def test_setProperty_string(self): + self.service.setProperty("someprop", "doaction", '{}') + self.oauth_mock.post.assert_called_once_with('/equipment/installations/[id]/gateways/[serial]/devices/[device]/features/someprop/doaction', '{}') + \ No newline at end of file
When will pyViCare be updated in PyLib There is a typo in "/usr/local/lib/python3.9/dist-packages/PyViCare/PyViCareService.py", line 48. Error raised by Python's json module: dump() missing 1 required positional argument: 'fp'. Change post_data = data if isinstance(data, str) else json.dump(data) to post_data = data if isinstance(data, str) else json.dumps(data)
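For illustration, the difference behind the reported error: json.dumps serialises to a string, while json.dump writes to a file object and therefore requires the fp argument:

```python
import json

data = {"temperature": 50}

print(json.dumps(data))  # '{"temperature": 50}' -- serialises to a string

# json.dump(data) would raise:
#   TypeError: dump() missing 1 required positional argument: 'fp'
with open("payload.json", "w") as fp:
    json.dump(data, fp)  # the file-writing form needs a file object
```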
0.0
97d953fb0752de5d25776a599086d0f2d5fb9fba
[ "tests/test_PyViCareService.py::GenericDevice::test_setProperty_object" ]
[ "tests/test_PyViCareService.py::GenericDevice::test_getProperty", "tests/test_PyViCareService.py::GenericDevice::test_setProperty_string" ]
{ "failed_lite_validators": [], "has_test_patch": true, "is_lite": true }
2021-08-03 18:59:55+00:00
apache-2.0
5,592
somm15__PyViCare-166
diff --git a/PyViCare/PyViCareDevice.py b/PyViCare/PyViCareDevice.py index 121f0c1..59d06a3 100644 --- a/PyViCare/PyViCareDevice.py +++ b/PyViCare/PyViCareDevice.py @@ -162,6 +162,14 @@ class Device: def getSolarCollectorTemperature(self): return self.service.getProperty("heating.solar.sensors.temperature.collector")["properties"]["value"]["value"] + @handleNotSupported + def getSolarStorageTemperature(self): + return self.service.getProperty("heating.solar.sensors.temperature.dhw")["properties"]["value"]["value"] + + @handleNotSupported + def getSolarPowerProduction(self): + return self.service.getProperty("heating.solar.power.production")["properties"]["day"]["value"] + @handleNotSupported def getSolarPumpActive(self): status = self.service.getProperty("heating.solar.pumps.circuit")[
somm15/PyViCare
3f4b32ae45195167a4dc47a8348ef7b043d989b2
diff --git a/tests/response/Solar.json b/tests/response/Solar.json new file mode 100644 index 0000000..4d3f8b3 --- /dev/null +++ b/tests/response/Solar.json @@ -0,0 +1,1158 @@ +{ + "data": [ + { + "feature": "heating", + "deviceId": "0", + "timestamp": "2021-08-12T14:41:31.762Z", + "properties": {}, + "commands": {}, + "components": [ + "boiler", + "burner", + "circuits", + "device", + "dhw", + "sensors", + "operating", + "solar" + ] + }, + { + "feature": "heating.boiler", + "deviceId": "0", + "timestamp": "2021-08-12T14:41:31.762Z", + "properties": {}, + "commands": {}, + "components": ["sensors", "serial", "temperature"] + }, + { + "feature": "heating.boiler.sensors", + "deviceId": "0", + "timestamp": "2021-08-12T14:41:31.762Z", + "properties": {}, + "commands": {}, + "components": [] + }, + { + "feature": "heating.boiler.serial", + "deviceId": "0", + "timestamp": "2021-08-12T14:41:31.762Z", + "properties": { "value": { "type": "string", "value": "" } }, + "commands": {}, + "components": [] + }, + { + "feature": "heating.boiler.sensors.temperature.main", + "deviceId": "0", + "timestamp": "2021-08-12T14:41:31.762Z", + "properties": { + "status": { "type": "string", "value": "connected" }, + "value": { "type": "number", "value": 28 } + }, + "commands": {}, + "components": [] + }, + { + "feature": "heating.boiler.temperature", + "deviceId": "0", + "timestamp": "2021-08-12T14:41:31.762Z", + "properties": { "value": { "type": "number", "value": 5 } }, + "commands": {}, + "components": [] + }, + { + "feature": "heating.burner", + "deviceId": "0", + "timestamp": "2021-08-12T14:41:31.762Z", + "properties": { "active": { "type": "boolean", "value": false } }, + "commands": {}, + "components": [] + }, + { + "feature": "heating.circuits", + "deviceId": "0", + "timestamp": "2021-08-12T14:41:31.762Z", + "properties": { "enabled": { "type": "array", "value": ["0"] } }, + "commands": {}, + "components": ["0", "1", "2"] + }, + { + "feature": "heating.circuits.0", + "deviceId": "0", + "timestamp": "2021-08-12T14:41:31.763Z", + "properties": { + "active": { "type": "boolean", "value": true }, + "name": { "type": "string", "value": "Heizkreis 1" } + }, + "commands": { + "setName": { + "uri": "https://api.viessmann-platform.io/iot/v1/equipment/installations/###/gateways/###/devices/0/features/heating.circuits.0/commands/setName", + "name": "setName", + "isExecutable": true, + "params": { + "name": { + "required": true, + "type": "string", + "constraints": { "minLength": 1, "maxLength": 20 } + } + } + } + }, + "components": [ + "circulation", + "dhw", + "frostprotection", + "heating", + "operating", + "sensors" + ] + }, + { + "feature": "heating.circuits.0.circulation", + "deviceId": "0", + "timestamp": "2021-08-12T14:41:31.763Z", + "properties": {}, + "commands": {}, + "components": ["pump"] + }, + { + "feature": "heating.circuits.1.circulation", + "deviceId": "0", + "timestamp": "2021-08-12T14:41:31.763Z", + "properties": {}, + "commands": {}, + "components": ["pump"] + }, + { + "feature": "heating.circuits.2.circulation", + "deviceId": "0", + "timestamp": "2021-08-12T14:41:31.763Z", + "properties": {}, + "commands": {}, + "components": ["pump"] + }, + { + "feature": "heating.circuits.0.circulation.pump", + "deviceId": "0", + "timestamp": "2021-08-12T14:41:31.763Z", + "properties": { "status": { "type": "string", "value": "off" } }, + "commands": {}, + "components": [] + }, + { + "feature": "heating.circuits.0.dhw", + "deviceId": "0", + "timestamp": "2021-08-12T14:41:31.763Z", + "properties": {}, + 
"commands": {}, + "components": ["pumps", "schedule"] + }, + { + "feature": "heating.circuits.1.dhw", + "deviceId": "0", + "timestamp": "2021-08-12T14:41:31.763Z", + "properties": {}, + "commands": {}, + "components": ["pumps", "schedule"] + }, + { + "feature": "heating.circuits.2.dhw", + "deviceId": "0", + "timestamp": "2021-08-12T14:41:31.763Z", + "properties": {}, + "commands": {}, + "components": ["pumps", "schedule"] + }, + { + "feature": "heating.circuits.0.dhw.pumps", + "deviceId": "0", + "timestamp": "2021-08-12T14:41:31.763Z", + "properties": {}, + "commands": {}, + "components": ["circulation"] + }, + { + "feature": "heating.circuits.1.dhw.pumps", + "deviceId": "0", + "timestamp": "2021-08-12T14:41:31.763Z", + "properties": {}, + "commands": {}, + "components": ["circulation"] + }, + { + "feature": "heating.circuits.2.dhw.pumps", + "deviceId": "0", + "timestamp": "2021-08-12T14:41:31.763Z", + "properties": {}, + "commands": {}, + "components": ["circulation"] + }, + { + "feature": "heating.circuits.0.dhw.pumps.circulation", + "deviceId": "0", + "timestamp": "2021-08-12T14:41:31.763Z", + "properties": {}, + "commands": {}, + "components": ["schedule"] + }, + { + "feature": "heating.circuits.1.dhw.pumps.circulation", + "deviceId": "0", + "timestamp": "2021-08-12T14:41:31.763Z", + "properties": {}, + "commands": {}, + "components": ["schedule"] + }, + { + "feature": "heating.circuits.2.dhw.pumps.circulation", + "deviceId": "0", + "timestamp": "2021-08-12T14:41:31.763Z", + "properties": {}, + "commands": {}, + "components": ["schedule"] + }, + { + "feature": "heating.circuits.0.frostprotection", + "deviceId": "0", + "timestamp": "2021-08-12T14:41:31.763Z", + "properties": { "status": { "type": "string", "value": "off" } }, + "commands": {}, + "components": [] + }, + { + "feature": "heating.circuits.0.heating", + "deviceId": "0", + "timestamp": "2021-08-12T14:41:31.763Z", + "properties": {}, + "commands": {}, + "components": ["curve", "schedule"] + }, + { + "feature": "heating.circuits.1.heating", + "deviceId": "0", + "timestamp": "2021-08-12T14:41:31.763Z", + "properties": {}, + "commands": {}, + "components": ["curve", "schedule"] + }, + { + "feature": "heating.circuits.2.heating", + "deviceId": "0", + "timestamp": "2021-08-12T14:41:31.763Z", + "properties": {}, + "commands": {}, + "components": ["curve", "schedule"] + }, + { + "feature": "heating.circuits.0.heating.curve", + "deviceId": "0", + "timestamp": "2021-08-12T14:41:31.763Z", + "properties": { + "shift": { "type": "number", "value": 0 }, + "slope": { "type": "number", "value": 0.8 } + }, + "commands": { + "setCurve": { + "uri": "https://api.viessmann-platform.io/iot/v1/equipment/installations/###/gateways/###/devices/0/features/heating.circuits.0.heating.curve/commands/setCurve", + "name": "setCurve", + "isExecutable": true, + "params": { + "slope": { + "required": true, + "type": "number", + "constraints": { "min": 0.2, "max": 3.5, "stepping": 0.1 } + }, + "shift": { + "required": true, + "type": "number", + "constraints": { "min": -13, "max": 40, "stepping": 1 } + } + } + } + }, + "components": [] + }, + { + "feature": "heating.circuits.0.heating.schedule", + "deviceId": "0", + "timestamp": "2021-08-12T14:41:31.767Z", + "properties": { + "active": { "type": "boolean", "value": false }, + "entries": { + "type": "Schedule", + "value": { + "mon": [ + { + "start": "07:00", + "end": "22:00", + "mode": "normal", + "position": 0 + } + ], + "tue": [ + { + "start": "07:00", + "end": "22:00", + "mode": "normal", + "position": 0 + } 
+ ], + "wed": [ + { + "start": "07:00", + "end": "22:00", + "mode": "normal", + "position": 0 + } + ], + "thu": [ + { + "start": "07:00", + "end": "22:00", + "mode": "normal", + "position": 0 + } + ], + "fri": [ + { + "start": "07:00", + "end": "22:00", + "mode": "normal", + "position": 0 + } + ], + "sat": [ + { + "start": "07:00", + "end": "22:00", + "mode": "normal", + "position": 0 + } + ], + "sun": [ + { + "start": "07:00", + "end": "22:00", + "mode": "normal", + "position": 0 + } + ] + } + }, + "overlapAllowed": { "type": "boolean", "value": true } + }, + "commands": { + "setSchedule": { + "uri": "https://api.viessmann-platform.io/iot/v1/equipment/installations/###/gateways/###/devices/0/features/heating.circuits.0.heating.schedule/commands/setSchedule", + "name": "setSchedule", + "isExecutable": true, + "params": { + "newSchedule": { + "required": true, + "type": "Schedule", + "constraints": { + "maxEntries": 4, + "resolution": 10, + "modes": ["normal"], + "defaultMode": "reduced" + } + } + } + } + }, + "components": [] + }, + { + "feature": "heating.circuits.0.operating", + "deviceId": "0", + "timestamp": "2021-08-12T14:41:31.767Z", + "properties": {}, + "commands": {}, + "components": ["modes", "programs"] + }, + { + "feature": "heating.circuits.1.operating", + "deviceId": "0", + "timestamp": "2021-08-12T14:41:31.767Z", + "properties": {}, + "commands": {}, + "components": ["modes", "programs"] + }, + { + "feature": "heating.circuits.2.operating", + "deviceId": "0", + "timestamp": "2021-08-12T14:41:31.767Z", + "properties": {}, + "commands": {}, + "components": ["modes", "programs"] + }, + { + "feature": "heating.circuits.0.operating.modes", + "deviceId": "0", + "timestamp": "2021-08-12T14:41:31.767Z", + "properties": {}, + "commands": {}, + "components": ["active", "dhw", "heating", "dhwAndHeating", "standby"] + }, + { + "feature": "heating.circuits.1.operating.modes", + "deviceId": "0", + "timestamp": "2021-08-12T14:41:31.767Z", + "properties": {}, + "commands": {}, + "components": ["active", "dhw", "heating", "dhwAndHeating", "standby"] + }, + { + "feature": "heating.circuits.2.operating.modes", + "deviceId": "0", + "timestamp": "2021-08-12T14:41:31.767Z", + "properties": {}, + "commands": {}, + "components": ["active", "dhw", "heating", "dhwAndHeating", "standby"] + }, + { + "feature": "heating.circuits.0.operating.modes.active", + "deviceId": "0", + "timestamp": "2021-08-12T14:41:31.767Z", + "properties": { "value": { "type": "string", "value": "dhw" } }, + "commands": { + "setMode": { + "uri": "https://api.viessmann-platform.io/iot/v1/equipment/installations/###/gateways/###/devices/0/features/heating.circuits.0.operating.modes.active/commands/setMode", + "name": "setMode", + "isExecutable": true, + "params": { + "mode": { + "required": true, + "type": "string", + "constraints": { + "enum": [ + "dhw", + "dhwAndHeating", + "forcedNormal", + "forcedReduced", + "standby" + ] + } + } + } + } + }, + "components": [] + }, + { + "feature": "heating.circuits.0.operating.modes.dhw", + "deviceId": "0", + "timestamp": "2021-08-12T14:41:31.769Z", + "properties": { "active": { "type": "boolean", "value": true } }, + "commands": {}, + "components": [] + }, + { + "feature": "heating.circuits.0.operating.modes.dhwAndHeating", + "deviceId": "0", + "timestamp": "2021-08-12T14:41:31.769Z", + "properties": { "active": { "type": "boolean", "value": false } }, + "commands": {}, + "components": [] + }, + { + "feature": "heating.circuits.0.operating.modes.standby", + "deviceId": "0", + "timestamp": 
"2021-08-12T14:41:31.769Z", + "properties": { "active": { "type": "boolean", "value": false } }, + "commands": {}, + "components": [] + }, + { + "feature": "heating.circuits.0.operating.programs", + "deviceId": "0", + "timestamp": "2021-08-12T14:41:31.769Z", + "properties": {}, + "commands": {}, + "components": [ + "active", + "comfort", + "eco", + "external", + "holiday", + "normal", + "reduced", + "standby" + ] + }, + { + "feature": "heating.circuits.1.operating.programs", + "deviceId": "0", + "timestamp": "2021-08-12T14:41:31.769Z", + "properties": {}, + "commands": {}, + "components": [ + "active", + "comfort", + "eco", + "external", + "holiday", + "normal", + "reduced", + "standby" + ] + }, + { + "feature": "heating.circuits.2.operating.programs", + "deviceId": "0", + "timestamp": "2021-08-12T14:41:31.769Z", + "properties": {}, + "commands": {}, + "components": [ + "active", + "comfort", + "eco", + "external", + "holiday", + "normal", + "reduced", + "standby" + ] + }, + { + "feature": "heating.circuits.0.operating.programs.active", + "deviceId": "0", + "timestamp": "2021-08-12T14:41:31.769Z", + "properties": { "value": { "type": "string", "value": "standby" } }, + "commands": {}, + "components": [] + }, + { + "feature": "heating.circuits.0.operating.programs.comfort", + "deviceId": "0", + "timestamp": "2021-08-12T14:41:31.775Z", + "properties": { + "active": { "type": "boolean", "value": false }, + "temperature": { "type": "number", "value": 20 } + }, + "commands": { + "setTemperature": { + "uri": "https://api.viessmann-platform.io/iot/v1/equipment/installations/###/gateways/###/devices/0/features/heating.circuits.0.operating.programs.comfort/commands/setTemperature", + "name": "setTemperature", + "isExecutable": true, + "params": { + "targetTemperature": { + "required": true, + "type": "number", + "constraints": { "min": 4, "max": 37, "stepping": 1 } + } + } + }, + "activate": { + "uri": "https://api.viessmann-platform.io/iot/v1/equipment/installations/###/gateways/###/devices/0/features/heating.circuits.0.operating.programs.comfort/commands/activate", + "name": "activate", + "isExecutable": true, + "params": {} + }, + "deactivate": { + "uri": "https://api.viessmann-platform.io/iot/v1/equipment/installations/###/gateways/###/devices/0/features/heating.circuits.0.operating.programs.comfort/commands/deactivate", + "name": "deactivate", + "isExecutable": false, + "params": {} + } + }, + "components": [] + }, + { + "feature": "heating.circuits.0.operating.programs.eco", + "deviceId": "0", + "timestamp": "2021-08-12T14:41:31.775Z", + "properties": { + "active": { "type": "boolean", "value": false }, + "temperature": { "type": "number", "value": 22 } + }, + "commands": { + "activate": { + "uri": "https://api.viessmann-platform.io/iot/v1/equipment/installations/###/gateways/###/devices/0/features/heating.circuits.0.operating.programs.eco/commands/activate", + "name": "activate", + "isExecutable": false, + "params": {} + }, + "deactivate": { + "uri": "https://api.viessmann-platform.io/iot/v1/equipment/installations/###/gateways/###/devices/0/features/heating.circuits.0.operating.programs.eco/commands/deactivate", + "name": "deactivate", + "isExecutable": false, + "params": {} + } + }, + "components": [] + }, + { + "feature": "heating.circuits.0.operating.programs.external", + "deviceId": "0", + "timestamp": "2021-08-12T14:41:31.775Z", + "properties": { + "active": { "type": "boolean", "value": false }, + "temperature": { "type": "number", "value": 0 } + }, + "commands": {}, + "components": 
[] + }, + { + "feature": "heating.circuits.0.operating.programs.holiday", + "deviceId": "0", + "timestamp": "2021-08-12T14:41:31.775Z", + "properties": { + "active": { "type": "boolean", "value": false }, + "start": { "type": "string", "value": "" }, + "end": { "type": "string", "value": "" } + }, + "commands": { + "changeEndDate": { + "uri": "https://api.viessmann-platform.io/iot/v1/equipment/installations/###/gateways/###/devices/0/features/heating.circuits.0.operating.programs.holiday/commands/changeEndDate", + "name": "changeEndDate", + "isExecutable": false, + "params": { + "end": { "required": true, "type": "string", "constraints": {} } + } + }, + "schedule": { + "uri": "https://api.viessmann-platform.io/iot/v1/equipment/installations/###/gateways/###/devices/0/features/heating.circuits.0.operating.programs.holiday/commands/schedule", + "name": "schedule", + "isExecutable": true, + "params": { + "start": { "required": true, "type": "string", "constraints": {} }, + "end": { "required": true, "type": "string", "constraints": {} } + } + }, + "unschedule": { + "uri": "https://api.viessmann-platform.io/iot/v1/equipment/installations/###/gateways/###/devices/0/features/heating.circuits.0.operating.programs.holiday/commands/unschedule", + "name": "unschedule", + "isExecutable": true, + "params": {} + } + }, + "components": [] + }, + { + "feature": "heating.circuits.0.operating.programs.normal", + "deviceId": "0", + "timestamp": "2021-08-12T14:41:31.775Z", + "properties": { + "active": { "type": "boolean", "value": false }, + "temperature": { "type": "number", "value": 22 } + }, + "commands": { + "setTemperature": { + "uri": "https://api.viessmann-platform.io/iot/v1/equipment/installations/###/gateways/###/devices/0/features/heating.circuits.0.operating.programs.normal/commands/setTemperature", + "name": "setTemperature", + "isExecutable": true, + "params": { + "targetTemperature": { + "required": true, + "type": "number", + "constraints": { "min": 3, "max": 37, "stepping": 1 } + } + } + } + }, + "components": [] + }, + { + "feature": "heating.circuits.0.operating.programs.reduced", + "deviceId": "0", + "timestamp": "2021-08-12T14:41:31.779Z", + "properties": { + "active": { "type": "boolean", "value": false }, + "temperature": { "type": "number", "value": 16 } + }, + "commands": { + "setTemperature": { + "uri": "https://api.viessmann-platform.io/iot/v1/equipment/installations/###/gateways/###/devices/0/features/heating.circuits.0.operating.programs.reduced/commands/setTemperature", + "name": "setTemperature", + "isExecutable": true, + "params": { + "targetTemperature": { + "required": true, + "type": "number", + "constraints": { "min": 3, "max": 37, "stepping": 1 } + } + } + } + }, + "components": [] + }, + { + "feature": "heating.circuits.0.operating.programs.standby", + "deviceId": "0", + "timestamp": "2021-08-12T14:41:31.779Z", + "properties": { "active": { "type": "boolean", "value": true } }, + "commands": {}, + "components": [] + }, + { + "feature": "heating.circuits.0.sensors", + "deviceId": "0", + "timestamp": "2021-08-12T14:41:31.779Z", + "properties": {}, + "commands": {}, + "components": ["temperature"] + }, + { + "feature": "heating.circuits.1.sensors", + "deviceId": "0", + "timestamp": "2021-08-12T14:41:31.779Z", + "properties": {}, + "commands": {}, + "components": ["temperature"] + }, + { + "feature": "heating.circuits.2.sensors", + "deviceId": "0", + "timestamp": "2021-08-12T14:41:31.779Z", + "properties": {}, + "commands": {}, + "components": ["temperature"] + }, + { + 
"feature": "heating.circuits.0.sensors.temperature", + "deviceId": "0", + "timestamp": "2021-08-12T14:41:31.779Z", + "properties": {}, + "commands": {}, + "components": ["room", "supply"] + }, + { + "feature": "heating.circuits.1.sensors.temperature", + "deviceId": "0", + "timestamp": "2021-08-12T14:41:31.779Z", + "properties": {}, + "commands": {}, + "components": ["room", "supply"] + }, + { + "feature": "heating.circuits.2.sensors.temperature", + "deviceId": "0", + "timestamp": "2021-08-12T14:41:31.779Z", + "properties": {}, + "commands": {}, + "components": ["room", "supply"] + }, + { + "feature": "heating.circuits.0.sensors.temperature.supply", + "deviceId": "0", + "timestamp": "2021-08-12T14:41:31.779Z", + "properties": { + "status": { "type": "string", "value": "connected" }, + "value": { "type": "number", "value": 28 } + }, + "commands": {}, + "components": [] + }, + { + "feature": "heating.configuration.multiFamilyHouse", + "deviceId": "0", + "timestamp": "2021-08-12T14:41:31.779Z", + "properties": { "active": { "type": "boolean", "value": false } }, + "commands": {}, + "components": [] + }, + { + "feature": "heating.controller.serial", + "deviceId": "0", + "timestamp": "2021-08-12T14:41:31.779Z", + "properties": { + "value": { "type": "string", "value": "###" } + }, + "commands": {}, + "components": [] + }, + { + "feature": "heating.device", + "deviceId": "0", + "timestamp": "2021-08-12T14:41:31.779Z", + "properties": {}, + "commands": {}, + "components": ["time"] + }, + { + "feature": "heating.device.time", + "deviceId": "0", + "timestamp": "2021-08-12T14:41:31.779Z", + "properties": {}, + "commands": {}, + "components": ["offset"] + }, + { + "feature": "heating.device.time.offset", + "deviceId": "0", + "timestamp": "2021-08-12T14:41:31.779Z", + "properties": { "value": { "type": "number", "value": 118 } }, + "commands": {}, + "components": [] + }, + { + "feature": "heating.dhw", + "deviceId": "0", + "timestamp": "2021-08-12T14:41:31.779Z", + "properties": { "active": { "type": "boolean", "value": true } }, + "commands": {}, + "components": ["charging", "schedule", "sensors", "temperature"] + }, + { + "feature": "heating.dhw.charging", + "deviceId": "0", + "timestamp": "2021-08-12T14:41:31.779Z", + "properties": { "active": { "type": "boolean", "value": false } }, + "commands": {}, + "components": [] + }, + { + "feature": "heating.dhw.pumps.circulation", + "deviceId": "0", + "timestamp": "2021-08-12T14:41:31.779Z", + "properties": { "status": { "type": "string", "value": "on" } }, + "commands": {}, + "components": ["schedule"] + }, + { + "feature": "heating.dhw.pumps.circulation.schedule", + "deviceId": "0", + "timestamp": "2021-08-12T14:41:31.779Z", + "properties": { + "active": { "type": "boolean", "value": true }, + "entries": { + "type": "Schedule", + "value": { + "mon": [ + { "start": "06:00", "end": "22:00", "mode": "on", "position": 0 } + ], + "tue": [ + { "start": "06:00", "end": "22:00", "mode": "on", "position": 0 } + ], + "wed": [ + { "start": "06:00", "end": "22:00", "mode": "on", "position": 0 } + ], + "thu": [ + { "start": "06:00", "end": "22:00", "mode": "on", "position": 0 } + ], + "fri": [ + { "start": "06:00", "end": "22:00", "mode": "on", "position": 0 } + ], + "sat": [ + { "start": "06:00", "end": "22:00", "mode": "on", "position": 0 } + ], + "sun": [ + { "start": "06:00", "end": "22:00", "mode": "on", "position": 0 } + ] + } + } + }, + "commands": { + "setSchedule": { + "uri": 
"https://api.viessmann-platform.io/iot/v1/equipment/installations/###/gateways/###/devices/0/features/heating.dhw.pumps.circulation.schedule/commands/setSchedule", + "name": "setSchedule", + "isExecutable": true, + "params": { + "newSchedule": { + "required": true, + "type": "Schedule", + "constraints": { + "maxEntries": 4, + "resolution": 10, + "modes": ["on"], + "defaultMode": "off" + } + } + } + } + }, + "components": [] + }, + { + "feature": "heating.dhw.pumps.primary", + "deviceId": "0", + "timestamp": "2021-08-12T14:41:31.782Z", + "properties": { "status": { "type": "string", "value": "off" } }, + "commands": {}, + "components": [] + }, + { + "feature": "heating.dhw.schedule", + "deviceId": "0", + "timestamp": "2021-08-12T14:41:31.782Z", + "properties": { + "active": { "type": "boolean", "value": true }, + "entries": { + "type": "Schedule", + "value": { + "mon": [ + { "start": "06:00", "end": "07:00", "mode": "on", "position": 0 }, + { "start": "10:00", "end": "22:00", "mode": "on", "position": 1 } + ], + "tue": [ + { "start": "06:00", "end": "07:00", "mode": "on", "position": 0 }, + { "start": "10:00", "end": "22:00", "mode": "on", "position": 1 } + ], + "wed": [ + { "start": "06:00", "end": "07:00", "mode": "on", "position": 0 }, + { "start": "10:00", "end": "22:00", "mode": "on", "position": 1 } + ], + "thu": [ + { "start": "06:00", "end": "07:00", "mode": "on", "position": 0 }, + { "start": "10:00", "end": "22:00", "mode": "on", "position": 1 } + ], + "fri": [ + { "start": "06:00", "end": "07:00", "mode": "on", "position": 0 }, + { "start": "10:00", "end": "22:00", "mode": "on", "position": 1 } + ], + "sat": [ + { "start": "06:00", "end": "07:00", "mode": "on", "position": 0 }, + { "start": "10:00", "end": "22:00", "mode": "on", "position": 1 } + ], + "sun": [ + { "start": "06:00", "end": "07:00", "mode": "on", "position": 0 }, + { "start": "10:00", "end": "22:00", "mode": "on", "position": 1 } + ] + } + } + }, + "commands": { + "setSchedule": { + "uri": "https://api.viessmann-platform.io/iot/v1/equipment/installations/###/gateways/###/devices/0/features/heating.dhw.schedule/commands/setSchedule", + "name": "setSchedule", + "isExecutable": true, + "params": { + "newSchedule": { + "required": true, + "type": "Schedule", + "constraints": { + "maxEntries": 4, + "resolution": 10, + "modes": ["on"], + "defaultMode": "off" + } + } + } + } + }, + "components": [] + }, + { + "feature": "heating.dhw.sensors", + "deviceId": "0", + "timestamp": "2021-08-12T14:41:31.782Z", + "properties": {}, + "commands": {}, + "components": [] + }, + { + "feature": "heating.dhw.sensors.temperature.hotWaterStorage", + "deviceId": "0", + "timestamp": "2021-08-12T14:41:31.782Z", + "properties": { + "status": { "type": "string", "value": "connected" }, + "value": { "type": "number", "value": 63.9 } + }, + "commands": {}, + "components": [] + }, + { + "feature": "heating.dhw.sensors.temperature.outlet", + "deviceId": "0", + "timestamp": "2021-08-12T14:41:31.782Z", + "properties": { "status": { "type": "string", "value": "error" } }, + "commands": {}, + "components": [] + }, + { + "feature": "heating.dhw.temperature", + "deviceId": "0", + "timestamp": "2021-08-12T14:41:31.786Z", + "properties": { "value": { "type": "number", "value": 58 } }, + "commands": { + "setTargetTemperature": { + "uri": "https://api.viessmann-platform.io/iot/v1/equipment/installations/###/gateways/###/devices/0/features/heating.dhw.temperature/commands/setTargetTemperature", + "name": "setTargetTemperature", + "isExecutable": true, + 
"params": { + "temperature": { + "required": true, + "type": "number", + "constraints": { "min": 10, "max": 60, "stepping": 1 } + } + } + } + }, + "components": ["main"] + }, + { + "feature": "heating.dhw.temperature.main", + "deviceId": "0", + "timestamp": "2021-08-12T14:41:31.786Z", + "properties": { "value": { "type": "number", "value": 58 } }, + "commands": { + "setTargetTemperature": { + "uri": "https://api.viessmann-platform.io/iot/v1/equipment/installations/###/gateways/###/devices/0/features/heating.dhw.temperature.main/commands/setTargetTemperature", + "name": "setTargetTemperature", + "isExecutable": true, + "params": { + "temperature": { + "required": true, + "type": "number", + "constraints": { "min": 10, "max": 60, "stepping": 1 } + } + } + } + }, + "components": [] + }, + { + "feature": "heating.sensors", + "deviceId": "0", + "timestamp": "2021-08-12T14:41:31.786Z", + "properties": {}, + "commands": {}, + "components": ["temperature"] + }, + { + "feature": "heating.sensors.temperature", + "deviceId": "0", + "timestamp": "2021-08-12T14:41:31.786Z", + "properties": {}, + "commands": {}, + "components": ["outside"] + }, + { + "feature": "heating.sensors.temperature.outside", + "deviceId": "0", + "timestamp": "2021-08-12T14:41:31.786Z", + "properties": { + "status": { "type": "string", "value": "connected" }, + "value": { "type": "number", "value": 28.5 } + }, + "commands": {}, + "components": [] + }, + { + "feature": "heating.operating", + "deviceId": "0", + "timestamp": "2021-08-12T14:41:31.786Z", + "properties": {}, + "commands": {}, + "components": ["programs"] + }, + { + "feature": "heating.operating.programs", + "deviceId": "0", + "timestamp": "2021-08-12T14:41:31.786Z", + "properties": {}, + "commands": {}, + "components": ["holiday"] + }, + { + "feature": "heating.operating.programs.holiday", + "deviceId": "0", + "timestamp": "2021-08-12T14:41:31.786Z", + "properties": { + "active": { "type": "boolean", "value": false }, + "start": { "type": "string", "value": "" }, + "end": { "type": "string", "value": "" } + }, + "commands": { + "changeEndDate": { + "uri": "https://api.viessmann-platform.io/iot/v1/equipment/installations/###/gateways/###/devices/0/features/heating.operating.programs.holiday/commands/changeEndDate", + "name": "changeEndDate", + "isExecutable": false, + "params": { + "end": { "required": true, "type": "string", "constraints": {} } + } + }, + "schedule": { + "uri": "https://api.viessmann-platform.io/iot/v1/equipment/installations/###/gateways/###/devices/0/features/heating.operating.programs.holiday/commands/schedule", + "name": "schedule", + "isExecutable": true, + "params": { + "start": { "required": true, "type": "string", "constraints": {} }, + "end": { "required": true, "type": "string", "constraints": {} } + } + }, + "unschedule": { + "uri": "https://api.viessmann-platform.io/iot/v1/equipment/installations/###/gateways/###/devices/0/features/heating.operating.programs.holiday/commands/unschedule", + "name": "unschedule", + "isExecutable": true, + "params": {} + } + }, + "components": [] + }, + { + "feature": "heating.solar", + "deviceId": "0", + "timestamp": "2021-08-12T14:41:31.790Z", + "properties": { "active": { "type": "boolean", "value": true } }, + "commands": {}, + "components": ["sensors"] + }, + { + "feature": "heating.solar.power.production", + "deviceId": "0", + "timestamp": "2021-08-12T14:41:31.790Z", + "properties": { + "day": { + "type": "array", + "value": [ + 16.604, 19.78, 19.323, 20.592, 19.444, 14.517, 17.929, 20.534 + ] + }, + 
"week": { "type": "array", "value": [] }, + "month": { "type": "array", "value": [] }, + "year": { "type": "array", "value": [] }, + "unit": { "type": "string", "value": "kilowattHour" } + }, + "commands": {}, + "components": [] + }, + { + "feature": "heating.solar.pumps.circuit", + "deviceId": "0", + "timestamp": "2021-08-12T14:41:31.790Z", + "properties": { "status": { "type": "string", "value": "on" } }, + "commands": {}, + "components": [] + }, + { + "feature": "heating.solar.sensors", + "deviceId": "0", + "timestamp": "2021-08-12T14:41:31.790Z", + "properties": {}, + "commands": {}, + "components": ["temperature"] + }, + { + "feature": "heating.solar.sensors.temperature", + "deviceId": "0", + "timestamp": "2021-08-12T14:41:31.790Z", + "properties": {}, + "commands": {}, + "components": ["dhw", "collector"] + }, + { + "feature": "heating.solar.sensors.temperature.dhw", + "deviceId": "0", + "timestamp": "2021-08-12T14:41:31.790Z", + "properties": { + "value": { "type": "number", "value": 63.5 }, + "status": { "type": "string", "value": "connected" } + }, + "commands": {}, + "components": [] + }, + { + "feature": "heating.solar.sensors.temperature.collector", + "deviceId": "0", + "timestamp": "2021-08-12T14:41:31.790Z", + "properties": { + "value": { "type": "number", "value": 72.3 }, + "status": { "type": "string", "value": "connected" } + }, + "commands": {}, + "components": [] + } + ] +} diff --git a/tests/test_Solar.py b/tests/test_Solar.py new file mode 100644 index 0000000..f7a71f2 --- /dev/null +++ b/tests/test_Solar.py @@ -0,0 +1,16 @@ +from PyViCare.PyViCareDevice import Device +import unittest +from tests.ViCareServiceMock import ViCareServiceMock + + +class SolarTest(unittest.TestCase): + def setUp(self): + self.service = ViCareServiceMock('response/Solar.json') + self.device = Device(self.service) + + def test_getSolarStorageTemperature(self): + self.assertEqual(self.device.getSolarStorageTemperature(), 63.5) + + def test_getSolarPowerProduction(self): + self.assertEqual( + self.device.getSolarPowerProduction(), [16.604, 19.78, 19.323, 20.592, 19.444, 14.517, 17.929, 20.534])
Add Solar properties Please add - getSolarStorageTemperature - getSolarPowerProduction to the PyViCareDevice.py code
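A minimal sketch of the two requested getters, driven by a stub service whose responses mirror the recorded Solar.json fixture used in this record's tests:

```python
from unittest.mock import Mock

from PyViCare.PyViCareDevice import Device

# Per-feature responses, mirroring the recorded test fixture.
responses = {
    "heating.solar.sensors.temperature.dhw": {
        "properties": {"value": {"value": 63.5}}
    },
    "heating.solar.power.production": {
        "properties": {"day": {"value": [16.604, 19.78, 19.323]}}
    },
}
service = Mock()
service.getProperty.side_effect = lambda name: responses[name]

device = Device(service)
print(device.getSolarStorageTemperature())  # 63.5 (solar dhw/storage sensor)
print(device.getSolarPowerProduction())     # [16.604, 19.78, 19.323] (kWh per day)
```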
0.0
3f4b32ae45195167a4dc47a8348ef7b043d989b2
[ "tests/test_Solar.py::SolarTest::test_getSolarPowerProduction", "tests/test_Solar.py::SolarTest::test_getSolarStorageTemperature" ]
[]
{ "failed_lite_validators": [ "has_short_problem_statement" ], "has_test_patch": true, "is_lite": false }
2021-08-13 21:24:39+00:00
apache-2.0
5,593
somm15__PyViCare-172
diff --git a/PyViCare/PyViCareDevice.py b/PyViCare/PyViCareDevice.py index 5ecd352..a01c89c 100644 --- a/PyViCare/PyViCareDevice.py +++ b/PyViCare/PyViCareDevice.py @@ -105,11 +105,11 @@ class Device: @handleNotSupported def getDomesticHotWaterMaxTemperature(self): - return self.service.getProperty("heating.dhw.temperature")["actions"][0]["fields"][0]["max"] + return self.service.getProperty("heating.dhw.temperature.main")["commands"]["setTargetTemperature"]["params"]["temperature"]["constraints"]["max"] @handleNotSupported def getDomesticHotWaterMinTemperature(self): - return self.service.getProperty("heating.dhw.temperature")["actions"][0]["fields"][0]["min"] + return self.service.getProperty("heating.dhw.temperature.main")["commands"]["setTargetTemperature"]["params"]["temperature"]["constraints"]["min"] @handleNotSupported def getDomesticHotWaterChargingActive(self): @@ -129,7 +129,7 @@ class Device: @handleAPICommandErrors def setDomesticHotWaterTemperature(self, temperature): - return self.service.setProperty("heating.dhw.temperature", "setTargetTemperature", {'temperature': temperature}) + return self.service.setProperty("heating.dhw.temperature.main", "setTargetTemperature", {'temperature': temperature}) """ Set the target temperature 2 for domestic host water Parameters
somm15/PyViCare
b52fff20925afb573cf6b6a4cd80d5618cd262f2
diff --git a/tests/test_GenericDevice.py b/tests/test_GenericDevice.py index 29246b7..a344c0a 100644 --- a/tests/test_GenericDevice.py +++ b/tests/test_GenericDevice.py @@ -29,7 +29,7 @@ class GenericDevice(unittest.TestCase): self.device.setDomesticHotWaterTemperature(50) self.assertEqual(len(self.service.setPropertyData), 1) self.assertEqual( - self.service.setPropertyData[0]['property_name'], 'heating.dhw.temperature') + self.service.setPropertyData[0]['property_name'], 'heating.dhw.temperature.main') self.assertEqual( self.service.setPropertyData[0]['action'], 'setTargetTemperature') self.assertEqual(self.service.setPropertyData[0]['data'], { diff --git a/tests/test_Vitodens200W.py b/tests/test_Vitodens200W.py index 847e3ef..868fd85 100644 --- a/tests/test_Vitodens200W.py +++ b/tests/test_Vitodens200W.py @@ -37,6 +37,12 @@ class Vitodens200W(unittest.TestCase): def test_getPowerConsumptionDays(self): self.assertRaises(PyViCareNotSupportedFeatureError, self.device.getPowerConsumptionDays) + def test_getDomesticHotWaterMaxTemperature(self): + self.assertEqual(self.device.getDomesticHotWaterMaxTemperature(), 60) + + def test_getDomesticHotWaterMinTemperature(self): + self.assertEqual(self.device.getDomesticHotWaterMinTemperature(), 10) + def test_getFrostProtectionActive(self): self.assertEqual( self.device.circuits[0].getFrostProtectionActive(), False)
PyViCare.PyViCareUtils.PyViCareNotSupportedFeatureError: heating.dhw.temperature

Hi there, since the 2.6 release I get the following error with my Vitodens W200:

```
t.getDomesticHotWaterConfiguredTemperature()
*** PyViCare.PyViCareUtils.PyViCareNotSupportedFeatureError: heating.dhw.temperature
```

All other API calls like `setDomesticHotWaterTemperature` are working. Any ideas?
0.0
b52fff20925afb573cf6b6a4cd80d5618cd262f2
[ "tests/test_GenericDevice.py::GenericDevice::test_setDomesticHotWaterTemperature", "tests/test_Vitodens200W.py::Vitodens200W::test_getDomesticHotWaterMaxTemperature", "tests/test_Vitodens200W.py::Vitodens200W::test_getDomesticHotWaterMinTemperature" ]
[ "tests/test_GenericDevice.py::GenericDevice::test_activateComfort", "tests/test_GenericDevice.py::GenericDevice::test_deactivateComfort", "tests/test_GenericDevice.py::GenericDevice::test_setMode", "tests/test_Vitodens200W.py::Vitodens200W::test_getBurnerActive", "tests/test_Vitodens200W.py::Vitodens200W::test_getBurnerHours", "tests/test_Vitodens200W.py::Vitodens200W::test_getBurnerModulation", "tests/test_Vitodens200W.py::Vitodens200W::test_getBurnerStarts", "tests/test_Vitodens200W.py::Vitodens200W::test_getDomesticHotWaterCirculationPumpActive", "tests/test_Vitodens200W.py::Vitodens200W::test_getDomesticHotWaterCirculationScheduleModes", "tests/test_Vitodens200W.py::Vitodens200W::test_getDomesticHotWaterConfiguredTemperature", "tests/test_Vitodens200W.py::Vitodens200W::test_getDomesticHotWaterOutletTemperature", "tests/test_Vitodens200W.py::Vitodens200W::test_getFrostProtectionActive", "tests/test_Vitodens200W.py::Vitodens200W::test_getModes", "tests/test_Vitodens200W.py::Vitodens200W::test_getPowerConsumptionDays", "tests/test_Vitodens200W.py::Vitodens200W::test_getPrograms" ]
{ "failed_lite_validators": [ "has_short_problem_statement", "has_pytest_match_arg" ], "has_test_patch": true, "is_lite": false }
2021-08-17 19:03:32+00:00
apache-2.0
5,594
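To make the fix above concrete, a small self-contained sketch of the JSON traversal the corrected getters perform. The payload below is a hypothetical stand-in shaped like the `heating.dhw.temperature.main` feature, reduced to only the keys the patched code reads.

```python
# Hypothetical feature payload; only the keys the min/max getters traverse are kept.
feature = {
    "commands": {
        "setTargetTemperature": {
            "params": {
                "temperature": {
                    "constraints": {"min": 10, "max": 60, "stepping": 1}
                }
            }
        }
    }
}

constraints = feature["commands"]["setTargetTemperature"]["params"]["temperature"]["constraints"]
print(constraints["min"], constraints["max"])  # 10 60 -- the values the Vitodens200W tests expect
```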
somm15__PyViCare-187
diff --git a/PyViCare/PyViCareCachedService.py b/PyViCare/PyViCareCachedService.py index b2a8b61..0c781fd 100644 --- a/PyViCare/PyViCareCachedService.py +++ b/PyViCare/PyViCareCachedService.py @@ -1,23 +1,16 @@ import logging import threading -from datetime import datetime from typing import Any from PyViCare.PyViCareAbstractOAuthManager import AbstractViCareOAuthManager from PyViCare.PyViCareService import (ViCareDeviceAccessor, ViCareService, readFeature) -from PyViCare.PyViCareUtils import PyViCareInvalidDataError +from PyViCare.PyViCareUtils import PyViCareInvalidDataError, ViCareTimer logger = logging.getLogger('ViCare') logger.addHandler(logging.NullHandler()) -class ViCareTimer: - # class is used to replace logic in unittest - def now(self) -> datetime: - return datetime.now() - - class ViCareCachedService(ViCareService): def __init__(self, oauth_manager: AbstractViCareOAuthManager, accessor: ViCareDeviceAccessor, cacheDuration: int) -> None: diff --git a/PyViCare/PyViCareDevice.py b/PyViCare/PyViCareDevice.py index b4253a3..3c3e212 100644 --- a/PyViCare/PyViCareDevice.py +++ b/PyViCare/PyViCareDevice.py @@ -1,10 +1,10 @@ import logging -from datetime import datetime from typing import Any, Callable, List, Optional from PyViCare.PyViCareService import ViCareService from PyViCare.PyViCareUtils import (PyViCareNotSupportedFeatureError, - handleAPICommandErrors, handleNotSupported) + ViCareTimer, handleAPICommandErrors, + handleNotSupported) logger = logging.getLogger('ViCare') logger.addHandler(logging.NullHandler()) @@ -70,8 +70,8 @@ class Device: if schedule == "error" or schedule["active"] is not True: return None - currentDateTime = datetime.now() - currentTime = currentDateTime.time() + currentDateTime = ViCareTimer().now() + currentTime = ViCareTimer.time_as_delta(currentDateTime) current_day = VICARE_DAYS[currentDateTime.weekday()] if current_day not in schedule: @@ -79,8 +79,8 @@ class Device: mode = None for s in schedule[current_day]: - startTime = datetime.strptime(s["start"], '%H:%M').time() - endTime = datetime.strptime(s["end"], '%H:%M').time() + startTime = ViCareTimer.parse_time_as_delta(s["start"]) + endTime = ViCareTimer.parse_time_as_delta(s["end"]) if startTime <= currentTime and currentTime <= endTime: if s["mode"] == VICARE_DHW_TEMP2: # temp-2 overrides all other modes return s["mode"] @@ -245,16 +245,16 @@ class Device: if schedule == "error" or schedule["active"] is not True: return None - currentDateTime = datetime.now() - currentTime = currentDateTime.time() + currentDateTime = ViCareTimer().now() + currentTime = ViCareTimer.time_as_delta(currentDateTime) current_day = VICARE_DAYS[currentDateTime.weekday()] if current_day not in schedule: return None # no schedule for day configured for s in schedule[current_day]: - startTime = datetime.strptime(s["start"], '%H:%M').time() - endTime = datetime.strptime(s["end"], '%H:%M').time() + startTime = ViCareTimer.parse_time_as_delta(s["start"]) + endTime = ViCareTimer.parse_time_as_delta(s["end"]) if startTime <= currentTime and currentTime <= endTime: return s["mode"] return schedule['default_mode'] diff --git a/PyViCare/PyViCareUtils.py b/PyViCare/PyViCareUtils.py index f79dbf5..91b9cdc 100644 --- a/PyViCare/PyViCareUtils.py +++ b/PyViCare/PyViCareUtils.py @@ -1,4 +1,4 @@ -import datetime +from datetime import datetime, timedelta from functools import wraps from typing import Callable @@ -10,6 +10,23 @@ from PyViCare import Feature # the device. 
+class ViCareTimer: + # class is used to replace logic in unittest + def now(self) -> datetime: + return datetime.now() + + @staticmethod + def time_as_delta(date_time: datetime) -> timedelta: + return date_time - datetime(year=date_time.year, month=date_time.month, day=date_time.day) + + @staticmethod + def parse_time_as_delta(time_string: str) -> timedelta: + return timedelta( + hours=int(time_string[0:2]), + minutes=int(time_string[3:5]) + ) + + def handleNotSupported(func: Callable) -> Callable: @wraps(func) def wrapper(*args, **kwargs): @@ -73,7 +90,7 @@ class PyViCareRateLimitError(Exception): name = extended_payload["name"] requestCountLimit = extended_payload["requestCountLimit"] limitReset = extended_payload["limitReset"] - limitResetDate = datetime.datetime.utcfromtimestamp(limitReset / 1000) + limitResetDate = datetime.utcfromtimestamp(limitReset / 1000) msg = f'API rate limit {name} exceeded. Max {requestCountLimit} calls in timewindow. Limit reset at {limitResetDate.isoformat()}.'
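A short illustration of why this patch swaps `datetime.strptime(...).time()` for timedelta-based parsing: schedule entries in the fixtures end at `"24:00"`, which `%H:%M` parsing rejects, while the new helper handles it naturally and keeps start/end comparable.

```python
from datetime import datetime, timedelta


def parse_time_as_delta(time_string: str) -> timedelta:
    # Same logic as the helper added to ViCareTimer in the patch above.
    return timedelta(hours=int(time_string[0:2]), minutes=int(time_string[3:5]))


try:
    datetime.strptime("24:00", "%H:%M")  # %H only accepts 00-23
except ValueError as err:
    print("strptime rejects 24:00:", err)

print(parse_time_as_delta("24:00"))  # 1 day, 0:00:00 -- still a valid, comparable value
print(parse_time_as_delta("21:00") <= parse_time_as_delta("24:00"))  # True
```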
somm15/PyViCare
bfb36c6feb3d9035a89f01a1a187b049987875dd
diff --git a/tests/response/Vitocal222S.json b/tests/response/Vitocal222S.json index ada4b47..3373631 100644 --- a/tests/response/Vitocal222S.json +++ b/tests/response/Vitocal222S.json @@ -1,4216 +1,4059 @@ { - "data": [ - { - "properties": {}, - "commands": {}, - "components": [], - "apiVersion": 1, - "uri": "https://api.viessmann-platform.io/iot/v1/equipment/installations/#######/gateways/################/devices/0/features/heating.circuits.2.operating.programs.normal", - "gatewayId": "################", - "feature": "heating.circuits.2.operating.programs.normal", - "timestamp": "2021-09-02T11:30:21.708Z", - "isEnabled": false, - "isReady": true, - "deviceId": "0" - }, - { - "properties": {}, - "commands": {}, - "components": [ - "sensors", - "serial" - ], - "apiVersion": 1, - "uri": "https://api.viessmann-platform.io/iot/v1/equipment/installations/#######/gateways/################/devices/0/features/heating.boiler", - "gatewayId": "################", - "feature": "heating.boiler", - "timestamp": "2021-09-02T11:30:19.049Z", - "isEnabled": true, - "isReady": true, - "deviceId": "0" - }, - { - "properties": {}, - "commands": {}, - "components": [], - "apiVersion": 1, - "uri": "https://api.viessmann-platform.io/iot/v1/equipment/installations/#######/gateways/################/devices/0/features/heating.dhw.sensors", - "gatewayId": "################", - "feature": "heating.dhw.sensors", - "timestamp": "2021-09-02T11:30:19.050Z", - "isEnabled": true, - "isReady": true, - "deviceId": "0" - }, - { - "properties": {}, - "commands": {}, - "components": [ - "0" - ], - "apiVersion": 1, - "uri": "https://api.viessmann-platform.io/iot/v1/equipment/installations/#######/gateways/################/devices/0/features/heating.condensors", - "gatewayId": "################", - "feature": "heating.condensors", - "timestamp": "2021-09-02T11:30:19.049Z", - "isEnabled": true, - "isReady": true, - "deviceId": "0" - }, - { - "properties": { - "shift": { - "type": "number", - "unit": "", - "value": -5 - }, - "slope": { - "type": "number", - "unit": "", - "value": 0 - } - }, - "commands": { - "setCurve": { - "uri": "https://api.viessmann-platform.io/iot/v1/equipment/installations/#######/gateways/################/devices/0/features/heating.circuits.0.heating.curve/commands/setCurve", - "name": "setCurve", - "isExecutable": true, - "params": { - "slope": { - "type": "number", - "required": true, - "constraints": { - "min": 0, - "max": 3.5, - "stepping": 0.1 - } - }, - "shift": { - "type": "number", - "required": true, - "constraints": { - "min": -15, - "max": 40, - "stepping": 1 - } - } - } - } - }, - "components": [], - "apiVersion": 1, - "uri": "https://api.viessmann-platform.io/iot/v1/equipment/installations/#######/gateways/################/devices/0/features/heating.circuits.0.heating.curve", - "gatewayId": "################", - "feature": "heating.circuits.0.heating.curve", - "timestamp": "2021-09-02T11:30:20.479Z", - "isEnabled": true, - "isReady": true, - "deviceId": "0" - }, - { - "properties": {}, - "commands": {}, - "components": [], - "apiVersion": 1, - "uri": "https://api.viessmann-platform.io/iot/v1/equipment/installations/#######/gateways/################/devices/0/features/heating.circuits.1.operating.modes.dhw", - "gatewayId": "################", - "feature": "heating.circuits.1.operating.modes.dhw", - "timestamp": "2021-09-02T11:30:20.547Z", - "isEnabled": false, - "isReady": true, - "deviceId": "0" - }, - { - "properties": { - "enabled": { - "value": [ - "0" - ], - "type": "array" - } - }, - 
"commands": {}, - "components": [ - "0", - "1" - ], - "apiVersion": 1, - "uri": "https://api.viessmann-platform.io/iot/v1/equipment/installations/#######/gateways/################/devices/0/features/heating.compressors", - "gatewayId": "################", - "feature": "heating.compressors", - "timestamp": "2021-09-02T11:30:19.903Z", - "isEnabled": true, - "isReady": true, - "deviceId": "0" - }, - { - "properties": {}, - "commands": {}, - "components": [], - "apiVersion": 1, - "uri": "https://api.viessmann-platform.io/iot/v1/equipment/installations/#######/gateways/################/devices/0/features/heating.circuits.1.operating.programs.reduced", - "gatewayId": "################", - "feature": "heating.circuits.1.operating.programs.reduced", - "timestamp": "2021-09-02T11:30:21.710Z", - "isEnabled": false, - "isReady": true, - "deviceId": "0" - }, - { - "properties": {}, - "commands": {}, - "components": [], - "apiVersion": 1, - "uri": "https://api.viessmann-platform.io/iot/v1/equipment/installations/#######/gateways/################/devices/0/features/heating.circuits.1.operating.programs.standby", - "gatewayId": "################", - "feature": "heating.circuits.1.operating.programs.standby", - "timestamp": "2021-09-02T11:30:20.647Z", - "isEnabled": false, - "isReady": true, - "deviceId": "0" - }, - { - "properties": {}, - "commands": {}, - "components": [ - "circulation", - "frostprotection", - "heating", - "operating", - "sensors", - "temperature" - ], - "apiVersion": 1, - "uri": "https://api.viessmann-platform.io/iot/v1/equipment/installations/#######/gateways/################/devices/0/features/heating.circuits.1", - "gatewayId": "################", - "feature": "heating.circuits.1", - "timestamp": "2021-09-02T11:30:20.348Z", - "isEnabled": false, - "isReady": true, - "deviceId": "0" - }, - { - "properties": {}, - "commands": {}, - "components": [], - "apiVersion": 1, - "uri": "https://api.viessmann-platform.io/iot/v1/equipment/installations/#######/gateways/################/devices/0/features/heating.circuits.2.operating.programs.fixed", - "gatewayId": "################", - "feature": "heating.circuits.2.operating.programs.fixed", - "timestamp": "2021-09-02T11:30:20.640Z", - "isEnabled": false, - "isReady": true, - "deviceId": "0" - }, - { - "properties": { - "active": { - "value": true, - "type": "boolean" - }, - "entries": { - "value": { - "mon": [], - "tue": [], - "wed": [], - "thu": [], - "fri": [], - "sat": [], - "sun": [] - }, - "type": "Schedule" - } - }, - "commands": { - "setSchedule": { - "uri": "https://api.viessmann-platform.io/iot/v1/equipment/installations/#######/gateways/################/devices/0/features/heating.dhw.pumps.circulation.schedule/commands/setSchedule", - "name": "setSchedule", - "isExecutable": true, - "params": { - "newSchedule": { - "type": "Schedule", - "required": true, - "constraints": { - "modes": [ - "5/25-cycles", - "5/10-cycles", - "on" - ], - "maxEntries": 8, - "resolution": 10, - "defaultMode": "off", - "overlapAllowed": true - } - } - } - } - }, - "components": [], - "apiVersion": 1, - "uri": "https://api.viessmann-platform.io/iot/v1/equipment/installations/#######/gateways/################/devices/0/features/heating.dhw.pumps.circulation.schedule", - "gatewayId": "################", - "feature": "heating.dhw.pumps.circulation.schedule", - "timestamp": "2021-09-02T11:30:20.751Z", - "isEnabled": true, - "isReady": true, - "deviceId": "0" - }, - { - "properties": {}, - "commands": {}, - "components": [], - "apiVersion": 1, - "uri": 
"https://api.viessmann-platform.io/iot/v1/equipment/installations/#######/gateways/################/devices/0/features/heating.circuits.0.operating.modes.heatingCooling", - "gatewayId": "################", - "feature": "heating.circuits.0.operating.modes.heatingCooling", - "timestamp": "2021-09-02T11:30:20.576Z", - "isEnabled": false, - "isReady": true, - "deviceId": "0" - }, - { - "properties": {}, - "commands": {}, - "components": [], - "apiVersion": 1, - "uri": "https://api.viessmann-platform.io/iot/v1/equipment/installations/#######/gateways/################/devices/0/features/heating.circuits.1.sensors.temperature.room", - "gatewayId": "################", - "feature": "heating.circuits.1.sensors.temperature.room", - "timestamp": "2021-09-02T11:30:20.651Z", - "isEnabled": false, - "isReady": true, - "deviceId": "0" - }, - { - "properties": {}, - "commands": {}, - "components": [], - "apiVersion": 1, - "uri": "https://api.viessmann-platform.io/iot/v1/equipment/installations/#######/gateways/################/devices/0/features/heating.configuration.heatingRod", - "gatewayId": "################", - "feature": "heating.configuration.heatingRod", - "timestamp": "2021-09-02T11:30:19.048Z", - "isEnabled": true, - "isReady": true, - "deviceId": "0" - }, - { - "properties": {}, - "commands": {}, - "components": [], - "apiVersion": 1, - "uri": "https://api.viessmann-platform.io/iot/v1/equipment/installations/#######/gateways/################/devices/0/features/heating.burner", - "gatewayId": "################", - "feature": "heating.burner", - "timestamp": "2021-09-02T11:30:20.278Z", - "isEnabled": false, - "isReady": true, - "deviceId": "0" - }, - { - "properties": {}, - "commands": {}, - "components": [], - "apiVersion": 1, - "uri": "https://api.viessmann-platform.io/iot/v1/equipment/installations/#######/gateways/################/devices/0/features/heating.solar.sensors.temperature.dhw", - "gatewayId": "################", - "feature": "heating.solar.sensors.temperature.dhw", - "timestamp": "2021-09-02T11:30:20.808Z", - "isEnabled": false, - "isReady": true, - "deviceId": "0" - }, - { - "properties": { - "active": { - "type": "boolean", - "value": false - } - }, - "commands": {}, - "components": [], - "apiVersion": 1, - "uri": "https://api.viessmann-platform.io/iot/v1/equipment/installations/#######/gateways/################/devices/0/features/ventilation.operating.modes.standard", - "gatewayId": "################", - "feature": "ventilation.operating.modes.standard", - "timestamp": "2021-09-02T11:30:20.822Z", - "isEnabled": true, - "isReady": true, - "deviceId": "0" - }, - { - "properties": {}, - "commands": {}, - "components": [], - "apiVersion": 1, - "uri": "https://api.viessmann-platform.io/iot/v1/equipment/installations/#######/gateways/################/devices/0/features/heating.circuits.2.sensors.temperature.supply", - "gatewayId": "################", - "feature": "heating.circuits.2.sensors.temperature.supply", - "timestamp": "2021-09-02T11:30:20.658Z", - "isEnabled": false, - "isReady": true, - "deviceId": "0" - }, - { - "properties": {}, - "commands": {}, - "components": [], - "apiVersion": 1, - "uri": "https://api.viessmann-platform.io/iot/v1/equipment/installations/#######/gateways/################/devices/0/features/heating.compressors.1.statistics", - "gatewayId": "################", - "feature": "heating.compressors.1.statistics", - "timestamp": "2021-09-02T11:30:20.250Z", - "isEnabled": false, - "isReady": true, - "deviceId": "0" - }, - { - "properties": {}, - "commands": {}, - 
"components": [], - "apiVersion": 1, - "uri": "https://api.viessmann-platform.io/iot/v1/equipment/installations/#######/gateways/################/devices/0/features/heating.circuits.2.operating.modes.dhwAndHeatingCooling", - "gatewayId": "################", - "feature": "heating.circuits.2.operating.modes.dhwAndHeatingCooling", - "timestamp": "2021-09-02T11:30:20.562Z", - "isEnabled": false, - "isReady": true, - "deviceId": "0" - }, - { - "properties": {}, - "commands": {}, - "components": [ - "pump" - ], - "apiVersion": 1, - "uri": "https://api.viessmann-platform.io/iot/v1/equipment/installations/#######/gateways/################/devices/0/features/heating.circuits.2.circulation", - "gatewayId": "################", - "feature": "heating.circuits.2.circulation", - "timestamp": "2021-09-02T11:30:19.050Z", - "isEnabled": true, - "isReady": true, - "deviceId": "0" - }, - { - "properties": {}, - "commands": {}, - "components": [], - "apiVersion": 1, - "uri": "https://api.viessmann-platform.io/iot/v1/equipment/installations/#######/gateways/################/devices/0/features/heating.circuits.1.operating.modes.active", - "gatewayId": "################", - "feature": "heating.circuits.1.operating.modes.active", - "timestamp": "2021-09-02T11:30:20.772Z", - "isEnabled": false, - "isReady": true, - "deviceId": "0" - }, - { - "properties": { - "active": { - "type": "boolean", - "value": false - } - }, - "commands": {}, - "components": [], - "apiVersion": 1, - "uri": "https://api.viessmann-platform.io/iot/v1/equipment/installations/#######/gateways/################/devices/0/features/ventilation.operating.programs.standby", - "gatewayId": "################", - "feature": "ventilation.operating.programs.standby", - "timestamp": "2021-09-02T11:30:20.830Z", - "isEnabled": true, - "isReady": true, - "deviceId": "0" - }, - { - "properties": {}, - "commands": {}, - "components": [], - "apiVersion": 1, - "uri": "https://api.viessmann-platform.io/iot/v1/equipment/installations/#######/gateways/################/devices/0/features/heating.circuits.1.operating.modes.dhwAndHeatingCooling", - "gatewayId": "################", - "feature": "heating.circuits.1.operating.modes.dhwAndHeatingCooling", - "timestamp": "2021-09-02T11:30:20.559Z", - "isEnabled": false, - "isReady": true, - "deviceId": "0" - }, - { - "properties": { - "active": { - "value": true, - "type": "boolean" - }, - "entries": { - "value": { - "mon": [ - { - "start": "00:00", - "end": "07:00", - "mode": "reduced", - "position": 0 - }, - { - "start": "07:00", - "end": "08:00", - "mode": "standard", - "position": 1 - }, - { - "start": "08:00", - "end": "21:00", - "mode": "standard", - "position": 2 - }, - { - "start": "21:00", - "end": "24:00", - "mode": "reduced", - "position": 3 - } - ], - "tue": [ - { - "start": "00:00", - "end": "07:00", - "mode": "reduced", - "position": 0 - }, - { - "start": "07:00", - "end": "08:00", - "mode": "standard", - "position": 1 - }, - { - "start": "08:00", - "end": "21:00", - "mode": "standard", - "position": 2 - }, - { - "start": "21:00", - "end": "24:00", - "mode": "reduced", - "position": 3 - } - ], - "wed": [ - { - "start": "00:00", - "end": "07:00", - "mode": "reduced", - "position": 0 - }, - { - "start": "07:00", - "end": "08:00", - "mode": "standard", - "position": 1 - }, - { - "start": "08:00", - "end": "21:00", - "mode": "standard", - "position": 2 - }, - { - "start": "21:00", - "end": "24:00", - "mode": "reduced", - "position": 3 - } - ], - "thu": [ - { - "start": "00:00", - "end": "07:00", - "mode": 
"reduced", - "position": 0 - }, - { - "start": "07:00", - "end": "08:00", - "mode": "standard", - "position": 1 - }, - { - "start": "08:00", - "end": "21:00", - "mode": "standard", - "position": 2 - }, - { - "start": "21:00", - "end": "24:00", - "mode": "reduced", - "position": 3 - } - ], - "fri": [ - { - "start": "00:00", - "end": "07:00", - "mode": "reduced", - "position": 0 - }, - { - "start": "07:00", - "end": "08:00", - "mode": "standard", - "position": 1 - }, - { - "start": "08:00", - "end": "21:00", - "mode": "standard", - "position": 2 - }, - { - "start": "21:00", - "end": "24:00", - "mode": "reduced", - "position": 3 - } - ], - "sat": [ - { - "start": "00:00", - "end": "07:00", - "mode": "reduced", - "position": 0 - }, - { - "start": "07:00", - "end": "08:00", - "mode": "standard", - "position": 1 - }, - { - "start": "08:00", - "end": "21:00", - "mode": "standard", - "position": 2 - }, - { - "start": "21:00", - "end": "24:00", - "mode": "reduced", - "position": 3 - } - ], - "sun": [ - { - "start": "00:00", - "end": "07:00", - "mode": "reduced", - "position": 0 - }, - { - "start": "07:00", - "end": "08:00", - "mode": "standard", - "position": 1 - }, - { - "start": "08:00", - "end": "21:00", - "mode": "standard", - "position": 2 - }, - { - "start": "21:00", - "end": "24:00", - "mode": "reduced", - "position": 3 - } - ] - }, - "type": "Schedule" - } - }, - "commands": { - "setSchedule": { - "uri": "https://api.viessmann-platform.io/iot/v1/equipment/installations/#######/gateways/################/devices/0/features/ventilation.schedule/commands/setSchedule", - "name": "setSchedule", - "isExecutable": true, - "params": { - "newSchedule": { - "type": "Schedule", - "required": true, - "constraints": { - "modes": [ - "reduced", - "standard", - "intensive" - ], - "maxEntries": 8, - "resolution": 10, - "defaultMode": "basic", - "overlapAllowed": true - } - } - } - } - }, - "components": [], - "apiVersion": 1, - "uri": "https://api.viessmann-platform.io/iot/v1/equipment/installations/#######/gateways/################/devices/0/features/ventilation.schedule", - "gatewayId": "################", - "feature": "ventilation.schedule", - "timestamp": "2021-09-02T11:30:20.815Z", - "isEnabled": true, - "isReady": true, - "deviceId": "0" - }, - { - "properties": {}, - "commands": {}, - "components": [], - "apiVersion": 1, - "uri": "https://api.viessmann-platform.io/iot/v1/equipment/installations/#######/gateways/################/devices/0/features/heating.circuits.2.operating.modes.active", - "gatewayId": "################", - "feature": "heating.circuits.2.operating.modes.active", - "timestamp": "2021-09-02T11:30:20.788Z", - "isEnabled": false, - "isReady": true, - "deviceId": "0" - }, - { - "properties": {}, - "commands": {}, - "components": [ - "active", - "cooling", - "dhw", - "dhwAndHeating", - "dhwAndHeatingCooling", - "heating", - "heatingCooling", - "normalStandby", - "standby" - ], - "apiVersion": 1, - "uri": "https://api.viessmann-platform.io/iot/v1/equipment/installations/#######/gateways/################/devices/0/features/heating.circuits.0.operating.modes", - "gatewayId": "################", - "feature": "heating.circuits.0.operating.modes", - "timestamp": "2021-09-02T11:30:19.049Z", - "isEnabled": true, - "isReady": true, - "deviceId": "0" - }, - { - "properties": { - "active": { - "type": "boolean", - "value": false - } - }, - "commands": {}, - "components": [], - "apiVersion": 1, - "uri": 
"https://api.viessmann-platform.io/iot/v1/equipment/installations/#######/gateways/################/devices/0/features/heating.circuits.0.operating.modes.standby", - "gatewayId": "################", - "feature": "heating.circuits.0.operating.modes.standby", - "timestamp": "2021-09-02T11:30:20.620Z", - "isEnabled": true, - "isReady": true, - "deviceId": "0" - }, - { - "properties": { - "value": { - "type": "string", - "value": "normal" - } - }, - "commands": {}, - "components": [], - "apiVersion": 1, - "uri": "https://api.viessmann-platform.io/iot/v1/equipment/installations/#######/gateways/################/devices/0/features/heating.circuits.0.operating.programs.active", - "gatewayId": "################", - "feature": "heating.circuits.0.operating.programs.active", - "timestamp": "2021-09-02T11:30:20.744Z", - "isEnabled": true, - "isReady": true, - "deviceId": "0" - }, - { - "properties": { - "active": { - "value": false, - "type": "boolean" - }, - "start": { - "value": "", - "type": "string" - }, - "end": { - "value": "", - "type": "string" - } - }, - "commands": { - "changeEndDate": { - "uri": "https://api.viessmann-platform.io/iot/v1/equipment/installations/#######/gateways/################/devices/0/features/heating.operating.programs.holiday/commands/changeEndDate", - "name": "changeEndDate", - "isExecutable": false, - "params": { - "end": { - "type": "string", - "required": true, - "constraints": { - "regEx": "^[\\d]{4}-[\\d]{2}-[\\d]{2}$", - "sameDayAllowed": false - } - } - } - }, - "schedule": { - "uri": "https://api.viessmann-platform.io/iot/v1/equipment/installations/#######/gateways/################/devices/0/features/heating.operating.programs.holiday/commands/schedule", - "name": "schedule", - "isExecutable": true, - "params": { - "start": { - "type": "string", - "required": true, - "constraints": { - "regEx": "^[\\d]{4}-[\\d]{2}-[\\d]{2}$" - } - }, - "end": { - "type": "string", - "required": true, - "constraints": { - "regEx": "^[\\d]{4}-[\\d]{2}-[\\d]{2}$", - "sameDayAllowed": false - } - } - } - }, - "unschedule": { - "uri": "https://api.viessmann-platform.io/iot/v1/equipment/installations/#######/gateways/################/devices/0/features/heating.operating.programs.holiday/commands/unschedule", - "name": "unschedule", - "isExecutable": true, - "params": {} - } - }, - "components": [], - "apiVersion": 1, - "uri": "https://api.viessmann-platform.io/iot/v1/equipment/installations/#######/gateways/################/devices/0/features/heating.operating.programs.holiday", - "gatewayId": "################", - "feature": "heating.operating.programs.holiday", - "timestamp": "2021-09-02T11:30:20.851Z", - "isEnabled": true, - "isReady": true, - "deviceId": "0" - }, - { - "properties": {}, - "commands": {}, - "components": [], - "apiVersion": 1, - "uri": "https://api.viessmann-platform.io/iot/v1/equipment/installations/#######/gateways/################/devices/0/features/heating.evaporators.0.sensors.temperature", - "gatewayId": "################", - "feature": "heating.evaporators.0.sensors.temperature", - "timestamp": "2021-09-02T11:30:19.049Z", - "isEnabled": true, - "isReady": true, - "deviceId": "0" - }, - { - "properties": { - "value": { - "value": 45, - "unit": "", - "type": "number" - } - }, - "commands": { - "setTargetTemperature": { - "uri": "https://api.viessmann-platform.io/iot/v1/equipment/installations/#######/gateways/################/devices/0/features/heating.dhw.temperature.temp2/commands/setTargetTemperature", - "name": "setTargetTemperature", - "isExecutable": 
true, - "params": { - "temperature": { - "type": "number", - "required": true, - "constraints": { - "min": 10, - "max": 60, - "stepping": 1 - } - } - } - } - }, - "components": [], - "apiVersion": 1, - "uri": "https://api.viessmann-platform.io/iot/v1/equipment/installations/#######/gateways/################/devices/0/features/heating.dhw.temperature.temp2", - "gatewayId": "################", - "feature": "heating.dhw.temperature.temp2", - "timestamp": "2021-09-02T11:30:19.870Z", - "isEnabled": true, - "isReady": true, - "deviceId": "0" - }, - { - "properties": {}, - "commands": {}, - "components": [ - "sensors" - ], - "apiVersion": 1, - "uri": "https://api.viessmann-platform.io/iot/v1/equipment/installations/#######/gateways/################/devices/0/features/heating.evaporators.0", - "gatewayId": "################", - "feature": "heating.evaporators.0", - "timestamp": "2021-09-02T11:30:19.049Z", - "isEnabled": true, - "isReady": true, - "deviceId": "0" - }, - { - "properties": {}, - "commands": {}, - "components": [], - "apiVersion": 1, - "uri": "https://api.viessmann-platform.io/iot/v1/equipment/installations/#######/gateways/################/devices/0/features/heating.circuits.1.operating.programs.eco", - "gatewayId": "################", - "feature": "heating.circuits.1.operating.programs.eco", - "timestamp": "2021-09-02T11:30:21.713Z", - "isEnabled": false, - "isReady": true, - "deviceId": "0" - }, - { - "properties": { - "active": { - "type": "boolean", - "value": false - } - }, - "commands": {}, - "components": [], - "apiVersion": 1, - "uri": "https://api.viessmann-platform.io/iot/v1/equipment/installations/#######/gateways/################/devices/0/features/ventilation.operating.modes.standby", - "gatewayId": "################", - "feature": "ventilation.operating.modes.standby", - "timestamp": "2021-09-02T11:30:20.820Z", - "isEnabled": true, - "isReady": true, - "deviceId": "0" - }, - { - "properties": { - "value": { - "type": "number", - "value": 0, - "unit": "" - } - }, - "commands": {}, - "components": [ - "levels" - ], - "apiVersion": 1, - "uri": "https://api.viessmann-platform.io/iot/v1/equipment/installations/#######/gateways/################/devices/0/features/heating.circuits.1.temperature", - "gatewayId": "################", - "feature": "heating.circuits.1.temperature", - "timestamp": "2021-09-02T11:30:20.863Z", - "isEnabled": true, - "isReady": true, - "deviceId": "0" - }, - { - "properties": { - "status": { - "type": "string", - "value": "off" - } - }, - "commands": {}, - "components": [], - "apiVersion": 1, - "uri": "https://api.viessmann-platform.io/iot/v1/equipment/installations/#######/gateways/################/devices/0/features/heating.circuits.0.frostprotection", - "gatewayId": "################", - "feature": "heating.circuits.0.frostprotection", - "timestamp": "2021-09-02T11:30:20.466Z", - "isEnabled": true, - "isReady": true, - "deviceId": "0" - }, - { - "properties": {}, - "commands": {}, - "components": [ - "circuit" - ], - "apiVersion": 1, - "uri": "https://api.viessmann-platform.io/iot/v1/equipment/installations/#######/gateways/################/devices/0/features/heating.solar.pumps", - "gatewayId": "################", - "feature": "heating.solar.pumps", - "timestamp": "2021-09-02T11:30:19.050Z", - "isEnabled": true, - "isReady": true, - "deviceId": "0" - }, - { - "properties": { - "unit": { - "value": "celsius", - "type": "string" - }, - "status": { - "type": "string", - "value": "notConnected" - } - }, - "commands": {}, - "components": [], - 
"apiVersion": 1, - "uri": "https://api.viessmann-platform.io/iot/v1/equipment/installations/#######/gateways/################/devices/0/features/heating.buffer.sensors.temperature.main", - "gatewayId": "################", - "feature": "heating.buffer.sensors.temperature.main", - "timestamp": "2021-09-02T11:30:20.269Z", - "isEnabled": true, - "isReady": true, - "deviceId": "0" - }, - { - "properties": {}, - "commands": {}, - "components": [ - "active", - "comfort", - "eco", - "fixed", - "normal", - "reduced", - "standby" - ], - "apiVersion": 1, - "uri": "https://api.viessmann-platform.io/iot/v1/equipment/installations/#######/gateways/################/devices/0/features/heating.circuits.1.operating.programs", - "gatewayId": "################", - "feature": "heating.circuits.1.operating.programs", - "timestamp": "2021-09-02T11:30:19.048Z", - "isEnabled": true, - "isReady": true, - "deviceId": "0" - }, - { - "properties": { - "active": { - "type": "boolean", - "value": true - } - }, - "commands": {}, - "components": [], - "apiVersion": 1, - "uri": "https://api.viessmann-platform.io/iot/v1/equipment/installations/#######/gateways/################/devices/0/features/ventilation.operating.modes.ventilation", - "gatewayId": "################", - "feature": "ventilation.operating.modes.ventilation", - "timestamp": "2021-09-02T11:30:20.823Z", - "isEnabled": true, - "isReady": true, - "deviceId": "0" - }, - { - "properties": {}, - "commands": {}, - "components": [], - "apiVersion": 1, - "uri": "https://api.viessmann-platform.io/iot/v1/equipment/installations/#######/gateways/################/devices/0/features/heating.burners.0.modulation", - "gatewayId": "################", - "feature": "heating.burners.0.modulation", - "timestamp": "2021-09-02T11:30:20.276Z", - "isEnabled": false, - "isReady": true, - "deviceId": "0" - }, - { - "properties": {}, - "commands": {}, - "components": [ - "heatingRod", - "multiFamilyHouse", - "secondaryHeatGenerator" - ], - "apiVersion": 1, - "uri": "https://api.viessmann-platform.io/iot/v1/equipment/installations/#######/gateways/################/devices/0/features/heating.configuration", - "gatewayId": "################", - "feature": "heating.configuration", - "timestamp": "2021-09-02T11:30:19.048Z", - "isEnabled": true, - "isReady": true, - "deviceId": "0" - }, - { - "properties": { - "active": { - "value": false, - "type": "boolean" - } - }, - "commands": { - "activate": { - "uri": "https://api.viessmann-platform.io/iot/v1/equipment/installations/#######/gateways/################/devices/0/features/heating.dhw.oneTimeCharge/commands/activate", - "name": "activate", - "isExecutable": true, - "params": {} - }, - "deactivate": { - "uri": "https://api.viessmann-platform.io/iot/v1/equipment/installations/#######/gateways/################/devices/0/features/heating.dhw.oneTimeCharge/commands/deactivate", - "name": "deactivate", - "isExecutable": false, - "params": {} - } - }, - "components": [], - "apiVersion": 1, - "uri": "https://api.viessmann-platform.io/iot/v1/equipment/installations/#######/gateways/################/devices/0/features/heating.dhw.oneTimeCharge", - "gatewayId": "################", - "feature": "heating.dhw.oneTimeCharge", - "timestamp": "2021-09-02T11:30:20.713Z", - "isEnabled": true, - "isReady": true, - "deviceId": "0" - }, - { - "properties": { - "active": { - "value": false, - "type": "boolean" - } - }, - "commands": {}, - "components": [], - "apiVersion": 1, - "uri": 
"https://api.viessmann-platform.io/iot/v1/equipment/installations/#######/gateways/################/devices/0/features/heating.circuits.0.operating.programs.standby", - "gatewayId": "################", - "feature": "heating.circuits.0.operating.programs.standby", - "timestamp": "2021-09-02T11:30:20.646Z", - "isEnabled": true, - "isReady": true, - "deviceId": "0" - }, - { - "properties": { - "active": { - "value": true, - "type": "boolean" - }, - "demand": { - "value": "unknown", - "type": "string" - }, - "temperature": { - "value": 23, - "unit": "", - "type": "number" - } - }, - "commands": { - "setTemperature": { - "uri": "https://api.viessmann-platform.io/iot/v1/equipment/installations/#######/gateways/################/devices/0/features/heating.circuits.0.operating.programs.normal/commands/setTemperature", - "name": "setTemperature", - "isExecutable": true, - "params": { - "targetTemperature": { - "type": "number", - "required": true, - "constraints": { - "min": 10, - "max": 30, - "stepping": 1 - } - } - } - } - }, - "components": [], - "apiVersion": 1, - "uri": "https://api.viessmann-platform.io/iot/v1/equipment/installations/#######/gateways/################/devices/0/features/heating.circuits.0.operating.programs.normal", - "gatewayId": "################", - "feature": "heating.circuits.0.operating.programs.normal", - "timestamp": "2021-09-02T11:30:21.707Z", - "isEnabled": true, - "isReady": true, - "deviceId": "0" - }, - { - "properties": { - "shift": { - "type": "number", - "unit": "", - "value": 0 - }, - "slope": { - "type": "number", - "unit": "", - "value": 0.6 - } - }, - "commands": { - "setCurve": { - "uri": "https://api.viessmann-platform.io/iot/v1/equipment/installations/#######/gateways/################/devices/0/features/heating.circuits.2.heating.curve/commands/setCurve", - "name": "setCurve", - "isExecutable": true, - "params": { - "slope": { - "type": "number", - "required": true, - "constraints": { - "min": 0, - "max": 3.5, - "stepping": 0.1 - } - }, - "shift": { - "type": "number", - "required": true, - "constraints": { - "min": -15, - "max": 40, - "stepping": 1 - } - } - } - } - }, - "components": [], - "apiVersion": 1, - "uri": "https://api.viessmann-platform.io/iot/v1/equipment/installations/#######/gateways/################/devices/0/features/heating.circuits.2.heating.curve", - "gatewayId": "################", - "feature": "heating.circuits.2.heating.curve", - "timestamp": "2021-09-02T11:30:20.483Z", - "isEnabled": true, - "isReady": true, - "deviceId": "0" - }, - { - "properties": { - "active": { - "type": "boolean", - "value": false - } - }, - "commands": {}, - "components": [], - "apiVersion": 1, - "uri": "https://api.viessmann-platform.io/iot/v1/equipment/installations/#######/gateways/################/devices/0/features/ventilation.operating.programs.basic", - "gatewayId": "################", - "feature": "ventilation.operating.programs.basic", - "timestamp": "2021-09-02T11:30:20.832Z", - "isEnabled": true, - "isReady": true, - "deviceId": "0" - }, - { - "properties": {}, - "commands": {}, - "components": [ - "0" - ], - "apiVersion": 1, - "uri": "https://api.viessmann-platform.io/iot/v1/equipment/installations/#######/gateways/################/devices/0/features/heating.burners", - "gatewayId": "################", - "feature": "heating.burners", - "timestamp": "2021-09-02T11:30:19.049Z", - "isEnabled": true, - "isReady": true, - "deviceId": "0" - }, - { - "properties": {}, - "commands": {}, - "components": [], - "apiVersion": 1, - "uri": 
"https://api.viessmann-platform.io/iot/v1/equipment/installations/#######/gateways/################/devices/0/features/heating.circuits.2.frostprotection", - "gatewayId": "################", - "feature": "heating.circuits.2.frostprotection", - "timestamp": "2021-09-02T11:30:20.469Z", - "isEnabled": false, - "isReady": true, - "deviceId": "0" - }, - { - "properties": {}, - "commands": {}, - "components": [ - "curve", - "schedule" - ], - "apiVersion": 1, - "uri": "https://api.viessmann-platform.io/iot/v1/equipment/installations/#######/gateways/################/devices/0/features/heating.circuits.2.heating", - "gatewayId": "################", - "feature": "heating.circuits.2.heating", - "timestamp": "2021-09-02T11:30:19.048Z", - "isEnabled": true, - "isReady": true, - "deviceId": "0" - }, - { - "properties": { - "active": { - "value": false, - "type": "boolean" - }, - "demand": { - "value": "unknown", - "type": "string" - }, - "temperature": { - "value": 10, - "unit": "", - "type": "number" - } - }, - "commands": { - "setTemperature": { - "uri": "https://api.viessmann-platform.io/iot/v1/equipment/installations/#######/gateways/################/devices/0/features/heating.circuits.0.operating.programs.reduced/commands/setTemperature", - "name": "setTemperature", - "isExecutable": true, - "params": { - "targetTemperature": { - "type": "number", - "required": true, - "constraints": { - "min": 10, - "max": 30, - "stepping": 1 - } - } - } - } - }, - "components": [], - "apiVersion": 1, - "uri": "https://api.viessmann-platform.io/iot/v1/equipment/installations/#######/gateways/################/devices/0/features/heating.circuits.0.operating.programs.reduced", - "gatewayId": "################", - "feature": "heating.circuits.0.operating.programs.reduced", - "timestamp": "2021-09-02T11:30:21.710Z", - "isEnabled": true, - "isReady": true, - "deviceId": "0" - }, - { - "properties": {}, - "commands": {}, - "components": [], - "apiVersion": 1, - "uri": "https://api.viessmann-platform.io/iot/v1/equipment/installations/#######/gateways/################/devices/0/features/heating.circuits.2.operating.modes.normalStandby", - "gatewayId": "################", - "feature": "heating.circuits.2.operating.modes.normalStandby", - "timestamp": "2021-09-02T11:30:20.527Z", - "isEnabled": false, - "isReady": true, - "deviceId": "0" - }, - { - "properties": {}, - "commands": {}, - "components": [], - "apiVersion": 1, - "uri": "https://api.viessmann-platform.io/iot/v1/equipment/installations/#######/gateways/################/devices/0/features/heating.circuits.2.operating.programs.comfort", - "gatewayId": "################", - "feature": "heating.circuits.2.operating.programs.comfort", - "timestamp": "2021-09-02T11:30:21.705Z", - "isEnabled": false, - "isReady": true, - "deviceId": "0" - }, - { - "properties": {}, - "commands": {}, - "components": [], - "apiVersion": 1, - "uri": "https://api.viessmann-platform.io/iot/v1/equipment/installations/#######/gateways/################/devices/0/features/heating.circuits.2.operating.modes.dhwAndHeating", - "gatewayId": "################", - "feature": "heating.circuits.2.operating.modes.dhwAndHeating", - "timestamp": "2021-09-02T11:30:20.570Z", - "isEnabled": false, - "isReady": true, - "deviceId": "0" - }, - { - "properties": { - "active": { - "type": "boolean", - "value": false - } - }, - "commands": {}, - "components": [], - "apiVersion": 1, - "uri": 
"https://api.viessmann-platform.io/iot/v1/equipment/installations/#######/gateways/################/devices/0/features/heating.dhw.charging", - "gatewayId": "################", - "feature": "heating.dhw.charging", - "timestamp": "2021-09-02T11:30:20.712Z", - "isEnabled": true, - "isReady": true, - "deviceId": "0" - }, - { - "properties": { - "active": { - "value": true, - "type": "boolean" - }, - "name": { - "value": "Heat/cool circuit 1", - "type": "string" - }, - "type": { - "value": "heatingCircuit", - "type": "string" - } - }, - "commands": { - "setName": { - "uri": "https://api.viessmann-platform.io/iot/v1/equipment/installations/#######/gateways/################/devices/0/features/heating.circuits.0/commands/setName", - "name": "setName", - "isExecutable": true, - "params": { - "name": { - "type": "string", - "required": true, - "constraints": { - "minLength": 1, - "maxLength": 20 - } - } - } - } - }, - "components": [ - "circulation", - "frostprotection", - "heating", - "operating", - "sensors", - "temperature" - ], - "apiVersion": 1, - "uri": "https://api.viessmann-platform.io/iot/v1/equipment/installations/#######/gateways/################/devices/0/features/heating.circuits.0", - "gatewayId": "################", - "feature": "heating.circuits.0", - "timestamp": "2021-09-02T11:30:20.286Z", - "isEnabled": true, - "isReady": true, - "deviceId": "0" - }, - { - "properties": {}, - "commands": {}, - "components": [ - "temperature" - ], - "apiVersion": 1, - "uri": "https://api.viessmann-platform.io/iot/v1/equipment/installations/#######/gateways/################/devices/0/features/heating.circuits.1.sensors", - "gatewayId": "################", - "feature": "heating.circuits.1.sensors", - "timestamp": "2021-09-02T11:30:19.049Z", - "isEnabled": true, - "isReady": true, - "deviceId": "0" - }, - { - "properties": {}, - "commands": {}, - "components": [], - "apiVersion": 1, - "uri": "https://api.viessmann-platform.io/iot/v1/equipment/installations/#######/gateways/################/devices/0/features/heating.compressors.0.sensors.temperature", - "gatewayId": "################", - "feature": "heating.compressors.0.sensors.temperature", - "timestamp": "2021-09-02T11:30:19.050Z", - "isEnabled": true, - "isReady": true, - "deviceId": "0" - }, - { - "properties": {}, - "commands": {}, - "components": [ - "pumps", - "sensors" - ], - "apiVersion": 1, - "uri": "https://api.viessmann-platform.io/iot/v1/equipment/installations/#######/gateways/################/devices/0/features/heating.solar", - "gatewayId": "################", - "feature": "heating.solar", - "timestamp": "2021-09-02T11:30:20.841Z", - "isEnabled": false, - "isReady": true, - "deviceId": "0" - }, - { - "properties": { - "unit": { - "value": "celsius", - "type": "string" - }, - "status": { - "type": "string", - "value": "notConnected" - } - }, - "commands": {}, - "components": [], - "apiVersion": 1, - "uri": "https://api.viessmann-platform.io/iot/v1/equipment/installations/#######/gateways/################/devices/0/features/heating.primaryCircuit.sensors.temperature.return", - "gatewayId": "################", - "feature": "heating.primaryCircuit.sensors.temperature.return", - "timestamp": "2021-09-02T11:30:20.662Z", - "isEnabled": true, - "isReady": true, - "deviceId": "0" - }, - { - "properties": { - "active": { - "type": "boolean", - "value": false - } - }, - "commands": {}, - "components": [], - "apiVersion": 1, - "uri": 
"https://api.viessmann-platform.io/iot/v1/equipment/installations/#######/gateways/################/devices/0/features/heating.circuits.0.operating.modes.cooling", - "gatewayId": "################", - "feature": "heating.circuits.0.operating.modes.cooling", - "timestamp": "2021-09-02T11:30:20.503Z", - "isEnabled": true, - "isReady": true, - "deviceId": "0" - }, - { - "properties": {}, - "commands": {}, - "components": [], - "apiVersion": 1, - "uri": "https://api.viessmann-platform.io/iot/v1/equipment/installations/#######/gateways/################/devices/0/features/heating.circuits.1.operating.modes.normalStandby", - "gatewayId": "################", - "feature": "heating.circuits.1.operating.modes.normalStandby", - "timestamp": "2021-09-02T11:30:20.516Z", - "isEnabled": false, - "isReady": true, - "deviceId": "0" - }, - { - "properties": {}, - "commands": {}, - "components": [], - "apiVersion": 1, - "uri": "https://api.viessmann-platform.io/iot/v1/equipment/installations/#######/gateways/################/devices/0/features/heating.circuits.0.operating.modes.dhwAndHeating", - "gatewayId": "################", - "feature": "heating.circuits.0.operating.modes.dhwAndHeating", - "timestamp": "2021-09-02T11:30:20.565Z", - "isEnabled": false, - "isReady": true, - "deviceId": "0" - }, - { - "properties": {}, - "commands": {}, - "components": [ - "modes", - "programs" - ], - "apiVersion": 1, - "uri": "https://api.viessmann-platform.io/iot/v1/equipment/installations/#######/gateways/################/devices/0/features/ventilation.operating", - "gatewayId": "################", - "feature": "ventilation.operating", - "timestamp": "2021-09-02T11:30:19.048Z", - "isEnabled": true, - "isReady": true, - "deviceId": "0" - }, - { - "properties": { - "value": { - "type": "number", - "value": 19, - "unit": "" - } - }, - "commands": {}, - "components": [ - "levels" - ], - "apiVersion": 1, - "uri": "https://api.viessmann-platform.io/iot/v1/equipment/installations/#######/gateways/################/devices/0/features/heating.circuits.0.temperature", - "gatewayId": "################", - "feature": "heating.circuits.0.temperature", - "timestamp": "2021-09-02T11:30:20.862Z", - "isEnabled": true, - "isReady": true, - "deviceId": "0" - }, - { - "properties": { - "unit": { - "value": "celsius", - "type": "string" - }, - "value": { - "type": "number", - "value": 21.2, - "unit": "celsius" - }, - "status": { - "type": "string", - "value": "connected" - } - }, - "commands": {}, - "components": [], - "apiVersion": 1, - "uri": "https://api.viessmann-platform.io/iot/v1/equipment/installations/#######/gateways/################/devices/0/features/heating.sensors.temperature.return", - "gatewayId": "################", - "feature": "heating.sensors.temperature.return", - "timestamp": "2021-09-02T18:30:40.784Z", - "isEnabled": true, - "isReady": true, - "deviceId": "0" - }, - { - "properties": { - "active": { - "value": false, - "type": "boolean" - } - }, - "commands": {}, - "components": [], - "apiVersion": 1, - "uri": "https://api.viessmann-platform.io/iot/v1/equipment/installations/#######/gateways/################/devices/0/features/ventilation.operating.programs.comfort", - "gatewayId": "################", - "feature": "ventilation.operating.programs.comfort", - "timestamp": "2021-09-02T11:30:20.829Z", - "isEnabled": true, - "isReady": true, - "deviceId": "0" - }, - { - "properties": {}, - "commands": {}, - "components": [ - "holiday" - ], - "apiVersion": 1, - "uri": 
"https://api.viessmann-platform.io/iot/v1/equipment/installations/#######/gateways/################/devices/0/features/heating.operating.programs", - "gatewayId": "################", - "feature": "heating.operating.programs", - "timestamp": "2021-09-02T11:30:19.048Z", - "isEnabled": true, - "isReady": true, - "deviceId": "0" - }, - { - "properties": {}, - "commands": {}, - "components": [], - "apiVersion": 1, - "uri": "https://api.viessmann-platform.io/iot/v1/equipment/installations/#######/gateways/################/devices/0/features/heating.circuits.2.operating.programs.standby", - "gatewayId": "################", - "feature": "heating.circuits.2.operating.programs.standby", - "timestamp": "2021-09-02T11:30:20.649Z", - "isEnabled": false, - "isReady": true, - "deviceId": "0" - }, - { - "properties": { - "value": { - "type": "string", - "value": "standard" - } - }, - "commands": {}, - "components": [], - "apiVersion": 1, - "uri": "https://api.viessmann-platform.io/iot/v1/equipment/installations/#######/gateways/################/devices/0/features/ventilation.operating.programs.active", - "gatewayId": "################", - "feature": "ventilation.operating.programs.active", - "timestamp": "2021-09-02T11:30:20.825Z", - "isEnabled": true, - "isReady": true, - "deviceId": "0" - }, - { - "properties": {}, - "commands": {}, - "components": [ - "modes", - "programs" - ], - "apiVersion": 1, - "uri": "https://api.viessmann-platform.io/iot/v1/equipment/installations/#######/gateways/################/devices/0/features/heating.circuits.1.operating", - "gatewayId": "################", - "feature": "heating.circuits.1.operating", - "timestamp": "2021-09-02T11:30:19.048Z", - "isEnabled": true, - "isReady": true, - "deviceId": "0" - }, - { - "properties": {}, - "commands": {}, - "components": [ - "pump" - ], - "apiVersion": 1, - "uri": "https://api.viessmann-platform.io/iot/v1/equipment/installations/#######/gateways/################/devices/0/features/heating.circuits.1.circulation", - "gatewayId": "################", - "feature": "heating.circuits.1.circulation", - "timestamp": "2021-09-02T11:30:19.050Z", - "isEnabled": true, - "isReady": true, - "deviceId": "0" - }, - { - "properties": {}, - "commands": {}, - "components": [], - "apiVersion": 1, - "uri": "https://api.viessmann-platform.io/iot/v1/equipment/installations/#######/gateways/################/devices/0/features/heating.circuits.2.operating.programs.active", - "gatewayId": "################", - "feature": "heating.circuits.2.operating.programs.active", - "timestamp": "2021-09-02T11:30:20.748Z", - "isEnabled": false, - "isReady": true, - "deviceId": "0" - }, - { - "properties": {}, - "commands": {}, - "components": [], - "apiVersion": 1, - "uri": "https://api.viessmann-platform.io/iot/v1/equipment/installations/#######/gateways/################/devices/0/features/heating.circuits.2.operating.modes.heatingCooling", - "gatewayId": "################", - "feature": "heating.circuits.2.operating.modes.heatingCooling", - "timestamp": "2021-09-02T11:30:20.582Z", - "isEnabled": false, - "isReady": true, - "deviceId": "0" - }, - { - "properties": {}, - "commands": {}, - "components": [], - "apiVersion": 1, - "uri": "https://api.viessmann-platform.io/iot/v1/equipment/installations/#######/gateways/################/devices/0/features/heating.circuits.1.circulation.pump", - "gatewayId": "################", - "feature": "heating.circuits.1.circulation.pump", - "timestamp": "2021-09-02T11:30:20.741Z", - "isEnabled": false, - "isReady": true, - "deviceId": 
"0" - }, - { - "properties": {}, - "commands": {}, - "components": [ - "modes", - "programs" - ], - "apiVersion": 1, - "uri": "https://api.viessmann-platform.io/iot/v1/equipment/installations/#######/gateways/################/devices/0/features/heating.circuits.0.operating", - "gatewayId": "################", - "feature": "heating.circuits.0.operating", - "timestamp": "2021-09-02T11:30:19.048Z", - "isEnabled": true, - "isReady": true, - "deviceId": "0" - }, - { - "properties": {}, - "commands": {}, - "components": [], - "apiVersion": 1, - "uri": "https://api.viessmann-platform.io/iot/v1/equipment/installations/#######/gateways/################/devices/0/features/device", - "gatewayId": "################", - "feature": "device", - "timestamp": "2021-09-02T11:30:19.049Z", - "isEnabled": true, - "isReady": true, - "deviceId": "0" - }, - { - "properties": {}, - "commands": {}, - "components": [], - "apiVersion": 1, - "uri": "https://api.viessmann-platform.io/iot/v1/equipment/installations/#######/gateways/################/devices/0/features/heating.solar.sensors.temperature.collector", - "gatewayId": "################", - "feature": "heating.solar.sensors.temperature.collector", - "timestamp": "2021-09-02T11:30:20.842Z", - "isEnabled": false, - "isReady": true, - "deviceId": "0" - }, - { - "properties": { - "value": { - "value": "dhwAndHeatingCooling", - "type": "string" - } - }, - "commands": { - "setMode": { - "uri": "https://api.viessmann-platform.io/iot/v1/equipment/installations/#######/gateways/################/devices/0/features/heating.circuits.0.operating.modes.active/commands/setMode", - "name": "setMode", - "isExecutable": true, - "params": { - "mode": { - "type": "string", - "required": true, - "constraints": { - "enum": [ - "standby", - "dhw", - "dhwAndHeatingCooling" - ] - } - } - } - } - }, - "components": [], - "apiVersion": 1, - "uri": "https://api.viessmann-platform.io/iot/v1/equipment/installations/#######/gateways/################/devices/0/features/heating.circuits.0.operating.modes.active", - "gatewayId": "################", - "feature": "heating.circuits.0.operating.modes.active", - "timestamp": "2021-09-02T11:30:20.758Z", - "isEnabled": true, - "isReady": true, - "deviceId": "0" - }, - { - "properties": {}, - "commands": {}, - "components": [], - "apiVersion": 1, - "uri": "https://api.viessmann-platform.io/iot/v1/equipment/installations/#######/gateways/################/devices/0/features/heating.circuits.1.operating.modes.cooling", - "gatewayId": "################", - "feature": "heating.circuits.1.operating.modes.cooling", - "timestamp": "2021-09-02T11:30:20.506Z", - "isEnabled": false, - "isReady": true, - "deviceId": "0" - }, - { - "properties": {}, - "commands": {}, - "components": [], - "apiVersion": 1, - "uri": "https://api.viessmann-platform.io/iot/v1/equipment/installations/#######/gateways/################/devices/0/features/heating.sensors.valve", - "gatewayId": "################", - "feature": "heating.sensors.valve", - "timestamp": "2021-09-02T11:30:19.050Z", - "isEnabled": true, - "isReady": true, - "deviceId": "0" - }, - { - "properties": {}, - "commands": {}, - "components": [ - "collector", - "dhw" - ], - "apiVersion": 1, - "uri": "https://api.viessmann-platform.io/iot/v1/equipment/installations/#######/gateways/################/devices/0/features/heating.solar.sensors.temperature", - "gatewayId": "################", - "feature": "heating.solar.sensors.temperature", - "timestamp": "2021-09-02T11:30:19.050Z", - "isEnabled": true, - "isReady": true, - 
"deviceId": "0" - }, - { - "properties": {}, - "commands": {}, - "components": [], - "apiVersion": 1, - "uri": "https://api.viessmann-platform.io/iot/v1/equipment/installations/#######/gateways/################/devices/0/features/heating.circuits.1.heating.schedule", - "gatewayId": "################", - "feature": "heating.circuits.1.heating.schedule", - "timestamp": "2021-09-02T11:30:20.486Z", - "isEnabled": false, - "isReady": true, - "deviceId": "0" - }, - { - "properties": { - "unit": { - "value": "celsius", - "type": "string" - }, - "value": { - "type": "number", - "value": 25.1, - "unit": "celsius" - }, - "status": { - "type": "string", - "value": "connected" - } - }, - "commands": {}, - "components": [], - "apiVersion": 1, - "uri": "https://api.viessmann-platform.io/iot/v1/equipment/installations/#######/gateways/################/devices/0/features/heating.circuits.0.sensors.temperature.room", - "gatewayId": "################", - "feature": "heating.circuits.0.sensors.temperature.room", - "timestamp": "2021-09-02T18:19:04.649Z", - "isEnabled": true, - "isReady": true, - "deviceId": "0" - }, - { - "properties": { - "unit": { - "value": "celsius", - "type": "string" - }, - "value": { - "type": "number", - "value": 26.3, - "unit": "celsius" - }, - "status": { - "type": "string", - "value": "connected" - } - }, - "commands": {}, - "components": [], - "apiVersion": 1, - "uri": "https://api.viessmann-platform.io/iot/v1/equipment/installations/#######/gateways/################/devices/0/features/heating.secondaryCircuit.sensors.temperature.supply", - "gatewayId": "################", - "feature": "heating.secondaryCircuit.sensors.temperature.supply", - "timestamp": "2021-09-02T18:21:47.906Z", - "isEnabled": true, - "isReady": true, - "deviceId": "0" - }, - { - "properties": {}, - "commands": {}, - "components": [], - "apiVersion": 1, - "uri": "https://api.viessmann-platform.io/iot/v1/equipment/installations/#######/gateways/################/devices/0/features/heating.configuration.secondaryHeatGenerator", - "gatewayId": "################", - "feature": "heating.configuration.secondaryHeatGenerator", - "timestamp": "2021-09-02T11:30:19.048Z", - "isEnabled": true, - "isReady": true, - "deviceId": "0" - }, - { - "properties": { - "active": { - "type": "boolean", - "value": false - } - }, - "commands": {}, - "components": [], - "apiVersion": 1, - "uri": "https://api.viessmann-platform.io/iot/v1/equipment/installations/#######/gateways/################/devices/0/features/ventilation.operating.programs.reduced", - "gatewayId": "################", - "feature": "ventilation.operating.programs.reduced", - "timestamp": "2021-09-02T11:30:20.834Z", - "isEnabled": true, - "isReady": true, - "deviceId": "0" - }, - { - "properties": { - "active": { - "type": "boolean", - "value": false - } - }, - "commands": {}, - "components": [], - "apiVersion": 1, - "uri": "https://api.viessmann-platform.io/iot/v1/equipment/installations/#######/gateways/################/devices/0/features/heating.configuration.multiFamilyHouse", - "gatewayId": "################", - "feature": "heating.configuration.multiFamilyHouse", - "timestamp": "2021-09-02T11:30:20.281Z", - "isEnabled": true, - "isReady": true, - "deviceId": "0" - }, - { - "properties": { - "active": { - "type": "boolean", - "value": false - } - }, - "commands": {}, - "components": [], - "apiVersion": 1, - "uri": 
"https://api.viessmann-platform.io/iot/v1/equipment/installations/#######/gateways/################/devices/0/features/ventilation.operating.programs.intensive", - "gatewayId": "################", - "feature": "ventilation.operating.programs.intensive", - "timestamp": "2021-09-02T11:30:20.838Z", - "isEnabled": true, - "isReady": true, - "deviceId": "0" - }, - { - "properties": {}, - "commands": {}, - "components": [], - "apiVersion": 1, - "uri": "https://api.viessmann-platform.io/iot/v1/equipment/installations/#######/gateways/################/devices/0/features/heating.circuits.1.operating.modes.standby", - "gatewayId": "################", - "feature": "heating.circuits.1.operating.modes.standby", - "timestamp": "2021-09-02T11:30:20.624Z", - "isEnabled": false, - "isReady": true, - "deviceId": "0" - }, - { - "properties": {}, - "commands": {}, - "components": [ - "curve", - "schedule" - ], - "apiVersion": 1, - "uri": "https://api.viessmann-platform.io/iot/v1/equipment/installations/#######/gateways/################/devices/0/features/heating.circuits.0.heating", - "gatewayId": "################", - "feature": "heating.circuits.0.heating", - "timestamp": "2021-09-02T11:30:19.048Z", - "isEnabled": true, - "isReady": true, - "deviceId": "0" - }, - { - "properties": { - "active": { - "value": false, - "type": "boolean" - }, - "demand": { - "value": "unknown", - "type": "string" - }, - "temperature": { - "value": 20, - "unit": "", - "type": "number" - } - }, - "commands": { - "setTemperature": { - "uri": "https://api.viessmann-platform.io/iot/v1/equipment/installations/#######/gateways/################/devices/0/features/heating.circuits.0.operating.programs.comfort/commands/setTemperature", - "name": "setTemperature", - "isExecutable": true, - "params": { - "targetTemperature": { - "type": "number", - "required": true, - "constraints": { - "min": 10, - "max": 30, - "stepping": 1 - } - } - } - }, - "activate": { - "uri": "https://api.viessmann-platform.io/iot/v1/equipment/installations/#######/gateways/################/devices/0/features/heating.circuits.0.operating.programs.comfort/commands/activate", - "name": "activate", - "isExecutable": true, - "params": { - "temperature": { - "type": "number", - "required": false, - "constraints": { - "min": 10, - "max": 30, - "stepping": 1 - } - } - } - }, - "deactivate": { - "uri": "https://api.viessmann-platform.io/iot/v1/equipment/installations/#######/gateways/################/devices/0/features/heating.circuits.0.operating.programs.comfort/commands/deactivate", - "name": "deactivate", - "isExecutable": false, - "params": {} - } - }, - "components": [], - "apiVersion": 1, - "uri": "https://api.viessmann-platform.io/iot/v1/equipment/installations/#######/gateways/################/devices/0/features/heating.circuits.0.operating.programs.comfort", - "gatewayId": "################", - "feature": "heating.circuits.0.operating.programs.comfort", - "timestamp": "2021-09-02T11:30:21.704Z", - "isEnabled": true, - "isReady": true, - "deviceId": "0" - }, - { - "properties": {}, - "commands": {}, - "components": [], - "apiVersion": 1, - "uri": "https://api.viessmann-platform.io/iot/v1/equipment/installations/#######/gateways/################/devices/0/features/heating.solar.power.production", - "gatewayId": "################", - "feature": "heating.solar.power.production", - "timestamp": "2021-09-02T11:30:20.810Z", - "isEnabled": false, - "isReady": true, - "deviceId": "0" - }, - { - "properties": {}, - "commands": {}, - "components": [], - "apiVersion": 1, - 
"uri": "https://api.viessmann-platform.io/iot/v1/equipment/installations/#######/gateways/################/devices/0/features/heating.circuits.2.heating.schedule", - "gatewayId": "################", - "feature": "heating.circuits.2.heating.schedule", - "timestamp": "2021-09-02T11:30:20.488Z", - "isEnabled": false, - "isReady": true, - "deviceId": "0" - }, - { - "properties": { - "unit": { - "value": "celsius", - "type": "string" - }, - "status": { - "type": "string", - "value": "notConnected" - } - }, - "commands": {}, - "components": [], - "apiVersion": 1, - "uri": "https://api.viessmann-platform.io/iot/v1/equipment/installations/#######/gateways/################/devices/0/features/heating.dhw.sensors.temperature.hotWaterStorage.bottom", - "gatewayId": "################", - "feature": "heating.dhw.sensors.temperature.hotWaterStorage.bottom", - "timestamp": "2021-09-02T11:30:20.724Z", - "isEnabled": true, - "isReady": true, - "deviceId": "0" - }, - { - "properties": {}, - "commands": {}, - "components": [ - "active", - "comfort", - "eco", - "fixed", - "normal", - "reduced", - "standby" - ], - "apiVersion": 1, - "uri": "https://api.viessmann-platform.io/iot/v1/equipment/installations/#######/gateways/################/devices/0/features/heating.circuits.0.operating.programs", - "gatewayId": "################", - "feature": "heating.circuits.0.operating.programs", - "timestamp": "2021-09-02T11:30:19.048Z", - "isEnabled": true, - "isReady": true, - "deviceId": "0" - }, - { - "properties": {}, - "commands": {}, - "components": [], - "apiVersion": 1, - "uri": "https://api.viessmann-platform.io/iot/v1/equipment/installations/#######/gateways/################/devices/0/features/heating.circuits.1.operating.modes.heatingCooling", - "gatewayId": "################", - "feature": "heating.circuits.1.operating.modes.heatingCooling", - "timestamp": "2021-09-02T11:30:20.579Z", - "isEnabled": false, - "isReady": true, - "deviceId": "0" - }, - { - "properties": {}, - "commands": {}, - "components": [ - "temperature" - ], - "apiVersion": 1, - "uri": "https://api.viessmann-platform.io/iot/v1/equipment/installations/#######/gateways/################/devices/0/features/heating.circuits.0.sensors", - "gatewayId": "################", - "feature": "heating.circuits.0.sensors", - "timestamp": "2021-09-02T11:30:19.049Z", - "isEnabled": true, - "isReady": true, - "deviceId": "0" - }, - { - "properties": {}, - "commands": {}, - "components": [], - "apiVersion": 1, - "uri": "https://api.viessmann-platform.io/iot/v1/equipment/installations/#######/gateways/################/devices/0/features/heating.circuits.2.circulation.pump", - "gatewayId": "################", - "feature": "heating.circuits.2.circulation.pump", - "timestamp": "2021-09-02T11:30:20.743Z", - "isEnabled": false, - "isReady": true, - "deviceId": "0" - }, - { - "properties": { - "unit": { - "value": "celsius", - "type": "string" - }, - "status": { - "type": "string", - "value": "notConnected" - } - }, - "commands": {}, - "components": [], - "apiVersion": 1, - "uri": "https://api.viessmann-platform.io/iot/v1/equipment/installations/#######/gateways/################/devices/0/features/heating.boiler.sensors.temperature.commonSupply", - "gatewayId": "################", - "feature": "heating.boiler.sensors.temperature.commonSupply", - "timestamp": "2021-09-02T11:30:20.267Z", - "isEnabled": true, - "isReady": true, - "deviceId": "0" - }, - { - "properties": {}, - "commands": {}, - "components": [], - "apiVersion": 1, - "uri": 
"https://api.viessmann-platform.io/iot/v1/equipment/installations/#######/gateways/################/devices/0/features/heating.circuits.1.sensors.temperature.supply", - "gatewayId": "################", - "feature": "heating.circuits.1.sensors.temperature.supply", - "timestamp": "2021-09-02T11:30:20.656Z", - "isEnabled": false, - "isReady": true, - "deviceId": "0" - }, - { - "properties": { - "unit": { - "value": "celsius", - "type": "string" - }, - "value": { - "type": "number", - "value": 21.2, - "unit": "celsius" - }, - "status": { - "type": "string", - "value": "connected" - } - }, - "commands": {}, - "components": [], - "apiVersion": 1, - "uri": "https://api.viessmann-platform.io/iot/v1/equipment/installations/#######/gateways/################/devices/0/features/heating.secondaryCircuit.sensors.temperature.return", - "gatewayId": "################", - "feature": "heating.secondaryCircuit.sensors.temperature.return", - "timestamp": "2021-09-02T18:31:01.143Z", - "isEnabled": true, - "isReady": true, - "deviceId": "0" - }, - { - "properties": { - "active": { - "type": "boolean", - "value": false - } - }, - "commands": {}, - "components": [], - "apiVersion": 1, - "uri": "https://api.viessmann-platform.io/iot/v1/equipment/installations/#######/gateways/################/devices/0/features/heating.circuits.0.operating.modes.normalStandby", - "gatewayId": "################", - "feature": "heating.circuits.0.operating.modes.normalStandby", - "timestamp": "2021-09-02T11:30:20.512Z", - "isEnabled": true, - "isReady": true, - "deviceId": "0" - }, - { - "properties": { - "unit": { - "value": "celsius", - "type": "string" - }, - "value": { - "type": "number", - "value": 26.3, - "unit": "celsius" - }, - "status": { - "type": "string", - "value": "connected" - } - }, - "commands": {}, - "components": [], - "apiVersion": 1, - "uri": "https://api.viessmann-platform.io/iot/v1/equipment/installations/#######/gateways/################/devices/0/features/heating.circuits.0.sensors.temperature.supply", - "gatewayId": "################", - "feature": "heating.circuits.0.sensors.temperature.supply", - "timestamp": "2021-09-02T18:17:07.742Z", - "isEnabled": true, - "isReady": true, - "deviceId": "0" - }, - { - "properties": {}, - "commands": {}, - "components": [ - "temperature" - ], - "apiVersion": 1, - "uri": "https://api.viessmann-platform.io/iot/v1/equipment/installations/#######/gateways/################/devices/0/features/heating.compressors.0.sensors", - "gatewayId": "################", - "feature": "heating.compressors.0.sensors", - "timestamp": "2021-09-02T11:30:19.049Z", - "isEnabled": true, - "isReady": true, - "deviceId": "0" - }, - { - "properties": {}, - "commands": {}, - "components": [ - "active", - "basic", - "comfort", - "eco", - "holiday", - "intensive", - "reduced", - "standard", - "standby" - ], - "apiVersion": 1, - "uri": "https://api.viessmann-platform.io/iot/v1/equipment/installations/#######/gateways/################/devices/0/features/ventilation.operating.programs", - "gatewayId": "################", - "feature": "ventilation.operating.programs", - "timestamp": "2021-09-02T11:30:19.048Z", - "isEnabled": true, - "isReady": true, - "deviceId": "0" - }, - { - "properties": { - "value": { - "value": "ventilation", - "type": "string" - } - }, - "commands": { - "setMode": { - "uri": "https://api.viessmann-platform.io/iot/v1/equipment/installations/#######/gateways/################/devices/0/features/ventilation.operating.modes.active/commands/setMode", - "name": "setMode", - "isExecutable": 
true, - "params": { - "mode": { - "type": "string", - "required": true, - "constraints": { - "enum": [ - "standby", - "standard", - "ventilation" - ] - } - } - } - } - }, - "components": [], - "apiVersion": 1, - "uri": "https://api.viessmann-platform.io/iot/v1/equipment/installations/#######/gateways/################/devices/0/features/ventilation.operating.modes.active", - "gatewayId": "################", - "feature": "ventilation.operating.modes.active", - "timestamp": "2021-09-02T11:30:20.818Z", - "isEnabled": true, - "isReady": true, - "deviceId": "0" - }, - { - "properties": { - "active": { - "value": false, - "type": "boolean" - }, - "start": { - "value": "", - "type": "string" - }, - "end": { - "value": "", - "type": "string" - } - }, - "commands": { - "changeEndDate": { - "uri": "https://api.viessmann-platform.io/iot/v1/equipment/installations/#######/gateways/################/devices/0/features/ventilation.operating.programs.holiday/commands/changeEndDate", - "name": "changeEndDate", - "isExecutable": false, - "params": { - "end": { - "type": "string", - "required": true, - "constraints": { - "regEx": "^[\\d]{4}-[\\d]{2}-[\\d]{2}$", - "sameDayAllowed": false - } - } - } - }, - "schedule": { - "uri": "https://api.viessmann-platform.io/iot/v1/equipment/installations/#######/gateways/################/devices/0/features/ventilation.operating.programs.holiday/commands/schedule", - "name": "schedule", - "isExecutable": true, - "params": { - "start": { - "type": "string", - "required": true, - "constraints": { - "regEx": "^[\\d]{4}-[\\d]{2}-[\\d]{2}$" - } - }, - "end": { - "type": "string", - "required": true, - "constraints": { - "regEx": "^[\\d]{4}-[\\d]{2}-[\\d]{2}$", - "sameDayAllowed": false - } - } - } - }, - "unschedule": { - "uri": "https://api.viessmann-platform.io/iot/v1/equipment/installations/#######/gateways/################/devices/0/features/ventilation.operating.programs.holiday/commands/unschedule", - "name": "unschedule", - "isExecutable": true, - "params": {} - } - }, - "components": [], - "apiVersion": 1, - "uri": "https://api.viessmann-platform.io/iot/v1/equipment/installations/#######/gateways/################/devices/0/features/ventilation.operating.programs.holiday", - "gatewayId": "################", - "feature": "ventilation.operating.programs.holiday", - "timestamp": "2021-09-02T11:30:20.827Z", - "isEnabled": true, - "isReady": true, - "deviceId": "0" - }, - { - "properties": { - "active": { - "type": "boolean", - "value": true - }, - "status": { - "type": "string", - "value": "on" - } - }, - "commands": {}, - "components": [ - "charging", - "oneTimeCharge", - "schedule", - "sensors", - "temperature" - ], - "apiVersion": 1, - "uri": "https://api.viessmann-platform.io/iot/v1/equipment/installations/#######/gateways/################/devices/0/features/heating.dhw", - "gatewayId": "################", - "feature": "heating.dhw", - "timestamp": "2021-09-02T11:30:20.750Z", - "isEnabled": true, - "isReady": true, - "deviceId": "0" - }, - { - "properties": {}, - "commands": {}, - "components": [ - "modulation", - "statistics" - ], - "apiVersion": 1, - "uri": "https://api.viessmann-platform.io/iot/v1/equipment/installations/#######/gateways/################/devices/0/features/heating.burners.0", - "gatewayId": "################", - "feature": "heating.burners.0", - "timestamp": "2021-09-02T11:30:20.280Z", - "isEnabled": false, - "isReady": true, - "deviceId": "0" - }, - { - "properties": {}, - "commands": {}, - "components": [ - "active", - "comfort", - "eco", - 
"fixed", - "normal", - "reduced", - "standby" - ], - "apiVersion": 1, - "uri": "https://api.viessmann-platform.io/iot/v1/equipment/installations/#######/gateways/################/devices/0/features/heating.circuits.2.operating.programs", - "gatewayId": "################", - "feature": "heating.circuits.2.operating.programs", - "timestamp": "2021-09-02T11:30:19.048Z", - "isEnabled": true, - "isReady": true, - "deviceId": "0" - }, - { - "properties": {}, - "commands": {}, - "components": [ - "pump" - ], - "apiVersion": 1, - "uri": "https://api.viessmann-platform.io/iot/v1/equipment/installations/#######/gateways/################/devices/0/features/heating.circuits.0.circulation", - "gatewayId": "################", - "feature": "heating.circuits.0.circulation", - "timestamp": "2021-09-02T11:30:19.048Z", - "isEnabled": true, - "isReady": true, - "deviceId": "0" - }, - { - "properties": {}, - "commands": {}, - "components": [ - "active", - "standard", - "standby", - "ventilation" - ], - "apiVersion": 1, - "uri": "https://api.viessmann-platform.io/iot/v1/equipment/installations/#######/gateways/################/devices/0/features/ventilation.operating.modes", - "gatewayId": "################", - "feature": "ventilation.operating.modes", - "timestamp": "2021-09-02T11:30:19.048Z", - "isEnabled": true, - "isReady": true, - "deviceId": "0" - }, - { - "properties": {}, - "commands": {}, - "components": [], - "apiVersion": 1, - "uri": "https://api.viessmann-platform.io/iot/v1/equipment/installations/#######/gateways/################/devices/0/features/heating.circuits.2.operating.modes.dhw", - "gatewayId": "################", - "feature": "heating.circuits.2.operating.modes.dhw", - "timestamp": "2021-09-02T11:30:20.550Z", - "isEnabled": false, - "isReady": true, - "deviceId": "0" - }, - { - "properties": {}, - "commands": {}, - "components": [], - "apiVersion": 1, - "uri": "https://api.viessmann-platform.io/iot/v1/equipment/installations/#######/gateways/################/devices/0/features/heating.circuits.1.temperature.levels", - "gatewayId": "################", - "feature": "heating.circuits.1.temperature.levels", - "timestamp": "2021-09-02T11:30:20.872Z", - "isEnabled": false, - "isReady": true, - "deviceId": "0" - }, - { - "properties": { - "starts": { - "type": "number", - "value": 1244, - "unit": "" - }, - "hours": { - "type": "number", - "value": 1191.3, - "unit": "" - }, - "hoursLoadClassOne": { - "type": "number", - "value": 251, - "unit": "" - }, - "hoursLoadClassTwo": { - "type": "number", - "value": 337, - "unit": "" - }, - "hoursLoadClassThree": { - "type": "number", - "value": 307, - "unit": "" - }, - "hoursLoadClassFour": { - "type": "number", - "value": 240, - "unit": "" - }, - "hoursLoadClassFive": { - "type": "number", - "value": 22, - "unit": "" - } - }, - "commands": {}, - "components": [], - "apiVersion": 1, - "uri": "https://api.viessmann-platform.io/iot/v1/equipment/installations/#######/gateways/################/devices/0/features/heating.compressors.0.statistics", - "gatewayId": "################", - "feature": "heating.compressors.0.statistics", - "timestamp": "2021-09-02T11:30:20.248Z", - "isEnabled": true, - "isReady": true, - "deviceId": "0" - }, - { - "properties": { - "enabled": { - "value": [ - "0" - ], - "type": "array" - } - }, - "commands": {}, - "components": [ - "0", - "1", - "2" - ], - "apiVersion": 1, - "uri": "https://api.viessmann-platform.io/iot/v1/equipment/installations/#######/gateways/################/devices/0/features/heating.circuits", - "gatewayId": 
"################", - "feature": "heating.circuits", - "timestamp": "2021-09-02T11:30:20.402Z", - "isEnabled": true, - "isReady": true, - "deviceId": "0" - }, - { - "properties": {}, - "commands": {}, - "components": [ - "temperature", - "valve" - ], - "apiVersion": 1, - "uri": "https://api.viessmann-platform.io/iot/v1/equipment/installations/#######/gateways/################/devices/0/features/heating.sensors", - "gatewayId": "################", - "feature": "heating.sensors", - "timestamp": "2021-09-02T11:30:19.049Z", - "isEnabled": true, - "isReady": true, - "deviceId": "0" - }, - { - "properties": { - "value": { - "type": "number", - "value": 118, - "unit": "" - } - }, - "commands": {}, - "components": [], - "apiVersion": 1, - "uri": "https://api.viessmann-platform.io/iot/v1/equipment/installations/#######/gateways/################/devices/0/features/heating.device.time.offset", - "gatewayId": "################", - "feature": "heating.device.time.offset", - "timestamp": "2021-09-02T11:30:20.676Z", - "isEnabled": true, - "isReady": true, - "deviceId": "0" - }, - { - "properties": { - "unit": { - "value": "celsius", - "type": "string" - }, - "value": { - "type": "number", - "value": 15.6, - "unit": "celsius" - }, - "status": { - "type": "string", - "value": "connected" - } - }, - "commands": {}, - "components": [], - "apiVersion": 1, - "uri": "https://api.viessmann-platform.io/iot/v1/equipment/installations/#######/gateways/################/devices/0/features/heating.sensors.temperature.outside", - "gatewayId": "################", - "feature": "heating.sensors.temperature.outside", - "timestamp": "2021-09-02T18:31:19.344Z", - "isEnabled": true, - "isReady": true, - "deviceId": "0" - }, - { - "properties": {}, - "commands": {}, - "components": [], - "apiVersion": 1, - "uri": "https://api.viessmann-platform.io/iot/v1/equipment/installations/#######/gateways/################/devices/0/features/heating.circuits.1.operating.programs.normal", - "gatewayId": "################", - "feature": "heating.circuits.1.operating.programs.normal", - "timestamp": "2021-09-02T11:30:21.707Z", - "isEnabled": false, - "isReady": true, - "deviceId": "0" - }, - { - "properties": { - "status": { - "type": "string", - "value": "off" - } - }, - "commands": {}, - "components": [ - "schedule" - ], - "apiVersion": 1, - "uri": "https://api.viessmann-platform.io/iot/v1/equipment/installations/#######/gateways/################/devices/0/features/heating.dhw.pumps.circulation", - "gatewayId": "################", - "feature": "heating.dhw.pumps.circulation", - "timestamp": "2021-09-02T11:30:20.717Z", - "isEnabled": true, - "isReady": true, - "deviceId": "0" - }, - { - "properties": {}, - "commands": {}, - "components": [], - "apiVersion": 1, - "uri": "https://api.viessmann-platform.io/iot/v1/equipment/installations/#######/gateways/################/devices/0/features/heating.circuits.1.operating.modes.dhwAndHeating", - "gatewayId": "################", - "feature": "heating.circuits.1.operating.modes.dhwAndHeating", - "timestamp": "2021-09-02T11:30:20.567Z", - "isEnabled": false, - "isReady": true, - "deviceId": "0" - }, - { - "properties": { - "shift": { - "type": "number", - "unit": "", - "value": 0 - }, - "slope": { - "type": "number", - "unit": "", - "value": 0.6 - } - }, - "commands": { - "setCurve": { - "uri": "https://api.viessmann-platform.io/iot/v1/equipment/installations/#######/gateways/################/devices/0/features/heating.circuits.1.heating.curve/commands/setCurve", - "name": "setCurve", - 
"isExecutable": true, - "params": { - "slope": { - "type": "number", - "required": true, - "constraints": { - "min": 0, - "max": 3.5, - "stepping": 0.1 - } - }, - "shift": { - "type": "number", - "required": true, - "constraints": { - "min": -15, - "max": 40, - "stepping": 1 - } - } - } - } - }, - "components": [], - "apiVersion": 1, - "uri": "https://api.viessmann-platform.io/iot/v1/equipment/installations/#######/gateways/################/devices/0/features/heating.circuits.1.heating.curve", - "gatewayId": "################", - "feature": "heating.circuits.1.heating.curve", - "timestamp": "2021-09-02T11:30:20.481Z", - "isEnabled": true, - "isReady": true, - "deviceId": "0" - }, - { - "properties": { - "unit": { - "value": "celsius", - "type": "string" - }, - "value": { - "type": "number", - "value": 43.2, - "unit": "celsius" - }, - "status": { - "type": "string", - "value": "connected" - } - }, - "commands": {}, - "components": [], - "apiVersion": 1, - "uri": "https://api.viessmann-platform.io/iot/v1/equipment/installations/#######/gateways/################/devices/0/features/heating.dhw.sensors.temperature.hotWaterStorage.top", - "gatewayId": "################", - "feature": "heating.dhw.sensors.temperature.hotWaterStorage.top", - "timestamp": "2021-09-02T18:24:29.029Z", - "isEnabled": true, - "isReady": true, - "deviceId": "0" - }, - { - "properties": {}, - "commands": {}, - "components": [], - "apiVersion": 1, - "uri": "https://api.viessmann-platform.io/iot/v1/equipment/installations/#######/gateways/################/devices/0/features/heating.burners.0.statistics", - "gatewayId": "################", - "feature": "heating.burners.0.statistics", - "timestamp": "2021-09-02T11:30:20.461Z", - "isEnabled": false, - "isReady": true, - "deviceId": "0" - }, - { - "properties": {}, - "commands": {}, - "components": [ - "active", - "cooling", - "dhw", - "dhwAndHeating", - "dhwAndHeatingCooling", - "heating", - "heatingCooling", - "normalStandby", - "standby" - ], - "apiVersion": 1, - "uri": "https://api.viessmann-platform.io/iot/v1/equipment/installations/#######/gateways/################/devices/0/features/heating.circuits.2.operating.modes", - "gatewayId": "################", - "feature": "heating.circuits.2.operating.modes", - "timestamp": "2021-09-02T11:30:19.049Z", - "isEnabled": true, - "isReady": true, - "deviceId": "0" - }, - { - "properties": { - "value": { - "type": "string", - "value": "################" - } - }, - "commands": {}, - "components": [], - "apiVersion": 1, - "uri": "https://api.viessmann-platform.io/iot/v1/equipment/installations/#######/gateways/################/devices/0/features/heating.controller.serial", - "gatewayId": "################", - "feature": "heating.controller.serial", - "timestamp": "2021-09-02T11:30:20.674Z", - "isEnabled": true, - "isReady": true, - "deviceId": "0" - }, - { - "properties": { - "value": { - "value": 2.5, - "unit": "", - "type": "number" - } - }, - "commands": { - "setHysteresis": { - "uri": "https://api.viessmann-platform.io/iot/v1/equipment/installations/#######/gateways/################/devices/0/features/heating.dhw.temperature.hysteresis/commands/setHysteresis", - "name": "setHysteresis", - "isExecutable": true, - "params": { - "hysteresis": { - "type": "number", - "required": true, - "constraints": { - "min": 1, - "max": 10, - "stepping": 0.5 - } - } - } - } - }, - "components": [], - "apiVersion": 1, - "uri": 
"https://api.viessmann-platform.io/iot/v1/equipment/installations/#######/gateways/################/devices/0/features/heating.dhw.temperature.hysteresis", - "gatewayId": "################", - "feature": "heating.dhw.temperature.hysteresis", - "timestamp": "2021-09-02T11:30:20.732Z", - "isEnabled": true, - "isReady": true, - "deviceId": "0" - }, - { - "properties": {}, - "commands": {}, - "components": [], - "apiVersion": 1, - "uri": "https://api.viessmann-platform.io/iot/v1/equipment/installations/#######/gateways/################/devices/0/features/heating.circuits.2.temperature.levels", - "gatewayId": "################", - "feature": "heating.circuits.2.temperature.levels", - "timestamp": "2021-09-02T11:30:20.876Z", - "isEnabled": false, - "isReady": true, - "deviceId": "0" - }, - { - "properties": {}, - "commands": {}, - "components": [ - "active", - "cooling", - "dhw", - "dhwAndHeating", - "dhwAndHeatingCooling", - "heating", - "heatingCooling", - "normalStandby", - "standby" - ], - "apiVersion": 1, - "uri": "https://api.viessmann-platform.io/iot/v1/equipment/installations/#######/gateways/################/devices/0/features/heating.circuits.1.operating.modes", - "gatewayId": "################", - "feature": "heating.circuits.1.operating.modes", - "timestamp": "2021-09-02T11:30:19.049Z", - "isEnabled": true, - "isReady": true, - "deviceId": "0" - }, - { - "properties": { - "value": { - "type": "number", - "value": 0, - "unit": "" - } - }, - "commands": {}, - "components": [ - "levels" - ], - "apiVersion": 1, - "uri": "https://api.viessmann-platform.io/iot/v1/equipment/installations/#######/gateways/################/devices/0/features/heating.circuits.2.temperature", - "gatewayId": "################", - "feature": "heating.circuits.2.temperature", - "timestamp": "2021-09-02T11:30:20.865Z", - "isEnabled": true, - "isReady": true, - "deviceId": "0" - }, - { - "properties": { - "active": { - "value": true, - "type": "boolean" - }, - "entries": { - "value": { - "mon": [ - { - "start": "00:00", - "end": "24:00", - "mode": "normal", - "position": 0 - } - ], - "tue": [ - { - "start": "00:00", - "end": "24:00", - "mode": "normal", - "position": 0 - } - ], - "wed": [ - { - "start": "00:00", - "end": "24:00", - "mode": "normal", - "position": 0 - } - ], - "thu": [ - { - "start": "00:00", - "end": "24:00", - "mode": "normal", - "position": 0 - } - ], - "fri": [ - { - "start": "00:00", - "end": "24:00", - "mode": "normal", - "position": 0 - } - ], - "sat": [ - { - "start": "00:00", - "end": "24:00", - "mode": "normal", - "position": 0 - } - ], - "sun": [ - { - "start": "00:00", - "end": "24:00", - "mode": "normal", - "position": 0 - } - ] - }, - "type": "Schedule" - } - }, - "commands": { - "setSchedule": { - "uri": "https://api.viessmann-platform.io/iot/v1/equipment/installations/#######/gateways/################/devices/0/features/heating.circuits.0.heating.schedule/commands/setSchedule", - "name": "setSchedule", - "isExecutable": true, - "params": { - "newSchedule": { - "type": "Schedule", - "required": true, - "constraints": { - "modes": [ - "reduced", - "normal", - "fixed" - ], - "maxEntries": 8, - "resolution": 10, - "defaultMode": "standby", - "overlapAllowed": true - } - } - } - } - }, - "components": [], - "apiVersion": 1, - "uri": "https://api.viessmann-platform.io/iot/v1/equipment/installations/#######/gateways/################/devices/0/features/heating.circuits.0.heating.schedule", - "gatewayId": "################", - "feature": "heating.circuits.0.heating.schedule", - "timestamp": 
"2021-09-02T11:30:20.484Z", - "isEnabled": true, - "isReady": true, - "deviceId": "0" - }, - { - "properties": {}, - "commands": {}, - "components": [], - "apiVersion": 1, - "uri": "https://api.viessmann-platform.io/iot/v1/equipment/installations/#######/gateways/################/devices/0/features/heating.circuits.1.operating.programs.fixed", - "gatewayId": "################", - "feature": "heating.circuits.1.operating.programs.fixed", - "timestamp": "2021-09-02T11:30:20.639Z", - "isEnabled": false, - "isReady": true, - "deviceId": "0" - }, - { - "properties": {}, - "commands": {}, - "components": [ - "heat", - "statistics" - ], - "apiVersion": 1, - "uri": "https://api.viessmann-platform.io/iot/v1/equipment/installations/#######/gateways/################/devices/0/features/heating.compressors.1", - "gatewayId": "################", - "feature": "heating.compressors.1", - "timestamp": "2021-09-02T11:30:19.902Z", - "isEnabled": false, - "isReady": true, - "deviceId": "0" - }, - { - "properties": { - "active": { - "type": "boolean", - "value": true - } - }, - "commands": {}, - "components": [], - "apiVersion": 1, - "uri": "https://api.viessmann-platform.io/iot/v1/equipment/installations/#######/gateways/################/devices/0/features/heating.circuits.0.operating.modes.dhwAndHeatingCooling", - "gatewayId": "################", - "feature": "heating.circuits.0.operating.modes.dhwAndHeatingCooling", - "timestamp": "2021-09-02T11:30:20.556Z", - "isEnabled": true, - "isReady": true, - "deviceId": "0" - }, - { - "properties": {}, - "commands": {}, - "components": [], - "apiVersion": 1, - "uri": "https://api.viessmann-platform.io/iot/v1/equipment/installations/#######/gateways/################/devices/0/features/heating.circuits.1.operating.programs.active", - "gatewayId": "################", - "feature": "heating.circuits.1.operating.programs.active", - "timestamp": "2021-09-02T11:30:20.746Z", - "isEnabled": false, - "isReady": true, - "deviceId": "0" - }, - { - "properties": {}, - "commands": {}, - "components": [ - "room", - "supply" - ], - "apiVersion": 1, - "uri": "https://api.viessmann-platform.io/iot/v1/equipment/installations/#######/gateways/################/devices/0/features/heating.circuits.1.sensors.temperature", - "gatewayId": "################", - "feature": "heating.circuits.1.sensors.temperature", - "timestamp": "2021-09-02T11:30:19.049Z", - "isEnabled": true, - "isReady": true, - "deviceId": "0" - }, - { - "properties": { - "active": { - "value": true, - "type": "boolean" - }, - "temperature": { - "value": 20, - "unit": "celsius", - "type": "number" - }, - "unit": { - "value": "celsius", - "type": "string" - } - }, - "commands": { - "setTemperature": { - "uri": "https://api.viessmann-platform.io/iot/v1/equipment/installations/#######/gateways/################/devices/0/features/ventilation.operating.programs.standard/commands/setTemperature", - "name": "setTemperature", - "isExecutable": true, - "params": { - "targetTemperature": { - "type": "number", - "required": true, - "constraints": { - "min": 10, - "max": 30, - "stepping": 1 - } - } - } - } - }, - "components": [], - "apiVersion": 1, - "uri": "https://api.viessmann-platform.io/iot/v1/equipment/installations/#######/gateways/################/devices/0/features/ventilation.operating.programs.standard", - "gatewayId": "################", - "feature": "ventilation.operating.programs.standard", - "timestamp": "2021-09-02T11:30:20.835Z", - "isEnabled": true, - "isReady": true, - "deviceId": "0" - }, - { - "properties": { - 
"min": { - "value": 15, - "unit": "celsius", - "type": "number" - }, - "minUnit": { - "value": "celsius", - "type": "string" - }, - "max": { - "value": 23, - "unit": "celsius", - "type": "number" - }, - "maxUnit": { - "value": "celsius", - "type": "string" - } - }, - "commands": { - "setMin": { - "uri": "https://api.viessmann-platform.io/iot/v1/equipment/installations/#######/gateways/################/devices/0/features/heating.circuits.0.temperature.levels/commands/setMin", - "name": "setMin", - "isExecutable": true, - "params": { - "temperature": { - "type": "number", - "required": true, - "constraints": { - "min": 1, - "max": 30, - "stepping": 1 - } - } - } - }, - "setMax": { - "uri": "https://api.viessmann-platform.io/iot/v1/equipment/installations/#######/gateways/################/devices/0/features/heating.circuits.0.temperature.levels/commands/setMax", - "name": "setMax", - "isExecutable": true, - "params": { - "temperature": { - "type": "number", - "required": true, - "constraints": { - "min": 10, - "max": 70, - "stepping": 1 - } - } - } - }, - "setLevels": { - "uri": "https://api.viessmann-platform.io/iot/v1/equipment/installations/#######/gateways/################/devices/0/features/heating.circuits.0.temperature.levels/commands/setLevels", - "name": "setLevels", - "isExecutable": true, - "params": { - "minTemperature": { - "type": "number", - "required": true, - "constraints": { - "min": 1, - "max": 30, - "stepping": 1 - } - }, - "maxTemperature": { - "type": "number", - "required": true, - "constraints": { - "min": 10, - "max": 70, - "stepping": 1 - } - } - } - } - }, - "components": [], - "apiVersion": 1, - "uri": "https://api.viessmann-platform.io/iot/v1/equipment/installations/#######/gateways/################/devices/0/features/heating.circuits.0.temperature.levels", - "gatewayId": "################", - "feature": "heating.circuits.0.temperature.levels", - "timestamp": "2021-09-02T11:30:20.868Z", - "isEnabled": true, - "isReady": true, - "deviceId": "0" - }, - { - "properties": {}, - "commands": {}, - "components": [], - "apiVersion": 1, - "uri": "https://api.viessmann-platform.io/iot/v1/equipment/installations/#######/gateways/################/devices/0/features/heating.circuits.2.operating.modes.standby", - "gatewayId": "################", - "feature": "heating.circuits.2.operating.modes.standby", - "timestamp": "2021-09-02T11:30:20.627Z", - "isEnabled": false, - "isReady": true, - "deviceId": "0" - }, - { - "properties": {}, - "commands": {}, - "components": [ - "temperature" - ], - "apiVersion": 1, - "uri": "https://api.viessmann-platform.io/iot/v1/equipment/installations/#######/gateways/################/devices/0/features/heating.solar.sensors", - "gatewayId": "################", - "feature": "heating.solar.sensors", - "timestamp": "2021-09-02T11:30:19.050Z", - "isEnabled": true, - "isReady": true, - "deviceId": "0" - }, - { - "properties": {}, - "commands": {}, - "components": [], - "apiVersion": 1, - "uri": "https://api.viessmann-platform.io/iot/v1/equipment/installations/#######/gateways/################/devices/0/features/heating.circuits.2.operating.modes.cooling", - "gatewayId": "################", - "feature": "heating.circuits.2.operating.modes.cooling", - "timestamp": "2021-09-02T11:30:20.509Z", - "isEnabled": false, - "isReady": true, - "deviceId": "0" - }, - { - "properties": {}, - "commands": {}, - "components": [], - "apiVersion": 1, - "uri": 
"https://api.viessmann-platform.io/iot/v1/equipment/installations/#######/gateways/################/devices/0/features/heating.solar.pumps.circuit", - "gatewayId": "################", - "feature": "heating.solar.pumps.circuit", - "timestamp": "2021-09-02T11:30:20.848Z", - "isEnabled": false, - "isReady": true, - "deviceId": "0" - }, - { - "properties": { - "active": { - "type": "boolean", - "value": true - } - }, - "commands": {}, - "components": [ - "operating", - "schedule" + "data": [ + { + "properties": {}, + "commands": {}, + "components": [], + "apiVersion": 1, + "uri": "https://api.viessmann-platform.io/iot/v1/equipment/installations/#######/gateways/################/devices/0/features/heating.circuits.2.operating.programs.normal", + "gatewayId": "################", + "feature": "heating.circuits.2.operating.programs.normal", + "timestamp": "2021-09-02T11:30:21.708Z", + "isEnabled": false, + "isReady": true, + "deviceId": "0" + }, + { + "properties": {}, + "commands": {}, + "components": ["sensors", "serial"], + "apiVersion": 1, + "uri": "https://api.viessmann-platform.io/iot/v1/equipment/installations/#######/gateways/################/devices/0/features/heating.boiler", + "gatewayId": "################", + "feature": "heating.boiler", + "timestamp": "2021-09-02T11:30:19.049Z", + "isEnabled": true, + "isReady": true, + "deviceId": "0" + }, + { + "properties": {}, + "commands": {}, + "components": [], + "apiVersion": 1, + "uri": "https://api.viessmann-platform.io/iot/v1/equipment/installations/#######/gateways/################/devices/0/features/heating.dhw.sensors", + "gatewayId": "################", + "feature": "heating.dhw.sensors", + "timestamp": "2021-09-02T11:30:19.050Z", + "isEnabled": true, + "isReady": true, + "deviceId": "0" + }, + { + "properties": {}, + "commands": {}, + "components": ["0"], + "apiVersion": 1, + "uri": "https://api.viessmann-platform.io/iot/v1/equipment/installations/#######/gateways/################/devices/0/features/heating.condensors", + "gatewayId": "################", + "feature": "heating.condensors", + "timestamp": "2021-09-02T11:30:19.049Z", + "isEnabled": true, + "isReady": true, + "deviceId": "0" + }, + { + "properties": { + "shift": { + "type": "number", + "unit": "", + "value": -5 + }, + "slope": { + "type": "number", + "unit": "", + "value": 0 + } + }, + "commands": { + "setCurve": { + "uri": "https://api.viessmann-platform.io/iot/v1/equipment/installations/#######/gateways/################/devices/0/features/heating.circuits.0.heating.curve/commands/setCurve", + "name": "setCurve", + "isExecutable": true, + "params": { + "slope": { + "type": "number", + "required": true, + "constraints": { + "min": 0, + "max": 3.5, + "stepping": 0.1 + } + }, + "shift": { + "type": "number", + "required": true, + "constraints": { + "min": -15, + "max": 40, + "stepping": 1 + } + } + } + } + }, + "components": [], + "apiVersion": 1, + "uri": "https://api.viessmann-platform.io/iot/v1/equipment/installations/#######/gateways/################/devices/0/features/heating.circuits.0.heating.curve", + "gatewayId": "################", + "feature": "heating.circuits.0.heating.curve", + "timestamp": "2021-09-02T11:30:20.479Z", + "isEnabled": true, + "isReady": true, + "deviceId": "0" + }, + { + "properties": {}, + "commands": {}, + "components": [], + "apiVersion": 1, + "uri": "https://api.viessmann-platform.io/iot/v1/equipment/installations/#######/gateways/################/devices/0/features/heating.circuits.1.operating.modes.dhw", + "gatewayId": "################", 
+ "feature": "heating.circuits.1.operating.modes.dhw", + "timestamp": "2021-09-02T11:30:20.547Z", + "isEnabled": false, + "isReady": true, + "deviceId": "0" + }, + { + "properties": { + "enabled": { + "value": ["0"], + "type": "array" + } + }, + "commands": {}, + "components": ["0", "1"], + "apiVersion": 1, + "uri": "https://api.viessmann-platform.io/iot/v1/equipment/installations/#######/gateways/################/devices/0/features/heating.compressors", + "gatewayId": "################", + "feature": "heating.compressors", + "timestamp": "2021-09-02T11:30:19.903Z", + "isEnabled": true, + "isReady": true, + "deviceId": "0" + }, + { + "properties": {}, + "commands": {}, + "components": [], + "apiVersion": 1, + "uri": "https://api.viessmann-platform.io/iot/v1/equipment/installations/#######/gateways/################/devices/0/features/heating.circuits.1.operating.programs.reduced", + "gatewayId": "################", + "feature": "heating.circuits.1.operating.programs.reduced", + "timestamp": "2021-09-02T11:30:21.710Z", + "isEnabled": false, + "isReady": true, + "deviceId": "0" + }, + { + "properties": {}, + "commands": {}, + "components": [], + "apiVersion": 1, + "uri": "https://api.viessmann-platform.io/iot/v1/equipment/installations/#######/gateways/################/devices/0/features/heating.circuits.1.operating.programs.standby", + "gatewayId": "################", + "feature": "heating.circuits.1.operating.programs.standby", + "timestamp": "2021-09-02T11:30:20.647Z", + "isEnabled": false, + "isReady": true, + "deviceId": "0" + }, + { + "properties": {}, + "commands": {}, + "components": [ + "circulation", + "frostprotection", + "heating", + "operating", + "sensors", + "temperature" + ], + "apiVersion": 1, + "uri": "https://api.viessmann-platform.io/iot/v1/equipment/installations/#######/gateways/################/devices/0/features/heating.circuits.1", + "gatewayId": "################", + "feature": "heating.circuits.1", + "timestamp": "2021-09-02T11:30:20.348Z", + "isEnabled": false, + "isReady": true, + "deviceId": "0" + }, + { + "properties": {}, + "commands": {}, + "components": [], + "apiVersion": 1, + "uri": "https://api.viessmann-platform.io/iot/v1/equipment/installations/#######/gateways/################/devices/0/features/heating.circuits.2.operating.programs.fixed", + "gatewayId": "################", + "feature": "heating.circuits.2.operating.programs.fixed", + "timestamp": "2021-09-02T11:30:20.640Z", + "isEnabled": false, + "isReady": true, + "deviceId": "0" + }, + { + "properties": { + "active": { + "value": true, + "type": "boolean" + }, + "entries": { + "value": { + "mon": [], + "tue": [], + "wed": [], + "thu": [], + "fri": [], + "sat": [], + "sun": [] + }, + "type": "Schedule" + } + }, + "commands": { + "setSchedule": { + "uri": "https://api.viessmann-platform.io/iot/v1/equipment/installations/#######/gateways/################/devices/0/features/heating.dhw.pumps.circulation.schedule/commands/setSchedule", + "name": "setSchedule", + "isExecutable": true, + "params": { + "newSchedule": { + "type": "Schedule", + "required": true, + "constraints": { + "modes": ["5/25-cycles", "5/10-cycles", "on"], + "maxEntries": 8, + "resolution": 10, + "defaultMode": "off", + "overlapAllowed": true + } + } + } + } + }, + "components": [], + "apiVersion": 1, + "uri": "https://api.viessmann-platform.io/iot/v1/equipment/installations/#######/gateways/################/devices/0/features/heating.dhw.pumps.circulation.schedule", + "gatewayId": "################", + "feature": 
"heating.dhw.pumps.circulation.schedule", + "timestamp": "2021-09-02T11:30:20.751Z", + "isEnabled": true, + "isReady": true, + "deviceId": "0" + }, + { + "properties": {}, + "commands": {}, + "components": [], + "apiVersion": 1, + "uri": "https://api.viessmann-platform.io/iot/v1/equipment/installations/#######/gateways/################/devices/0/features/heating.circuits.0.operating.modes.heatingCooling", + "gatewayId": "################", + "feature": "heating.circuits.0.operating.modes.heatingCooling", + "timestamp": "2021-09-02T11:30:20.576Z", + "isEnabled": false, + "isReady": true, + "deviceId": "0" + }, + { + "properties": {}, + "commands": {}, + "components": [], + "apiVersion": 1, + "uri": "https://api.viessmann-platform.io/iot/v1/equipment/installations/#######/gateways/################/devices/0/features/heating.circuits.1.sensors.temperature.room", + "gatewayId": "################", + "feature": "heating.circuits.1.sensors.temperature.room", + "timestamp": "2021-09-02T11:30:20.651Z", + "isEnabled": false, + "isReady": true, + "deviceId": "0" + }, + { + "properties": {}, + "commands": {}, + "components": [], + "apiVersion": 1, + "uri": "https://api.viessmann-platform.io/iot/v1/equipment/installations/#######/gateways/################/devices/0/features/heating.configuration.heatingRod", + "gatewayId": "################", + "feature": "heating.configuration.heatingRod", + "timestamp": "2021-09-02T11:30:19.048Z", + "isEnabled": true, + "isReady": true, + "deviceId": "0" + }, + { + "properties": {}, + "commands": {}, + "components": [], + "apiVersion": 1, + "uri": "https://api.viessmann-platform.io/iot/v1/equipment/installations/#######/gateways/################/devices/0/features/heating.burner", + "gatewayId": "################", + "feature": "heating.burner", + "timestamp": "2021-09-02T11:30:20.278Z", + "isEnabled": false, + "isReady": true, + "deviceId": "0" + }, + { + "properties": {}, + "commands": {}, + "components": [], + "apiVersion": 1, + "uri": "https://api.viessmann-platform.io/iot/v1/equipment/installations/#######/gateways/################/devices/0/features/heating.solar.sensors.temperature.dhw", + "gatewayId": "################", + "feature": "heating.solar.sensors.temperature.dhw", + "timestamp": "2021-09-02T11:30:20.808Z", + "isEnabled": false, + "isReady": true, + "deviceId": "0" + }, + { + "properties": { + "active": { + "type": "boolean", + "value": false + } + }, + "commands": {}, + "components": [], + "apiVersion": 1, + "uri": "https://api.viessmann-platform.io/iot/v1/equipment/installations/#######/gateways/################/devices/0/features/ventilation.operating.modes.standard", + "gatewayId": "################", + "feature": "ventilation.operating.modes.standard", + "timestamp": "2021-09-02T11:30:20.822Z", + "isEnabled": true, + "isReady": true, + "deviceId": "0" + }, + { + "properties": {}, + "commands": {}, + "components": [], + "apiVersion": 1, + "uri": "https://api.viessmann-platform.io/iot/v1/equipment/installations/#######/gateways/################/devices/0/features/heating.circuits.2.sensors.temperature.supply", + "gatewayId": "################", + "feature": "heating.circuits.2.sensors.temperature.supply", + "timestamp": "2021-09-02T11:30:20.658Z", + "isEnabled": false, + "isReady": true, + "deviceId": "0" + }, + { + "properties": {}, + "commands": {}, + "components": [], + "apiVersion": 1, + "uri": 
"https://api.viessmann-platform.io/iot/v1/equipment/installations/#######/gateways/################/devices/0/features/heating.compressors.1.statistics", + "gatewayId": "################", + "feature": "heating.compressors.1.statistics", + "timestamp": "2021-09-02T11:30:20.250Z", + "isEnabled": false, + "isReady": true, + "deviceId": "0" + }, + { + "properties": {}, + "commands": {}, + "components": [], + "apiVersion": 1, + "uri": "https://api.viessmann-platform.io/iot/v1/equipment/installations/#######/gateways/################/devices/0/features/heating.circuits.2.operating.modes.dhwAndHeatingCooling", + "gatewayId": "################", + "feature": "heating.circuits.2.operating.modes.dhwAndHeatingCooling", + "timestamp": "2021-09-02T11:30:20.562Z", + "isEnabled": false, + "isReady": true, + "deviceId": "0" + }, + { + "properties": {}, + "commands": {}, + "components": ["pump"], + "apiVersion": 1, + "uri": "https://api.viessmann-platform.io/iot/v1/equipment/installations/#######/gateways/################/devices/0/features/heating.circuits.2.circulation", + "gatewayId": "################", + "feature": "heating.circuits.2.circulation", + "timestamp": "2021-09-02T11:30:19.050Z", + "isEnabled": true, + "isReady": true, + "deviceId": "0" + }, + { + "properties": {}, + "commands": {}, + "components": [], + "apiVersion": 1, + "uri": "https://api.viessmann-platform.io/iot/v1/equipment/installations/#######/gateways/################/devices/0/features/heating.circuits.1.operating.modes.active", + "gatewayId": "################", + "feature": "heating.circuits.1.operating.modes.active", + "timestamp": "2021-09-02T11:30:20.772Z", + "isEnabled": false, + "isReady": true, + "deviceId": "0" + }, + { + "properties": { + "active": { + "type": "boolean", + "value": false + } + }, + "commands": {}, + "components": [], + "apiVersion": 1, + "uri": "https://api.viessmann-platform.io/iot/v1/equipment/installations/#######/gateways/################/devices/0/features/ventilation.operating.programs.standby", + "gatewayId": "################", + "feature": "ventilation.operating.programs.standby", + "timestamp": "2021-09-02T11:30:20.830Z", + "isEnabled": true, + "isReady": true, + "deviceId": "0" + }, + { + "properties": {}, + "commands": {}, + "components": [], + "apiVersion": 1, + "uri": "https://api.viessmann-platform.io/iot/v1/equipment/installations/#######/gateways/################/devices/0/features/heating.circuits.1.operating.modes.dhwAndHeatingCooling", + "gatewayId": "################", + "feature": "heating.circuits.1.operating.modes.dhwAndHeatingCooling", + "timestamp": "2021-09-02T11:30:20.559Z", + "isEnabled": false, + "isReady": true, + "deviceId": "0" + }, + { + "properties": { + "active": { + "value": true, + "type": "boolean" + }, + "entries": { + "value": { + "mon": [ + { + "start": "00:00", + "end": "07:00", + "mode": "reduced", + "position": 0 + }, + { + "start": "07:00", + "end": "08:00", + "mode": "standard", + "position": 1 + }, + { + "start": "08:00", + "end": "21:00", + "mode": "standard", + "position": 2 + }, + { + "start": "21:00", + "end": "24:00", + "mode": "reduced", + "position": 3 + } ], - "apiVersion": 1, - "uri": "https://api.viessmann-platform.io/iot/v1/equipment/installations/#######/gateways/################/devices/0/features/ventilation", - "gatewayId": "################", - "feature": "ventilation", - "timestamp": "2021-09-02T11:30:20.812Z", - "isEnabled": true, - "isReady": true, - "deviceId": "0" - }, - { - "properties": { - "unit": { - "value": "celsius", - "type": 
"string" - }, - "status": { - "type": "string", - "value": "notConnected" - } - }, - "commands": {}, - "components": [], - "apiVersion": 1, - "uri": "https://api.viessmann-platform.io/iot/v1/equipment/installations/#######/gateways/################/devices/0/features/heating.buffer.sensors.temperature.top", - "gatewayId": "################", - "feature": "heating.buffer.sensors.temperature.top", - "timestamp": "2021-09-02T11:30:20.270Z", - "isEnabled": true, - "isReady": true, - "deviceId": "0" - }, - { - "properties": {}, - "commands": {}, - "components": [ - "temperature" + "tue": [ + { + "start": "00:00", + "end": "07:00", + "mode": "reduced", + "position": 0 + }, + { + "start": "07:00", + "end": "08:00", + "mode": "standard", + "position": 1 + }, + { + "start": "08:00", + "end": "21:00", + "mode": "standard", + "position": 2 + }, + { + "start": "21:00", + "end": "24:00", + "mode": "reduced", + "position": 3 + } ], - "apiVersion": 1, - "uri": "https://api.viessmann-platform.io/iot/v1/equipment/installations/#######/gateways/################/devices/0/features/heating.circuits.2.sensors", - "gatewayId": "################", - "feature": "heating.circuits.2.sensors", - "timestamp": "2021-09-02T11:30:19.050Z", - "isEnabled": true, - "isReady": true, - "deviceId": "0" - }, - { - "properties": {}, - "commands": {}, - "components": [], - "apiVersion": 1, - "uri": "https://api.viessmann-platform.io/iot/v1/equipment/installations/#######/gateways/################/devices/0/features/heating.compressors.1.heat.production", - "gatewayId": "################", - "feature": "heating.compressors.1.heat.production", - "timestamp": "2021-09-02T11:30:19.049Z", - "isEnabled": true, - "isReady": true, - "deviceId": "0" - }, - { - "properties": {}, - "commands": {}, - "components": [ - "offset" + "wed": [ + { + "start": "00:00", + "end": "07:00", + "mode": "reduced", + "position": 0 + }, + { + "start": "07:00", + "end": "08:00", + "mode": "standard", + "position": 1 + }, + { + "start": "08:00", + "end": "21:00", + "mode": "standard", + "position": 2 + }, + { + "start": "21:00", + "end": "24:00", + "mode": "reduced", + "position": 3 + } ], - "apiVersion": 1, - "uri": "https://api.viessmann-platform.io/iot/v1/equipment/installations/#######/gateways/################/devices/0/features/heating.device.time", - "gatewayId": "################", - "feature": "heating.device.time", - "timestamp": "2021-09-02T11:30:19.050Z", - "isEnabled": true, - "isReady": true, - "deviceId": "0" - }, - { - "properties": {}, - "commands": {}, - "components": [ - "room", - "supply" + "thu": [ + { + "start": "00:00", + "end": "07:00", + "mode": "reduced", + "position": 0 + }, + { + "start": "07:00", + "end": "08:00", + "mode": "standard", + "position": 1 + }, + { + "start": "08:00", + "end": "21:00", + "mode": "standard", + "position": 2 + }, + { + "start": "21:00", + "end": "24:00", + "mode": "reduced", + "position": 3 + } ], - "apiVersion": 1, - "uri": "https://api.viessmann-platform.io/iot/v1/equipment/installations/#######/gateways/################/devices/0/features/heating.circuits.0.sensors.temperature", - "gatewayId": "################", - "feature": "heating.circuits.0.sensors.temperature", - "timestamp": "2021-09-02T11:30:19.049Z", - "isEnabled": true, - "isReady": true, - "deviceId": "0" - }, - { - "properties": { - "active": { - "value": true, - "type": "boolean" - }, - "entries": { - "value": { - "mon": [ - { - "start": "00:00", - "end": "24:00", - "mode": "normal", - "position": 0 - } - ], - "tue": [ - { - "start": 
"00:00", - "end": "24:00", - "mode": "normal", - "position": 0 - } - ], - "wed": [ - { - "start": "00:00", - "end": "24:00", - "mode": "normal", - "position": 0 - } - ], - "thu": [ - { - "start": "00:00", - "end": "24:00", - "mode": "normal", - "position": 0 - } - ], - "fri": [ - { - "start": "00:00", - "end": "24:00", - "mode": "normal", - "position": 0 - } - ], - "sat": [ - { - "start": "00:00", - "end": "24:00", - "mode": "normal", - "position": 0 - } - ], - "sun": [ - { - "start": "00:00", - "end": "24:00", - "mode": "normal", - "position": 0 - } - ] - }, - "type": "Schedule" - } - }, - "commands": { - "setSchedule": { - "uri": "https://api.viessmann-platform.io/iot/v1/equipment/installations/#######/gateways/################/devices/0/features/heating.dhw.schedule/commands/setSchedule", - "name": "setSchedule", - "isExecutable": true, - "params": { - "newSchedule": { - "type": "Schedule", - "required": true, - "constraints": { - "modes": [ - "top", - "normal", - "temp-2" - ], - "maxEntries": 8, - "resolution": 10, - "defaultMode": "off", - "overlapAllowed": true - } - } - } - } - }, - "components": [], - "apiVersion": 1, - "uri": "https://api.viessmann-platform.io/iot/v1/equipment/installations/#######/gateways/################/devices/0/features/heating.dhw.schedule", - "gatewayId": "################", - "feature": "heating.dhw.schedule", - "timestamp": "2021-09-02T11:30:20.752Z", - "isEnabled": true, - "isReady": true, - "deviceId": "0" - }, - { - "properties": { - "value": { - "value": 45, - "unit": "", - "type": "number" - } - }, - "commands": { - "setTargetTemperature": { - "uri": "https://api.viessmann-platform.io/iot/v1/equipment/installations/#######/gateways/################/devices/0/features/heating.dhw.temperature.main/commands/setTargetTemperature", - "name": "setTargetTemperature", - "isExecutable": true, - "params": { - "temperature": { - "type": "number", - "required": true, - "constraints": { - "min": 10, - "efficientLowerBorder": 10, - "efficientUpperBorder": 60, - "max": 60, - "stepping": 1 - } - } - } - } - }, - "components": [], - "apiVersion": 1, - "uri": "https://api.viessmann-platform.io/iot/v1/equipment/installations/#######/gateways/################/devices/0/features/heating.dhw.temperature.main", - "gatewayId": "################", - "feature": "heating.dhw.temperature.main", - "timestamp": "2021-09-02T11:30:19.868Z", - "isEnabled": true, - "isReady": true, - "deviceId": "0" - }, - { - "properties": { - "status": { - "type": "string", - "value": "off" - } - }, - "commands": {}, - "components": [], - "apiVersion": 1, - "uri": "https://api.viessmann-platform.io/iot/v1/equipment/installations/#######/gateways/################/devices/0/features/heating.dhw.pumps.primary", - "gatewayId": "################", - "feature": "heating.dhw.pumps.primary", - "timestamp": "2021-09-02T11:30:20.716Z", - "isEnabled": true, - "isReady": true, - "deviceId": "0" - }, - { - "properties": { - "unit": { - "value": "celsius", - "type": "string" - }, - "value": { - "type": "number", - "value": 43.2, - "unit": "celsius" - }, - "status": { - "type": "string", - "value": "connected" - } - }, - "commands": {}, - "components": [ - "bottom", - "top" + "fri": [ + { + "start": "00:00", + "end": "07:00", + "mode": "reduced", + "position": 0 + }, + { + "start": "07:00", + "end": "08:00", + "mode": "standard", + "position": 1 + }, + { + "start": "08:00", + "end": "21:00", + "mode": "standard", + "position": 2 + }, + { + "start": "21:00", + "end": "24:00", + "mode": "reduced", + "position": 3 
+ } ], - "apiVersion": 1, - "uri": "https://api.viessmann-platform.io/iot/v1/equipment/installations/#######/gateways/################/devices/0/features/heating.dhw.sensors.temperature.hotWaterStorage", - "gatewayId": "################", - "feature": "heating.dhw.sensors.temperature.hotWaterStorage", - "timestamp": "2021-09-02T18:24:29.009Z", - "isEnabled": true, - "isReady": true, - "deviceId": "0" - }, - { - "properties": {}, - "commands": {}, - "components": [ - "boiler", - "buffer", - "burner", - "burners", - "circuits", - "compressors", - "condensors", - "configuration", - "device", - "dhw", - "evaporators", - "operating", - "sensors", - "solar" + "sat": [ + { + "start": "00:00", + "end": "07:00", + "mode": "reduced", + "position": 0 + }, + { + "start": "07:00", + "end": "08:00", + "mode": "standard", + "position": 1 + }, + { + "start": "08:00", + "end": "21:00", + "mode": "standard", + "position": 2 + }, + { + "start": "21:00", + "end": "24:00", + "mode": "reduced", + "position": 3 + } ], - "apiVersion": 1, - "uri": "https://api.viessmann-platform.io/iot/v1/equipment/installations/#######/gateways/################/devices/0/features/heating", - "gatewayId": "################", - "feature": "heating", - "timestamp": "2021-09-02T11:30:19.048Z", - "isEnabled": true, - "isReady": true, - "deviceId": "0" - }, - { - "properties": { - "active": { - "value": false, - "type": "boolean" - } - }, - "commands": {}, - "components": [], - "apiVersion": 1, - "uri": "https://api.viessmann-platform.io/iot/v1/equipment/installations/#######/gateways/################/devices/0/features/ventilation.operating.programs.eco", - "gatewayId": "################", - "feature": "ventilation.operating.programs.eco", - "timestamp": "2021-09-02T11:30:20.839Z", - "isEnabled": true, - "isReady": true, - "deviceId": "0" - }, - { - "properties": {}, - "commands": {}, - "components": [], - "apiVersion": 1, - "uri": "https://api.viessmann-platform.io/iot/v1/equipment/installations/#######/gateways/################/devices/0/features/heating.circuits.2.operating.programs.reduced", - "gatewayId": "################", - "feature": "heating.circuits.2.operating.programs.reduced", - "timestamp": "2021-09-02T11:30:21.710Z", - "isEnabled": false, - "isReady": true, - "deviceId": "0" - }, - { - "properties": {}, - "commands": {}, - "components": [], - "apiVersion": 1, - "uri": "https://api.viessmann-platform.io/iot/v1/equipment/installations/#######/gateways/################/devices/0/features/heating.buffer", - "gatewayId": "################", - "feature": "heating.buffer", - "timestamp": "2021-09-02T11:30:19.049Z", - "isEnabled": true, - "isReady": true, - "deviceId": "0" - }, - { - "properties": { - "value": { - "type": "string", - "value": "################" - } - }, - "commands": {}, - "components": [], - "apiVersion": 1, - "uri": "https://api.viessmann-platform.io/iot/v1/equipment/installations/#######/gateways/################/devices/0/features/heating.boiler.serial", - "gatewayId": "################", - "feature": "heating.boiler.serial", - "timestamp": "2021-09-02T11:30:20.272Z", - "isEnabled": true, - "isReady": true, - "deviceId": "0" - }, - { - "properties": {}, - "commands": {}, - "components": [], - "apiVersion": 1, - "uri": "https://api.viessmann-platform.io/iot/v1/equipment/installations/#######/gateways/################/devices/0/features/heating.circuits.2.operating.modes.heating", - "gatewayId": "################", - "feature": "heating.circuits.2.operating.modes.heating", - "timestamp": 
"2021-09-02T11:30:20.590Z", - "isEnabled": false, - "isReady": true, - "deviceId": "0" - }, - { - "properties": {}, - "commands": {}, - "components": [], - "apiVersion": 1, - "uri": "https://api.viessmann-platform.io/iot/v1/equipment/installations/#######/gateways/################/devices/0/features/heating.compressors.0.heat.production", - "gatewayId": "################", - "feature": "heating.compressors.0.heat.production", - "timestamp": "2021-09-02T11:30:19.049Z", - "isEnabled": true, - "isReady": true, - "deviceId": "0" - }, - { - "properties": {}, - "commands": {}, - "components": [ - "sensors" - ], - "apiVersion": 1, - "uri": "https://api.viessmann-platform.io/iot/v1/equipment/installations/#######/gateways/################/devices/0/features/heating.condensors.0", - "gatewayId": "################", - "feature": "heating.condensors.0", - "timestamp": "2021-09-02T11:30:19.049Z", - "isEnabled": true, - "isReady": true, - "deviceId": "0" - }, - { - "properties": { - "unit": { - "value": "celsius", - "type": "string" - }, - "value": { - "type": "number", - "value": 16.8, - "unit": "celsius" - }, - "status": { - "type": "string", - "value": "connected" - } - }, - "commands": {}, - "components": [], - "apiVersion": 1, - "uri": "https://api.viessmann-platform.io/iot/v1/equipment/installations/#######/gateways/################/devices/0/features/heating.primaryCircuit.sensors.temperature.supply", - "gatewayId": "################", - "feature": "heating.primaryCircuit.sensors.temperature.supply", - "timestamp": "2021-09-02T18:30:28.760Z", - "isEnabled": true, - "isReady": true, - "deviceId": "0" - }, - { - "properties": {}, - "commands": {}, - "components": [], - "apiVersion": 1, - "uri": "https://api.viessmann-platform.io/iot/v1/equipment/installations/#######/gateways/################/devices/0/features/heating.circuits.2.operating.programs.eco", - "gatewayId": "################", - "feature": "heating.circuits.2.operating.programs.eco", - "timestamp": "2021-09-02T11:30:21.714Z", - "isEnabled": false, - "isReady": true, - "deviceId": "0" - }, - { - "properties": {}, - "commands": {}, - "components": [], - "apiVersion": 1, - "uri": "https://api.viessmann-platform.io/iot/v1/equipment/installations/#######/gateways/################/devices/0/features/heating.circuits.1.operating.programs.comfort", - "gatewayId": "################", - "feature": "heating.circuits.1.operating.programs.comfort", - "timestamp": "2021-09-02T11:30:21.705Z", - "isEnabled": false, - "isReady": true, - "deviceId": "0" - }, - { - "properties": {}, - "commands": {}, - "components": [ - "time" - ], - "apiVersion": 1, - "uri": "https://api.viessmann-platform.io/iot/v1/equipment/installations/#######/gateways/################/devices/0/features/heating.device", - "gatewayId": "################", - "feature": "heating.device", - "timestamp": "2021-09-02T11:30:19.048Z", - "isEnabled": true, - "isReady": true, - "deviceId": "0" - }, - { - "properties": { - "active": { - "type": "boolean", - "value": false - }, - "phase": { - "type": "string", - "value": "off" - } - }, - "commands": {}, - "components": [ - "heat", - "sensors", - "statistics" - ], - "apiVersion": 1, - "uri": "https://api.viessmann-platform.io/iot/v1/equipment/installations/#######/gateways/################/devices/0/features/heating.compressors.0", - "gatewayId": "################", - "feature": "heating.compressors.0", - "timestamp": "2021-09-02T11:30:19.879Z", - "isEnabled": true, - "isReady": true, - "deviceId": "0" - }, - { - "properties": { - "active": { - 
"type": "boolean", - "value": false - } - }, - "commands": {}, - "components": [], - "apiVersion": 1, - "uri": "https://api.viessmann-platform.io/iot/v1/equipment/installations/#######/gateways/################/devices/0/features/heating.circuits.0.operating.programs.fixed", - "gatewayId": "################", - "feature": "heating.circuits.0.operating.programs.fixed", - "timestamp": "2021-09-02T11:30:20.637Z", - "isEnabled": true, - "isReady": true, - "deviceId": "0" - }, - { - "properties": {}, - "commands": {}, - "components": [ - "0" + "sun": [ + { + "start": "00:00", + "end": "07:00", + "mode": "reduced", + "position": 0 + }, + { + "start": "07:00", + "end": "08:00", + "mode": "standard", + "position": 1 + }, + { + "start": "08:00", + "end": "21:00", + "mode": "standard", + "position": 2 + }, + { + "start": "21:00", + "end": "24:00", + "mode": "reduced", + "position": 3 + } + ] + }, + "type": "Schedule" + } + }, + "commands": { + "setSchedule": { + "uri": "https://api.viessmann-platform.io/iot/v1/equipment/installations/#######/gateways/################/devices/0/features/ventilation.schedule/commands/setSchedule", + "name": "setSchedule", + "isExecutable": true, + "params": { + "newSchedule": { + "type": "Schedule", + "required": true, + "constraints": { + "modes": ["reduced", "standard", "intensive"], + "maxEntries": 8, + "resolution": 10, + "defaultMode": "basic", + "overlapAllowed": true + } + } + } + } + }, + "components": [], + "apiVersion": 1, + "uri": "https://api.viessmann-platform.io/iot/v1/equipment/installations/#######/gateways/################/devices/0/features/ventilation.schedule", + "gatewayId": "################", + "feature": "ventilation.schedule", + "timestamp": "2021-09-02T11:30:20.815Z", + "isEnabled": true, + "isReady": true, + "deviceId": "0" + }, + { + "properties": {}, + "commands": {}, + "components": [], + "apiVersion": 1, + "uri": "https://api.viessmann-platform.io/iot/v1/equipment/installations/#######/gateways/################/devices/0/features/heating.circuits.2.operating.modes.active", + "gatewayId": "################", + "feature": "heating.circuits.2.operating.modes.active", + "timestamp": "2021-09-02T11:30:20.788Z", + "isEnabled": false, + "isReady": true, + "deviceId": "0" + }, + { + "properties": {}, + "commands": {}, + "components": [ + "active", + "cooling", + "dhw", + "dhwAndHeating", + "dhwAndHeatingCooling", + "heating", + "heatingCooling", + "normalStandby", + "standby" + ], + "apiVersion": 1, + "uri": "https://api.viessmann-platform.io/iot/v1/equipment/installations/#######/gateways/################/devices/0/features/heating.circuits.0.operating.modes", + "gatewayId": "################", + "feature": "heating.circuits.0.operating.modes", + "timestamp": "2021-09-02T11:30:19.049Z", + "isEnabled": true, + "isReady": true, + "deviceId": "0" + }, + { + "properties": { + "active": { + "type": "boolean", + "value": false + } + }, + "commands": {}, + "components": [], + "apiVersion": 1, + "uri": "https://api.viessmann-platform.io/iot/v1/equipment/installations/#######/gateways/################/devices/0/features/heating.circuits.0.operating.modes.standby", + "gatewayId": "################", + "feature": "heating.circuits.0.operating.modes.standby", + "timestamp": "2021-09-02T11:30:20.620Z", + "isEnabled": true, + "isReady": true, + "deviceId": "0" + }, + { + "properties": { + "value": { + "type": "string", + "value": "normal" + } + }, + "commands": {}, + "components": [], + "apiVersion": 1, + "uri": 
"https://api.viessmann-platform.io/iot/v1/equipment/installations/#######/gateways/################/devices/0/features/heating.circuits.0.operating.programs.active", + "gatewayId": "################", + "feature": "heating.circuits.0.operating.programs.active", + "timestamp": "2021-09-02T11:30:20.744Z", + "isEnabled": true, + "isReady": true, + "deviceId": "0" + }, + { + "properties": { + "active": { + "value": false, + "type": "boolean" + }, + "start": { + "value": "", + "type": "string" + }, + "end": { + "value": "", + "type": "string" + } + }, + "commands": { + "changeEndDate": { + "uri": "https://api.viessmann-platform.io/iot/v1/equipment/installations/#######/gateways/################/devices/0/features/heating.operating.programs.holiday/commands/changeEndDate", + "name": "changeEndDate", + "isExecutable": false, + "params": { + "end": { + "type": "string", + "required": true, + "constraints": { + "regEx": "^[\\d]{4}-[\\d]{2}-[\\d]{2}$", + "sameDayAllowed": false + } + } + } + }, + "schedule": { + "uri": "https://api.viessmann-platform.io/iot/v1/equipment/installations/#######/gateways/################/devices/0/features/heating.operating.programs.holiday/commands/schedule", + "name": "schedule", + "isExecutable": true, + "params": { + "start": { + "type": "string", + "required": true, + "constraints": { + "regEx": "^[\\d]{4}-[\\d]{2}-[\\d]{2}$" + } + }, + "end": { + "type": "string", + "required": true, + "constraints": { + "regEx": "^[\\d]{4}-[\\d]{2}-[\\d]{2}$", + "sameDayAllowed": false + } + } + } + }, + "unschedule": { + "uri": "https://api.viessmann-platform.io/iot/v1/equipment/installations/#######/gateways/################/devices/0/features/heating.operating.programs.holiday/commands/unschedule", + "name": "unschedule", + "isExecutable": true, + "params": {} + } + }, + "components": [], + "apiVersion": 1, + "uri": "https://api.viessmann-platform.io/iot/v1/equipment/installations/#######/gateways/################/devices/0/features/heating.operating.programs.holiday", + "gatewayId": "################", + "feature": "heating.operating.programs.holiday", + "timestamp": "2021-09-02T11:30:20.851Z", + "isEnabled": true, + "isReady": true, + "deviceId": "0" + }, + { + "properties": {}, + "commands": {}, + "components": [], + "apiVersion": 1, + "uri": "https://api.viessmann-platform.io/iot/v1/equipment/installations/#######/gateways/################/devices/0/features/heating.evaporators.0.sensors.temperature", + "gatewayId": "################", + "feature": "heating.evaporators.0.sensors.temperature", + "timestamp": "2021-09-02T11:30:19.049Z", + "isEnabled": true, + "isReady": true, + "deviceId": "0" + }, + { + "properties": { + "value": { + "value": 45, + "unit": "", + "type": "number" + } + }, + "commands": { + "setTargetTemperature": { + "uri": "https://api.viessmann-platform.io/iot/v1/equipment/installations/#######/gateways/################/devices/0/features/heating.dhw.temperature.temp2/commands/setTargetTemperature", + "name": "setTargetTemperature", + "isExecutable": true, + "params": { + "temperature": { + "type": "number", + "required": true, + "constraints": { + "min": 10, + "max": 60, + "stepping": 1 + } + } + } + } + }, + "components": [], + "apiVersion": 1, + "uri": "https://api.viessmann-platform.io/iot/v1/equipment/installations/#######/gateways/################/devices/0/features/heating.dhw.temperature.temp2", + "gatewayId": "################", + "feature": "heating.dhw.temperature.temp2", + "timestamp": "2021-09-02T11:30:19.870Z", + "isEnabled": true, + 
"isReady": true, + "deviceId": "0" + }, + { + "properties": {}, + "commands": {}, + "components": ["sensors"], + "apiVersion": 1, + "uri": "https://api.viessmann-platform.io/iot/v1/equipment/installations/#######/gateways/################/devices/0/features/heating.evaporators.0", + "gatewayId": "################", + "feature": "heating.evaporators.0", + "timestamp": "2021-09-02T11:30:19.049Z", + "isEnabled": true, + "isReady": true, + "deviceId": "0" + }, + { + "properties": {}, + "commands": {}, + "components": [], + "apiVersion": 1, + "uri": "https://api.viessmann-platform.io/iot/v1/equipment/installations/#######/gateways/################/devices/0/features/heating.circuits.1.operating.programs.eco", + "gatewayId": "################", + "feature": "heating.circuits.1.operating.programs.eco", + "timestamp": "2021-09-02T11:30:21.713Z", + "isEnabled": false, + "isReady": true, + "deviceId": "0" + }, + { + "properties": { + "active": { + "type": "boolean", + "value": false + } + }, + "commands": {}, + "components": [], + "apiVersion": 1, + "uri": "https://api.viessmann-platform.io/iot/v1/equipment/installations/#######/gateways/################/devices/0/features/ventilation.operating.modes.standby", + "gatewayId": "################", + "feature": "ventilation.operating.modes.standby", + "timestamp": "2021-09-02T11:30:20.820Z", + "isEnabled": true, + "isReady": true, + "deviceId": "0" + }, + { + "properties": { + "value": { + "type": "number", + "value": 0, + "unit": "" + } + }, + "commands": {}, + "components": ["levels"], + "apiVersion": 1, + "uri": "https://api.viessmann-platform.io/iot/v1/equipment/installations/#######/gateways/################/devices/0/features/heating.circuits.1.temperature", + "gatewayId": "################", + "feature": "heating.circuits.1.temperature", + "timestamp": "2021-09-02T11:30:20.863Z", + "isEnabled": true, + "isReady": true, + "deviceId": "0" + }, + { + "properties": { + "status": { + "type": "string", + "value": "off" + } + }, + "commands": {}, + "components": [], + "apiVersion": 1, + "uri": "https://api.viessmann-platform.io/iot/v1/equipment/installations/#######/gateways/################/devices/0/features/heating.circuits.0.frostprotection", + "gatewayId": "################", + "feature": "heating.circuits.0.frostprotection", + "timestamp": "2021-09-02T11:30:20.466Z", + "isEnabled": true, + "isReady": true, + "deviceId": "0" + }, + { + "properties": {}, + "commands": {}, + "components": ["circuit"], + "apiVersion": 1, + "uri": "https://api.viessmann-platform.io/iot/v1/equipment/installations/#######/gateways/################/devices/0/features/heating.solar.pumps", + "gatewayId": "################", + "feature": "heating.solar.pumps", + "timestamp": "2021-09-02T11:30:19.050Z", + "isEnabled": true, + "isReady": true, + "deviceId": "0" + }, + { + "properties": { + "unit": { + "value": "celsius", + "type": "string" + }, + "status": { + "type": "string", + "value": "notConnected" + } + }, + "commands": {}, + "components": [], + "apiVersion": 1, + "uri": "https://api.viessmann-platform.io/iot/v1/equipment/installations/#######/gateways/################/devices/0/features/heating.buffer.sensors.temperature.main", + "gatewayId": "################", + "feature": "heating.buffer.sensors.temperature.main", + "timestamp": "2021-09-02T11:30:20.269Z", + "isEnabled": true, + "isReady": true, + "deviceId": "0" + }, + { + "properties": {}, + "commands": {}, + "components": [ + "active", + "comfort", + "eco", + "fixed", + "normal", + "reduced", + "standby" + ], + 
"apiVersion": 1, + "uri": "https://api.viessmann-platform.io/iot/v1/equipment/installations/#######/gateways/################/devices/0/features/heating.circuits.1.operating.programs", + "gatewayId": "################", + "feature": "heating.circuits.1.operating.programs", + "timestamp": "2021-09-02T11:30:19.048Z", + "isEnabled": true, + "isReady": true, + "deviceId": "0" + }, + { + "properties": { + "active": { + "type": "boolean", + "value": true + } + }, + "commands": {}, + "components": [], + "apiVersion": 1, + "uri": "https://api.viessmann-platform.io/iot/v1/equipment/installations/#######/gateways/################/devices/0/features/ventilation.operating.modes.ventilation", + "gatewayId": "################", + "feature": "ventilation.operating.modes.ventilation", + "timestamp": "2021-09-02T11:30:20.823Z", + "isEnabled": true, + "isReady": true, + "deviceId": "0" + }, + { + "properties": {}, + "commands": {}, + "components": [], + "apiVersion": 1, + "uri": "https://api.viessmann-platform.io/iot/v1/equipment/installations/#######/gateways/################/devices/0/features/heating.burners.0.modulation", + "gatewayId": "################", + "feature": "heating.burners.0.modulation", + "timestamp": "2021-09-02T11:30:20.276Z", + "isEnabled": false, + "isReady": true, + "deviceId": "0" + }, + { + "properties": {}, + "commands": {}, + "components": [ + "heatingRod", + "multiFamilyHouse", + "secondaryHeatGenerator" + ], + "apiVersion": 1, + "uri": "https://api.viessmann-platform.io/iot/v1/equipment/installations/#######/gateways/################/devices/0/features/heating.configuration", + "gatewayId": "################", + "feature": "heating.configuration", + "timestamp": "2021-09-02T11:30:19.048Z", + "isEnabled": true, + "isReady": true, + "deviceId": "0" + }, + { + "properties": { + "active": { + "value": false, + "type": "boolean" + } + }, + "commands": { + "activate": { + "uri": "https://api.viessmann-platform.io/iot/v1/equipment/installations/#######/gateways/################/devices/0/features/heating.dhw.oneTimeCharge/commands/activate", + "name": "activate", + "isExecutable": true, + "params": {} + }, + "deactivate": { + "uri": "https://api.viessmann-platform.io/iot/v1/equipment/installations/#######/gateways/################/devices/0/features/heating.dhw.oneTimeCharge/commands/deactivate", + "name": "deactivate", + "isExecutable": false, + "params": {} + } + }, + "components": [], + "apiVersion": 1, + "uri": "https://api.viessmann-platform.io/iot/v1/equipment/installations/#######/gateways/################/devices/0/features/heating.dhw.oneTimeCharge", + "gatewayId": "################", + "feature": "heating.dhw.oneTimeCharge", + "timestamp": "2021-09-02T11:30:20.713Z", + "isEnabled": true, + "isReady": true, + "deviceId": "0" + }, + { + "properties": { + "active": { + "value": false, + "type": "boolean" + } + }, + "commands": {}, + "components": [], + "apiVersion": 1, + "uri": "https://api.viessmann-platform.io/iot/v1/equipment/installations/#######/gateways/################/devices/0/features/heating.circuits.0.operating.programs.standby", + "gatewayId": "################", + "feature": "heating.circuits.0.operating.programs.standby", + "timestamp": "2021-09-02T11:30:20.646Z", + "isEnabled": true, + "isReady": true, + "deviceId": "0" + }, + { + "properties": { + "active": { + "value": true, + "type": "boolean" + }, + "demand": { + "value": "unknown", + "type": "string" + }, + "temperature": { + "value": 23, + "unit": "", + "type": "number" + } + }, + "commands": { + 
"setTemperature": { + "uri": "https://api.viessmann-platform.io/iot/v1/equipment/installations/#######/gateways/################/devices/0/features/heating.circuits.0.operating.programs.normal/commands/setTemperature", + "name": "setTemperature", + "isExecutable": true, + "params": { + "targetTemperature": { + "type": "number", + "required": true, + "constraints": { + "min": 10, + "max": 30, + "stepping": 1 + } + } + } + } + }, + "components": [], + "apiVersion": 1, + "uri": "https://api.viessmann-platform.io/iot/v1/equipment/installations/#######/gateways/################/devices/0/features/heating.circuits.0.operating.programs.normal", + "gatewayId": "################", + "feature": "heating.circuits.0.operating.programs.normal", + "timestamp": "2021-09-02T11:30:21.707Z", + "isEnabled": true, + "isReady": true, + "deviceId": "0" + }, + { + "properties": { + "shift": { + "type": "number", + "unit": "", + "value": 0 + }, + "slope": { + "type": "number", + "unit": "", + "value": 0.6 + } + }, + "commands": { + "setCurve": { + "uri": "https://api.viessmann-platform.io/iot/v1/equipment/installations/#######/gateways/################/devices/0/features/heating.circuits.2.heating.curve/commands/setCurve", + "name": "setCurve", + "isExecutable": true, + "params": { + "slope": { + "type": "number", + "required": true, + "constraints": { + "min": 0, + "max": 3.5, + "stepping": 0.1 + } + }, + "shift": { + "type": "number", + "required": true, + "constraints": { + "min": -15, + "max": 40, + "stepping": 1 + } + } + } + } + }, + "components": [], + "apiVersion": 1, + "uri": "https://api.viessmann-platform.io/iot/v1/equipment/installations/#######/gateways/################/devices/0/features/heating.circuits.2.heating.curve", + "gatewayId": "################", + "feature": "heating.circuits.2.heating.curve", + "timestamp": "2021-09-02T11:30:20.483Z", + "isEnabled": true, + "isReady": true, + "deviceId": "0" + }, + { + "properties": { + "active": { + "type": "boolean", + "value": false + } + }, + "commands": {}, + "components": [], + "apiVersion": 1, + "uri": "https://api.viessmann-platform.io/iot/v1/equipment/installations/#######/gateways/################/devices/0/features/ventilation.operating.programs.basic", + "gatewayId": "################", + "feature": "ventilation.operating.programs.basic", + "timestamp": "2021-09-02T11:30:20.832Z", + "isEnabled": true, + "isReady": true, + "deviceId": "0" + }, + { + "properties": {}, + "commands": {}, + "components": ["0"], + "apiVersion": 1, + "uri": "https://api.viessmann-platform.io/iot/v1/equipment/installations/#######/gateways/################/devices/0/features/heating.burners", + "gatewayId": "################", + "feature": "heating.burners", + "timestamp": "2021-09-02T11:30:19.049Z", + "isEnabled": true, + "isReady": true, + "deviceId": "0" + }, + { + "properties": {}, + "commands": {}, + "components": [], + "apiVersion": 1, + "uri": "https://api.viessmann-platform.io/iot/v1/equipment/installations/#######/gateways/################/devices/0/features/heating.circuits.2.frostprotection", + "gatewayId": "################", + "feature": "heating.circuits.2.frostprotection", + "timestamp": "2021-09-02T11:30:20.469Z", + "isEnabled": false, + "isReady": true, + "deviceId": "0" + }, + { + "properties": {}, + "commands": {}, + "components": ["curve", "schedule"], + "apiVersion": 1, + "uri": "https://api.viessmann-platform.io/iot/v1/equipment/installations/#######/gateways/################/devices/0/features/heating.circuits.2.heating", + "gatewayId": 
"################", + "feature": "heating.circuits.2.heating", + "timestamp": "2021-09-02T11:30:19.048Z", + "isEnabled": true, + "isReady": true, + "deviceId": "0" + }, + { + "properties": { + "active": { + "value": false, + "type": "boolean" + }, + "demand": { + "value": "unknown", + "type": "string" + }, + "temperature": { + "value": 10, + "unit": "", + "type": "number" + } + }, + "commands": { + "setTemperature": { + "uri": "https://api.viessmann-platform.io/iot/v1/equipment/installations/#######/gateways/################/devices/0/features/heating.circuits.0.operating.programs.reduced/commands/setTemperature", + "name": "setTemperature", + "isExecutable": true, + "params": { + "targetTemperature": { + "type": "number", + "required": true, + "constraints": { + "min": 10, + "max": 30, + "stepping": 1 + } + } + } + } + }, + "components": [], + "apiVersion": 1, + "uri": "https://api.viessmann-platform.io/iot/v1/equipment/installations/#######/gateways/################/devices/0/features/heating.circuits.0.operating.programs.reduced", + "gatewayId": "################", + "feature": "heating.circuits.0.operating.programs.reduced", + "timestamp": "2021-09-02T11:30:21.710Z", + "isEnabled": true, + "isReady": true, + "deviceId": "0" + }, + { + "properties": {}, + "commands": {}, + "components": [], + "apiVersion": 1, + "uri": "https://api.viessmann-platform.io/iot/v1/equipment/installations/#######/gateways/################/devices/0/features/heating.circuits.2.operating.modes.normalStandby", + "gatewayId": "################", + "feature": "heating.circuits.2.operating.modes.normalStandby", + "timestamp": "2021-09-02T11:30:20.527Z", + "isEnabled": false, + "isReady": true, + "deviceId": "0" + }, + { + "properties": {}, + "commands": {}, + "components": [], + "apiVersion": 1, + "uri": "https://api.viessmann-platform.io/iot/v1/equipment/installations/#######/gateways/################/devices/0/features/heating.circuits.2.operating.programs.comfort", + "gatewayId": "################", + "feature": "heating.circuits.2.operating.programs.comfort", + "timestamp": "2021-09-02T11:30:21.705Z", + "isEnabled": false, + "isReady": true, + "deviceId": "0" + }, + { + "properties": {}, + "commands": {}, + "components": [], + "apiVersion": 1, + "uri": "https://api.viessmann-platform.io/iot/v1/equipment/installations/#######/gateways/################/devices/0/features/heating.circuits.2.operating.modes.dhwAndHeating", + "gatewayId": "################", + "feature": "heating.circuits.2.operating.modes.dhwAndHeating", + "timestamp": "2021-09-02T11:30:20.570Z", + "isEnabled": false, + "isReady": true, + "deviceId": "0" + }, + { + "properties": { + "active": { + "type": "boolean", + "value": false + } + }, + "commands": {}, + "components": [], + "apiVersion": 1, + "uri": "https://api.viessmann-platform.io/iot/v1/equipment/installations/#######/gateways/################/devices/0/features/heating.dhw.charging", + "gatewayId": "################", + "feature": "heating.dhw.charging", + "timestamp": "2021-09-02T11:30:20.712Z", + "isEnabled": true, + "isReady": true, + "deviceId": "0" + }, + { + "properties": { + "active": { + "value": true, + "type": "boolean" + }, + "name": { + "value": "Heat/cool circuit 1", + "type": "string" + }, + "type": { + "value": "heatingCircuit", + "type": "string" + } + }, + "commands": { + "setName": { + "uri": "https://api.viessmann-platform.io/iot/v1/equipment/installations/#######/gateways/################/devices/0/features/heating.circuits.0/commands/setName", + "name": "setName", + 
"isExecutable": true, + "params": { + "name": { + "type": "string", + "required": true, + "constraints": { + "minLength": 1, + "maxLength": 20 + } + } + } + } + }, + "components": [ + "circulation", + "frostprotection", + "heating", + "operating", + "sensors", + "temperature" + ], + "apiVersion": 1, + "uri": "https://api.viessmann-platform.io/iot/v1/equipment/installations/#######/gateways/################/devices/0/features/heating.circuits.0", + "gatewayId": "################", + "feature": "heating.circuits.0", + "timestamp": "2021-09-02T11:30:20.286Z", + "isEnabled": true, + "isReady": true, + "deviceId": "0" + }, + { + "properties": {}, + "commands": {}, + "components": ["temperature"], + "apiVersion": 1, + "uri": "https://api.viessmann-platform.io/iot/v1/equipment/installations/#######/gateways/################/devices/0/features/heating.circuits.1.sensors", + "gatewayId": "################", + "feature": "heating.circuits.1.sensors", + "timestamp": "2021-09-02T11:30:19.049Z", + "isEnabled": true, + "isReady": true, + "deviceId": "0" + }, + { + "properties": {}, + "commands": {}, + "components": [], + "apiVersion": 1, + "uri": "https://api.viessmann-platform.io/iot/v1/equipment/installations/#######/gateways/################/devices/0/features/heating.compressors.0.sensors.temperature", + "gatewayId": "################", + "feature": "heating.compressors.0.sensors.temperature", + "timestamp": "2021-09-02T11:30:19.050Z", + "isEnabled": true, + "isReady": true, + "deviceId": "0" + }, + { + "properties": {}, + "commands": {}, + "components": ["pumps", "sensors"], + "apiVersion": 1, + "uri": "https://api.viessmann-platform.io/iot/v1/equipment/installations/#######/gateways/################/devices/0/features/heating.solar", + "gatewayId": "################", + "feature": "heating.solar", + "timestamp": "2021-09-02T11:30:20.841Z", + "isEnabled": false, + "isReady": true, + "deviceId": "0" + }, + { + "properties": { + "unit": { + "value": "celsius", + "type": "string" + }, + "status": { + "type": "string", + "value": "notConnected" + } + }, + "commands": {}, + "components": [], + "apiVersion": 1, + "uri": "https://api.viessmann-platform.io/iot/v1/equipment/installations/#######/gateways/################/devices/0/features/heating.primaryCircuit.sensors.temperature.return", + "gatewayId": "################", + "feature": "heating.primaryCircuit.sensors.temperature.return", + "timestamp": "2021-09-02T11:30:20.662Z", + "isEnabled": true, + "isReady": true, + "deviceId": "0" + }, + { + "properties": { + "active": { + "type": "boolean", + "value": false + } + }, + "commands": {}, + "components": [], + "apiVersion": 1, + "uri": "https://api.viessmann-platform.io/iot/v1/equipment/installations/#######/gateways/################/devices/0/features/heating.circuits.0.operating.modes.cooling", + "gatewayId": "################", + "feature": "heating.circuits.0.operating.modes.cooling", + "timestamp": "2021-09-02T11:30:20.503Z", + "isEnabled": true, + "isReady": true, + "deviceId": "0" + }, + { + "properties": {}, + "commands": {}, + "components": [], + "apiVersion": 1, + "uri": "https://api.viessmann-platform.io/iot/v1/equipment/installations/#######/gateways/################/devices/0/features/heating.circuits.1.operating.modes.normalStandby", + "gatewayId": "################", + "feature": "heating.circuits.1.operating.modes.normalStandby", + "timestamp": "2021-09-02T11:30:20.516Z", + "isEnabled": false, + "isReady": true, + "deviceId": "0" + }, + { + "properties": {}, + "commands": {}, + 
"components": [], + "apiVersion": 1, + "uri": "https://api.viessmann-platform.io/iot/v1/equipment/installations/#######/gateways/################/devices/0/features/heating.circuits.0.operating.modes.dhwAndHeating", + "gatewayId": "################", + "feature": "heating.circuits.0.operating.modes.dhwAndHeating", + "timestamp": "2021-09-02T11:30:20.565Z", + "isEnabled": false, + "isReady": true, + "deviceId": "0" + }, + { + "properties": {}, + "commands": {}, + "components": ["modes", "programs"], + "apiVersion": 1, + "uri": "https://api.viessmann-platform.io/iot/v1/equipment/installations/#######/gateways/################/devices/0/features/ventilation.operating", + "gatewayId": "################", + "feature": "ventilation.operating", + "timestamp": "2021-09-02T11:30:19.048Z", + "isEnabled": true, + "isReady": true, + "deviceId": "0" + }, + { + "properties": { + "value": { + "type": "number", + "value": 19, + "unit": "" + } + }, + "commands": {}, + "components": ["levels"], + "apiVersion": 1, + "uri": "https://api.viessmann-platform.io/iot/v1/equipment/installations/#######/gateways/################/devices/0/features/heating.circuits.0.temperature", + "gatewayId": "################", + "feature": "heating.circuits.0.temperature", + "timestamp": "2021-09-02T11:30:20.862Z", + "isEnabled": true, + "isReady": true, + "deviceId": "0" + }, + { + "properties": { + "unit": { + "value": "celsius", + "type": "string" + }, + "value": { + "type": "number", + "value": 21.2, + "unit": "celsius" + }, + "status": { + "type": "string", + "value": "connected" + } + }, + "commands": {}, + "components": [], + "apiVersion": 1, + "uri": "https://api.viessmann-platform.io/iot/v1/equipment/installations/#######/gateways/################/devices/0/features/heating.sensors.temperature.return", + "gatewayId": "################", + "feature": "heating.sensors.temperature.return", + "timestamp": "2021-09-02T18:30:40.784Z", + "isEnabled": true, + "isReady": true, + "deviceId": "0" + }, + { + "properties": { + "active": { + "value": false, + "type": "boolean" + } + }, + "commands": {}, + "components": [], + "apiVersion": 1, + "uri": "https://api.viessmann-platform.io/iot/v1/equipment/installations/#######/gateways/################/devices/0/features/ventilation.operating.programs.comfort", + "gatewayId": "################", + "feature": "ventilation.operating.programs.comfort", + "timestamp": "2021-09-02T11:30:20.829Z", + "isEnabled": true, + "isReady": true, + "deviceId": "0" + }, + { + "properties": {}, + "commands": {}, + "components": ["holiday"], + "apiVersion": 1, + "uri": "https://api.viessmann-platform.io/iot/v1/equipment/installations/#######/gateways/################/devices/0/features/heating.operating.programs", + "gatewayId": "################", + "feature": "heating.operating.programs", + "timestamp": "2021-09-02T11:30:19.048Z", + "isEnabled": true, + "isReady": true, + "deviceId": "0" + }, + { + "properties": {}, + "commands": {}, + "components": [], + "apiVersion": 1, + "uri": "https://api.viessmann-platform.io/iot/v1/equipment/installations/#######/gateways/################/devices/0/features/heating.circuits.2.operating.programs.standby", + "gatewayId": "################", + "feature": "heating.circuits.2.operating.programs.standby", + "timestamp": "2021-09-02T11:30:20.649Z", + "isEnabled": false, + "isReady": true, + "deviceId": "0" + }, + { + "properties": { + "value": { + "type": "string", + "value": "standard" + } + }, + "commands": {}, + "components": [], + "apiVersion": 1, + "uri": 
"https://api.viessmann-platform.io/iot/v1/equipment/installations/#######/gateways/################/devices/0/features/ventilation.operating.programs.active", + "gatewayId": "################", + "feature": "ventilation.operating.programs.active", + "timestamp": "2021-09-02T11:30:20.825Z", + "isEnabled": true, + "isReady": true, + "deviceId": "0" + }, + { + "properties": {}, + "commands": {}, + "components": ["modes", "programs"], + "apiVersion": 1, + "uri": "https://api.viessmann-platform.io/iot/v1/equipment/installations/#######/gateways/################/devices/0/features/heating.circuits.1.operating", + "gatewayId": "################", + "feature": "heating.circuits.1.operating", + "timestamp": "2021-09-02T11:30:19.048Z", + "isEnabled": true, + "isReady": true, + "deviceId": "0" + }, + { + "properties": {}, + "commands": {}, + "components": ["pump"], + "apiVersion": 1, + "uri": "https://api.viessmann-platform.io/iot/v1/equipment/installations/#######/gateways/################/devices/0/features/heating.circuits.1.circulation", + "gatewayId": "################", + "feature": "heating.circuits.1.circulation", + "timestamp": "2021-09-02T11:30:19.050Z", + "isEnabled": true, + "isReady": true, + "deviceId": "0" + }, + { + "properties": {}, + "commands": {}, + "components": [], + "apiVersion": 1, + "uri": "https://api.viessmann-platform.io/iot/v1/equipment/installations/#######/gateways/################/devices/0/features/heating.circuits.2.operating.programs.active", + "gatewayId": "################", + "feature": "heating.circuits.2.operating.programs.active", + "timestamp": "2021-09-02T11:30:20.748Z", + "isEnabled": false, + "isReady": true, + "deviceId": "0" + }, + { + "properties": {}, + "commands": {}, + "components": [], + "apiVersion": 1, + "uri": "https://api.viessmann-platform.io/iot/v1/equipment/installations/#######/gateways/################/devices/0/features/heating.circuits.2.operating.modes.heatingCooling", + "gatewayId": "################", + "feature": "heating.circuits.2.operating.modes.heatingCooling", + "timestamp": "2021-09-02T11:30:20.582Z", + "isEnabled": false, + "isReady": true, + "deviceId": "0" + }, + { + "properties": {}, + "commands": {}, + "components": [], + "apiVersion": 1, + "uri": "https://api.viessmann-platform.io/iot/v1/equipment/installations/#######/gateways/################/devices/0/features/heating.circuits.1.circulation.pump", + "gatewayId": "################", + "feature": "heating.circuits.1.circulation.pump", + "timestamp": "2021-09-02T11:30:20.741Z", + "isEnabled": false, + "isReady": true, + "deviceId": "0" + }, + { + "properties": {}, + "commands": {}, + "components": ["modes", "programs"], + "apiVersion": 1, + "uri": "https://api.viessmann-platform.io/iot/v1/equipment/installations/#######/gateways/################/devices/0/features/heating.circuits.0.operating", + "gatewayId": "################", + "feature": "heating.circuits.0.operating", + "timestamp": "2021-09-02T11:30:19.048Z", + "isEnabled": true, + "isReady": true, + "deviceId": "0" + }, + { + "properties": {}, + "commands": {}, + "components": [], + "apiVersion": 1, + "uri": "https://api.viessmann-platform.io/iot/v1/equipment/installations/#######/gateways/################/devices/0/features/device", + "gatewayId": "################", + "feature": "device", + "timestamp": "2021-09-02T11:30:19.049Z", + "isEnabled": true, + "isReady": true, + "deviceId": "0" + }, + { + "properties": {}, + "commands": {}, + "components": [], + "apiVersion": 1, + "uri": 
"https://api.viessmann-platform.io/iot/v1/equipment/installations/#######/gateways/################/devices/0/features/heating.solar.sensors.temperature.collector", + "gatewayId": "################", + "feature": "heating.solar.sensors.temperature.collector", + "timestamp": "2021-09-02T11:30:20.842Z", + "isEnabled": false, + "isReady": true, + "deviceId": "0" + }, + { + "properties": { + "value": { + "value": "dhwAndHeatingCooling", + "type": "string" + } + }, + "commands": { + "setMode": { + "uri": "https://api.viessmann-platform.io/iot/v1/equipment/installations/#######/gateways/################/devices/0/features/heating.circuits.0.operating.modes.active/commands/setMode", + "name": "setMode", + "isExecutable": true, + "params": { + "mode": { + "type": "string", + "required": true, + "constraints": { + "enum": ["standby", "dhw", "dhwAndHeatingCooling"] + } + } + } + } + }, + "components": [], + "apiVersion": 1, + "uri": "https://api.viessmann-platform.io/iot/v1/equipment/installations/#######/gateways/################/devices/0/features/heating.circuits.0.operating.modes.active", + "gatewayId": "################", + "feature": "heating.circuits.0.operating.modes.active", + "timestamp": "2021-09-02T11:30:20.758Z", + "isEnabled": true, + "isReady": true, + "deviceId": "0" + }, + { + "properties": {}, + "commands": {}, + "components": [], + "apiVersion": 1, + "uri": "https://api.viessmann-platform.io/iot/v1/equipment/installations/#######/gateways/################/devices/0/features/heating.circuits.1.operating.modes.cooling", + "gatewayId": "################", + "feature": "heating.circuits.1.operating.modes.cooling", + "timestamp": "2021-09-02T11:30:20.506Z", + "isEnabled": false, + "isReady": true, + "deviceId": "0" + }, + { + "properties": {}, + "commands": {}, + "components": [], + "apiVersion": 1, + "uri": "https://api.viessmann-platform.io/iot/v1/equipment/installations/#######/gateways/################/devices/0/features/heating.sensors.valve", + "gatewayId": "################", + "feature": "heating.sensors.valve", + "timestamp": "2021-09-02T11:30:19.050Z", + "isEnabled": true, + "isReady": true, + "deviceId": "0" + }, + { + "properties": {}, + "commands": {}, + "components": ["collector", "dhw"], + "apiVersion": 1, + "uri": "https://api.viessmann-platform.io/iot/v1/equipment/installations/#######/gateways/################/devices/0/features/heating.solar.sensors.temperature", + "gatewayId": "################", + "feature": "heating.solar.sensors.temperature", + "timestamp": "2021-09-02T11:30:19.050Z", + "isEnabled": true, + "isReady": true, + "deviceId": "0" + }, + { + "properties": {}, + "commands": {}, + "components": [], + "apiVersion": 1, + "uri": "https://api.viessmann-platform.io/iot/v1/equipment/installations/#######/gateways/################/devices/0/features/heating.circuits.1.heating.schedule", + "gatewayId": "################", + "feature": "heating.circuits.1.heating.schedule", + "timestamp": "2021-09-02T11:30:20.486Z", + "isEnabled": false, + "isReady": true, + "deviceId": "0" + }, + { + "properties": { + "unit": { + "value": "celsius", + "type": "string" + }, + "value": { + "type": "number", + "value": 25.1, + "unit": "celsius" + }, + "status": { + "type": "string", + "value": "connected" + } + }, + "commands": {}, + "components": [], + "apiVersion": 1, + "uri": "https://api.viessmann-platform.io/iot/v1/equipment/installations/#######/gateways/################/devices/0/features/heating.circuits.0.sensors.temperature.room", + "gatewayId": "################", + 
"feature": "heating.circuits.0.sensors.temperature.room", + "timestamp": "2021-09-02T18:19:04.649Z", + "isEnabled": true, + "isReady": true, + "deviceId": "0" + }, + { + "properties": { + "unit": { + "value": "celsius", + "type": "string" + }, + "value": { + "type": "number", + "value": 26.3, + "unit": "celsius" + }, + "status": { + "type": "string", + "value": "connected" + } + }, + "commands": {}, + "components": [], + "apiVersion": 1, + "uri": "https://api.viessmann-platform.io/iot/v1/equipment/installations/#######/gateways/################/devices/0/features/heating.secondaryCircuit.sensors.temperature.supply", + "gatewayId": "################", + "feature": "heating.secondaryCircuit.sensors.temperature.supply", + "timestamp": "2021-09-02T18:21:47.906Z", + "isEnabled": true, + "isReady": true, + "deviceId": "0" + }, + { + "properties": {}, + "commands": {}, + "components": [], + "apiVersion": 1, + "uri": "https://api.viessmann-platform.io/iot/v1/equipment/installations/#######/gateways/################/devices/0/features/heating.configuration.secondaryHeatGenerator", + "gatewayId": "################", + "feature": "heating.configuration.secondaryHeatGenerator", + "timestamp": "2021-09-02T11:30:19.048Z", + "isEnabled": true, + "isReady": true, + "deviceId": "0" + }, + { + "properties": { + "active": { + "type": "boolean", + "value": false + } + }, + "commands": {}, + "components": [], + "apiVersion": 1, + "uri": "https://api.viessmann-platform.io/iot/v1/equipment/installations/#######/gateways/################/devices/0/features/ventilation.operating.programs.reduced", + "gatewayId": "################", + "feature": "ventilation.operating.programs.reduced", + "timestamp": "2021-09-02T11:30:20.834Z", + "isEnabled": true, + "isReady": true, + "deviceId": "0" + }, + { + "properties": { + "active": { + "type": "boolean", + "value": false + } + }, + "commands": {}, + "components": [], + "apiVersion": 1, + "uri": "https://api.viessmann-platform.io/iot/v1/equipment/installations/#######/gateways/################/devices/0/features/heating.configuration.multiFamilyHouse", + "gatewayId": "################", + "feature": "heating.configuration.multiFamilyHouse", + "timestamp": "2021-09-02T11:30:20.281Z", + "isEnabled": true, + "isReady": true, + "deviceId": "0" + }, + { + "properties": { + "active": { + "type": "boolean", + "value": false + } + }, + "commands": {}, + "components": [], + "apiVersion": 1, + "uri": "https://api.viessmann-platform.io/iot/v1/equipment/installations/#######/gateways/################/devices/0/features/ventilation.operating.programs.intensive", + "gatewayId": "################", + "feature": "ventilation.operating.programs.intensive", + "timestamp": "2021-09-02T11:30:20.838Z", + "isEnabled": true, + "isReady": true, + "deviceId": "0" + }, + { + "properties": {}, + "commands": {}, + "components": [], + "apiVersion": 1, + "uri": "https://api.viessmann-platform.io/iot/v1/equipment/installations/#######/gateways/################/devices/0/features/heating.circuits.1.operating.modes.standby", + "gatewayId": "################", + "feature": "heating.circuits.1.operating.modes.standby", + "timestamp": "2021-09-02T11:30:20.624Z", + "isEnabled": false, + "isReady": true, + "deviceId": "0" + }, + { + "properties": {}, + "commands": {}, + "components": ["curve", "schedule"], + "apiVersion": 1, + "uri": "https://api.viessmann-platform.io/iot/v1/equipment/installations/#######/gateways/################/devices/0/features/heating.circuits.0.heating", + "gatewayId": 
"################", + "feature": "heating.circuits.0.heating", + "timestamp": "2021-09-02T11:30:19.048Z", + "isEnabled": true, + "isReady": true, + "deviceId": "0" + }, + { + "properties": { + "active": { + "value": false, + "type": "boolean" + }, + "demand": { + "value": "unknown", + "type": "string" + }, + "temperature": { + "value": 20, + "unit": "", + "type": "number" + } + }, + "commands": { + "setTemperature": { + "uri": "https://api.viessmann-platform.io/iot/v1/equipment/installations/#######/gateways/################/devices/0/features/heating.circuits.0.operating.programs.comfort/commands/setTemperature", + "name": "setTemperature", + "isExecutable": true, + "params": { + "targetTemperature": { + "type": "number", + "required": true, + "constraints": { + "min": 10, + "max": 30, + "stepping": 1 + } + } + } + }, + "activate": { + "uri": "https://api.viessmann-platform.io/iot/v1/equipment/installations/#######/gateways/################/devices/0/features/heating.circuits.0.operating.programs.comfort/commands/activate", + "name": "activate", + "isExecutable": true, + "params": { + "temperature": { + "type": "number", + "required": false, + "constraints": { + "min": 10, + "max": 30, + "stepping": 1 + } + } + } + }, + "deactivate": { + "uri": "https://api.viessmann-platform.io/iot/v1/equipment/installations/#######/gateways/################/devices/0/features/heating.circuits.0.operating.programs.comfort/commands/deactivate", + "name": "deactivate", + "isExecutable": false, + "params": {} + } + }, + "components": [], + "apiVersion": 1, + "uri": "https://api.viessmann-platform.io/iot/v1/equipment/installations/#######/gateways/################/devices/0/features/heating.circuits.0.operating.programs.comfort", + "gatewayId": "################", + "feature": "heating.circuits.0.operating.programs.comfort", + "timestamp": "2021-09-02T11:30:21.704Z", + "isEnabled": true, + "isReady": true, + "deviceId": "0" + }, + { + "properties": {}, + "commands": {}, + "components": [], + "apiVersion": 1, + "uri": "https://api.viessmann-platform.io/iot/v1/equipment/installations/#######/gateways/################/devices/0/features/heating.solar.power.production", + "gatewayId": "################", + "feature": "heating.solar.power.production", + "timestamp": "2021-09-02T11:30:20.810Z", + "isEnabled": false, + "isReady": true, + "deviceId": "0" + }, + { + "properties": {}, + "commands": {}, + "components": [], + "apiVersion": 1, + "uri": "https://api.viessmann-platform.io/iot/v1/equipment/installations/#######/gateways/################/devices/0/features/heating.circuits.2.heating.schedule", + "gatewayId": "################", + "feature": "heating.circuits.2.heating.schedule", + "timestamp": "2021-09-02T11:30:20.488Z", + "isEnabled": false, + "isReady": true, + "deviceId": "0" + }, + { + "properties": { + "unit": { + "value": "celsius", + "type": "string" + }, + "status": { + "type": "string", + "value": "notConnected" + } + }, + "commands": {}, + "components": [], + "apiVersion": 1, + "uri": "https://api.viessmann-platform.io/iot/v1/equipment/installations/#######/gateways/################/devices/0/features/heating.dhw.sensors.temperature.hotWaterStorage.bottom", + "gatewayId": "################", + "feature": "heating.dhw.sensors.temperature.hotWaterStorage.bottom", + "timestamp": "2021-09-02T11:30:20.724Z", + "isEnabled": true, + "isReady": true, + "deviceId": "0" + }, + { + "properties": {}, + "commands": {}, + "components": [ + "active", + "comfort", + "eco", + "fixed", + "normal", + "reduced", + 
"standby" + ], + "apiVersion": 1, + "uri": "https://api.viessmann-platform.io/iot/v1/equipment/installations/#######/gateways/################/devices/0/features/heating.circuits.0.operating.programs", + "gatewayId": "################", + "feature": "heating.circuits.0.operating.programs", + "timestamp": "2021-09-02T11:30:19.048Z", + "isEnabled": true, + "isReady": true, + "deviceId": "0" + }, + { + "properties": {}, + "commands": {}, + "components": [], + "apiVersion": 1, + "uri": "https://api.viessmann-platform.io/iot/v1/equipment/installations/#######/gateways/################/devices/0/features/heating.circuits.1.operating.modes.heatingCooling", + "gatewayId": "################", + "feature": "heating.circuits.1.operating.modes.heatingCooling", + "timestamp": "2021-09-02T11:30:20.579Z", + "isEnabled": false, + "isReady": true, + "deviceId": "0" + }, + { + "properties": {}, + "commands": {}, + "components": ["temperature"], + "apiVersion": 1, + "uri": "https://api.viessmann-platform.io/iot/v1/equipment/installations/#######/gateways/################/devices/0/features/heating.circuits.0.sensors", + "gatewayId": "################", + "feature": "heating.circuits.0.sensors", + "timestamp": "2021-09-02T11:30:19.049Z", + "isEnabled": true, + "isReady": true, + "deviceId": "0" + }, + { + "properties": {}, + "commands": {}, + "components": [], + "apiVersion": 1, + "uri": "https://api.viessmann-platform.io/iot/v1/equipment/installations/#######/gateways/################/devices/0/features/heating.circuits.2.circulation.pump", + "gatewayId": "################", + "feature": "heating.circuits.2.circulation.pump", + "timestamp": "2021-09-02T11:30:20.743Z", + "isEnabled": false, + "isReady": true, + "deviceId": "0" + }, + { + "properties": { + "unit": { + "value": "celsius", + "type": "string" + }, + "status": { + "type": "string", + "value": "notConnected" + } + }, + "commands": {}, + "components": [], + "apiVersion": 1, + "uri": "https://api.viessmann-platform.io/iot/v1/equipment/installations/#######/gateways/################/devices/0/features/heating.boiler.sensors.temperature.commonSupply", + "gatewayId": "################", + "feature": "heating.boiler.sensors.temperature.commonSupply", + "timestamp": "2021-09-02T11:30:20.267Z", + "isEnabled": true, + "isReady": true, + "deviceId": "0" + }, + { + "properties": {}, + "commands": {}, + "components": [], + "apiVersion": 1, + "uri": "https://api.viessmann-platform.io/iot/v1/equipment/installations/#######/gateways/################/devices/0/features/heating.circuits.1.sensors.temperature.supply", + "gatewayId": "################", + "feature": "heating.circuits.1.sensors.temperature.supply", + "timestamp": "2021-09-02T11:30:20.656Z", + "isEnabled": false, + "isReady": true, + "deviceId": "0" + }, + { + "properties": { + "unit": { + "value": "celsius", + "type": "string" + }, + "value": { + "type": "number", + "value": 21.2, + "unit": "celsius" + }, + "status": { + "type": "string", + "value": "connected" + } + }, + "commands": {}, + "components": [], + "apiVersion": 1, + "uri": "https://api.viessmann-platform.io/iot/v1/equipment/installations/#######/gateways/################/devices/0/features/heating.secondaryCircuit.sensors.temperature.return", + "gatewayId": "################", + "feature": "heating.secondaryCircuit.sensors.temperature.return", + "timestamp": "2021-09-02T18:31:01.143Z", + "isEnabled": true, + "isReady": true, + "deviceId": "0" + }, + { + "properties": { + "active": { + "type": "boolean", + "value": false + } + }, + 
"commands": {}, + "components": [], + "apiVersion": 1, + "uri": "https://api.viessmann-platform.io/iot/v1/equipment/installations/#######/gateways/################/devices/0/features/heating.circuits.0.operating.modes.normalStandby", + "gatewayId": "################", + "feature": "heating.circuits.0.operating.modes.normalStandby", + "timestamp": "2021-09-02T11:30:20.512Z", + "isEnabled": true, + "isReady": true, + "deviceId": "0" + }, + { + "properties": { + "unit": { + "value": "celsius", + "type": "string" + }, + "value": { + "type": "number", + "value": 26.3, + "unit": "celsius" + }, + "status": { + "type": "string", + "value": "connected" + } + }, + "commands": {}, + "components": [], + "apiVersion": 1, + "uri": "https://api.viessmann-platform.io/iot/v1/equipment/installations/#######/gateways/################/devices/0/features/heating.circuits.0.sensors.temperature.supply", + "gatewayId": "################", + "feature": "heating.circuits.0.sensors.temperature.supply", + "timestamp": "2021-09-02T18:17:07.742Z", + "isEnabled": true, + "isReady": true, + "deviceId": "0" + }, + { + "properties": {}, + "commands": {}, + "components": ["temperature"], + "apiVersion": 1, + "uri": "https://api.viessmann-platform.io/iot/v1/equipment/installations/#######/gateways/################/devices/0/features/heating.compressors.0.sensors", + "gatewayId": "################", + "feature": "heating.compressors.0.sensors", + "timestamp": "2021-09-02T11:30:19.049Z", + "isEnabled": true, + "isReady": true, + "deviceId": "0" + }, + { + "properties": {}, + "commands": {}, + "components": [ + "active", + "basic", + "comfort", + "eco", + "holiday", + "intensive", + "reduced", + "standard", + "standby" + ], + "apiVersion": 1, + "uri": "https://api.viessmann-platform.io/iot/v1/equipment/installations/#######/gateways/################/devices/0/features/ventilation.operating.programs", + "gatewayId": "################", + "feature": "ventilation.operating.programs", + "timestamp": "2021-09-02T11:30:19.048Z", + "isEnabled": true, + "isReady": true, + "deviceId": "0" + }, + { + "properties": { + "value": { + "value": "ventilation", + "type": "string" + } + }, + "commands": { + "setMode": { + "uri": "https://api.viessmann-platform.io/iot/v1/equipment/installations/#######/gateways/################/devices/0/features/ventilation.operating.modes.active/commands/setMode", + "name": "setMode", + "isExecutable": true, + "params": { + "mode": { + "type": "string", + "required": true, + "constraints": { + "enum": ["standby", "standard", "ventilation"] + } + } + } + } + }, + "components": [], + "apiVersion": 1, + "uri": "https://api.viessmann-platform.io/iot/v1/equipment/installations/#######/gateways/################/devices/0/features/ventilation.operating.modes.active", + "gatewayId": "################", + "feature": "ventilation.operating.modes.active", + "timestamp": "2021-09-02T11:30:20.818Z", + "isEnabled": true, + "isReady": true, + "deviceId": "0" + }, + { + "properties": { + "active": { + "value": false, + "type": "boolean" + }, + "start": { + "value": "", + "type": "string" + }, + "end": { + "value": "", + "type": "string" + } + }, + "commands": { + "changeEndDate": { + "uri": "https://api.viessmann-platform.io/iot/v1/equipment/installations/#######/gateways/################/devices/0/features/ventilation.operating.programs.holiday/commands/changeEndDate", + "name": "changeEndDate", + "isExecutable": false, + "params": { + "end": { + "type": "string", + "required": true, + "constraints": { + "regEx": 
"^[\\d]{4}-[\\d]{2}-[\\d]{2}$", + "sameDayAllowed": false + } + } + } + }, + "schedule": { + "uri": "https://api.viessmann-platform.io/iot/v1/equipment/installations/#######/gateways/################/devices/0/features/ventilation.operating.programs.holiday/commands/schedule", + "name": "schedule", + "isExecutable": true, + "params": { + "start": { + "type": "string", + "required": true, + "constraints": { + "regEx": "^[\\d]{4}-[\\d]{2}-[\\d]{2}$" + } + }, + "end": { + "type": "string", + "required": true, + "constraints": { + "regEx": "^[\\d]{4}-[\\d]{2}-[\\d]{2}$", + "sameDayAllowed": false + } + } + } + }, + "unschedule": { + "uri": "https://api.viessmann-platform.io/iot/v1/equipment/installations/#######/gateways/################/devices/0/features/ventilation.operating.programs.holiday/commands/unschedule", + "name": "unschedule", + "isExecutable": true, + "params": {} + } + }, + "components": [], + "apiVersion": 1, + "uri": "https://api.viessmann-platform.io/iot/v1/equipment/installations/#######/gateways/################/devices/0/features/ventilation.operating.programs.holiday", + "gatewayId": "################", + "feature": "ventilation.operating.programs.holiday", + "timestamp": "2021-09-02T11:30:20.827Z", + "isEnabled": true, + "isReady": true, + "deviceId": "0" + }, + { + "properties": { + "active": { + "type": "boolean", + "value": true + }, + "status": { + "type": "string", + "value": "on" + } + }, + "commands": {}, + "components": [ + "charging", + "oneTimeCharge", + "schedule", + "sensors", + "temperature" + ], + "apiVersion": 1, + "uri": "https://api.viessmann-platform.io/iot/v1/equipment/installations/#######/gateways/################/devices/0/features/heating.dhw", + "gatewayId": "################", + "feature": "heating.dhw", + "timestamp": "2021-09-02T11:30:20.750Z", + "isEnabled": true, + "isReady": true, + "deviceId": "0" + }, + { + "properties": {}, + "commands": {}, + "components": ["modulation", "statistics"], + "apiVersion": 1, + "uri": "https://api.viessmann-platform.io/iot/v1/equipment/installations/#######/gateways/################/devices/0/features/heating.burners.0", + "gatewayId": "################", + "feature": "heating.burners.0", + "timestamp": "2021-09-02T11:30:20.280Z", + "isEnabled": false, + "isReady": true, + "deviceId": "0" + }, + { + "properties": {}, + "commands": {}, + "components": [ + "active", + "comfort", + "eco", + "fixed", + "normal", + "reduced", + "standby" + ], + "apiVersion": 1, + "uri": "https://api.viessmann-platform.io/iot/v1/equipment/installations/#######/gateways/################/devices/0/features/heating.circuits.2.operating.programs", + "gatewayId": "################", + "feature": "heating.circuits.2.operating.programs", + "timestamp": "2021-09-02T11:30:19.048Z", + "isEnabled": true, + "isReady": true, + "deviceId": "0" + }, + { + "properties": {}, + "commands": {}, + "components": ["pump"], + "apiVersion": 1, + "uri": "https://api.viessmann-platform.io/iot/v1/equipment/installations/#######/gateways/################/devices/0/features/heating.circuits.0.circulation", + "gatewayId": "################", + "feature": "heating.circuits.0.circulation", + "timestamp": "2021-09-02T11:30:19.048Z", + "isEnabled": true, + "isReady": true, + "deviceId": "0" + }, + { + "properties": {}, + "commands": {}, + "components": ["active", "standard", "standby", "ventilation"], + "apiVersion": 1, + "uri": 
"https://api.viessmann-platform.io/iot/v1/equipment/installations/#######/gateways/################/devices/0/features/ventilation.operating.modes", + "gatewayId": "################", + "feature": "ventilation.operating.modes", + "timestamp": "2021-09-02T11:30:19.048Z", + "isEnabled": true, + "isReady": true, + "deviceId": "0" + }, + { + "properties": {}, + "commands": {}, + "components": [], + "apiVersion": 1, + "uri": "https://api.viessmann-platform.io/iot/v1/equipment/installations/#######/gateways/################/devices/0/features/heating.circuits.2.operating.modes.dhw", + "gatewayId": "################", + "feature": "heating.circuits.2.operating.modes.dhw", + "timestamp": "2021-09-02T11:30:20.550Z", + "isEnabled": false, + "isReady": true, + "deviceId": "0" + }, + { + "properties": {}, + "commands": {}, + "components": [], + "apiVersion": 1, + "uri": "https://api.viessmann-platform.io/iot/v1/equipment/installations/#######/gateways/################/devices/0/features/heating.circuits.1.temperature.levels", + "gatewayId": "################", + "feature": "heating.circuits.1.temperature.levels", + "timestamp": "2021-09-02T11:30:20.872Z", + "isEnabled": false, + "isReady": true, + "deviceId": "0" + }, + { + "properties": { + "starts": { + "type": "number", + "value": 1244, + "unit": "" + }, + "hours": { + "type": "number", + "value": 1191.3, + "unit": "" + }, + "hoursLoadClassOne": { + "type": "number", + "value": 251, + "unit": "" + }, + "hoursLoadClassTwo": { + "type": "number", + "value": 337, + "unit": "" + }, + "hoursLoadClassThree": { + "type": "number", + "value": 307, + "unit": "" + }, + "hoursLoadClassFour": { + "type": "number", + "value": 240, + "unit": "" + }, + "hoursLoadClassFive": { + "type": "number", + "value": 22, + "unit": "" + } + }, + "commands": {}, + "components": [], + "apiVersion": 1, + "uri": "https://api.viessmann-platform.io/iot/v1/equipment/installations/#######/gateways/################/devices/0/features/heating.compressors.0.statistics", + "gatewayId": "################", + "feature": "heating.compressors.0.statistics", + "timestamp": "2021-09-02T11:30:20.248Z", + "isEnabled": true, + "isReady": true, + "deviceId": "0" + }, + { + "properties": { + "enabled": { + "value": ["0"], + "type": "array" + } + }, + "commands": {}, + "components": ["0", "1", "2"], + "apiVersion": 1, + "uri": "https://api.viessmann-platform.io/iot/v1/equipment/installations/#######/gateways/################/devices/0/features/heating.circuits", + "gatewayId": "################", + "feature": "heating.circuits", + "timestamp": "2021-09-02T11:30:20.402Z", + "isEnabled": true, + "isReady": true, + "deviceId": "0" + }, + { + "properties": {}, + "commands": {}, + "components": ["temperature", "valve"], + "apiVersion": 1, + "uri": "https://api.viessmann-platform.io/iot/v1/equipment/installations/#######/gateways/################/devices/0/features/heating.sensors", + "gatewayId": "################", + "feature": "heating.sensors", + "timestamp": "2021-09-02T11:30:19.049Z", + "isEnabled": true, + "isReady": true, + "deviceId": "0" + }, + { + "properties": { + "value": { + "type": "number", + "value": 118, + "unit": "" + } + }, + "commands": {}, + "components": [], + "apiVersion": 1, + "uri": "https://api.viessmann-platform.io/iot/v1/equipment/installations/#######/gateways/################/devices/0/features/heating.device.time.offset", + "gatewayId": "################", + "feature": "heating.device.time.offset", + "timestamp": "2021-09-02T11:30:20.676Z", + "isEnabled": true, + 
"isReady": true, + "deviceId": "0" + }, + { + "properties": { + "unit": { + "value": "celsius", + "type": "string" + }, + "value": { + "type": "number", + "value": 15.6, + "unit": "celsius" + }, + "status": { + "type": "string", + "value": "connected" + } + }, + "commands": {}, + "components": [], + "apiVersion": 1, + "uri": "https://api.viessmann-platform.io/iot/v1/equipment/installations/#######/gateways/################/devices/0/features/heating.sensors.temperature.outside", + "gatewayId": "################", + "feature": "heating.sensors.temperature.outside", + "timestamp": "2021-09-02T18:31:19.344Z", + "isEnabled": true, + "isReady": true, + "deviceId": "0" + }, + { + "properties": {}, + "commands": {}, + "components": [], + "apiVersion": 1, + "uri": "https://api.viessmann-platform.io/iot/v1/equipment/installations/#######/gateways/################/devices/0/features/heating.circuits.1.operating.programs.normal", + "gatewayId": "################", + "feature": "heating.circuits.1.operating.programs.normal", + "timestamp": "2021-09-02T11:30:21.707Z", + "isEnabled": false, + "isReady": true, + "deviceId": "0" + }, + { + "properties": { + "status": { + "type": "string", + "value": "off" + } + }, + "commands": {}, + "components": ["schedule"], + "apiVersion": 1, + "uri": "https://api.viessmann-platform.io/iot/v1/equipment/installations/#######/gateways/################/devices/0/features/heating.dhw.pumps.circulation", + "gatewayId": "################", + "feature": "heating.dhw.pumps.circulation", + "timestamp": "2021-09-02T11:30:20.717Z", + "isEnabled": true, + "isReady": true, + "deviceId": "0" + }, + { + "properties": {}, + "commands": {}, + "components": [], + "apiVersion": 1, + "uri": "https://api.viessmann-platform.io/iot/v1/equipment/installations/#######/gateways/################/devices/0/features/heating.circuits.1.operating.modes.dhwAndHeating", + "gatewayId": "################", + "feature": "heating.circuits.1.operating.modes.dhwAndHeating", + "timestamp": "2021-09-02T11:30:20.567Z", + "isEnabled": false, + "isReady": true, + "deviceId": "0" + }, + { + "properties": { + "shift": { + "type": "number", + "unit": "", + "value": 0 + }, + "slope": { + "type": "number", + "unit": "", + "value": 0.6 + } + }, + "commands": { + "setCurve": { + "uri": "https://api.viessmann-platform.io/iot/v1/equipment/installations/#######/gateways/################/devices/0/features/heating.circuits.1.heating.curve/commands/setCurve", + "name": "setCurve", + "isExecutable": true, + "params": { + "slope": { + "type": "number", + "required": true, + "constraints": { + "min": 0, + "max": 3.5, + "stepping": 0.1 + } + }, + "shift": { + "type": "number", + "required": true, + "constraints": { + "min": -15, + "max": 40, + "stepping": 1 + } + } + } + } + }, + "components": [], + "apiVersion": 1, + "uri": "https://api.viessmann-platform.io/iot/v1/equipment/installations/#######/gateways/################/devices/0/features/heating.circuits.1.heating.curve", + "gatewayId": "################", + "feature": "heating.circuits.1.heating.curve", + "timestamp": "2021-09-02T11:30:20.481Z", + "isEnabled": true, + "isReady": true, + "deviceId": "0" + }, + { + "properties": { + "unit": { + "value": "celsius", + "type": "string" + }, + "value": { + "type": "number", + "value": 43.2, + "unit": "celsius" + }, + "status": { + "type": "string", + "value": "connected" + } + }, + "commands": {}, + "components": [], + "apiVersion": 1, + "uri": 
"https://api.viessmann-platform.io/iot/v1/equipment/installations/#######/gateways/################/devices/0/features/heating.dhw.sensors.temperature.hotWaterStorage.top", + "gatewayId": "################", + "feature": "heating.dhw.sensors.temperature.hotWaterStorage.top", + "timestamp": "2021-09-02T18:24:29.029Z", + "isEnabled": true, + "isReady": true, + "deviceId": "0" + }, + { + "properties": {}, + "commands": {}, + "components": [], + "apiVersion": 1, + "uri": "https://api.viessmann-platform.io/iot/v1/equipment/installations/#######/gateways/################/devices/0/features/heating.burners.0.statistics", + "gatewayId": "################", + "feature": "heating.burners.0.statistics", + "timestamp": "2021-09-02T11:30:20.461Z", + "isEnabled": false, + "isReady": true, + "deviceId": "0" + }, + { + "properties": {}, + "commands": {}, + "components": [ + "active", + "cooling", + "dhw", + "dhwAndHeating", + "dhwAndHeatingCooling", + "heating", + "heatingCooling", + "normalStandby", + "standby" + ], + "apiVersion": 1, + "uri": "https://api.viessmann-platform.io/iot/v1/equipment/installations/#######/gateways/################/devices/0/features/heating.circuits.2.operating.modes", + "gatewayId": "################", + "feature": "heating.circuits.2.operating.modes", + "timestamp": "2021-09-02T11:30:19.049Z", + "isEnabled": true, + "isReady": true, + "deviceId": "0" + }, + { + "properties": { + "value": { + "type": "string", + "value": "################" + } + }, + "commands": {}, + "components": [], + "apiVersion": 1, + "uri": "https://api.viessmann-platform.io/iot/v1/equipment/installations/#######/gateways/################/devices/0/features/heating.controller.serial", + "gatewayId": "################", + "feature": "heating.controller.serial", + "timestamp": "2021-09-02T11:30:20.674Z", + "isEnabled": true, + "isReady": true, + "deviceId": "0" + }, + { + "properties": { + "value": { + "value": 2.5, + "unit": "", + "type": "number" + } + }, + "commands": { + "setHysteresis": { + "uri": "https://api.viessmann-platform.io/iot/v1/equipment/installations/#######/gateways/################/devices/0/features/heating.dhw.temperature.hysteresis/commands/setHysteresis", + "name": "setHysteresis", + "isExecutable": true, + "params": { + "hysteresis": { + "type": "number", + "required": true, + "constraints": { + "min": 1, + "max": 10, + "stepping": 0.5 + } + } + } + } + }, + "components": [], + "apiVersion": 1, + "uri": "https://api.viessmann-platform.io/iot/v1/equipment/installations/#######/gateways/################/devices/0/features/heating.dhw.temperature.hysteresis", + "gatewayId": "################", + "feature": "heating.dhw.temperature.hysteresis", + "timestamp": "2021-09-02T11:30:20.732Z", + "isEnabled": true, + "isReady": true, + "deviceId": "0" + }, + { + "properties": {}, + "commands": {}, + "components": [], + "apiVersion": 1, + "uri": "https://api.viessmann-platform.io/iot/v1/equipment/installations/#######/gateways/################/devices/0/features/heating.circuits.2.temperature.levels", + "gatewayId": "################", + "feature": "heating.circuits.2.temperature.levels", + "timestamp": "2021-09-02T11:30:20.876Z", + "isEnabled": false, + "isReady": true, + "deviceId": "0" + }, + { + "properties": {}, + "commands": {}, + "components": [ + "active", + "cooling", + "dhw", + "dhwAndHeating", + "dhwAndHeatingCooling", + "heating", + "heatingCooling", + "normalStandby", + "standby" + ], + "apiVersion": 1, + "uri": 
"https://api.viessmann-platform.io/iot/v1/equipment/installations/#######/gateways/################/devices/0/features/heating.circuits.1.operating.modes", + "gatewayId": "################", + "feature": "heating.circuits.1.operating.modes", + "timestamp": "2021-09-02T11:30:19.049Z", + "isEnabled": true, + "isReady": true, + "deviceId": "0" + }, + { + "properties": { + "value": { + "type": "number", + "value": 0, + "unit": "" + } + }, + "commands": {}, + "components": ["levels"], + "apiVersion": 1, + "uri": "https://api.viessmann-platform.io/iot/v1/equipment/installations/#######/gateways/################/devices/0/features/heating.circuits.2.temperature", + "gatewayId": "################", + "feature": "heating.circuits.2.temperature", + "timestamp": "2021-09-02T11:30:20.865Z", + "isEnabled": true, + "isReady": true, + "deviceId": "0" + }, + { + "properties": { + "active": { + "value": true, + "type": "boolean" + }, + "entries": { + "value": { + "mon": [ + { + "start": "00:00", + "end": "24:00", + "mode": "normal", + "position": 0 + } ], - "apiVersion": 1, - "uri": "https://api.viessmann-platform.io/iot/v1/equipment/installations/#######/gateways/################/devices/0/features/heating.evaporators", - "gatewayId": "################", - "feature": "heating.evaporators", - "timestamp": "2021-09-02T11:30:19.049Z", - "isEnabled": true, - "isReady": true, - "deviceId": "0" - }, - { - "properties": {}, - "commands": {}, - "components": [], - "apiVersion": 1, - "uri": "https://api.viessmann-platform.io/iot/v1/equipment/installations/#######/gateways/################/devices/0/features/heating.boiler.sensors", - "gatewayId": "################", - "feature": "heating.boiler.sensors", - "timestamp": "2021-09-02T11:30:19.049Z", - "isEnabled": true, - "isReady": true, - "deviceId": "0" - }, - { - "properties": {}, - "commands": {}, - "components": [ - "curve", - "schedule" + "tue": [ + { + "start": "00:00", + "end": "24:00", + "mode": "normal", + "position": 0 + } ], - "apiVersion": 1, - "uri": "https://api.viessmann-platform.io/iot/v1/equipment/installations/#######/gateways/################/devices/0/features/heating.circuits.1.heating", - "gatewayId": "################", - "feature": "heating.circuits.1.heating", - "timestamp": "2021-09-02T11:30:19.048Z", - "isEnabled": true, - "isReady": true, - "deviceId": "0" - }, - { - "properties": {}, - "commands": {}, - "components": [ - "circulation", - "frostprotection", - "heating", - "operating", - "sensors", - "temperature" + "wed": [ + { + "start": "00:00", + "end": "24:00", + "mode": "normal", + "position": 0 + } ], - "apiVersion": 1, - "uri": "https://api.viessmann-platform.io/iot/v1/equipment/installations/#######/gateways/################/devices/0/features/heating.circuits.2", - "gatewayId": "################", - "feature": "heating.circuits.2", - "timestamp": "2021-09-02T11:30:20.401Z", - "isEnabled": false, - "isReady": true, - "deviceId": "0" - }, - { - "properties": {}, - "commands": {}, - "components": [ - "hysteresis", - "main", - "temp2" + "thu": [ + { + "start": "00:00", + "end": "24:00", + "mode": "normal", + "position": 0 + } ], - "apiVersion": 1, - "uri": "https://api.viessmann-platform.io/iot/v1/equipment/installations/#######/gateways/################/devices/0/features/heating.dhw.temperature", - "gatewayId": "################", - "feature": "heating.dhw.temperature", - "timestamp": "2021-09-02T11:30:19.048Z", - "isEnabled": true, - "isReady": true, - "deviceId": "0" - }, - { - "properties": {}, - "commands": {}, - "components": 
[ - "modes", - "programs" + "fri": [ + { + "start": "00:00", + "end": "24:00", + "mode": "normal", + "position": 0 + } ], - "apiVersion": 1, - "uri": "https://api.viessmann-platform.io/iot/v1/equipment/installations/#######/gateways/################/devices/0/features/heating.circuits.2.operating", - "gatewayId": "################", - "feature": "heating.circuits.2.operating", - "timestamp": "2021-09-02T11:30:19.048Z", - "isEnabled": true, - "isReady": true, - "deviceId": "0" - }, - { - "properties": {}, - "commands": {}, - "components": [], - "apiVersion": 1, - "uri": "https://api.viessmann-platform.io/iot/v1/equipment/installations/#######/gateways/################/devices/0/features/heating.circuits.1.operating.modes.heating", - "gatewayId": "################", - "feature": "heating.circuits.1.operating.modes.heating", - "timestamp": "2021-09-02T11:30:20.588Z", - "isEnabled": false, - "isReady": true, - "deviceId": "0" - }, - { - "properties": {}, - "commands": {}, - "components": [ - "production" + "sat": [ + { + "start": "00:00", + "end": "24:00", + "mode": "normal", + "position": 0 + } ], - "apiVersion": 1, - "uri": "https://api.viessmann-platform.io/iot/v1/equipment/installations/#######/gateways/################/devices/0/features/heating.compressors.1.heat", - "gatewayId": "################", - "feature": "heating.compressors.1.heat", - "timestamp": "2021-09-02T11:30:19.049Z", - "isEnabled": true, - "isReady": true, - "deviceId": "0" - }, - { - "properties": {}, - "commands": {}, - "components": [ - "programs" + "sun": [ + { + "start": "00:00", + "end": "24:00", + "mode": "normal", + "position": 0 + } + ] + }, + "type": "Schedule" + } + }, + "commands": { + "setSchedule": { + "uri": "https://api.viessmann-platform.io/iot/v1/equipment/installations/#######/gateways/################/devices/0/features/heating.circuits.0.heating.schedule/commands/setSchedule", + "name": "setSchedule", + "isExecutable": true, + "params": { + "newSchedule": { + "type": "Schedule", + "required": true, + "constraints": { + "modes": ["reduced", "normal", "fixed"], + "maxEntries": 8, + "resolution": 10, + "defaultMode": "standby", + "overlapAllowed": true + } + } + } + } + }, + "components": [], + "apiVersion": 1, + "uri": "https://api.viessmann-platform.io/iot/v1/equipment/installations/#######/gateways/################/devices/0/features/heating.circuits.0.heating.schedule", + "gatewayId": "################", + "feature": "heating.circuits.0.heating.schedule", + "timestamp": "2021-09-02T11:30:20.484Z", + "isEnabled": true, + "isReady": true, + "deviceId": "0" + }, + { + "properties": {}, + "commands": {}, + "components": [], + "apiVersion": 1, + "uri": "https://api.viessmann-platform.io/iot/v1/equipment/installations/#######/gateways/################/devices/0/features/heating.circuits.1.operating.programs.fixed", + "gatewayId": "################", + "feature": "heating.circuits.1.operating.programs.fixed", + "timestamp": "2021-09-02T11:30:20.639Z", + "isEnabled": false, + "isReady": true, + "deviceId": "0" + }, + { + "properties": {}, + "commands": {}, + "components": ["heat", "statistics"], + "apiVersion": 1, + "uri": "https://api.viessmann-platform.io/iot/v1/equipment/installations/#######/gateways/################/devices/0/features/heating.compressors.1", + "gatewayId": "################", + "feature": "heating.compressors.1", + "timestamp": "2021-09-02T11:30:19.902Z", + "isEnabled": false, + "isReady": true, + "deviceId": "0" + }, + { + "properties": { + "active": { + "type": "boolean", + "value": 
true + } + }, + "commands": {}, + "components": [], + "apiVersion": 1, + "uri": "https://api.viessmann-platform.io/iot/v1/equipment/installations/#######/gateways/################/devices/0/features/heating.circuits.0.operating.modes.dhwAndHeatingCooling", + "gatewayId": "################", + "feature": "heating.circuits.0.operating.modes.dhwAndHeatingCooling", + "timestamp": "2021-09-02T11:30:20.556Z", + "isEnabled": true, + "isReady": true, + "deviceId": "0" + }, + { + "properties": {}, + "commands": {}, + "components": [], + "apiVersion": 1, + "uri": "https://api.viessmann-platform.io/iot/v1/equipment/installations/#######/gateways/################/devices/0/features/heating.circuits.1.operating.programs.active", + "gatewayId": "################", + "feature": "heating.circuits.1.operating.programs.active", + "timestamp": "2021-09-02T11:30:20.746Z", + "isEnabled": false, + "isReady": true, + "deviceId": "0" + }, + { + "properties": {}, + "commands": {}, + "components": ["room", "supply"], + "apiVersion": 1, + "uri": "https://api.viessmann-platform.io/iot/v1/equipment/installations/#######/gateways/################/devices/0/features/heating.circuits.1.sensors.temperature", + "gatewayId": "################", + "feature": "heating.circuits.1.sensors.temperature", + "timestamp": "2021-09-02T11:30:19.049Z", + "isEnabled": true, + "isReady": true, + "deviceId": "0" + }, + { + "properties": { + "active": { + "value": true, + "type": "boolean" + }, + "temperature": { + "value": 20, + "unit": "celsius", + "type": "number" + }, + "unit": { + "value": "celsius", + "type": "string" + } + }, + "commands": { + "setTemperature": { + "uri": "https://api.viessmann-platform.io/iot/v1/equipment/installations/#######/gateways/################/devices/0/features/ventilation.operating.programs.standard/commands/setTemperature", + "name": "setTemperature", + "isExecutable": true, + "params": { + "targetTemperature": { + "type": "number", + "required": true, + "constraints": { + "min": 10, + "max": 30, + "stepping": 1 + } + } + } + } + }, + "components": [], + "apiVersion": 1, + "uri": "https://api.viessmann-platform.io/iot/v1/equipment/installations/#######/gateways/################/devices/0/features/ventilation.operating.programs.standard", + "gatewayId": "################", + "feature": "ventilation.operating.programs.standard", + "timestamp": "2021-09-02T11:30:20.835Z", + "isEnabled": true, + "isReady": true, + "deviceId": "0" + }, + { + "properties": { + "min": { + "value": 15, + "unit": "celsius", + "type": "number" + }, + "minUnit": { + "value": "celsius", + "type": "string" + }, + "max": { + "value": 23, + "unit": "celsius", + "type": "number" + }, + "maxUnit": { + "value": "celsius", + "type": "string" + } + }, + "commands": { + "setMin": { + "uri": "https://api.viessmann-platform.io/iot/v1/equipment/installations/#######/gateways/################/devices/0/features/heating.circuits.0.temperature.levels/commands/setMin", + "name": "setMin", + "isExecutable": true, + "params": { + "temperature": { + "type": "number", + "required": true, + "constraints": { + "min": 1, + "max": 30, + "stepping": 1 + } + } + } + }, + "setMax": { + "uri": "https://api.viessmann-platform.io/iot/v1/equipment/installations/#######/gateways/################/devices/0/features/heating.circuits.0.temperature.levels/commands/setMax", + "name": "setMax", + "isExecutable": true, + "params": { + "temperature": { + "type": "number", + "required": true, + "constraints": { + "min": 10, + "max": 70, + "stepping": 1 + } + } + } + }, + 
"setLevels": { + "uri": "https://api.viessmann-platform.io/iot/v1/equipment/installations/#######/gateways/################/devices/0/features/heating.circuits.0.temperature.levels/commands/setLevels", + "name": "setLevels", + "isExecutable": true, + "params": { + "minTemperature": { + "type": "number", + "required": true, + "constraints": { + "min": 1, + "max": 30, + "stepping": 1 + } + }, + "maxTemperature": { + "type": "number", + "required": true, + "constraints": { + "min": 10, + "max": 70, + "stepping": 1 + } + } + } + } + }, + "components": [], + "apiVersion": 1, + "uri": "https://api.viessmann-platform.io/iot/v1/equipment/installations/#######/gateways/################/devices/0/features/heating.circuits.0.temperature.levels", + "gatewayId": "################", + "feature": "heating.circuits.0.temperature.levels", + "timestamp": "2021-09-02T11:30:20.868Z", + "isEnabled": true, + "isReady": true, + "deviceId": "0" + }, + { + "properties": {}, + "commands": {}, + "components": [], + "apiVersion": 1, + "uri": "https://api.viessmann-platform.io/iot/v1/equipment/installations/#######/gateways/################/devices/0/features/heating.circuits.2.operating.modes.standby", + "gatewayId": "################", + "feature": "heating.circuits.2.operating.modes.standby", + "timestamp": "2021-09-02T11:30:20.627Z", + "isEnabled": false, + "isReady": true, + "deviceId": "0" + }, + { + "properties": {}, + "commands": {}, + "components": ["temperature"], + "apiVersion": 1, + "uri": "https://api.viessmann-platform.io/iot/v1/equipment/installations/#######/gateways/################/devices/0/features/heating.solar.sensors", + "gatewayId": "################", + "feature": "heating.solar.sensors", + "timestamp": "2021-09-02T11:30:19.050Z", + "isEnabled": true, + "isReady": true, + "deviceId": "0" + }, + { + "properties": {}, + "commands": {}, + "components": [], + "apiVersion": 1, + "uri": "https://api.viessmann-platform.io/iot/v1/equipment/installations/#######/gateways/################/devices/0/features/heating.circuits.2.operating.modes.cooling", + "gatewayId": "################", + "feature": "heating.circuits.2.operating.modes.cooling", + "timestamp": "2021-09-02T11:30:20.509Z", + "isEnabled": false, + "isReady": true, + "deviceId": "0" + }, + { + "properties": {}, + "commands": {}, + "components": [], + "apiVersion": 1, + "uri": "https://api.viessmann-platform.io/iot/v1/equipment/installations/#######/gateways/################/devices/0/features/heating.solar.pumps.circuit", + "gatewayId": "################", + "feature": "heating.solar.pumps.circuit", + "timestamp": "2021-09-02T11:30:20.848Z", + "isEnabled": false, + "isReady": true, + "deviceId": "0" + }, + { + "properties": { + "active": { + "type": "boolean", + "value": true + } + }, + "commands": {}, + "components": ["operating", "schedule"], + "apiVersion": 1, + "uri": "https://api.viessmann-platform.io/iot/v1/equipment/installations/#######/gateways/################/devices/0/features/ventilation", + "gatewayId": "################", + "feature": "ventilation", + "timestamp": "2021-09-02T11:30:20.812Z", + "isEnabled": true, + "isReady": true, + "deviceId": "0" + }, + { + "properties": { + "unit": { + "value": "celsius", + "type": "string" + }, + "status": { + "type": "string", + "value": "notConnected" + } + }, + "commands": {}, + "components": [], + "apiVersion": 1, + "uri": "https://api.viessmann-platform.io/iot/v1/equipment/installations/#######/gateways/################/devices/0/features/heating.buffer.sensors.temperature.top", + 
"gatewayId": "################", + "feature": "heating.buffer.sensors.temperature.top", + "timestamp": "2021-09-02T11:30:20.270Z", + "isEnabled": true, + "isReady": true, + "deviceId": "0" + }, + { + "properties": {}, + "commands": {}, + "components": ["temperature"], + "apiVersion": 1, + "uri": "https://api.viessmann-platform.io/iot/v1/equipment/installations/#######/gateways/################/devices/0/features/heating.circuits.2.sensors", + "gatewayId": "################", + "feature": "heating.circuits.2.sensors", + "timestamp": "2021-09-02T11:30:19.050Z", + "isEnabled": true, + "isReady": true, + "deviceId": "0" + }, + { + "properties": {}, + "commands": {}, + "components": [], + "apiVersion": 1, + "uri": "https://api.viessmann-platform.io/iot/v1/equipment/installations/#######/gateways/################/devices/0/features/heating.compressors.1.heat.production", + "gatewayId": "################", + "feature": "heating.compressors.1.heat.production", + "timestamp": "2021-09-02T11:30:19.049Z", + "isEnabled": true, + "isReady": true, + "deviceId": "0" + }, + { + "properties": {}, + "commands": {}, + "components": ["offset"], + "apiVersion": 1, + "uri": "https://api.viessmann-platform.io/iot/v1/equipment/installations/#######/gateways/################/devices/0/features/heating.device.time", + "gatewayId": "################", + "feature": "heating.device.time", + "timestamp": "2021-09-02T11:30:19.050Z", + "isEnabled": true, + "isReady": true, + "deviceId": "0" + }, + { + "properties": {}, + "commands": {}, + "components": ["room", "supply"], + "apiVersion": 1, + "uri": "https://api.viessmann-platform.io/iot/v1/equipment/installations/#######/gateways/################/devices/0/features/heating.circuits.0.sensors.temperature", + "gatewayId": "################", + "feature": "heating.circuits.0.sensors.temperature", + "timestamp": "2021-09-02T11:30:19.049Z", + "isEnabled": true, + "isReady": true, + "deviceId": "0" + }, + { + "properties": { + "active": { + "value": true, + "type": "boolean" + }, + "entries": { + "value": { + "mon": [ + { + "start": "00:00", + "end": "24:00", + "mode": "normal", + "position": 0 + } ], - "apiVersion": 1, - "uri": "https://api.viessmann-platform.io/iot/v1/equipment/installations/#######/gateways/################/devices/0/features/heating.operating", - "gatewayId": "################", - "feature": "heating.operating", - "timestamp": "2021-09-02T11:30:19.048Z", - "isEnabled": true, - "isReady": true, - "deviceId": "0" - }, - { - "properties": {}, - "commands": {}, - "components": [], - "apiVersion": 1, - "uri": "https://api.viessmann-platform.io/iot/v1/equipment/installations/#######/gateways/################/devices/0/features/heating.condensors.0.sensors.temperature", - "gatewayId": "################", - "feature": "heating.condensors.0.sensors.temperature", - "timestamp": "2021-09-02T11:30:19.049Z", - "isEnabled": true, - "isReady": true, - "deviceId": "0" - }, - { - "properties": {}, - "commands": {}, - "components": [ - "room", - "supply" + "tue": [ + { + "start": "00:00", + "end": "24:00", + "mode": "normal", + "position": 0 + } ], - "apiVersion": 1, - "uri": "https://api.viessmann-platform.io/iot/v1/equipment/installations/#######/gateways/################/devices/0/features/heating.circuits.2.sensors.temperature", - "gatewayId": "################", - "feature": "heating.circuits.2.sensors.temperature", - "timestamp": "2021-09-02T11:30:19.050Z", - "isEnabled": true, - "isReady": true, - "deviceId": "0" - }, - { - "properties": {}, - "commands": {}, - 
"components": [ - "production" + "wed": [ + { + "start": "00:00", + "end": "24:00", + "mode": "normal", + "position": 0 + } ], - "apiVersion": 1, - "uri": "https://api.viessmann-platform.io/iot/v1/equipment/installations/#######/gateways/################/devices/0/features/heating.compressors.0.heat", - "gatewayId": "################", - "feature": "heating.compressors.0.heat", - "timestamp": "2021-09-02T11:30:19.049Z", - "isEnabled": true, - "isReady": true, - "deviceId": "0" - }, - { - "properties": { - "active": { - "type": "boolean", - "value": false - } - }, - "commands": {}, - "components": [], - "apiVersion": 1, - "uri": "https://api.viessmann-platform.io/iot/v1/equipment/installations/#######/gateways/################/devices/0/features/heating.circuits.0.operating.modes.dhw", - "gatewayId": "################", - "feature": "heating.circuits.0.operating.modes.dhw", - "timestamp": "2021-09-02T11:30:20.537Z", - "isEnabled": true, - "isReady": true, - "deviceId": "0" - }, - { - "properties": {}, - "commands": {}, - "components": [], - "apiVersion": 1, - "uri": "https://api.viessmann-platform.io/iot/v1/equipment/installations/#######/gateways/################/devices/0/features/heating.circuits.2.sensors.temperature.room", - "gatewayId": "################", - "feature": "heating.circuits.2.sensors.temperature.room", - "timestamp": "2021-09-02T11:30:20.653Z", - "isEnabled": false, - "isReady": true, - "deviceId": "0" - }, - { - "properties": {}, - "commands": {}, - "components": [ - "temperature" + "thu": [ + { + "start": "00:00", + "end": "24:00", + "mode": "normal", + "position": 0 + } ], - "apiVersion": 1, - "uri": "https://api.viessmann-platform.io/iot/v1/equipment/installations/#######/gateways/################/devices/0/features/heating.condensors.0.sensors", - "gatewayId": "################", - "feature": "heating.condensors.0.sensors", - "timestamp": "2021-09-02T11:30:19.049Z", - "isEnabled": true, - "isReady": true, - "deviceId": "0" - }, - { - "properties": { - "active": { - "value": false, - "type": "boolean" - }, - "temperature": { - "value": 23, - "unit": "", - "type": "number" - } - }, - "commands": { - "activate": { - "uri": "https://api.viessmann-platform.io/iot/v1/equipment/installations/#######/gateways/################/devices/0/features/heating.circuits.0.operating.programs.eco/commands/activate", - "name": "activate", - "isExecutable": true, - "params": {} - }, - "deactivate": { - "uri": "https://api.viessmann-platform.io/iot/v1/equipment/installations/#######/gateways/################/devices/0/features/heating.circuits.0.operating.programs.eco/commands/deactivate", - "name": "deactivate", - "isExecutable": false, - "params": {} - } - }, - "components": [], - "apiVersion": 1, - "uri": "https://api.viessmann-platform.io/iot/v1/equipment/installations/#######/gateways/################/devices/0/features/heating.circuits.0.operating.programs.eco", - "gatewayId": "################", - "feature": "heating.circuits.0.operating.programs.eco", - "timestamp": "2021-09-02T11:30:21.713Z", - "isEnabled": true, - "isReady": true, - "deviceId": "0" - }, - { - "properties": { - "status": { - "type": "string", - "value": "off" - } - }, - "commands": {}, - "components": [], - "apiVersion": 1, - "uri": "https://api.viessmann-platform.io/iot/v1/equipment/installations/#######/gateways/################/devices/0/features/heating.circuits.0.circulation.pump", - "gatewayId": "################", - "feature": "heating.circuits.0.circulation.pump", - "timestamp": 
"2021-09-02T11:30:20.739Z", - "isEnabled": true, - "isReady": true, - "deviceId": "0" - }, - { - "properties": {}, - "commands": {}, - "components": [], - "apiVersion": 1, - "uri": "https://api.viessmann-platform.io/iot/v1/equipment/installations/#######/gateways/################/devices/0/features/heating.circuits.1.frostprotection", - "gatewayId": "################", - "feature": "heating.circuits.1.frostprotection", - "timestamp": "2021-09-02T11:30:20.467Z", - "isEnabled": false, - "isReady": true, - "deviceId": "0" - }, - { - "properties": {}, - "commands": {}, - "components": [ - "outside", - "return" + "fri": [ + { + "start": "00:00", + "end": "24:00", + "mode": "normal", + "position": 0 + } ], - "apiVersion": 1, - "uri": "https://api.viessmann-platform.io/iot/v1/equipment/installations/#######/gateways/################/devices/0/features/heating.sensors.temperature", - "gatewayId": "################", - "feature": "heating.sensors.temperature", - "timestamp": "2021-09-02T11:30:19.049Z", - "isEnabled": true, - "isReady": true, - "deviceId": "0" - }, - { - "properties": { - "unit": { - "value": "celsius", - "type": "string" - }, - "status": { - "type": "string", - "value": "notConnected" - } - }, - "commands": {}, - "components": [], - "apiVersion": 1, - "uri": "https://api.viessmann-platform.io/iot/v1/equipment/installations/#######/gateways/################/devices/0/features/heating.dhw.sensors.temperature.outlet", - "gatewayId": "################", - "feature": "heating.dhw.sensors.temperature.outlet", - "timestamp": "2021-09-02T11:30:20.738Z", - "isEnabled": true, - "isReady": true, - "deviceId": "0" - }, - { - "properties": {}, - "commands": {}, - "components": [], - "apiVersion": 1, - "uri": "https://api.viessmann-platform.io/iot/v1/equipment/installations/#######/gateways/################/devices/0/features/heating.circuits.0.operating.modes.heating", - "gatewayId": "################", - "feature": "heating.circuits.0.operating.modes.heating", - "timestamp": "2021-09-02T11:30:20.585Z", - "isEnabled": false, - "isReady": true, - "deviceId": "0" - }, - { - "properties": {}, - "commands": {}, - "components": [ - "temperature" + "sat": [ + { + "start": "00:00", + "end": "24:00", + "mode": "normal", + "position": 0 + } ], - "apiVersion": 1, - "uri": "https://api.viessmann-platform.io/iot/v1/equipment/installations/#######/gateways/################/devices/0/features/heating.evaporators.0.sensors", - "gatewayId": "################", - "feature": "heating.evaporators.0.sensors", - "timestamp": "2021-09-02T11:30:19.049Z", - "isEnabled": true, - "isReady": true, - "deviceId": "0" + "sun": [ + { + "start": "00:00", + "end": "24:00", + "mode": "normal", + "position": 0 + } + ] + }, + "type": "Schedule" + } + }, + "commands": { + "setSchedule": { + "uri": "https://api.viessmann-platform.io/iot/v1/equipment/installations/#######/gateways/################/devices/0/features/heating.dhw.schedule/commands/setSchedule", + "name": "setSchedule", + "isExecutable": true, + "params": { + "newSchedule": { + "type": "Schedule", + "required": true, + "constraints": { + "modes": ["top", "normal", "temp-2"], + "maxEntries": 8, + "resolution": 10, + "defaultMode": "off", + "overlapAllowed": true + } + } + } + } + }, + "components": [], + "apiVersion": 1, + "uri": "https://api.viessmann-platform.io/iot/v1/equipment/installations/#######/gateways/################/devices/0/features/heating.dhw.schedule", + "gatewayId": "################", + "feature": "heating.dhw.schedule", + "timestamp": 
"2021-09-02T11:30:20.752Z", + "isEnabled": true, + "isReady": true, + "deviceId": "0" + }, + { + "properties": { + "value": { + "value": 45, + "unit": "", + "type": "number" + } + }, + "commands": { + "setTargetTemperature": { + "uri": "https://api.viessmann-platform.io/iot/v1/equipment/installations/#######/gateways/################/devices/0/features/heating.dhw.temperature.main/commands/setTargetTemperature", + "name": "setTargetTemperature", + "isExecutable": true, + "params": { + "temperature": { + "type": "number", + "required": true, + "constraints": { + "min": 10, + "efficientLowerBorder": 10, + "efficientUpperBorder": 60, + "max": 60, + "stepping": 1 + } + } + } + } + }, + "components": [], + "apiVersion": 1, + "uri": "https://api.viessmann-platform.io/iot/v1/equipment/installations/#######/gateways/################/devices/0/features/heating.dhw.temperature.main", + "gatewayId": "################", + "feature": "heating.dhw.temperature.main", + "timestamp": "2021-09-02T11:30:19.868Z", + "isEnabled": true, + "isReady": true, + "deviceId": "0" + }, + { + "properties": { + "status": { + "type": "string", + "value": "off" + } + }, + "commands": {}, + "components": [], + "apiVersion": 1, + "uri": "https://api.viessmann-platform.io/iot/v1/equipment/installations/#######/gateways/################/devices/0/features/heating.dhw.pumps.primary", + "gatewayId": "################", + "feature": "heating.dhw.pumps.primary", + "timestamp": "2021-09-02T11:30:20.716Z", + "isEnabled": true, + "isReady": true, + "deviceId": "0" + }, + { + "properties": { + "unit": { + "value": "celsius", + "type": "string" + }, + "value": { + "type": "number", + "value": 43.2, + "unit": "celsius" + }, + "status": { + "type": "string", + "value": "connected" + } + }, + "commands": {}, + "components": ["bottom", "top"], + "apiVersion": 1, + "uri": "https://api.viessmann-platform.io/iot/v1/equipment/installations/#######/gateways/################/devices/0/features/heating.dhw.sensors.temperature.hotWaterStorage", + "gatewayId": "################", + "feature": "heating.dhw.sensors.temperature.hotWaterStorage", + "timestamp": "2021-09-02T18:24:29.009Z", + "isEnabled": true, + "isReady": true, + "deviceId": "0" + }, + { + "properties": {}, + "commands": {}, + "components": [ + "boiler", + "buffer", + "burner", + "burners", + "circuits", + "compressors", + "condensors", + "configuration", + "device", + "dhw", + "evaporators", + "operating", + "sensors", + "solar" + ], + "apiVersion": 1, + "uri": "https://api.viessmann-platform.io/iot/v1/equipment/installations/#######/gateways/################/devices/0/features/heating", + "gatewayId": "################", + "feature": "heating", + "timestamp": "2021-09-02T11:30:19.048Z", + "isEnabled": true, + "isReady": true, + "deviceId": "0" + }, + { + "properties": { + "active": { + "value": false, + "type": "boolean" + } + }, + "commands": {}, + "components": [], + "apiVersion": 1, + "uri": "https://api.viessmann-platform.io/iot/v1/equipment/installations/#######/gateways/################/devices/0/features/ventilation.operating.programs.eco", + "gatewayId": "################", + "feature": "ventilation.operating.programs.eco", + "timestamp": "2021-09-02T11:30:20.839Z", + "isEnabled": true, + "isReady": true, + "deviceId": "0" + }, + { + "properties": {}, + "commands": {}, + "components": [], + "apiVersion": 1, + "uri": 
"https://api.viessmann-platform.io/iot/v1/equipment/installations/#######/gateways/################/devices/0/features/heating.circuits.2.operating.programs.reduced", + "gatewayId": "################", + "feature": "heating.circuits.2.operating.programs.reduced", + "timestamp": "2021-09-02T11:30:21.710Z", + "isEnabled": false, + "isReady": true, + "deviceId": "0" + }, + { + "properties": {}, + "commands": {}, + "components": [], + "apiVersion": 1, + "uri": "https://api.viessmann-platform.io/iot/v1/equipment/installations/#######/gateways/################/devices/0/features/heating.buffer", + "gatewayId": "################", + "feature": "heating.buffer", + "timestamp": "2021-09-02T11:30:19.049Z", + "isEnabled": true, + "isReady": true, + "deviceId": "0" + }, + { + "properties": { + "value": { + "type": "string", + "value": "################" + } + }, + "commands": {}, + "components": [], + "apiVersion": 1, + "uri": "https://api.viessmann-platform.io/iot/v1/equipment/installations/#######/gateways/################/devices/0/features/heating.boiler.serial", + "gatewayId": "################", + "feature": "heating.boiler.serial", + "timestamp": "2021-09-02T11:30:20.272Z", + "isEnabled": true, + "isReady": true, + "deviceId": "0" + }, + { + "properties": {}, + "commands": {}, + "components": [], + "apiVersion": 1, + "uri": "https://api.viessmann-platform.io/iot/v1/equipment/installations/#######/gateways/################/devices/0/features/heating.circuits.2.operating.modes.heating", + "gatewayId": "################", + "feature": "heating.circuits.2.operating.modes.heating", + "timestamp": "2021-09-02T11:30:20.590Z", + "isEnabled": false, + "isReady": true, + "deviceId": "0" + }, + { + "properties": {}, + "commands": {}, + "components": [], + "apiVersion": 1, + "uri": "https://api.viessmann-platform.io/iot/v1/equipment/installations/#######/gateways/################/devices/0/features/heating.compressors.0.heat.production", + "gatewayId": "################", + "feature": "heating.compressors.0.heat.production", + "timestamp": "2021-09-02T11:30:19.049Z", + "isEnabled": true, + "isReady": true, + "deviceId": "0" + }, + { + "properties": {}, + "commands": {}, + "components": ["sensors"], + "apiVersion": 1, + "uri": "https://api.viessmann-platform.io/iot/v1/equipment/installations/#######/gateways/################/devices/0/features/heating.condensors.0", + "gatewayId": "################", + "feature": "heating.condensors.0", + "timestamp": "2021-09-02T11:30:19.049Z", + "isEnabled": true, + "isReady": true, + "deviceId": "0" + }, + { + "properties": { + "unit": { + "value": "celsius", + "type": "string" + }, + "value": { + "type": "number", + "value": 16.8, + "unit": "celsius" + }, + "status": { + "type": "string", + "value": "connected" + } + }, + "commands": {}, + "components": [], + "apiVersion": 1, + "uri": "https://api.viessmann-platform.io/iot/v1/equipment/installations/#######/gateways/################/devices/0/features/heating.primaryCircuit.sensors.temperature.supply", + "gatewayId": "################", + "feature": "heating.primaryCircuit.sensors.temperature.supply", + "timestamp": "2021-09-02T18:30:28.760Z", + "isEnabled": true, + "isReady": true, + "deviceId": "0" + }, + { + "properties": {}, + "commands": {}, + "components": [], + "apiVersion": 1, + "uri": "https://api.viessmann-platform.io/iot/v1/equipment/installations/#######/gateways/################/devices/0/features/heating.circuits.2.operating.programs.eco", + "gatewayId": "################", + "feature": 
"heating.circuits.2.operating.programs.eco", + "timestamp": "2021-09-02T11:30:21.714Z", + "isEnabled": false, + "isReady": true, + "deviceId": "0" + }, + { + "properties": {}, + "commands": {}, + "components": [], + "apiVersion": 1, + "uri": "https://api.viessmann-platform.io/iot/v1/equipment/installations/#######/gateways/################/devices/0/features/heating.circuits.1.operating.programs.comfort", + "gatewayId": "################", + "feature": "heating.circuits.1.operating.programs.comfort", + "timestamp": "2021-09-02T11:30:21.705Z", + "isEnabled": false, + "isReady": true, + "deviceId": "0" + }, + { + "properties": {}, + "commands": {}, + "components": ["time"], + "apiVersion": 1, + "uri": "https://api.viessmann-platform.io/iot/v1/equipment/installations/#######/gateways/################/devices/0/features/heating.device", + "gatewayId": "################", + "feature": "heating.device", + "timestamp": "2021-09-02T11:30:19.048Z", + "isEnabled": true, + "isReady": true, + "deviceId": "0" + }, + { + "properties": { + "active": { + "type": "boolean", + "value": false + }, + "phase": { + "type": "string", + "value": "off" + } + }, + "commands": {}, + "components": ["heat", "sensors", "statistics"], + "apiVersion": 1, + "uri": "https://api.viessmann-platform.io/iot/v1/equipment/installations/#######/gateways/################/devices/0/features/heating.compressors.0", + "gatewayId": "################", + "feature": "heating.compressors.0", + "timestamp": "2021-09-02T11:30:19.879Z", + "isEnabled": true, + "isReady": true, + "deviceId": "0" + }, + { + "properties": { + "active": { + "type": "boolean", + "value": false + } + }, + "commands": {}, + "components": [], + "apiVersion": 1, + "uri": "https://api.viessmann-platform.io/iot/v1/equipment/installations/#######/gateways/################/devices/0/features/heating.circuits.0.operating.programs.fixed", + "gatewayId": "################", + "feature": "heating.circuits.0.operating.programs.fixed", + "timestamp": "2021-09-02T11:30:20.637Z", + "isEnabled": true, + "isReady": true, + "deviceId": "0" + }, + { + "properties": {}, + "commands": {}, + "components": ["0"], + "apiVersion": 1, + "uri": "https://api.viessmann-platform.io/iot/v1/equipment/installations/#######/gateways/################/devices/0/features/heating.evaporators", + "gatewayId": "################", + "feature": "heating.evaporators", + "timestamp": "2021-09-02T11:30:19.049Z", + "isEnabled": true, + "isReady": true, + "deviceId": "0" + }, + { + "properties": {}, + "commands": {}, + "components": [], + "apiVersion": 1, + "uri": "https://api.viessmann-platform.io/iot/v1/equipment/installations/#######/gateways/################/devices/0/features/heating.boiler.sensors", + "gatewayId": "################", + "feature": "heating.boiler.sensors", + "timestamp": "2021-09-02T11:30:19.049Z", + "isEnabled": true, + "isReady": true, + "deviceId": "0" + }, + { + "properties": {}, + "commands": {}, + "components": ["curve", "schedule"], + "apiVersion": 1, + "uri": "https://api.viessmann-platform.io/iot/v1/equipment/installations/#######/gateways/################/devices/0/features/heating.circuits.1.heating", + "gatewayId": "################", + "feature": "heating.circuits.1.heating", + "timestamp": "2021-09-02T11:30:19.048Z", + "isEnabled": true, + "isReady": true, + "deviceId": "0" + }, + { + "properties": {}, + "commands": {}, + "components": [ + "circulation", + "frostprotection", + "heating", + "operating", + "sensors", + "temperature" + ], + "apiVersion": 1, + "uri": 
"https://api.viessmann-platform.io/iot/v1/equipment/installations/#######/gateways/################/devices/0/features/heating.circuits.2", + "gatewayId": "################", + "feature": "heating.circuits.2", + "timestamp": "2021-09-02T11:30:20.401Z", + "isEnabled": false, + "isReady": true, + "deviceId": "0" + }, + { + "properties": {}, + "commands": {}, + "components": ["hysteresis", "main", "temp2"], + "apiVersion": 1, + "uri": "https://api.viessmann-platform.io/iot/v1/equipment/installations/#######/gateways/################/devices/0/features/heating.dhw.temperature", + "gatewayId": "################", + "feature": "heating.dhw.temperature", + "timestamp": "2021-09-02T11:30:19.048Z", + "isEnabled": true, + "isReady": true, + "deviceId": "0" + }, + { + "properties": {}, + "commands": {}, + "components": ["modes", "programs"], + "apiVersion": 1, + "uri": "https://api.viessmann-platform.io/iot/v1/equipment/installations/#######/gateways/################/devices/0/features/heating.circuits.2.operating", + "gatewayId": "################", + "feature": "heating.circuits.2.operating", + "timestamp": "2021-09-02T11:30:19.048Z", + "isEnabled": true, + "isReady": true, + "deviceId": "0" + }, + { + "properties": {}, + "commands": {}, + "components": [], + "apiVersion": 1, + "uri": "https://api.viessmann-platform.io/iot/v1/equipment/installations/#######/gateways/################/devices/0/features/heating.circuits.1.operating.modes.heating", + "gatewayId": "################", + "feature": "heating.circuits.1.operating.modes.heating", + "timestamp": "2021-09-02T11:30:20.588Z", + "isEnabled": false, + "isReady": true, + "deviceId": "0" + }, + { + "properties": {}, + "commands": {}, + "components": ["production"], + "apiVersion": 1, + "uri": "https://api.viessmann-platform.io/iot/v1/equipment/installations/#######/gateways/################/devices/0/features/heating.compressors.1.heat", + "gatewayId": "################", + "feature": "heating.compressors.1.heat", + "timestamp": "2021-09-02T11:30:19.049Z", + "isEnabled": true, + "isReady": true, + "deviceId": "0" + }, + { + "properties": {}, + "commands": {}, + "components": ["programs"], + "apiVersion": 1, + "uri": "https://api.viessmann-platform.io/iot/v1/equipment/installations/#######/gateways/################/devices/0/features/heating.operating", + "gatewayId": "################", + "feature": "heating.operating", + "timestamp": "2021-09-02T11:30:19.048Z", + "isEnabled": true, + "isReady": true, + "deviceId": "0" + }, + { + "properties": {}, + "commands": {}, + "components": [], + "apiVersion": 1, + "uri": "https://api.viessmann-platform.io/iot/v1/equipment/installations/#######/gateways/################/devices/0/features/heating.condensors.0.sensors.temperature", + "gatewayId": "################", + "feature": "heating.condensors.0.sensors.temperature", + "timestamp": "2021-09-02T11:30:19.049Z", + "isEnabled": true, + "isReady": true, + "deviceId": "0" + }, + { + "properties": {}, + "commands": {}, + "components": ["room", "supply"], + "apiVersion": 1, + "uri": "https://api.viessmann-platform.io/iot/v1/equipment/installations/#######/gateways/################/devices/0/features/heating.circuits.2.sensors.temperature", + "gatewayId": "################", + "feature": "heating.circuits.2.sensors.temperature", + "timestamp": "2021-09-02T11:30:19.050Z", + "isEnabled": true, + "isReady": true, + "deviceId": "0" + }, + { + "properties": {}, + "commands": {}, + "components": ["production"], + "apiVersion": 1, + "uri": 
"https://api.viessmann-platform.io/iot/v1/equipment/installations/#######/gateways/################/devices/0/features/heating.compressors.0.heat", + "gatewayId": "################", + "feature": "heating.compressors.0.heat", + "timestamp": "2021-09-02T11:30:19.049Z", + "isEnabled": true, + "isReady": true, + "deviceId": "0" + }, + { + "properties": { + "active": { + "type": "boolean", + "value": false + } + }, + "commands": {}, + "components": [], + "apiVersion": 1, + "uri": "https://api.viessmann-platform.io/iot/v1/equipment/installations/#######/gateways/################/devices/0/features/heating.circuits.0.operating.modes.dhw", + "gatewayId": "################", + "feature": "heating.circuits.0.operating.modes.dhw", + "timestamp": "2021-09-02T11:30:20.537Z", + "isEnabled": true, + "isReady": true, + "deviceId": "0" + }, + { + "properties": {}, + "commands": {}, + "components": [], + "apiVersion": 1, + "uri": "https://api.viessmann-platform.io/iot/v1/equipment/installations/#######/gateways/################/devices/0/features/heating.circuits.2.sensors.temperature.room", + "gatewayId": "################", + "feature": "heating.circuits.2.sensors.temperature.room", + "timestamp": "2021-09-02T11:30:20.653Z", + "isEnabled": false, + "isReady": true, + "deviceId": "0" + }, + { + "properties": {}, + "commands": {}, + "components": ["temperature"], + "apiVersion": 1, + "uri": "https://api.viessmann-platform.io/iot/v1/equipment/installations/#######/gateways/################/devices/0/features/heating.condensors.0.sensors", + "gatewayId": "################", + "feature": "heating.condensors.0.sensors", + "timestamp": "2021-09-02T11:30:19.049Z", + "isEnabled": true, + "isReady": true, + "deviceId": "0" + }, + { + "properties": { + "active": { + "value": false, + "type": "boolean" + }, + "temperature": { + "value": 23, + "unit": "", + "type": "number" + } + }, + "commands": { + "activate": { + "uri": "https://api.viessmann-platform.io/iot/v1/equipment/installations/#######/gateways/################/devices/0/features/heating.circuits.0.operating.programs.eco/commands/activate", + "name": "activate", + "isExecutable": true, + "params": {} + }, + "deactivate": { + "uri": "https://api.viessmann-platform.io/iot/v1/equipment/installations/#######/gateways/################/devices/0/features/heating.circuits.0.operating.programs.eco/commands/deactivate", + "name": "deactivate", + "isExecutable": false, + "params": {} + } + }, + "components": [], + "apiVersion": 1, + "uri": "https://api.viessmann-platform.io/iot/v1/equipment/installations/#######/gateways/################/devices/0/features/heating.circuits.0.operating.programs.eco", + "gatewayId": "################", + "feature": "heating.circuits.0.operating.programs.eco", + "timestamp": "2021-09-02T11:30:21.713Z", + "isEnabled": true, + "isReady": true, + "deviceId": "0" + }, + { + "properties": { + "status": { + "type": "string", + "value": "off" + } + }, + "commands": {}, + "components": [], + "apiVersion": 1, + "uri": "https://api.viessmann-platform.io/iot/v1/equipment/installations/#######/gateways/################/devices/0/features/heating.circuits.0.circulation.pump", + "gatewayId": "################", + "feature": "heating.circuits.0.circulation.pump", + "timestamp": "2021-09-02T11:30:20.739Z", + "isEnabled": true, + "isReady": true, + "deviceId": "0" + }, + { + "properties": {}, + "commands": {}, + "components": [], + "apiVersion": 1, + "uri": 
"https://api.viessmann-platform.io/iot/v1/equipment/installations/#######/gateways/################/devices/0/features/heating.circuits.1.frostprotection", + "gatewayId": "################", + "feature": "heating.circuits.1.frostprotection", + "timestamp": "2021-09-02T11:30:20.467Z", + "isEnabled": false, + "isReady": true, + "deviceId": "0" + }, + { + "properties": {}, + "commands": {}, + "components": ["outside", "return"], + "apiVersion": 1, + "uri": "https://api.viessmann-platform.io/iot/v1/equipment/installations/#######/gateways/################/devices/0/features/heating.sensors.temperature", + "gatewayId": "################", + "feature": "heating.sensors.temperature", + "timestamp": "2021-09-02T11:30:19.049Z", + "isEnabled": true, + "isReady": true, + "deviceId": "0" + }, + { + "properties": { + "unit": { + "value": "celsius", + "type": "string" + }, + "status": { + "type": "string", + "value": "notConnected" } - ] + }, + "commands": {}, + "components": [], + "apiVersion": 1, + "uri": "https://api.viessmann-platform.io/iot/v1/equipment/installations/#######/gateways/################/devices/0/features/heating.dhw.sensors.temperature.outlet", + "gatewayId": "################", + "feature": "heating.dhw.sensors.temperature.outlet", + "timestamp": "2021-09-02T11:30:20.738Z", + "isEnabled": true, + "isReady": true, + "deviceId": "0" + }, + { + "properties": {}, + "commands": {}, + "components": [], + "apiVersion": 1, + "uri": "https://api.viessmann-platform.io/iot/v1/equipment/installations/#######/gateways/################/devices/0/features/heating.circuits.0.operating.modes.heating", + "gatewayId": "################", + "feature": "heating.circuits.0.operating.modes.heating", + "timestamp": "2021-09-02T11:30:20.585Z", + "isEnabled": false, + "isReady": true, + "deviceId": "0" + }, + { + "properties": {}, + "commands": {}, + "components": ["temperature"], + "apiVersion": 1, + "uri": "https://api.viessmann-platform.io/iot/v1/equipment/installations/#######/gateways/################/devices/0/features/heating.evaporators.0.sensors", + "gatewayId": "################", + "feature": "heating.evaporators.0.sensors", + "timestamp": "2021-09-02T11:30:19.049Z", + "isEnabled": true, + "isReady": true, + "deviceId": "0" + } + ] } diff --git a/tests/test_TestForMissingProperties.py b/tests/test_TestForMissingProperties.py index ecf6623..73d9084 100644 --- a/tests/test_TestForMissingProperties.py +++ b/tests/test_TestForMissingProperties.py @@ -25,6 +25,23 @@ class TestForMissingProperties(unittest.TestCase): 'heating.circuits.0.temperature.levels', # hint: command 'heating.dhw.temperature.hysteresis', # hint: command + + # todo: implement ventilation + 'ventilation.schedule', + 'ventilation.operating.programs', + 'ventilation.operating.programs.eco', + 'ventilation.operating.programs.comfort', + 'ventilation.operating.programs.basic', + 'ventilation.operating.programs.active', + 'ventilation.operating.programs.holiday', + 'ventilation.operating.programs.intensive', + 'ventilation.operating.programs.standby', + 'ventilation.operating.programs.standard', + 'ventilation.operating.programs.reduced', + 'ventilation.operating.modes.standby', + 'ventilation.operating.modes.active', + 'ventilation.operating.modes.standard', + 'ventilation.operating.modes.ventilation' ] all_features = self.read_all_features() diff --git a/tests/test_Utils.py b/tests/test_Utils.py new file mode 100644 index 0000000..a004838 --- /dev/null +++ b/tests/test_Utils.py @@ -0,0 +1,11 @@ +import unittest +from datetime import 
timedelta + +from PyViCare.PyViCareUtils import ViCareTimer + + +class UtilTests(unittest.TestCase): + + def test_parse_timespan(self): + self.assertEqual(timedelta(hours=2, minutes=4), ViCareTimer.parse_time_as_delta("02:04")) + self.assertEqual(timedelta(hours=24, minutes=0), ViCareTimer.parse_time_as_delta("24:00")) diff --git a/tests/test_Vitocal222S.py b/tests/test_Vitocal222S.py new file mode 100644 index 0000000..324a1a4 --- /dev/null +++ b/tests/test_Vitocal222S.py @@ -0,0 +1,18 @@ +import datetime +import unittest +from unittest.mock import patch + +from PyViCare.PyViCareHeatPump import HeatPump +from PyViCare.PyViCareUtils import ViCareTimer +from tests.ViCareServiceMock import ViCareServiceMock + + +class Vitocal222S(unittest.TestCase): + def setUp(self): + self.service = ViCareServiceMock('response/Vitocal222S.json') + self.device = HeatPump(self.service) + + def test_getDomesticHotWaterActiveMode_10_10_time(self): + with patch.object(ViCareTimer, 'now', return_value=datetime.datetime(2000, 1, 1, 10, 10, 0)): + self.assertEqual( + self.device.getDomesticHotWaterActiveMode(), 'normal')
Schedule entries ending with 24:00 fail parsing with datetime.strptime

My schedule entries frequently finish at midnight, and Viessmann writes this as `"end": "24:00"`, causing `datetime.strptime` in `endTime = datetime.strptime(s["end"], '%H:%M').time()` to fail with `ValueError: time data '24:00' does not match format '%H:%M'`.
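The test diff above asserts that `ViCareTimer.parse_time_as_delta("02:04")` yields `timedelta(hours=2, minutes=4)` and that `"24:00"` yields `timedelta(hours=24)`. A minimal sketch consistent with those assertions — splitting the string by hand rather than going through `strptime` — is shown below; the actual PyViCare implementation may differ in details:

```python
from datetime import timedelta

def parse_time_as_delta(time_string: str) -> timedelta:
    # Split "HH:MM" manually instead of using datetime.strptime, so the
    # schedule boundary "24:00" parses cleanly instead of raising ValueError.
    hours, minutes = time_string.split(":")
    return timedelta(hours=int(hours), minutes=int(minutes))

print(parse_time_as_delta("02:04"))  # 2:04:00
print(parse_time_as_delta("24:00"))  # 1 day, 0:00:00
```

Returning a `timedelta` rather than a `time` also sidesteps the underlying limitation: `datetime.time` cannot represent 24:00 at all, while a duration of 24 hours is perfectly valid.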
0.0
bfb36c6feb3d9035a89f01a1a187b049987875dd
[ "tests/test_TestForMissingProperties.py::TestForMissingProperties::test_missingProperties", "tests/test_TestForMissingProperties.py::TestForMissingProperties::test_unverifiedProperties", "tests/test_Utils.py::UtilTests::test_parse_timespan", "tests/test_Vitocal222S.py::Vitocal222S::test_getDomesticHotWaterActiveMode_10_10_time" ]
[]
{ "failed_lite_validators": [ "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false }
2021-09-09 10:59:26+00:00
apache-2.0
5,595
somm15__PyViCare-198
diff --git a/PyViCare/PyViCareDevice.py b/PyViCare/PyViCareDevice.py index 18a993d..cda7fa1 100644 --- a/PyViCare/PyViCareDevice.py +++ b/PyViCare/PyViCareDevice.py @@ -440,6 +440,8 @@ class HeatingCircuit(DeviceWithComponent): @handleNotSupported def getCurrentDesiredTemperature(self): active_programm = self.getActiveProgram() + if active_programm in ['standby']: + return None return self.service.getProperty(f"heating.circuits.{self.circuit}.operating.programs.{active_programm}")["properties"]["temperature"]["value"] @handleNotSupported
somm15/PyViCare
a7656e432db4cfa6fe9e59a7c7d19b319d892774
diff --git a/tests/test_Vitocal222S.py b/tests/test_Vitocal222S.py index abe4f81..a85b510 100644 --- a/tests/test_Vitocal222S.py +++ b/tests/test_Vitocal222S.py @@ -14,3 +14,7 @@ class Vitocal222S(unittest.TestCase): with now_is('2000-01-01 10:10:00'): self.assertEqual( self.device.getDomesticHotWaterActiveMode(), 'normal') + + def test_getCurrentDesiredTemperature(self): + self.assertEqual( + self.device.circuits[0].getCurrentDesiredTemperature(), 23) diff --git a/tests/test_Vitodens300W.py b/tests/test_Vitodens300W.py index 82b7c35..50d3302 100644 --- a/tests/test_Vitodens300W.py +++ b/tests/test_Vitodens300W.py @@ -49,6 +49,10 @@ class Vitodens300W(unittest.TestCase): self.assertEqual( self.device.getDomesticHotWaterCirculationPumpActive(), True) + def test_getCurrentDesiredTemperature(self): + self.assertEqual( + self.device.circuits[0].getCurrentDesiredTemperature(), None) + # Is currently (August, 2021) not supported by the Viessman API even though it works for the Vitodens 200W. def test_getDomesticHotWaterOutletTemperature(self): self.assertRaises(PyViCareNotSupportedFeatureError,
getCurrentDesiredTemperature for standby throws a not-supported error

The only circuit function that stopped working since the last API update is getCurrentDesiredTemperature(). Error message:

```
site-packages\PyViCare\PyViCareUtils.py", line 45, in feature_flag_wrapper
    return wrapper(*args, **kwargs)
site-packages\PyViCare\PyViCareUtils.py", line 40, in wrapper
    raise PyViCareNotSupportedFeatureError(func.__name__)
PyViCare.PyViCareUtils.PyViCareNotSupportedFeatureError: getCurrentDesiredTemperature
```

_Originally posted by @kruemel746 in https://github.com/somm15/PyViCare/issues/194#issuecomment-931603388_
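The patch field for this instance resolves the error with an early return: when the active program is `standby`, there is no temperature property to read, so the method reports `None` instead of letting the lookup fail. A standalone sketch of that guard-clause pattern, using a hypothetical payload shape and values for illustration:

```python
from typing import Optional

def desired_temperature(programs: dict, active_program: str) -> Optional[float]:
    # Guard clause mirroring the fix: the 'standby' program exposes no
    # temperature property, so report None instead of failing the lookup
    # (which previously surfaced as PyViCareNotSupportedFeatureError).
    if active_program in ['standby']:
        return None
    return programs[active_program]["properties"]["temperature"]["value"]

# Illustrative payload (hypothetical values):
programs = {"comfort": {"properties": {"temperature": {"value": 23}}}}
print(desired_temperature(programs, "comfort"))  # 23
print(desired_temperature(programs, "standby"))  # None
```

This matches the expectations in the test diff: the Vitocal222S fixture (active program with a temperature) returns 23, while the Vitodens300W fixture (standby) returns `None` rather than raising.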
0.0
a7656e432db4cfa6fe9e59a7c7d19b319d892774
[ "tests/test_Vitodens300W.py::Vitodens300W::test_getCurrentDesiredTemperature" ]
[ "tests/test_Vitocal222S.py::Vitocal222S::test_getCurrentDesiredTemperature", "tests/test_Vitocal222S.py::Vitocal222S::test_getDomesticHotWaterActiveMode_10_10_time", "tests/test_Vitodens300W.py::Vitodens300W::test_getActive", "tests/test_Vitodens300W.py::Vitodens300W::test_getBurnerHours", "tests/test_Vitodens300W.py::Vitodens300W::test_getBurnerModulation", "tests/test_Vitodens300W.py::Vitodens300W::test_getBurnerStarts", "tests/test_Vitodens300W.py::Vitodens300W::test_getDomesticHotWaterChargingLevel", "tests/test_Vitodens300W.py::Vitodens300W::test_getDomesticHotWaterCirculationPumpActive", "tests/test_Vitodens300W.py::Vitodens300W::test_getDomesticHotWaterCirculationScheduleModes", "tests/test_Vitodens300W.py::Vitodens300W::test_getDomesticHotWaterOutletTemperature", "tests/test_Vitodens300W.py::Vitodens300W::test_getFrostProtectionActive", "tests/test_Vitodens300W.py::Vitodens300W::test_getModes", "tests/test_Vitodens300W.py::Vitodens300W::test_getPowerConsumptionDays", "tests/test_Vitodens300W.py::Vitodens300W::test_getPrograms" ]
{ "failed_lite_validators": [ "has_pytest_match_arg" ], "has_test_patch": true, "is_lite": false }
2021-10-01 08:03:24+00:00
apache-2.0
5,596
somm15__PyViCare-313
diff --git a/PyViCare/PyViCareHeatingDevice.py b/PyViCare/PyViCareHeatingDevice.py index e68be11..13e75df 100644 --- a/PyViCare/PyViCareHeatingDevice.py +++ b/PyViCare/PyViCareHeatingDevice.py @@ -432,31 +432,44 @@ class HeatingCircuit(HeatingDeviceWithComponent): @handleNotSupported def getActiveProgramMinTemperature(self): active_program = self.getActiveProgram() - if active_program in ['standby']: + return self.getProgramMinTemperature(active_program) + + @handleNotSupported + def getActiveProgramMaxTemperature(self): + active_program = self.getActiveProgram() + return self.getProgramMaxTemperature(active_program) + + @handleNotSupported + def getActiveProgramStepping(self): + active_program = self.getActiveProgram() + return self.getProgramStepping(active_program) + + @handleNotSupported + def getProgramMinTemperature(self, program: str): + if program in ['standby']: return None - return self.service.getProperty(f"heating.circuits.{self.circuit}.operating.programs.{active_program}")[ + return self.service.getProperty(f"heating.circuits.{self.circuit}.operating.programs.{program}")[ "commands"]["setTemperature"]["params"]["targetTemperature"]["constraints"]["min"] @handleNotSupported - def getActiveProgramMaxTemperature(self): - active_program = self.getActiveProgram() - if active_program in ['standby']: + def getProgramMaxTemperature(self, program: str): + if program in ['standby']: return None - - return self.service.getProperty(f"heating.circuits.{self.circuit}.operating.programs.{active_program}")[ + + return self.service.getProperty(f"heating.circuits.{self.circuit}.operating.programs.{program}")[ "commands"]["setTemperature"]["params"]["targetTemperature"]["constraints"]["max"] @handleNotSupported - def getActiveProgramStepping(self): - active_program = self.getActiveProgram() - if active_program in ['standby']: + def getProgramStepping(self, program: str): + if program in ['standby']: return None - - return self.service.getProperty(f"heating.circuits.{self.circuit}.operating.programs.{active_program}")[ + + return self.service.getProperty(f"heating.circuits.{self.circuit}.operating.programs.{program}")[ "commands"]["setTemperature"]["params"]["targetTemperature"]["constraints"]["stepping"] + """ Activate a program NOTE DEVICE_COMMUNICATION_ERROR can just mean that the program is already on
somm15/PyViCare
0578591c2be473e4b8edbf3a806d287e662e95f7
diff --git a/tests/test_Vitocal200.py b/tests/test_Vitocal200.py index 2411726..39edcf1 100644 --- a/tests/test_Vitocal200.py +++ b/tests/test_Vitocal200.py @@ -100,4 +100,13 @@ class Vitocal200(unittest.TestCase): self.assertEqual(self.device.getCircuit(0).getActiveProgramMaxTemperature(), 30) def test_getActiveProgramMaxTemperature(self): - self.assertEqual(self.device.getCircuit(0).getActiveProgramStepping(), 1) \ No newline at end of file + self.assertEqual(self.device.getCircuit(0).getActiveProgramStepping(), 1) + + def test_getNormalProgramMinTemperature(self): + self.assertEqual(self.device.getCircuit(0).getProgramMinTemperature("normal"), 10) + + def test_getNormalProgramMaxTemperature(self): + self.assertEqual(self.device.getCircuit(0).getProgramMaxTemperature("normal"), 30) + + def test_getNormalProgramStepping(self): + self.assertEqual(self.device.getCircuit(0).getProgramStepping("normal"), 1) \ No newline at end of file
min and max temperature for desired program There is a function setProgramTemperature to set the temperature for a certain program ```python def setProgramTemperature(self, program: str, temperature: int): ``` but no way to find out the allowed range and stepping, even though the ViCare API contains these values ```text { 'properties': { 'active': { 'value': False, 'type': 'boolean' }, 'demand': { 'value': 'unknown', 'type': 'string' }, 'temperature': { 'value': 65, 'unit': 'celsius', 'type': 'number' } }, 'commands': { 'setTemperature': { 'uri': '.../devices/0/features/heating.circuits.0.operating.programs.normal/commands/setTemperature', 'name': 'setTemperature', 'isExecutable': True, 'params': { 'targetTemperature': { 'type': 'number', 'required': True, 'constraints': { 'min': 20, # <--- here 'max': 82, # <--- here 'stepping': 1 # <--- here } } } } }, 'apiVersion': 1, 'uri': '../devices/0/features/heating.circuits.0.operating.programs.normal', 'gatewayId': '...', 'feature': 'heating.circuits.0.operating.programs.normal', 'timestamp': '2022-09-25T13:48:31.938Z', 'isEnabled': True, 'isReady': True, 'deviceId': '0' }, ``` Similar functions exist in PyViCare for the DHW part: getDomesticHotWaterMinTemperature and getDomesticHotWaterMaxTemperature.
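To sketch what the requested accessors would read, assuming a feature response shaped like the one quoted above; `get_temperature_constraints` is a hypothetical helper name, not a PyViCare API.

```python
def get_temperature_constraints(feature: dict) -> dict:
    # Walk the feature response down to the setTemperature
    # constraints block and return min/max/stepping.
    constraints = (feature["commands"]["setTemperature"]["params"]
                   ["targetTemperature"]["constraints"])
    return {"min": constraints["min"],
            "max": constraints["max"],
            "stepping": constraints["stepping"]}

# For the response quoted above this returns
# {'min': 20, 'max': 82, 'stepping': 1}.
```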
0.0
0578591c2be473e4b8edbf3a806d287e662e95f7
[ "tests/test_Vitocal200.py::Vitocal200::test_getNormalProgramMaxTemperature", "tests/test_Vitocal200.py::Vitocal200::test_getNormalProgramMinTemperature", "tests/test_Vitocal200.py::Vitocal200::test_getNormalProgramStepping" ]
[ "tests/test_Vitocal200.py::Vitocal200::test_getActiveProgramMaxTemperature", "tests/test_Vitocal200.py::Vitocal200::test_getActiveProgramMinTemperature", "tests/test_Vitocal200.py::Vitocal200::test_getAvailableCompressors", "tests/test_Vitocal200.py::Vitocal200::test_getCompressorActive", "tests/test_Vitocal200.py::Vitocal200::test_getCompressorHours", "tests/test_Vitocal200.py::Vitocal200::test_getCompressorHoursLoadClass1", "tests/test_Vitocal200.py::Vitocal200::test_getCompressorHoursLoadClass2", "tests/test_Vitocal200.py::Vitocal200::test_getCompressorHoursLoadClass3", "tests/test_Vitocal200.py::Vitocal200::test_getCompressorHoursLoadClass4", "tests/test_Vitocal200.py::Vitocal200::test_getCompressorHoursLoadClass5", "tests/test_Vitocal200.py::Vitocal200::test_getCompressorStarts", "tests/test_Vitocal200.py::Vitocal200::test_getDomesticHotWaterActiveMode_fri_10_10_time", "tests/test_Vitocal200.py::Vitocal200::test_getDomesticHotWaterCirculationPumpActive", "tests/test_Vitocal200.py::Vitocal200::test_getDomesticHotWaterDesiredTemperature_fri_10_10_time", "tests/test_Vitocal200.py::Vitocal200::test_getDomesticHotWaterDesiredTemperature_fri_20_00_time", "tests/test_Vitocal200.py::Vitocal200::test_getHeatingCurveShift", "tests/test_Vitocal200.py::Vitocal200::test_getHeatingCurveSlope", "tests/test_Vitocal200.py::Vitocal200::test_getModes", "tests/test_Vitocal200.py::Vitocal200::test_getPrograms", "tests/test_Vitocal200.py::Vitocal200::test_getReturnTemperature", "tests/test_Vitocal200.py::Vitocal200::test_getReturnTemperaturePrimaryCircuit", "tests/test_Vitocal200.py::Vitocal200::test_getSupplyTemperaturePrimaryCircuit" ]
{ "failed_lite_validators": [], "has_test_patch": true, "is_lite": true }
2023-05-12 20:10:14+00:00
apache-2.0
5,597
somm15__PyViCare-316
diff --git a/PyViCare/PyViCareHeatPump.py b/PyViCare/PyViCareHeatPump.py index 6a15ad7..3c407ba 100644 --- a/PyViCare/PyViCareHeatPump.py +++ b/PyViCare/PyViCareHeatPump.py @@ -138,3 +138,7 @@ class Compressor(HeatingDeviceWithComponent): @handleNotSupported def getActive(self): return self.service.getProperty(f"heating.compressors.{self.compressor}")["properties"]["active"]["value"] + + @handleNotSupported + def getPhase(self): + return self.service.getProperty(f"heating.compressors.{self.compressor}")["properties"]["phase"]["value"]
somm15/PyViCare
5dec7aeccbbe06c5e6e99cd103e0cdbf6d96cee1
diff --git a/tests/test_Vitocal200.py b/tests/test_Vitocal200.py index 39edcf1..202c14c 100644 --- a/tests/test_Vitocal200.py +++ b/tests/test_Vitocal200.py @@ -12,38 +12,42 @@ class Vitocal200(unittest.TestCase): self.device = HeatPump(self.service) def test_getCompressorActive(self): - self.assertEqual(self.device.compressors[0].getActive(), False) + self.assertEqual(self.device.getCompressor(0).getActive(), False) def test_getCompressorHours(self): self.assertAlmostEqual( - self.device.compressors[0].getHours(), 13651.9) + self.device.getCompressor(0).getHours(), 13651.9) def test_getAvailableCompressors(self): self.assertEqual(self.device.getAvailableCompressors(), ['0']) def test_getCompressorStarts(self): self.assertAlmostEqual( - self.device.compressors[0].getStarts(), 6973) + self.device.getCompressor(0).getStarts(), 6973) def test_getCompressorHoursLoadClass1(self): self.assertAlmostEqual( - self.device.compressors[0].getHoursLoadClass1(), 366) + self.device.getCompressor(0).getHoursLoadClass1(), 366) def test_getCompressorHoursLoadClass2(self): self.assertAlmostEqual( - self.device.compressors[0].getHoursLoadClass2(), 5579) + self.device.getCompressor(0).getHoursLoadClass2(), 5579) def test_getCompressorHoursLoadClass3(self): self.assertAlmostEqual( - self.device.compressors[0].getHoursLoadClass3(), 6024) + self.device.getCompressor(0).getHoursLoadClass3(), 6024) def test_getCompressorHoursLoadClass4(self): self.assertAlmostEqual( - self.device.compressors[0].getHoursLoadClass4(), 659) + self.device.getCompressor(0).getHoursLoadClass4(), 659) def test_getCompressorHoursLoadClass5(self): self.assertAlmostEqual( - self.device.compressors[0].getHoursLoadClass5(), 715) + self.device.getCompressor(0).getHoursLoadClass5(), 715) + + def test_getCompressorPhase(self): + self.assertEqual( + self.device.getCompressor(0).getPhase(), "off") def test_getHeatingCurveSlope(self): self.assertAlmostEqual( diff --git a/tests/test_Vitocal250A.py b/tests/test_Vitocal250A.py index fbc904f..caa5148 100644 --- a/tests/test_Vitocal250A.py +++ b/tests/test_Vitocal250A.py @@ -124,3 +124,8 @@ class Vitocal250A(unittest.TestCase): def test_getPowerSummaryConsumptionDomesticHotWaterLastYear(self): self.assertEqual( self.device.getPowerSummaryConsumptionDomesticHotWaterLastYear(), 177.7) + + def test_getCompressorPhase(self): + self.assertEqual( + self.device.getCompressor(0).getPhase(), "ready") +
Compressor feature 'phase' Hi, in addition to 'active', the heat pump compressor exposes a 'phase' feature. It can easily be read by adding @handleNotSupported def getPhase(self): return self.service.getProperty(f"heating.compressors.{self.compressor}")["properties"]["phase"]["value"] at the end of PyViCareHeatPump.py. I am not familiar with GitHub and don't want to mess up the code. If it is interesting to the crowd, maybe it can be added at the next maintenance.
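A usage sketch mirroring the merged test patch; it assumes the repository's test fixtures (`ViCareServiceMock` and the `Vitocal250A.json` response file) and so only runs inside that test suite.

```python
import unittest

from PyViCare.PyViCareHeatPump import HeatPump
from tests.ViCareServiceMock import ViCareServiceMock

class CompressorPhaseExample(unittest.TestCase):
    def test_phase(self):
        # Mirrors the merged test: a mocked Vitocal 250-A reports 'ready'.
        service = ViCareServiceMock('response/Vitocal250A.json')
        device = HeatPump(service)
        self.assertEqual(device.getCompressor(0).getPhase(), "ready")
```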
0.0
5dec7aeccbbe06c5e6e99cd103e0cdbf6d96cee1
[ "tests/test_Vitocal200.py::Vitocal200::test_getCompressorPhase", "tests/test_Vitocal250A.py::Vitocal250A::test_getCompressorPhase" ]
[ "tests/test_Vitocal200.py::Vitocal200::test_getActiveProgramMaxTemperature", "tests/test_Vitocal200.py::Vitocal200::test_getActiveProgramMinTemperature", "tests/test_Vitocal200.py::Vitocal200::test_getAvailableCompressors", "tests/test_Vitocal200.py::Vitocal200::test_getCompressorActive", "tests/test_Vitocal200.py::Vitocal200::test_getCompressorHours", "tests/test_Vitocal200.py::Vitocal200::test_getCompressorHoursLoadClass1", "tests/test_Vitocal200.py::Vitocal200::test_getCompressorHoursLoadClass2", "tests/test_Vitocal200.py::Vitocal200::test_getCompressorHoursLoadClass3", "tests/test_Vitocal200.py::Vitocal200::test_getCompressorHoursLoadClass4", "tests/test_Vitocal200.py::Vitocal200::test_getCompressorHoursLoadClass5", "tests/test_Vitocal200.py::Vitocal200::test_getCompressorStarts", "tests/test_Vitocal200.py::Vitocal200::test_getDomesticHotWaterActiveMode_fri_10_10_time", "tests/test_Vitocal200.py::Vitocal200::test_getDomesticHotWaterCirculationPumpActive", "tests/test_Vitocal200.py::Vitocal200::test_getDomesticHotWaterDesiredTemperature_fri_10_10_time", "tests/test_Vitocal200.py::Vitocal200::test_getDomesticHotWaterDesiredTemperature_fri_20_00_time", "tests/test_Vitocal200.py::Vitocal200::test_getHeatingCurveShift", "tests/test_Vitocal200.py::Vitocal200::test_getHeatingCurveSlope", "tests/test_Vitocal200.py::Vitocal200::test_getModes", "tests/test_Vitocal200.py::Vitocal200::test_getNormalProgramMaxTemperature", "tests/test_Vitocal200.py::Vitocal200::test_getNormalProgramMinTemperature", "tests/test_Vitocal200.py::Vitocal200::test_getNormalProgramStepping", "tests/test_Vitocal200.py::Vitocal200::test_getPrograms", "tests/test_Vitocal200.py::Vitocal200::test_getReturnTemperature", "tests/test_Vitocal200.py::Vitocal200::test_getReturnTemperaturePrimaryCircuit", "tests/test_Vitocal200.py::Vitocal200::test_getSupplyTemperaturePrimaryCircuit", "tests/test_Vitocal250A.py::Vitocal250A::test_getBufferMainTemperature", "tests/test_Vitocal250A.py::Vitocal250A::test_getCompressorActive", "tests/test_Vitocal250A.py::Vitocal250A::test_getCompressorHours", "tests/test_Vitocal250A.py::Vitocal250A::test_getCompressorStarts", "tests/test_Vitocal250A.py::Vitocal250A::test_getFrostProtectionActive", "tests/test_Vitocal250A.py::Vitocal250A::test_getHeatingCurveShift", "tests/test_Vitocal250A.py::Vitocal250A::test_getHeatingCurveSlope", "tests/test_Vitocal250A.py::Vitocal250A::test_getModes", "tests/test_Vitocal250A.py::Vitocal250A::test_getOutsideTemperature", "tests/test_Vitocal250A.py::Vitocal250A::test_getPowerConsumptionDomesticHotWaterToday", "tests/test_Vitocal250A.py::Vitocal250A::test_getPowerConsumptionToday", "tests/test_Vitocal250A.py::Vitocal250A::test_getPowerConsumptionUnit", "tests/test_Vitocal250A.py::Vitocal250A::test_getPowerSummaryConsumptionDomesticHotWaterCurrentDay", "tests/test_Vitocal250A.py::Vitocal250A::test_getPowerSummaryConsumptionDomesticHotWaterCurrentMonth", "tests/test_Vitocal250A.py::Vitocal250A::test_getPowerSummaryConsumptionDomesticHotWaterCurrentYear", "tests/test_Vitocal250A.py::Vitocal250A::test_getPowerSummaryConsumptionDomesticHotWaterLastMonth", "tests/test_Vitocal250A.py::Vitocal250A::test_getPowerSummaryConsumptionDomesticHotWaterLastSevenDays", "tests/test_Vitocal250A.py::Vitocal250A::test_getPowerSummaryConsumptionDomesticHotWaterLastYear", "tests/test_Vitocal250A.py::Vitocal250A::test_getPowerSummaryConsumptionDomesticHotWaterUnit", "tests/test_Vitocal250A.py::Vitocal250A::test_getPowerSummaryConsumptionHeatingCurrentDay", 
"tests/test_Vitocal250A.py::Vitocal250A::test_getPowerSummaryConsumptionHeatingCurrentMonth", "tests/test_Vitocal250A.py::Vitocal250A::test_getPowerSummaryConsumptionHeatingCurrentYear", "tests/test_Vitocal250A.py::Vitocal250A::test_getPowerSummaryConsumptionHeatingLastMonth", "tests/test_Vitocal250A.py::Vitocal250A::test_getPowerSummaryConsumptionHeatingLastSevenDays", "tests/test_Vitocal250A.py::Vitocal250A::test_getPowerSummaryConsumptionHeatingLastYear", "tests/test_Vitocal250A.py::Vitocal250A::test_getPowerSummaryConsumptionHeatingUnit", "tests/test_Vitocal250A.py::Vitocal250A::test_getPrograms", "tests/test_Vitocal250A.py::Vitocal250A::test_getReturnTemperature", "tests/test_Vitocal250A.py::Vitocal250A::test_getSupplyTemperaturePrimaryCircuit" ]
{ "failed_lite_validators": [], "has_test_patch": true, "is_lite": true }
2023-05-13 17:48:54+00:00
apache-2.0
5,598
somm15__PyViCare-334
diff --git a/PyViCare/PyViCareDeviceConfig.py b/PyViCare/PyViCareDeviceConfig.py index 57f546f..0a776a4 100644 --- a/PyViCare/PyViCareDeviceConfig.py +++ b/PyViCare/PyViCareDeviceConfig.py @@ -12,6 +12,7 @@ from PyViCare.PyViCarePelletsBoiler import PelletsBoiler from PyViCare.PyViCareRadiatorActuator import RadiatorActuator from PyViCare.PyViCareRoomSensor import RoomSensor from PyViCare.PyViCareElectricalEnergySystem import ElectricalEnergySystem +from PyViCare.PyViCareGateway import Gateway from PyViCare.PyViCareVentilationDevice import VentilationDevice logger = logging.getLogger('ViCare') @@ -54,6 +55,9 @@ class PyViCareDeviceConfig: def asElectricalEnergySystem(self): return ElectricalEnergySystem(self.service) + + def asGateway(self): + return Gateway(self.service) def asVentilation(self): return VentilationDevice(self.service) @@ -84,7 +88,8 @@ class PyViCareDeviceConfig: (self.asElectricalEnergySystem, r"E3_TCU10_x07", ["type:tcu"]), (self.asElectricalEnergySystem, r"E3_EEBus", ["type:eebus"]), (self.asElectricalEnergySystem, r"E3_VitoCharge_03", ["type:energy_storage"]), - (self.asVentilation, r"E3_ViAir", ["type:ventilation"]) + (self.asVentilation, r"E3_ViAir", ["type:ventilation"]), + (self.asGateway, r"Heatbox1", ["type:gateway;VitoconnectOpto1"]) ] for (creator_method, type_name, roles) in device_types: diff --git a/PyViCare/PyViCareGateway.py b/PyViCare/PyViCareGateway.py new file mode 100644 index 0000000..fbdf317 --- /dev/null +++ b/PyViCare/PyViCareGateway.py @@ -0,0 +1,9 @@ +from PyViCare.PyViCareDevice import Device +from PyViCare.PyViCareUtils import handleNotSupported + + +class Gateway(Device): + + @handleNotSupported + def getWifiSignalStrength(self): + return self.service.getProperty("gateway.wifi")["properties"]["strength"]["value"] diff --git a/PyViCare/PyViCareService.py b/PyViCare/PyViCareService.py index 29e7f64..259deee 100644 --- a/PyViCare/PyViCareService.py +++ b/PyViCare/PyViCareService.py @@ -27,10 +27,6 @@ def buildSetPropertyUrl(accessor, property_name, action): return f'/features/installations/{accessor.id}/gateways/{accessor.serial}/devices/{accessor.device_id}/features/{property_name}/commands/{action}' -def buildGetPropertyUrl(accessor, property_name): - return f'/features/installations/{accessor.id}/gateways/{accessor.serial}/devices/{accessor.device_id}/features/{property_name}' - - class ViCareDeviceAccessor: def __init__(self, id: int, serial: str, device_id: str) -> None: self.id = id @@ -45,13 +41,21 @@ class ViCareService: self.roles = roles def getProperty(self, property_name: str) -> Any: - url = buildGetPropertyUrl( - self.accessor, property_name) + url = self.buildGetPropertyUrl(property_name) return self.oauth_manager.get(url) + + def buildGetPropertyUrl(self, property_name): + if self._isGateway(): + return f'/features/installations/{self.accessor.id}/gateways/{self.accessor.serial}/features/{property_name}' + return f'/features/installations/{self.accessor.id}/gateways/{self.accessor.serial}/devices/{self.accessor.device_id}/features/{property_name}' + def hasRoles(self, requested_roles) -> bool: return hasRoles(requested_roles, self.roles) + def _isGateway(self) -> bool: + return self.hasRoles(["type:gateway;VitoconnectOpto1"]) + def setProperty(self, property_name: str, action: str, data: Any) -> Any: url = buildSetPropertyUrl( self.accessor, property_name, action) @@ -61,4 +65,6 @@ class ViCareService: def fetch_all_features(self) -> Any: url = 
f'/features/installations/{self.accessor.id}/gateways/{self.accessor.serial}/devices/{self.accessor.device_id}/features/' + if self._isGateway(): + url = f'/features/installations/{self.accessor.id}/gateways/{self.accessor.serial}/features/' return self.oauth_manager.get(url)
somm15/PyViCare
16eb10d1c5fdb372a669705b54e34bb222b2057b
diff --git a/tests/response/VitoconnectOpto1.json b/tests/response/VitoconnectOpto1.json new file mode 100644 index 0000000..db0aa24 --- /dev/null +++ b/tests/response/VitoconnectOpto1.json @@ -0,0 +1,65 @@ +{ + "data": [ + { + "apiVersion": 1, + "commands": {}, + "feature": "gateway.devices", + "gatewayId": "################", + "isEnabled": true, + "isReady": true, + "properties": { + "devices": { + "type": "DeviceList", + "value": [ + { + "fingerprint": "xxx", + "id": "gateway", + "modelId": "Heatbox1", + "modelVersion": "xxx", + "name": "Heatbox 1, Vitoconnect", + "roles": [ + "type:gateway;VitoconnectOpto1", + "type:legacy" + ], + "status": "online", + "type": "vitoconnect" + }, + { + "fingerprint": "xxx", + "id": "0", + "modelId": "VScotHO1_40", + "modelVersion": "xxx", + "name": "VT 200 (HO1A / HO1B)", + "roles": [ + "type:boiler", + "type:legacy", + "type:product;VScotHO1" + ], + "status": "online", + "type": "heating" + } + ] + } + }, + "timestamp": "2023-12-25T04:01:00.448Z", + "uri": "https://api.viessmann.com/iot/v1/features/installations/#######/gateways/################/features/gateway.devices" + }, + { + "apiVersion": 1, + "commands": {}, + "feature": "gateway.wifi", + "gatewayId": "################", + "isEnabled": true, + "isReady": true, + "properties": { + "strength": { + "type": "number", + "unit": "", + "value": -69 + } + }, + "timestamp": "2023-12-26T20:44:41.417Z", + "uri": "https://api.viessmann.com/iot/v1/features/installations/#######/gateways/################/features/gateway.wifi" + } + ] +} \ No newline at end of file diff --git a/tests/test_PyViCareDeviceConfig.py b/tests/test_PyViCareDeviceConfig.py index f0953a5..c95cc3e 100644 --- a/tests/test_PyViCareDeviceConfig.py +++ b/tests/test_PyViCareDeviceConfig.py @@ -66,3 +66,14 @@ class PyViCareDeviceConfigTest(unittest.TestCase): c = PyViCareDeviceConfig(self.service, "0", "E3_ViAir_300F", "Online") device_type = c.asAutoDetectDevice() self.assertEqual("VentilationDevice", type(device_type).__name__) + + def test_autoDetect_VitoconnectOpto1_asGateway(self): + c = PyViCareDeviceConfig(self.service, "0", "Heatbox1", "Online") + device_type = c.asAutoDetectDevice() + self.assertEqual("Gateway", type(device_type).__name__) + + def test_autoDetect_RoleGateway_asGateway(self): + self.service.hasRoles = has_roles(["type:gateway;VitoconnectOpto1"]) + c = PyViCareDeviceConfig(self.service, "0", "Unknown", "Online") + device_type = c.asAutoDetectDevice() + self.assertEqual("Gateway", type(device_type).__name__) diff --git a/tests/test_PyViCareService.py b/tests/test_PyViCareService.py index 70adc98..86885bc 100644 --- a/tests/test_PyViCareService.py +++ b/tests/test_PyViCareService.py @@ -8,8 +8,8 @@ class PyViCareServiceTest(unittest.TestCase): def setUp(self): self.oauth_mock = Mock() - accessor = ViCareDeviceAccessor("[id]", "[serial]", "[device]") - self.service = ViCareService(self.oauth_mock, accessor, []) + self.accessor = ViCareDeviceAccessor("[id]", "[serial]", "[device]") + self.service = ViCareService(self.oauth_mock, self.accessor, []) def test_getProperty(self): self.service.getProperty("someprop") @@ -25,3 +25,15 @@ class PyViCareServiceTest(unittest.TestCase): self.service.setProperty("someprop", "doaction", '{}') self.oauth_mock.post.assert_called_once_with( '/features/installations/[id]/gateways/[serial]/devices/[device]/features/someprop/commands/doaction', '{}') + + def test_getProperty_gateway(self): + self.service = ViCareService(self.oauth_mock, self.accessor, ["type:gateway;VitoconnectOpto1"]) + 
self.service.getProperty("someprop") + self.oauth_mock.get.assert_called_once_with( + '/features/installations/[id]/gateways/[serial]/features/someprop') + + def test_fetch_all_features_gateway(self): + self.service = ViCareService(self.oauth_mock, self.accessor, ["type:gateway;VitoconnectOpto1"]) + self.service.fetch_all_features() + self.oauth_mock.get.assert_called_once_with( + '/features/installations/[id]/gateways/[serial]/features/') diff --git a/tests/test_TestForMissingProperties.py b/tests/test_TestForMissingProperties.py index 7844e55..4fc2af2 100644 --- a/tests/test_TestForMissingProperties.py +++ b/tests/test_TestForMissingProperties.py @@ -61,6 +61,9 @@ class TestForMissingProperties(unittest.TestCase): # Ignored for now as both are not documented in https://documentation.viessmann.com/static/iot/data-points 'device.messages.errors.raw', 'device.productIdentification', + + # gateway + 'gateway.devices', # not used ] all_features = self.read_all_features() diff --git a/tests/test_VitoconnectOpto1.py b/tests/test_VitoconnectOpto1.py new file mode 100644 index 0000000..671c32a --- /dev/null +++ b/tests/test_VitoconnectOpto1.py @@ -0,0 +1,14 @@ +import unittest + +from PyViCare.PyViCareGateway import Gateway +from tests.ViCareServiceMock import ViCareServiceMock + + +class VitoconnectOpto1(unittest.TestCase): + def setUp(self): + self.service = ViCareServiceMock('response/VitoconnectOpto1.json') + self.device = Gateway(self.service) + + def test_getWifiSignalStrength(self): + self.assertEqual( + self.device.getWifiSignalStrength(), -69)
Read WiFi Signal Strength Hey, I've been looking into the code and into the HomeAssistant integration lately. I would be interested in reading gateway specific values with the API and I found `device.wifi`. But this seems not to be in the library code yet. Where could this be implemented? Is there a dedicated Gateway class necessary for it?
0.0
16eb10d1c5fdb372a669705b54e34bb222b2057b
[ "tests/test_PyViCareDeviceConfig.py::PyViCareDeviceConfigTest::test_autoDetect_RoleBoiler_asGazBoiler", "tests/test_PyViCareDeviceConfig.py::PyViCareDeviceConfigTest::test_autoDetect_RoleClimateSensor_asRoomSensor", "tests/test_PyViCareDeviceConfig.py::PyViCareDeviceConfigTest::test_autoDetect_RoleGateway_asGateway", "tests/test_PyViCareDeviceConfig.py::PyViCareDeviceConfigTest::test_autoDetect_RoleHeatpump_asHeatpump", "tests/test_PyViCareDeviceConfig.py::PyViCareDeviceConfigTest::test_autoDetect_RoleRadiator_asRadiatorActuator", "tests/test_PyViCareDeviceConfig.py::PyViCareDeviceConfigTest::test_autoDetect_RoleVentilation_asVentilation", "tests/test_PyViCareDeviceConfig.py::PyViCareDeviceConfigTest::test_autoDetect_Unknown_asGeneric", "tests/test_PyViCareDeviceConfig.py::PyViCareDeviceConfigTest::test_autoDetect_VScot_asGazBoiler", "tests/test_PyViCareDeviceConfig.py::PyViCareDeviceConfigTest::test_autoDetect_Vitoair_FS_300E_asVentilation", "tests/test_PyViCareDeviceConfig.py::PyViCareDeviceConfigTest::test_autoDetect_VitoconnectOpto1_asGateway", "tests/test_PyViCareDeviceConfig.py::PyViCareDeviceConfigTest::test_autoDetect_Vitodens_asGazBoiler", "tests/test_PyViCareService.py::PyViCareServiceTest::test_fetch_all_features_gateway", "tests/test_PyViCareService.py::PyViCareServiceTest::test_getProperty", "tests/test_PyViCareService.py::PyViCareServiceTest::test_getProperty_gateway", "tests/test_PyViCareService.py::PyViCareServiceTest::test_setProperty_object", "tests/test_PyViCareService.py::PyViCareServiceTest::test_setProperty_string", "tests/test_TestForMissingProperties.py::TestForMissingProperties::test_missingProperties", "tests/test_TestForMissingProperties.py::TestForMissingProperties::test_unverifiedProperties", "tests/test_VitoconnectOpto1.py::VitoconnectOpto1::test_getWifiSignalStrength" ]
[]
{ "failed_lite_validators": [ "has_added_files", "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false }
2023-10-19 21:34:29+00:00
apache-2.0
5,599
sonata-nfv__tng-sdk-package-16
diff --git a/src/tngsdk/package/__init__.py b/src/tngsdk/package/__init__.py index 82cd398..c3e1fb3 100755 --- a/src/tngsdk/package/__init__.py +++ b/src/tngsdk/package/__init__.py @@ -30,6 +30,36 @@ # acknowledge the contributions of their colleagues of the SONATA # partner consortium (www.5gtango.eu). +import logging +import coloredlogs +import os + +from tngsdk.package.cli import parse_args, CLI +from tngsdk.package.pkgmgm import Packager + + +LOG = logging.getLogger(os.path.basename(__file__)) + + +def logging_setup(): + os.environ["COLOREDLOGS_LOG_FORMAT"] \ + = "%(asctime)s [%(levelname)s] [%(name)s] %(message)s" + def main(): - print("not implemented") + logging_setup() + args = parse_args() + # TODO better log configuration (e.g. file-based logging) + if args.verbose: + coloredlogs.install(level="DEBUG") + else: + coloredlogs.install(level="INFO") + # TODO validate if args combination makes any sense + p = Packager(args) + if args.service: + # TODO start package in service mode + pass + else: + # run package in CLI mode + c = CLI(args, p) + c.dispatch() diff --git a/src/tngsdk/package/cli.py b/src/tngsdk/package/cli.py new file mode 100644 index 0000000..88928c0 --- /dev/null +++ b/src/tngsdk/package/cli.py @@ -0,0 +1,119 @@ +# Copyright (c) 2015 SONATA-NFV, 5GTANGO, UBIWHERE, Paderborn University +# ALL RIGHTS RESERVED. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Neither the name of the SONATA-NFV, 5GTANGO, UBIWHERE, Paderborn University +# nor the names of its contributors may be used to endorse or promote +# products derived from this software without specific prior written +# permission. +# +# This work has been performed in the framework of the SONATA project, +# funded by the European Commission under Grant number 671517 through +# the Horizon 2020 and 5G-PPP programmes. The authors would like to +# acknowledge the contributions of their colleagues of the SONATA +# partner consortium (www.sonata-nfv.eu). +# +# This work has also been performed in the framework of the 5GTANGO project, +# funded by the European Commission under Grant number 761493 through +# the Horizon 2020 and 5G-PPP programmes. The authors would like to +# acknowledge the contributions of their colleagues of the SONATA +# partner consortium (www.5gtango.eu). 
+import logging +import argparse +import os +import sys + + +LOG = logging.getLogger(os.path.basename(__file__)) + + +class CLI(object): + + def __init__(self, args, packager): + self._args = args + self._p = packager + + def dispatch(self): + if self._args.package: + # package creation + self._p.package() + else: + # un-packaging + self._p.unpackage() + + +def parse_args(input_args=None): + parser = argparse.ArgumentParser( + description="5GTANGO SDK packager") + + parser.add_argument( + "-p", + "--package", + help="Create package from given project.", + required=False, + default=None, + dest="package") + + parser.add_argument( + "-u", + "--unpackage", + help="Unpackage given package.", + required=False, + default=None, + dest="unpackage") + + parser.add_argument( + "--format", + help="Package format [5GTANGO|OSM]." + + "\nDefault: 5GTANGO", + required=False, + default="5GTANGO", + dest="format") + + parser.add_argument( + "-v", + "--verbose", + help="Output debug messages.", + required=False, + default=False, + dest="verbose", + action="store_true") + + parser.add_argument( + "-s", + "--service", + help="Run packager in service mode with REST API.", + required=False, + default=False, + dest="service", + action="store_true") + + parser.add_argument( + "--address", + help="Listen address of REST API when in service mode." + + "\nDefault: 0.0.0.0", + required=False, + default="0.0.0.0", + dest="service_address") + + parser.add_argument( + "--port", + help="TCP port of REST API when in service mode." + + "\nDefault: 5099", + required=False, + default=5099, + dest="service_port") + if input_args is None: + input_args = sys.argv[1:] + return parser.parse_args(input_args) diff --git a/src/tngsdk/package/pkgmgm.py b/src/tngsdk/package/pkgmgm.py new file mode 100644 index 0000000..f40ac40 --- /dev/null +++ b/src/tngsdk/package/pkgmgm.py @@ -0,0 +1,48 @@ +# Copyright (c) 2018 SONATA-NFV, 5GTANGO, UBIWHERE, Paderborn University +# ALL RIGHTS RESERVED. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Neither the name of the SONATA-NFV, 5GTANGO, UBIWHERE, Paderborn University +# nor the names of its contributors may be used to endorse or promote +# products derived from this software without specific prior written +# permission. +# +# This work has been performed in the framework of the SONATA project, +# funded by the European Commission under Grant number 671517 through +# the Horizon 2020 and 5G-PPP programmes. The authors would like to +# acknowledge the contributions of their colleagues of the SONATA +# partner consortium (www.sonata-nfv.eu). +# +# This work has also been performed in the framework of the 5GTANGO project, +# funded by the European Commission under Grant number 761493 through +# the Horizon 2020 and 5G-PPP programmes. The authors would like to +# acknowledge the contributions of their colleagues of the SONATA +# partner consortium (www.5gtango.eu). 
+import logging +import os + + +LOG = logging.getLogger(os.path.basename(__file__)) + + +class Packager(object): + + def __init__(self, args): + self._args = args + + def package(self): + LOG.warning("packaging not implemented") + + def unpackage(self): + LOG.warning("unpackaging not implemented")
sonata-nfv/tng-sdk-package
ff3c5eeee949fc5b3a379d1bbb753f6e24fb0991
diff --git a/src/tngsdk/package/tests/test_unit_package.py b/src/tngsdk/package/rest.py old mode 100755 new mode 100644 similarity index 92% rename from src/tngsdk/package/tests/test_unit_package.py rename to src/tngsdk/package/rest.py index 1c80e24..49a747d --- a/src/tngsdk/package/tests/test_unit_package.py +++ b/src/tngsdk/package/rest.py @@ -29,12 +29,3 @@ # the Horizon 2020 and 5G-PPP programmes. The authors would like to # acknowledge the contributions of their colleagues of the SONATA # partner consortium (www.5gtango.eu). - - -import unittest - - -class TngSdkPackageTest(unittest.TestCase): - - def test_test(self): - self.assertTrue(True) diff --git a/src/tngsdk/package/tests/test_unit_pkgmgm.py b/src/tngsdk/package/tests/test_unit_pkgmgm.py new file mode 100755 index 0000000..53d79ab --- /dev/null +++ b/src/tngsdk/package/tests/test_unit_pkgmgm.py @@ -0,0 +1,58 @@ +# Copyright (c) 2015 SONATA-NFV, 5GTANGO, UBIWHERE, Paderborn University +# ALL RIGHTS RESERVED. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Neither the name of the SONATA-NFV, 5GTANGO, UBIWHERE, Paderborn University +# nor the names of its contributors may be used to endorse or promote +# products derived from this software without specific prior written +# permission. +# +# This work has been performed in the framework of the SONATA project, +# funded by the European Commission under Grant number 671517 through +# the Horizon 2020 and 5G-PPP programmes. The authors would like to +# acknowledge the contributions of their colleagues of the SONATA +# partner consortium (www.sonata-nfv.eu). +# +# This work has also been performed in the framework of the 5GTANGO project, +# funded by the European Commission under Grant number 761493 through +# the Horizon 2020 and 5G-PPP programmes. The authors would like to +# acknowledge the contributions of their colleagues of the SONATA +# partner consortium (www.5gtango.eu). + + +import unittest +from tngsdk.package.cli import parse_args +from tngsdk.package.pkgmgm import Packager + + +class TngSdkPkgMgmTest(unittest.TestCase): + + def setUp(self): + # list can manually define CLI arguments + self.args = parse_args([]) + + def tearDown(self): + pass + + def test_instantiation(self): + p = Packager(self.args) + del p + + def test_package(self): + p = Packager(self.args) + p.package() + + def test_unpackage(self): + p = Packager(self.args) + p.unpackage()
Add basic command line interface Example: ``` tng-package -h usage: tng-package [-h] [-p PACKAGE] [-u UNPACKAGE] [--format FORMAT] [-v] [-s] [--address SERVICE_ADDRESS] [--port SERVICE_PORT] 5GTANGO SDK packager optional arguments: -h, --help show this help message and exit -p PACKAGE, --package PACKAGE Create package from given project. -u UNPACKAGE, --unpackage UNPACKAGE Unpackage given package. --format FORMAT Package format [5GTANGO|OSM]. Default: 5GTANGO -v, --verbose Output debug messages. -s, --service Run packager in service mode with REST API. --address SERVICE_ADDRESS Listen address of REST API when in service mode. Default: 0.0.0.0 --port SERVICE_PORT TCP port of REST API when in service mode. Default: 5099 ```
0.0
ff3c5eeee949fc5b3a379d1bbb753f6e24fb0991
[ "src/tngsdk/package/tests/test_unit_pkgmgm.py::TngSdkPkgMgmTest::test_instantiation", "src/tngsdk/package/tests/test_unit_pkgmgm.py::TngSdkPkgMgmTest::test_package", "src/tngsdk/package/tests/test_unit_pkgmgm.py::TngSdkPkgMgmTest::test_unpackage" ]
[]
{ "failed_lite_validators": [ "has_added_files" ], "has_test_patch": true, "is_lite": false }
2018-02-13 13:10:35+00:00
apache-2.0
5,600
sorgerlab__indra-1104
diff --git a/doc/modules/databases/index.rst b/doc/modules/databases/index.rst index ac9fc2846..8791677d7 100644 --- a/doc/modules/databases/index.rst +++ b/doc/modules/databases/index.rst @@ -98,3 +98,8 @@ Disease Ontology (DOID) client (:py:mod:`indra.databases.doid_client`) ---------------------------------------------------------------------- .. automodule:: indra.databases.doid_client :members: + +Taxonomy client (:py:mod:`indra.databases.taxonomy_client`) +----------------------------------------------------------- +.. automodule:: indra.databases.taxonomy_client + :members: diff --git a/indra/databases/taxonomy_client.py b/indra/databases/taxonomy_client.py new file mode 100644 index 000000000..b257739b8 --- /dev/null +++ b/indra/databases/taxonomy_client.py @@ -0,0 +1,38 @@ +"""Client to access the Entrez Taxonomy web service.""" +import requests + +base_url = 'https://eutils.ncbi.nlm.nih.gov/entrez/eutils/esearch.fcgi' + + +def _send_search_request(term): + params = { + 'db': 'taxonomy', + 'term': term, + 'retmode': 'json' + } + res = requests.get(base_url, params=params) + if not res.status_code == 200: + return None + return res.json().get('esearchresult') + + +def get_taxonomy_id(name): + """Return the taxonomy ID corresponding to a taxonomy name. + + Parameters + ---------- + name : str + The name of the taxonomy entry. + Example: "Severe acute respiratory syndrome coronavirus 2" + + Returns + ------- + str or None + The taxonomy ID corresponding to the given name or None + if not available. + """ + res = _send_search_request(name) + idlist = res.get('idlist') + if not idlist: + return None + return idlist[0] diff --git a/indra/sources/bel/processor.py b/indra/sources/bel/processor.py index 9e71e37bd..22e0093c5 100644 --- a/indra/sources/bel/processor.py +++ b/indra/sources/bel/processor.py @@ -14,7 +14,7 @@ from indra.statements import * from indra.sources.bel.rdf_processor import bel_to_indra, chebi_name_id from indra.databases import ( chebi_client, go_client, hgnc_client, mesh_client, - mirbase_client, uniprot_client, + mirbase_client, uniprot_client, taxonomy_client ) from indra.assemblers.pybel.assembler import _pybel_indra_act_map @@ -575,6 +575,12 @@ def get_db_refs_by_name(ns, name, node_data): # SDIS, SCHEM: Include the name as the ID for the namespace elif ns in ('SDIS', 'SCHEM', 'TEXT'): db_refs = {ns: name} + elif ns == 'TAX': + tid = taxonomy_client.get_taxonomy_id(name) + if tid: + db_refs = {'TAXONOMY': tid} + else: + logger.info('Could not get taxonomy ID for %s' % name) else: logger.info("Unhandled namespace: %s: %s (%s)" % (ns, name, node_data))
sorgerlab/indra
7394532b4afc2845c9f088197070a343772329f8
diff --git a/indra/tests/test_taxonomy_client.py b/indra/tests/test_taxonomy_client.py new file mode 100644 index 000000000..c812d854e --- /dev/null +++ b/indra/tests/test_taxonomy_client.py @@ -0,0 +1,6 @@ +from indra.databases import taxonomy_client + + +def test_name_lookup(): + assert taxonomy_client.get_taxonomy_id( + 'Severe acute respiratory syndrome coronavirus 2') == '2697049'
Unhandled namespace: TAX in process_pybel_graph() Hi folks, I wanted to convert BEL statements to English using your excellent `EnglishAssembler.make_model()` method, but a couple of statements could not be parsed because they included a grounded term from the NCBI Taxonomy namespace, which was not available in [`indra.sources.bel.processor.get_db_refs_by_name()`](https://github.com/sorgerlab/indra/blob/7394532b4afc2845c9f088197070a343772329f8/indra/sources/bel/processor.py#L441). This was the message I got when running the following code: ```python il6_amp = pybel.from_biodati('...') from indra.assemblers.english import EnglishAssembler from indra.sources.bel.api import process_pybel_graph pbp = process_pybel_graph(il6_amp) stmts = pbp.statements stmts ``` ``` INFO: [2020-05-30 07:14:20] indra.sources.bel.processor - Unhandled namespace: TAX: Severe acute respiratory syndrome coronavirus 2 (a(TAX:"Severe acute respiratory syndrome coronavirus 2")) INFO: [2020-05-30 07:14:20] indra.sources.bel.processor - Unable to get identifier information for node: a(TAX:"Severe acute respiratory syndrome coronavirus 2") INFO: [2020-05-30 07:14:20] indra.sources.bel.processor - Unhandled namespace: TAX: Severe acute respiratory syndrome coronavirus 2 (a(TAX:"Severe acute respiratory syndrome coronavirus 2")) INFO: [2020-05-30 07:14:20] indra.sources.bel.processor - Unable to get identifier information for node: a(TAX:"Severe acute respiratory syndrome coronavirus 2") ```
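The merged patch resolves such names through the NCBI Entrez esearch service; here is a minimal standalone sketch of that lookup, using the same endpoint and parameters as the patch.

```python
import requests

def get_taxonomy_id(name):
    # Query the Entrez esearch endpoint against the taxonomy database
    # and return the first matching ID, or None if nothing matched.
    res = requests.get(
        'https://eutils.ncbi.nlm.nih.gov/entrez/eutils/esearch.fcgi',
        params={'db': 'taxonomy', 'term': name, 'retmode': 'json'})
    if res.status_code != 200:
        return None
    idlist = res.json().get('esearchresult', {}).get('idlist')
    return idlist[0] if idlist else None

# get_taxonomy_id('Severe acute respiratory syndrome coronavirus 2')
# returns '2697049', as the accompanying test asserts.
```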
0.0
7394532b4afc2845c9f088197070a343772329f8
[ "indra/tests/test_taxonomy_client.py::test_name_lookup" ]
[]
{ "failed_lite_validators": [ "has_added_files", "has_many_modified_files" ], "has_test_patch": true, "is_lite": false }
2020-05-30 16:15:11+00:00
bsd-2-clause
5,601
sosw__sosw-216
diff --git a/sosw/components/dynamo_db.py b/sosw/components/dynamo_db.py index ba52847..d7686c6 100644 --- a/sosw/components/dynamo_db.py +++ b/sosw/components/dynamo_db.py @@ -261,6 +261,11 @@ class DynamoDbClient: if val_dict: val = val_dict.get(key_type) # Ex: 1234 or "myvalue" + if val is None and key_type not in val_dict: + real_type = list(val_dict.keys())[0] + raise ValueError(f"'{key}' is expected to be of type '{key_type}' in row_mapper, " + f"but real value is of type '{real_type}'") + # type_deserializer.deserialize() parses 'N' to `Decimal` type but it cant be parsed to a datetime # so we cast it to either an integer or a float. if key_type == 'N':
sosw/sosw
e1e4b9c3fdc321830f6eb3f04c4f40b08c23238a
diff --git a/sosw/components/test/unit/test_dynamo_db.py b/sosw/components/test/unit/test_dynamo_db.py index 30dace2..e9022ee 100644 --- a/sosw/components/test/unit/test_dynamo_db.py +++ b/sosw/components/test/unit/test_dynamo_db.py @@ -201,6 +201,21 @@ class dynamodb_client_UnitTestCase(unittest.TestCase): self.assertDictEqual(res, expected) + def test_dynamo_to_dict__mapping_doesnt_match__raises(self): + # If the value type in the DB doesn't match the expected type in row_mapper - raise ValueError + + dynamo_row = { + 'hash_col': {'S': 'aaa'}, 'range_col': {'N': '123'}, + 'other_col': {'N': '111'} # In the row_mapper, other_col is of type 'S' + } + + with self.assertRaises(ValueError) as e: + dict_row = self.dynamo_client.dynamo_to_dict(dynamo_row) + + self.assertEqual("'other_col' is expected to be of type 'S' in row_mapper, but real value is of type 'N'", + str(e.exception)) + + def test_get_by_query__validates_comparison(self): self.assertRaises(AssertionError, self.dynamo_client.get_by_query, keys={'k': '1'}, comparisons={'k': 'unsupported'})
DynamoDB - exception on wrong type in row_mapper When the type declared in row_mapper is one type (for example 'S') but the real type from the DB is another (for example 'N'), this exception happens: <img width="834" alt="Screen Shot 2020-07-08 at 13 59 53" src="https://user-images.githubusercontent.com/11838450/86911422-f7956280-c123-11ea-83cf-d9f207ed3c1c.png"> Options (choose one): 1. Raise an informative exception 2. Convert to the expected type if possible I prefer the first option.
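A minimal sketch of option 1, the validation the merged patch adds: compare the DynamoDB type tag of the stored value against the type declared in the row_mapper. `deserialize_checked` is a hypothetical helper name, not part of sosw.

```python
def deserialize_checked(key, key_type, val_dict):
    # val_dict is a DynamoDB attribute value like {'N': '111'}; raise a
    # descriptive error when its type tag differs from the row_mapper's.
    if key_type not in val_dict:
        real_type = next(iter(val_dict))
        raise ValueError(
            f"'{key}' is expected to be of type '{key_type}' in row_mapper, "
            f"but real value is of type '{real_type}'")
    return val_dict[key_type]

# deserialize_checked('other_col', 'S', {'N': '111'})  # raises ValueError
```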
0.0
e1e4b9c3fdc321830f6eb3f04c4f40b08c23238a
[ "sosw/components/test/unit/test_dynamo_db.py::dynamodb_client_UnitTestCase::test_dynamo_to_dict__mapping_doesnt_match__raises" ]
[ "sosw/components/test/unit/test_dynamo_db.py::dynamodb_client_UnitTestCase::test__parse_filter_expression", "sosw/components/test/unit/test_dynamo_db.py::dynamodb_client_UnitTestCase::test__parse_filter_expression__raises", "sosw/components/test/unit/test_dynamo_db.py::dynamodb_client_UnitTestCase::test_batch_get_items_one_table__not_strict", "sosw/components/test/unit/test_dynamo_db.py::dynamodb_client_UnitTestCase::test_batch_get_items_one_table__strict", "sosw/components/test/unit/test_dynamo_db.py::dynamodb_client_UnitTestCase::test_create__calls_boto_client", "sosw/components/test/unit/test_dynamo_db.py::dynamodb_client_UnitTestCase::test_create__calls_put", "sosw/components/test/unit/test_dynamo_db.py::dynamodb_client_UnitTestCase::test_create__raises__if_no_hash_col_configured", "sosw/components/test/unit/test_dynamo_db.py::dynamodb_client_UnitTestCase::test_dict_to_dynamo__not_strict__map_type", "sosw/components/test/unit/test_dynamo_db.py::dynamodb_client_UnitTestCase::test_dict_to_dynamo_not_strict", "sosw/components/test/unit/test_dynamo_db.py::dynamodb_client_UnitTestCase::test_dict_to_dynamo_prefix", "sosw/components/test/unit/test_dynamo_db.py::dynamodb_client_UnitTestCase::test_dict_to_dynamo_strict", "sosw/components/test/unit/test_dynamo_db.py::dynamodb_client_UnitTestCase::test_dynamo_to_dict", "sosw/components/test/unit/test_dynamo_db.py::dynamodb_client_UnitTestCase::test_dynamo_to_dict__do_json_loads", "sosw/components/test/unit/test_dynamo_db.py::dynamodb_client_UnitTestCase::test_dynamo_to_dict__dont_json_loads", "sosw/components/test/unit/test_dynamo_db.py::dynamodb_client_UnitTestCase::test_dynamo_to_dict_no_strict_row_mapper", "sosw/components/test/unit/test_dynamo_db.py::dynamodb_client_UnitTestCase::test_get_by_query__between", "sosw/components/test/unit/test_dynamo_db.py::dynamodb_client_UnitTestCase::test_get_by_query__expr_attr", "sosw/components/test/unit/test_dynamo_db.py::dynamodb_client_UnitTestCase::test_get_by_query__max_items_and_count__raises", "sosw/components/test/unit/test_dynamo_db.py::dynamodb_client_UnitTestCase::test_get_by_query__return_count", "sosw/components/test/unit/test_dynamo_db.py::dynamodb_client_UnitTestCase::test_get_by_query__validates_comparison", "sosw/components/test/unit/test_dynamo_db.py::dynamodb_client_UnitTestCase::test_patch__transfers_attrs_to_remove", "sosw/components/test/unit/test_dynamo_db.py::dynamodb_client_UnitTestCase::test_sleep_db__", "sosw/components/test/unit/test_dynamo_db.py::dynamodb_client_UnitTestCase::test_sleep_db__fell_asleep", "sosw/components/test/unit/test_dynamo_db.py::dynamodb_client_UnitTestCase::test_sleep_db__get_capacity_called", "sosw/components/test/unit/test_dynamo_db.py::dynamodb_client_UnitTestCase::test_sleep_db__wrong_action" ]
{ "failed_lite_validators": [ "has_hyperlinks", "has_media", "has_pytest_match_arg" ], "has_test_patch": true, "is_lite": false }
2020-07-08 13:39:28+00:00
mit
5,602
sotetsuk__memozo-21
diff --git a/memozo/memozo.py b/memozo/memozo.py index a67bbe5..e4e3311 100644 --- a/memozo/memozo.py +++ b/memozo/memozo.py @@ -76,3 +76,32 @@ class Memozo(object): return _wrapper return wrapper + + def pickle(self, name=None, ext='pickle', protocol=None): + + def wrapper(func): + _name = func.__name__ if name is None else name + + @functools.wraps(func) + def _wrapper(*args, **kwargs): + args = utils.get_bound_args(func, *args, **kwargs) + args_str = utils.get_args_str(args) + sha1 = utils.get_hash(_name, func.__name__, args_str) + file_path = os.path.join(self.base_path, "{}_{}.{}".format(_name, sha1, ext)) + + if utils.log_exisits(self.base_path, _name, func.__name__, args_str) and os.path.exists(file_path): + with open(file_path, 'rb') as f: + obj = pickle.load(f) + return obj + + obj = func(*args, **kwargs) + + with open(file_path, 'wb') as f: + pickle.dump(obj, f, protocol=protocol) + utils.write(self.base_path, _name, func.__name__, args_str) + + return obj + + return _wrapper + + return wrapper
sotetsuk/memozo
a0d0985f445279d2c0ae295e9488556cf6507f9f
diff --git a/tests/test_memozo.py b/tests/test_memozo.py index e224e0a..4c39e59 100644 --- a/tests/test_memozo.py +++ b/tests/test_memozo.py @@ -1,6 +1,7 @@ import os import unittest import codecs +import pickle from memozo import Memozo, utils @@ -115,3 +116,30 @@ class TestMemozoGenerator(unittest.TestCase): def test_load_data_from_cache(self): # TODO(sotetsuk): WRITE THIS TEST pass + + +class TestMemozoPickle(unittest.TestCase): + + def test_no_cache_output(self): + base_path = './tests/resources' + m = Memozo(base_path) + + @m.pickle('pickle_test', protocol=pickle.HIGHEST_PROTOCOL) + def pickle_test_func(): + return {'a': 3, 'b': 5} + + expected = {'a': 3, 'b': 5} + actual = pickle_test_func() + self.assertTrue(actual == expected) + + sha1 = utils.get_hash('pickle_test', 'pickle_test_func', '') + file_path = os.path.join(base_path, "{}_{}.{}".format('pickle_test', sha1, 'pickle')) + os.remove(file_path) + + def test_data_cached_collectly(self): + # TODO(sotetsuk): WRITE THIS TEST + pass + + def test_load_data_from_cache(self): + # TODO(sotetsuk): WRITE THIS TEST + pass
implement pickle Implement a `@m.pickle()` decorator that caches a function's return value as a pickle file: ```py @m.pickle() ```
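A minimal sketch of such a decorator, simplified relative to the merged implementation (which also records calls in a Memozo log file and hashes the bound arguments); `pickle_cache` is a hypothetical standalone name.

```python
import functools
import hashlib
import os
import pickle

def pickle_cache(base_path, name=None, protocol=None):
    # Cache a function's return value as <name>_<sha1>.pickle under
    # base_path; reload it on subsequent calls with the same arguments.
    def wrapper(func):
        _name = name or func.__name__

        @functools.wraps(func)
        def _wrapper(*args, **kwargs):
            key = repr((_name, args, sorted(kwargs.items())))
            sha1 = hashlib.sha1(key.encode()).hexdigest()
            path = os.path.join(base_path, f"{_name}_{sha1}.pickle")
            if os.path.exists(path):
                with open(path, 'rb') as f:
                    return pickle.load(f)
            obj = func(*args, **kwargs)
            with open(path, 'wb') as f:
                pickle.dump(obj, f, protocol=protocol)
            return obj
        return _wrapper
    return wrapper
```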
0.0
a0d0985f445279d2c0ae295e9488556cf6507f9f
[ "tests/test_memozo.py::TestMemozoPickle::test_no_cache_output" ]
[ "tests/test_memozo.py::TestMemozoCall::test_args", "tests/test_memozo.py::TestMemozoCall::test_call", "tests/test_memozo.py::TestMemozoCall::test_doc_string", "tests/test_memozo.py::TestMemozoCall::test_set_name", "tests/test_memozo.py::TestMemozoGenerator::test_data_cached_collectly", "tests/test_memozo.py::TestMemozoGenerator::test_load_data_from_cache", "tests/test_memozo.py::TestMemozoGenerator::test_no_cache_output", "tests/test_memozo.py::TestMemozoPickle::test_data_cached_collectly", "tests/test_memozo.py::TestMemozoPickle::test_load_data_from_cache" ]
{ "failed_lite_validators": [ "has_short_problem_statement" ], "has_test_patch": true, "is_lite": false }
2017-01-20 03:32:25+00:00
mit
5,603
spacegraphcats__spacegraphcats-47
diff --git a/spacegraphcats/graph_parser.py b/spacegraphcats/graph_parser.py index 740e811..9977400 100644 --- a/spacegraphcats/graph_parser.py +++ b/spacegraphcats/graph_parser.py @@ -11,16 +11,33 @@ def _mapstr(items): return list(map(str, items)) -def parse(graph_file, add_vertex=None, add_edge=None): +def parse(graph_file, add_vertex=None, add_edge=None, consecutive_ids=False): """Parse a graph and call provided methods with vertices and edges.""" # read vertices vertex_attributes = _parse_line(graph_file.readline())[2:] + # consecutive id to original id + id_map = [] + # original id to consecutive id + id_map_reverse = {} + + def _get_consecutive_id(id): + if not consecutive_ids: + return id + + if id in id_map_reverse: + return id_map_reverse[id] + else: + consecutive_id = len(id_map) + id_map_reverse[id] = consecutive_id + id_map.append(id) + return consecutive_id + next_line = graph_file.readline() while len(next_line) > 1: if add_vertex is not None: parsed = _parse_line(next_line) - add_vertex(int(parsed[0]), int(parsed[1]), + add_vertex(_get_consecutive_id(int(parsed[0])), int(parsed[1]), vertex_attributes, parsed[2:]) next_line = graph_file.readline() @@ -34,10 +51,12 @@ def parse(graph_file, add_vertex=None, add_edge=None): next_line = graph_file.readline() while len(next_line) > 1: parsed = _parse_line(next_line) - add_edge(int(parsed[0]), int(parsed[1]), + add_edge(_get_consecutive_id(int(parsed[0])), _get_consecutive_id(int(parsed[1])), edge_attributes, parsed[2:]) next_line = graph_file.readline() + return id_map + def parse_minhash(minhash_file, add_minhash): """Parse minhash (.mxt) file."""
spacegraphcats/spacegraphcats
bc684c22c5b7435c5bc17a365cb0fa99f3b1a536
diff --git a/spacegraphcats/test_parser.py b/spacegraphcats/test_parser.py index ea58823..f03a864 100644 --- a/spacegraphcats/test_parser.py +++ b/spacegraphcats/test_parser.py @@ -33,18 +33,20 @@ class ParserTest(unittest.TestCase): all_values_e.append(values) with open(os.path.join(DIR, 'parser-examples/graph.gxt')) as f: - self.p = parser.parse(f, collect_vertex, collect_edge) + id_map = parser.parse(f, collect_vertex, collect_edge, True) - self.assertEqual(all_ids, [1, 2, 3]) + self.assertEqual(all_ids, [0, 1, 2]) self.assertEqual(all_sizes, [2, 3, 1]) self.assertEqual(all_names_v, [['label', 'fill'], ['label', 'fill'], ['label', 'fill']]) self.assertEqual(all_values_v, [['foo', 'red'], ['bar', 'green'], ['batman', 'black']]) - self.assertEqual(all_srcs, [1, 2]) - self.assertEqual(all_dests, [2, 3]) + self.assertEqual(all_srcs, [0, 1]) + self.assertEqual(all_dests, [1, 2]) self.assertEqual(all_names_e, [['label'], ['label']]) self.assertEqual(all_values_e, [['a'], ['b']]) + self.assertEqual(id_map, [1, 2, 3]) + def test_mxt_parsing(self): minhashes = {}
Parser should make sure that ids are consecutive The parser should ensure that ids are consecutive so that the graph can use a more efficient list for the vertices. For this to work, we also need to return a list that maps back to the original ids. We could make the list more efficient if there is a constant offset.
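A minimal sketch of the remapping idea, assuming nothing beyond plain Python: assign consecutive 0-based IDs on first sight and keep a list that maps back to the originals, as the merged parser does with `id_map` and `id_map_reverse`.

```python
def make_consecutive(original_ids):
    # id_map: consecutive id -> original id (the list to return);
    # id_map_reverse: original id -> consecutive id.
    id_map = []
    id_map_reverse = {}
    for original in original_ids:
        if original not in id_map_reverse:
            id_map_reverse[original] = len(id_map)
            id_map.append(original)
    return id_map_reverse, id_map

forward, back = make_consecutive([1, 2, 3, 2])
assert forward == {1: 0, 2: 1, 3: 2} and back == [1, 2, 3]
```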
0.0
bc684c22c5b7435c5bc17a365cb0fa99f3b1a536
[ "spacegraphcats/test_parser.py::ParserTest::test_graph_parsing" ]
[ "spacegraphcats/test_parser.py::ParserTest::test_mxt_parsing" ]
{ "failed_lite_validators": [], "has_test_patch": true, "is_lite": true }
2017-01-28 07:59:17+00:00
mit
5,604
spacetelescope__gwcs-439
diff --git a/CHANGES.rst b/CHANGES.rst index bfd8523..4c17f4a 100644 --- a/CHANGES.rst +++ b/CHANGES.rst @@ -4,6 +4,10 @@ Bug Fixes ^^^^^^^^^ +- Synchronize ``array_shape`` and ``pixel_shape`` attributes of WCS + objects. [#439] + + 0.18.3 (2022-12-23) ------------------- Bug Fixes diff --git a/gwcs/api.py b/gwcs/api.py index 9e5012f..e4fd19a 100644 --- a/gwcs/api.py +++ b/gwcs/api.py @@ -170,11 +170,17 @@ class GWCSAPIMixin(BaseHighLevelWCS, BaseLowLevelWCS): The shape should be given in ``(row, column)`` order (the convention for arrays in Python). """ - return self._array_shape + if self._pixel_shape is None: + return None + else: + return self._pixel_shape[::-1] @array_shape.setter def array_shape(self, value): - self._array_shape = value + if value is None: + self._pixel_shape = None + else: + self._pixel_shape = value[::-1] @property def pixel_bounds(self): diff --git a/gwcs/wcs.py b/gwcs/wcs.py index 92a635c..3d09a34 100644 --- a/gwcs/wcs.py +++ b/gwcs/wcs.py @@ -141,7 +141,6 @@ class WCS(GWCSAPIMixin): self._available_frames = [] self._pipeline = [] self._name = name - self._array_shape = None self._initialize_wcs(forward_transform, input_frame, output_frame) self._pixel_shape = None
spacetelescope/gwcs
d38dff05157a938bf4856d9a3354044c17e6b79f
diff --git a/gwcs/tests/test_api.py b/gwcs/tests/test_api.py index cae9bdd..5d1fa6c 100644 --- a/gwcs/tests/test_api.py +++ b/gwcs/tests/test_api.py @@ -327,6 +327,11 @@ def test_array_shape(wcsobj): wcsobj.array_shape = (2040, 1020) assert_array_equal(wcsobj.array_shape, (2040, 1020)) + assert wcsobj.array_shape == wcsobj.pixel_shape[::-1] + + wcsobj.pixel_shape = (1111, 2222) + assert wcsobj.array_shape == (2222, 1111) + @wcs_objs def test_pixel_bounds(wcsobj):
APE 14 array_shape and pixel_shape should map to same private attribute Currently WCS properties `array_shape` and `pixel_shape` set and retrieve independent private attributes. https://github.com/spacetelescope/gwcs/blob/145778108aa6d7d0a220c96a179dc55d0a818e95/gwcs/api.py#L175-L177 and https://github.com/spacetelescope/gwcs/blob/145778108aa6d7d0a220c96a179dc55d0a818e95/gwcs/api.py#L226-L237 But they are related: ```python array_shape == pixel_shape[::-1] ``` for 2D images. So really they should be setting the same underlying private attribute with the same validation. Currently validation only happens for setting `pixel_shape`. Finally, neither of these serializes when written out to ASDF. We should probably add one or the other (both?) to the schema. Brought about via discussion with @mcara.
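A minimal sketch of the proposed consolidation, using plain Python properties: both public attributes are backed by a single `_pixel_shape`, so they can never drift apart. This mirrors the behavior the merged patch gives GWCS, but `ShapeSketch` is not the actual class.

```python
class ShapeSketch:
    def __init__(self):
        self._pixel_shape = None  # single source of truth

    @property
    def pixel_shape(self):
        return self._pixel_shape

    @pixel_shape.setter
    def pixel_shape(self, value):
        self._pixel_shape = None if value is None else tuple(value)

    @property
    def array_shape(self):
        # (row, column) order: the reverse of pixel_shape.
        return None if self._pixel_shape is None else self._pixel_shape[::-1]

    @array_shape.setter
    def array_shape(self, value):
        self.pixel_shape = None if value is None else tuple(value)[::-1]

w = ShapeSketch()
w.array_shape = (2040, 1020)
assert w.pixel_shape == (1020, 2040) and w.array_shape == (2040, 1020)
```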
0.0
d38dff05157a938bf4856d9a3354044c17e6b79f
[ "gwcs/tests/test_api.py::test_array_shape[gwcs_2d_spatial_shift]" ]
[ "gwcs/tests/test_api.py::test_lowlevel_types[gwcs_2d_spatial_shift]", "gwcs/tests/test_api.py::test_lowlevel_types[gwcs_2d_spatial_reordered]", "gwcs/tests/test_api.py::test_lowlevel_types[gwcs_1d_freq]", "gwcs/tests/test_api.py::test_lowlevel_types[gwcs_3d_spatial_wave]", "gwcs/tests/test_api.py::test_lowlevel_types[gwcs_4d_identity_units]", "gwcs/tests/test_api.py::test_lowlevel_types[gwcs_3d_identity_units]", "gwcs/tests/test_api.py::test_lowlevel_types[gwcs_stokes_lookup]", "gwcs/tests/test_api.py::test_lowlevel_types[gwcs_3d_galactic_spectral]", "gwcs/tests/test_api.py::test_names[gwcs_2d_spatial_shift]", "gwcs/tests/test_api.py::test_names[gwcs_2d_spatial_reordered]", "gwcs/tests/test_api.py::test_names[gwcs_1d_freq]", "gwcs/tests/test_api.py::test_names[gwcs_3d_spatial_wave]", "gwcs/tests/test_api.py::test_names[gwcs_4d_identity_units]", "gwcs/tests/test_api.py::test_names[gwcs_3d_identity_units]", "gwcs/tests/test_api.py::test_names[gwcs_stokes_lookup]", "gwcs/tests/test_api.py::test_names[gwcs_3d_galactic_spectral]", "gwcs/tests/test_api.py::test_pixel_n_dim[gwcs_2d_spatial_shift]", "gwcs/tests/test_api.py::test_pixel_n_dim[gwcs_2d_spatial_reordered]", "gwcs/tests/test_api.py::test_pixel_n_dim[gwcs_1d_freq]", "gwcs/tests/test_api.py::test_pixel_n_dim[gwcs_3d_spatial_wave]", "gwcs/tests/test_api.py::test_pixel_n_dim[gwcs_4d_identity_units]", "gwcs/tests/test_api.py::test_world_n_dim[gwcs_2d_spatial_shift]", "gwcs/tests/test_api.py::test_world_n_dim[gwcs_2d_spatial_reordered]", "gwcs/tests/test_api.py::test_world_n_dim[gwcs_1d_freq]", "gwcs/tests/test_api.py::test_world_n_dim[gwcs_3d_spatial_wave]", "gwcs/tests/test_api.py::test_world_n_dim[gwcs_4d_identity_units]", "gwcs/tests/test_api.py::test_world_axis_physical_types[gwcs_2d_spatial_shift]", "gwcs/tests/test_api.py::test_world_axis_physical_types[gwcs_2d_spatial_reordered]", "gwcs/tests/test_api.py::test_world_axis_physical_types[gwcs_1d_freq]", "gwcs/tests/test_api.py::test_world_axis_physical_types[gwcs_3d_spatial_wave]", "gwcs/tests/test_api.py::test_world_axis_physical_types[gwcs_4d_identity_units]", "gwcs/tests/test_api.py::test_world_axis_units[gwcs_2d_spatial_shift]", "gwcs/tests/test_api.py::test_world_axis_units[gwcs_2d_spatial_reordered]", "gwcs/tests/test_api.py::test_world_axis_units[gwcs_1d_freq]", "gwcs/tests/test_api.py::test_world_axis_units[gwcs_3d_spatial_wave]", "gwcs/tests/test_api.py::test_world_axis_units[gwcs_4d_identity_units]", "gwcs/tests/test_api.py::test_pixel_to_world_values[1-2]", "gwcs/tests/test_api.py::test_pixel_to_world_values[x1-y1]", "gwcs/tests/test_api.py::test_pixel_to_world_values_units_2d[1-2]", "gwcs/tests/test_api.py::test_pixel_to_world_values_units_2d[x1-y1]", "gwcs/tests/test_api.py::test_pixel_to_world_values_units_1d[1]", "gwcs/tests/test_api.py::test_pixel_to_world_values_units_1d[x1]", "gwcs/tests/test_api.py::test_array_index_to_world_values[1-2]", "gwcs/tests/test_api.py::test_array_index_to_world_values[x1-y1]", "gwcs/tests/test_api.py::test_world_axis_object_components_2d", "gwcs/tests/test_api.py::test_world_axis_object_components_2d_generic", "gwcs/tests/test_api.py::test_world_axis_object_components_1d", "gwcs/tests/test_api.py::test_world_axis_object_components_4d", "gwcs/tests/test_api.py::test_world_axis_object_classes_2d", "gwcs/tests/test_api.py::test_world_axis_object_classes_2d_generic", "gwcs/tests/test_api.py::test_world_axis_object_classes_4d", "gwcs/tests/test_api.py::test_high_level_wrapper[gwcs_2d_spatial_shift]", 
"gwcs/tests/test_api.py::test_high_level_wrapper[gwcs_2d_spatial_reordered]", "gwcs/tests/test_api.py::test_high_level_wrapper[gwcs_1d_freq]", "gwcs/tests/test_api.py::test_high_level_wrapper[gwcs_3d_spatial_wave]", "gwcs/tests/test_api.py::test_high_level_wrapper[gwcs_4d_identity_units]", "gwcs/tests/test_api.py::test_high_level_wrapper[gwcs_3d_identity_units]", "gwcs/tests/test_api.py::test_high_level_wrapper[gwcs_stokes_lookup]", "gwcs/tests/test_api.py::test_high_level_wrapper[gwcs_3d_galactic_spectral]", "gwcs/tests/test_api.py::test_stokes_wrapper", "gwcs/tests/test_api.py::test_pixel_bounds[gwcs_2d_spatial_shift]", "gwcs/tests/test_api.py::test_axis_correlation_matrix[gwcs_2d_spatial_shift]", "gwcs/tests/test_api.py::test_serialized_classes[gwcs_2d_spatial_shift]", "gwcs/tests/test_api.py::test_low_level_wcs[gwcs_2d_spatial_shift]", "gwcs/tests/test_api.py::test_pixel_to_world[gwcs_2d_spatial_shift]", "gwcs/tests/test_api.py::test_array_index_to_world[gwcs_2d_spatial_shift]", "gwcs/tests/test_api.py::test_pixel_to_world_quantity", "gwcs/tests/test_api.py::test_array_index_to_world_quantity", "gwcs/tests/test_api.py::test_world_to_pixel_quantity", "gwcs/tests/test_api.py::test_world_to_array_index_quantity", "gwcs/tests/test_api.py::test_world_to_pixel[0]", "gwcs/tests/test_api.py::test_world_to_pixel[1]", "gwcs/tests/test_api.py::test_world_to_array_index[0]", "gwcs/tests/test_api.py::test_world_to_array_index[1]", "gwcs/tests/test_api.py::test_world_to_pixel_values[0]", "gwcs/tests/test_api.py::test_world_to_pixel_values[1]", "gwcs/tests/test_api.py::test_world_to_array_index_values[0]", "gwcs/tests/test_api.py::test_world_to_array_index_values[1]", "gwcs/tests/test_api.py::test_ndim_str_frames", "gwcs/tests/test_api.py::test_composite_many_base_frame", "gwcs/tests/test_api.py::test_coordinate_frame_api" ]
{ "failed_lite_validators": [ "has_many_modified_files" ], "has_test_patch": true, "is_lite": false }
2023-01-25 19:08:26+00:00
bsd-3-clause
5,605
spacetelescope__gwcs-475
diff --git a/CHANGES.rst b/CHANGES.rst
index 25fb598..a974d62 100644
--- a/CHANGES.rst
+++ b/CHANGES.rst
@@ -28,6 +28,9 @@ other
 - Register all available asdf extension manifests from ``asdf-wcs-schemas``
   except 1.0.0 (which contains duplicate tag versions). [#469]
 
+- Register empty extension for 1.0.0 to avoid warning about a missing
+  extension when opening old files. [#475]
+
 
 0.18.3 (2022-12-23)
 -------------------
diff --git a/gwcs/extension.py b/gwcs/extension.py
index 33eefa7..3d74695 100644
--- a/gwcs/extension.py
+++ b/gwcs/extension.py
@@ -1,7 +1,7 @@
 # Licensed under a 3-clause BSD style license - see LICENSE.rst
 import importlib.resources
 
-from asdf.extension import ManifestExtension
+from asdf.extension import Extension, ManifestExtension
 from .converters.wcs import (
     CelestialFrameConverter, CompositeFrameConverter, FrameConverter,
     Frame2DConverter, SpectralFrameConverter, StepConverter,
@@ -58,6 +58,20 @@ TRANSFORM_EXTENSIONS = [
     if len(WCS_MANIFEST_URIS) == 1 or '1.0.0' not in uri
 ]
 
+# if we don't register something for the 1.0.0 extension/manifest
+# opening old files will issue AsdfWarning messages stating that
+# the file was produced with an extension that is not installed
+# As the 1.0.1 and 1.1.0 extensions support all the required tags
+# it's not a helpful warning so here we register an 'empty'
+# extension for 1.0.0 which doesn't support any tags or types
+# but will be installed into asdf preventing the warning
+if len(TRANSFORM_EXTENSIONS) > 1:
+    class _EmptyExtension(Extension):
+        extension_uri = 'asdf://asdf-format.org/astronomy/gwcs/extensions/gwcs-1.0.0'
+        legacy_class_names=["gwcs.extension.GWCSExtension"]
+
+    TRANSFORM_EXTENSIONS.append(_EmptyExtension())
+
 
 def get_extensions():
     """
spacetelescope/gwcs
83efcccc6ba1d65039eb84365223340a8e39344e
diff --git a/gwcs/tests/test_extension.py b/gwcs/tests/test_extension.py
new file mode 100644
index 0000000..34a5057
--- /dev/null
+++ b/gwcs/tests/test_extension.py
@@ -0,0 +1,58 @@
+import io
+import warnings
+
+import asdf
+import asdf_wcs_schemas
+import gwcs.extension
+
+import pytest
+
+
[email protected](asdf_wcs_schemas.__version__ < "0.2.0", reason="version 0.2 provides the new manifests")
+def test_empty_extension():
+    """
+    Test that an empty extension was installed for gwcs 1.0.0
+    and that extensions are installed for gwcs 1.0.1 and 1.1.0
+    """
+    extensions = gwcs.extension.get_extensions()
+    assert len(extensions) > 1
+
+    extensions_by_uri = {ext.extension_uri: ext for ext in extensions}
+
+    # check for duplicate uris
+    assert len(extensions_by_uri) == len(extensions)
+
+    # check that all 3 versions are installed
+    for version in ('1.0.0', '1.0.1', '1.1.0'):
+        assert f"asdf://asdf-format.org/astronomy/gwcs/extensions/gwcs-{version}" in extensions_by_uri
+
+    # the 1.0.0 extension should support no tags or types
+    legacy = extensions_by_uri["asdf://asdf-format.org/astronomy/gwcs/extensions/gwcs-1.0.0"]
+    assert len(legacy.tags) == 0
+    assert len(legacy.converters) == 0
+
+
+def test_open_legacy_without_warning():
+    """
+    Opening a file produced with extension 1.0.0 should not produce any
+    warnings because of the empty extension registered for 1.0.0
+    """
+    asdf_bytes = b"""#ASDF 1.0.0
+#ASDF_STANDARD 1.5.0
+%YAML 1.1
+%TAG ! tag:stsci.edu:asdf/
+--- !core/asdf-1.1.0
+asdf_library: !core/software-1.0.0 {author: The ASDF Developers, homepage: 'http://github.com/asdf-format/asdf',
+  name: asdf, version: 2.9.2}
+history:
+  extensions:
+  - !core/extension_metadata-1.0.0
+    extension_class: asdf.extension._manifest.ManifestExtension
+    extension_uri: asdf://asdf-format.org/astronomy/gwcs/extensions/gwcs-1.0.0
+    software: !core/software-1.0.0 {name: gwcs, version: 0.18.0}
+foo: 1
+..."""
+    with warnings.catch_warnings():
+        warnings.simplefilter("error")
+        with asdf.open(io.BytesIO(asdf_bytes)) as af:
+            assert af['foo'] == 1
Consider adding a description in the docs about manifest changes in #469

As brought up in this comment: https://github.com/spacetelescope/jdaviz/issues/2446#issuecomment-1716413809

Users opening files generated with gwcs prior to the fix included in #469 will see warnings like the following:

```
asdf.exceptions.AsdfWarning: File was created with extension URI 'asdf://asdf-format.org/astronomy/gwcs/extensions/gwcs-1.0.0' (from package gwcs==0.18.2), which is not currently installed
```

due to `1.0.0` no longer being registered as an extension. As `1.0.1` and `1.1.0` cover all tags from `1.0.0`, it is safe to ignore this warning, but this nuance will likely be unknown to gwcs users.

One option would be to update the documentation and release notes, describing the issue and noting that the warning can be ignored until the file is regenerated (which will get rid of it). I'm also open to suggestions on other ways to handle this (including possibly registering a 'dummy' `1.0.0` extension which does nothing yet should hide the warning).

@nden @eslavich do you think this is worth investigating as an option?
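The 'dummy' extension floated at the end is the route the patch above takes. Condensed, the registered object is just an `Extension` subclass that claims the retired URI and provides no tags or converters:

```python
from asdf.extension import Extension

class _EmptyExtension(Extension):
    # Claims the retired 1.0.0 URI so asdf considers the extension
    # installed, silencing the AsdfWarning; it supports no tags or types.
    extension_uri = "asdf://asdf-format.org/astronomy/gwcs/extensions/gwcs-1.0.0"
    legacy_class_names = ["gwcs.extension.GWCSExtension"]
```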
0.0
83efcccc6ba1d65039eb84365223340a8e39344e
[ "gwcs/tests/test_extension.py::test_empty_extension", "gwcs/tests/test_extension.py::test_open_legacy_without_warning" ]
[]
{ "failed_lite_validators": [ "has_hyperlinks", "has_many_modified_files" ], "has_test_patch": true, "is_lite": false }
2023-09-13 13:10:54+00:00
bsd-3-clause
5,606
spacetelescope__jdaviz-2171
diff --git a/CHANGES.rst b/CHANGES.rst
index 67c32ed7..8c0f0ded 100644
--- a/CHANGES.rst
+++ b/CHANGES.rst
@@ -66,6 +66,9 @@ Bug Fixes
 Cubeviz
 ^^^^^^^
 
+- Fixed get_model_parameters error when retrieving parameters for a cube fit. This
+  also removed the "_3d" previously appended to model labels in the returned dict. [#2171]
+
 Imviz
 ^^^^^
 
diff --git a/jdaviz/app.py b/jdaviz/app.py
index 81d04123..e04b932c 100644
--- a/jdaviz/app.py
+++ b/jdaviz/app.py
@@ -13,6 +13,7 @@
 from astropy.nddata import CCDData, NDData
 from astropy.io import fits
 from astropy import units as u
 from astropy.coordinates import Angle
+from astropy.time import Time
 from regions import PixCoord, CirclePixelRegion, RectanglePixelRegion, EllipsePixelRegion
 
 from echo import CallbackProperty, DictCallbackProperty, ListCallbackProperty
@@ -686,8 +687,13 @@ class Application(VuetifyTemplate, HubListener):
             if cls is not None:
                 handler, _ = data_translator.get_handler_for(cls)
                 try:
-                    layer_data = handler.to_object(layer_data,
-                                                   statistic=statistic)
+                    if cls == Spectrum1D:
+                        # if this is a spectrum, apply the `statistic`:
+                        layer_data = handler.to_object(layer_data,
+                                                       statistic=statistic)
+                    else:
+                        # otherwise simply translate to an object:
+                        layer_data = handler.to_object(layer_data)
                 except IncompatibleAttribute:
                     continue
 
@@ -878,8 +884,9 @@ class Application(VuetifyTemplate, HubListener):
                 all_subsets[label] = None
                 continue
 
-            # Is the subset spectral or spatial?
+            # Is the subset spectral, spatial, temporal?
             is_spectral = self._is_subset_spectral(subset_region)
+            is_temporal = self._is_subset_temporal(subset_region)
 
             # Remove duplicate spectral regions
             if is_spectral and isinstance(subset_region, SpectralRegion):
@@ -903,6 +910,12 @@ class Application(VuetifyTemplate, HubListener):
             else:
                 all_subsets[label] = subset_region
 
+            if not (spectral_only or spatial_only) and is_temporal:
+                if object_only:
+                    all_subsets[label] = [reg['region'] for reg in subset_region]
+                else:
+                    all_subsets[label] = subset_region
+
         all_subset_names = [subset.label for subset in dc.subset_groups]
         if subset_name and subset_name in all_subset_names:
             return all_subsets[subset_name]
@@ -935,6 +948,14 @@ class Application(VuetifyTemplate, HubListener):
                 return True
         return False
 
+    def _is_subset_temporal(self, subset_region):
+        if isinstance(subset_region, Time):
+            return True
+        elif isinstance(subset_region, list) and len(subset_region) > 0:
+            if isinstance(subset_region[0]['region'], Time):
+                return True
+        return False
+
     def _remove_duplicate_bounds(self, spec_regions):
         regions_no_dups = None
 
diff --git a/jdaviz/core/helpers.py b/jdaviz/core/helpers.py
index fad12728..d9a41576 100644
--- a/jdaviz/core/helpers.py
+++ b/jdaviz/core/helpers.py
@@ -231,6 +231,12 @@ class ConfigHelper(HubListener):
         elif models is None:
             models = self.fitted_models
 
+        data_shapes = {}
+        for label in models:
+            data_label = label.split(" (")[0]
+            if data_label not in data_shapes:
+                data_shapes[data_label] = self.app.data_collection[data_label].data.shape
+
         param_dict = {}
         parameters_cube = {}
         param_x_y = {}
@@ -241,7 +247,7 @@ class ConfigHelper(HubListener):
             # looks for that style and separates out the pertinent information.
             if " (" in label:
                 label_split = label.split(" (")
-                model_name = label_split[0] + "_3d"
+                model_name = label_split[0]
                 x = int(label_split[1].split(", ")[0])
                 y = int(label_split[1].split(", ")[1][:-1])
 
@@ -268,10 +274,7 @@ class ConfigHelper(HubListener):
         # on whether the model in question is 3d or 1d, respectively.
         for model_name in param_dict:
             if model_name in param_x_y:
-                x_size = len(param_x_y[model_name]['x'])
-                y_size = len(param_x_y[model_name]['y'])
-
-                parameters_cube[model_name] = {x: np.zeros(shape=(x_size, y_size))
+                parameters_cube[model_name] = {x: np.zeros(shape=data_shapes[model_name][:2])
                                                for x in param_dict[model_name]}
             else:
                 parameters_cube[model_name] = {x: 0
@@ -282,7 +285,7 @@ class ConfigHelper(HubListener):
         for label in models:
             if " (" in label:
                 label_split = label.split(" (")
-                model_name = label_split[0] + "_3d"
+                model_name = label_split[0]
 
                 # If the get_models method is used to build a dictionary of
                 # models and a value is set for the x or y parameters, that
diff --git a/setup.cfg b/setup.cfg
index 90d5dab1..08b3ae6f 100644
--- a/setup.cfg
+++ b/setup.cfg
@@ -27,6 +27,7 @@ install_requires =
     glue-core>=1.6.0,!=v1.9.0
     glue-jupyter>=0.15.0
     echo>=0.5.0
+    ipython<8.13;python_version=='3.8'
     ipykernel>=6.19.4
     ipyvue>=1.6
     ipyvuetify>=1.7.0
diff --git a/tox.ini b/tox.ini
index 80e46204..52b0bda9 100644
--- a/tox.ini
+++ b/tox.ini
@@ -50,7 +50,8 @@ deps =
     devdeps: git+https://github.com/spacetelescope/gwcs.git
     devdeps: git+https://github.com/asdf-format/asdf.git
    devdeps: git+https://github.com/astropy/asdf-astropy.git
-    devdeps: git+https://github.com/spacetelescope/stdatamodels.git
+    # FIXME: https://github.com/spacetelescope/stdatamodels/issues/159
+    #devdeps: git+https://github.com/spacetelescope/stdatamodels.git
     devdeps: git+https://github.com/bqplot/[email protected]
     devdeps: git+https://github.com/glue-viz/glue.git
     devdeps: git+https://github.com/voila-dashboards/voila.git
spacetelescope/jdaviz
2953dec21b66c0c0c9acd57fe2825cd7c62707aa
diff --git a/jdaviz/configs/default/plugins/model_fitting/tests/test_fitting.py b/jdaviz/configs/default/plugins/model_fitting/tests/test_fitting.py
index 583c409a..41d418a0 100644
--- a/jdaviz/configs/default/plugins/model_fitting/tests/test_fitting.py
+++ b/jdaviz/configs/default/plugins/model_fitting/tests/test_fitting.py
@@ -7,6 +7,7 @@ from astropy.io import fits
 from astropy.io.registry.base import IORegistryError
 from astropy.modeling import models, parameters as params
 from astropy.nddata import StdDevUncertainty
+from astropy.tests.helper import assert_quantity_allclose
 from astropy.wcs import WCS
 from numpy.testing import assert_allclose, assert_array_equal
 from specutils.spectra import Spectrum1D
@@ -77,6 +78,30 @@ def test_model_ids(cubeviz_helper, spectral_cube_wcs):
         plugin.vue_add_model({})
 
 
[email protected](r"ignore:Model is linear in parameters.*")
+def test_parameter_retrieval(cubeviz_helper, spectral_cube_wcs):
+    flux = np.ones((3, 4, 5))
+    flux[2, 2, :] = [1, 2, 3, 4, 5]
+    cubeviz_helper.load_data(Spectrum1D(flux=flux * u.nJy, wcs=spectral_cube_wcs),
+                             data_label='test')
+    plugin = cubeviz_helper.plugins["Model Fitting"]
+    plugin.create_model_component("Linear1D", "L")
+    plugin.cube_fit = True
+    plugin.calculate_fit()
+
+    params = cubeviz_helper.get_model_parameters()
+    slope_res = np.zeros((4, 3))
+    slope_res[2, 2] = 1.0
+    slope_res = slope_res * u.nJy / u.Hz
+    intercept_res = np.ones((4, 3))
+    intercept_res[2, 2] = 0
+    intercept_res = intercept_res * u.nJy
+    assert_quantity_allclose(params['cube-fit model']['slope'], slope_res,
+                             atol=1e-10 * u.nJy / u.Hz)
+    assert_quantity_allclose(params['cube-fit model']['intercept'], intercept_res,
+                             atol=1e-10 * u.nJy)
+
+
 @pytest.mark.parametrize('unc', ('zeros', None))
 def test_fitting_backend(unc):
     np.random.seed(42)
diff --git a/jdaviz/configs/imviz/tests/test_regions.py b/jdaviz/configs/imviz/tests/test_regions.py
index 10072c7a..38a60fbc 100644
--- a/jdaviz/configs/imviz/tests/test_regions.py
+++ b/jdaviz/configs/imviz/tests/test_regions.py
@@ -1,14 +1,18 @@
+import glue_astronomy
 import numpy as np
 from astropy import units as u
 from astropy.coordinates import SkyCoord, Angle
 from astropy.utils.data import get_pkg_data_filename
+from packaging.version import Version
 from photutils.aperture import CircularAperture, SkyCircularAperture
 from regions import (PixCoord, CircleSkyRegion, RectanglePixelRegion, CirclePixelRegion,
                      EllipsePixelRegion, PointSkyRegion, PolygonPixelRegion,
-                     CircleAnnulusSkyRegion, Regions)
+                     CircleAnnulusPixelRegion, CircleAnnulusSkyRegion, Regions)
 
 from jdaviz.configs.imviz.tests.utils import BaseImviz_WCS_NoWCS
 
+GLUE_ASTRONOMY_LT_0_7_1 = not (Version(glue_astronomy.__version__) >= Version("0.7.1.dev"))
+
 
 class BaseRegionHandler:
     """Test to see if region is loaded.
@@ -230,10 +234,16 @@ class TestGetInteractiveRegions(BaseImviz_WCS_NoWCS):
         new_subset = subset_groups[0].subset_state & ~subset_groups[1].subset_state
         self.viewer.apply_subset_state(new_subset)
 
-        # Annulus is no longer accessible by API but also should not crash Imviz.
+        # In older glue-astronomy, annulus is no longer accessible by API
+        # but also should not crash Imviz.
         subsets = self.imviz.get_interactive_regions()
         assert len(self.imviz.app.data_collection.subset_groups) == 3
-        assert list(subsets.keys()) == ['Subset 1', 'Subset 2'], subsets
+        if GLUE_ASTRONOMY_LT_0_7_1:
+            expected_subset_keys = ['Subset 1', 'Subset 2']
+        else:
+            expected_subset_keys = ['Subset 1', 'Subset 2', 'Subset 3']
+            assert isinstance(subsets['Subset 3'], CircleAnnulusPixelRegion)
+        assert list(subsets.keys()) == expected_subset_keys, subsets
         assert isinstance(subsets['Subset 1'], CirclePixelRegion)
         assert isinstance(subsets['Subset 2'], CirclePixelRegion)
[BUG] Using cubeviz.get_model_parameters after cube fitting raises error

### Jdaviz component

Cubeviz

### Description

This morning, I tried to do a cube fit and then get the parameter arrays out with `cubeviz.get_model_parameters()`. This returned an IndexError (index 45 is out of range for array of size 45). I wonder if the string labels for the components are getting 1-indexed somehow - I imagine it's something along those lines anyway.

### How to Reproduce

1. Run a model fit without and then with "Cube Fit" selected.
2. Try to run `[helper].get_model_parameters()` in the notebooks.

### Expected behavior

We're supposed to get a dictionary out that contains arrays of the fitted model parameters at each spaxel.

### Browser

_No response_

### Jupyter

_No response_

### Software versions

This was using the current release (3.4.0) of Jdaviz.

[🐱](https://jira.stsci.edu/browse/JDAT-3306)
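For orientation, a rough sketch of the failure mode and the fix direction taken in the patch above (illustrative values, not jdaviz internals): spaxel fits are labeled `"model (x, y)"`, and sizing the parameter arrays from the counts of distinct x/y values seen can come up short, whereas sizing them from the data shape cannot.

```python
import numpy as np

# Labels for spaxel fits look like "model (x, y)":
label = "cube-fit model (12, 7)"
model_name, coords = label.split(" (")
x, y = (int(v) for v in coords.rstrip(")").split(", "))

# Allocating from the data's own shape (as the patch does) is always safe;
# the example cube shape and axis order here are illustrative.
data_shape = (45, 55, 100)
param_array = np.zeros(shape=data_shape[:2])
param_array[x, y] = 1.23   # no IndexError for any valid spaxel
```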
0.0
2953dec21b66c0c0c9acd57fe2825cd7c62707aa
[ "jdaviz/configs/default/plugins/model_fitting/tests/test_fitting.py::test_parameter_retrieval" ]
[ "jdaviz/configs/default/plugins/model_fitting/tests/test_fitting.py::test_model_params", "jdaviz/configs/default/plugins/model_fitting/tests/test_fitting.py::test_model_ids", "jdaviz/configs/default/plugins/model_fitting/tests/test_fitting.py::test_fitting_backend[zeros]", "jdaviz/configs/default/plugins/model_fitting/tests/test_fitting.py::test_fitting_backend[None]", "jdaviz/configs/default/plugins/model_fitting/tests/test_fitting.py::test_cube_fitting_backend[zeros]", "jdaviz/configs/default/plugins/model_fitting/tests/test_fitting.py::test_cube_fitting_backend[None]", "jdaviz/configs/default/plugins/model_fitting/tests/test_fitting.py::test_results_table", "jdaviz/configs/imviz/tests/test_regions.py::TestLoadRegions::test_regions_invalid", "jdaviz/configs/imviz/tests/test_regions.py::TestLoadRegions::test_regions_fully_out_of_bounds", "jdaviz/configs/imviz/tests/test_regions.py::TestLoadRegions::test_regions_mask", "jdaviz/configs/imviz/tests/test_regions.py::TestLoadRegions::test_regions_pixel", "jdaviz/configs/imviz/tests/test_regions.py::TestLoadRegions::test_regions_sky_has_wcs", "jdaviz/configs/imviz/tests/test_regions.py::TestLoadRegions::test_photutils_pixel", "jdaviz/configs/imviz/tests/test_regions.py::TestLoadRegions::test_photutils_sky_has_wcs", "jdaviz/configs/imviz/tests/test_regions.py::TestLoadRegionsFromFile::test_ds9_load_all", "jdaviz/configs/imviz/tests/test_regions.py::TestLoadRegionsFromFile::test_ds9_load_two_good", "jdaviz/configs/imviz/tests/test_regions.py::TestLoadRegionsFromFile::test_ds9_load_one_bad", "jdaviz/configs/imviz/tests/test_regions.py::TestLoadRegionsFromFile::test_ds9_load_one_good_one_bad", "jdaviz/configs/imviz/tests/test_regions.py::TestGetInteractiveRegions::test_annulus" ]
{ "failed_lite_validators": [ "has_hyperlinks", "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false }
2023-04-27 20:58:33+00:00
bsd-3-clause
5,607
spacetelescope__jwql-208
diff --git a/jwql/utils/utils.py b/jwql/utils/utils.py
index 9f3f4102..dc9eb1f1 100644
--- a/jwql/utils/utils.py
+++ b/jwql/utils/utils.py
@@ -29,6 +29,7 @@ from jwql.utils import permissions
 
 __location__ = os.path.realpath(os.path.join(os.getcwd(), os.path.dirname(__file__)))
 
+FILE_SUFFIX_TYPES = ['uncal', 'cal', 'rateints', 'rate', 'trapsfilled', 'uncal']
 JWST_INSTRUMENTS = sorted(['NIRISS', 'NIRCam', 'NIRSpec', 'MIRI', 'FGS'])
 JWST_DATAPRODUCTS = ['IMAGE', 'SPECTRUM', 'SED', 'TIMESERIES', 'VISIBILITY',
                      'EVENTLIST', 'CUBE', 'CATALOG', 'ENGINEERING', 'NULL']
@@ -53,6 +54,11 @@ MONITORS = {
 NIRCAM_SHORTWAVE_DETECTORS = ['NRCA1', 'NRCA2', 'NRCA3', 'NRCA4',
                               'NRCB1', 'NRCB2', 'NRCB3', 'NRCB4']
 NIRCAM_LONGWAVE_DETECTORS = ['NRCA5', 'NRCB5']
 
+INSTRUMENTS_SHORTHAND = {'gui': 'FGS',
+                         'mir': 'MIRI',
+                         'nis': 'NIRISS',
+                         'nrc': 'NIRCam',
+                         'nrs': 'NIRSpec'}
 
 def ensure_dir_exists(fullpath):
@@ -100,17 +106,23 @@ def filename_parser(filename):
     """
     filename = os.path.basename(filename)
+    file_root_name = (len(filename.split('.')) < 2)
+
+    regex_string_to_compile = r"[a-z]+" \
+        "(?P<program_id>\d{5})"\
+        "(?P<observation>\d{3})"\
+        "(?P<visit>\d{3})"\
+        "_(?P<visit_group>\d{2})"\
+        "(?P<parallel_seq_id>\d{1})"\
+        "(?P<activity>\w{2})"\
+        "_(?P<exposure_id>\d+)"\
+        "_(?P<detector>\w+)"
+
+    if not file_root_name:
+        regex_string_to_compile += r"_(?P<suffix>{}).*".format('|'.join(FILE_SUFFIX_TYPES))
+
     elements = \
-        re.compile(r"[a-z]+"
-                   "(?P<program_id>\d{5})"
-                   "(?P<observation>\d{3})"
-                   "(?P<visit>\d{3})"
-                   "_(?P<visit_group>\d{2})"
-                   "(?P<parallel_seq_id>\d{1})"
-                   "(?P<activity>\w{2})"
-                   "_(?P<exposure_id>\d+)"
-                   "_(?P<detector>\w+)"
-                   "_(?P<suffix>\w+).*")
+        re.compile(regex_string_to_compile)
 
     jwst_file = elements.match(filename)
diff --git a/jwql/website/apps/jwql/forms.py b/jwql/website/apps/jwql/forms.py
new file mode 100644
index 00000000..dac67493
--- /dev/null
+++ b/jwql/website/apps/jwql/forms.py
@@ -0,0 +1,158 @@
+"""Defines the forms for the ``jwql`` web app.
+
+Django allows for an object-oriented model representation of forms for
+users to provide input through HTTP POST methods. This module defines
+all of the forms that are used across the various webpages used for the
+JWQL application.
+
+Authors
+-------
+
+    - Lauren Chambers
+
+Use
+---
+
+    This module is used within ``views.py`` as such:
+    ::
+
+        from .forms import FileSearchForm
+
+        def view_function(request):
+            form = FileSearchForm(request.POST or None)
+
+            if request.method == 'POST':
+                if form.is_valid():
+                    # Process form input and redirect
+                    return redirect(new_url)
+
+            template = 'some_template.html'
+            context = {'form': form, ...}
+            return render(request, template, context)
+
+References
+----------
+    For more information please see:
+        ``https://docs.djangoproject.com/en/2.1/topics/forms/``
+
+Dependencies
+------------
+    The user must have a configuration file named ``config.json``
+    placed in the ``jwql/utils/`` directory.
+"""
+
+import glob
+import os
+
+from django import forms
+from django.shortcuts import redirect
+
+from jwql.utils.utils import get_config, filename_parser, INSTRUMENTS_SHORTHAND
+
+FILESYSTEM_DIR = os.path.join(get_config()['jwql_dir'], 'filesystem')
+
+
+class FileSearchForm(forms.Form):
+    """A form that contains a single field for users to search for a proposal
+    or fileroot in the JWQL filesystem.
+    """
+    # Define search field
+    search = forms.CharField(label='', max_length=500, required=True,
+                             empty_value='Search')
+
+    # Initialize attributes
+    fileroot_dict = None
+    search_type = None
+    instrument = None
+
+    def clean_search(self):
+        """Validate the "search" field by checking to ensure the input
+        is either a proposal or fileroot, and one that matches files
+        in the filesystem.
+
+        Returns
+        -------
+        str
+            The cleaned data input into the "search" field
+        """
+        # Get the cleaned search data
+        search = self.cleaned_data['search']
+
+        # Make sure the search is either a proposal or fileroot
+        if len(search) == 5 and search.isnumeric():
+            self.search_type = 'proposal'
+        elif self._search_is_fileroot(search):
+            self.search_type = 'fileroot'
+        else:
+            raise forms.ValidationError('Invalid search term {}. Please provide proposal number or file root.'.format(search))
+
+        # If they searched for a proposal...
+        if self.search_type == 'proposal':
+            # See if there are any matching proposals and, if so, what
+            # instrument they are for
+            search_string = os.path.join(FILESYSTEM_DIR, 'jw{}'.format(search),
+                                         '*{}*.fits'.format(search))
+            all_files = glob.glob(search_string)
+            if len(all_files) > 0:
+                all_instruments = []
+                for file in all_files:
+                    instrument = filename_parser(file)['detector']
+                    all_instruments.append(instrument[:3])
+                if len(set(all_instruments)) > 1:
+                    raise forms.ValidationError('Cannot return result for proposal with multiple instruments.')
+
+                self.instrument = INSTRUMENTS_SHORTHAND[all_instruments[0]]
+            else:
+                raise forms.ValidationError('Proposal {} not in the filesystem.'.format(search))
+
+        # If they searched for a fileroot...
+        elif self.search_type == 'fileroot':
+            # See if there are any matching fileroots and, if so, what
+            # instrument they are for
+            search_string = os.path.join(FILESYSTEM_DIR, search[:7], '{}*.fits'.format(search))
+            all_files = glob.glob(search_string)
+
+            if len(all_files) == 0:
+                raise forms.ValidationError('Fileroot {} not in the filesystem.'.format(search))
+
+            instrument = search.split('_')[-1][:3]
+            self.instrument = INSTRUMENTS_SHORTHAND[instrument]
+
+        return self.cleaned_data['search']
+
+    def _search_is_fileroot(self, search):
+        """Determine if a search value is formatted like a fileroot.
+
+        Parameters
+        ----------
+        search : str
+            The search term input by the user.
+
+        Returns
+        -------
+        bool
+            Is the search term formatted like a fileroot?
+        """
+        try:
+            self.fileroot_dict = filename_parser(search)
+            return True
+        except ValueError:
+            return False
+
+    def redirect_to_files(self):
+        """Determine where to redirect the web app based on user search input.
+
+        Returns
+        -------
+        HttpResponseRedirect object
+            Outgoing redirect response sent to the webpage
+        """
+
+        # Process the data in form.cleaned_data as required
+        search = self.cleaned_data['search']
+
+        # If they searched for a proposal
+        if self.search_type == 'proposal':
+            return redirect('/jwql/{}/archive/{}'.format(self.instrument, search))
+
+        # If they searched for a file root
+        elif self.search_type == 'fileroot':
+            return redirect('/jwql/{}/{}'.format(self.instrument, search))
diff --git a/jwql/website/apps/jwql/static/css/jwql.css b/jwql/website/apps/jwql/static/css/jwql.css
index b2f109bb..1d7de0d9 100644
--- a/jwql/website/apps/jwql/static/css/jwql.css
+++ b/jwql/website/apps/jwql/static/css/jwql.css
@@ -104,6 +104,23 @@
     display: inline-block;
 }
 
+/*Stop the search box from glowing blue*/
+#homepage_filesearch #id_search {
+    width: 500px;
+    height: 100%;
+    padding: 0px;
+}
+
+#homepage_filesearch #id_search:focus {
+    box-shadow: none;
+    border-color: #cfd4da;
+}
+
+/*Make the form fields be inline*/
+.homepage_form_fieldWrapper {
+    display: inline;
+}
+
 /*Don't let the search bar be super long*/
 .input-group {
     width: 250px;
diff --git a/jwql/website/apps/jwql/templates/home.html b/jwql/website/apps/jwql/templates/home.html
index 73522109..d3e6ba4f 100644
--- a/jwql/website/apps/jwql/templates/home.html
+++ b/jwql/website/apps/jwql/templates/home.html
@@ -4,6 +4,9 @@
 
     <title>Home - JWQL</title>
 
+    <!-- Custom styles and scripts for this template -->
+    <link rel="stylesheet" href="https://use.fontawesome.com/releases/v5.0.13/css/all.css" integrity="sha384-DNOHZ68U8hZfKXOrtjWvjxusGo9WQnrNx2sqG0tfsghAvtVlRW3tvkXWZh58N9jp" crossorigin="anonymous">
+
 {% endblock %}
 
 {% block content %}
@@ -32,18 +35,49 @@
         </div>
     </div>
 
-    <div class="row my-2" style="margin-left: 7rem; margin-right: 7rem;">
-        <div class="col-md-6 py-2"><a class="btn btn-block btn-primary" role="button" href={{ url('jwql:dashboard') }}>Dashboard</a></div>
-        <div class="col-md-6 py-2"><a class="btn btn-block btn-primary disabled" role="button" href="#">Query Database</a></div>
-    </div>
-
-    <br>
 
     The JWST Quicklook Application (JWQL) is a database-driven web application and
     automation framework for use by the JWST instrument teams to monitor the health and
     stability of the JWST instruments. Visit our <a href={{ url('jwql:about') }}>about page</a>
     to learn more about our project, goals, and developers.<br><br>
 
     The JWQL application is currently under heavy development. The 1.0 release is
     expected in 2019.
 
+    <hr>
+
+    <h4>Find a JWST Proposal or File</h4>
+
+    Submit a proposal number (e.g. 86600) or file root (e.g.
+    jw86600006001_02101_00008_guider1) to view that proposal or file:
+
+
+
+
+    <!--Load the file search form from the view-->
+    <form action="" method="post" id="homepage_filesearch">
+        <!--Show any errors from a previous form submission-->
+        {% if form.errors %}
+            {% for field in form %}
+                {% for error in field.errors %}
+                    <div class="alert alert-danger">
+                        <strong>{{ error|escape }}</strong>
+                    </div>
+                {% endfor %}
+            {% endfor %}
+        {% endif %}
+
+        <!--Django Cross-Site Request Forgery magic-->
+        {{ csrf_input }}
+
+        <!--Show the field forms-->
+        {% for field in form %}
+            <div class="homepage_form_fieldWrapper">
+                {{ field }}
+                {% if field.help_text %}
+                    <p class="help">{{ field.help_text|safe }}</p>
+                {% endif %}
+            </div>
+        {% endfor %}
+        <button class="btn btn-primary" type="submit"><span class="fas fa-search"></span></button>
+    </form>
 
 </main>
-{% endblock %}
\ No newline at end of file
+{% endblock %}
diff --git a/jwql/website/apps/jwql/views.py b/jwql/website/apps/jwql/views.py
index 52b8ec83..0f4dc375 100644
--- a/jwql/website/apps/jwql/views.py
+++ b/jwql/website/apps/jwql/views.py
@@ -44,6 +44,7 @@ from .data_containers import get_header_info
 from .data_containers import get_image_info
 from .data_containers import get_proposal_info
 from .data_containers import thumbnails
+from .forms import FileSearchForm
 
 from jwql.utils.utils import get_config, JWST_INSTRUMENTS, MONITORS
 
@@ -172,10 +173,20 @@ def home(request):
     HttpResponse object
         Outgoing response sent to the webpage
     """
+
+    # Create a form instance and populate it with data from the request
+    form = FileSearchForm(request.POST or None)
+
+    # If this is a POST request, we need to process the form data
+    if request.method == 'POST':
+        if form.is_valid():
+            return form.redirect_to_files()
+
     template = 'home.html'
     context = {'inst': '',
                'inst_list': JWST_INSTRUMENTS,
-               'tools': MONITORS}
+               'tools': MONITORS,
+               'form': form}
 
     return render(request, template, context)
spacetelescope/jwql
f3f8ac0fa336824c8a1c7c5aaeadb63e3da55266
diff --git a/jwql/tests/test_utils.py b/jwql/tests/test_utils.py
index 9a72a5ba..9dc73646 100644
--- a/jwql/tests/test_utils.py
+++ b/jwql/tests/test_utils.py
@@ -49,6 +49,24 @@ def test_filename_parser_filename():
     assert filename_dict == correct_dict
 
 
+def test_filename_parser_filename_root():
+    '''Generate a dictionary with parameters from a JWST filename.
+    Assert that the dictionary matches what is expected.
+    '''
+    filename = 'jw00327001001_02101_00002_nrca1'
+    filename_dict = filename_parser(filename)
+
+    correct_dict = {'activity': '01',
+                    'detector': 'nrca1',
+                    'exposure_id': '00002',
+                    'observation': '001',
+                    'parallel_seq_id': '1',
+                    'program_id': '00327',
+                    'visit': '001',
+                    'visit_group': '02'}
+
+    assert filename_dict == correct_dict
+
 def test_filename_parser_filepath():
     '''Generate a dictionary with parameters from a JWST filepath
Form on main page to quickly view a particular rootname or proposal

I think our users would find it useful to be able to get straight to viewing a single observation or a particular proposal right from the homepage, instead of having to navigate the archive. We could have a blank form to enter a rootname or proposal number that, upon submission, redirects the user to the appropriate 'view image' or 'view proposal' page.
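A rough sketch of the dispatch logic such a form needs (hypothetical helper and URL patterns, not the jwql implementation): a 5-digit number is treated as a proposal, and anything matching the JWST file-root pattern as a single exposure.

```python
import re

# Illustrative pattern for roots like jw86600006001_02101_00008_guider1;
# the real parser in jwql uses named groups (see the patch above).
FILEROOT_RE = re.compile(r"jw\d{11}_\d{5}_\d{5}_[a-z0-9]+$")

def redirect_target(search):
    if len(search) == 5 and search.isnumeric():
        return "/archive/{}".format(search)      # proposal page (URL illustrative)
    if FILEROOT_RE.match(search):
        return "/exposure/{}".format(search)     # single-exposure page (URL illustrative)
    raise ValueError("not a proposal number or file root: " + search)

assert redirect_target("86600") == "/archive/86600"
assert redirect_target("jw86600006001_02101_00008_guider1").startswith("/exposure/")
```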
0.0
f3f8ac0fa336824c8a1c7c5aaeadb63e3da55266
[ "jwql/tests/test_utils.py::test_filename_parser_filename_root" ]
[ "jwql/tests/test_utils.py::test_filename_parser_filename", "jwql/tests/test_utils.py::test_filename_parser_filepath", "jwql/tests/test_utils.py::test_filename_parser_nonJWST" ]
{ "failed_lite_validators": [ "has_added_files", "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false }
2018-12-04 05:05:42+00:00
bsd-3-clause
5,608
spacetelescope__jwst-3756
diff --git a/CHANGES.rst b/CHANGES.rst
index 1db2ee834..ae5bc9f2f 100644
--- a/CHANGES.rst
+++ b/CHANGES.rst
@@ -23,7 +23,7 @@ datamodels
 - Fixed missing TUNITn keywords caused by changes for unsigned int columns. [#3753]
 
 - Write ``siaf_xref_sci`` and ``siaf_yref_sci`` to FITS keywords ``XREF_SCI``
-  and ``YREF_SCI`` for ``NRC_TSGRISM`` exposures. [#3766]
+  and ``YREF_SCI`` for ``NRC_TSGRISM`` exposures. [#3766
 
 extract_1d
 ----------
@@ -36,6 +36,11 @@ group_scale
 
 - Updates to documentation and log messages. [#3738]
 
+lib
+---
+
+- A function to determine the dispersion direction has been added. [#3756]
+
 stpipe
 ------
 
diff --git a/jwst/lib/dispaxis.py b/jwst/lib/dispaxis.py
new file mode 100644
index 000000000..74a255018
--- /dev/null
+++ b/jwst/lib/dispaxis.py
@@ -0,0 +1,152 @@
+import logging
+
+log = logging.getLogger(__name__)
+log.setLevel(logging.DEBUG)
+
+def get_dispersion_direction(exposure_type, grating="ANY", filter_wh="ANY",
+                             pupil="ANY"):
+    """Get the dispersion direction.
+
+    Parameters
+    ----------
+    exposure_type : str
+        The exposure type.
+
+    grating : str
+        The name of the optic in the grating wheel.
+
+    filter_wh : str
+        The name of the optic in the filter wheel.
+
+    pupil : str
+        The name of the optic in the pupil wheel.
+
+    Returns
+    -------
+    int or None : The dispersion direction
+
+- None  the dispersion direction is not meaningful or not defined
+- 1  the dispersion direction is horizontal ("sky" coordinates)
+- 2  the dispersion direction is vertical ("sky" coordinates)
+    """
+
+    exposure_type = exposure_type.upper()
+    grating = grating.upper()
+    filter_wh = filter_wh.upper()
+    pupil = pupil.upper()
+
+    # The keys of the `by_exp_type` dictionary are values of exposure type.
+    # If the dispersion direction is uniquely determined by the exposure
+    # type, the associated value of by_exp_type[exposure_type] will be an
+    # integer, the dispersion direction.  If one or more parameters other
+    # than the exposure type are needed, the value of
+    # by_exp_type[exposure_type] will be a tuple of several strings; use
+    # that tuple as the key for dictionary `second_pass`, and the value of
+    # that will be the dispersion direction.
+    by_exp_type = {
+        # FGS science
+        "FGS_DARK": None,
+        "FGS_FOCUS": None,
+        "FGS_IMAGE": None,
+        "FGS_INTFLAT": None,
+        "FGS_SKYFLAT": None,
+        # FGS guide star
+        "FGS_ACQ1": None,
+        "FGS_ACQ2": None,
+        "FGS_FINEGUIDE": None,
+        "FGS_ID-IMAGE": None,
+        "FGS_ID-STACK": None,
+        "FGS_TRACK": None,
+        # MIRI
+        "MIR_4QPM": None,
+        "MIR_CORONCAL": None,
+        "MIR_DARKALL": None,
+        "MIR_DARKIMG": None,
+        "MIR_DARKMRS": None,
+        "MIR_FLATALL": None,
+        "MIR_FLATIMAGE": None,
+        "MIR_FLATIMAGE-EXT": None,
+        "MIR_FLATMRS": 2,
+        "MIR_FLATMRS-EXT": 2,
+        "MIR_IMAGE": None,
+        "MIR_LRS-FIXEDSLIT": 2,
+        "MIR_LRS-SLITLESS": 2,
+        "MIR_LYOT": None,
+        "MIR_MRS": 2,
+        "MIR_TACQ": None,
+        # NIRISS
+        "NIS_AMI": None,
+        "NIS_DARK": None,
+        "NIS_EXTCAL": None,
+        "NIS_FOCUS": None,
+        "NIS_IMAGE": None,
+        "NIS_LAMP": None,
+        "NIS_SOSS": 1,
+        "NIS_TACQ": None,
+        "NIS_TACONFIRM": None,
+        "NIS_WFSS": (exposure_type, "ANY", filter_wh, "ANY"),
+        # NIRCam
+        "NRC_CORON": None,
+        "NRC_DARK": None,
+        "NRC_FLAT": None,
+        "NRC_FOCUS": None,
+        "NRC_GRISM": (exposure_type, "ANY", "ANY", pupil),
+        "NRC_IMAGE": None,
+        "NRC_WFSS": (exposure_type, "ANY", "ANY", pupil),
+        "NRC_LED": None,
+        "NRC_WFSC": None,
+        "NRC_TACONFIRM": None,
+        "NRC_TACQ": None,
+        "NRC_TSGRISM": (exposure_type, "ANY", "ANY", pupil),
+        "NRC_TSIMAGE": None,
+        # NIRSpec
+        "NRS_AUTOFLAT": None,
+        "NRS_AUTOWAVE": 1,
+        "NRS_BRIGHTOBJ": 1,
+        "NRS_CONFIRM": None,
+        "NRS_DARK": None,
+        "NRS_FIXEDSLIT": 1,
+        "NRS_FOCUS": None,
+        "NRS_IFU": 1,
+        "NRS_IMAGE": None,
+        "NRS_LAMP": 1,
+        "NRS_MIMF": None,
+        "NRS_MSASPEC": 1,
+        "NRS_MSATA": None,
+        "NRS_TACONFIRM": None,
+        "NRS_TACQ": None,
+        "NRS_TASLIT": None,
+        "NRS_WATA": None,
+        # Misc
+        "N/A": None,
+        "ANY": None
+    }
+
+    if exposure_type not in by_exp_type.keys():
+        return None
+
+    # The strings in each tuple are exposure_type, grating, filter_wh, pupil.
+    second_pass = {
+        ("NIS_WFSS", "ANY", "GR150R", "ANY"): 2,
+        ("NIS_WFSS", "ANY", "GR150C", "ANY"): 1,
+
+        ("NRC_GRISM", "ANY", "ANY", "GRISMR"): 1,
+        ("NRC_GRISM", "ANY", "ANY", "GRISMC"): 2,
+
+        ("NRC_TSGRISM", "ANY", "ANY", "GRISMR"): 1,
+
+        ("NRC_WFSS", "ANY", "ANY", "GRISMR"): 1,
+        ("NRC_WFSS", "ANY", "ANY", "GRISMC"): 2
+    }
+
+    select = by_exp_type[exposure_type]
+    if isinstance(select, int):
+        return select
+    else:
+        if select in second_pass.keys():
+            return second_pass[select]
+        else:
+            log.warning("Error in get_dispersion_direction: {} not in "
+                        "`second_pass`".format(select))
+            log.warning("Dispersion direction could not be determined.")
+            return None
spacetelescope/jwst
328d228e8e22b1ae584aa503708317be6dc573c1
diff --git a/jwst/lib/tests/test_dispaxis.py b/jwst/lib/tests/test_dispaxis.py
new file mode 100644
index 000000000..17b328f27
--- /dev/null
+++ b/jwst/lib/tests/test_dispaxis.py
@@ -0,0 +1,85 @@
+"""
+Test for dispaxis
+"""
+
+from .. import dispaxis
+
+def test_dispaxis_1():
+    value = dispaxis.get_dispersion_direction("FGS_IMAGE", "junk", "junk",
+                                              "junk")
+    assert value == None
+
+def test_dispaxis_2():
+    value = dispaxis.get_dispersion_direction("MIR_4QPM", "junk", "junk",
+                                              "junk")
+    assert value == None
+
+def test_dispaxis_3():
+    value = dispaxis.get_dispersion_direction("MIR_LRS-SLITLESS", "junk",
+                                              "junk", "junk")
+    assert value == 2
+
+def test_dispaxis_4():
+    value = dispaxis.get_dispersion_direction("MIR_LRS-FIXEDSLIT", "junk",
+                                              "junk", "junk")
+    assert value == 2
+
+def test_dispaxis_5():
+    value = dispaxis.get_dispersion_direction("NIS_SOSS", "junk", "junk",
+                                              "junk")
+    assert value == 1
+
+def test_dispaxis_6():
+    value = dispaxis.get_dispersion_direction("NRS_FIXEDSLIT", "junk", "junk",
+                                              "junk")
+    assert value == 1
+
+def test_dispaxis_7():
+    value = dispaxis.get_dispersion_direction("NRS_IFU", "junk", "junk",
+                                              "junk")
+    assert value == 1
+
+def test_dispaxis_8():
+    value = dispaxis.get_dispersion_direction("NRS_MSASPEC", "junk", "junk",
+                                              "junk")
+    assert value == 1
+
+def test_dispaxis_9():
+    value = dispaxis.get_dispersion_direction("NIS_WFSS", "junk", "GR150R",
+                                              "junk")
+    assert value == 2
+
+def test_dispaxis_10():
+    value = dispaxis.get_dispersion_direction("NIS_WFSS", "junk", "GR150C",
+                                              "junk")
+    assert value == 1
+
+def test_dispaxis_11():
+    value = dispaxis.get_dispersion_direction("NRC_GRISM", "junk", "junk",
+                                              "GRISMR")
+    assert value == 1
+
+def test_dispaxis_12():
+    value = dispaxis.get_dispersion_direction("NRC_GRISM", "junk", "junk",
+                                              "GRISMC")
+    assert value == 2
+
+def test_dispaxis_13():
+    value = dispaxis.get_dispersion_direction("NRC_TSGRISM", "junk", "junk",
+                                              "GRISMR")
+    assert value == 1
+
+def test_dispaxis_14():
+    value = dispaxis.get_dispersion_direction("NRC_WFSS", "junk", "junk",
+                                              "GRISMR")
+    assert value == 1
+
+def test_dispaxis_15():
+    value = dispaxis.get_dispersion_direction("NRC_WFSS", "junk", "junk",
+                                              "GRISMC")
+    assert value == 2
+
+def test_dispaxis_16():
+    value = dispaxis.get_dispersion_direction("nrc_grism", "junk", "junk",
+                                              "missing")
+    assert value == None
Write a function to obtain the dispersion direction

Issue [JP-774](https://jira.stsci.edu/browse/JP-774) was created by Philip Hodge:

For spectroscopic data, there should be a function to determine the dispersion direction, based on instrument mode.
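Usage of the function this request produced (module path and signature as in the patch above; expected values taken from the unit tests):

```python
from jwst.lib.dispaxis import get_dispersion_direction

get_dispersion_direction("MIR_LRS-FIXEDSLIT", "junk", "junk", "junk")  # -> 2
get_dispersion_direction("NRS_IFU", "junk", "junk", "junk")            # -> 1
get_dispersion_direction("NIS_WFSS", "junk", "GR150C", "junk")         # -> 1
get_dispersion_direction("FGS_IMAGE", "junk", "junk", "junk")          # -> None
```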
0.0
328d228e8e22b1ae584aa503708317be6dc573c1
[ "jwst/lib/tests/test_dispaxis.py::test_dispaxis_1", "jwst/lib/tests/test_dispaxis.py::test_dispaxis_2", "jwst/lib/tests/test_dispaxis.py::test_dispaxis_3", "jwst/lib/tests/test_dispaxis.py::test_dispaxis_4", "jwst/lib/tests/test_dispaxis.py::test_dispaxis_5", "jwst/lib/tests/test_dispaxis.py::test_dispaxis_6", "jwst/lib/tests/test_dispaxis.py::test_dispaxis_7", "jwst/lib/tests/test_dispaxis.py::test_dispaxis_8", "jwst/lib/tests/test_dispaxis.py::test_dispaxis_9", "jwst/lib/tests/test_dispaxis.py::test_dispaxis_10", "jwst/lib/tests/test_dispaxis.py::test_dispaxis_11", "jwst/lib/tests/test_dispaxis.py::test_dispaxis_12", "jwst/lib/tests/test_dispaxis.py::test_dispaxis_13", "jwst/lib/tests/test_dispaxis.py::test_dispaxis_14", "jwst/lib/tests/test_dispaxis.py::test_dispaxis_15", "jwst/lib/tests/test_dispaxis.py::test_dispaxis_16" ]
[]
{ "failed_lite_validators": [ "has_short_problem_statement", "has_hyperlinks", "has_added_files" ], "has_test_patch": true, "is_lite": false }
2019-07-01 14:17:15+00:00
bsd-3-clause
5,609
spacetelescope__jwst-7809
diff --git a/CHANGES.rst b/CHANGES.rst
index b84da22e0..7852f2583 100644
--- a/CHANGES.rst
+++ b/CHANGES.rst
@@ -6,6 +6,9 @@ assign_wcs
 
 - Use isinstance instead of comparison with a type for lamp_mode inspection [#7801]
 
+- Save bounding box to imaging WCS matching the shape of the data, for datamodels
+  without a defined bounding box. [#7809]
+
 associations
 ------------
 
@@ -93,6 +96,13 @@ residual_fringe
 
 - Use scipy.interpolate.BSpline instead of astropy.modeling.Spline1D in residual_fringe
   fitting utils [#7764]
 
+undersampling_correction
+------------------------
+
+- Changed default signal threshold, added efficient routine to flag neighborhood
+  pixels, added new unit test, improved earlier unit tests, updated docs. [#7740]
+
+
 1.11.4 (2023-08-14)
 ===================
diff --git a/docs/jwst/pipeline/calwebb_detector1.rst b/docs/jwst/pipeline/calwebb_detector1.rst
index ca126b4b5..07e78aa44 100644
--- a/docs/jwst/pipeline/calwebb_detector1.rst
+++ b/docs/jwst/pipeline/calwebb_detector1.rst
@@ -71,10 +71,10 @@ on either the first group or frame zero pixel values.
 | :ref:`dark_current <dark_current_step>`                              | |check| | |check| | :ref:`dark_current <dark_current_step>` | |check| | |check| |
 +----------------------------------------------------------------------+---------+---------+-----------------------------------------+---------+---------+
 |                                                                      |         |         | :ref:`refpix <refpix_step>`             | |check| | |check| |
-+----------------------------------------------------------------------+---------+---------+----------------------+------------------+---------+---------+
-| :ref:`jump <jump_step>`                                              | |check| | |check| | :ref:`jump <jump_step>`                 | |check| | |check| |
 +----------------------------------------------------------------------+---------+---------+-----------------------------------------+---------+---------+
 | :ref:`undersampling_correction <undersampling_correction_step>` [3]_ | |check| |         |                                         |         |         |
++----------------------------------------------------------------------+---------+---------+----------------------+------------------+---------+---------+
+| :ref:`jump <jump_step>`                                              | |check| | |check| | :ref:`jump <jump_step>`                 | |check| | |check| |
 +----------------------------------------------------------------------+---------+---------+-----------------------------------------+---------+---------+
 | :ref:`ramp_fitting <ramp_fitting_step>`                              | |check| | |check| | :ref:`ramp_fitting <ramp_fitting_step>` | |check| | |check| |
 +----------------------------------------------------------------------+---------+---------+-----------------------------------------+---------+---------+
diff --git a/docs/jwst/undersampling_correction/description.rst b/docs/jwst/undersampling_correction/description.rst
index 4b9387632..3a128b339 100644
--- a/docs/jwst/undersampling_correction/description.rst
+++ b/docs/jwst/undersampling_correction/description.rst
@@ -12,11 +12,15 @@
 a pixel. When the peak pixels of such stars approach the saturation level, they suffer
 significant :ref:`charge migration <charge_migration>`: the spilling of charge from a
 saturated pixel into its neighboring pixels. This charge migration causes group-to-group
 differences to decrease significantly once the signal level is greater than
-~30,000 ADU. As a result, the last several groups of these ramps get flagged by the ``jump`` step.
-The smaller number of groups used for these pixels in the ``ramp_fitting`` step results in them having
+~25,000 ADU. As a result, the last several groups of these ramps will be flagged by the
+``undersampling_correction`` step, and then ignored by the subsequent ``jump`` step. The smaller
+number of groups used for these pixels in the ``ramp_fitting`` step results in them having
 larger read noise variances, which in turn leads to lower weights used during
 resampling. This ultimately leads to a lower than normal flux for the star in resampled images.
 
+Pixels that are the four nearest neighbors of those high-intensity pixels, will also be flagged
+as affected by charge migration.
+
 Once a group in a ramp has been flagged as affected by charge migration, all
 subsequent groups in the ramp are also flagged. By flagging these groups, they are
 not used in the computation of slopes in the :ref:`ramp_fitting <ramp_fitting_step>` step. However, as described
@@ -41,6 +45,8 @@
 This results in a readnoise variance for undersampled pixels that is similar to pixels unaffected
 by charge migration. For the Poisson noise variance calculation in :ref:`ramp_fitting <ramp_fitting_step>`,
 the UNDERSAMP/DO_NOT_USE groups are not included.
 
+Pixels that are the four nearest neighbors of those high-intensity pixels will be similarly flagged.
+
 For integrations having only 1 or 2 groups, no flagging will be performed.
diff --git a/jwst/assign_wcs/util.py b/jwst/assign_wcs/util.py
index f0e312c0b..50c8ba31d 100644
--- a/jwst/assign_wcs/util.py
+++ b/jwst/assign_wcs/util.py
@@ -958,6 +958,7 @@ def update_s_region_imaging(model):
 
     if bbox is None:
         bbox = wcs_bbox_from_shape(model.data.shape)
+        model.meta.wcs.bounding_box = bbox
 
     # footprint is an array of shape (2, 4) as we
     # are interested only in the footprint on the sky
diff --git a/jwst/pipeline/calwebb_detector1.py b/jwst/pipeline/calwebb_detector1.py
index 788d29c37..889e5f492 100644
--- a/jwst/pipeline/calwebb_detector1.py
+++ b/jwst/pipeline/calwebb_detector1.py
@@ -19,8 +19,8 @@
 from ..linearity import linearity_step
 from ..dark_current import dark_current_step
 from ..reset import reset_step
 from ..persistence import persistence_step
+from ..undersampling_correction import undersampling_correction_step
 from ..jump import jump_step
-from ..undersampling_correction import undersampling_correction_step
 from ..ramp_fitting import ramp_fit_step
 from ..gain_scale import gain_scale_step
@@ -60,8 +60,8 @@ class Detector1Pipeline(Pipeline):
                  'dark_current': dark_current_step.DarkCurrentStep,
                  'reset': reset_step.ResetStep,
                  'persistence': persistence_step.PersistenceStep,
-                 'jump': jump_step.JumpStep,
                  'undersampling_correction': undersampling_correction_step.UndersamplingCorrectionStep,
+                 'jump': jump_step.JumpStep,
                  'ramp_fit': ramp_fit_step.RampFitStep,
                  'gain_scale': gain_scale_step.GainScaleStep,
                  }
@@ -119,12 +119,12 @@ class Detector1Pipeline(Pipeline):
 
             input = self.dark_current(input)
 
-            # apply the jump step
-            input = self.jump(input)
-
             # apply the undersampling_correction step
             input = self.undersampling_correction(input)
 
+            # apply the jump step
+            input = self.jump(input)
+
             # save the corrected ramp data, if requested
             if self.save_calibrated_ramp:
                 self.save_model(input, 'ramp')
diff --git a/jwst/undersampling_correction/undersampling_correction.py b/jwst/undersampling_correction/undersampling_correction.py
index f1b48513b..49c1a67ef 100644
--- a/jwst/undersampling_correction/undersampling_correction.py
+++ b/jwst/undersampling_correction/undersampling_correction.py
@@ -8,6 +8,11 @@ from stdatamodels.jwst.datamodels import dqflags
 log = logging.getLogger(__name__)
 log.setLevel(logging.DEBUG)
 
+GOOD = dqflags.group["GOOD"]
+DNU = dqflags.group["DO_NOT_USE"]
+UNSA = dqflags.group["UNDERSAMP"]
+UNSA_DNU = UNSA + DNU
+
 
 def undersampling_correction(input_model, signal_threshold):
     """
@@ -24,8 +29,8 @@ def undersampling_correction(input_model, signal_threshold):
     Returns
     -------
     output_model : `~jwst.datamodels.RampModel`
-        Data model with undersampling_correction applied; add UNDERSAMP flag
-        to groups exceeding signal_threshold
+        Data model with undersampling_correction applied; add UNDERSAMP and
+        DO_NOT_USE flags to groups exceeding signal_threshold
     """
     data = input_model.data
     gdq = input_model.groupdq
@@ -45,8 +50,8 @@ def undersampling_correction(input_model, signal_threshold):
 
 def flag_pixels(data, gdq, signal_threshold):
     """
-    Flag first group in each ramp that exceeds signal_threshold as UNDERSAMP and DO_NOT_USE,
-    skipping groups already flagged as DO_NOT_USE; then flag all subsequent groups in the ramp.
+    Flag each group in each ramp that exceeds signal_threshold as UNDERSAMP and DO_NOT_USE,
+    skipping groups already flagged as DO_NOT_USE.
 
     Parameters
     ----------
@@ -61,44 +66,78 @@ def flag_pixels(data, gdq, signal_threshold):
 
     Returns
     -------
-    gdq : int, 4D array
+    new_gdq : int, 4D array
         updated group dq array
     """
     n_ints, n_grps, n_rows, n_cols = gdq.shape
-    num_pix = n_cols * n_rows
+    ncols = data.shape[3]
+    nrows = data.shape[2]
+
+    new_gdq = gdq.copy()  # Updated gdq
+
+    # Flag all exceedances with UNDERSAMP and NO_NOT_USE
+    undersamp_pix = (data > signal_threshold) & (gdq != DNU)
+
+    new_gdq[undersamp_pix] = np.bitwise_or(new_gdq[undersamp_pix], UNSA | DNU)
+
+    # Reset groups previously flagged as DNU
+    gdq_orig = gdq.copy()  # For resetting to previously flagged DNU
+    wh_gdq_DNU = np.bitwise_and(gdq_orig, DNU)
+
+    # Get indices for exceedances
+    arg_where = np.argwhere(new_gdq == UNSA_DNU)
+    a_int = arg_where[:, 0]  # array of integrations
+    a_grp = arg_where[:, 1]  # array of groups
+    a_row = arg_where[:, 2]  # array of rows
+    a_col = arg_where[:, 3]  # array of columns
+
+    # Process the 4 nearest neighbors of each exceedance
+    # Pixel to the east
+    xx_max_p1 = a_col[a_col < (ncols-1)] + 1
+    i_int = a_int[a_col < (ncols-1)]
+    i_grp = a_grp[a_col < (ncols-1)]
+    i_row = a_row[a_col < (ncols-1)]
+
+    if len(xx_max_p1) > 0:
+        new_gdq[i_int, i_grp, i_row, xx_max_p1] = \
+            np.bitwise_or(new_gdq[i_int, i_grp, i_row, xx_max_p1], UNSA | DNU)
+
+    new_gdq[wh_gdq_DNU == 1] = gdq_orig[wh_gdq_DNU == 1]  # reset for earlier DNUs
+
+    # Pixel to the west
+    xx_m1 = a_col[a_col > 0] - 1
+    i_int = a_int[a_col > 0]
+    i_grp = a_grp[a_col > 0]
+    i_row = a_row[a_col > 0]
 
-    lowest_exc_1d = np.zeros(num_pix) + n_grps
+    if len(xx_m1) > 0:
+        new_gdq[i_int, i_grp, i_row, xx_m1] = \
+            np.bitwise_or(new_gdq[i_int, i_grp, i_row, xx_m1], UNSA | DNU)
 
-    for ii_int in range(n_ints):
-        for ii_grp in range(n_grps):
-            data_1d = data[ii_int, ii_grp, :, :].reshape(num_pix)  # vectorize slice
-            gdq_1d = gdq[ii_int, ii_grp, :, :].reshape(num_pix)
+    new_gdq[wh_gdq_DNU == 1] = gdq_orig[wh_gdq_DNU == 1]  # reset for earlier DNUs
 
-            wh_not_dnu = np.logical_not(gdq_1d & dqflags.group['DO_NOT_USE'])
+    # Pixel to the north
+    yy_m1 = a_row[a_row > 0] - 1
+    i_int = a_int[a_row > 0]
+    i_grp = a_grp[a_row > 0]
+    i_col = a_col[a_row > 0]
 
-            # In the current group for all ramps, locate pixels that :
-            # a) exceed the signal_threshold, and
-            # b) have not been previously flagged as an exceedance, and
-            # c) were not flagged in an earlier step as DO_NOT_USE
-            wh_exc_1d = np.where((data_1d > signal_threshold) &
-                                 (lowest_exc_1d == n_grps) & wh_not_dnu)
+    if len(yy_m1) > 0:
+        new_gdq[i_int, i_grp, yy_m1, i_col] = \
+            np.bitwise_or(new_gdq[i_int, i_grp, yy_m1, i_col], UNSA | DNU)
 
-            # ... and mark those pixels, as current group is their first exceedance
-            if len(wh_exc_1d[0] > 0):  # For ramps previously unflagged ...
-                lowest_exc_1d[wh_exc_1d] = ii_grp
+    new_gdq[wh_gdq_DNU == 1] = gdq_orig[wh_gdq_DNU == 1]  # reset for earlier DNUs
 
-    # Flag current and subsequent groups
-    lowest_exc_2d = lowest_exc_1d.reshape((n_rows, n_cols))
-    for ii_int in range(n_ints):
-        for ii_grp in range(n_grps):
-            wh_set_flag = np.where(lowest_exc_2d == ii_grp)
+    # Pixel to the south
+    yy_max_p1 = a_row[a_row < (nrows-1)] + 1
+    i_int = a_int[a_row < (nrows-1)]
+    i_grp = a_grp[a_row < (nrows-1)]
+    i_col = a_col[a_row < (nrows-1)]
 
-            # set arrays of components
-            yy = wh_set_flag[0]
-            xx = wh_set_flag[1]
+    if len(yy_max_p1) > 0:
+        new_gdq[i_int, i_grp, yy_max_p1, i_col] = \
+            np.bitwise_or(new_gdq[i_int, i_grp, yy_max_p1, i_col], UNSA | DNU)
 
-            gdq[ii_int, ii_grp:, yy, xx] = \
-                np.bitwise_or(gdq[ii_int, ii_grp:, yy, xx], dqflags.group['UNDERSAMP']
-                              | dqflags.group['DO_NOT_USE'])
+    new_gdq[wh_gdq_DNU == 1] = gdq_orig[wh_gdq_DNU == 1]  # reset for earlier DNUs
 
-    return gdq
+    return new_gdq
diff --git a/jwst/undersampling_correction/undersampling_correction_step.py b/jwst/undersampling_correction/undersampling_correction_step.py
index e5b8ba6b0..190846058 100755
--- a/jwst/undersampling_correction/undersampling_correction_step.py
+++ b/jwst/undersampling_correction/undersampling_correction_step.py
@@ -21,7 +21,7 @@ class UndersamplingCorrectionStep(Step):
     class_alias = "undersampling_correction"
 
     spec = """
-        signal_threshold = float(default=30000)
+        signal_threshold = float(default=25000)
         skip = boolean(default=True)
     """
 
@@ -31,7 +31,7 @@ class UndersamplingCorrectionStep(Step):
         with datamodels.RampModel(input) as input_model:
             if (input_model.data.shape[1] < 3):  # skip step if only 1 or 2 groups/integration
                 log.info('Too few groups per integration; skipping undersampling_correction')
-
+
                 result = input_model
                 result.meta.cal_step.undersampling_correction = 'SKIPPED'
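For reference, a condensed illustration of the neighbor-flagging idea in `flag_pixels` above, reduced to a 2-D boolean mask (the real code works on the 4-D group-DQ array and re-protects groups already flagged DO_NOT_USE):

```python
import numpy as np

def flag_with_neighbors(exceed):
    """Given a boolean exceedance mask, also flag the 4 nearest neighbors."""
    flagged = exceed.copy()
    flagged[:, 1:]  |= exceed[:, :-1]   # neighbor to the east of each exceedance
    flagged[:, :-1] |= exceed[:, 1:]    # neighbor to the west
    flagged[1:, :]  |= exceed[:-1, :]   # neighbor to the south
    flagged[:-1, :] |= exceed[1:, :]    # neighbor to the north
    return flagged

m = np.zeros((4, 3), dtype=bool)
m[2, 1] = True
assert flag_with_neighbors(m).sum() == 5   # the pixel plus its 4 neighbors
```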
spacetelescope/jwst
589917db326218e4135b996e66999c9f74b5066b
diff --git a/jwst/regtest/test_associations_sdp_pools.py b/jwst/regtest/test_associations_sdp_pools.py
index de7e20033..7f29fac43 100644
--- a/jwst/regtest/test_associations_sdp_pools.py
+++ b/jwst/regtest/test_associations_sdp_pools.py
@@ -32,11 +32,6 @@ SPECIAL_DEFAULT = {
     'slow': False,
 }
 SPECIAL_POOLS = {
-    'jw00016_20230331t130733_pool': {
-        'args': [],
-        'xfail': 'See issue JP-3230',
-        'slow': False,
-    },
     'jw00623_20190607t021101_pool': {
         'args': [],
         'xfail': None,
diff --git a/jwst/undersampling_correction/tests/test_undersampling_correction.py b/jwst/undersampling_correction/tests/test_undersampling_correction.py
index 12b90e751..806a47a3b 100644
--- a/jwst/undersampling_correction/tests/test_undersampling_correction.py
+++ b/jwst/undersampling_correction/tests/test_undersampling_correction.py
@@ -12,8 +12,8 @@ test_dq_flags = dqflags.pixel
 GOOD = test_dq_flags["GOOD"]
 DNU = test_dq_flags["DO_NOT_USE"]
 UNSA = test_dq_flags["UNDERSAMP"]
-ADFL = test_dq_flags["AD_FLOOR"]
 DROU = test_dq_flags["DROPOUT"]
+UNSA_DNU = UNSA + DNU
 
 
 def test_pix_0():
@@ -21,7 +21,7 @@ def test_pix_0():
     Having all data in ramp below the signal threshold, the only non-GOOD
     groups in the output GROUPDQ should be those DNU propagated from the input.
     """
-    ngroups, nints, nrows, ncols = set_scalars()
+    ngroups, nints, nrows, ncols = 10, 1, 1, 1
     ramp_model, pixdq, groupdq, err = create_mod_arrays(
         ngroups, nints, nrows, ncols)
 
@@ -42,59 +42,35 @@ def test_pix_0():
 
 def test_pix_1():
     """
-    All input GROUPDQ = 'GOOD'. Some ramp data exceed the signal threshold, so the
-    only non-GOOD groups in the output GROUPDQ should be UNSA + DNU for the first
-    group exceeding the signal threshold and all subsequent groups.
+    Tests groups whose data exceeds the signal threshold; 1 group is already
+    flagged as DNU from a previous calibration step, and 1 group is GOOD.
+    Also tests groups whose data does not exceed the signal threshold;
+    similarly 1 group is already flagged as DNU from a previous calibration
+    step, and 1 is GOOD. All data beyond the first exceedance are also
+    flagged as UNDERSAMP and DNU.
     """
-    ngroups, nints, nrows, ncols = set_scalars()
+    ngroups, nints, nrows, ncols = 10, 1, 1, 1
     ramp_model, pixdq, groupdq, err = create_mod_arrays(
         ngroups, nints, nrows, ncols)
 
     signal_threshold = 20000.
 
-    # Populate pixel-specific SCI and GROUPDQ arrays
-    # Set seom groups' SCI to be above the signal threshold, and all input
-    # GROUPDQ to be GOOD
-    ramp_model.data[0, 3, 0, 0] = np.array((signal_threshold + 100.), dtype=np.float32)
-    ramp_model.data[0, 5, 0, 0] = np.array((signal_threshold - 200.), dtype=np.float32)
-    ramp_model.data[0, 8, 0, 0] = np.array((signal_threshold + 100.), dtype=np.float32)
-    ramp_model.groupdq[0, :, 0, 0] = [GOOD] * ngroups
-
-    true_out_gdq = ramp_model.groupdq.copy()  # all GOOD
-    true_out_gdq[0, :, 0, 0] = [GOOD] * ngroups
-    true_out_gdq[0, 3:, 0, 0] = np.bitwise_or(UNSA, DNU)
-
-    out_model = undersampling_correction(ramp_model, signal_threshold)
-    out_gdq = out_model.groupdq
-
-    npt.assert_array_equal(out_gdq, true_out_gdq)
-
-
-def test_pix_2():
-    """
-    Tests groups having data exceeding the signal threshold, Some groups are
-    already flagged as DO_NOT_USE; they will not be checked for UC, Other
-    groups will have 'DNU'+'UNSA' added to their GROUPDQ, as will all later
-    groups.
-    """
-    ngroups, nints, nrows, ncols = set_scalars()
-    ramp_model, pixdq, groupdq, err = create_mod_arrays(
-        ngroups, nints, nrows, ncols)
+    # Populate SCI and GROUPDQ arrays.
+    ramp_model.data[0, 1, 0, 0] = np.array((0.5 * signal_threshold), dtype=np.float32)
 
-    signal_threshold = 20000.
+    ramp_model.data[0, 2, 0, 0] = np.array((0.8 * signal_threshold), dtype=np.float32)
+    ramp_model.groupdq[0, 2, 0, 0] = DNU  # should not get UNSA, not an exceedance
 
-    # Populate SCI and GROUPDQ arrays.
-    ramp_model.data[0, 1, 0, 0] = np.array((signal_threshold + 100.), dtype=np.float32)
-    ramp_model.data[0, 2, 0, 0] = np.array((signal_threshold + 100.), dtype=np.float32)
-    ramp_model.data[0, 3, 0, 0] = np.array((signal_threshold + 100.), dtype=np.float32)
+    ramp_model.data[0, 3, 0, 0] = np.array((signal_threshold + 5000.), dtype=np.float32)
+    ramp_model.groupdq[0, 3, 0, 0] = DNU  # should not get UNSA, although exceedance
 
-    ramp_model.groupdq[0, 1, 0, 0] = DNU  # should not get UNSA
-    ramp_model.groupdq[0, 2, 0, 0] = np.bitwise_or(ADFL, DNU)  # should not get UNSA
-    ramp_model.groupdq[0, 3, 0, 0] = ADFL  # should get UNSA + DNU
+    ramp_model.data[0, 4:, 0, 0] = np.array((signal_threshold + 6000.), dtype=np.float32)
+    ramp_model.groupdq[0, 4:, 0, 0] = GOOD
 
     true_out_gdq = ramp_model.groupdq.copy()
-    true_out_gdq[0, 3, 0, 0] = np.bitwise_or(np.bitwise_or(DNU, UNSA), ADFL)
-    true_out_gdq[0, 4:, 0, 0] = np.bitwise_or(DNU, UNSA)
+    true_out_gdq[0, 2, 0, 0] = DNU
+    true_out_gdq[0, 3, 0, 0] = DNU
+    true_out_gdq[0, 4:, 0, 0] = UNSA_DNU
 
     out_model = undersampling_correction(ramp_model, signal_threshold)
     out_gdq = out_model.groupdq
@@ -107,9 +83,7 @@ def test_too_few_groups():
     """
     Test that processing for datasets having too few (<3) groups per
     integration are skipped.
     """
-    ngroups, nints, nrows, ncols = set_scalars()
-    ngroups = 2
-
+    ngroups, nints, nrows, ncols = 2, 1, 1, 1
     ramp_model, pixdq, groupdq, err = create_mod_arrays(
         ngroups, nints, nrows, ncols)
 
@@ -123,16 +97,92 @@
     npt.assert_string_equal(status, "SKIPPED")
 
 
-def set_scalars():
+def test_flag_neighbors():
     """
-    Set needed scalars for the size of the dataset,
+    Test flagging of 4 nearest neighbors of exceedances. Tests pixels on
+    array edges, Tests exclusion of groups previously flagged as DO_NOT_USE.
     """
-    ngroups = 10
-    nints = 1
-    nrows = 1
-    ncols = 1
+    ngroups, nints, nrows, ncols = 6, 1, 4, 3
+    ramp_model, pixdq, groupdq, err = create_mod_arrays(
+        ngroups, nints, nrows, ncols)
+
+    signal_threshold = 4400.
+
+    # Populate pixel-specific SCI and GROUPDQ arrays
+    ramp_model.data[0, :, :, :] = \
+        np.array([[
+            [1900., 2666., 2100.],
+            [3865., 2300., 3177.],
+            [3832., 3044., 3588.],
+            [3799., 3233., 3000.]],
+
+           [[2100., 2866., 2300.],
+            [4065., 2500., 3377.],
+            [4032., 3244., 3788.],
+            [3999., 3433., 3200.]],
+
+           [[2300., 3066., 2500.],
+            [4265., 2700., 3577.],
+            [4232., 3444., 3988.],
+            [4199., 3633., 3400.]],
+
+           [[2500., 3266., 2700.],
+            [4465., 2900., 3777.],
+            [4432., 3644., 4188.],
+            [4399., 3833., 3600.]],
+
+           [[2700., 3466., 2900.],
+            [4665., 3100., 3977.],
+            [4632., 3844., 4388.],
+            [4599., 4033., 3800.]],
+
+           [[2900., 3666., 3100.],
+            [4865., 3300., 4177.],
+            [4832., 4044., 4588.],
+            [4799., 4233., 4000.]]], dtype=np.float32)
+
+    # These group DQ values should propagate unchanged to the output
+    ramp_model.groupdq[:, 4, 2, 0] = [DNU]
+    ramp_model.groupdq[:, 1, 2, 2] = [DNU]
+    ramp_model.groupdq[:, 2, 1, 1] = [DROU + DNU]
+
+    out_model = undersampling_correction(ramp_model, signal_threshold)
+    out_gdq = out_model.groupdq
 
-    return ngroups, nints, nrows, ncols
+    true_out_gdq = ramp_model.groupdq.copy()
+    true_out_gdq[0, :, :, :] = \
+        np.array([[
+            [0, 0, 0],
+            [0, 0, 0],
+            [0, 0, 0],
+            [0, 0, 0]],
+
+           [[0, 0, 0],
+            [0, 0, 0],
+            [0, 0, DNU],
+            [0, 0, 0]],
+
+           [[0, 0, 0],
+            [0, 9, 0],
+            [0, 0, 0],
+            [0, 0, 0]],
+
+           [[UNSA_DNU, 0, 0],
+            [UNSA_DNU, UNSA_DNU, 0],
+            [UNSA_DNU, UNSA_DNU, 0],
+            [UNSA_DNU, 0, 0]],
+
+           [[UNSA_DNU, 0, 0],
+            [UNSA_DNU, UNSA_DNU, 0],
+            [DNU, 0, 0],
+            [UNSA_DNU, UNSA_DNU, 0]],
+
+           [[UNSA_DNU, 0, 0],
+            [UNSA_DNU, UNSA_DNU, UNSA_DNU],
+            [UNSA_DNU, UNSA_DNU, UNSA_DNU],
+            [UNSA_DNU, UNSA_DNU, UNSA_DNU]]], dtype=np.uint8)
+
+    npt.assert_array_equal(out_gdq, true_out_gdq)
 
 
 def create_mod_arrays(ngroups, nints, nrows, ncols):
NIRSpec imaging products have no bounding box assigned to WCS _Issue [JP-3343](https://jira.stsci.edu/browse/JP-3343) was created on JIRA by [Tyler Pauly](https://jira.stsci.edu/secure/ViewProfile.jspa?name=tpauly):_ From a Help Desk ticket (from [Charles Proffitt](https://jira.stsci.edu/secure/ViewProfile.jspa?name=proffitt)), it was noted that NIRSpec imaging products have no FITS SIP parameters assigned. This was tracked down to the WCS not having a bounding_box assigned. Simple fix is available.
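A note on the fix described above: attaching a pixel-domain bounding box to the slit's GWCS is what lets gwcs fit FITS SIP coefficients. The following is a minimal standalone sketch of the idea using a toy imaging WCS; the model parameters, frame names, and the `(ny, nx)` shape are all made up for illustration and are not taken from the jwst pipeline itself:

```python
from astropy import units as u
from astropy.coordinates import ICRS
from astropy.modeling import models
from gwcs import coordinate_frames as cf
from gwcs import wcs as gwcs_wcs

# Toy pixel -> sky transform: recenter, scale to degrees, TAN projection, rotate to ICRS.
transform = (models.Shift(-1024) & models.Shift(-1024)
             | models.Scale(3e-5) & models.Scale(3e-5)
             | models.Pix2Sky_TAN()
             | models.RotateNative2Celestial(30.0, 45.0, 180.0))
detector = cf.Frame2D(name="detector", unit=(u.pix, u.pix))
sky = cf.CelestialFrame(name="icrs", reference_frame=ICRS(), unit=(u.deg, u.deg))
w = gwcs_wcs.WCS([(detector, transform), (sky, None)])

# Without this line, to_fits_sip() has no pixel region to sample and raises.
ny = nx = 2048
w.bounding_box = ((-0.5, nx - 0.5), (-0.5, ny - 0.5))  # (x range, y range)

sip_header = w.to_fits_sip()  # FITS header carrying the SIP approximation
```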
0.0
589917db326218e4135b996e66999c9f74b5066b
[ "jwst/undersampling_correction/tests/test_undersampling_correction.py::test_flag_neighbors" ]
[ "jwst/undersampling_correction/tests/test_undersampling_correction.py::test_pix_0", "jwst/undersampling_correction/tests/test_undersampling_correction.py::test_pix_1" ]
{ "failed_lite_validators": [ "has_hyperlinks", "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false }
2023-08-11 19:41:29+00:00
bsd-3-clause
5,610
spacetelescope__jwst-8012
diff --git a/CHANGES.rst b/CHANGES.rst index e951c3763..3ac9f225b 100644 --- a/CHANGES.rst +++ b/CHANGES.rst @@ -28,6 +28,11 @@ resample results in modified values in the resampled images. New computations significantly reduce photometric errors. [#7894] +tweakreg +-------- + +- Improved how a image group name is determined. [#8012] + 1.12.5 (2023-10-19) =================== diff --git a/jwst/tweakreg/tweakreg_step.py b/jwst/tweakreg/tweakreg_step.py index 5bb32303f..1f0202dc0 100644 --- a/jwst/tweakreg/tweakreg_step.py +++ b/jwst/tweakreg/tweakreg_step.py @@ -276,11 +276,12 @@ class TweakRegStep(Step): elif len(grp_img) > 1: # create a list of WCS-Catalog-Images Info and/or their Groups: imcats = [] + all_group_names = [] for g in grp_img: if len(g) == 0: raise AssertionError("Logical error in the pipeline code.") else: - group_name = _common_name(g) + group_name = _common_name(g, all_group_names) wcsimlist = list(map(self._imodel2wcsim, g)) # Remove the attached catalogs for model in g: @@ -541,13 +542,31 @@ class TweakRegStep(Step): return im -def _common_name(group): +def _common_name(group, all_group_names=None): file_names = [path.splitext(im.meta.filename)[0].strip('_- ') for im in group] - fname_len = list(map(len, file_names)) - assert all(fname_len[0] == length for length in fname_len) + cn = path.commonprefix(file_names) - assert cn + + if all_group_names is None: + if not cn: + return 'Unnamed Group' + else: + if not cn or cn in all_group_names: + # find the smallest group number to make "Group #..." unique + max_id = 1 + if not cn: + cn = "Group #" + for name in all_group_names: + try: + cid = int(name.lstrip(cn)) + if cid >= max_id: + max_id = cid + 1 + except ValueError: + pass + cn = f"{cn}{max_id}" + all_group_names.append(cn) + return cn
spacetelescope/jwst
2199ba88fe0a74f825733dc7114a3f6c79c581c7
diff --git a/jwst/tweakreg/tests/test_tweakreg.py b/jwst/tweakreg/tests/test_tweakreg.py index 7f6e37ec2..ad45bf523 100644 --- a/jwst/tweakreg/tests/test_tweakreg.py +++ b/jwst/tweakreg/tests/test_tweakreg.py @@ -5,7 +5,8 @@ import asdf from astropy.modeling.models import Shift import pytest -from jwst.tweakreg import TweakRegStep +from jwst.tweakreg import tweakreg_step +from stdatamodels.jwst.datamodels import ImageModel @pytest.mark.parametrize("offset, is_good", [(1 / 3600, True), (11 / 3600, False)]) @@ -20,6 +21,37 @@ def test_is_wcs_correction_small(offset, is_good): step.transform = step.transform | Shift(offset) & Shift(offset) twcs.bounding_box = wcs.bounding_box - step = TweakRegStep() + step = tweakreg_step.TweakRegStep() assert step._is_wcs_correction_small(wcs, twcs) == is_good + + [email protected]( + "groups, all_group_names, common_name", + [ + ([['abc1_cal.fits', 'abc2_cal.fits']], [], ['abc']), + ( + [ + ['abc1_cal.fits', 'abc2_cal.fits'], + ['abc1_cal.fits', 'abc2_cal.fits'], + ['abc1_cal.fits', 'abc2_cal.fits'], + ['def1_cal.fits', 'def2_cal.fits'], + ], + [], + ["abc", "abc1", "abc2", "def"], + ), + ([['cba1_cal.fits', 'abc2_cal.fits']], [], ['Group #1']), + ([['cba1_cal.fits', 'abc2_cal.fits']], ['Group #1'], ['Group #2']), + ([['cba1_cal.fits', 'abc2_cal.fits']], None, ['Unnamed Group']), + ] +) +def test_common_name(groups, all_group_names, common_name): + for g, cn_truth in zip(groups, common_name): + group = [] + for fname in g: + model = ImageModel() + model.meta.filename = fname + group.append(model) + + cn = tweakreg_step._common_name(group, all_group_names) + assert cn == cn_truth
Improve how image group name is computed in the tweakreg step _Issue [JP-3442](https://jira.stsci.edu/browse/JP-3442) was created on JIRA by [Mihai Cara](https://jira.stsci.edu/secure/ViewProfile.jspa?name=mcara):_ The current algorithm is quite simple: it does not look for duplicate names, and it has a check that requires all input file names to have equal length. However, some users may want to use file names of different lengths. See help desk ticket INC0194239 for more details. CC: [Paul Goudfrooij](https://jira.stsci.edu/secure/ViewProfile.jspa?name=goudfroo)
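The `_common_name` in the patch above drops the equal-length assertion in favor of `os.path.commonprefix` plus a uniqueness pass over previously assigned group names. A stripped-down sketch of that logic follows; it is a cleaned-up standalone variant, not the pipeline code verbatim (the patch uses `str.lstrip`, which strips a character set rather than a prefix, while this sketch slices the prefix off explicitly):

```python
from os import path

def common_name(file_names, all_group_names):
    """Return a unique display name for a group of exposure file names."""
    stems = [path.splitext(name)[0].strip('_- ') for name in file_names]
    cn = path.commonprefix(stems)
    if not cn or cn in all_group_names:
        prefix = cn if cn else 'Group #'
        # Find the smallest numeric suffix that keeps the name unique.
        max_id = 1
        for name in all_group_names:
            if name.startswith(prefix):
                try:
                    max_id = max(max_id, int(name[len(prefix):]) + 1)
                except ValueError:
                    pass
        cn = f'{prefix}{max_id}'
    all_group_names.append(cn)
    return cn

seen = []
print(common_name(['abc1_cal.fits', 'abc2_cal.fits'], seen))  # 'abc'
print(common_name(['abc1_cal.fits', 'abc2_cal.fits'], seen))  # 'abc1'
print(common_name(['cba1_cal.fits', 'abc2_cal.fits'], seen))  # 'Group #1'
```

These outputs match the expectations encoded in the new `test_common_name` cases in the test patch.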
0.0
2199ba88fe0a74f825733dc7114a3f6c79c581c7
[ "jwst/tweakreg/tests/test_tweakreg.py::test_common_name[groups0-all_group_names0-common_name0]", "jwst/tweakreg/tests/test_tweakreg.py::test_common_name[groups1-all_group_names1-common_name1]", "jwst/tweakreg/tests/test_tweakreg.py::test_common_name[groups2-all_group_names2-common_name2]", "jwst/tweakreg/tests/test_tweakreg.py::test_common_name[groups3-all_group_names3-common_name3]", "jwst/tweakreg/tests/test_tweakreg.py::test_common_name[groups4-None-common_name4]" ]
[ "jwst/tweakreg/tests/test_tweakreg.py::test_is_wcs_correction_small[0.0002777777777777778-True]", "jwst/tweakreg/tests/test_tweakreg.py::test_is_wcs_correction_small[0.0030555555555555557-False]" ]
{ "failed_lite_validators": [ "has_hyperlinks", "has_many_modified_files" ], "has_test_patch": true, "is_lite": false }
2023-10-19 00:44:24+00:00
bsd-3-clause
5,611
spacetelescope__jwst-8106
diff --git a/CHANGES.rst b/CHANGES.rst index 0bbaa4807..93e775416 100644 --- a/CHANGES.rst +++ b/CHANGES.rst @@ -52,14 +52,6 @@ general - Moved build configuration from ``setup.cfg`` to ``pyproject.toml`` to support PEP621 [#6847] -outlier_detection ------------------ - -- Remove use of ``scipy.signal.medfilt`` which is undefined for ``nan`` - inputs. [#8033] - -- Replace uses of ``datetime.utcnow`` (deprecated in python 3.12) [#8051] - imprint ------- @@ -72,6 +64,19 @@ nsclean - Implemented this new step, which is used to remove 1/f noise from NIRSpec images. [#8000] +outlier_detection +----------------- + +- Remove use of ``scipy.signal.medfilt`` which is undefined for ``nan`` + inputs. [#8033] + +- Replace uses of ``datetime.utcnow`` (deprecated in python 3.12) [#8051] + +pathloss +-------- + +- Updated code to handle NIRSpec MOS slitlets that aren't 1X1 or 1X3. [#8106] + photom ------ diff --git a/docs/jwst/pathloss/description.rst b/docs/jwst/pathloss/description.rst index e15673475..439d07fd2 100644 --- a/docs/jwst/pathloss/description.rst +++ b/docs/jwst/pathloss/description.rst @@ -49,6 +49,63 @@ The form of the 2-D correction (point or uniform) that's appropriate for the data is divided into the SCI and ERR arrays and propagated into the variance arrays of the science data. +The MSA reference file contains 2 entries: one for a 1x1 slit and one for a 1x3 slit. +Each entry contains the pathloss correction for point source and uniform sources. +The former depends on the position of the target in the fiducial shutter and +wavelength, whereas the latter depends on wavelength only. The point source +entry consists of a 3-d array, where 2 of the dimensions map to the location +of the source (ranging from -0.5 to 0.5 in both X and Y), while the third dimension +carries the wavelength dependence. The 1x3 shutter is 3 times as large in Y as in X. + +The entry to use for a point source target is determined by looking at the shutter_state +attribute of the slit used. This is a string with a length equal to the number +of shutters that make up the slit, with 1 denoting an open shutter, 0 a closed +shutter and x the fiducial (target) shutter. The reference entry is determined +by how many shutters next to the fiducial shutter are open: + +If both adjacent shutters are closed, the 1x1 entry is used. A matching +shutter_state might be 'x' or '10x01' + +If both adjacent shutters are open, the center region of the 1x3 entry is used. +This would be the case for a slit with shutter state '1x1' or '1011x1'. + +If one adjacent shutter is open and one closed, the 1x3 entry is used. If the +shutter below the fiducial is open and the shutter above closed, then the upper +region of the 1x3 pathloss array is used. This is implemented by adding 1 to the +Y coordinate of the target position (bringing it into the range +0.5 to +1.5), +moving it to the upper third of the pathloss array. A matching shutter state +might be '1x' or '11x011' + +Similarly, if the shutter below the fiducial is closed and that above is open, the +lower third of the pathloss array is used by subtracting 1 from the Y coordinate of +the target position (bringing it into the range -1.5 to -0.5). A matching shutter +state could be 'x111' or '110x1'. 
+ +Once the X and Y coordinates of the source are mapped into a pixel location in the +spatial dimensions of the pathloss array using the WCS of the transformation of position +to pixel location, the wavelength dependence is determined +by interpolating at that (fractional) pixel position in each wavelength plane, +resulting in a pair of 1-d arrays of pathloss correction and wavelength. These arrays +are used to interpolate the correction for each pixel of the 2-d extracted science +array, since each pixel has a different wavelength, and the correction is applied +to the science pixel array. + +For uniform sources, there is no dependence of the pathloss correction on position, +so the correction arrays are just 1-d arrays of correction and wavelength. Once +again, the shutter_state attribute of each slit is used to determine the correction +entry used: + +If both shutters adjacent to the fiducial are closed, the 1x1 entry is used + +If both shutters adjacent to the fiducial are open, the 1x3 entry is used + +If one is closed and one is open, the correction used is the average of the 1x1 +and 1x3 entries. + +Like for the point source case, the 1-d arrays of pathloss correction and wavelength +are used to interpolate the correction for each pixel in the science data, using the +wavelength of each pixel to interpolate into the pathloss correction array. + MIRI LRS ++++++++ The algorithm for MIRI LRS mode is largely the same as that for NIRSpec described diff --git a/jwst/pathloss/pathloss.py b/jwst/pathloss/pathloss.py index 01ea719ba..6b52e83c7 100644 --- a/jwst/pathloss/pathloss.py +++ b/jwst/pathloss/pathloss.py @@ -98,9 +98,26 @@ def get_center(exp_type, input, offsets=False): return 0.0, 0.0 +def shutter_above_is_closed(shutter_state): + ref_loc = shutter_state.find('x') + nshutters = len(shutter_state) + if ref_loc == nshutters - 1 or shutter_state[ref_loc + 1] == '0': + return True + else: + return False + + +def shutter_below_is_closed(shutter_state): + ref_loc = shutter_state.find('x') + if ref_loc == 0 or shutter_state[ref_loc - 1] == '0': + return True + else: + return False + + def get_aperture_from_model(input_model, match): """Figure out the correct aperture based on the value of the 'match' - parameter. For MSA, match is the number of shutters, for fixed slit, + parameter. For MSA, match is the shutter state string, for fixed slit, match is the name of the slit. 
Parameters @@ -109,7 +126,7 @@ def get_aperture_from_model(input_model, match): science data to be corrected match : str - Aperture name + Aperture name or shutter state Returns ------- @@ -117,8 +134,15 @@ def get_aperture_from_model(input_model, match): Aperture name """ if input_model.meta.exposure.type == 'NRS_MSASPEC': + # Currently there are only 2 apertures in the MSA pathloss reference file: 1x1 and 1x3 + # Only return the 1x1 aperture if the reference shutter has closed shutters above and below + if shutter_below_is_closed(match) and shutter_above_is_closed(match): + matchsize = 1 + else: + matchsize = 3 for aperture in input_model.apertures: - if aperture.shutters == match: + # Only return the aperture + if aperture.shutters == matchsize: return aperture elif input_model.meta.exposure.type in ['NRS_FIXEDSLIT', 'NRS_BRIGHTOBJ', 'NIS_SOSS']: @@ -237,6 +261,60 @@ def calculate_pathloss_vector(pathloss_refdata, return wavelength, pathloss_vector, is_inside_slitlet +def calculate_two_shutter_uniform_pathloss(pathloss_model): + """The two shutter MOS case for uniform source calculation requires a custom + routine since it uses both the 1X1 and 1X3 extensions of the pathloss reference file + + Parameters + ---------- + pathloss_model : pathloss datamodel + The pathloss datamodel + + Returns + ------- + (wavelength, pathloss_vector) : tuple of 2 1-d numpy arrays + The wavelength and pathloss 1-d arrays + + """ + # This routine will run if the fiducial shutter has 1 adjacent open shutter + n_apertures = len(pathloss_model.apertures) + if n_apertures != 2: + log.warning(f"Expected 2 apertures in pathloss reference file, found {n_apertures}") + return (None, None) + for aperture in pathloss_model.apertures: + aperture_name = aperture.name.upper() + if aperture_name == 'MOS1X1': + aperture1x1 = aperture + elif aperture_name == 'MOS1X3': + aperture1x3 = aperture + if aperture_name not in ['MOS1X1', 'MOS1X3']: + log.warning(f"Unexpected aperture name {aperture_name} (Expected 'MOS1X1' or 'MOS1X3')") + return (None, None) + pathloss1x1 = aperture1x1.uniform_data + pathloss1x3 = aperture1x3.uniform_data + if len(pathloss1x1) != len(pathloss1x3): + log.warning("Pathloss 1x1 and 1x3 arrays have different sizes") + return (None, None) + if aperture1x1.uniform_wcs.crval1 != aperture1x3.uniform_wcs.crval1: + log.warning("1x1 and 1x3 apertures have different WCS CRVAL1") + return (None, None) + if aperture1x1.uniform_wcs.crpix1 != aperture1x3.uniform_wcs.crpix1: + log.warning("1x1 and 1x3 apertures have different WCS CRPIX1") + return (None, None) + if aperture1x1.uniform_wcs.cdelt1 != aperture1x3.uniform_wcs.cdelt1: + log.warning("1x1 and 1x3 apertures have different WCS CDELT1") + return (None, None) + wavesize = len(pathloss1x1) + wavelength = np.zeros(wavesize) + crpix1 = aperture1x1.uniform_wcs.crpix1 + crval1 = aperture1x1.uniform_wcs.crval1 + cdelt1 = aperture1x1.uniform_wcs.cdelt1 + for i in np.arange(wavesize): + wavelength[i] = crval1 + (float(i + 1) - crpix1) * cdelt1 + average_pathloss = 0.5 * (pathloss1x1 + pathloss1x3) + return (wavelength, average_pathloss) + + def do_correction(input_model, pathloss_model=None, inverse=False, source_type=None, correction_pars=None, user_slit_loc=None): """Execute all tasks for Path Loss Correction @@ -752,19 +830,41 @@ def _corrections_for_mos(slit, pathloss, exp_type, source_type=None): # Calculate the 1-d wavelength and pathloss vectors # for the source position # Get the aperture from the reference file that matches the slit - nshutters = 
util.get_num_msa_open_shutters(slit.shutter_state) - aperture = get_aperture_from_model(pathloss, nshutters) + slitlength = len(slit.shutter_state) + aperture = get_aperture_from_model(pathloss, slit.shutter_state) + log.info(f"Shutter state = {slit.shutter_state}, using {aperture.name} entry in ref file") + two_shutters = False + if shutter_below_is_closed(slit.shutter_state) and not shutter_above_is_closed(slit.shutter_state): + ycenter = ycenter - 1.0 + log.info('Shutter below fiducial is closed, using lower region of pathloss array') + two_shutters = True + if not shutter_below_is_closed(slit.shutter_state) and shutter_above_is_closed(slit.shutter_state): + ycenter = ycenter + 1.0 + log.info('Shutter above fiducial is closed, using upper region of pathloss array') + two_shutters = True if aperture is not None: (wavelength_pointsource, pathloss_pointsource_vector, is_inside_slitlet) = calculate_pathloss_vector(aperture.pointsource_data, aperture.pointsource_wcs, xcenter, ycenter) - (wavelength_uniformsource, - pathloss_uniform_vector, - dummy) = calculate_pathloss_vector(aperture.uniform_data, - aperture.uniform_wcs, - xcenter, ycenter) + if two_shutters: + (wavelength_uniformsource, + pathloss_uniform_vector) = calculate_two_shutter_uniform_pathloss(pathloss) + else: + (wavelength_uniformsource, + pathloss_uniform_vector, + dummy) = calculate_pathloss_vector(aperture.uniform_data, + aperture.uniform_wcs, + xcenter, ycenter) + # This should only happen if the 2 shutter uniform pathloss calculation has an error + if wavelength_uniformsource is None or pathloss_uniform_vector is None: + log.warning("Unable to calculate 2 shutter uniform pathloss, using 3 shutter aperture") + (wavelength_uniformsource, + pathloss_uniform_vector, + dummy) = calculate_pathloss_vector(aperture.uniform_data, + aperture.uniform_wcs, + xcenter, ycenter) if is_inside_slitlet: # Wavelengths in the reference file are in meters, @@ -801,7 +901,7 @@ def _corrections_for_mos(slit, pathloss, exp_type, source_type=None): log.warning("Source is outside slit.") else: log.warning("Cannot find matching pathloss model for slit with" - f"{nshutters} shutters") + f"{slitlength} shutters") else: log.warning(f"Slit has data size = {size}")
spacetelescope/jwst
4205d6079bcb2afd7c0a7f7f0f7699121bb9ddee
diff --git a/jwst/pathloss/tests/test_pathloss.py b/jwst/pathloss/tests/test_pathloss.py index c45fd01f2..b32d6ffba 100644 --- a/jwst/pathloss/tests/test_pathloss.py +++ b/jwst/pathloss/tests/test_pathloss.py @@ -8,7 +8,9 @@ from jwst.pathloss.pathloss import (calculate_pathloss_vector, get_aperture_from_model, get_center, interpolate_onto_grid, - is_pointsource) + is_pointsource, + shutter_below_is_closed, + shutter_above_is_closed) from jwst.pathloss.pathloss import do_correction import numpy as np @@ -84,10 +86,10 @@ def test_get_aper_from_model_msa(): aperture reference data is returned for MSA mode""" datmod = PathlossModel() - datmod.apertures.append({'shutters': 5}) + datmod.apertures.append({'shutters': 3}) datmod.meta.exposure.type = 'NRS_MSASPEC' - result = get_aperture_from_model(datmod, 5) + result = get_aperture_from_model(datmod, '11x11') assert result == datmod.apertures[0] @@ -335,3 +337,21 @@ def test_interpolate_onto_grid(): result_comparison = np.interp(wavelength_grid, extended_wavelength_vector, extended_pathloss_vector) np.testing.assert_array_equal(result, result_comparison) + + +def test_shutter_below_is_closed(): + shutter_below_closed = ['x111', 'x', '10x11'] + shutter_below_open = ['11x11', '111x', '11x01'] + for shutter_state in shutter_below_closed: + assert shutter_below_is_closed(shutter_state) + for shutter_state in shutter_below_open: + assert not shutter_below_is_closed(shutter_state) + + +def test_shutter_above_is_closed(): + shutter_above_closed = ['111x', 'x', '1x011'] + shutter_above_open = ['11x11', 'x111', '110x1'] + for shutter_state in shutter_above_closed: + assert shutter_above_is_closed(shutter_state) + for shutter_state in shutter_above_open: + assert not shutter_above_is_closed(shutter_state)
Pathloss corrections for long MOS slitlets _Issue [JP-3357](https://jira.stsci.edu/browse/JP-3357) was created on JIRA by [Melanie Clarke](https://jira.stsci.edu/secure/ViewProfile.jspa?name=mclarke):_ This ticket is associated with a known issue in NIRSpec MOS data products: spectra extracted from slitlets consisting of more than 3 shutters exhibit unexpected wavelength-dependent flux discrepancies. The issue appears to be that the pathloss correction file contains data with explicit corrections for 1 shutter slitlets and 3 shutter slitlets.  If the input data matches one of these two configurations, the correction is applied.  If not, no correction is applied and the pipeline issues a warning like: "WARNING: Cannot find matching pathloss model for slit with5 shutters". To properly correct arbitrary MSA configurations, the pipeline needs to implement a strategy to compose a correction from the existing reference data.    In particular, the correction derived for a 3-shutter slitlet should be applicable to any slitlet > 1 shutter in length because it considers light from a source that may extend only into adjacent shutters. So, for a source in any of the middle 3 shutters of a 5-shutter slitlet, for example, the pipeline should apply a correction based on the middle shutter of the 3-shutter case in the reference file. If the source is in the top or bottom shutter, then it should use the corresponding top/bottom shutter in the reference file.
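The fix keys the choice of reference aperture off the slit's `shutter_state` string, where '1' is an open shutter, '0' a closed one, and 'x' the fiducial (target) shutter. Below is a self-contained sketch of the neighbor checks the patch adds, rewritten compactly but with the same behavior as `shutter_above_is_closed` / `shutter_below_is_closed`:

```python
def shutter_above_is_closed(shutter_state: str) -> bool:
    ref = shutter_state.find('x')
    return ref == len(shutter_state) - 1 or shutter_state[ref + 1] == '0'

def shutter_below_is_closed(shutter_state: str) -> bool:
    ref = shutter_state.find('x')
    return ref == 0 or shutter_state[ref - 1] == '0'

for state in ('x', '10x01', '1x1', '11x011', '110x1'):
    below, above = shutter_below_is_closed(state), shutter_above_is_closed(state)
    # The 1x1 entry applies only when the fiducial is isolated;
    # any open neighbor means the 1x3 entry (possibly shifted) applies.
    print(state, 'MOS1X1' if below and above else 'MOS1X3')
```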
0.0
4205d6079bcb2afd7c0a7f7f0f7699121bb9ddee
[ "jwst/pathloss/tests/test_pathloss.py::test_get_center_ifu", "jwst/pathloss/tests/test_pathloss.py::test_get_center_attr_err", "jwst/pathloss/tests/test_pathloss.py::test_get_center_exp_type", "jwst/pathloss/tests/test_pathloss.py::test_get_center_exptype", "jwst/pathloss/tests/test_pathloss.py::test_get_app_from_model_null", "jwst/pathloss/tests/test_pathloss.py::test_get_aper_from_model_fixedslit", "jwst/pathloss/tests/test_pathloss.py::test_get_aper_from_model_msa", "jwst/pathloss/tests/test_pathloss.py::test_calculate_pathloss_vector_pointsource_data", "jwst/pathloss/tests/test_pathloss.py::test_calculate_pathloss_vector_uniform_data", "jwst/pathloss/tests/test_pathloss.py::test_calculate_pathloss_vector_interpolation", "jwst/pathloss/tests/test_pathloss.py::test_calculate_pathloss_vector_interpolation_nontrivial", "jwst/pathloss/tests/test_pathloss.py::test_is_pointsource", "jwst/pathloss/tests/test_pathloss.py::test_do_correction_msa_slit_size_eq_0", "jwst/pathloss/tests/test_pathloss.py::test_do_correction_fixed_slit_exception", "jwst/pathloss/tests/test_pathloss.py::test_do_correction_nis_soss_tso", "jwst/pathloss/tests/test_pathloss.py::test_do_correction_nis_soss_pupil_position_is_none", "jwst/pathloss/tests/test_pathloss.py::test_do_correction_nis_soss_aperture_is_none", "jwst/pathloss/tests/test_pathloss.py::test_interpolate_onto_grid", "jwst/pathloss/tests/test_pathloss.py::test_shutter_below_is_closed", "jwst/pathloss/tests/test_pathloss.py::test_shutter_above_is_closed" ]
[]
{ "failed_lite_validators": [ "has_hyperlinks", "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false }
2023-12-04 18:33:50+00:00
bsd-3-clause
5,612
spacetelescope__jwst-8334
diff --git a/CHANGES.rst b/CHANGES.rst index 5cedc0faa..1abae4c32 100644 --- a/CHANGES.rst +++ b/CHANGES.rst @@ -16,6 +16,15 @@ background - Updated to allow multi-integration (rateints) background exposures to have a different value of NINTS than the science exposure. [#8326] +charge_migration +---------------- + +- Updated the CHARGELOSS flagging. In an integration ramp, the first group in + the SCI data is found that is above the CHARGELOSS threshold and not flagged + as DO_NOT_USE. This group, and all subsequent groups, are then flagged as + CHARGELOSS and DO_NOT_USE. The four nearest pixel neighbor are then flagged + in the same group. [#8336] + cube_build ---------- @@ -51,6 +60,12 @@ emicorr - Set skip=True by default in the code, to be turned on later by a parameter reference file. [#8171] +exp_to_source +------------- + +- Fixed a bug for multislit data that bunit values, model_type and wcsinfo was + was being overwritten with the top multispec model values. [#8294] + extract_1d ---------- @@ -88,6 +103,10 @@ general - Update minimum required stdatamodels version to include 1.10.0 [#8322] +- Update minimum required gwcs version to include 0.21.0 [#8337] + +- Remove unused asdf-transform-schemas dependency [#8337] + jump ---- @@ -100,6 +119,13 @@ lib - Updated ``set_velocity_aberration`` to use datamodels instead of `astropy.io.fits` for opening and manipulating input files. [#8285] +lib +--- + +- Added new function set_nans_to_donotuse in ``lib.basic_utils`` to + check the science data array for NaN values and check if they have + a DQ flag of DO_NOT_USE, or set it if not. [#8292] + outlier_detection ----------------- @@ -111,13 +137,11 @@ outlier_detection original input files to accidentally get deleted instead of just the intermediate files. [#8263] -resample +pathloss -------- -- Updated exposure time weighting to use the measurement time - (TMEASURE) when available. [#8212] -- Removed product exposure time (``TEXPTIME``) from all computations - in the resample step. [#8212] +- Added a check to find all NaN values in the data with a corresponding + even value flag in the DQ array, and convert them to DO_NOT_USE. [#8292] photom ------ @@ -167,6 +191,12 @@ refpix resample -------- +- Updated exposure time weighting to use the measurement time + (TMEASURE) when available. [#8212] + +- Removed product exposure time (``TEXPTIME``) from all computations + in the resample step. [#8212] + - Use the same ``iscale`` value for resampling science data and variance arrays. [#8159] - Changed to use the high-level APE 14 API (``pixel_to_world_values`` and @@ -206,6 +236,9 @@ tweakreg - Suppress warnings from ``photutils.background.Background2D`` regarding NaNs in the input data. [#8308] +- Fixed a bug that caused failures instead of warnings when no GAIA sources + were found within the bounding box of the input image. 
[#8334] + 1.13.4 (2024-01-25) =================== diff --git a/jwst/charge_migration/charge_migration.py b/jwst/charge_migration/charge_migration.py index 30e1b9bc1..ccfcc3836 100644 --- a/jwst/charge_migration/charge_migration.py +++ b/jwst/charge_migration/charge_migration.py @@ -5,6 +5,7 @@ import numpy as np from stdatamodels.jwst.datamodels import dqflags + log = logging.getLogger(__name__) log.setLevel(logging.DEBUG) @@ -72,72 +73,29 @@ def flag_pixels(data, gdq, signal_threshold): updated group dq array """ n_ints, n_grps, n_rows, n_cols = gdq.shape - - new_gdq = gdq.copy() # Updated gdq - - # Flag all exceedances with CHARGELOSS and NO_NOT_USE - chargeloss_pix = (data > signal_threshold) & (gdq != DNU) - new_gdq[chargeloss_pix] = np.bitwise_or(new_gdq[chargeloss_pix], CHLO | DNU) - - # Reset groups previously flagged as DNU - gdq_orig = gdq.copy() # For resetting to previously flagged DNU - wh_gdq_DNU = np.bitwise_and(gdq_orig, DNU) - - # Get indices for exceedances - arg_where = np.argwhere(new_gdq == CHLO_DNU) - - a_int = arg_where[:, 0] # array of integrations - a_grp = arg_where[:, 1] # array of groups - a_row = arg_where[:, 2] # array of rows - a_col = arg_where[:, 3] # array of columns - - # Process the 4 nearest neighbors of each exceedance - # Pixel to the east - xx_max_p1 = a_col[a_col < (n_cols-1)] + 1 - i_int = a_int[a_col < (n_cols-1)] - i_grp = a_grp[a_col < (n_cols-1)] - i_row = a_row[a_col < (n_cols-1)] - - if len(xx_max_p1) > 0: - new_gdq[i_int, i_grp, i_row, xx_max_p1] = \ - np.bitwise_or(new_gdq[i_int, i_grp, i_row, xx_max_p1], CHLO | DNU) - - new_gdq[wh_gdq_DNU == 1] = gdq_orig[wh_gdq_DNU == 1] # reset for earlier DNUs - - # Pixel to the west - xx_m1 = a_col[a_col > 0] - 1 - i_int = a_int[a_col > 0] - i_grp = a_grp[a_col > 0] - i_row = a_row[a_col > 0] - - if len(xx_m1) > 0: - new_gdq[i_int, i_grp, i_row, xx_m1] = \ - np.bitwise_or(new_gdq[i_int, i_grp, i_row, xx_m1], CHLO | DNU) - - new_gdq[wh_gdq_DNU == 1] = gdq_orig[wh_gdq_DNU == 1] # reset for earlier DNUs - - # Pixel to the north - yy_m1 = a_row[a_row > 0] - 1 - i_int = a_int[a_row > 0] - i_grp = a_grp[a_row > 0] - i_col = a_col[a_row > 0] - - if len(yy_m1) > 0: - new_gdq[i_int, i_grp, yy_m1, i_col] = \ - np.bitwise_or(new_gdq[i_int, i_grp, yy_m1, i_col], CHLO | DNU) - - new_gdq[wh_gdq_DNU == 1] = gdq_orig[wh_gdq_DNU == 1] # reset for earlier DNUs - - # Pixel to the south - yy_max_p1 = a_row[a_row < (n_rows-1)] + 1 - i_int = a_int[a_row < (n_rows-1)] - i_grp = a_grp[a_row < (n_rows-1)] - i_col = a_col[a_row < (n_rows-1)] - - if len(yy_max_p1) > 0: - new_gdq[i_int, i_grp, yy_max_p1, i_col] = \ - np.bitwise_or(new_gdq[i_int, i_grp, yy_max_p1, i_col], CHLO | DNU) - - new_gdq[wh_gdq_DNU == 1] = gdq_orig[wh_gdq_DNU == 1] # reset for earlier DNUs + chargeloss_pix = np.where((data > signal_threshold) & (gdq != DNU)) + + new_gdq = gdq.copy() + + for k in range(len(chargeloss_pix[0])): + integ, group = chargeloss_pix[0][k], chargeloss_pix[1][k] + row, col = chargeloss_pix[2][k], chargeloss_pix[3][k] + new_gdq[integ, group:, row, col] |= CHLO_DNU + + # North + if row > 0: + new_gdq[integ, group:, row-1, col] |= CHLO_DNU + + # South + if row < (n_rows-1): + new_gdq[integ, group:, row+1, col] |= CHLO_DNU + + # East + if col < (n_cols-1): + new_gdq[integ, group:, row, col+1] |= CHLO_DNU + + # West + if col > 0: + new_gdq[integ, group:, row, col-1] |= CHLO_DNU return new_gdq diff --git a/jwst/exp_to_source/exp_to_source.py b/jwst/exp_to_source/exp_to_source.py index c8765cd21..b9540b1b1 100644 --- 
a/jwst/exp_to_source/exp_to_source.py +++ b/jwst/exp_to_source/exp_to_source.py @@ -40,8 +40,26 @@ def exp_to_source(inputs): log.debug(f'Copying source {slit.source_id}') result_slit = result[str(slit.source_id)] result_slit.exposures.append(slit) + # store values for later use (after merge_tree) + # these values are incorrectly getting overwritten by + # the top model. + slit_bunit = slit.meta.bunit_data + slit_bunit_err = slit.meta.bunit_err + slit_model = slit.meta.model_type + slit_wcsinfo = slit.meta.wcsinfo.instance + # exposure.meta.bunit_data and bunit_err does not exist + # before calling merge_tree save these values + # Before merge_tree the slits have a model_type of SlitModel. + # After merge_tree it is overwritten with MultiSlitModel. + # store the model type to undo overwriting of modeltype. + merge_tree(result_slit.exposures[-1].meta.instance, exposure.meta.instance) + result_slit.exposures[-1].meta.bunit_data = slit_bunit + result_slit.exposures[-1].meta.bunit_err = slit_bunit_err + result_slit.exposures[-1].meta.model_type = slit_model + result_slit.exposures[-1].meta.wcsinfo = slit_wcsinfo + if result_slit.meta.instrument.name is None: result_slit.update(exposure) diff --git a/jwst/lib/basic_utils.py b/jwst/lib/basic_utils.py index 28971ddb8..92e77dcf4 100644 --- a/jwst/lib/basic_utils.py +++ b/jwst/lib/basic_utils.py @@ -1,5 +1,31 @@ """General utility objects""" +from stdatamodels.jwst.datamodels import dqflags +import numpy as np + + +def set_nans_to_donotuse(data, dq): + """Set all NaN values in the data that have an even value to + DO_NOT_USE. + + Parameters + ---------- + data : numpy array + The science data array to find NaN values and + check of these have a DQ flag=DO_NOT_USE, or + set it if not. + + dq : numpy array + The DQ array to be checked. + + Returns + ------- + dq : numpy array + The updated DQ array. 
+ """ + dq[np.isnan(data)] |= dqflags.pixel['DO_NOT_USE'] + return dq + class LoggingContext: """Logging context manager diff --git a/jwst/pathloss/pathloss.py b/jwst/pathloss/pathloss.py index c3339bd27..4ad2f551b 100644 --- a/jwst/pathloss/pathloss.py +++ b/jwst/pathloss/pathloss.py @@ -10,6 +10,8 @@ import stdatamodels.jwst.datamodels as datamodels from jwst.assign_wcs import nirspec, util from jwst.lib.wcs_utils import get_wavelengths +from jwst.lib.basic_utils import set_nans_to_donotuse + log = logging.getLogger(__name__) log.setLevel(logging.DEBUG) @@ -545,6 +547,9 @@ def do_correction_mos(data, pathloss, inverse=False, source_type=None, correctio slit.pathloss_point = correction.pathloss_point slit.pathloss_uniform = correction.pathloss_uniform + # check the dq flags have the correct value + slit.dq = set_nans_to_donotuse(slit.data, slit.dq) + # Set step status to complete data.meta.cal_step.pathloss = 'COMPLETE' @@ -608,6 +613,9 @@ def do_correction_fixedslit(data, pathloss, inverse=False, source_type=None, cor slit.pathloss_point = correction.pathloss_point slit.pathloss_uniform = correction.pathloss_uniform + # check the dq flags have the correct value + slit.dq = set_nans_to_donotuse(slit.data, slit.dq) + # Set step status to complete data.meta.cal_step.pathloss = 'COMPLETE' @@ -661,6 +669,9 @@ def do_correction_ifu(data, pathloss, inverse=False, source_type=None, correctio # This might be useful to other steps data.wavelength = correction.wavelength + # check the dq flags have the correct value + data.dq = set_nans_to_donotuse(data.data, data.dq) + # Set the step status to complete data.meta.cal_step.pathloss = 'COMPLETE' @@ -702,6 +713,9 @@ def do_correction_lrs(data, pathloss, user_slit_loc): # This might be useful to other steps data.wavelength = correction.wavelength + # check the dq flags have the correct value + data.dq = set_nans_to_donotuse(data.data, data.dq) + # Set the step status to complete data.meta.cal_step.pathloss = 'COMPLETE' @@ -794,6 +808,9 @@ def do_correction_soss(data, pathloss): data.var_flat /= pathloss_2d**2 data.pathloss_point = pathloss_2d + # check the dq flags have the correct value + data.dq = set_nans_to_donotuse(data.data, data.dq) + # Set step status to complete data.meta.cal_step.pathloss = 'COMPLETE' diff --git a/jwst/tweakreg/astrometric_utils.py b/jwst/tweakreg/astrometric_utils.py index 0144fdbb7..a3034cb6c 100644 --- a/jwst/tweakreg/astrometric_utils.py +++ b/jwst/tweakreg/astrometric_utils.py @@ -96,8 +96,10 @@ def create_astrometric_catalog(input_models, catalog="GAIADR3", output="ref_cat. else Time(input_models[0].meta.observation.date).decimalyear ) ref_dict = get_catalog(fiducial[0], fiducial[1], epoch=epoch, sr=radius, catalog=catalog) + if len(ref_dict) == 0: + return ref_dict + colnames = ('ra', 'dec', 'mag', 'objID', 'epoch') - ref_table = ref_dict[colnames] # Add catalog name as meta data diff --git a/pyproject.toml b/pyproject.toml index 20dbc5433..0bbbdfa71 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -20,12 +20,11 @@ classifiers = [ ] dependencies = [ "asdf>=2.15.1,<4", - "asdf-transform-schemas>=0.3.0", "astropy>=5.3", "BayesicFitting>=3.0.1", "crds>=11.17.14", "drizzle>=1.14.3,<1.15.0", - "gwcs>=0.20.0,<0.21.0", + "gwcs>=0.21.0,<0.22.0", "numpy>=1.22", "opencv-python-headless>=4.6.0.66", "photutils>=1.5.0",
spacetelescope/jwst
911b5c67d126a9939011509c5aed09df8e236420
diff --git a/jwst/charge_migration/tests/test_charge_migration.py b/jwst/charge_migration/tests/test_charge_migration.py index 4a2e53f73..bc6d390c5 100644 --- a/jwst/charge_migration/tests/test_charge_migration.py +++ b/jwst/charge_migration/tests/test_charge_migration.py @@ -8,6 +8,7 @@ from jwst.charge_migration.charge_migration_step import ChargeMigrationStep import numpy.testing as npt + test_dq_flags = dqflags.pixel GOOD = test_dq_flags["GOOD"] DNU = test_dq_flags["DO_NOT_USE"] @@ -78,6 +79,7 @@ def test_pix_1(): true_out_gdq[0, 4:, 0, 0] = CHLO_DNU out_model = charge_migration(ramp_model, signal_threshold) + out_data = out_model.data out_gdq = out_model.groupdq @@ -85,113 +87,154 @@ def test_pix_1(): npt.assert_array_equal(out_gdq, true_out_gdq) -def test_too_few_groups(): +def test_pix_2(): """ - Test that processing for datasets having too few (<3) groups per integration - are skipped. + Test a later group being below the threshold. """ - ngroups, nints, nrows, ncols = 2, 1, 1, 1 + ngroups, nints, nrows, ncols = 10, 1, 1, 1 ramp_model, pixdq, groupdq, err = create_mod_arrays( ngroups, nints, nrows, ncols) - ramp_model.data[0, :, 0, 0] = 20000. - sig_thresh = 100. + signal_threshold = 4000. - result = ChargeMigrationStep.call(ramp_model, skip=False, - signal_threshold=sig_thresh) - status = result.meta.cal_step.charge_migration + arr = [1000., 2000., 4005., 4500., 5000., 5500., 3500., 6000., 6500., 3700.] + ramp_model.data[0, :, 0, 0] = np.array(arr, dtype=np.float32) + arr = [0, DNU, 0, 0, 0, 0, 0, 0, 0, 0] + ramp_model.groupdq[0, :, 0, 0] = np.array(arr, dtype=np.uint8) - npt.assert_string_equal(status, "SKIPPED") + out_model = charge_migration(ramp_model, signal_threshold) + truth_arr = [0, DNU, CHLO_DNU, CHLO_DNU, CHLO_DNU, CHLO_DNU, CHLO_DNU, CHLO_DNU, CHLO_DNU, CHLO_DNU] + truth_gdq = np.array(truth_arr, dtype=np.uint8) -def test_flag_neighbors(): + npt.assert_array_equal(truth_gdq, out_model.groupdq[0, :, 0, 0]) + + + +def nearest_neighbor_base(chg_thresh, pixel): """ - Test flagging of 4 nearest neighbors of exceedances. Tests pixels on - array edges, Tests exclusion of groups previously flagged as DO_NOT_USE. + Set up ramp array that is 5, 5 with 10 groups. + The flagging starts in group 3 (zero based) in the pixel tested. """ - ngroups, nints, nrows, ncols = 6, 1, 4, 3 + nints, ngroups, nrows, ncols = 1, 10, 5, 5 ramp_model, pixdq, groupdq, err = create_mod_arrays( ngroups, nints, nrows, ncols) - signal_threshold = 4400. 
+ # Set up dummy data + base = chg_thresh * 0.05 + base_arr = [float(k+1) * base for k in range(ngroups)] + for row in range(nrows): + for col in range(ncols): + ramp_model.data[0, :, row, col] = np.array(base_arr, dtype=np.float32) - # Populate pixel-specific SCI and GROUPDQ arrays - ramp_model.data[0, :, :, :] = \ - np.array([[ - [1900., 2666., 2100.], - [3865., 2300., 3177.], - [3832., 3044., 3588.], - [3799., 3233., 3000.]], - - [[2100., 2866., 2300.], - [4065., 2500., 3377.], - [4032., 3244., 3788.], - [3999., 3433., 3200.]], - - [[2300., 3066., 2500.], - [4265., 2700., 3577.], - [4232., 3444., 3988.], - [4199., 3633., 3400.]], - - [[2500., 3266., 2700.], - [4465., 2900., 3777.], - [4432., 3644., 4188.], - [4399., 3833., 3600.]], - - [[2700., 3466., 2900.], - [4665., 3100., 3977.], - [4632., 3844., 4388.], - [4599., 4033., 3800.]], - - [[2900., 3666., 3100.], - [4865., 3300., 4177.], - [4832., 4044., 4588.], - [4799., 4233., 4000.]]], dtype=np.float32) - - # These group DQ values should propagate unchanged to the output - ramp_model.groupdq[:, 4, 2, 0] = [DNU] - ramp_model.groupdq[:, 1, 2, 2] = [DNU] - ramp_model.groupdq[:, 2, 1, 1] = [DROU + DNU] + # Make CHARGELOSS threshold starting at group 3 + in_row, in_col = pixel + ramp_model.data[0, 3:, in_row, in_col] += chg_thresh - out_model = charge_migration(ramp_model, signal_threshold) + return ramp_model, pixdq, groupdq, err - out_gdq = out_model.groupdq - true_out_gdq = ramp_model.groupdq.copy() - true_out_gdq[0, :, :, :] = \ - np.array([[ - [0, 0, 0], - [0, 0, 0], - [0, 0, 0], - [0, 0, 0]], - - [[0, 0, 0], - [0, 0, 0], - [0, 0, DNU], - [0, 0, 0]], - - [[0, 0, 0], - [0, 9, 0], - [0, 0, 0], - [0, 0, 0]], - - [[CHLO_DNU, 0, 0], - [CHLO_DNU, CHLO_DNU, 0], - [CHLO_DNU, CHLO_DNU, 0], - [CHLO_DNU, 0, 0]], - - [[CHLO_DNU, 0, 0], - [CHLO_DNU, CHLO_DNU, 0], - [DNU, 0, 0], - [CHLO_DNU, CHLO_DNU, 0]], - - [[CHLO_DNU, 0, 0], - [CHLO_DNU, CHLO_DNU, CHLO_DNU], - [CHLO_DNU, CHLO_DNU, CHLO_DNU], - [CHLO_DNU, CHLO_DNU, CHLO_DNU]]], dtype=np.uint8) +def test_nearest_neighbor_1(): + """ + CHARGELOSS center + The flagging starts in group 3 (zero based) in the pixel tested. + """ + chg_thresh = 4000. + pixel = (2, 2) + ramp_model, pixdq, groupdq, err = nearest_neighbor_base(chg_thresh, pixel) + gdq_check = ramp_model.groupdq.copy() + ngroups = gdq_check.shape[1] + + out_model = charge_migration(ramp_model, chg_thresh) + + check_pattern = [ + [GOOD, GOOD, GOOD, GOOD, GOOD], + [GOOD, GOOD, CHLO_DNU, GOOD, GOOD], + [GOOD, CHLO_DNU, CHLO_DNU, CHLO_DNU, GOOD], + [GOOD, GOOD, CHLO_DNU, GOOD, GOOD], + [GOOD, GOOD, GOOD, GOOD, GOOD], + ] + check = np.array(check_pattern, dtype=gdq_check.dtype) + for group in range(3, ngroups): + gdq_check[0, group, :, :] = check npt.assert_array_equal(out_model.data, ramp_model.data) - npt.assert_array_equal(out_gdq, true_out_gdq) + npt.assert_array_equal(out_model.groupdq, gdq_check) + + +def test_nearest_neighbor_2(): + """ + CHARGELOSS corner + The flagging starts in group 3 (zero based) in the pixel tested. + """ + chg_thresh = 4000. 
+ pixel = (0, 0) + ramp_model, pixdq, groupdq, err = nearest_neighbor_base(chg_thresh, pixel) + gdq_check = ramp_model.groupdq.copy() + ngroups = gdq_check.shape[1] + + out_model = charge_migration(ramp_model, chg_thresh) + + check_pattern = [ + [CHLO_DNU, CHLO_DNU, GOOD, GOOD, GOOD], + [CHLO_DNU, GOOD, GOOD, GOOD, GOOD], + [GOOD, GOOD, GOOD, GOOD, GOOD], + [GOOD, GOOD, GOOD, GOOD, GOOD], + [GOOD, GOOD, GOOD, GOOD, GOOD], + ] + check = np.array(check_pattern, dtype=gdq_check.dtype) + for group in range(3, ngroups): + gdq_check[0, group, :, :] = check + + npt.assert_array_equal(out_model.data, ramp_model.data) + npt.assert_array_equal(out_model.groupdq, gdq_check) + + +def test_nearest_neighbor_3(): + """ + CHARGELOSS Edge + The flagging starts in group 3 (zero based) in the pixel tested. + """ + chg_thresh = 4000. + pixel = (2, 4) + ramp_model, pixdq, groupdq, err = nearest_neighbor_base(chg_thresh, pixel) + gdq_check = ramp_model.groupdq.copy() + ngroups = gdq_check.shape[1] + + out_model = charge_migration(ramp_model, chg_thresh) + + check_pattern = [ + [GOOD, GOOD, GOOD, GOOD, GOOD], + [GOOD, GOOD, GOOD, GOOD, CHLO_DNU], + [GOOD, GOOD, GOOD, CHLO_DNU, CHLO_DNU], + [GOOD, GOOD, GOOD, GOOD, CHLO_DNU], + [GOOD, GOOD, GOOD, GOOD, GOOD], + ] + check = np.array(check_pattern, dtype=gdq_check.dtype) + for group in range(3, ngroups): + gdq_check[0, group, :, :] = check + + npt.assert_array_equal(out_model.data, ramp_model.data) + npt.assert_array_equal(out_model.groupdq, gdq_check) + + +def test_too_few_groups(): + """ + Test that processing for datasets having too few (<3) groups per integration + are skipped. + """ + ngroups, nints, nrows, ncols = 2, 1, 1, 1 + ramp_model, pixdq, groupdq, err = create_mod_arrays( + ngroups, nints, nrows, ncols) + + ramp_model.data[0, :, 0, 0] = 20000. + sig_thresh = 100. + + result = ChargeMigrationStep.call(ramp_model, skip=False, + signal_threshold=sig_thresh) + status = result.meta.cal_step.charge_migration + + npt.assert_string_equal(status, "SKIPPED") def create_mod_arrays(ngroups, nints, nrows, ncols): diff --git a/jwst/tweakreg/tests/test_amutils.py b/jwst/tweakreg/tests/test_amutils.py index c31975f29..3f2e40fca 100644 --- a/jwst/tweakreg/tests/test_amutils.py +++ b/jwst/tweakreg/tests/test_amutils.py @@ -54,3 +54,22 @@ def test_create_catalog(wcsobj): ) # check that we got expected number of sources assert len(gcat) == EXPECTED_NUM_SOURCES + + +def test_create_catalog_graceful_failure(wcsobj): + ''' + Ensure catalog retuns zero sources instead of failing outright + when the bounding box is too small to find any sources + ''' + wcsobj.bounding_box = ((0, 0.5), (0, 0.5)) + + # Create catalog + gcat = amutils.create_astrometric_catalog( + None, + existing_wcs=wcsobj, + catalog=TEST_CATALOG, + output=None, + epoch='2016.0', + ) + # check that we got expected number of sources + assert len(gcat) == 0
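The nearest-neighbor tests above encode the rule from the charge_migration change: starting at the first unflagged exceedance, the pixel and its four direct neighbors get CHARGELOSS|DO_NOT_USE in that group and all later groups, with clipping at array edges. A minimal numpy sketch of that rule, mirroring the patched `flag_pixels` (the bit values here are stand-ins, not the real `dqflags` numbers):

```python
import numpy as np

DNU, CHLO = 1, 128  # stand-in bit values for DO_NOT_USE and CHARGELOSS
CHLO_DNU = CHLO | DNU

def flag_pixels(data, gdq, threshold):
    n_ints, n_grps, n_rows, n_cols = gdq.shape
    out = gdq.copy()
    for i, g, r, c in zip(*np.where((data > threshold) & (gdq != DNU))):
        out[i, g:, r, c] |= CHLO_DNU            # this group and all later groups
        if r > 0:
            out[i, g:, r - 1, c] |= CHLO_DNU    # neighbor above
        if r < n_rows - 1:
            out[i, g:, r + 1, c] |= CHLO_DNU    # neighbor below
        if c > 0:
            out[i, g:, r, c - 1] |= CHLO_DNU    # neighbor to the left
        if c < n_cols - 1:
            out[i, g:, r, c + 1] |= CHLO_DNU    # neighbor to the right
    return out
```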
level3 nis_image of jw01230-o009_20240210t011656_image3_00001 crashed with KeyError: 'ra' _Issue [JP-3541](https://jira.stsci.edu/browse/JP-3541) was created on JIRA by [Hien Tran](https://jira.stsci.edu/secure/ViewProfile.jspa?name=htran):_ ops saw a new error during reprocessing of nis_image dataset [jw01230-o009_20240210t011656_image3_00001](https://pljwdmsweb.stsci.edu/owlgui/logs/directory_view?directory_path=/ifs/archive/ops/jwst/info/owl/logs/owlmgr_jw01230-o009_20240210t011656_image3_00001_1707534171.411246&fileset_name=jw01230-o009_20240210t011656_image3_00001&dag_name=REPRO_LEVEL_3) (for l3 product jw01230-o009_t002_niriss_clearp-f480m-sub80) with b10.0, in the tweakreg step ```java 2024-02-17 00:34:17,588 - CRDS - DEBUG - Final effective context is 'jwst_1197.pmap' 2024-02-17 00:34:17,588 - CRDS - DEBUG - Computing best references locally. 2024-02-17 00:34:17,589 - CRDS - DEBUG - Bestrefs header: {'META.EXPOSURE.READPATT [READPATT]': 'NISRAPID', 'META.EXPOSURE.TYPE [EXP_TYPE]': 'NIS_IMAGE', 'META.INSTRUMENT.DETECTOR [DETECTOR]': 'NIS', 'META.INSTRUMENT.FILTER [FILTER]': 'F480M', 'META.INSTRUMENT.LAMP_STATE [LAMP]': 'NONE', 'META.INSTRUMENT.NAME [INSTRUME]': 'NIRISS', 'META.INSTRUMENT.PUPIL [PUPIL]': 'CLEARP', 'META.OBSERVATION.DATE [DATE-OBS]': '2023-04-17', 'META.OBSERVATION.TIME [TIME-OBS]': '22:24:14.405', 'META.SUBARRAY.NAME [SUBARRAY]': 'SUB80', 'META.VISIT.CROWDED_FIELD [CROWDFLD]': 'F', 'META.VISIT.TSOVISIT [TSOVISIT]': 'F', 'META.VISIT.TYPE [VISITYPE]': 'PRIME_TARGETED_FIXED', 'REFTYPE': 'UNDEFINED'} 2024-02-17 00:34:17,589 - CRDS - DEBUG - Reference type 'abvegaoffset' defined as 'jwst_niriss_abvegaoffset_0003.asdf' 2024-02-17 00:34:17,589 - CRDS - DEBUG - Reference type 'apcorr' defined as 'jwst_niriss_apcorr_0008.fits' 2024-02-17 00:34:17,589 - CRDS - DEBUG - Reference type 'drizpars' defined as 'jwst_niriss_drizpars_0002.fits' 2024-02-17 00:34:17,658 - stpipe.Image3Pipeline - INFO - Prefetch for ABVEGAOFFSET reference file is '/ifs/archive/ops/jwst/ref/tmp_crds/crds/cache/references/jwst/niriss/jwst_niriss_abvegaoffset_0003.asdf'. 2024-02-17 00:34:17,659 - stpipe.Image3Pipeline - INFO - Prefetch for APCORR reference file is '/ifs/archive/ops/jwst/ref/tmp_crds/crds/cache/references/jwst/niriss/jwst_niriss_apcorr_0008.fits'. 2024-02-17 00:34:17,660 - stpipe.Image3Pipeline - INFO - Prefetch for DRIZPARS reference file is '/ifs/archive/ops/jwst/ref/tmp_crds/crds/cache/references/jwst/niriss/jwst_niriss_drizpars_0002.fits'. 2024-02-17 00:34:17,661 - stpipe.Image3Pipeline - INFO - Starting calwebb_image3 ... 2024-02-17 00:34:17,870 - stpipe.Image3Pipeline.tweakreg - INFO - Step tweakreg running with args (<ModelContainer>,). 
2024-02-17 00:34:17,872 - stpipe.Image3Pipeline.tweakreg - INFO - Step tweakreg parameters are: {'pre_hooks': [], 'post_hooks': [], 'output_file': None, 'output_dir': None, 'output_ext': '.fits', 'output_use_model': True, 'output_use_index': True, 'save_results': False, 'skip': False, 'suffix': None, 'search_output_file': True, 'input_dir': '/ifs/archive/ops/jwst/info/owlmgr/paths/sdp/asn_creation/cal/level3', 'save_catalogs': False, 'use_custom_catalogs': False, 'catalog_format': 'ecsv', 'catfile': '', 'kernel_fwhm': 2.5, 'snr_threshold': 10, 'sharplo': 0.2, 'sharphi': 1.0, 'roundlo': -1.0, 'roundhi': 1.0, 'brightest': 100, 'peakmax': None, 'bkg_boxsize': 400, 'enforce_user_order': False, 'expand_refcat': False, 'minobj': 15, 'searchrad': 1.0, 'use2dhist': True, 'separation': 1.0, 'tolerance': 1.0, 'xoffset': 0.0, 'yoffset': 0.0, 'fitgeometry': 'shift', 'nclip': 3, 'sigma': 3.0, 'abs_refcat': 'GAIADR3', 'save_abs_catalog': False, 'abs_minobj': 15, 'abs_searchrad': 6.0, 'abs_use2dhist': True, 'abs_separation': 0.1, 'abs_tolerance': 0.7, 'abs_fitgeometry': 'rshift', 'abs_nclip': 3, 'abs_sigma': 3.0} 2024-02-17 00:34:17,878 - stpipe.Image3Pipeline.tweakreg - WARNING - /dms/local/jwst/pipeline/pkgs/miniconda3/envs/jwstdp-1.12.5.20231019-py3.11/lib/python3.11/site-packages/photutils/background/background_2d.py:274: AstropyUserWarning: Input data contains invalid values (NaNs or infs), which were automatically masked. 2024-02-17 00:34:17,879 - stpipe.Image3Pipeline.tweakreg - WARNING - warnings.warn('Input data contains invalid values (NaNs or ' 2024-02-17 00:34:17,879 - stpipe.Image3Pipeline.tweakreg - WARNING - 2024-02-17 00:34:17,879 - stpipe.Image3Pipeline.tweakreg - WARNING - /dms/local/jwst/pipeline/pkgs/miniconda3/envs/jwstdp-1.12.5.20231019-py3.11/lib/python3.11/site-packages/photutils/background/background_2d.py:274: AstropyUserWarning: Input data contains invalid values (NaNs or infs), which were automatically masked. 2024-02-17 00:34:17,879 - stpipe.Image3Pipeline.tweakreg - WARNING - warnings.warn('Input data contains invalid values (NaNs or ' 2024-02-17 00:34:17,879 - stpipe.Image3Pipeline.tweakreg - WARNING - 2024-02-17 00:34:17,880 - stpipe.Image3Pipeline.tweakreg - INFO - Background could not be estimated in meshes. Using the entire unmasked array for background estimation: bkg_boxsize=(80, 80). 2024-02-17 00:34:17,886 - stpipe.Image3Pipeline.tweakreg - INFO - Detected 10 sources in jw01230009001_03103_00001_nis_cal.fits. 2024-02-17 00:34:17,894 - stpipe.Image3Pipeline.tweakreg - INFO - 2024-02-17 00:34:17,894 - stpipe.Image3Pipeline.tweakreg - INFO - Number of image groups to be aligned: 1. 
2024-02-17 00:34:17,894 - stpipe.Image3Pipeline.tweakreg - INFO - Image groups: 2024-02-17 00:34:17,913 - stpipe.Image3Pipeline.tweakreg - INFO - * Images in GROUP 'jw01230009001_03103_00001_nis_cal': 2024-02-17 00:34:17,914 - stpipe.Image3Pipeline.tweakreg - INFO - jw01230009001_03103_00001_nis_cal 2024-02-17 00:34:17,914 - stpipe.Image3Pipeline.tweakreg - INFO - Traceback (most recent call last): File "/dms/local/jwst/pipeline/pkgs/miniconda3/envs/jwstdp-1.12.5.20231019-py3.11/bin/strun", line 26, in <module> step = Step.from_cmdline(sys.argv[1:]) ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ File "/dms/local/jwst/pipeline/pkgs/miniconda3/envs/jwstdp-1.12.5.20231019-py3.11/lib/python3.11/site-packages/stpipe/step.py", line 186, in from_cmdline return cmdline.step_from_cmdline(args) ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ File "/dms/local/jwst/pipeline/pkgs/miniconda3/envs/jwstdp-1.12.5.20231019-py3.11/lib/python3.11/site-packages/stpipe/cmdline.py", line 386, in step_from_cmdline step.run(*positional) File "/dms/local/jwst/pipeline/pkgs/miniconda3/envs/jwstdp-1.12.5.20231019-py3.11/lib/python3.11/site-packages/stpipe/step.py", line 478, in run step_result = self.process(*args) ^^^^^^^^^^^^^^^^^^^ File "/dms/local/jwst/pipeline/pkgs/miniconda3/envs/jwstdp-1.12.5.20231019-py3.11/lib/python3.11/site-packages/jwst/pipeline/calwebb_image3.py", line 89, in process input_models = self.tweakreg(input_models) ^^^^^^^^^^^^^^^^^^^^^^^^^^^ File "/dms/local/jwst/pipeline/pkgs/miniconda3/envs/jwstdp-1.12.5.20231019-py3.11/lib/python3.11/site-packages/stpipe/step.py", line 478, in run step_result = self.process(*args) ^^^^^^^^^^^^^^^^^^^ File "/dms/local/jwst/pipeline/pkgs/miniconda3/envs/jwstdp-1.12.5.20231019-py3.11/lib/python3.11/site-packages/jwst/tweakreg/tweakreg_step.py", line 392, in process ref_cat = amutils.create_astrometric_catalog( ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ File "/dms/local/jwst/pipeline/pkgs/miniconda3/envs/jwstdp-1.12.5.20231019-py3.11/lib/python3.11/site-packages/jwst/tweakreg/astrometric_utils.py", line 98, in create_astrometric_catalog ref_table = ref_dict[colnames] ~~~~~~~~^^^^^^^^^^ File "/dms/local/jwst/pipeline/pkgs/miniconda3/envs/jwstdp-1.12.5.20231019-py3.11/lib/python3.11/site-packages/astropy/table/table.py", line 2064, in __getitem__ [self[x] for x in item], copy_indices=self._copy_indices ^^^^^^^^^^^^^^^^^^^^^^^ File "/dms/local/jwst/pipeline/pkgs/miniconda3/envs/jwstdp-1.12.5.20231019-py3.11/lib/python3.11/site-packages/astropy/table/table.py", line 2064, in <listcomp> [self[x] for x in item], copy_indices=self._copy_indices ~~~~^^^ File "/dms/local/jwst/pipeline/pkgs/miniconda3/envs/jwstdp-1.12.5.20231019-py3.11/lib/python3.11/site-packages/astropy/table/table.py", line 2055, in __getitem__ return self.columns[item] ~~~~~~~~~~~~^^^^^^ File "/dms/local/jwst/pipeline/pkgs/miniconda3/envs/jwstdp-1.12.5.20231019-py3.11/lib/python3.11/site-packages/astropy/table/table.py", line 264, in __getitem__ return OrderedDict.__getitem__(self, item) ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ KeyError: 'ra' 2024048003418 INFO src=ssb_calibration_wrapper._strun_analyze_and_log_failure fsn=jw01230-o009_20240210t011656_image3_00001_asn msg="strun /dms/local/jwst/pipeline/pkgs/miniconda3/envs/jwstdp-1.12.5.20231019-py3.11/lib/python3.11/site-packages/jwst/pipeline/calwebb_image3.cfg FAILED (exit=1) on jw01230-o009_20240210t011656_image3_00001_asn.json." 
2024048003418 ERROR src=ssb_calibration_wrapper.calibrate._strun._strun_analyze_and_log_failure fsn=jw01230-o009_20240210t011656_image3_00001_asn msg="strun exit status=1" ``` We successfully generated L3 products for this same dataset before and did not see such an error in previous builds.
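The traceback bottoms out in `ref_table = ref_dict[colnames]`: when the GAIA query inside the image footprint returns no sources, the catalog comes back as an empty table with no columns, and selecting `'ra'` raises `KeyError`. The fix in the patch above simply returns the empty catalog early. A tiny reproduction of the failure mode and the guard, using a plain `astropy.table.Table` as a stand-in for the query result:

```python
from astropy.table import Table

ref_dict = Table()  # an empty query result: zero rows, no columns
colnames = ('ra', 'dec', 'mag', 'objID', 'epoch')

if len(ref_dict) == 0:
    ref_cat = ref_dict              # graceful: caller sees a zero-length catalog
else:
    ref_cat = ref_dict[colnames]    # on the empty table this would raise KeyError: 'ra'

print(len(ref_cat))  # 0
```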
0.0
911b5c67d126a9939011509c5aed09df8e236420
[ "jwst/charge_migration/tests/test_charge_migration.py::test_pix_2", "jwst/tweakreg/tests/test_amutils.py::test_create_catalog_graceful_failure" ]
[ "jwst/charge_migration/tests/test_charge_migration.py::test_pix_0", "jwst/charge_migration/tests/test_charge_migration.py::test_pix_1", "jwst/charge_migration/tests/test_charge_migration.py::test_nearest_neighbor_1", "jwst/charge_migration/tests/test_charge_migration.py::test_nearest_neighbor_2", "jwst/charge_migration/tests/test_charge_migration.py::test_nearest_neighbor_3", "jwst/tweakreg/tests/test_amutils.py::test_radius", "jwst/tweakreg/tests/test_amutils.py::test_get_catalog", "jwst/tweakreg/tests/test_amutils.py::test_create_catalog" ]
{ "failed_lite_validators": [ "has_hyperlinks", "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false }
2024-03-06 21:36:15+00:00
bsd-3-clause
5,613
spacetelescope__synphot_refactor-331
diff --git a/CHANGES.rst b/CHANGES.rst index fe2844c..50ecb8f 100644 --- a/CHANGES.rst +++ b/CHANGES.rst @@ -4,6 +4,8 @@ - Dropped support for Python 3.6 and 3.7. Minimum supported Python version is now 3.8. [#330] +- OBMAG and VEGAMAG are no longer interchangeable. [#331] + 1.1.1 (2021-11-18) ================== diff --git a/docs/synphot/spectrum.rst b/docs/synphot/spectrum.rst index 661aa70..43645a6 100644 --- a/docs/synphot/spectrum.rst +++ b/docs/synphot/spectrum.rst @@ -48,7 +48,7 @@ for flux conversion):: <Quantity [6.62607015e-24, 6.62607015e-23] FNU> >>> area = 45238.93416 * units.AREA # HST >>> sp(wave, flux_unit=units.OBMAG, area=area) # doctest: +FLOAT_CMP - <Quantity [-21.52438718,-21.52438718] OBMAG> + <Magnitude [-21.52438718,-21.52438718] mag(OB)> .. _synphot_reddening: diff --git a/synphot/observation.py b/synphot/observation.py index 97f8281..5514b0e 100644 --- a/synphot/observation.py +++ b/synphot/observation.py @@ -458,7 +458,7 @@ class Observation(BaseSourceSpectrum): if flux_unit == u.count or flux_unit_name == units.OBMAG.to_string(): val = self.countrate(area, binned=False, wavelengths=wavelengths) - if flux_unit.decompose() == u.mag: + if flux_unit == units.OBMAG: eff_stim = (-2.5 * np.log10(val.value)) * flux_unit else: eff_stim = val diff --git a/synphot/units.py b/synphot/units.py index fc129ee..8dbc68f 100644 --- a/synphot/units.py +++ b/synphot/units.py @@ -1,9 +1,6 @@ # Licensed under a 3-clause BSD style license - see LICENSE.rst """This module handles photometry units that are not in `astropy.units`.""" -# THIRD-PARTY -import numpy as np - # ASTROPY from astropy import constants as const from astropy import units as u @@ -49,10 +46,10 @@ FLAM = u.def_unit( FNU = u.def_unit( 'fnu', u.erg / (u.cm**2 * u.s * u.Hz), format={'generic': 'FNU', 'console': 'FNU'}) -OBMAG = u.def_unit( - 'obmag', u.mag, format={'generic': 'OBMAG', 'console': 'OBMAG'}) -VEGAMAG = u.def_unit( - 'vegamag', u.mag, format={'generic': 'VEGAMAG', 'console': 'VEGAMAG'}) +_u_ob = u.def_unit('OB') +OBMAG = u.mag(_u_ob) +_u_vega = u.def_unit('VEGA') +VEGAMAG = u.mag(_u_vega) # Register with astropy units u.add_enabled_units([PHOTLAM, PHOTNU, FLAM, FNU, OBMAG, VEGAMAG]) @@ -109,20 +106,12 @@ def spectral_density_vega(wav, vegaflux): PHOTLAM, equivalencies=u.spectral_density(wav)).value def converter(x): - """Set nan/inf to -99 mag.""" - val = -2.5 * np.log10(x / vega_photlam) - result = np.zeros(val.shape, dtype=np.float64) - 99 - mask = np.isfinite(val) - if result.ndim > 0: - result[mask] = val[mask] - elif mask: - result = np.asarray(val) - return result + return x / vega_photlam def iconverter(x): - return vega_photlam * 10**(-0.4 * x) + return x * vega_photlam - return [(PHOTLAM, VEGAMAG, converter, iconverter)] + return [(PHOTLAM, VEGAMAG.physical_unit, converter, iconverter)] def spectral_density_count(wav, area): @@ -156,14 +145,8 @@ def spectral_density_count(wav, area): def iconverter_count(x): return x / factor - def converter_obmag(x): - return -2.5 * np.log10(x * factor) - - def iconverter_obmag(x): - return 10**(-0.4 * x) / factor - return [(PHOTLAM, u.count, converter_count, iconverter_count), - (PHOTLAM, OBMAG, converter_obmag, iconverter_obmag)] + (PHOTLAM, OBMAG.physical_unit, converter_count, iconverter_count)] def convert_flux(wavelengths, fluxes, out_flux_unit, **kwargs): @@ -348,6 +331,10 @@ def validate_unit(input_unit): output_unit = u.STmag elif input_unit_lowcase in ('abmag', 'mag(ab)'): output_unit = u.ABmag + elif input_unit_lowcase in ('obmag', 'mag(ob)'): + 
output_unit = OBMAG + elif input_unit_lowcase in ('vegamag', 'mag(vega)'): + output_unit = VEGAMAG else: try: # astropy.units is case-sensitive
spacetelescope/synphot_refactor
bd229608b8a2977ca1fad9cf2a2d5abb4bfcae80
diff --git a/synphot/tests/test_units.py b/synphot/tests/test_units.py index 95aef73..3ffb064 100644 --- a/synphot/tests/test_units.py +++ b/synphot/tests/test_units.py @@ -1,7 +1,7 @@ # Licensed under a 3-clause BSD style license - see LICENSE.rst """Test units.py module. -.. note:: VEGAMAG conversion is tested in test_spectrum.py. +.. note:: VEGAMAG conversion is tested in test_spectrum_source.py. .. note:: spectral_density_integrated is tested in astropy>=4.1. @@ -13,6 +13,7 @@ import pytest # ASTROPY from astropy import units as u +from astropy.tests.helper import assert_quantity_allclose # LOCAL from synphot import exceptions, units @@ -176,3 +177,18 @@ def test_flux_conversion_exceptions(): units.convert_flux(_wave, _flux_photlam, u.count, area=None) with pytest.raises(exceptions.SynphotError): units.convert_flux(_wave, _flux_obmag, units.PHOTLAM, area=None) + + +def test_vegamag_obmag_calculations(): + assert_quantity_allclose( + 5 * units.VEGAMAG - 2.5 * units.VEGAMAG, u.Magnitude(2.5)) + assert_quantity_allclose( + (5 * units.VEGAMAG - 2.5 * units.VEGAMAG).to(u.one), 0.1) + + # Should not be interchangeable with astropy mag unit or with another + # custom mag unit, but error is only raised if .to(u.one) is called. + msg = 'subtract magnitudes so the unit got lost' + with pytest.raises(u.UnitConversionError, match=msg): + (5 * units.VEGAMAG - 2.5 * u.STmag).to(u.one) + with pytest.raises(u.UnitConversionError, match=msg): + 5 * units.VEGAMAG - 2.5 * units.OBMAG.to(u.one)
Redefine VEGAMAG and OBMAG so astropy can tell them apart https://github.com/spacetelescope/synphot_refactor/blob/e51d7605349eebea0921c7d72da674b0af7d6db9/synphot/units.py#L52-L55 As stated in https://github.com/astropy/astropy/pull/13158#discussion_r858962549 , the new `astropy.modeling.models.Schechter1D` takes a magnitude as a parameter but currently it cannot tell VEGAMAG and OBMAG apart. @mhvk advised the following: <hr/> [The] problem is that they now are both just aliases to `u.mag`, so the units system cannot know what to do. I think best might be to treat it a bit like ST and AB, and define units `VEGA` and `OB`, both being units without a known numerical value, and then define `VEGAMAG = u.mag(VEGA)` and `OBMAG = u.mag(OB)`. Then one can no longer just mix and match those magnitude systems: ```python >>> from astropy import units as u >>> VEGA = u.def_unit('VEGA') >>> OB = u.def_unit('OB') >>> VEGAMAG = u.mag(VEGA) >>> OBMAG = u.mag(OB) >>> (5 * VEGAMAG - 2.5 * VEGAMAG).to(u.one) <Quantity 0.1> >>> (5 * VEGAMAG - 2.5 * OBMAG).to(u.one) UnitConversionError: ("'VEGA / OB' and '' (dimensionless) are not convertible", 'Did you perhaps subtract magnitudes so the unit got lost?') ``` Note that of course the user can still shoot themselves in the foot by forgetting what band a magnitude is in... <hr/> To go with this patch, relevant documentation and tests might also need updating. And this might be a breaking change, so it should not go into a bugfix release.
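To make the string-handling side concrete: the patch above also teaches synphot's `validate_unit` to recognize the new spellings. A minimal self-contained sketch of that mapping, assuming only astropy (the `validate_mag_unit` helper and its error message are illustrative, not synphot's actual API):

```python
from astropy import units as u

# Mirror the patch: VEGA and OB are units with no known numerical value;
# only their logarithmic forms mag(VEGA) and mag(OB) are used.
_u_vega = u.def_unit('VEGA')
_u_ob = u.def_unit('OB')
VEGAMAG = u.mag(_u_vega)
OBMAG = u.mag(_u_ob)

def validate_mag_unit(name):
    """Map legacy spellings onto the now-distinguishable mag units."""
    lowered = name.lower()
    if lowered in ('obmag', 'mag(ob)'):
        return OBMAG
    if lowered in ('vegamag', 'mag(vega)'):
        return VEGAMAG
    raise ValueError('not a recognized magnitude unit: {}'.format(name))

assert validate_mag_unit('OBMAG') != validate_mag_unit('VEGAMAG')
```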
0.0
bd229608b8a2977ca1fad9cf2a2d5abb4bfcae80
[ "synphot/tests/test_units.py::test_vegamag_obmag_calculations" ]
[ "synphot/tests/test_units.py::test_implicit_assumptions", "synphot/tests/test_units.py::test_validate_unit[angstroms-out_u0]", "synphot/tests/test_units.py::test_validate_unit[inversemicrons-out_u1]", "synphot/tests/test_units.py::test_validate_unit[transmission-out_u2]", "synphot/tests/test_units.py::test_validate_unit[TRANSMISSION-out_u3]", "synphot/tests/test_units.py::test_validate_unit[extinction-out_u4]", "synphot/tests/test_units.py::test_validate_unit[emissivity-out_u5]", "synphot/tests/test_units.py::test_validate_unit[photlam-out_u6]", "synphot/tests/test_units.py::test_validate_unit[photnu-out_u7]", "synphot/tests/test_units.py::test_validate_unit[flam-out_u8]", "synphot/tests/test_units.py::test_validate_unit[fnu-out_u9]", "synphot/tests/test_units.py::test_validate_unit[stmag-out_u10]", "synphot/tests/test_units.py::test_validate_unit[abmag-out_u11]", "synphot/tests/test_units.py::test_validate_unit[obmag-out_u12]", "synphot/tests/test_units.py::test_validate_unit[vegamag-out_u13]", "synphot/tests/test_units.py::test_validate_unit[Kelvin-out_u14]", "synphot/tests/test_units.py::test_validate_unit[in_u15-out_u15]", "synphot/tests/test_units.py::test_validate_wave_unit[angstroms-out_u0]", "synphot/tests/test_units.py::test_validate_wave_unit[inversemicrons-out_u1]", "synphot/tests/test_units.py::test_validate_wave_unit[Hz-out_u2]", "synphot/tests/test_units.py::test_validate_unit_exceptions", "synphot/tests/test_units.py::test_validate_quantity[100.0-out_u0-eqv0-100.0]", "synphot/tests/test_units.py::test_validate_quantity[in_val1-out_u1-eqv1-0.01]", "synphot/tests/test_units.py::test_validate_quantity[in_val2-out_u2-eqv2-ans2]", "synphot/tests/test_units.py::test_wave_conversion[in_q0-out_u0-ans0]", "synphot/tests/test_units.py::test_wave_conversion[in_q1-out_u1-ans1]", "synphot/tests/test_units.py::test_wave_conversion[in_q2-out_u2-ans2]", "synphot/tests/test_units.py::test_wave_conversion[in_q3-out_u3-ans3]", "synphot/tests/test_units.py::test_wave_conversion[in_q4-out_u4-ans4]", "synphot/tests/test_units.py::test_wave_conversion[in_q5-out_u5-ans5]", "synphot/tests/test_units.py::test_flux_conversion[in_q0-out_u0-ans0-True]", "synphot/tests/test_units.py::test_flux_conversion[in_q1-out_u1-ans1-False]", "synphot/tests/test_units.py::test_flux_conversion[in_q2-out_u2-ans2-False]", "synphot/tests/test_units.py::test_flux_conversion[in_q3-out_u3-ans3-False]", "synphot/tests/test_units.py::test_flux_conversion[in_q4-out_u4-ans4-False]", "synphot/tests/test_units.py::test_flux_conversion[in_q5-out_u5-ans5-False]", "synphot/tests/test_units.py::test_flux_conversion[in_q6-out_u6-ans6-False]", "synphot/tests/test_units.py::test_flux_conversion[in_q7-out_u7-ans7-True]", "synphot/tests/test_units.py::test_flux_conversion[in_q8-out_u8-ans8-True]", "synphot/tests/test_units.py::test_flux_conversion[in_q9-out_u9-ans9-True]", "synphot/tests/test_units.py::test_flux_conversion[in_q10-out_u10-ans10-True]", "synphot/tests/test_units.py::test_flux_conversion[in_q11-out_u11-ans11-True]", "synphot/tests/test_units.py::test_flux_conversion[in_q12-out_u12-ans12-True]", "synphot/tests/test_units.py::test_flux_conversion[in_q13-out_u13-ans13-True]", "synphot/tests/test_units.py::test_flux_conversion[in_q14-out_u14-ans14-True]", "synphot/tests/test_units.py::test_flux_conversion[in_q15-out_u15-ans15-True]", "synphot/tests/test_units.py::test_flux_conversion[in_q16-out_u16-ans16-True]", "synphot/tests/test_units.py::test_flux_conversion[in_q17-out_u17-ans17-True]", 
"synphot/tests/test_units.py::test_flux_conversion[in_q18-out_u18-ans18-True]", "synphot/tests/test_units.py::test_flux_conversion[in_q19-out_u19-ans19-True]", "synphot/tests/test_units.py::test_flux_conversion[in_q20-out_u20-ans20-True]", "synphot/tests/test_units.py::test_flux_conversion[in_q21-out_u21-ans21-True]", "synphot/tests/test_units.py::test_flux_conversion[in_q22-out_u22-ans22-True]", "synphot/tests/test_units.py::test_flux_conversion[in_q23-out_u23-ans23-True]", "synphot/tests/test_units.py::test_flux_conversion[in_q24-out_u24-ans24-True]", "synphot/tests/test_units.py::test_flux_conversion[in_q25-out_u25-ans25-True]", "synphot/tests/test_units.py::test_flux_conversion[in_q26-out_u26-ans26-True]", "synphot/tests/test_units.py::test_flux_conversion_exceptions" ]
{ "failed_lite_validators": [ "has_hyperlinks", "has_many_modified_files", "has_many_hunks", "has_pytest_match_arg" ], "has_test_patch": true, "is_lite": false }
2022-05-11 14:18:55+00:00
bsd-3-clause
5,614
spacetelescope__tweakwcs-136
diff --git a/CHANGELOG.rst b/CHANGELOG.rst index ebe60f5..2dde896 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -8,9 +8,12 @@ Release Notes ================== -0.7.2 (06-May-2021) +0.7.2 (07-May-2021) =================== +- Fixed a bug in ``matchutils._find_peak()`` due to which it could return + coordinates of the peak that were outside of the image. [#136] + - Fixed a bug in how re-projection was computed when ``center`` of the transformations was provided. [#135] diff --git a/tweakwcs/matchutils.py b/tweakwcs/matchutils.py index 9fd05e9..6def57b 100644 --- a/tweakwcs/matchutils.py +++ b/tweakwcs/matchutils.py @@ -518,12 +518,12 @@ def _find_peak(data, peak_fit_box=5, mask=None): ym = (c10 * c11 - 2.0 * c01 * c20) / det if 0.0 <= xm <= (nx - 1.0) and 0.0 <= ym <= (ny - 1.0): - coord = (xm, ym) fit_status = 'SUCCESS' - else: xm = 0.0 if xm < 0.0 else min(xm, nx - 1.0) ym = 0.0 if ym < 0.0 else min(ym, ny - 1.0) fit_status = 'WARNING:EDGE' + coord = (xm, ym) + return coord, fit_status, np.s_[y1:y2, x1:x2]
spacetelescope/tweakwcs
0056b97943e6a0649685b0d93fdf66491b60d86e
diff --git a/tweakwcs/tests/test_matchutils.py b/tweakwcs/tests/test_matchutils.py index a390812..59d6dd3 100644 --- a/tweakwcs/tests/test_matchutils.py +++ b/tweakwcs/tests/test_matchutils.py @@ -189,7 +189,7 @@ def test_find_peak_fit_over_edge(): data[:, 0] = 0.0 coord, fit_status, fit_box = _find_peak(data, peak_fit_box=7) assert fit_status == 'WARNING:EDGE' - assert np.allclose(coord, (1, 11), rtol=0, atol=1e-6) + assert np.allclose(coord, (0, 11), rtol=0, atol=1e-6) @pytest.mark.parametrize('shift', [100, 2])
Peak measurement of 2d histogram can find unrealistic positions for the peak Processing the singleton 'ibbr02c5q' triggers a logical error when determining the position of the peak of the 2d histogram. Specifically, Line 489 of `matchutils.py` in the `_find_peak()` function can return a solution that falls outside the fit slice defined by (x1, x2), (y1, y2). Logic needs to be added to detect this situation and fall back to simply reporting the position of the maximum value instead, as currently happens when `det <= 0` at Line 513. The current logic does not prevent this function from returning coordinates outside the slice defined for the peak.
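For reference, a standalone sketch of the bounds handling the patch above introduces: the fitted peak coordinates are clipped back into the image and flagged, instead of being returned out of range (the names follow `_find_peak`, but this helper is only an illustration):

```python
def clamp_peak(xm, ym, nx, ny):
    """Clip a fitted peak (xm, ym) to an (ny, nx) image, as in the patch."""
    if 0.0 <= xm <= (nx - 1.0) and 0.0 <= ym <= (ny - 1.0):
        fit_status = 'SUCCESS'
    else:
        # Clip to the valid pixel range instead of returning a
        # coordinate that lies outside the image.
        xm = 0.0 if xm < 0.0 else min(xm, nx - 1.0)
        ym = 0.0 if ym < 0.0 else min(ym, ny - 1.0)
        fit_status = 'WARNING:EDGE'
    return (xm, ym), fit_status

assert clamp_peak(-0.3, 11.0, 20, 20) == ((0.0, 11.0), 'WARNING:EDGE')
```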
0.0
0056b97943e6a0649685b0d93fdf66491b60d86e
[ "tweakwcs/tests/test_matchutils.py::test_find_peak_fit_over_edge" ]
[ "tweakwcs/tests/test_matchutils.py::test_xy_2dhist", "tweakwcs/tests/test_matchutils.py::test_find_peak_nodata_all_zeros[shape0-None]", "tweakwcs/tests/test_matchutils.py::test_find_peak_nodata_all_zeros[shape1-0]", "tweakwcs/tests/test_matchutils.py::test_find_peak_nodata_all_zeros[shape2-1]", "tweakwcs/tests/test_matchutils.py::test_find_peak_nodata_all_zeros[shape3-None]", "tweakwcs/tests/test_matchutils.py::test_find_peak_nodata_all_zeros[shape4-0]", "tweakwcs/tests/test_matchutils.py::test_find_peak_nodata_all_zeros[shape5-1]", "tweakwcs/tests/test_matchutils.py::test_find_peak_nodata_all_zeros[shape6-None]", "tweakwcs/tests/test_matchutils.py::test_find_peak_nodata_all_zeros[shape7-0]", "tweakwcs/tests/test_matchutils.py::test_find_peak_nodata_all_zeros[shape8-1]", "tweakwcs/tests/test_matchutils.py::test_find_peak_edge_1pix_valid_strip[True]", "tweakwcs/tests/test_matchutils.py::test_find_peak_edge_1pix_valid_strip[False]", "tweakwcs/tests/test_matchutils.py::test_find_peak_nodata_peak_is_invalid", "tweakwcs/tests/test_matchutils.py::test_find_peak_few_data_center_of_mass", "tweakwcs/tests/test_matchutils.py::test_find_peak_few_data_for_center_of_mass", "tweakwcs/tests/test_matchutils.py::test_find_peak_negative_peak", "tweakwcs/tests/test_matchutils.py::test_find_peak_tiny_box_1pix", "tweakwcs/tests/test_matchutils.py::test_find_peak_negative_box_size", "tweakwcs/tests/test_matchutils.py::test_find_peak_success", "tweakwcs/tests/test_matchutils.py::test_find_peak_fail_lstsq", "tweakwcs/tests/test_matchutils.py::test_find_peak_nodata_after_fail", "tweakwcs/tests/test_matchutils.py::test_find_peak_badfit", "tweakwcs/tests/test_matchutils.py::test_estimate_2dhist_shift_one_bin[100]", "tweakwcs/tests/test_matchutils.py::test_estimate_2dhist_shift_one_bin[2]", "tweakwcs/tests/test_matchutils.py::test_estimate_2dhist_shift_edge", "tweakwcs/tests/test_matchutils.py::test_estimate_2dhist_shift_fit_failed", "tweakwcs/tests/test_matchutils.py::test_estimate_2dhist_shift_two_equal_maxima", "tweakwcs/tests/test_matchutils.py::test_tpmatch_bad_pars[0-1-1]", "tweakwcs/tests/test_matchutils.py::test_tpmatch_bad_pars[1-0-1]", "tweakwcs/tests/test_matchutils.py::test_tpmatch_bad_pars[1-1-0]", "tweakwcs/tests/test_matchutils.py::test_tpmatch_bad_call_pars[refcat0-imcat0-None-TypeError]", "tweakwcs/tests/test_matchutils.py::test_tpmatch_bad_call_pars[refcat1-imcat1-None-ValueError]", "tweakwcs/tests/test_matchutils.py::test_tpmatch_bad_call_pars[refcat2-imcat2-None-TypeError]", "tweakwcs/tests/test_matchutils.py::test_tpmatch_bad_call_pars[refcat3-imcat3-None-ValueError]", "tweakwcs/tests/test_matchutils.py::test_tpmatch_bad_call_pars[refcat4-imcat4-None-KeyError]", "tweakwcs/tests/test_matchutils.py::test_tpmatch_bad_call_pars[refcat5-imcat5-None-KeyError]", "tweakwcs/tests/test_matchutils.py::test_tpmatch_bad_call_pars[refcat6-imcat6-None-KeyError]", "tweakwcs/tests/test_matchutils.py::test_tpmatch_bad_call_pars[refcat7-imcat7-None-KeyError]", "tweakwcs/tests/test_matchutils.py::test_tpmatch_bad_call_pars[refcat8-imcat8-tp_wcs8-KeyError]", "tweakwcs/tests/test_matchutils.py::test_tpmatch_bad_call_pars[refcat9-imcat9-tp_wcs9-KeyError]", "tweakwcs/tests/test_matchutils.py::test_tpmatch[None-False]", "tweakwcs/tests/test_matchutils.py::test_tpmatch[None-True]", "tweakwcs/tests/test_matchutils.py::test_tpmatch[tp_wcs2-False]", "tweakwcs/tests/test_matchutils.py::test_tpmatch[tp_wcs3-True]", "tweakwcs/tests/test_matchutils.py::test_match_catalogs_abc" ]
{ "failed_lite_validators": [ "has_many_modified_files" ], "has_test_patch": true, "is_lite": false }
2021-05-07 02:14:53+00:00
bsd-3-clause
5,615
spacetx__slicedimage-26
diff --git a/slicedimage/io.py b/slicedimage/io.py index f930798..afe39c6 100644 --- a/slicedimage/io.py +++ b/slicedimage/io.py @@ -9,7 +9,7 @@ import tempfile from packaging import version from six.moves import urllib -from slicedimage.urlpath import pathsplit +from slicedimage.urlpath import pathjoin, pathsplit from .backends import DiskBackend, HttpBackend from ._collection import Collection from ._formats import ImageFormat @@ -58,26 +58,35 @@ def resolve_path_or_url(path_or_url, allow_caching=True): raise +def _resolve_absolute_url(absolute_url, allow_caching): + """ + Given a string that is an absolute URL, return a tuple consisting of: a + :py:class:`slicedimage.backends._base.Backend`, the basename of the object, and the baseurl of + the object. + """ + splitted = pathsplit(absolute_url) + backend = infer_backend(splitted[0], allow_caching) + return backend, splitted[1], splitted[0] + + def resolve_url(name_or_url, baseurl=None, allow_caching=True): """ Given a string that can either be a name or a fully qualified url, return a tuple consisting of: a :py:class:`slicedimage.backends._base.Backend`, the basename of the object, and the baseurl of the object. - If the string is a name and not a fully qualified url, then baseurl must be set. + If the string is a name and not a fully qualified url, then baseurl must be set. If the string + is a fully qualified url, then baseurl is ignored. """ try: # assume it's a fully qualified url. - splitted = pathsplit(name_or_url) - backend = infer_backend(splitted[0], allow_caching) - return backend, splitted[1], splitted[0] + return _resolve_absolute_url(name_or_url, allow_caching) except ValueError: if baseurl is None: # oh, we have no baseurl. punt. raise - # it's not a fully qualified url. - backend = infer_backend(baseurl, allow_caching) - return backend, name_or_url, baseurl + absolute_url = pathjoin(baseurl, name_or_url) + return _resolve_absolute_url(absolute_url, allow_caching) class Reader(object):
spacetx/slicedimage
a4bcd1715016ca1411d70cdd728ed53be411a341
diff --git a/tests/io/test_resolve_url.py b/tests/io/test_resolve_url.py new file mode 100644 index 0000000..c39152f --- /dev/null +++ b/tests/io/test_resolve_url.py @@ -0,0 +1,65 @@ +import os +import sys +import tempfile +import unittest +import uuid + + +pkg_root = os.path.abspath(os.path.join(os.path.dirname(__file__), "..", "..")) # noqa +sys.path.insert(0, pkg_root) # noqa + + +from slicedimage.io import resolve_path_or_url, resolve_url + + +class TestResolvePathOrUrl(unittest.TestCase): + def test_valid_local_path(self): + with tempfile.NamedTemporaryFile() as tfn: + abspath = os.path.realpath(tfn.name) + _, name, baseurl = resolve_path_or_url(abspath) + self.assertEqual(name, os.path.basename(abspath)) + self.assertEqual("file://{}".format(os.path.dirname(abspath)), baseurl) + + cwd = os.getcwd() + try: + os.chdir(os.path.dirname(abspath)) + _, name, baseurl = resolve_path_or_url(os.path.basename(abspath)) + self.assertEqual(name, os.path.basename(abspath)) + self.assertEqual("file://{}".format(os.path.dirname(abspath)), baseurl) + finally: + os.chdir(cwd) + + def test_invalid_local_path(self): + with self.assertRaises(ValueError): + resolve_path_or_url(str(uuid.uuid4())) + + def test_url(self): + _, name, baseurl = resolve_path_or_url("https://github.com/abc/def") + self.assertEqual(name, "def") + self.assertEqual(baseurl, "https://github.com/abc") + + +class TestResolveUrl(unittest.TestCase): + def test_fully_qualified_url(self): + _, name, baseurl = resolve_url("https://github.com/abc/def") + self.assertEqual(name, "def") + self.assertEqual(baseurl, "https://github.com/abc") + + # even with a baseurl, this should work. + _, name, baseurl = resolve_url("https://github.com/abc/def", "https://github.io") + self.assertEqual(name, "def") + self.assertEqual(baseurl, "https://github.com/abc") + + def test_relative_url(self): + _, name, baseurl = resolve_url("def", "https://github.com/abc") + self.assertEqual(name, "def") + self.assertEqual(baseurl, "https://github.com/abc") + + # even with a path separator in the relative path, it should work. + _, name, baseurl = resolve_url("abc/def", "https://github.com/") + self.assertEqual(name, "def") + self.assertEqual(baseurl, "https://github.com/abc") + + +if __name__ == "__main__": + unittest.main()
slicedimage relative paths in hybridization.json are relative to experiment.json For example, in the following file structure: ``` experiment.json fov_001/ hybridization.json/ file_0.tiff ``` The file path to `file_0.tiff` in `hybridization.json` needs to be `fov_001/file_0.tiff` _not_ `file_0.tiff`, which is counter-intuitive. The below code reproduces this issue -- it loads, but contains the odd file paths suggested above. ``` experiment = Experiment() experiment.read('http://czi.starfish.data.public.s3.amazonaws.com/20180813/MERFISH/experiment.json') ```
0.0
a4bcd1715016ca1411d70cdd728ed53be411a341
[ "tests/io/test_resolve_url.py::TestResolveUrl::test_relative_url" ]
[ "tests/io/test_resolve_url.py::TestResolvePathOrUrl::test_invalid_local_path", "tests/io/test_resolve_url.py::TestResolvePathOrUrl::test_url", "tests/io/test_resolve_url.py::TestResolvePathOrUrl::test_valid_local_path", "tests/io/test_resolve_url.py::TestResolveUrl::test_fully_qualified_url" ]
{ "failed_lite_validators": [ "has_hyperlinks", "has_media", "has_pytest_match_arg" ], "has_test_patch": true, "is_lite": false }
2018-08-14 05:31:46+00:00
mit
5,616
spatial-image__spatial-image-20
diff --git a/spatial_image.py b/spatial_image.py index 68248db..de9d3e8 100644 --- a/spatial_image.py +++ b/spatial_image.py @@ -176,7 +176,7 @@ class SpatialImageXCDataClass(SpatialImageXDataClass): c_axis_name = axis_names["c"] c_axis_units = "" if axis_units and "c" in axis_units: - c_axis_units["c"] = "" + c_axis_units = axis_units["c"] self.c = CAxis(c_coords, c_axis_name, c_axis_units) @@ -343,7 +343,7 @@ class SpatialImageYXCDataClass(SpatialImageYXDataClass): c_axis_name = axis_names["c"] c_axis_units = "" if axis_units and "c" in axis_units: - c_axis_units["c"] = "" + c_axis_units = axis_units["c"] self.c = CAxis(c_coords, c_axis_name, c_axis_units) @@ -535,7 +535,7 @@ class SpatialImageZYXCDataClass(SpatialImageZYXDataClass): c_axis_name = axis_names["c"] c_axis_units = "" if axis_units and "c" in axis_units: - c_axis_units["c"] = "" + c_axis_units = axis_units["c"] self.c = CAxis(c_coords, c_axis_name, c_axis_units) @@ -697,7 +697,7 @@ class SpatialImageCXDataClass(SpatialImageDataClass): c_axis_name = axis_names["c"] c_axis_units = "" if axis_units and "c" in axis_units: - c_axis_units["c"] = "" + c_axis_units = axis_units["c"] self.c = CAxis(c_coords, c_axis_name, c_axis_units) @@ -761,7 +761,7 @@ class SpatialImageTCXDataClass(SpatialImageDataClass): c_axis_name = axis_names["c"] c_axis_units = "" if axis_units and "c" in axis_units: - c_axis_units["c"] = "" + c_axis_units = axis_units["c"] self.c = CAxis(c_coords, c_axis_name, c_axis_units) @@ -826,7 +826,7 @@ class SpatialImageCYXDataClass(SpatialImageDataClass): c_axis_name = axis_names["c"] c_axis_units = "" if axis_units and "c" in axis_units: - c_axis_units["c"] = "" + c_axis_units = axis_units["c"] self.c = CAxis(c_coords, c_axis_name, c_axis_units) @@ -903,7 +903,7 @@ class SpatialImageTCYXDataClass(SpatialImageDataClass): c_axis_name = axis_names["c"] c_axis_units = "" if axis_units and "c" in axis_units: - c_axis_units["c"] = "" + c_axis_units = axis_units["c"] self.c = CAxis(c_coords, c_axis_name, c_axis_units) @@ -980,7 +980,7 @@ class SpatialImageCZYXDataClass(SpatialImageDataClass): c_axis_name = axis_names["c"] c_axis_units = "" if axis_units and "c" in axis_units: - c_axis_units["c"] = "" + c_axis_units = axis_units["c"] self.c = CAxis(c_coords, c_axis_name, c_axis_units) @@ -1070,7 +1070,7 @@ class SpatialImageTCZYXDataClass(SpatialImageDataClass): c_axis_name = axis_names["c"] c_axis_units = "" if axis_units and "c" in axis_units: - c_axis_units["c"] = "" + c_axis_units = axis_units["c"] self.c = CAxis(c_coords, c_axis_name, c_axis_units)
spatial-image/spatial-image
43b2faddce73cb17c2a5bdef5998e76e75870463
diff --git a/test_spatial_image.py b/test_spatial_image.py index 914135c..e04d94e 100644 --- a/test_spatial_image.py +++ b/test_spatial_image.py @@ -83,6 +83,7 @@ def test_4D_default_coords_channels_last(): assert np.array_equal(image.coords["x"], np.arange(6, dtype=np.float64)) assert np.array_equal(image.coords["c"], np.arange(6, dtype=np.float64)) + def test_4D_default_coords_channels_first(): array = np.random.random((3, 4, 6, 6)) image = si.to_spatial_image(array, dims=("c", "z", "y", "x")) @@ -91,6 +92,7 @@ def test_4D_default_coords_channels_first(): assert np.array_equal(image.coords["y"], np.arange(6, dtype=np.float64)) assert np.array_equal(image.coords["x"], np.arange(6, dtype=np.float64)) + def test_5D_default_coords(): array = np.random.random((3, 4, 6, 6, 5)) image = si.to_spatial_image(array) @@ -179,17 +181,17 @@ def test_SpatialImageXCDataClass(): scale={"x": 2.0}, translation={"x": 3.5}, name="img", - axis_names={"x": "left-right", "c": "features"}, - c_coords=["fa", "fb"], + axis_names={"x": "left-right", "c": "channel-wavelength"}, + c_coords=["c1", "c2"], ) assert np.array_equal(image.data, array) assert np.array_equal( image.coords["x"].data, np.arange(3, dtype=np.float64) * 2.0 + 3.5 ) - assert np.array_equal(image.coords["c"].data, ["fa", "fb"]) + assert np.array_equal(image.coords["c"].data, ["c1", "c2"]) assert image.name == "img" assert image.x.long_name == "left-right" - assert image.c.long_name == "features" + assert image.c.long_name == "channel-wavelength" assert si.SpatialImageDataClasses[("x", "c")] is si.SpatialImageXCDataClass @@ -246,22 +248,24 @@ def test_SpatialImageTXCDataClass(): scale={"x": 2.0}, translation={"x": 3.5}, name="img", - axis_names={"x": "left-right", "t": "time"}, - axis_units={"x": "millimeters", "t": "seconds"}, + axis_names={"x": "left-right", "t": "time", "c": "channel-wavelength"}, + axis_units={"x": "millimeters", "t": "seconds", "c": "nanometers"}, t_coords=["ta", "tb", "tc"], - c_coords=["fa", "fb", "fc", "fd"], + c_coords=["c1", "c2", "c3", "c4"], ) assert np.array_equal(image.data, array) assert np.array_equal( image.coords["x"].data, np.arange(2, dtype=np.float64) * 2.0 + 3.5 ) assert np.array_equal(image.coords["t"].data, ["ta", "tb", "tc"]) - assert np.array_equal(image.coords["c"].data, ["fa", "fb", "fc", "fd"]) + assert np.array_equal(image.coords["c"].data, ["c1", "c2", "c3", "c4"]) assert image.name == "img" assert image.x.long_name == "left-right" assert image.t.long_name == "time" + assert image.c.long_name == "channel-wavelength" assert image.x.units == "millimeters" assert image.t.units == "seconds" + assert image.c.units == "nanometers" assert si.SpatialImageDataClasses[("t", "x", "c")] is si.SpatialImageTXCDataClass @@ -324,8 +328,12 @@ def test_SpatialImageYXCDataClass(): scale={"y": 3.4, "x": 2.0}, translation={"y": 1.2, "x": 3.5}, name="img", - axis_names={"x": "left-right", "y": "anterior-posterior"}, - axis_units={"x": "millimeters", "y": "micrometers"}, + axis_names={ + "x": "left-right", + "y": "anterior-posterior", + "c": "channel-wavelength", + }, + axis_units={"x": "millimeters", "y": "micrometers", "c": "nanometers"}, c_coords=[ 40, ], @@ -346,8 +354,10 @@ def test_SpatialImageYXCDataClass(): assert image.name == "img" assert image.x.long_name == "left-right" assert image.y.long_name == "anterior-posterior" + assert image.c.long_name == "channel-wavelength" assert image.x.units == "millimeters" assert image.y.units == "micrometers" + assert image.c.units == "nanometers" assert 
si.SpatialImageDataClasses[("y", "x", "c")] is si.SpatialImageYXCDataClass @@ -427,8 +437,18 @@ def test_SpatialImageTYXCDataClass(): scale={"y": 3.4, "x": 2.0}, translation={"y": 1.2, "x": 3.5}, name="img", - axis_names={"x": "left-right", "y": "anterior-posterior"}, - axis_units={"x": "millimeters", "y": "micrometers"}, + axis_names={ + "t": "time", + "x": "left-right", + "y": "anterior-posterior", + "c": "channel-wavelength", + }, + axis_units={ + "t": "milliseconds", + "x": "millimeters", + "y": "micrometers", + "c": "nanometers", + }, t_coords=[ 20, 40, @@ -458,10 +478,14 @@ def test_SpatialImageTYXCDataClass(): ], ) assert image.name == "img" + assert image.t.long_name == "time" assert image.x.long_name == "left-right" assert image.y.long_name == "anterior-posterior" + assert image.c.long_name == "channel-wavelength" + assert image.t.units == "milliseconds" assert image.x.units == "millimeters" assert image.y.units == "micrometers" + assert image.c.units == "nanometers" assert ( si.SpatialImageDataClasses[("t", "y", "x", "c")] is si.SpatialImageTYXCDataClass @@ -545,8 +569,14 @@ def test_SpatialImageZYXCDataClass(): "z": "inferior-superior", "x": "left-right", "y": "anterior-posterior", + "c": "channel-wavelength", + }, + axis_units={ + "z": "millimeters", + "x": "millimeters", + "y": "micrometers", + "c": "nanometers", }, - axis_units={"z": "millimeters", "x": "millimeters", "y": "micrometers"}, c_coords=[ 3, ], @@ -571,9 +601,11 @@ def test_SpatialImageZYXCDataClass(): assert image.x.long_name == "left-right" assert image.y.long_name == "anterior-posterior" assert image.z.long_name == "inferior-superior" + assert image.c.long_name == "channel-wavelength" assert image.x.units == "millimeters" assert image.y.units == "micrometers" assert image.z.units == "millimeters" + assert image.c.units == "nanometers" assert ( si.SpatialImageDataClasses[("z", "y", "x", "c")] is si.SpatialImageZYXCDataClass @@ -671,11 +703,19 @@ def test_SpatialImageTZYXCDataClass(): translation={"z": 0.9, "y": 1.2, "x": 3.5}, name="img", axis_names={ + "t": "time", "z": "inferior-superior", "x": "left-right", "y": "anterior-posterior", + "c": "channel-wavelength", + }, + axis_units={ + "t": "time", + "z": "millimeters", + "x": "millimeters", + "y": "micrometers", + "c": "nanometers", }, - axis_units={"z": "millimeters", "x": "millimeters", "y": "micrometers"}, t_coords=[ 20, ], @@ -709,9 +749,11 @@ def test_SpatialImageTZYXCDataClass(): assert image.x.long_name == "left-right" assert image.y.long_name == "anterior-posterior" assert image.z.long_name == "inferior-superior" + assert image.c.long_name == "channel-wavelength" assert image.x.units == "millimeters" assert image.y.units == "micrometers" assert image.z.units == "millimeters" + assert image.c.units == "nanometers" assert ( si.SpatialImageDataClasses[("t", "z", "y", "x", "c")] @@ -735,17 +777,20 @@ def test_SpatialImageCXDataClass(): scale={"x": 2.0}, translation={"x": 3.5}, name="img", - axis_names={"x": "left-right", "c": "features"}, - c_coords=["fa", "fb"], + axis_names={"c": "channel-wavelength", "x": "left-right"}, + axis_units={"c": "nanometers", "x": "micrometers"}, + c_coords=["c1", "c2"], ) assert np.array_equal(image.data, array) assert np.array_equal( image.coords["x"].data, np.arange(3, dtype=np.float64) * 2.0 + 3.5 ) - assert np.array_equal(image.coords["c"].data, ["fa", "fb"]) + assert np.array_equal(image.coords["c"].data, ["c1", "c2"]) assert image.name == "img" assert image.x.long_name == "left-right" - assert image.c.long_name == 
"features" + assert image.c.long_name == "channel-wavelength" + assert image.x.units == "micrometers" + assert image.c.units == "nanometers" assert si.SpatialImageDataClasses[("c", "x")] is si.SpatialImageCXDataClass @@ -771,20 +816,22 @@ def test_SpatialImageTCXDataClass(): scale={"x": 2.0}, translation={"x": 3.5}, name="img", - axis_names={"x": "left-right", "t": "time"}, - axis_units={"x": "millimeters", "t": "seconds"}, + axis_names={"t": "time", "c": "channel-wavelength", "x": "left-right"}, + axis_units={"t": "seconds", "c": "nanometers", "x": "millimeters"}, t_coords=["ta", "tb", "tc"], - c_coords=["fa", "fb", "fc", "fd"], + c_coords=["c1", "c2", "c3", "c4"], ) assert np.array_equal(image.data, array) assert np.array_equal( image.coords["x"].data, np.arange(2, dtype=np.float64) * 2.0 + 3.5 ) assert np.array_equal(image.coords["t"].data, ["ta", "tb", "tc"]) - assert np.array_equal(image.coords["c"].data, ["fa", "fb", "fc", "fd"]) + assert np.array_equal(image.coords["c"].data, ["c1", "c2", "c3", "c4"]) assert image.name == "img" + assert image.t.long_name == "time" assert image.x.long_name == "left-right" assert image.t.long_name == "time" + assert image.t.units == "seconds" assert image.x.units == "millimeters" assert image.t.units == "seconds" @@ -812,8 +859,12 @@ def test_SpatialImageCYXDataClass(): scale={"y": 3.4, "x": 2.0}, translation={"y": 1.2, "x": 3.5}, name="img", - axis_names={"x": "left-right", "y": "anterior-posterior"}, - axis_units={"x": "millimeters", "y": "micrometers"}, + axis_names={ + "c": "channel-wavelength", + "x": "left-right", + "y": "anterior-posterior", + }, + axis_units={"c": "nanometers", "x": "millimeters", "y": "micrometers"}, c_coords=[ 40, ], @@ -832,10 +883,12 @@ def test_SpatialImageCYXDataClass(): ], ) assert image.name == "img" + assert image.c.long_name == "channel-wavelength" assert image.x.long_name == "left-right" assert image.y.long_name == "anterior-posterior" assert image.x.units == "millimeters" assert image.y.units == "micrometers" + assert image.c.units == "nanometers" assert si.SpatialImageDataClasses[("c", "y", "x")] is si.SpatialImageCYXDataClass @@ -864,8 +917,18 @@ def test_SpatialImageTCYXDataClass(): scale={"y": 3.4, "x": 2.0}, translation={"y": 1.2, "x": 3.5}, name="img", - axis_names={"x": "left-right", "y": "anterior-posterior"}, - axis_units={"x": "millimeters", "y": "micrometers"}, + axis_names={ + "t": "time", + "c": "channel-wavelength", + "x": "left-right", + "y": "anterior-posterior", + }, + axis_units={ + "t": "seconds", + "c": "nanometers", + "x": "millimeters", + "y": "micrometers", + }, t_coords=[ 20, 40, @@ -895,8 +958,12 @@ def test_SpatialImageTCYXDataClass(): ], ) assert image.name == "img" + assert image.t.long_name == "time" + assert image.c.long_name == "channel-wavelength" assert image.x.long_name == "left-right" assert image.y.long_name == "anterior-posterior" + assert image.t.units == "seconds" + assert image.c.units == "nanometers" assert image.x.units == "millimeters" assert image.y.units == "micrometers" @@ -905,6 +972,77 @@ def test_SpatialImageTCYXDataClass(): ) +def test_SpatialImageCZYXDataClass(): + array = np.random.random((1, 2, 3, 2)) + + image = si.SpatialImageCZYXDataClass.new(array) + assert np.array_equal(image.data, array) + assert np.array_equal(image.coords["c"].data, np.arange(1)) + assert np.array_equal(image.coords["z"].data, np.arange(2, dtype=np.float64)) + assert np.array_equal(image.coords["y"].data, np.arange(3, dtype=np.float64)) + assert np.array_equal(image.coords["x"].data, 
np.arange(2, dtype=np.float64)) + assert image.name == "image" + assert image.x.long_name == "x" + assert image.y.long_name == "y" + assert image.z.long_name == "z" + assert image.c.long_name == "c" + assert image.x.units == "" + assert image.y.units == "" + assert image.z.units == "" + assert image.c.units == "" + + image = si.SpatialImageCZYXDataClass.new( + array, + scale={"z": 1.8, "y": 3.4, "x": 2.0}, + translation={"z": 0.9, "y": 1.2, "x": 3.5}, + name="img", + axis_names={ + "c": "channel-wavelength", + "z": "inferior-superior", + "x": "left-right", + "y": "anterior-posterior", + }, + axis_units={ + "c": "nanometers", + "z": "millimeters", + "x": "millimeters", + "y": "micrometers", + }, + c_coords=[ + 4, + ], + ) + assert np.array_equal(image.data, array) + assert np.array_equal( + image.coords["z"].data, np.arange(2, dtype=np.float64) * 1.8 + 0.9 + ) + assert np.array_equal( + image.coords["y"].data, np.arange(3, dtype=np.float64) * 3.4 + 1.2 + ) + assert np.array_equal( + image.coords["x"].data, np.arange(2, dtype=np.float64) * 2.0 + 3.5 + ) + assert np.array_equal( + image.coords["c"].data, + [ + 4, + ], + ) + assert image.name == "img" + assert image.c.long_name == "channel-wavelength" + assert image.x.long_name == "left-right" + assert image.y.long_name == "anterior-posterior" + assert image.z.long_name == "inferior-superior" + assert image.c.units == "nanometers" + assert image.x.units == "millimeters" + assert image.y.units == "micrometers" + assert image.z.units == "millimeters" + + assert ( + si.SpatialImageDataClasses[("c", "z", "y", "x")] is si.SpatialImageCZYXDataClass + ) + + def test_SpatialImageTCZYXDataClass(): array = np.random.random((1, 1, 2, 3, 2)) @@ -933,11 +1071,19 @@ def test_SpatialImageTCZYXDataClass(): translation={"z": 0.9, "y": 1.2, "x": 3.5}, name="img", axis_names={ + "t": "time", + "c": "channel-wavelength", "z": "inferior-superior", "x": "left-right", "y": "anterior-posterior", }, - axis_units={"z": "millimeters", "x": "millimeters", "y": "micrometers"}, + axis_units={ + "t": "seconds", + "c": "nanometers", + "z": "millimeters", + "x": "millimeters", + "y": "micrometers", + }, t_coords=[ 20, ], @@ -968,9 +1114,13 @@ def test_SpatialImageTCZYXDataClass(): ], ) assert image.name == "img" + assert image.t.long_name == "time" + assert image.c.long_name == "channel-wavelength" assert image.x.long_name == "left-right" assert image.y.long_name == "anterior-posterior" assert image.z.long_name == "inferior-superior" + assert image.t.units == "seconds" + assert image.c.units == "nanometers" assert image.x.units == "millimeters" assert image.y.units == "micrometers" assert image.z.units == "millimeters"
Assigning the `c` Coordinate for `axis_units` in `SpatialImageCYXDataClass` raises a TypeError Hello, thanks for making `spatial-image`! I've run into a small error when initializing a `SpatialImageCYXDataClass`: setting an axis unit for the "Channel" / "c" dimension raises a `TypeError`. ```python # imports, example data import numpy as np import spatialdata as sd from spatialdata.models import Image2DModel, C, Y, X from spatial_image import to_spatial_image rng = np.random.default_rng() _data = rng.random(size=(3, 10, 10)) channel_names = ["chan_0", "chan_1", "chan_2"] ``` Here is a small example demonstrating the error using `spatialdata`. ```python fov_sd = sd.SpatialData( images={ "test": Image2DModel.parse( data=_data, dims=(C, Y, X), axis_names={C: "channel", Y: "y", X: "x"}, axis_units={C: "channel", Y: "pixel", X: "pixel"}, c_coords=channel_names, ) } ) ``` In addition, here is one using just `spatial-image`: ```python fov_si = to_spatial_image( array_like=_data, dims=(C, Y, X), axis_names={C: "channel", Y: "y", X: "x"}, axis_units={C: "channel", Y: "pixel", X: "pixel"}, c_coords=channel_names, ) ``` The error happens at https://github.com/spatial-image/spatial-image/blob/43b2faddce73cb17c2a5bdef5998e76e75870463/spatial_image.py#L827-L829 I'd love to contribute to the repo with a small fix! <details><summary>Traceback</summary> ```python --------------------------------------------------------------------------- TypeError Traceback (most recent call last) Cell In[2], line 12 7 _data = rng.random(size=(3, 10, 10)) 8 channel_names = ["chan_0", "chan_1", "chan_2"] 10 fov_sd = sd.SpatialData( 11 images={ ---> 12 "test": Image2DModel.parse( 13 data=_data, 14 dims=(C, Y, X), 15 axis_names={C: "channel", Y: "y", X: "x"}, 16 axis_units={C: "channel", Y: "pixel", X: "pixel"}, 17 c_coords=channel_names, 18 ) 19 } 20 ) File ~/.pyenv/versions/3.11.4/envs/spatial_data/lib/python3.11/site-packages/spatialdata/models/models.py:176, in RasterSchema.parse(cls, data, dims, transformations, scale_factors, method, chunks, **kwargs) 170 raise ValueError( 171 f"Cannot transpose arrays to match `dims`: {dims}.", 172 "Try to reshape `data` or `dims`.", 173 ) from e 175 # finally convert to spatial image --> 176 data = to_spatial_image(array_like=data, dims=cls.dims.dims, **kwargs) 177 # parse transformations 178 _parse_transformations(data, transformations) File ~/.pyenv/versions/3.11.4/envs/spatial_data/lib/python3.11/site-packages/spatial_image.py:1228, in to_spatial_image(array_like, dims, scale, translation, name, axis_names, axis_units, t_coords, c_coords) 1225 if "t" in dims: 1226 si_kwargs["t_coords"] = t_coords -> 1228 image = SIDataClass.new(array_like, **si_kwargs) 1230 return image File ~/.pyenv/versions/3.11.4/envs/spatial_data/lib/python3.11/site-packages/xarray_dataclasses/dataarray.py:148, in AsDataArray.new..new(cls, *args, **kwargs) 147 def new(cls: Any, *args: Any, **kwargs: Any) -> Any: --> 148 return asdataarray(cls(*args, **kwargs)) File ~/.pyenv/versions/3.11.4/envs/spatial_data/lib/python3.11/site-packages/spatial_image.py:829, in SpatialImageCYXDataClass.__init__(self, data, scale, translation, name, axis_names, axis_units, c_coords) 827 c_axis_units = "" 828 if axis_units and "c" in axis_units: --> 829 c_axis_units["c"] = "" 831 self.c = CAxis(c_coords, c_axis_name, c_axis_units) TypeError: 'str' object does not support item assignment ``` </details>
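For reference, a tiny runnable sketch of the corrected logic from the patch above (the function name is made up; the body matches the fixed lines):

```python
def resolve_c_axis_units(axis_units):
    """Return the unit for the 'c' axis, defaulting to an empty string."""
    c_axis_units = ""
    if axis_units and "c" in axis_units:
        # Buggy original: c_axis_units["c"] = ""  -> TypeError, since
        # c_axis_units is a str. The fix reads the supplied unit instead.
        c_axis_units = axis_units["c"]
    return c_axis_units

assert resolve_c_axis_units({"c": "channel", "y": "pixel"}) == "channel"
assert resolve_c_axis_units(None) == ""
```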
0.0
43b2faddce73cb17c2a5bdef5998e76e75870463
[ "test_spatial_image.py::test_SpatialImageYXCDataClass", "test_spatial_image.py::test_SpatialImageZYXCDataClass", "test_spatial_image.py::test_SpatialImageCXDataClass", "test_spatial_image.py::test_SpatialImageTCXDataClass", "test_spatial_image.py::test_SpatialImageCYXDataClass", "test_spatial_image.py::test_SpatialImageTCYXDataClass", "test_spatial_image.py::test_SpatialImageCZYXDataClass", "test_spatial_image.py::test_SpatialImageTCZYXDataClass" ]
[ "test_spatial_image.py::test_is_spatial_image", "test_spatial_image.py::test_to_spatial_image", "test_spatial_image.py::test_2D_default_dims", "test_spatial_image.py::test_3D_default_dims", "test_spatial_image.py::test_4D_default_dims", "test_spatial_image.py::test_5D_default_dims", "test_spatial_image.py::test_catch_unsupported_dims", "test_spatial_image.py::test_2D_default_coords", "test_spatial_image.py::test_3D_default_coords", "test_spatial_image.py::test_4D_default_coords_channels_last", "test_spatial_image.py::test_4D_default_coords_channels_first", "test_spatial_image.py::test_5D_default_coords", "test_spatial_image.py::test_spatial_coords_set_scale", "test_spatial_image.py::test_time_coords", "test_spatial_image.py::test_c_coords", "test_spatial_image.py::test_SpatialImage_type", "test_spatial_image.py::test_SpatialImageXDataClass", "test_spatial_image.py::test_SpatialImageXCDataClass", "test_spatial_image.py::test_SpatialImageTXDataClass", "test_spatial_image.py::test_SpatialImageTXCDataClass", "test_spatial_image.py::test_SpatialImageYXDataClass", "test_spatial_image.py::test_SpatialImageTYXDataClass", "test_spatial_image.py::test_SpatialImageTYXCDataClass", "test_spatial_image.py::test_SpatialImageZYXDataClass", "test_spatial_image.py::test_SpatialImageTZYXDataClass", "test_spatial_image.py::test_SpatialImageTZYXCDataClass" ]
{ "failed_lite_validators": [ "has_many_hunks" ], "has_test_patch": true, "is_lite": false }
2023-07-15 06:48:32+00:00
mit
5,617
spcl__pymlir-16
diff --git a/AUTHORS b/AUTHORS index 5047613..6583143 100644 --- a/AUTHORS +++ b/AUTHORS @@ -5,3 +5,4 @@ ETH Zurich Tal Ben-Nun Kaushik Kulkarni Mehdi Amini +Berke Ates \ No newline at end of file diff --git a/mlir/astnodes.py b/mlir/astnodes.py index e15c71f..db8b57c 100644 --- a/mlir/astnodes.py +++ b/mlir/astnodes.py @@ -508,6 +508,7 @@ class Op(Node): class GenericOperation(Op): name: str args: Optional[List[SsaId]] + successors: Optional[List[BlockId]] attributes: Optional[AttributeDict] type: List[Type] @@ -519,6 +520,8 @@ class GenericOperation(Op): result += ', '.join(dump_or_value(arg, indent) for arg in self.args) result += ')' + if self.successors: + result += '[' + dump_or_value(self.successors, indent) + ']' if self.attributes: result += ' ' + dump_or_value(self.attributes, indent) if isinstance(self.type, list): diff --git a/mlir/lark/mlir.lark b/mlir/lark/mlir.lark index 8636fc9..d54dffe 100644 --- a/mlir/lark/mlir.lark +++ b/mlir/lark/mlir.lark @@ -167,7 +167,7 @@ location : string_literal ":" decimal_literal ":" decimal_literal trailing_location : ("loc" "(" location ")") // Undefined operations in all dialects -generic_operation : string_literal "(" optional_ssa_use_list ")" optional_attr_dict trailing_type +generic_operation : string_literal "(" optional_ssa_use_list ")" optional_successor_list optional_attr_dict trailing_type custom_operation : bare_id "." bare_id optional_ssa_use_list trailing_type // Final operation definition @@ -184,6 +184,7 @@ ssa_id_and_type_list : ssa_id_and_type ("," ssa_id_and_type)* operation_list: operation+ block_label : block_id optional_block_arg_list ":" +successor_list : "[" block_id? ("," block_id)* "]" block : optional_block_label operation_list region : "{" block* "}" @@ -211,6 +212,7 @@ region : "{" block* "}" ?optional_memory_space : ("," memory_space)? -> optional ?optional_block_label : block_label? -> optional ?optional_symbol_use_list : symbol_use_list? -> optional +?optional_successor_list : successor_list? -> optional // ---------------------------------------------------------------------- // Modules and functions
spcl/pymlir
5ce958e917c3249aff0076fd98e428d9f26e1dfd
diff --git a/tests/test_syntax.py b/tests/test_syntax.py index a785b23..37208de 100644 --- a/tests/test_syntax.py +++ b/tests/test_syntax.py @@ -215,6 +215,24 @@ def test_generic_dialect_std(parser: Optional[Parser] = None): module = parser.parse(code) print(module.pretty()) +def test_generic_dialect_std_cond_br(parser: Optional[Parser] = None): + code = ''' +"module"() ( { +"func"() ( { +^bb0(%arg0: i32): // no predecessors + %c1_i32 = "std.constant"() {value = 1 : i32} : () -> i32 + %0 = "std.cmpi"(%arg0, %c1_i32) {predicate = 3 : i64} : (i32, i32) -> i1 + "std.cond_br"(%0)[^bb1, ^bb2] {operand_segment_sizes = dense<[1, 0, 0]> : vector<3xi32>} : (i1) -> () +^bb1: // pred: ^bb0 + "std.return"(%c1_i32) : (i32) -> () +^bb2: // pred: ^bb0 + "std.return"(%c1_i32) : (i32) -> () +}) {sym_name = "mlir_entry", type = (i32) -> i32} : () -> () +}) : () -> () + ''' + parser = parser or Parser() + module = parser.parse(code) + print(module.pretty()) def test_generic_dialect_llvm(parser: Optional[Parser] = None): code = ''' @@ -255,5 +273,6 @@ if __name__ == '__main__': test_affine(p) test_definitions(p) test_generic_dialect_std(p) + test_generic_dialect_std_cond_br(p) test_generic_dialect_llvm(p) test_integer_sign(p)
Conditional branches in generic form The following MLIR: ``` module { func @mlir_entry(%a: i32) -> i32 { %1 = constant 1 : i32 %isOne = cmpi "sle", %a, %1 : i32 cond_br %isOne, ^one, ^else ^one: return %1 : i32 ^else: return %1 : i32 } } ``` results in the following generic form: ``` // out.mlir "module"() ( { "func"() ( { ^bb0(%arg0: i32): // no predecessors %c1_i32 = "std.constant"() {value = 1 : i32} : () -> i32 %0 = "std.cmpi"(%arg0, %c1_i32) {predicate = 3 : i64} : (i32, i32) -> i1 "std.cond_br"(%0)[^bb1, ^bb2] {operand_segment_sizes = dense<[1, 0, 0]> : vector<3xi32>} : (i1) -> () ^bb1: // pred: ^bb0 "std.return"(%c1_i32) : (i32) -> () ^bb2: // pred: ^bb0 "std.return"(%c1_i32) : (i32) -> () }) {sym_name = "mlir_entry", type = (i32) -> i32} : () -> () }) : () -> () ``` Executing: ``` import mlir ast1 = mlir.parse_path('out.mlir') ``` throws this exception: ``` lark.exceptions.UnexpectedCharacters: No terminal matches '[' in the current parser context, at line 6 col 22 "std.cond_br"(%0)[^bb1, ^bb2] {operand_segment_sizes = de ^ Expected one of: * LBRACE * COLON ```
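A minimal regression sketch in the style of the repository's tests (the `Parser` import path is assumed from the test file added in the patch; the MLIR snippet is the one from that test):

```python
from mlir import Parser  # assumed import, as used in tests/test_syntax.py

code = '''
"module"() ( {
"func"() ( {
^bb0(%arg0: i32):
  %c1_i32 = "std.constant"() {value = 1 : i32} : () -> i32
  %0 = "std.cmpi"(%arg0, %c1_i32) {predicate = 3 : i64} : (i32, i32) -> i1
  "std.cond_br"(%0)[^bb1, ^bb2] {operand_segment_sizes = dense<[1, 0, 0]> : vector<3xi32>} : (i1) -> ()
^bb1:
  "std.return"(%c1_i32) : (i32) -> ()
^bb2:
  "std.return"(%c1_i32) : (i32) -> ()
}) {sym_name = "mlir_entry", type = (i32) -> i32} : () -> ()
}) : () -> ()
'''

# With the successor_list rule added to the grammar, the bracketed
# [^bb1, ^bb2] successors parse instead of raising UnexpectedCharacters.
module = Parser().parse(code)
print(module.pretty())
```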
0.0
5ce958e917c3249aff0076fd98e428d9f26e1dfd
[ "tests/test_syntax.py::test_generic_dialect_std_cond_br" ]
[ "tests/test_syntax.py::test_attributes", "tests/test_syntax.py::test_memrefs", "tests/test_syntax.py::test_trailing_loc", "tests/test_syntax.py::test_modules", "tests/test_syntax.py::test_functions", "tests/test_syntax.py::test_toplevel_function", "tests/test_syntax.py::test_toplevel_functions", "tests/test_syntax.py::test_affine", "tests/test_syntax.py::test_definitions", "tests/test_syntax.py::test_generic_dialect_std", "tests/test_syntax.py::test_generic_dialect_llvm", "tests/test_syntax.py::test_integer_sign" ]
{ "failed_lite_validators": [ "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false }
2021-07-01 18:55:39+00:00
bsd-3-clause
5,618
spdx__tools-python-103
diff --git a/spdx/config.py b/spdx/config.py index 59a11b4..1d80666 100644 --- a/spdx/config.py +++ b/spdx/config.py @@ -34,7 +34,7 @@ def load_license_list(file_name): licenses_map = {} with codecs.open(file_name, 'rb', encoding='utf-8') as lics: licenses = json.load(lics) - version = licenses['licenseListVersion'].split('.') + version, _, _ = licenses['licenseListVersion'].split('-') for lic in licenses['licenses']: if lic.get('isDeprecatedLicenseId'): continue @@ -54,7 +54,7 @@ def load_exception_list(file_name): exceptions_map = {} with codecs.open(file_name, 'rb', encoding='utf-8') as excs: exceptions = json.load(excs) - version = exceptions['licenseListVersion'].split('.') + version, _, _ = exceptions['licenseListVersion'].split('-') for exc in exceptions['exceptions']: if exc.get('isDeprecatedLicenseId'): continue @@ -65,8 +65,8 @@ def load_exception_list(file_name): return version, exceptions_map -(_major, _minor), LICENSE_MAP = load_license_list(_licenses) -LICENSE_LIST_VERSION = Version(major=_major, minor=_minor) +_version, LICENSE_MAP = load_license_list(_licenses) +LICENSE_LIST_VERSION = Version.from_str(_version) -(_major, _minor), EXCEPTION_MAP = load_exception_list(_exceptions) -EXCEPTION_LIST_VERSION = Version(major=_major, minor=_minor) \ No newline at end of file +_version, EXCEPTION_MAP = load_exception_list(_exceptions) +EXCEPTION_LIST_VERSION = Version.from_str(_version)
spdx/tools-python
276e30d5ddeae98e4cefb455b4d245506e6345b1
diff --git a/tests/test_config.py b/tests/test_config.py new file mode 100644 index 0000000..3db5f6e --- /dev/null +++ b/tests/test_config.py @@ -0,0 +1,43 @@ +from __future__ import absolute_import +from __future__ import print_function +from __future__ import unicode_literals + +import unittest +from unittest import TestCase + +from spdx import config +from spdx.version import Version + +class TestLicenseList(TestCase): + + def test_load_license_list(self): + version, licenses_map = config.load_license_list(config._licenses) + assert version == '3.5' + # Test some instances in licenses_map + assert licenses_map['MIT License'] == 'MIT' + assert licenses_map['MIT'] == 'MIT License' + assert licenses_map['Apache License 2.0'] == 'Apache-2.0' + assert licenses_map['Apache-2.0'] == 'Apache License 2.0' + assert licenses_map['GNU General Public License v3.0 only'] == 'GPL-3.0-only' + assert licenses_map['GPL-3.0-only'] == 'GNU General Public License v3.0 only' + + def test_config_license_list_version_constant(self): + assert config.LICENSE_LIST_VERSION == Version(major=3, minor=5) + + def test_load_exception_list(self): + version, exception_map = config.load_exception_list(config._exceptions) + assert version == '3.5' + # Test some instances in exception_map + assert exception_map['Bison exception 2.2'] == 'Bison-exception-2.2' + assert exception_map['Bison-exception-2.2'] == 'Bison exception 2.2' + assert exception_map['OpenVPN OpenSSL Exception'] == 'openvpn-openssl-exception' + assert exception_map['openvpn-openssl-exception'] == 'OpenVPN OpenSSL Exception' + assert exception_map['Qt GPL exception 1.0'] == 'Qt-GPL-exception-1.0' + assert exception_map['Qt-GPL-exception-1.0'] == 'Qt GPL exception 1.0' + + def test_config_exception_list_version_constant(self): + assert config.EXCEPTION_LIST_VERSION == Version(major=3, minor=5) + + +if __name__ == '__main__': + unittest.main()
Version model fields must be integers but sometimes they aren't **BACKGROUND INFORMATION** The `major` and `minor` fields of the `Version` model are supposed to be integers, and the class method `Version.from_str()` is responsible for ensuring that. https://github.com/spdx/tools-python/blob/301d72f6ae57c832c1da7f6402fa49b192de6810/spdx/version.py#L36-L45 If the `license_list_version` field is absent when initializing a `CreationInfo` object, a default value is taken from the `config.py` file. https://github.com/spdx/tools-python/blob/301d72f6ae57c832c1da7f6402fa49b192de6810/spdx/creationinfo.py#L131-L136 **THE ISSUE** The default `Version` object described above is created by assigning the `major` and `minor` fields directly as string values. https://github.com/spdx/tools-python/blob/301d72f6ae57c832c1da7f6402fa49b192de6810/spdx/config.py#L48-L49 As a result, the `__lt__` method and the parser tests (which expect integer values) may fail.
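A small sketch contrasting the two construction paths described above; `Version.from_str` coerces the fields to `int`, while direct keyword construction (what `config.py` was doing) does not:

```python
from spdx.version import Version

# from_str parses "M.m" and stores integer fields, so comparisons
# such as __lt__ behave as expected.
good = Version.from_str("3.5")
assert good.major == 3 and good.minor == 5

# The old default in config.py was effectively doing this, leaving
# strings in fields the rest of the code treats as integers:
bad = Version(major="3", minor="5")
assert bad.major != good.major  # "3" != 3
```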
0.0
276e30d5ddeae98e4cefb455b4d245506e6345b1
[ "tests/test_config.py::TestLicenseList::test_config_exception_list_version_constant", "tests/test_config.py::TestLicenseList::test_config_license_list_version_constant", "tests/test_config.py::TestLicenseList::test_load_exception_list", "tests/test_config.py::TestLicenseList::test_load_license_list" ]
[]
{ "failed_lite_validators": [], "has_test_patch": true, "is_lite": true }
2019-04-17 21:36:51+00:00
apache-2.0
5,619
spdx__tools-python-283
diff --git a/spdx/parsers/tagvaluebuilders.py b/spdx/parsers/tagvaluebuilders.py index 27b4180..4bf2320 100644 --- a/spdx/parsers/tagvaluebuilders.py +++ b/spdx/parsers/tagvaluebuilders.py @@ -1092,7 +1092,7 @@ class FileBuilder(object): """ if self.has_package(doc) and self.has_file(doc): if validations.validate_file_attribution_text(text): - self.file(doc).comment = str_from_text(text) + self.file(doc).attribution_text = str_from_text(text) return True else: raise SPDXValueError("File::AttributionText")
spdx/tools-python
328a816a5bb8ee43d4031c8df0a2655f571b532c
diff --git a/tests/test_tag_value_parser.py b/tests/test_tag_value_parser.py index 54dde11..64ddb17 100644 --- a/tests/test_tag_value_parser.py +++ b/tests/test_tag_value_parser.py @@ -243,7 +243,8 @@ class TestParser(TestCase): 'ArtifactOfProjectName: AcmeTest', 'ArtifactOfProjectHomePage: http://www.acme.org/', 'ArtifactOfProjectURI: http://www.acme.org/', - 'FileComment: <text>Very long file</text>' + 'FileComment: <text>Very long file</text>', + 'FileAttributionText: <text>Acknowledgements that might be required to be communicated in some contexts.</text>' ]) unknown_tag_str = 'SomeUnknownTag: SomeUnknownValue' @@ -318,6 +319,9 @@ class TestParser(TestCase): assert len(spdx_file.artifact_of_project_name) == 1 assert len(spdx_file.artifact_of_project_home) == 1 assert len(spdx_file.artifact_of_project_uri) == 1 + assert spdx_file.comment == 'Very long file' + assert spdx_file.attribution_text == 'Acknowledgements that might be required to be communicated in ' \ + 'some contexts.' def test_unknown_tag(self):
TagValue builder writes attribution text into comment See https://github.com/spdx/tools-python/blob/main/spdx/parsers/tagvaluebuilders.py#L1088-L1095
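For clarity, a runnable sketch of the one-line fix from the patch above, using a stand-in class instead of the real `spdx.file.File`:

```python
class FileStandIn:
    """Illustrative stand-in for spdx.file.File."""
    comment = None
    attribution_text = None

def set_file_attribution_text(spdx_file, text):
    # Before the patch the builder did: spdx_file.comment = text,
    # silently storing the attribution text in the comment field.
    spdx_file.attribution_text = text

f = FileStandIn()
set_file_attribution_text(f, "Acknowledgements ...")
assert f.comment is None and f.attribution_text == "Acknowledgements ..."
```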
0.0
328a816a5bb8ee43d4031c8df0a2655f571b532c
[ "tests/test_tag_value_parser.py::TestParser::test_file" ]
[ "tests/test_tag_value_parser.py::TestLexer::test_creation_info", "tests/test_tag_value_parser.py::TestLexer::test_document", "tests/test_tag_value_parser.py::TestLexer::test_external_document_references", "tests/test_tag_value_parser.py::TestLexer::test_pacakage", "tests/test_tag_value_parser.py::TestLexer::test_review_info", "tests/test_tag_value_parser.py::TestLexer::test_snippet", "tests/test_tag_value_parser.py::TestLexer::test_unknown_tag", "tests/test_tag_value_parser.py::TestParser::test_creation_info", "tests/test_tag_value_parser.py::TestParser::test_doc", "tests/test_tag_value_parser.py::TestParser::test_package", "tests/test_tag_value_parser.py::TestParser::test_review", "tests/test_tag_value_parser.py::TestParser::test_snippet", "tests/test_tag_value_parser.py::TestParser::test_unknown_tag" ]
{ "failed_lite_validators": [ "has_short_problem_statement" ], "has_test_patch": true, "is_lite": false }
2022-11-11 15:06:10+00:00
apache-2.0
5,620
spdx__tools-python-390
diff --git a/src/model/relationship.py b/src/model/relationship.py index 1b34fe0..88026c9 100644 --- a/src/model/relationship.py +++ b/src/model/relationship.py @@ -9,8 +9,10 @@ # See the License for the specific language governing permissions and # limitations under the License. from enum import auto, Enum -from typing import Optional +from typing import Optional, Union +from src.model.spdx_no_assertion import SpdxNoAssertion +from src.model.spdx_none import SpdxNone from src.model.typing.dataclass_with_properties import dataclass_with_properties from src.model.typing.type_checks import check_types_and_set_values @@ -67,9 +69,9 @@ class RelationshipType(Enum): class Relationship: spdx_element_id: str relationship_type: RelationshipType - related_spdx_element_id: str + related_spdx_element_id: Union[str, SpdxNone, SpdxNoAssertion] comment: Optional[str] = None - def __init__(self, spdx_element_id: str, relationship_type: RelationshipType, related_spdx_element_id: str, - comment: Optional[str] = None): + def __init__(self, spdx_element_id: str, relationship_type: RelationshipType, + related_spdx_element_id: Union[str, SpdxNone, SpdxNoAssertion], comment: Optional[str] = None): check_types_and_set_values(self, locals()) diff --git a/src/parser/json/relationship_parser.py b/src/parser/json/relationship_parser.py index defecd3..e2c910e 100644 --- a/src/parser/json/relationship_parser.py +++ b/src/parser/json/relationship_parser.py @@ -15,7 +15,7 @@ from src.model.typing.constructor_type_errors import ConstructorTypeErrors from src.parser.error import SPDXParsingError from src.parser.json.dict_parsing_functions import raise_parsing_error_if_logger_has_messages, json_str_to_enum_name, \ construct_or_raise_parsing_error, \ - parse_field_or_log_error, parse_list_of_elements + parse_field_or_log_error, parse_field_or_no_assertion_or_none from src.parser.logger import Logger @@ -58,7 +58,7 @@ class RelationshipParser: def parse_relationship(self, relationship_dict: Dict) -> Relationship: logger = Logger() spdx_element_id: Optional[str] = relationship_dict.get("spdxElementId") - related_spdx_element: Optional[str] = relationship_dict.get("relatedSpdxElement") + related_spdx_element: Optional[str] = parse_field_or_no_assertion_or_none(relationship_dict.get("relatedSpdxElement")) relationship_type: Optional[RelationshipType] = parse_field_or_log_error(logger, relationship_dict.get( "relationshipType"), self.parse_relationship_type) relationship_comment: Optional[str] = relationship_dict.get("comment") diff --git a/src/validation/relationship_validator.py b/src/validation/relationship_validator.py index a929225..edf8754 100644 --- a/src/validation/relationship_validator.py +++ b/src/validation/relationship_validator.py @@ -9,10 +9,12 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-from typing import List +from typing import List, Union from src.model.document import Document from src.model.relationship import Relationship, RelationshipType +from src.model.spdx_no_assertion import SpdxNoAssertion +from src.model.spdx_none import SpdxNone from src.validation.spdx_id_validators import validate_spdx_id from src.validation.validation_message import ValidationMessage, ValidationContext, SpdxElementType @@ -30,12 +32,14 @@ def validate_relationship(relationship: Relationship, document: Document, spdx_v context = ValidationContext(element_type=SpdxElementType.RELATIONSHIP, full_element=relationship) - first_id: str = relationship.spdx_element_id - second_id: str = relationship.related_spdx_element_id relationship_type: RelationshipType = relationship.relationship_type - for spdx_id in [first_id, second_id]: - messages: List[str] = validate_spdx_id(spdx_id, document, check_document=True) + messages: List[str] = validate_spdx_id(relationship.spdx_element_id, document, check_document=True) + for message in messages: + validation_messages.append(ValidationMessage(message, context)) + + if relationship.related_spdx_element_id not in [SpdxNone(), SpdxNoAssertion()]: + messages: List[str] = validate_spdx_id(relationship.related_spdx_element_id, document, check_document=True) for message in messages: validation_messages.append(ValidationMessage(message, context))
spdx/tools-python
f3bca2d7601acf2dfa8a130912af02e4caf85066
diff --git a/tests/model/test_relationship.py b/tests/model/test_relationship.py index 4d0a0a6..c3b99a6 100644 --- a/tests/model/test_relationship.py +++ b/tests/model/test_relationship.py @@ -1,19 +1,20 @@ import pytest from src.model.relationship import Relationship, RelationshipType +from src.model.spdx_no_assertion import SpdxNoAssertion def test_correct_initialization(): - relationship = Relationship("id", RelationshipType.OTHER, "other_id", "comment") + relationship = Relationship("id", RelationshipType.OTHER, SpdxNoAssertion(), "comment") assert relationship.spdx_element_id == "id" assert relationship.relationship_type == RelationshipType.OTHER - assert relationship.related_spdx_element_id == "other_id" + assert relationship.related_spdx_element_id == SpdxNoAssertion() assert relationship.comment == "comment" def test_wrong_type_in_spdx_element_id(): with pytest.raises(TypeError): - Relationship(42, RelationshipType.OTHER, "other_id") + Relationship(SpdxNoAssertion(), RelationshipType.OTHER, "other_id") def test_wrong_type_in_relationship_type(): diff --git a/tests/parser/test_relationship_parser.py b/tests/parser/test_relationship_parser.py index d1afa64..27f2c4f 100644 --- a/tests/parser/test_relationship_parser.py +++ b/tests/parser/test_relationship_parser.py @@ -13,6 +13,7 @@ from unittest import TestCase import pytest from src.model.relationship import RelationshipType, Relationship +from src.model.spdx_no_assertion import SpdxNoAssertion from src.parser.error import SPDXParsingError from src.parser.json.relationship_parser import RelationshipParser @@ -23,7 +24,7 @@ def test_parse_relationship(): relationship_dict = { "spdxElementId": "SPDXRef-DOCUMENT", "relationshipType": "CONTAINS", - "relatedSpdxElement": "SPDXRef-Package", + "relatedSpdxElement": "NOASSERTION", "comment": "Comment." } @@ -31,7 +32,7 @@ def test_parse_relationship(): assert relationship.relationship_type == RelationshipType.CONTAINS assert relationship.spdx_element_id == "SPDXRef-DOCUMENT" - assert relationship.related_spdx_element_id == "SPDXRef-Package" + assert relationship.related_spdx_element_id == SpdxNoAssertion() assert relationship.comment == "Comment." 
diff --git a/tests/validation/test_relationship_validator.py b/tests/validation/test_relationship_validator.py index 1d56bd6..2e2a08b 100644 --- a/tests/validation/test_relationship_validator.py +++ b/tests/validation/test_relationship_validator.py @@ -15,15 +15,19 @@ import pytest from src.model.document import Document from src.model.relationship import Relationship, RelationshipType +from src.model.spdx_no_assertion import SpdxNoAssertion +from src.model.spdx_none import SpdxNone from src.validation.relationship_validator import validate_relationship from src.validation.validation_message import ValidationMessage, SpdxElementType, ValidationContext from tests.valid_defaults import get_document, get_package, get_relationship, get_file -def test_valid_relationship(): [email protected]("related_spdx_element", + ["SPDXRef-Package", SpdxNoAssertion(), SpdxNone()]) +def test_valid_relationship(related_spdx_element): document: Document = get_document(packages=[get_package(spdx_id="SPDXRef-Package")]) - relationship = Relationship("SPDXRef-DOCUMENT", RelationshipType.AMENDS, "SPDXRef-Package", comment="comment") + relationship = Relationship("SPDXRef-DOCUMENT", RelationshipType.AMENDS, related_spdx_element, comment="comment") validation_messages: List[ValidationMessage] = validate_relationship(relationship, document, "2.3") assert validation_messages == []
relationship validation must allow NONE and NOASSERTION

The spec explicitly allows the keywords `NOASSERTION` or `NONE` on the right-hand side of a relationship (see [here](https://spdx.github.io/spdx-spec/v2.3/relationships-between-SPDX-elements/)). When trying to parse the example file `SPDXJSONExample-v2.3.spdx.json`, the validation returns the following messages:
```
ValidationMessage(validation_message='spdx_id must only contain letters, numbers, "." and "-" and must begin with "SPDXRef-", but is: NOASSERTION', context=ValidationContext(spdx_id=None, parent_id=None, element_type=<SpdxElementType.RELATIONSHIP: 13>, full_element=Relationship(spdx_element_id='SPDXRef-CommonsLangSrc', relationship_type=<RelationshipType.GENERATED_FROM: 25>, related_spdx_element_id='NOASSERTION', comment=None))),
ValidationMessage(validation_message='did not find the referenced spdx_id NOASSERTION in the SPDX document', context=ValidationContext(spdx_id=None, parent_id=None, element_type=<SpdxElementType.RELATIONSHIP: 13>, full_element=Relationship(spdx_element_id='SPDXRef-CommonsLangSrc', relationship_type=<RelationshipType.GENERATED_FROM: 25>, related_spdx_element_id='NOASSERTION', comment=None)))
```
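For illustration, here is a minimal sketch of the behavior the patch establishes: keyword values on the right-hand side bypass SPDX-ID validation entirely. The real fix models `NONE`/`NOASSERTION` as sentinel classes (`SpdxNone`, `SpdxNoAssertion`); this sketch uses raw strings and a hypothetical ID pattern for brevity, so treat the names as stand-ins rather than the project's actual API.

```python
import re

# Hypothetical stand-ins for the project's sentinels and SPDX-ID rules.
KEYWORD_VALUES = {"NONE", "NOASSERTION"}                 # explicitly allowed by the spec
SPDX_ID_PATTERN = re.compile(r"^SPDXRef-[A-Za-z0-9.\-]+$")

def related_element_errors(related_spdx_element: str) -> list:
    if related_spdx_element in KEYWORD_VALUES:
        return []  # keyword values are valid and need no ID check
    if not SPDX_ID_PATTERN.match(related_spdx_element):
        return [f"invalid spdx_id: {related_spdx_element}"]
    return []

print(related_element_errors("NOASSERTION"))      # []
print(related_element_errors("SPDXRef-Package"))  # []
print(related_element_errors("not an id"))        # one error message
```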
0.0
f3bca2d7601acf2dfa8a130912af02e4caf85066
[ "tests/model/test_relationship.py::test_correct_initialization", "tests/parser/test_relationship_parser.py::test_parse_relationship", "tests/validation/test_relationship_validator.py::test_valid_relationship[related_spdx_element1]", "tests/validation/test_relationship_validator.py::test_valid_relationship[related_spdx_element2]" ]
[ "tests/parser/test_relationship_parser.py::test_parse_relationship_type", "tests/parser/test_relationship_parser.py::test_parse_document_describes", "tests/parser/test_relationship_parser.py::test_parse_document_describes_without_duplicating_relationships", "tests/parser/test_relationship_parser.py::test_parse_has_files", "tests/parser/test_relationship_parser.py::test_parse_has_files_without_duplicating_relationships", "tests/validation/test_relationship_validator.py::test_valid_relationship[SPDXRef-Package]", "tests/validation/test_relationship_validator.py::test_unknown_spdx_id[SPDXRef-unknownFile-SPDXRef-File-did", "tests/validation/test_relationship_validator.py::test_unknown_spdx_id[SPDXRef-File-SPDXRef-unknownFile-did", "tests/validation/test_relationship_validator.py::test_v2_3_only_types[relationship0-RelationshipType.SPECIFICATION_FOR", "tests/validation/test_relationship_validator.py::test_v2_3_only_types[relationship1-RelationshipType.REQUIREMENT_DESCRIPTION_FOR" ]
{ "failed_lite_validators": [ "has_hyperlinks", "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false }
2022-12-29 07:40:39+00:00
apache-2.0
5,621
spdx__tools-python-404
diff --git a/src/validation/creation_info_validator.py b/src/validation/creation_info_validator.py index 5f7a427..50446f8 100644 --- a/src/validation/creation_info_validator.py +++ b/src/validation/creation_info_validator.py @@ -24,14 +24,6 @@ def validate_creation_info(creation_info: CreationInfo) -> List[ValidationMessag context = ValidationContext(spdx_id=creation_info.spdx_id, element_type=SpdxElementType.DOCUMENT) - if not re.match(r"^SPDX-\d+.\d+$", creation_info.spdx_version): - validation_messages.append( - ValidationMessage( - f'spdx_version must be of the form "SPDX-[major].[minor]" but is: {creation_info.spdx_version}', - context - ) - ) - if creation_info.spdx_id != "SPDXRef-DOCUMENT": validation_messages.append( ValidationMessage( diff --git a/src/validation/document_validator.py b/src/validation/document_validator.py index bf2c114..cab7b09 100644 --- a/src/validation/document_validator.py +++ b/src/validation/document_validator.py @@ -8,7 +8,7 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. - +import re from typing import List from src.model.document import Document @@ -24,9 +24,34 @@ from src.validation.snippet_validator import validate_snippets from src.validation.validation_message import ValidationMessage, ValidationContext, SpdxElementType -def validate_full_spdx_document(document: Document, spdx_version: str) -> List[ValidationMessage]: +def validate_full_spdx_document(document: Document, spdx_version: str = None) -> List[ValidationMessage]: validation_messages: List[ValidationMessage] = [] + # SPDX version validation has to happen here because subsequent validators rely on it + document_version: str = document.creation_info.spdx_version + context = ValidationContext(spdx_id=document.creation_info.spdx_id, element_type=SpdxElementType.DOCUMENT) + if not spdx_version: + spdx_version = document_version + + if not re.match(r"^SPDX-\d+.\d+$", document_version): + validation_messages.append( + ValidationMessage( + f'the document\'s spdx_version must be of the form "SPDX-[major].[minor]" but is: {document_version}', + context + ) + ) + elif spdx_version != document_version: + validation_messages.append( + ValidationMessage(f"provided SPDX version {spdx_version} does not match " + f"the document's SPDX version {document_version}", context) + ) + + if validation_messages: + validation_messages.append(ValidationMessage("There are issues concerning the SPDX version of the document. 
" + "As subsequent validation relies on the correct version, " + "the validation process has been cancelled.", context)) + return validation_messages + validation_messages.extend(validate_creation_info(document.creation_info)) validation_messages.extend(validate_packages(document.packages, document)) validation_messages.extend(validate_files(document.files, document)) diff --git a/src/validation/relationship_validator.py b/src/validation/relationship_validator.py index 87da7f9..bace552 100644 --- a/src/validation/relationship_validator.py +++ b/src/validation/relationship_validator.py @@ -43,9 +43,9 @@ def validate_relationship(relationship: Relationship, document: Document, spdx_v for message in messages: validation_messages.append(ValidationMessage(message, context)) - if spdx_version != "2.3": + if spdx_version != "SPDX-2.3": if relationship_type == RelationshipType.SPECIFICATION_FOR or relationship_type == RelationshipType.REQUIREMENT_DESCRIPTION_FOR: validation_messages.append( - ValidationMessage(f"{relationship_type} is not supported for SPDX versions below 2.3", context)) + ValidationMessage(f"{relationship_type} is not supported for SPDX versions below SPDX-2.3", context)) return validation_messages diff --git a/src/writer/json/json_writer.py b/src/writer/json/json_writer.py index 5083c8c..fbde9ad 100644 --- a/src/writer/json/json_writer.py +++ b/src/writer/json/json_writer.py @@ -24,8 +24,7 @@ def write_document(document: Document, file_name: str, validate: bool = True, co a new one is created. """ if validate: - validation_messages: List[ValidationMessage] = validate_full_spdx_document(document, - document.creation_info.spdx_version) + validation_messages: List[ValidationMessage] = validate_full_spdx_document(document) if validation_messages: raise ValueError(f"Document is not valid. The following errors were detected: {validation_messages}") if converter is None:
spdx/tools-python
35ed3d88ac9e97700d02f893929de1c019069d3d
diff --git a/tests/validation/test_creation_info_validator.py b/tests/validation/test_creation_info_validator.py index 5de0416..8f0d661 100644 --- a/tests/validation/test_creation_info_validator.py +++ b/tests/validation/test_creation_info_validator.py @@ -27,9 +27,7 @@ def test_valid_creation_info(): @pytest.mark.parametrize \ ("creation_info_input, spdx_id, expected_message", - [(creation_info_fixture(spdx_version="version-2.3"), "SPDXRef-DOCUMENT", - 'spdx_version must be of the form "SPDX-[major].[minor]" but is: version-2.3'), - (creation_info_fixture(spdx_id="SPDXRef-doc"), "SPDXRef-doc", + [(creation_info_fixture(spdx_id="SPDXRef-doc"), "SPDXRef-doc", 'spdx_id must be "SPDXRef-DOCUMENT", but is: SPDXRef-doc'), (creation_info_fixture(data_license="MIT"), "SPDXRef-DOCUMENT", 'data_license must be "CC0-1.0", but is: MIT'), diff --git a/tests/validation/test_document_validator.py b/tests/validation/test_document_validator.py index 637c6ad..a7f2d7a 100644 --- a/tests/validation/test_document_validator.py +++ b/tests/validation/test_document_validator.py @@ -9,17 +9,50 @@ # See the License for the specific language governing permissions and # limitations under the License. -from typing import List +from typing import List, Optional +import pytest + +from src.model.document import Document, CreationInfo from src.validation.document_validator import validate_full_spdx_document -from src.validation.validation_message import ValidationMessage -from tests.fixtures import document_fixture +from src.validation.validation_message import ValidationMessage, ValidationContext, SpdxElementType +from tests.fixtures import document_fixture, creation_info_fixture def test_valid_document(): document = document_fixture() - validation_messages: List[ValidationMessage] = validate_full_spdx_document(document, "2.3") + validation_messages: List[ValidationMessage] = validate_full_spdx_document(document) assert validation_messages == [] -# TODO: https://github.com/spdx/tools-python/issues/375 + [email protected]("creation_info, version_input, expected_message", + [(creation_info_fixture(spdx_version="SPDX-2.3"), "SPDX-2.3", None), + (creation_info_fixture(spdx_version="SPDX-2.3"), None, None), + (creation_info_fixture(spdx_version="SPDX-2.3"), "SPDX-2.2", + "provided SPDX version SPDX-2.2 does not match the document's SPDX version SPDX-2.3"), + (creation_info_fixture(spdx_version="SPDX-2.3"), "SPDX2.3", + "provided SPDX version SPDX2.3 does not match the document's SPDX version SPDX-2.3"), + (creation_info_fixture(spdx_version="SPDX2.3"), "SPDX-2.3", + 'the document\'s spdx_version must be of the form "SPDX-[major].[minor]" but is: SPDX2.3'), + (creation_info_fixture(spdx_version="SPDX2.3"), None, + 'the document\'s spdx_version must be of the form "SPDX-[major].[minor]" but is: SPDX2.3'), + (creation_info_fixture(spdx_version="SPDX2.3"), "SPDX2.3", + 'the document\'s spdx_version must be of the form "SPDX-[major].[minor]" but is: SPDX2.3'), + ]) +def test_spdx_version_handling(creation_info: CreationInfo, version_input: str, expected_message: Optional[str]): + document: Document = document_fixture(creation_info=creation_info) + validation_messages: List[ValidationMessage] = validate_full_spdx_document(document, version_input) + + context = ValidationContext(spdx_id=creation_info.spdx_id, element_type=SpdxElementType.DOCUMENT) + expected: List[ValidationMessage] = [] + + if expected_message: + expected.append(ValidationMessage(expected_message, context)) + expected.append(ValidationMessage("There are issues 
concerning the SPDX version of the document. " + "As subsequent validation relies on the correct version, " + "the validation process has been cancelled.", context)) + + assert validation_messages == expected + + # TODO: https://github.com/spdx/tools-python/issues/375 diff --git a/tests/validation/test_relationship_validator.py b/tests/validation/test_relationship_validator.py index 7066555..1d164e2 100644 --- a/tests/validation/test_relationship_validator.py +++ b/tests/validation/test_relationship_validator.py @@ -40,7 +40,7 @@ def test_valid_relationship(related_spdx_element): def test_unknown_spdx_id(spdx_element_id, related_spdx_element_id, expected_message): relationship: Relationship = relationship_fixture(spdx_element_id=spdx_element_id, related_spdx_element_id=related_spdx_element_id) - validation_messages: List[ValidationMessage] = validate_relationship(relationship, document_fixture(), "2.3") + validation_messages: List[ValidationMessage] = validate_relationship(relationship, document_fixture(), "SPDX-2.3") expected = ValidationMessage(expected_message, ValidationContext(element_type=SpdxElementType.RELATIONSHIP, @@ -51,14 +51,14 @@ def test_unknown_spdx_id(spdx_element_id, related_spdx_element_id, expected_mess @pytest.mark.parametrize("relationship, expected_message", [(Relationship("SPDXRef-DOCUMENT", RelationshipType.SPECIFICATION_FOR, "SPDXRef-Package"), - "RelationshipType.SPECIFICATION_FOR is not supported for SPDX versions below 2.3"), + "RelationshipType.SPECIFICATION_FOR is not supported for SPDX versions below SPDX-2.3"), (Relationship("SPDXRef-DOCUMENT", RelationshipType.REQUIREMENT_DESCRIPTION_FOR, "SPDXRef-Package"), - "RelationshipType.REQUIREMENT_DESCRIPTION_FOR is not supported for SPDX versions below 2.3")]) + "RelationshipType.REQUIREMENT_DESCRIPTION_FOR is not supported for SPDX versions below SPDX-2.3")]) def test_v2_3_only_types(relationship, expected_message): document: Document = document_fixture() - validation_message: List[ValidationMessage] = validate_relationship(relationship, document, "2.2") + validation_message: List[ValidationMessage] = validate_relationship(relationship, document, "SPDX-2.2") expected = [ValidationMessage(expected_message, ValidationContext(element_type=SpdxElementType.RELATIONSHIP,
Use document SPDX version as default for validation

An optional `version` parameter can be given to the document validator to override the document's own version. In case of a discrepancy, a validation error will be logged.
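A condensed sketch of the version-defaulting logic the patch introduces is shown below. The function name and plain-string return values are simplifications (the real validator returns `ValidationMessage` objects with a `ValidationContext`), and the regex is the escaped form of the pattern used in the patch.

```python
import re
from typing import List, Optional

def check_spdx_version(document_version: str, spdx_version: Optional[str] = None) -> List[str]:
    """Fall back to the document's own version; report a mismatch
    only when an explicit version was supplied by the caller."""
    errors: List[str] = []
    if spdx_version is None:
        spdx_version = document_version
    if not re.match(r"^SPDX-\d+\.\d+$", document_version):
        errors.append(f'document version must be of the form "SPDX-[major].[minor]", but is: {document_version}')
    elif spdx_version != document_version:
        errors.append(f"provided SPDX version {spdx_version} does not match the document's SPDX version {document_version}")
    return errors

print(check_spdx_version("SPDX-2.3"))               # []
print(check_spdx_version("SPDX-2.3", "SPDX-2.2"))   # mismatch reported
print(check_spdx_version("SPDX2.3"))                # malformed version reported
```

Because subsequent validators rely on a well-formed version, the patch aborts the whole validation run when this check fails, which is why it happens first in `validate_full_spdx_document`.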
0.0
35ed3d88ac9e97700d02f893929de1c019069d3d
[ "tests/validation/test_document_validator.py::test_valid_document", "tests/validation/test_document_validator.py::test_spdx_version_handling[creation_info2-SPDX-2.2-provided", "tests/validation/test_document_validator.py::test_spdx_version_handling[creation_info3-SPDX2.3-provided", "tests/validation/test_document_validator.py::test_spdx_version_handling[creation_info4-SPDX-2.3-the", "tests/validation/test_document_validator.py::test_spdx_version_handling[creation_info5-None-the", "tests/validation/test_document_validator.py::test_spdx_version_handling[creation_info6-SPDX2.3-the", "tests/validation/test_relationship_validator.py::test_v2_3_only_types[relationship0-RelationshipType.SPECIFICATION_FOR", "tests/validation/test_relationship_validator.py::test_v2_3_only_types[relationship1-RelationshipType.REQUIREMENT_DESCRIPTION_FOR" ]
[ "tests/validation/test_creation_info_validator.py::test_valid_creation_info", "tests/validation/test_creation_info_validator.py::test_invalid_creation_info[creation_info_input0-SPDXRef-doc-spdx_id", "tests/validation/test_creation_info_validator.py::test_invalid_creation_info[creation_info_input1-SPDXRef-DOCUMENT-data_license", "tests/validation/test_creation_info_validator.py::test_invalid_creation_info[creation_info_input2-SPDXRef-DOCUMENT-document_namespace", "tests/validation/test_document_validator.py::test_spdx_version_handling[creation_info0-SPDX-2.3-None]", "tests/validation/test_document_validator.py::test_spdx_version_handling[creation_info1-None-None]", "tests/validation/test_relationship_validator.py::test_valid_relationship[SPDXRef-Package]", "tests/validation/test_relationship_validator.py::test_valid_relationship[related_spdx_element1]", "tests/validation/test_relationship_validator.py::test_valid_relationship[related_spdx_element2]", "tests/validation/test_relationship_validator.py::test_unknown_spdx_id[SPDXRef-unknownFile-SPDXRef-File-did", "tests/validation/test_relationship_validator.py::test_unknown_spdx_id[SPDXRef-File-SPDXRef-unknownFile-did" ]
{ "failed_lite_validators": [ "has_short_problem_statement", "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false }
2023-01-03 08:34:32+00:00
apache-2.0
5,622
spdx__tools-python-439
diff --git a/src/spdx/model/package.py b/src/spdx/model/package.py index bbeff65..0a62170 100644 --- a/src/spdx/model/package.py +++ b/src/spdx/model/package.py @@ -11,7 +11,7 @@ from dataclasses import field from datetime import datetime from enum import Enum, auto -from typing import Optional, Union, List +from typing import Optional, Union, List, Dict from spdx.model.actor import Actor from spdx.model.checksum import Checksum @@ -54,6 +54,13 @@ class ExternalPackageRefCategory(Enum): OTHER = auto() +CATEGORY_TO_EXTERNAL_PACKAGE_REF_TYPES: Dict[ExternalPackageRefCategory, List[str]] = { + ExternalPackageRefCategory.SECURITY : ["cpe22Type", "cpe23Type", "advisory", "fix", "url", "swid"], + ExternalPackageRefCategory.PACKAGE_MANAGER : ["maven-central", "npm", "nuget", "bower", "purl"], + ExternalPackageRefCategory.PERSISTENT_ID : ["swh", "gitoid"] +} + + @dataclass_with_properties class ExternalPackageRef: category: ExternalPackageRefCategory diff --git a/src/spdx/validation/external_package_ref_validator.py b/src/spdx/validation/external_package_ref_validator.py index 10ff0ee..7be94f6 100644 --- a/src/spdx/validation/external_package_ref_validator.py +++ b/src/spdx/validation/external_package_ref_validator.py @@ -8,11 +8,34 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. +import re +from typing import List, Dict -from typing import List +from spdx.model.package import ExternalPackageRef, ExternalPackageRefCategory, CATEGORY_TO_EXTERNAL_PACKAGE_REF_TYPES +from spdx.validation.uri_validators import validate_url, validate_uri +from spdx.validation.validation_message import ValidationMessage, ValidationContext, SpdxElementType -from spdx.model.package import ExternalPackageRef -from spdx.validation.validation_message import ValidationMessage +CPE22TYPE_REGEX = r'^c[pP][eE]:/[AHOaho]?(:[A-Za-z0-9._\-~%]*){0,6}$' +CPE23TYPE_REGEX = r'^cpe:2\.3:[aho\*\-](:(((\?*|\*?)([a-zA-Z0-9\-\._]|(\\[\\\*\?!"#$$%&\'\(\)\+,\/:;<=>@\[\]\^`\{\|}~]))+(\?*|\*?))|[\*\-])){5}(:(([a-zA-Z]{2,3}(-([a-zA-Z]{2}|[0-9]{3}))?)|[\*\-]))(:(((\?*|\*?)([a-zA-Z0-9\-\._]|(\\[\\\*\?!"#$$%&\'\(\)\+,\/:;<=>@\[\]\^`\{\|}~]))+(\?*|\*?))|[\*\-])){4}$' +MAVEN_CENTRAL_REGEX = r'^[^:]+:[^:]+(:[^:]+)?$' +NPM_REGEX = r'^[^@]+@[^@]+$' +NUGET_REGEX = r'^[^/]+/[^/]+$' +BOWER_REGEX = r'^[^#]+#[^#]+$' +PURL_REGEX = r'^pkg:.+(\/.+)?\/.+(@.+)?(\?.+)?(#.+)?$' +SWH_REGEX = r'^swh:1:(snp|rel|rev|dir|cnt):[0-9a-fA-F]{40}$' +GITOID_REGEX = r'^gitoid:(blob|tree|commit|tag):(sha1:[0-9a-fA-F]{40}|sha256:[0-9a-fA-F]{64})$' + +TYPE_TO_REGEX: Dict[str, str] = { + "cpe22Type": CPE22TYPE_REGEX, + "cpe23Type": CPE23TYPE_REGEX, + "maven-central": MAVEN_CENTRAL_REGEX, + "npm": NPM_REGEX, + "nuget": NUGET_REGEX, + "bower": BOWER_REGEX, + "purl": PURL_REGEX, + "swh": SWH_REGEX, + "gitoid": GITOID_REGEX +} def validate_external_package_refs(external_package_refs: List[ExternalPackageRef], parent_id: str) -> List[ @@ -25,5 +48,47 @@ def validate_external_package_refs(external_package_refs: List[ExternalPackageRe def validate_external_package_ref(external_package_ref: ExternalPackageRef, parent_id: str) -> List[ValidationMessage]: - # TODO: https://github.com/spdx/tools-python/issues/373 + context = ValidationContext(parent_id=parent_id, element_type=SpdxElementType.EXTERNAL_PACKAGE_REF, + full_element=external_package_ref) + + category = external_package_ref.category + locator = external_package_ref.locator + reference_type = 
external_package_ref.reference_type + + if category == ExternalPackageRefCategory.OTHER: + if " " in locator: + return [ValidationMessage( + f"externalPackageRef locator in category OTHER must contain no spaces, but is: {locator}", + context)] + return [] + + if reference_type not in CATEGORY_TO_EXTERNAL_PACKAGE_REF_TYPES[category]: + return [ValidationMessage( + f"externalPackageRef type in category {category.name} must be one of {CATEGORY_TO_EXTERNAL_PACKAGE_REF_TYPES[category]}, but is: {reference_type}", + context)] + + if reference_type in ["advisory", "fix", "url"]: + if validate_url(locator): + return [ValidationMessage( + f'externalPackageRef locator of type "{reference_type}" must be a valid URL, but is: {locator}', + context)] + return [] + + if reference_type == "swid": + if validate_uri(locator) or not locator.startswith("swid"): + return [ValidationMessage( + f'externalPackageRef locator of type "swid" must be a valid URI with scheme swid, but is: {locator}', + context)] + return [] + + return validate_against_regex(locator, reference_type, context) + + +def validate_against_regex(string_to_validate: str, reference_type: str, context: ValidationContext) -> List[ + ValidationMessage]: + regex = TYPE_TO_REGEX[reference_type] + if not re.match(regex, string_to_validate): + return [ValidationMessage( + f'externalPackageRef locator of type "{reference_type}" must conform with the regex {regex}, but is: {string_to_validate}', + context)] return []
spdx/tools-python
9ae0ebc9e18b549c2106579e8bde1be84dc4b7f0
diff --git a/tests/spdx/validation/test_external_package_ref_validator.py b/tests/spdx/validation/test_external_package_ref_validator.py index ee0c386..f0085c8 100644 --- a/tests/spdx/validation/test_external_package_ref_validator.py +++ b/tests/spdx/validation/test_external_package_ref_validator.py @@ -13,25 +13,137 @@ from typing import List import pytest -from spdx.validation.external_package_ref_validator import validate_external_package_ref +from spdx.model.package import ExternalPackageRef, ExternalPackageRefCategory +from spdx.validation.external_package_ref_validator import validate_external_package_ref, CPE22TYPE_REGEX, \ + CPE23TYPE_REGEX, MAVEN_CENTRAL_REGEX, NPM_REGEX, NUGET_REGEX, BOWER_REGEX, PURL_REGEX, SWH_REGEX, GITOID_REGEX from spdx.validation.validation_message import ValidationMessage, ValidationContext, SpdxElementType -from tests.spdx.fixtures import external_package_ref_fixture -def test_valid_external_package_ref(): - external_package_ref = external_package_ref_fixture() [email protected]("category, reference_type, locator", + [(ExternalPackageRefCategory.SECURITY, "cpe22Type", + "cpe:/o:canonical:ubuntu_linux:10.04:-:lts"), + (ExternalPackageRefCategory.SECURITY, "cpe23Type", + "cpe:2.3:o:canonical:ubuntu_linux:10.04:-:lts:*:*:*:*:*"), + (ExternalPackageRefCategory.SECURITY, "advisory", + "https://nvd.nist.gov/vuln/detail/CVE-2020-28498"), + (ExternalPackageRefCategory.SECURITY, "fix", + "https://github.com/indutny/elliptic/commit/441b7428"), + (ExternalPackageRefCategory.SECURITY, "url", + "https://github.com/christianlundkvist/blog/blob/master/2020_05_26_secp256k1_twist_attacks/secp256k1_twist_attacks.md"), + (ExternalPackageRefCategory.SECURITY, "swid", "swid:2df9de35-0aff-4a86-ace6-f7dddd1ade4c"), + (ExternalPackageRefCategory.PACKAGE_MANAGER, "maven-central", + "org.apache.tomcat:tomcat:9.0.0.M4"), + (ExternalPackageRefCategory.PACKAGE_MANAGER, "npm", "[email protected]"), + (ExternalPackageRefCategory.PACKAGE_MANAGER, "nuget", "Microsoft.AspNet.MVC/5.0.0"), + (ExternalPackageRefCategory.PACKAGE_MANAGER, "bower", "modernizr#2.6.2"), + (ExternalPackageRefCategory.PACKAGE_MANAGER, "purl", + "pkg:docker/debian@sha256:2f04d3d33b6027bb74ecc81397abe780649ec89f1a2af18d7022737d0482cefe"), + (ExternalPackageRefCategory.PACKAGE_MANAGER, "purl", + "pkg:bitbucket/birkenfeld/pygments-main@244fd47e07d1014f0aed9c"), + (ExternalPackageRefCategory.PACKAGE_MANAGER, "purl", + "pkg:deb/debian/[email protected]?arch=i386&distro=jessie"), + (ExternalPackageRefCategory.PACKAGE_MANAGER, "purl", + "pkg:docker/customer/dockerimage@sha256:244fd47e07d1004f0aed9c?repository_url=gcr.io"), + (ExternalPackageRefCategory.PACKAGE_MANAGER, "purl", + "pkg:gem/[email protected]?platform=java"), + (ExternalPackageRefCategory.PACKAGE_MANAGER, "purl", "pkg:gem/[email protected]"), + (ExternalPackageRefCategory.PACKAGE_MANAGER, "purl", + "pkg:github/package-url/purl-spec@244fd47e07d1004f0aed9c"), + (ExternalPackageRefCategory.PACKAGE_MANAGER, "purl", + "pkg:golang/google.golang.org/genproto#googleapis/api/annotations"), + (ExternalPackageRefCategory.PACKAGE_MANAGER, "purl", + "pkg:maven/org.apache.xmlgraphics/[email protected]?repository_url=repo.spring.io%2Frelease"), + (ExternalPackageRefCategory.PACKAGE_MANAGER, "purl", "pkg:npm/%40angular/[email protected]"), + (ExternalPackageRefCategory.PACKAGE_MANAGER, "purl", + "pkg:nuget/[email protected]"), + (ExternalPackageRefCategory.PACKAGE_MANAGER, "purl", + "pkg:rpm/fedora/[email protected]?arch=i386&distro=fedora-25"), + 
(ExternalPackageRefCategory.PERSISTENT_ID, "swh", + "swh:1:cnt:94a9ed024d3859793618152ea559a168bbcbb5e2"), + (ExternalPackageRefCategory.PERSISTENT_ID, "swh", + "swh:1:dir:d198bc9d7a6bcf6db04f476d29314f157507d505"), + (ExternalPackageRefCategory.PERSISTENT_ID, "swh", + "swh:1:rev:309cf2674ee7a0749978cf8265ab91a60aea0f7d"), + (ExternalPackageRefCategory.PERSISTENT_ID, "swh", + "swh:1:rel:22ece559cc7cc2364edc5e5593d63ae8bd229f9f"), + (ExternalPackageRefCategory.PERSISTENT_ID, "swh", + "swh:1:snp:c7c108084bc0bf3d81436bf980b46e98bd338453"), + (ExternalPackageRefCategory.PERSISTENT_ID, "gitoid", + "gitoid:blob:sha1:261eeb9e9f8b2b4b0d119366dda99c6fd7d35c64"), + (ExternalPackageRefCategory.PERSISTENT_ID, "gitoid", + "gitoid:blob:sha256:3557f7eb43c621c71483743d4b37059bb80933e7f71277c0c3b3846159d1f61c"), + (ExternalPackageRefCategory.OTHER, "some idstring", "#//string-withOUT!Spaces\\?") + ]) +def test_valid_external_package_ref(category, reference_type, locator): + external_package_ref = ExternalPackageRef(category, reference_type, locator, "externalPackageRef comment") validation_messages: List[ValidationMessage] = validate_external_package_ref(external_package_ref, "parent_id") assert validation_messages == [] [email protected]("external_package_ref, expected_message", - [(external_package_ref_fixture(), - "TBD"), [email protected]("category, reference_type, locator, expected_message", + [( + ExternalPackageRefCategory.SECURITY, "cpe22Typo", "cpe:/o:canonical:ubuntu_linux:10.04:-:lts", + "externalPackageRef type in category SECURITY must be one of ['cpe22Type', 'cpe23Type', 'advisory', 'fix', 'url', 'swid'], but is: cpe22Typo"), + (ExternalPackageRefCategory.PACKAGE_MANAGER, "nugat", + "cpe:/o:canonical:ubuntu_linux:10.04:-:lts", + "externalPackageRef type in category PACKAGE_MANAGER must be one of ['maven-central', 'npm', 'nuget', 'bower', 'purl'], but is: nugat"), + (ExternalPackageRefCategory.PERSISTENT_ID, "git-oid", + "cpe:/o:canonical:ubuntu_linux:10.04:-:lts", + "externalPackageRef type in category PERSISTENT_ID must be one of ['swh', 'gitoid'], but is: git-oid") + ]) +def test_invalid_external_package_ref_types(category, reference_type, locator, expected_message): + external_package_ref = ExternalPackageRef(category, reference_type, locator, "externalPackageRef comment") + parent_id = "SPDXRef-Package" + validation_messages: List[ValidationMessage] = validate_external_package_ref(external_package_ref, parent_id) + + expected = ValidationMessage(expected_message, + ValidationContext(parent_id=parent_id, + element_type=SpdxElementType.EXTERNAL_PACKAGE_REF, + full_element=external_package_ref)) + + assert validation_messages == [expected] + + [email protected]("category, reference_type, locator, expected_message", + [(ExternalPackageRefCategory.SECURITY, "cpe22Type", "cpe:o:canonical:ubuntu_linux:10.04:-:lts", + f'externalPackageRef locator of type "cpe22Type" must conform with the regex {CPE22TYPE_REGEX}, but is: cpe:o:canonical:ubuntu_linux:10.04:-:lts'), + (ExternalPackageRefCategory.SECURITY, "cpe23Type", + "cpe:2.3:/o:canonical:ubuntu_linux:10.04:-:lts:*:*:*:*:*", + f'externalPackageRef locator of type "cpe23Type" must conform with the regex {CPE23TYPE_REGEX}, but is: cpe:2.3:/o:canonical:ubuntu_linux:10.04:-:lts:*:*:*:*:*'), + (ExternalPackageRefCategory.SECURITY, "advisory", "http://locatorurl", + f'externalPackageRef locator of type "advisory" must be a valid URL, but is: http://locatorurl'), + (ExternalPackageRefCategory.SECURITY, "fix", "http://fixurl", + f'externalPackageRef 
locator of type "fix" must be a valid URL, but is: http://fixurl'), + (ExternalPackageRefCategory.SECURITY, "url", "http://url", + f'externalPackageRef locator of type "url" must be a valid URL, but is: http://url'), + (ExternalPackageRefCategory.SECURITY, "swid", "2df9de35-0aff-4a86-ace6-f7dddd1ade4c", + f'externalPackageRef locator of type "swid" must be a valid URI with scheme swid, but is: 2df9de35-0aff-4a86-ace6-f7dddd1ade4c'), + (ExternalPackageRefCategory.PACKAGE_MANAGER, "maven-central", + "org.apache.tomcat:tomcat:tomcat:9.0.0.M4", + f'externalPackageRef locator of type "maven-central" must conform with the regex {MAVEN_CENTRAL_REGEX}, but is: org.apache.tomcat:tomcat:tomcat:9.0.0.M4'), + (ExternalPackageRefCategory.PACKAGE_MANAGER, "npm", "http-server:0.3.0", + f'externalPackageRef locator of type "npm" must conform with the regex {NPM_REGEX}, but is: http-server:0.3.0'), + (ExternalPackageRefCategory.PACKAGE_MANAGER, "nuget", "[email protected]", + f'externalPackageRef locator of type "nuget" must conform with the regex {NUGET_REGEX}, but is: [email protected]'), + (ExternalPackageRefCategory.PACKAGE_MANAGER, "bower", "modernizr:2.6.2", + f'externalPackageRef locator of type "bower" must conform with the regex {BOWER_REGEX}, but is: modernizr:2.6.2'), + (ExternalPackageRefCategory.PACKAGE_MANAGER, "purl", "pkg:[email protected]", + f'externalPackageRef locator of type "purl" must conform with the regex {PURL_REGEX}, but is: pkg:[email protected]'), + (ExternalPackageRefCategory.PERSISTENT_ID, "swh", + "swh:cnt:94a9ed024d3859793618152ea559a168bbcbb5e2", + f'externalPackageRef locator of type "swh" must conform with the regex {SWH_REGEX}, but is: swh:cnt:94a9ed024d3859793618152ea559a168bbcbb5e2'), + (ExternalPackageRefCategory.PERSISTENT_ID, "gitoid", + "gitoid:blob:sha1:3557f7eb43c621c71483743d4b37059bb80933e7f71277c0c3b3846159d1f61c", + f'externalPackageRef locator of type "gitoid" must conform with the regex {GITOID_REGEX}, but is: gitoid:blob:sha1:3557f7eb43c621c71483743d4b37059bb80933e7f71277c0c3b3846159d1f61c'), + (ExternalPackageRefCategory.PERSISTENT_ID, "gitoid", + "gitoid:blob:sha256:261eeb9e9f8b2b4b0d119366dda99c6fd7d35c64", + f'externalPackageRef locator of type "gitoid" must conform with the regex {GITOID_REGEX}, but is: gitoid:blob:sha256:261eeb9e9f8b2b4b0d119366dda99c6fd7d35c64'), + (ExternalPackageRefCategory.OTHER, "id string", "locator string", + "externalPackageRef locator in category OTHER must contain no spaces, but is: locator string"), ]) [email protected]( - "add tests once external package ref validation is implemented: https://github.com/spdx/tools-python/issues/373") -def test_invalid_external_package_ref(external_package_ref, expected_message): +def test_invalid_external_package_ref_locators(category, reference_type, locator, expected_message): + external_package_ref = ExternalPackageRef(category, reference_type, locator, "externalPackageRef comment") parent_id = "SPDXRef-Package" validation_messages: List[ValidationMessage] = validate_external_package_ref(external_package_ref, parent_id)
Implement External Package Reference validation

Part of #307. This is probably the most involved part of the validation process, earning it [its own annex](https://spdx.github.io/spdx-spec/v2.3-RC1/external-repository-identifiers/) in the specification. Also add tests for this.
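The core of the patch is a dispatch table from reference type to locator regex. Here is a trimmed-down, hedged sketch of that idea with only two of the annex's reference types; the regexes are copied from the patch, but the helper name and error strings are illustrative.

```python
import re

# Two entries from the patch's TYPE_TO_REGEX table, for illustration only.
TYPE_TO_REGEX = {
    "npm": r"^[^@]+@[^@]+$",      # e.g. [email protected]
    "bower": r"^[^#]+#[^#]+$",    # e.g. modernizr#2.6.2
}

def locator_errors(reference_type: str, locator: str) -> list:
    regex = TYPE_TO_REGEX.get(reference_type)
    if regex is None:
        return [f"unknown reference type: {reference_type}"]
    if not re.match(regex, locator):
        return [f'locator of type "{reference_type}" must conform with the regex {regex}, but is: {locator}']
    return []

print(locator_errors("npm", "[email protected]"))   # []
print(locator_errors("bower", "modernizr:2.6.2"))   # error: ':' instead of '#'
```

The full implementation additionally special-cases the `OTHER` category (no spaces allowed), URL-based types (`advisory`, `fix`, `url`), and the `swid` URI scheme before falling back to the regex table.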
0.0
9ae0ebc9e18b549c2106579e8bde1be84dc4b7f0
[ "tests/spdx/validation/test_external_package_ref_validator.py::test_valid_external_package_ref[ExternalPackageRefCategory.SECURITY-cpe22Type-cpe:/o:canonical:ubuntu_linux:10.04:-:lts]", "tests/spdx/validation/test_external_package_ref_validator.py::test_valid_external_package_ref[ExternalPackageRefCategory.SECURITY-cpe23Type-cpe:2.3:o:canonical:ubuntu_linux:10.04:-:lts:*:*:*:*:*]", "tests/spdx/validation/test_external_package_ref_validator.py::test_valid_external_package_ref[ExternalPackageRefCategory.SECURITY-advisory-https://nvd.nist.gov/vuln/detail/CVE-2020-28498]", "tests/spdx/validation/test_external_package_ref_validator.py::test_valid_external_package_ref[ExternalPackageRefCategory.SECURITY-fix-https://github.com/indutny/elliptic/commit/441b7428]", "tests/spdx/validation/test_external_package_ref_validator.py::test_valid_external_package_ref[ExternalPackageRefCategory.SECURITY-url-https://github.com/christianlundkvist/blog/blob/master/2020_05_26_secp256k1_twist_attacks/secp256k1_twist_attacks.md]", "tests/spdx/validation/test_external_package_ref_validator.py::test_valid_external_package_ref[ExternalPackageRefCategory.SECURITY-swid-swid:2df9de35-0aff-4a86-ace6-f7dddd1ade4c]", "tests/spdx/validation/test_external_package_ref_validator.py::test_valid_external_package_ref[ExternalPackageRefCategory.PACKAGE_MANAGER-maven-central-org.apache.tomcat:tomcat:9.0.0.M4]", "tests/spdx/validation/test_external_package_ref_validator.py::test_valid_external_package_ref[[email protected]]", "tests/spdx/validation/test_external_package_ref_validator.py::test_valid_external_package_ref[ExternalPackageRefCategory.PACKAGE_MANAGER-nuget-Microsoft.AspNet.MVC/5.0.0]", "tests/spdx/validation/test_external_package_ref_validator.py::test_valid_external_package_ref[ExternalPackageRefCategory.PACKAGE_MANAGER-bower-modernizr#2.6.2]", "tests/spdx/validation/test_external_package_ref_validator.py::test_valid_external_package_ref[ExternalPackageRefCategory.PACKAGE_MANAGER-purl-pkg:docker/debian@sha256:2f04d3d33b6027bb74ecc81397abe780649ec89f1a2af18d7022737d0482cefe]", "tests/spdx/validation/test_external_package_ref_validator.py::test_valid_external_package_ref[ExternalPackageRefCategory.PACKAGE_MANAGER-purl-pkg:bitbucket/birkenfeld/pygments-main@244fd47e07d1014f0aed9c]", "tests/spdx/validation/test_external_package_ref_validator.py::test_valid_external_package_ref[ExternalPackageRefCategory.PACKAGE_MANAGER-purl-pkg:deb/debian/[email protected]?arch=i386&distro=jessie]", "tests/spdx/validation/test_external_package_ref_validator.py::test_valid_external_package_ref[ExternalPackageRefCategory.PACKAGE_MANAGER-purl-pkg:docker/customer/dockerimage@sha256:244fd47e07d1004f0aed9c?repository_url=gcr.io]", "tests/spdx/validation/test_external_package_ref_validator.py::test_valid_external_package_ref[ExternalPackageRefCategory.PACKAGE_MANAGER-purl-pkg:gem/[email protected]?platform=java]", "tests/spdx/validation/test_external_package_ref_validator.py::test_valid_external_package_ref[ExternalPackageRefCategory.PACKAGE_MANAGER-purl-pkg:gem/[email protected]]", "tests/spdx/validation/test_external_package_ref_validator.py::test_valid_external_package_ref[ExternalPackageRefCategory.PACKAGE_MANAGER-purl-pkg:github/package-url/purl-spec@244fd47e07d1004f0aed9c]", "tests/spdx/validation/test_external_package_ref_validator.py::test_valid_external_package_ref[ExternalPackageRefCategory.PACKAGE_MANAGER-purl-pkg:golang/google.golang.org/genproto#googleapis/api/annotations]", 
"tests/spdx/validation/test_external_package_ref_validator.py::test_valid_external_package_ref[ExternalPackageRefCategory.PACKAGE_MANAGER-purl-pkg:maven/org.apache.xmlgraphics/[email protected]?repository_url=repo.spring.io%2Frelease]", "tests/spdx/validation/test_external_package_ref_validator.py::test_valid_external_package_ref[ExternalPackageRefCategory.PACKAGE_MANAGER-purl-pkg:npm/%40angular/[email protected]]", "tests/spdx/validation/test_external_package_ref_validator.py::test_valid_external_package_ref[ExternalPackageRefCategory.PACKAGE_MANAGER-purl-pkg:nuget/[email protected]]", "tests/spdx/validation/test_external_package_ref_validator.py::test_valid_external_package_ref[ExternalPackageRefCategory.PACKAGE_MANAGER-purl-pkg:rpm/fedora/[email protected]?arch=i386&distro=fedora-25]", "tests/spdx/validation/test_external_package_ref_validator.py::test_valid_external_package_ref[ExternalPackageRefCategory.PERSISTENT_ID-swh-swh:1:cnt:94a9ed024d3859793618152ea559a168bbcbb5e2]", "tests/spdx/validation/test_external_package_ref_validator.py::test_valid_external_package_ref[ExternalPackageRefCategory.PERSISTENT_ID-swh-swh:1:dir:d198bc9d7a6bcf6db04f476d29314f157507d505]", "tests/spdx/validation/test_external_package_ref_validator.py::test_valid_external_package_ref[ExternalPackageRefCategory.PERSISTENT_ID-swh-swh:1:rev:309cf2674ee7a0749978cf8265ab91a60aea0f7d]", "tests/spdx/validation/test_external_package_ref_validator.py::test_valid_external_package_ref[ExternalPackageRefCategory.PERSISTENT_ID-swh-swh:1:rel:22ece559cc7cc2364edc5e5593d63ae8bd229f9f]", "tests/spdx/validation/test_external_package_ref_validator.py::test_valid_external_package_ref[ExternalPackageRefCategory.PERSISTENT_ID-swh-swh:1:snp:c7c108084bc0bf3d81436bf980b46e98bd338453]", "tests/spdx/validation/test_external_package_ref_validator.py::test_valid_external_package_ref[ExternalPackageRefCategory.PERSISTENT_ID-gitoid-gitoid:blob:sha1:261eeb9e9f8b2b4b0d119366dda99c6fd7d35c64]", "tests/spdx/validation/test_external_package_ref_validator.py::test_valid_external_package_ref[ExternalPackageRefCategory.PERSISTENT_ID-gitoid-gitoid:blob:sha256:3557f7eb43c621c71483743d4b37059bb80933e7f71277c0c3b3846159d1f61c]", "tests/spdx/validation/test_external_package_ref_validator.py::test_valid_external_package_ref[ExternalPackageRefCategory.OTHER-some", "tests/spdx/validation/test_external_package_ref_validator.py::test_invalid_external_package_ref_types[ExternalPackageRefCategory.SECURITY-cpe22Typo-cpe:/o:canonical:ubuntu_linux:10.04:-:lts-externalPackageRef", "tests/spdx/validation/test_external_package_ref_validator.py::test_invalid_external_package_ref_types[ExternalPackageRefCategory.PACKAGE_MANAGER-nugat-cpe:/o:canonical:ubuntu_linux:10.04:-:lts-externalPackageRef", "tests/spdx/validation/test_external_package_ref_validator.py::test_invalid_external_package_ref_types[ExternalPackageRefCategory.PERSISTENT_ID-git-oid-cpe:/o:canonical:ubuntu_linux:10.04:-:lts-externalPackageRef", "tests/spdx/validation/test_external_package_ref_validator.py::test_invalid_external_package_ref_locators[ExternalPackageRefCategory.SECURITY-cpe22Type-cpe:o:canonical:ubuntu_linux:10.04:-:lts-externalPackageRef", "tests/spdx/validation/test_external_package_ref_validator.py::test_invalid_external_package_ref_locators[ExternalPackageRefCategory.SECURITY-cpe23Type-cpe:2.3:/o:canonical:ubuntu_linux:10.04:-:lts:*:*:*:*:*-externalPackageRef", 
"tests/spdx/validation/test_external_package_ref_validator.py::test_invalid_external_package_ref_locators[ExternalPackageRefCategory.SECURITY-advisory-http://locatorurl-externalPackageRef", "tests/spdx/validation/test_external_package_ref_validator.py::test_invalid_external_package_ref_locators[ExternalPackageRefCategory.SECURITY-fix-http://fixurl-externalPackageRef", "tests/spdx/validation/test_external_package_ref_validator.py::test_invalid_external_package_ref_locators[ExternalPackageRefCategory.SECURITY-url-http://url-externalPackageRef", "tests/spdx/validation/test_external_package_ref_validator.py::test_invalid_external_package_ref_locators[ExternalPackageRefCategory.SECURITY-swid-2df9de35-0aff-4a86-ace6-f7dddd1ade4c-externalPackageRef", "tests/spdx/validation/test_external_package_ref_validator.py::test_invalid_external_package_ref_locators[ExternalPackageRefCategory.PACKAGE_MANAGER-maven-central-org.apache.tomcat:tomcat:tomcat:9.0.0.M4-externalPackageRef", "tests/spdx/validation/test_external_package_ref_validator.py::test_invalid_external_package_ref_locators[ExternalPackageRefCategory.PACKAGE_MANAGER-npm-http-server:0.3.0-externalPackageRef", "tests/spdx/validation/test_external_package_ref_validator.py::test_invalid_external_package_ref_locators[ExternalPackageRefCategory.PACKAGE_MANAGER-nuget-Microsoft.AspNet.MVC@5.0.0-externalPackageRef", "tests/spdx/validation/test_external_package_ref_validator.py::test_invalid_external_package_ref_locators[ExternalPackageRefCategory.PACKAGE_MANAGER-bower-modernizr:2.6.2-externalPackageRef", "tests/spdx/validation/test_external_package_ref_validator.py::test_invalid_external_package_ref_locators[ExternalPackageRefCategory.PACKAGE_MANAGER-purl-pkg:[email protected]", "tests/spdx/validation/test_external_package_ref_validator.py::test_invalid_external_package_ref_locators[ExternalPackageRefCategory.PERSISTENT_ID-swh-swh:cnt:94a9ed024d3859793618152ea559a168bbcbb5e2-externalPackageRef", "tests/spdx/validation/test_external_package_ref_validator.py::test_invalid_external_package_ref_locators[ExternalPackageRefCategory.PERSISTENT_ID-gitoid-gitoid:blob:sha1:3557f7eb43c621c71483743d4b37059bb80933e7f71277c0c3b3846159d1f61c-externalPackageRef", "tests/spdx/validation/test_external_package_ref_validator.py::test_invalid_external_package_ref_locators[ExternalPackageRefCategory.PERSISTENT_ID-gitoid-gitoid:blob:sha256:261eeb9e9f8b2b4b0d119366dda99c6fd7d35c64-externalPackageRef", "tests/spdx/validation/test_external_package_ref_validator.py::test_invalid_external_package_ref_locators[ExternalPackageRefCategory.OTHER-id" ]
[]
{ "failed_lite_validators": [ "has_short_problem_statement", "has_hyperlinks", "has_issue_reference", "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false }
2023-01-24 15:58:36+00:00
apache-2.0
5,623
spdx__tools-python-445
diff --git a/src/spdx/validation/file_validator.py b/src/spdx/validation/file_validator.py index cc4375e..16845e5 100644 --- a/src/spdx/validation/file_validator.py +++ b/src/spdx/validation/file_validator.py @@ -50,10 +50,10 @@ def validate_file(file: File, context: Optional[ValidationContext] = None) -> Li if not context: context = ValidationContext(spdx_id=file.spdx_id, element_type=SpdxElementType.FILE, full_element=file) - if not file.name.startswith("./"): + if file.name.startswith("/"): validation_messages.append( ValidationMessage( - f'file name must be a relative path to the file, starting with "./", but is: {file.name}', context) + f'file name must not be an absolute path starting with "/", but is: {file.name}', context) ) if ChecksumAlgorithm.SHA1 not in [checksum.algorithm for checksum in file.checksums]:
spdx/tools-python
6b3899f96e549dd414543176c81dbca09d4b19ac
diff --git a/tests/spdx/validation/test_file_validator.py b/tests/spdx/validation/test_file_validator.py index 040edde..d06a24d 100644 --- a/tests/spdx/validation/test_file_validator.py +++ b/tests/spdx/validation/test_file_validator.py @@ -27,9 +27,9 @@ def test_valid_file(): @pytest.mark.parametrize("file_input, spdx_id, expected_message", - [(file_fixture(name="invalid file name"), file_fixture().spdx_id, - 'file name must be a relative path to the file, starting with "./", but is: invalid file name'), - ( + [(file_fixture(name="/invalid/file/name"), file_fixture().spdx_id, + f'file name must not be an absolute path starting with "/", but is: /invalid/file/name'), + ( file_fixture(checksums=[Checksum(ChecksumAlgorithm.MD2, "d4c41ce30a517d6ce9d79c8c17bb4b66")]), file_fixture().spdx_id, f'checksums must contain a SHA1 algorithm checksum, but only contains: [<ChecksumAlgorithm.MD2: 13>]')
Change fileName validation

- file names do not have to start with `./`
- file names must not start with `/`

A minimal sketch of the relaxed check follows.
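This sketch assumes the check reduces to a single `startswith` test, as in the patch; the function name and bare-string errors are simplifications of the real validator.

```python
def file_name_errors(name: str) -> list:
    # Relaxed rule: only absolute paths are rejected;
    # a leading "./" is no longer required.
    if name.startswith("/"):
        return [f'file name must not be an absolute path starting with "/", but is: {name}']
    return []

print(file_name_errors("src/main.c"))          # [] -- accepted without "./"
print(file_name_errors("/invalid/file/name"))  # rejected
```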
0.0
6b3899f96e549dd414543176c81dbca09d4b19ac
[ "tests/spdx/validation/test_file_validator.py::test_invalid_file[file_input0-SPDXRef-File-file" ]
[ "tests/spdx/validation/test_file_validator.py::test_valid_file", "tests/spdx/validation/test_file_validator.py::test_invalid_file[file_input1-SPDXRef-File-checksums" ]
{ "failed_lite_validators": [ "has_short_problem_statement" ], "has_test_patch": true, "is_lite": false }
2023-01-25 13:58:25+00:00
apache-2.0
5,624
spdx__tools-python-458
diff --git a/src/spdx/validation/external_package_ref_validator.py b/src/spdx/validation/external_package_ref_validator.py index 7be94f6..bff504d 100644 --- a/src/spdx/validation/external_package_ref_validator.py +++ b/src/spdx/validation/external_package_ref_validator.py @@ -11,6 +11,8 @@ import re from typing import List, Dict +import uritools + from spdx.model.package import ExternalPackageRef, ExternalPackageRefCategory, CATEGORY_TO_EXTERNAL_PACKAGE_REF_TYPES from spdx.validation.uri_validators import validate_url, validate_uri from spdx.validation.validation_message import ValidationMessage, ValidationContext, SpdxElementType @@ -75,7 +77,7 @@ def validate_external_package_ref(external_package_ref: ExternalPackageRef, pare return [] if reference_type == "swid": - if validate_uri(locator) or not locator.startswith("swid"): + if not uritools.isuri(locator) or not locator.startswith("swid"): return [ValidationMessage( f'externalPackageRef locator of type "swid" must be a valid URI with scheme swid, but is: {locator}', context)] diff --git a/src/spdx/validation/uri_validators.py b/src/spdx/validation/uri_validators.py index 701ec3c..3033e04 100644 --- a/src/spdx/validation/uri_validators.py +++ b/src/spdx/validation/uri_validators.py @@ -38,7 +38,7 @@ def validate_download_location(location: str) -> List[str]: def validate_uri(uri: str) -> List[str]: if not isabsuri(uri): - return [f"must be a valid URI specified in RFC-3986, but is: {uri}"] + return [f"must be a valid URI specified in RFC-3986 and must contain no fragment (#), but is: {uri}"] else: split = urisplit(uri) if split.scheme is None:
spdx/tools-python
b1cd78c368c8094fdf598ea8f01da0a47c463959
diff --git a/tests/spdx/validation/test_creation_info_validator.py b/tests/spdx/validation/test_creation_info_validator.py index 7ad38e5..0fb4fc7 100644 --- a/tests/spdx/validation/test_creation_info_validator.py +++ b/tests/spdx/validation/test_creation_info_validator.py @@ -32,7 +32,7 @@ def test_valid_creation_info(): (creation_info_fixture(data_license="MIT"), "SPDXRef-DOCUMENT", 'data_license must be "CC0-1.0", but is: MIT'), (creation_info_fixture(document_namespace="some_namespace"), "SPDXRef-DOCUMENT", - "document_namespace must be a valid URI specified in RFC-3986, but is: some_namespace"), + "document_namespace must be a valid URI specified in RFC-3986 and must contain no fragment (#), but is: some_namespace"), ]) def test_invalid_creation_info(creation_info_input, expected_message, spdx_id): validation_messages: List[ValidationMessage] = validate_creation_info(creation_info_input) diff --git a/tests/spdx/validation/test_uri_validators.py b/tests/spdx/validation/test_uri_validators.py index a2a5822..a692ee8 100644 --- a/tests/spdx/validation/test_uri_validators.py +++ b/tests/spdx/validation/test_uri_validators.py @@ -99,7 +99,7 @@ def test_valid_uri(input_value): def test_invalid_uri(input_value): message = validate_uri(input_value) - assert message == [f"must be a valid URI specified in RFC-3986, but is: {input_value}"] + assert message == [f"must be a valid URI specified in RFC-3986 and must contain no fragment (#), but is: {input_value}"] @pytest.mark.parametrize("input_value", ["://spdx.org/spdxdocs/spdx-tools-v1.2-3F2504E0-4F89-41D3-9A0C-0305E82..."])
Validation: No `#` in DocumentNamespace

The namespace of the document must not contain any `#`. This has been overlooked so far.
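For a self-contained illustration, the fragment check can be sketched with the standard library's `urllib.parse` instead of the project's `uritools` dependency; the error message mirrors the one added in the patch, but the function itself is a hypothetical simplification.

```python
from urllib.parse import urlsplit

def namespace_errors(namespace: str) -> list:
    """The document namespace must be an absolute URI without a fragment."""
    parts = urlsplit(namespace)
    if not parts.scheme or parts.fragment:
        return [f"must be a valid URI specified in RFC-3986 and must contain no fragment (#), but is: {namespace}"]
    return []

print(namespace_errors("https://spdx.org/spdxdocs/doc-1"))  # []
print(namespace_errors("http://some#uri"))                  # rejected: has a fragment
print(namespace_errors("some_namespace"))                   # rejected: no scheme
```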
0.0
b1cd78c368c8094fdf598ea8f01da0a47c463959
[ "tests/spdx/validation/test_creation_info_validator.py::test_invalid_creation_info[creation_info_input2-SPDXRef-DOCUMENT-document_namespace", "tests/spdx/validation/test_uri_validators.py::test_invalid_uri[/invalid/uri]", "tests/spdx/validation/test_uri_validators.py::test_invalid_uri[http//uri]", "tests/spdx/validation/test_uri_validators.py::test_invalid_uri[http://some#uri]", "tests/spdx/validation/test_uri_validators.py::test_invalid_uri[some/uri]", "tests/spdx/validation/test_uri_validators.py::test_invalid_uri[some" ]
[ "tests/spdx/validation/test_creation_info_validator.py::test_valid_creation_info", "tests/spdx/validation/test_creation_info_validator.py::test_invalid_creation_info[creation_info_input0-SPDXRef-doc-spdx_id", "tests/spdx/validation/test_creation_info_validator.py::test_invalid_creation_info[creation_info_input1-SPDXRef-DOCUMENT-data_license", "tests/spdx/validation/test_uri_validators.py::test_valid_url[https://some.url]", "tests/spdx/validation/test_uri_validators.py::test_valid_url[https://spdx.org/spdxdocs/spdx-tools-v1.2-3F2504E0-4F89-41D3-9A0C-0305E82...]", "tests/spdx/validation/test_uri_validators.py::test_valid_url[http://some.url]", "tests/spdx/validation/test_uri_validators.py::test_valid_url[http://ftp.gnu.org/gnu/glibc/glibc-ports-2.15.tar.gz]", "tests/spdx/validation/test_uri_validators.py::test_invalid_url[:::::]", "tests/spdx/validation/test_uri_validators.py::test_invalid_url[http://testurl]", "tests/spdx/validation/test_uri_validators.py::test_valid_package_download_location[http://ftp.gnu.org/gnu/glibc/glibc-ports-2.15.tar.gz]", "tests/spdx/validation/test_uri_validators.py::test_valid_package_download_location[git://git.myproject.org/MyProject]", "tests/spdx/validation/test_uri_validators.py::test_valid_package_download_location[git+https://git.myproject.org/MyProject.git]", "tests/spdx/validation/test_uri_validators.py::test_valid_package_download_location[git+http://git.myproject.org/MyProject]", "tests/spdx/validation/test_uri_validators.py::test_valid_package_download_location[git+ssh://git.myproject.org/MyProject.git]", "tests/spdx/validation/test_uri_validators.py::test_valid_package_download_location[git+git://git.myproject.org/MyProject]", "tests/spdx/validation/test_uri_validators.py::test_valid_package_download_location[[email protected]:MyProject]", "tests/spdx/validation/test_uri_validators.py::test_valid_package_download_location[git://git.myproject.org/MyProject#src/somefile.c]", "tests/spdx/validation/test_uri_validators.py::test_valid_package_download_location[git+https://git.myproject.org/MyProject#src/Class.java]", "tests/spdx/validation/test_uri_validators.py::test_valid_package_download_location[git://git.myproject.org/MyProject.git@master]", "tests/spdx/validation/test_uri_validators.py::test_valid_package_download_location[git+https://git.myproject.org/[email protected]]", "tests/spdx/validation/test_uri_validators.py::test_valid_package_download_location[git://git.myproject.org/MyProject.git@da39a3ee5e6b4b0d3255bfef95601890afd80709]", "tests/spdx/validation/test_uri_validators.py::test_valid_package_download_location[git+https://git.myproject.org/MyProject.git@master#/src/MyClass.cpp]", "tests/spdx/validation/test_uri_validators.py::test_valid_package_download_location[git+https://git.myproject.org/MyProject@da39a3ee5e6b4b0d3255bfef95601890afd80709#lib/variable.rb]", "tests/spdx/validation/test_uri_validators.py::test_valid_package_download_location[hg+http://hg.myproject.org/MyProject]", "tests/spdx/validation/test_uri_validators.py::test_valid_package_download_location[hg+https://hg.myproject.org/MyProject]", "tests/spdx/validation/test_uri_validators.py::test_valid_package_download_location[hg+ssh://hg.myproject.org/MyProject]", "tests/spdx/validation/test_uri_validators.py::test_valid_package_download_location[hg+https://hg.myproject.org/MyProject#src/somefile.c]", "tests/spdx/validation/test_uri_validators.py::test_valid_package_download_location[hg+https://hg.myproject.org/MyProject#src/Class.java]", 
"tests/spdx/validation/test_uri_validators.py::test_valid_package_download_location[hg+https://hg.myproject.org/MyProject@da39a3ee5e6b]", "tests/spdx/validation/test_uri_validators.py::test_valid_package_download_location[hg+https://hg.myproject.org/MyProject@2019]", "tests/spdx/validation/test_uri_validators.py::test_valid_package_download_location[hg+https://hg.myproject.org/[email protected]]", "tests/spdx/validation/test_uri_validators.py::test_valid_package_download_location[hg+https://hg.myproject.org/MyProject@special_feature]", "tests/spdx/validation/test_uri_validators.py::test_valid_package_download_location[hg+https://hg.myproject.org/MyProject@master#/src/MyClass.cpp]", "tests/spdx/validation/test_uri_validators.py::test_valid_package_download_location[hg+https://hg.myproject.org/MyProject@da39a3ee5e6b#lib/variable.rb]", "tests/spdx/validation/test_uri_validators.py::test_valid_package_download_location[svn://svn.myproject.org/svn/MyProject]", "tests/spdx/validation/test_uri_validators.py::test_valid_package_download_location[svn+svn://svn.myproject.org/svn/MyProject]", "tests/spdx/validation/test_uri_validators.py::test_valid_package_download_location[svn+http://svn.myproject.org/svn/MyProject/trunk]", "tests/spdx/validation/test_uri_validators.py::test_valid_package_download_location[svn+https://svn.myproject.org/svn/MyProject/trunk]", "tests/spdx/validation/test_uri_validators.py::test_valid_package_download_location[svn+https://svn.myproject.org/MyProject#src/somefile.c]", "tests/spdx/validation/test_uri_validators.py::test_valid_package_download_location[svn+https://svn.myproject.org/MyProject#src/Class.java]", "tests/spdx/validation/test_uri_validators.py::test_valid_package_download_location[svn+https://svn.myproject.org/MyProject/trunk#src/somefile.c]", "tests/spdx/validation/test_uri_validators.py::test_valid_package_download_location[svn+https://svn.myproject.org/MyProject/trunk/src/somefile.c]", "tests/spdx/validation/test_uri_validators.py::test_valid_package_download_location[svn+https://svn.myproject.org/svn/MyProject/trunk@2019]", "tests/spdx/validation/test_uri_validators.py::test_valid_package_download_location[svn+https://svn.myproject.org/MyProject@123#/src/MyClass.cpp]", "tests/spdx/validation/test_uri_validators.py::test_valid_package_download_location[svn+https://svn.myproject.org/MyProject/trunk@1234#lib/variable/variable.rb]", "tests/spdx/validation/test_uri_validators.py::test_valid_package_download_location[bzr+https://bzr.myproject.org/MyProject/trunk]", "tests/spdx/validation/test_uri_validators.py::test_valid_package_download_location[bzr+http://bzr.myproject.org/MyProject/trunk]", "tests/spdx/validation/test_uri_validators.py::test_valid_package_download_location[bzr+sftp://myproject.org/MyProject/trunk]", "tests/spdx/validation/test_uri_validators.py::test_valid_package_download_location[bzr+ssh://myproject.org/MyProject/trunk]", "tests/spdx/validation/test_uri_validators.py::test_valid_package_download_location[bzr+ftp://myproject.org/MyProject/trunk]", "tests/spdx/validation/test_uri_validators.py::test_valid_package_download_location[bzr+lp:MyProject]", "tests/spdx/validation/test_uri_validators.py::test_valid_package_download_location[bzr+https://bzr.myproject.org/MyProject/trunk#src/somefile.c]", "tests/spdx/validation/test_uri_validators.py::test_valid_package_download_location[bzr+https://bzr.myproject.org/MyProject/trunk#src/Class.java]", 
"tests/spdx/validation/test_uri_validators.py::test_valid_package_download_location[bzr+https://bzr.myproject.org/MyProject/trunk@2019]", "tests/spdx/validation/test_uri_validators.py::test_valid_package_download_location[bzr+http://bzr.myproject.org/MyProject/[email protected]]", "tests/spdx/validation/test_uri_validators.py::test_valid_package_download_location[bzr+https://bzr.myproject.org/MyProject/trunk@2019#src/somefile.c]", "tests/spdx/validation/test_uri_validators.py::test_invalid_package_download_location[:::::]", "tests/spdx/validation/test_uri_validators.py::test_valid_uri[https://some.uri]", "tests/spdx/validation/test_uri_validators.py::test_valid_uri[http:////some]", "tests/spdx/validation/test_uri_validators.py::test_valid_uri[https://spdx.org/spdxdocs/spdx-tools-v1.2-3F2504E0-4F89-41D3-9A0C-0305E82...]", "tests/spdx/validation/test_uri_validators.py::test_valid_uri[h://someweirdtest^?]", "tests/spdx/validation/test_uri_validators.py::test_valid_uri[https://some.uri" ]
{ "failed_lite_validators": [ "has_short_problem_statement", "has_many_modified_files" ], "has_test_patch": true, "is_lite": false }
2023-01-31 11:55:09+00:00
apache-2.0
5,625
spdx__tools-python-459
diff --git a/src/spdx/validation/document_validator.py b/src/spdx/validation/document_validator.py index a3fa007..fe6422a 100644 --- a/src/spdx/validation/document_validator.py +++ b/src/spdx/validation/document_validator.py @@ -21,6 +21,7 @@ from spdx.validation.file_validator import validate_files from spdx.validation.package_validator import validate_packages from spdx.validation.relationship_validator import validate_relationships from spdx.validation.snippet_validator import validate_snippets +from spdx.validation.spdx_id_validators import get_list_of_all_spdx_ids from spdx.validation.validation_message import ValidationMessage, ValidationContext, SpdxElementType @@ -74,4 +75,16 @@ def validate_full_spdx_document(document: Document, spdx_version: str = None) -> ValidationContext(spdx_id=document_id, element_type=SpdxElementType.DOCUMENT))) + all_spdx_ids: List[str] = get_list_of_all_spdx_ids(document) + auxiliary_set = set() + duplicated_spdx_ids = set( + spdx_id for spdx_id in all_spdx_ids if spdx_id in auxiliary_set or auxiliary_set.add(spdx_id)) + + if duplicated_spdx_ids: + validation_messages.append( + ValidationMessage( + f"every spdx_id must be unique within the document, but found the following duplicates: {sorted(duplicated_spdx_ids)}", + context) + ) + return validation_messages
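The duplicate-detection line in the patch above leans on a Python short-circuit idiom: `set.add` returns `None` (falsy), so the `or` clause both records first occurrences and lets only repeats through the filter. A minimal standalone sketch of the same idiom (the `items` list is illustrative, not from the dataset):

```python
seen = set()
items = ["SPDXRef-2", "SPDXRef-3", "SPDXRef-2", "SPDXRef-DOCUMENT", "SPDXRef-3"]

# set.add() returns None (falsy): a first occurrence is recorded in `seen`
# and evaluates falsy, so only repeated ids pass the filter.
duplicates = set(x for x in items if x in seen or seen.add(x))
print(sorted(duplicates))  # ['SPDXRef-2', 'SPDXRef-3']
```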
spdx/tools-python
e25467fb6125911bfa08de4d7ccfb885092a6612
diff --git a/tests/spdx/validation/test_document_validator.py b/tests/spdx/validation/test_document_validator.py index c3b8055..944f307 100644 --- a/tests/spdx/validation/test_document_validator.py +++ b/tests/spdx/validation/test_document_validator.py @@ -16,7 +16,7 @@ import pytest from spdx.model.document import Document, CreationInfo from spdx.validation.document_validator import validate_full_spdx_document from spdx.validation.validation_message import ValidationMessage, ValidationContext, SpdxElementType -from tests.spdx.fixtures import document_fixture, creation_info_fixture +from tests.spdx.fixtures import document_fixture, creation_info_fixture, file_fixture, package_fixture, snippet_fixture def test_valid_document(): @@ -56,3 +56,18 @@ def test_spdx_version_handling(creation_info: CreationInfo, version_input: str, assert validation_messages == expected # TODO: https://github.com/spdx/tools-python/issues/375 + + +def test_duplicated_spdx_ids(): + document = document_fixture( + files=[file_fixture(spdx_id="SPDXRef-File"), file_fixture(spdx_id="SPDXRef-2"), file_fixture(spdx_id="SPDXRef-3")], + packages=[package_fixture(spdx_id="SPDXRef-2"), package_fixture(spdx_id="SPDXRef-DOCUMENT")], + snippets=[snippet_fixture(spdx_id="SPDXRef-2"), snippet_fixture(spdx_id="SPDXRef-3")]) + + context = ValidationContext(spdx_id=document.creation_info.spdx_id, element_type=SpdxElementType.DOCUMENT) + + validation_messages: List[ValidationMessage] = validate_full_spdx_document(document) + + assert validation_messages == [ValidationMessage( + "every spdx_id must be unique within the document, but found the following duplicates: ['SPDXRef-2', 'SPDXRef-3', 'SPDXRef-DOCUMENT']", + context)]
Check for duplicated SPDX ids. This should be part of the validation: there can only be a single element per SPDX id.
0.0
e25467fb6125911bfa08de4d7ccfb885092a6612
[ "tests/spdx/validation/test_document_validator.py::test_duplicated_spdx_ids" ]
[ "tests/spdx/validation/test_document_validator.py::test_valid_document", "tests/spdx/validation/test_document_validator.py::test_spdx_version_handling[creation_info0-SPDX-2.3-None]", "tests/spdx/validation/test_document_validator.py::test_spdx_version_handling[creation_info1-None-None]", "tests/spdx/validation/test_document_validator.py::test_spdx_version_handling[creation_info2-SPDX-2.2-provided", "tests/spdx/validation/test_document_validator.py::test_spdx_version_handling[creation_info3-SPDX2.3-provided", "tests/spdx/validation/test_document_validator.py::test_spdx_version_handling[creation_info4-SPDX-2.3-the", "tests/spdx/validation/test_document_validator.py::test_spdx_version_handling[creation_info5-None-the", "tests/spdx/validation/test_document_validator.py::test_spdx_version_handling[creation_info6-SPDX2.3-the" ]
{ "failed_lite_validators": [ "has_short_problem_statement" ], "has_test_patch": true, "is_lite": false }
2023-02-01 09:39:50+00:00
apache-2.0
5,626
spdx__tools-python-460
diff --git a/src/spdx/validation/document_validator.py b/src/spdx/validation/document_validator.py index a3fa007..fe6422a 100644 --- a/src/spdx/validation/document_validator.py +++ b/src/spdx/validation/document_validator.py @@ -21,6 +21,7 @@ from spdx.validation.file_validator import validate_files from spdx.validation.package_validator import validate_packages from spdx.validation.relationship_validator import validate_relationships from spdx.validation.snippet_validator import validate_snippets +from spdx.validation.spdx_id_validators import get_list_of_all_spdx_ids from spdx.validation.validation_message import ValidationMessage, ValidationContext, SpdxElementType @@ -74,4 +75,16 @@ def validate_full_spdx_document(document: Document, spdx_version: str = None) -> ValidationContext(spdx_id=document_id, element_type=SpdxElementType.DOCUMENT))) + all_spdx_ids: List[str] = get_list_of_all_spdx_ids(document) + auxiliary_set = set() + duplicated_spdx_ids = set( + spdx_id for spdx_id in all_spdx_ids if spdx_id in auxiliary_set or auxiliary_set.add(spdx_id)) + + if duplicated_spdx_ids: + validation_messages.append( + ValidationMessage( + f"every spdx_id must be unique within the document, but found the following duplicates: {sorted(duplicated_spdx_ids)}", + context) + ) + return validation_messages diff --git a/src/spdx/validation/spdx_id_validators.py b/src/spdx/validation/spdx_id_validators.py index dfe25f4..cdf8f0c 100644 --- a/src/spdx/validation/spdx_id_validators.py +++ b/src/spdx/validation/spdx_id_validators.py @@ -53,8 +53,7 @@ def validate_spdx_id(spdx_id: str, document: Document, check_document: bool = Fa str]: """ Test that the given spdx_id (and a potential DocumentRef to an external document) is valid and, if it is a reference, actually exists in the document. Optionally checks files or the whole document - for the existence of the spdx_id (i.e. if it is used as a reference). Returns a list of validation messages, - and the external document ref part and id part of the provided spdx_id. """ + for the existence of the spdx_id (i.e. if it is used as a reference). Returns a list of validation messages. """ validation_messages: List[str] = [] split_id: List[str] = spdx_id.split(":") @@ -74,7 +73,7 @@ def validate_spdx_id(spdx_id: str, document: Document, check_document: bool = Fa f'the internal SPDX id part of spdx_id must only contain letters, numbers, "." and "-" and must begin with "SPDXRef-", but is: {split_id[1]}') if not is_external_doc_ref_present_in_document(split_id[0], document): validation_messages.append( - f"did not find the external document reference {split_id[0]} in the SPDX document") + f'did not find the external document reference "{split_id[0]}" in the SPDX document') return validation_messages @@ -85,10 +84,10 @@ def validate_spdx_id(spdx_id: str, document: Document, check_document: bool = Fa if check_document: if not is_spdx_id_present_in_document(spdx_id, document): - validation_messages.append(f"did not find the referenced spdx_id {spdx_id} in the SPDX document") + validation_messages.append(f'did not find the referenced spdx_id "{spdx_id}" in the SPDX document') if check_files: if not is_spdx_id_present_in_files(spdx_id, document.files): - validation_messages.append(f"did not find the referenced spdx_id {spdx_id} in the SPDX document's files") + validation_messages.append(f'did not find the referenced spdx_id "{spdx_id}" in the SPDX document\'s files') return validation_messages
spdx/tools-python
e25467fb6125911bfa08de4d7ccfb885092a6612
diff --git a/tests/spdx/validation/test_annotation_validator.py b/tests/spdx/validation/test_annotation_validator.py index b37a686..608de5b 100644 --- a/tests/spdx/validation/test_annotation_validator.py +++ b/tests/spdx/validation/test_annotation_validator.py @@ -28,7 +28,7 @@ def test_valid_annotation(): @pytest.mark.parametrize("annotation_id, file_id, expected_message", [("SPDXRef-File", "SPDXRef-hiddenFile", - "did not find the referenced spdx_id SPDXRef-File in the SPDX document") + 'did not find the referenced spdx_id "SPDXRef-File" in the SPDX document') ]) def test_invalid_annotation(annotation_id, file_id, expected_message): annotation: Annotation = annotation_fixture(spdx_id=annotation_id) diff --git a/tests/spdx/validation/test_document_validator.py b/tests/spdx/validation/test_document_validator.py index c3b8055..944f307 100644 --- a/tests/spdx/validation/test_document_validator.py +++ b/tests/spdx/validation/test_document_validator.py @@ -16,7 +16,7 @@ import pytest from spdx.model.document import Document, CreationInfo from spdx.validation.document_validator import validate_full_spdx_document from spdx.validation.validation_message import ValidationMessage, ValidationContext, SpdxElementType -from tests.spdx.fixtures import document_fixture, creation_info_fixture +from tests.spdx.fixtures import document_fixture, creation_info_fixture, file_fixture, package_fixture, snippet_fixture def test_valid_document(): @@ -56,3 +56,18 @@ def test_spdx_version_handling(creation_info: CreationInfo, version_input: str, assert validation_messages == expected # TODO: https://github.com/spdx/tools-python/issues/375 + + +def test_duplicated_spdx_ids(): + document = document_fixture( + files=[file_fixture(spdx_id="SPDXRef-File"), file_fixture(spdx_id="SPDXRef-2"), file_fixture(spdx_id="SPDXRef-3")], + packages=[package_fixture(spdx_id="SPDXRef-2"), package_fixture(spdx_id="SPDXRef-DOCUMENT")], + snippets=[snippet_fixture(spdx_id="SPDXRef-2"), snippet_fixture(spdx_id="SPDXRef-3")]) + + context = ValidationContext(spdx_id=document.creation_info.spdx_id, element_type=SpdxElementType.DOCUMENT) + + validation_messages: List[ValidationMessage] = validate_full_spdx_document(document) + + assert validation_messages == [ValidationMessage( + "every spdx_id must be unique within the document, but found the following duplicates: ['SPDXRef-2', 'SPDXRef-3', 'SPDXRef-DOCUMENT']", + context)] diff --git a/tests/spdx/validation/test_relationship_validator.py b/tests/spdx/validation/test_relationship_validator.py index c93c2e2..89ea088 100644 --- a/tests/spdx/validation/test_relationship_validator.py +++ b/tests/spdx/validation/test_relationship_validator.py @@ -33,9 +33,9 @@ def test_valid_relationship(related_spdx_element): @pytest.mark.parametrize("spdx_element_id, related_spdx_element_id, expected_message", [("SPDXRef-unknownFile", "SPDXRef-File", - 'did not find the referenced spdx_id SPDXRef-unknownFile in the SPDX document'), + 'did not find the referenced spdx_id "SPDXRef-unknownFile" in the SPDX document'), ("SPDXRef-File", "SPDXRef-unknownFile", - 'did not find the referenced spdx_id SPDXRef-unknownFile in the SPDX document'), + 'did not find the referenced spdx_id "SPDXRef-unknownFile" in the SPDX document'), ]) def test_unknown_spdx_id(spdx_element_id, related_spdx_element_id, expected_message): relationship: Relationship = relationship_fixture(spdx_element_id=spdx_element_id, diff --git a/tests/spdx/validation/test_spdx_id_validators.py b/tests/spdx/validation/test_spdx_id_validators.py index 
4c00169..ee8536c 100644 --- a/tests/spdx/validation/test_spdx_id_validators.py +++ b/tests/spdx/validation/test_spdx_id_validators.py @@ -8,5 +8,115 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. +from unittest import TestCase + +import pytest + +from spdx.validation.spdx_id_validators import is_valid_internal_spdx_id, is_valid_external_doc_ref_id, \ + get_list_of_all_spdx_ids, is_spdx_id_present_in_document, is_external_doc_ref_present_in_document, validate_spdx_id +from tests.spdx.fixtures import document_fixture, file_fixture, package_fixture, snippet_fixture, creation_info_fixture, \ + external_document_ref_fixture + +DOCUMENT = document_fixture(files=[file_fixture(spdx_id="SPDXRef-File1"), + file_fixture(spdx_id="SPDXRef-File2")], + packages=[package_fixture(spdx_id="SPDXRef-Package1"), + package_fixture(spdx_id="SPDXRef-Package2")], + snippets=[snippet_fixture(spdx_id="SPDXRef-Snippet1"), + snippet_fixture(spdx_id="SPDXRef-Snippet2")], + creation_info=creation_info_fixture( + external_document_refs=[external_document_ref_fixture(document_ref_id="DocumentRef-external"), + external_document_ref_fixture(document_ref_id="DocumentRef-1.2-ext")])) + + [email protected]("spdx_id", ["SPDXRef-DOCUMENT", "SPDXRef-File1", "SPDXRef-1.3-3.7"]) +def test_valid_internal_spdx_ids(spdx_id): + assert is_valid_internal_spdx_id(spdx_id) + + [email protected]("spdx_id", + ["spdxId", "spdxRef-DOCUMENT", "SPDXRef.File", "SPDXRef#Snippet", "SPDXRef-1.3_3.7"]) +def test_invalid_internal_spdx_ids(spdx_id): + assert not is_valid_internal_spdx_id(spdx_id) + + [email protected]("doc_ref_id", ["DocumentRef-external", "DocumentRef-...+", "DocumentRef-v0.4.2-alpha"]) +def test_valid_external_doc_ref_ids(doc_ref_id): + assert is_valid_external_doc_ref_id(doc_ref_id) + + [email protected]("doc_ref_id", + ["external-ref", "Documentref-external", "DocumentRef-...#", "DocumentRef-v0_4_2-alpha"]) +def test_invalid_external_doc_ref_ids(doc_ref_id): + assert not is_valid_external_doc_ref_id(doc_ref_id) + + +def test_is_spdx_id_present_in_document(): + assert is_spdx_id_present_in_document("SPDXRef-File1", DOCUMENT) + assert is_spdx_id_present_in_document("SPDXRef-Package2", DOCUMENT) + assert is_spdx_id_present_in_document("SPDXRef-Snippet1", DOCUMENT) + assert is_spdx_id_present_in_document("SPDXRef-DOCUMENT", DOCUMENT) + assert not is_spdx_id_present_in_document("SPDXRef-file2", DOCUMENT) + + +def test_is_external_doc_ref_present_in_document(): + assert is_external_doc_ref_present_in_document("DocumentRef-1.2-ext", DOCUMENT) + assert not is_external_doc_ref_present_in_document("DocumentRef-External1", DOCUMENT) + +def test_list_of_all_spdx_ids(): + TestCase().assertCountEqual(get_list_of_all_spdx_ids(DOCUMENT), + ["SPDXRef-DOCUMENT", "SPDXRef-File1", "SPDXRef-File2", "SPDXRef-Package1", + "SPDXRef-Package2", "SPDXRef-Snippet1", "SPDXRef-Snippet2"]) + + [email protected]("spdx_id", + ["DocumentRef-external:SPDXRef-File", "SPDXRef-Package"]) +def test_valid_spdx_id(spdx_id): + validation_messages = validate_spdx_id(spdx_id, DOCUMENT) + + assert validation_messages == [] + + [email protected]("spdx_id, expected_messages", + [("DocumentRef-external:extern:SPDXRef-File", + [f"spdx_id must not contain more than one colon in order to separate the external document reference id from the internal SPDX id, but is: DocumentRef-external:extern:SPDXRef-File"]), + ("DocumentRef external:SPDXRef-File", + 
['the external document reference part of spdx_id must only contain letters, numbers, ".", "-" and "+" and must begin with "DocumentRef-", but is: DocumentRef external', + 'did not find the external document reference "DocumentRef external" in the SPDX document']), + ("DocRef-ext:SPDXRef-File_2", + ['the external document reference part of spdx_id must only contain letters, numbers, ".", "-" and "+" and must begin with "DocumentRef-", but is: DocRef-ext', + 'the internal SPDX id part of spdx_id must only contain letters, numbers, "." and "-" and must begin with "SPDXRef-", but is: SPDXRef-File_2', + 'did not find the external document reference "DocRef-ext" in the SPDX document']), + ("DocumentRef-external:SPDXRef-File_2", + ['the internal SPDX id part of spdx_id must only contain letters, numbers, "." and "-" and must begin with "SPDXRef-", but is: SPDXRef-File_2']), + ("SPDXRef-42+", + ['spdx_id must only contain letters, numbers, "." and "-" and must begin with "SPDXRef-", but is: SPDXRef-42+']) + ]) +def test_invalid_spdx_id(spdx_id, expected_messages): + validation_messages = validate_spdx_id(spdx_id, DOCUMENT) + + TestCase().assertCountEqual(validation_messages, expected_messages) + + [email protected]("spdx_id", + ["DocumentRef-external:SPDXRef-File", "SPDXRef-DOCUMENT", "SPDXRef-File1", "SPDXRef-Package1", "SPDXRef-Snippet1"]) +def test_valid_spdx_id_with_check_document(spdx_id): + validation_messages = validate_spdx_id(spdx_id, DOCUMENT, check_document=True) + assert validation_messages == [] + + +def test_invalid_spdx_id_with_check_document(): + validation_messages = validate_spdx_id("SPDXRef-Filet", DOCUMENT, check_document=True) + assert validation_messages == ['did not find the referenced spdx_id "SPDXRef-Filet" in the SPDX document'] + + [email protected]("spdx_id", + ["DocumentRef-external:SPDXRef-File", "SPDXRef-File1"]) +def test_valid_spdx_id_with_check_files(spdx_id): + validation_messages = validate_spdx_id(spdx_id, DOCUMENT, check_files=True) + assert validation_messages == [] + + +def test_invalid_spdx_id_with_check_files(): + validation_messages = validate_spdx_id("SPDXRef-Package1", DOCUMENT, check_files=True) + assert validation_messages == ['did not find the referenced spdx_id "SPDXRef-Package1" in the SPDX document\'s files'] -# TODO: https://github.com/spdx/tools-python/issues/376
Test `spdx_id_validators`. Test the methods found in `src/validation/spdx_id_validators`. A dummy file already exists at `tests/validation/test_spdx_id_validators`.
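As a rough illustration of the parametrized style the accompanying test patch uses, here is a minimal pytest sketch. The regex is an assumption reconstructed from the error message quoted in the tests ('must only contain letters, numbers, "." and "-" and must begin with "SPDXRef-"'), not the library's actual implementation:

```python
import re

import pytest


def is_valid_internal_spdx_id(spdx_id: str) -> bool:
    # Assumed rule, reconstructed from the validation message in the tests.
    return bool(re.fullmatch(r"SPDXRef-[A-Za-z0-9.\-]+", spdx_id))


@pytest.mark.parametrize("spdx_id", ["SPDXRef-DOCUMENT", "SPDXRef-File1", "SPDXRef-1.3-3.7"])
def test_valid_ids(spdx_id):
    assert is_valid_internal_spdx_id(spdx_id)


@pytest.mark.parametrize("spdx_id", ["spdxRef-DOCUMENT", "SPDXRef.File", "SPDXRef-1.3_3.7"])
def test_invalid_ids(spdx_id):
    assert not is_valid_internal_spdx_id(spdx_id)
```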
0.0
e25467fb6125911bfa08de4d7ccfb885092a6612
[ "tests/spdx/validation/test_annotation_validator.py::test_invalid_annotation[SPDXRef-File-SPDXRef-hiddenFile-did", "tests/spdx/validation/test_document_validator.py::test_duplicated_spdx_ids", "tests/spdx/validation/test_relationship_validator.py::test_unknown_spdx_id[SPDXRef-unknownFile-SPDXRef-File-did", "tests/spdx/validation/test_relationship_validator.py::test_unknown_spdx_id[SPDXRef-File-SPDXRef-unknownFile-did", "tests/spdx/validation/test_spdx_id_validators.py::test_invalid_spdx_id[DocumentRef", "tests/spdx/validation/test_spdx_id_validators.py::test_invalid_spdx_id[DocRef-ext:SPDXRef-File_2-expected_messages2]", "tests/spdx/validation/test_spdx_id_validators.py::test_invalid_spdx_id_with_check_document", "tests/spdx/validation/test_spdx_id_validators.py::test_invalid_spdx_id_with_check_files" ]
[ "tests/spdx/validation/test_annotation_validator.py::test_valid_annotation", "tests/spdx/validation/test_document_validator.py::test_valid_document", "tests/spdx/validation/test_document_validator.py::test_spdx_version_handling[creation_info0-SPDX-2.3-None]", "tests/spdx/validation/test_document_validator.py::test_spdx_version_handling[creation_info1-None-None]", "tests/spdx/validation/test_document_validator.py::test_spdx_version_handling[creation_info2-SPDX-2.2-provided", "tests/spdx/validation/test_document_validator.py::test_spdx_version_handling[creation_info3-SPDX2.3-provided", "tests/spdx/validation/test_document_validator.py::test_spdx_version_handling[creation_info4-SPDX-2.3-the", "tests/spdx/validation/test_document_validator.py::test_spdx_version_handling[creation_info5-None-the", "tests/spdx/validation/test_document_validator.py::test_spdx_version_handling[creation_info6-SPDX2.3-the", "tests/spdx/validation/test_relationship_validator.py::test_valid_relationship[SPDXRef-Package]", "tests/spdx/validation/test_relationship_validator.py::test_valid_relationship[related_spdx_element1]", "tests/spdx/validation/test_relationship_validator.py::test_valid_relationship[related_spdx_element2]", "tests/spdx/validation/test_relationship_validator.py::test_v2_3_only_types[relationship0-RelationshipType.SPECIFICATION_FOR", "tests/spdx/validation/test_relationship_validator.py::test_v2_3_only_types[relationship1-RelationshipType.REQUIREMENT_DESCRIPTION_FOR", "tests/spdx/validation/test_spdx_id_validators.py::test_valid_internal_spdx_ids[SPDXRef-DOCUMENT]", "tests/spdx/validation/test_spdx_id_validators.py::test_valid_internal_spdx_ids[SPDXRef-File1]", "tests/spdx/validation/test_spdx_id_validators.py::test_valid_internal_spdx_ids[SPDXRef-1.3-3.7]", "tests/spdx/validation/test_spdx_id_validators.py::test_invalid_internal_spdx_ids[spdxId]", "tests/spdx/validation/test_spdx_id_validators.py::test_invalid_internal_spdx_ids[spdxRef-DOCUMENT]", "tests/spdx/validation/test_spdx_id_validators.py::test_invalid_internal_spdx_ids[SPDXRef.File]", "tests/spdx/validation/test_spdx_id_validators.py::test_invalid_internal_spdx_ids[SPDXRef#Snippet]", "tests/spdx/validation/test_spdx_id_validators.py::test_invalid_internal_spdx_ids[SPDXRef-1.3_3.7]", "tests/spdx/validation/test_spdx_id_validators.py::test_valid_external_doc_ref_ids[DocumentRef-external]", "tests/spdx/validation/test_spdx_id_validators.py::test_valid_external_doc_ref_ids[DocumentRef-...+]", "tests/spdx/validation/test_spdx_id_validators.py::test_valid_external_doc_ref_ids[DocumentRef-v0.4.2-alpha]", "tests/spdx/validation/test_spdx_id_validators.py::test_invalid_external_doc_ref_ids[external-ref]", "tests/spdx/validation/test_spdx_id_validators.py::test_invalid_external_doc_ref_ids[Documentref-external]", "tests/spdx/validation/test_spdx_id_validators.py::test_invalid_external_doc_ref_ids[DocumentRef-...#]", "tests/spdx/validation/test_spdx_id_validators.py::test_invalid_external_doc_ref_ids[DocumentRef-v0_4_2-alpha]", "tests/spdx/validation/test_spdx_id_validators.py::test_is_spdx_id_present_in_document", "tests/spdx/validation/test_spdx_id_validators.py::test_is_external_doc_ref_present_in_document", "tests/spdx/validation/test_spdx_id_validators.py::test_list_of_all_spdx_ids", "tests/spdx/validation/test_spdx_id_validators.py::test_valid_spdx_id[DocumentRef-external:SPDXRef-File]", "tests/spdx/validation/test_spdx_id_validators.py::test_valid_spdx_id[SPDXRef-Package]", 
"tests/spdx/validation/test_spdx_id_validators.py::test_invalid_spdx_id[DocumentRef-external:extern:SPDXRef-File-expected_messages0]", "tests/spdx/validation/test_spdx_id_validators.py::test_invalid_spdx_id[DocumentRef-external:SPDXRef-File_2-expected_messages3]", "tests/spdx/validation/test_spdx_id_validators.py::test_invalid_spdx_id[SPDXRef-42+-expected_messages4]", "tests/spdx/validation/test_spdx_id_validators.py::test_valid_spdx_id_with_check_document[DocumentRef-external:SPDXRef-File]", "tests/spdx/validation/test_spdx_id_validators.py::test_valid_spdx_id_with_check_document[SPDXRef-DOCUMENT]", "tests/spdx/validation/test_spdx_id_validators.py::test_valid_spdx_id_with_check_document[SPDXRef-File1]", "tests/spdx/validation/test_spdx_id_validators.py::test_valid_spdx_id_with_check_document[SPDXRef-Package1]", "tests/spdx/validation/test_spdx_id_validators.py::test_valid_spdx_id_with_check_document[SPDXRef-Snippet1]", "tests/spdx/validation/test_spdx_id_validators.py::test_valid_spdx_id_with_check_files[DocumentRef-external:SPDXRef-File]", "tests/spdx/validation/test_spdx_id_validators.py::test_valid_spdx_id_with_check_files[SPDXRef-File1]" ]
{ "failed_lite_validators": [ "has_short_problem_statement", "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false }
2023-02-01 12:03:58+00:00
apache-2.0
5,627
spdx__tools-python-467
diff --git a/src/spdx/writer/tagvalue/tagvalue_writer_helper_functions.py b/src/spdx/writer/tagvalue/tagvalue_writer_helper_functions.py index 4bf7c71..8468719 100644 --- a/src/spdx/writer/tagvalue/tagvalue_writer_helper_functions.py +++ b/src/spdx/writer/tagvalue/tagvalue_writer_helper_functions.py @@ -14,7 +14,7 @@ from spdx.model.actor import Actor from spdx.model.file import File from license_expression import LicenseExpression from spdx.model.package import Package -from spdx.model.relationship import Relationship +from spdx.model.relationship import Relationship, RelationshipType from spdx.model.snippet import Snippet from spdx.model.spdx_no_assertion import SpdxNoAssertion from spdx.model.spdx_none import SpdxNone @@ -81,19 +81,19 @@ def scan_relationships(relationships: List[Relationship], packages: List[Package files_by_spdx_id = {file.spdx_id: file for file in files} packages_spdx_ids = [package.spdx_id for package in packages] for relationship in relationships: - if relationship.relationship_type == "CONTAINS" and \ + if relationship.relationship_type == RelationshipType.CONTAINS and \ relationship.spdx_element_id in packages_spdx_ids and \ - relationship.related_spdx_element in files_by_spdx_id.keys(): + relationship.related_spdx_element_id in files_by_spdx_id.keys(): contained_files_by_package_id.setdefault(relationship.spdx_element_id, []).append( - files_by_spdx_id[relationship.related_spdx_element]) - if relationship.has_comment: + files_by_spdx_id[relationship.related_spdx_element_id]) + if relationship.comment: relationships_to_write.append(relationship) - elif relationship.relationship_type == "CONTAINED_BY" and \ - relationship.related_spdx_element in packages_spdx_ids and \ + elif relationship.relationship_type == RelationshipType.CONTAINED_BY and \ + relationship.related_spdx_element_id in packages_spdx_ids and \ relationship.spdx_element_id in files_by_spdx_id: - contained_files_by_package_id.setdefault(relationship.related_spdx_element, []).append( + contained_files_by_package_id.setdefault(relationship.related_spdx_element_id, []).append( files_by_spdx_id[relationship.spdx_element_id]) - if relationship.has_comment: + if relationship.comment: relationships_to_write.append(relationship) else: relationships_to_write.append(relationship)
spdx/tools-python
788e8d4aec940a3c2b2f16bd731564ee1a8da5d8
diff --git a/tests/spdx/writer/tagvalue/test_tagvalue_writer_helper_functions.py b/tests/spdx/writer/tagvalue/test_tagvalue_writer_helper_functions.py new file mode 100644 index 0000000..300a3fd --- /dev/null +++ b/tests/spdx/writer/tagvalue/test_tagvalue_writer_helper_functions.py @@ -0,0 +1,33 @@ +# Copyright (c) 2023 spdx contributors +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# http://www.apache.org/licenses/LICENSE-2.0 +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +from spdx.model.relationship import RelationshipType +from spdx.writer.tagvalue.tagvalue_writer_helper_functions import scan_relationships +from tests.spdx.fixtures import package_fixture, file_fixture, relationship_fixture + + +def test_scan_relationships(): + first_package_spdx_id = "SPDXRef-Package1" + second_package_spdx_id = "SPDXRef-Package2" + packages = [package_fixture(spdx_id=first_package_spdx_id), package_fixture(spdx_id=second_package_spdx_id)] + file_spdx_id = "SPDXRef-File" + files = [file_fixture(spdx_id=file_spdx_id)] + relationships = [ + relationship_fixture(spdx_element_id=first_package_spdx_id, relationship_type=RelationshipType.CONTAINS, + related_spdx_element_id=file_spdx_id, comment=None), + relationship_fixture(spdx_element_id=second_package_spdx_id, relationship_type=RelationshipType.CONTAINS, + related_spdx_element_id=file_spdx_id, comment=None) + ] + + relationships_to_write, contained_files_by_package_id = scan_relationships(relationships, packages, files) + + assert relationships_to_write == [] + assert contained_files_by_package_id == {first_package_spdx_id: files, + second_package_spdx_id: files}
Tag-value writer: packages should precede contained files. According to the [spec](https://spdx.github.io/spdx-spec/v2.3/composition-of-an-SPDX-document/#5.2.3): > Files are assumed to be associated with the package information that immediately precedes it, if a package exists. [The implemented logic](https://github.com/spdx/tools-python/blob/788e8d4aec940a3c2b2f16bd731564ee1a8da5d8/src/spdx/writer/tagvalue/tagvalue_writer_helper_functions.py#L84) in the tag-value writer checks against strings and not `RelationshipType`, which leads to output where contained files do not follow the related package.
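The root cause generalizes: a Python `Enum` member never compares equal to its name given as a plain string, so the comparison silently evaluates to `False` instead of raising an error. A minimal sketch (names mirror the patch but the snippet is illustrative):

```python
from enum import Enum, auto


class RelationshipType(Enum):
    CONTAINS = auto()
    CONTAINED_BY = auto()


rel = RelationshipType.CONTAINS
# Comparing an Enum member to a string is always False; nothing fails loudly,
# so the mismatch goes unnoticed until the writer output is wrong.
print(rel == "CONTAINS")                 # False
print(rel == RelationshipType.CONTAINS)  # True
```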
0.0
788e8d4aec940a3c2b2f16bd731564ee1a8da5d8
[ "tests/spdx/writer/tagvalue/test_tagvalue_writer_helper_functions.py::test_scan_relationships" ]
[]
{ "failed_lite_validators": [ "has_hyperlinks", "has_issue_reference" ], "has_test_patch": true, "is_lite": false }
2023-02-08 10:04:37+00:00
apache-2.0
5,628
spdx__tools-python-480
diff --git a/spdx/parsers/lexers/tagvalue.py b/spdx/parsers/lexers/tagvalue.py index ba7aa62..61a99ad 100644 --- a/spdx/parsers/lexers/tagvalue.py +++ b/spdx/parsers/lexers/tagvalue.py @@ -8,6 +8,7 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. +import re from ply import lex @@ -170,10 +171,6 @@ class Lexer(object): t.value = t.value[1:].strip() return t - def t_RANGE(self, t): - r":\s*\d+:\d+" - t.value = t.value[1:].strip() - return t def t_DOC_REF_ID(self, t): r":\s*DocumentRef-([A-Za-z0-9\+\.\-]+)" @@ -221,6 +218,10 @@ class Lexer(object): t.value = t.value[1:].strip() if t.value in self.reserved.keys(): t.type = self.reserved[t.value] + return t + range_pattern = re.compile("\d+:\d(?!\D)") + if range_pattern.match(t.value): + t.type = "RANGE" else: t.type = "LINE" return t
spdx/tools-python
b7294172d36425718c98876810fe727c224272d3
diff --git a/tests/test_tag_value_parser.py b/tests/test_tag_value_parser.py index 4008c30..6ce4af7 100644 --- a/tests/test_tag_value_parser.py +++ b/tests/test_tag_value_parser.py @@ -50,7 +50,7 @@ review_str = '\n'.join([ package_str = '\n'.join([ 'PackageName: Test', 'SPDXID: SPDXRef-Package', - 'PackageVersion: Version 0.9.2', + 'PackageVersion: 1:2.36.1-8+deb11u1', 'PackageDownloadLocation: http://example.com/test', 'FilesAnalyzed: True', 'PackageSummary: <text>Test package</text>', @@ -104,7 +104,7 @@ snippet_str = '\n'.join([ 'SnippetLicenseConcluded: Apache-2.0', 'LicenseInfoInSnippet: Apache-2.0', 'SnippetByteRange: 310:420', - 'SnippetLineRange: 5:23', + 'SnippetLineRange: 5:7', ]) annotation_str = '\n'.join([ @@ -195,7 +195,7 @@ class TestLexer(TestCase): self.token_assert_helper(self.l.token(), 'SPDX_ID', 'SPDXID', 2) self.token_assert_helper(self.l.token(), 'LINE', 'SPDXRef-Package', 2) self.token_assert_helper(self.l.token(), 'PKG_VERSION', 'PackageVersion', 3) - self.token_assert_helper(self.l.token(), 'LINE', 'Version 0.9.2', 3) + self.token_assert_helper(self.l.token(), 'LINE', '1:2.36.1-8+deb11u1', 3) self.token_assert_helper(self.l.token(), 'PKG_DOWN', 'PackageDownloadLocation', 4) self.token_assert_helper(self.l.token(), 'LINE', 'http://example.com/test', 4) self.token_assert_helper(self.l.token(), 'PKG_FILES_ANALYZED', 'FilesAnalyzed', 5) @@ -275,7 +275,7 @@ class TestLexer(TestCase): self.token_assert_helper(self.l.token(), 'SNIPPET_BYTE_RANGE', 'SnippetByteRange', 9) self.token_assert_helper(self.l.token(), 'RANGE', '310:420', 9) self.token_assert_helper(self.l.token(), 'SNIPPET_LINE_RANGE', 'SnippetLineRange', 10) - self.token_assert_helper(self.l.token(), 'RANGE', '5:23', 10) + self.token_assert_helper(self.l.token(), 'RANGE', '5:7', 10) def test_annotation(self): data = annotation_str @@ -337,7 +337,7 @@ class TestParser(TestCase): assert not error assert document.package.name == 'Test' assert document.package.spdx_id == 'SPDXRef-Package' - assert document.package.version == 'Version 0.9.2' + assert document.package.version == '1:2.36.1-8+deb11u1' assert len(document.package.licenses_from_files) == 2 assert (document.package.conc_lics.identifier == 'LicenseRef-2.0 AND Apache-2.0') assert document.package.files_analyzed is True @@ -408,4 +408,4 @@ class TestParser(TestCase): assert document.snippet[-1].byte_range[0] == 310 assert document.snippet[-1].byte_range[1] == 420 assert document.snippet[-1].line_range[0] == 5 - assert document.snippet[-1].line_range[1] == 23 + assert document.snippet[-1].line_range[1] == 7
PackageVersion values with ":" are considered invalid tokens. While applying [sbomqs quality checks](https://github.com/interlynk-io/sbomqs) on SBOMs, we found the parser failing to parse versions with ":" in them. Running ``` pyspdxtools_parser --file bom.nginx.spdx ``` on the attached file produces errors about the PackageVersion field: ``` PackageVersion must be single line of text, line: 148 PackageVersion must be single line of text, line: 272 ... ``` However, the included version is indeed a single line: ``` PackageName: bsdutils PackageVersion: 1:2.36.1-8+deb11u1 ``` (note the ":" in the version). This is not an issue for spdx-json because of the quotes. **What did you expect to happen?** The valid versions are accepted correctly. **What happened instead?** The parser considers the SBOM invalid; see [bom.nginx.spdx.txt](https://github.com/spdx/tools-python/files/10726753/bom.nginx.spdx.txt).
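The fix in the patch above distinguishes byte/line ranges from colon-containing version strings with a negative lookahead. A small standalone check of that pattern, using values taken from the report and the test patch:

```python
import re

# Pattern from the patch: "digits:digit" not followed by a non-digit, so
# Debian-style epochs such as 1:2.36.1-8+deb11u1 fall through to a LINE token.
range_pattern = re.compile(r"\d+:\d(?!\D)")

print(bool(range_pattern.match("310:420")))             # True  -> RANGE token
print(bool(range_pattern.match("5:7")))                 # True  -> RANGE token
print(bool(range_pattern.match("1:2.36.1-8+deb11u1")))  # False -> plain LINE
```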
0.0
b7294172d36425718c98876810fe727c224272d3
[ "tests/test_tag_value_parser.py::TestLexer::test_package", "tests/test_tag_value_parser.py::TestParser::test_annotation", "tests/test_tag_value_parser.py::TestParser::test_creation_info", "tests/test_tag_value_parser.py::TestParser::test_doc", "tests/test_tag_value_parser.py::TestParser::test_file", "tests/test_tag_value_parser.py::TestParser::test_package", "tests/test_tag_value_parser.py::TestParser::test_review", "tests/test_tag_value_parser.py::TestParser::test_snippet" ]
[ "tests/test_tag_value_parser.py::TestLexer::test_annotation", "tests/test_tag_value_parser.py::TestLexer::test_creation_info", "tests/test_tag_value_parser.py::TestLexer::test_document", "tests/test_tag_value_parser.py::TestLexer::test_external_document_references", "tests/test_tag_value_parser.py::TestLexer::test_review_info", "tests/test_tag_value_parser.py::TestLexer::test_snippet", "tests/test_tag_value_parser.py::TestLexer::test_unknown_tag", "tests/test_tag_value_parser.py::TestParser::test_unknown_tag" ]
{ "failed_lite_validators": [ "has_hyperlinks" ], "has_test_patch": true, "is_lite": false }
2023-02-14 08:38:48+00:00
apache-2.0
5,629
spdx__tools-python-481
diff --git a/spdx/parsers/lexers/tagvalue.py b/spdx/parsers/lexers/tagvalue.py index 61a99ad..4421bae 100644 --- a/spdx/parsers/lexers/tagvalue.py +++ b/spdx/parsers/lexers/tagvalue.py @@ -235,7 +235,7 @@ class Lexer(object): t.lexer.lineno += len(t.value) def t_whitespace(self, t): - r"\s+" + r"[ \t]+" pass def build(self, **kwargs): diff --git a/spdx/parsers/tagvalue.py b/spdx/parsers/tagvalue.py index bf2e937..e3ecffd 100644 --- a/spdx/parsers/tagvalue.py +++ b/spdx/parsers/tagvalue.py @@ -37,6 +37,8 @@ ERROR_MESSAGES = { "DOC_VERSION_VALUE_TYPE": "Invalid SPDXVersion value, must be SPDX-M.N where M and N are numbers. Line: {0}", "DOC_NAME_VALUE": "DocumentName must be single line of text, line: {0}", "DOC_SPDX_ID_VALUE": "Invalid SPDXID value, SPDXID must be SPDXRef-DOCUMENT, line: {0}", + "LIC_LIST_VER_VALUE": "Invalid LicenseListVersion '{0}', must be of type M.N where M and N are numbers. Line: {1}", + "LIC_LIST_VER_VALUE_TYPE": "Could not read value after LicenseListVersion-tag. Line{0}", "EXT_DOC_REF_VALUE": "ExternalDocumentRef must contain External Document ID, SPDX Document URI and Checksum" "in the standard format, line:{0}.", "DOC_COMMENT_VALUE_TYPE": "DocumentComment value must be free form text between <text></text> tags"
spdx/tools-python
3838ef18317ca20626387f5aa05a506d71cf606a
diff --git a/tests/test_tag_value_parser.py b/tests/test_tag_value_parser.py index 6ce4af7..00be54b 100644 --- a/tests/test_tag_value_parser.py +++ b/tests/test_tag_value_parser.py @@ -115,6 +115,9 @@ annotation_str = '\n'.join([ 'SPDXREF: SPDXRef-DOCUMENT' ]) +document_str_with_empty_line = "\n".join( + ['SPDXVersion: SPDX-2.1', ' ', 'DataLicense: CC0-1.0']) + class TestLexer(TestCase): maxDiff = None @@ -291,6 +294,14 @@ class TestLexer(TestCase): self.token_assert_helper(self.l.token(), 'ANNOTATION_SPDX_ID', 'SPDXREF', 5) self.token_assert_helper(self.l.token(), 'LINE', 'SPDXRef-DOCUMENT', 5) + def test_correct_line_number_with_empty_line_between(self): + data = document_str_with_empty_line + self.l.input(data) + self.token_assert_helper(self.l.token(), 'DOC_VERSION', 'SPDXVersion', 1) + self.token_assert_helper(self.l.token(), 'LINE', 'SPDX-2.1', 1) + self.token_assert_helper(self.l.token(), 'DOC_LICENSE', 'DataLicense', 3) + self.token_assert_helper(self.l.token(), 'LINE', 'CC0-1.0', 3) + def token_assert_helper(self, token, ttype, value, line): assert token.type == ttype assert token.value == value
Line mismatch in error messages of the tag-value parser. When we try to parse a tag-value file that contains errors, the tag-value parser provides an error message like ``` LicenseID must start with 'LicenseRef-', line: 4948 ``` with an explicit line number. This line number does not always match the incorrect line in the file, probably because the parser ignores empty lines and comments. To provide helpful additional information, the reported line number should match the actual place within the file.
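The underlying lexer issue, visible in the patch, is that an ignore-whitespace rule written as `\s+` also consumes newlines, so a dedicated newline rule that increments the line counter never fires. A standalone regex sketch of the distinction:

```python
import re

WS_GREEDY = re.compile(r"\s+")     # matches spaces, tabs AND newlines
WS_INLINE = re.compile(r"[ \t]+")  # matches horizontal whitespace only

# With the greedy rule, "\n" is swallowed before any line-counting rule can
# see it, so reported line numbers drift past blank lines.
print(bool(WS_GREEDY.match("\n")))  # True
print(bool(WS_INLINE.match("\n")))  # False
```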
0.0
3838ef18317ca20626387f5aa05a506d71cf606a
[ "tests/test_tag_value_parser.py::TestLexer::test_correct_line_number_with_empty_line_between" ]
[ "tests/test_tag_value_parser.py::TestLexer::test_annotation", "tests/test_tag_value_parser.py::TestLexer::test_creation_info", "tests/test_tag_value_parser.py::TestLexer::test_document", "tests/test_tag_value_parser.py::TestLexer::test_external_document_references", "tests/test_tag_value_parser.py::TestLexer::test_package", "tests/test_tag_value_parser.py::TestLexer::test_review_info", "tests/test_tag_value_parser.py::TestLexer::test_snippet", "tests/test_tag_value_parser.py::TestLexer::test_unknown_tag", "tests/test_tag_value_parser.py::TestParser::test_annotation", "tests/test_tag_value_parser.py::TestParser::test_creation_info", "tests/test_tag_value_parser.py::TestParser::test_doc", "tests/test_tag_value_parser.py::TestParser::test_file", "tests/test_tag_value_parser.py::TestParser::test_package", "tests/test_tag_value_parser.py::TestParser::test_review", "tests/test_tag_value_parser.py::TestParser::test_snippet", "tests/test_tag_value_parser.py::TestParser::test_unknown_tag" ]
{ "failed_lite_validators": [ "has_many_modified_files" ], "has_test_patch": true, "is_lite": false }
2023-02-14 14:17:09+00:00
apache-2.0
5,630
spdx__tools-python-493
diff --git a/src/spdx/validation/file_validator.py b/src/spdx/validation/file_validator.py index a21f28f..745131b 100644 --- a/src/spdx/validation/file_validator.py +++ b/src/spdx/validation/file_validator.py @@ -41,6 +41,10 @@ def validate_file_within_document(file: File, spdx_version: str, document: Docum for message in validate_spdx_id(file.spdx_id, document): validation_messages.append(ValidationMessage(message, context)) + validation_messages.extend(validate_license_expression(file.license_concluded, document, file.spdx_id)) + + validation_messages.extend(validate_license_expressions(file.license_info_in_file, document, file.spdx_id)) + validation_messages.extend(validate_file(file, spdx_version, context)) return validation_messages @@ -67,10 +71,6 @@ def validate_file(file: File, spdx_version: str, context: Optional[ValidationCon validation_messages.extend(validate_checksums(file.checksums, file.spdx_id, spdx_version)) - validation_messages.extend(validate_license_expression(file.license_concluded)) - - validation_messages.extend(validate_license_expressions(file.license_info_in_file)) - if spdx_version == "SPDX-2.2": if file.license_concluded is None: validation_messages.append( diff --git a/src/spdx/validation/license_expression_validator.py b/src/spdx/validation/license_expression_validator.py index 0d6d0e6..f2c8ddf 100644 --- a/src/spdx/validation/license_expression_validator.py +++ b/src/spdx/validation/license_expression_validator.py @@ -11,25 +11,60 @@ from typing import List, Optional, Union -from license_expression import LicenseExpression +from license_expression import LicenseExpression, get_spdx_licensing, ExpressionError, ExpressionParseError +from spdx.model.document import Document + from spdx.model.spdx_no_assertion import SpdxNoAssertion from spdx.model.spdx_none import SpdxNone -from spdx.validation.validation_message import ValidationMessage +from spdx.validation.validation_message import ValidationMessage, ValidationContext, SpdxElementType def validate_license_expressions(license_expressions: Optional[ - Union[List[LicenseExpression], SpdxNoAssertion, SpdxNone]]) -> List[ValidationMessage]: + Union[List[LicenseExpression], SpdxNoAssertion, SpdxNone]], document: Document, parent_id: str) -> List[ValidationMessage]: if license_expressions in [SpdxNoAssertion(), SpdxNone(), None]: return [] - error_messages = [] + context = ValidationContext(parent_id=parent_id, element_type=SpdxElementType.LICENSE_EXPRESSION, full_element=license_expressions) + validation_messages = [] for license_expression in license_expressions: - error_messages.extend(validate_license_expression(license_expression)) + validation_messages.extend(validate_license_expression(license_expression, document, parent_id, context)) + + return validation_messages + + +def validate_license_expression(license_expression: Optional[ + Union[LicenseExpression, SpdxNoAssertion, SpdxNone]], document: Document, parent_id: str, context: ValidationContext = None) -> List[ValidationMessage]: + if license_expression in [SpdxNoAssertion(), SpdxNone(), None]: + return [] + + if not context: + context = ValidationContext(parent_id=parent_id, element_type=SpdxElementType.LICENSE_EXPRESSION, full_element=license_expression) + + validation_messages = [] + license_ref_ids: List[str] = [license_ref.license_id for license_ref in document.extracted_licensing_info] - return error_messages + for non_spdx_token in get_spdx_licensing().validate(license_expression).invalid_symbols: + if non_spdx_token not in license_ref_ids: + 
validation_messages.append( + ValidationMessage( + f"Unrecognized license reference: {non_spdx_token}. license_expression must only use IDs from the license list or extracted licensing info, but is: {license_expression}", + context) + ) + try: + get_spdx_licensing().parse(str(license_expression), validate=True, strict=True) + except ExpressionParseError as err: + # This error is raised when an exception symbol is used as a license symbol and vice versa. + # So far, it only catches the first such error in the provided string. + validation_messages.append( + ValidationMessage( + f"{err}. for license_expression: {license_expression}", + context) + ) + except ExpressionError: + # This error is raised for invalid symbols within the license_expression, but it provides only a string of these. + # On the other hand, get_spdx_licensing().validate() gives an actual list of invalid symbols, so this is handled above. + pass -def validate_license_expression(license_expression: LicenseExpression) -> List[ValidationMessage]: - # TODO: implement this once we have a better license expression model: https://github.com/spdx/tools-python/issues/374 - return [] + return validation_messages diff --git a/src/spdx/validation/package_validator.py b/src/spdx/validation/package_validator.py index 8889110..4cd850f 100644 --- a/src/spdx/validation/package_validator.py +++ b/src/spdx/validation/package_validator.py @@ -61,6 +61,21 @@ def validate_package_within_document(package: Package, spdx_version: str, docume context) ) + validation_messages.extend(validate_license_expression(package.license_concluded, document, package.spdx_id)) + + license_info_from_files = package.license_info_from_files + if license_info_from_files: + if not package.files_analyzed: + validation_messages.append( + ValidationMessage( + f"license_info_from_files must be None if files_analyzed is False, but is: {license_info_from_files}", + context) + ) + else: + validation_messages.extend(validate_license_expressions(license_info_from_files, document, package.spdx_id)) + + validation_messages.extend(validate_license_expression(package.license_declared, document, package.spdx_id)) + validation_messages.extend(validate_package(package, spdx_version, context)) return validation_messages @@ -94,21 +109,6 @@ def validate_package(package: Package, spdx_version: str, context: Optional[Vali validation_messages.extend(validate_checksums(package.checksums, package.spdx_id, spdx_version)) - validation_messages.extend(validate_license_expression(package.license_concluded)) - - license_info_from_files = package.license_info_from_files - if license_info_from_files: - if not package.files_analyzed: - validation_messages.append( - ValidationMessage( - f"license_info_from_files must be None if files_analyzed is False, but is: {license_info_from_files}", - context) - ) - else: - validation_messages.extend(validate_license_expressions(license_info_from_files)) - - validation_messages.extend(validate_license_expression(package.license_declared)) - validation_messages.extend( validate_external_package_refs(package.external_references, package.spdx_id, spdx_version)) diff --git a/src/spdx/validation/snippet_validator.py b/src/spdx/validation/snippet_validator.py index 21d2e0d..90a110f 100644 --- a/src/spdx/validation/snippet_validator.py +++ b/src/spdx/validation/snippet_validator.py @@ -46,6 +46,10 @@ def validate_snippet_within_document(snippet: Snippet, spdx_version: str, docume for message in messages: validation_messages.append(ValidationMessage(message, context)) 
+ validation_messages.extend(validate_license_expression(snippet.license_concluded, document, snippet.spdx_id)) + + validation_messages.extend(validate_license_expressions(snippet.license_info_in_snippet, document, snippet.spdx_id)) + validation_messages.extend(validate_snippet(snippet, spdx_version, context)) return validation_messages @@ -86,10 +90,6 @@ def validate_snippet(snippet: Snippet, spdx_version: str, context: Optional[Vali context) ) - validation_messages.extend(validate_license_expression(snippet.license_concluded)) - - validation_messages.extend(validate_license_expressions(snippet.license_info_in_snippet)) - if spdx_version == "SPDX-2.2": if snippet.license_concluded is None: validation_messages.append( diff --git a/src/spdx/validation/validation_message.py b/src/spdx/validation/validation_message.py index bf14901..70001b7 100644 --- a/src/spdx/validation/validation_message.py +++ b/src/spdx/validation/validation_message.py @@ -15,6 +15,7 @@ from typing import Optional, Any class SpdxElementType(Enum): + LICENSE_EXPRESSION = auto() PACKAGE_VERIFICATION_CODE = auto() EXTERNAL_DOCUMENT_REF = auto() CHECKSUM = auto() @@ -25,7 +26,6 @@ class SpdxElementType(Enum): PACKAGE = auto() FILE = auto() SNIPPET = auto() - LICENSE = auto() ANNOTATION = auto() RELATIONSHIP = auto() EXTRACTED_LICENSING_INFO = auto()
spdx/tools-python
c8fd2d7ac7547e1db1c4aef5556238fa9c430d48
diff --git a/tests/spdx/validation/test_license_expression_validator.py b/tests/spdx/validation/test_license_expression_validator.py index 458a98a..b6b89ba 100644 --- a/tests/spdx/validation/test_license_expression_validator.py +++ b/tests/spdx/validation/test_license_expression_validator.py @@ -10,14 +10,94 @@ # limitations under the License. from typing import List +from unittest import TestCase -from license_expression import Licensing -from spdx.validation.license_expression_validator import validate_license_expression -from spdx.validation.validation_message import ValidationMessage +import pytest +from license_expression import get_spdx_licensing, LicenseExpression +from spdx.model.document import Document +from spdx.model.spdx_no_assertion import SpdxNoAssertion +from spdx.model.spdx_none import SpdxNone +from spdx.validation.license_expression_validator import validate_license_expression, validate_license_expressions +from spdx.validation.validation_message import ValidationMessage, ValidationContext, SpdxElementType +from tests.spdx.fixtures import document_fixture, extracted_licensing_info_fixture -def test_valid_license_expression(): - license_expression = Licensing().parse("something") - validation_messages: List[ValidationMessage] = validate_license_expression(license_expression) +FIXTURE_LICENSE_ID = extracted_licensing_info_fixture().license_id + + [email protected]("expression_string", + ["MIT", FIXTURE_LICENSE_ID, + f"GPL-2.0-only with GPL-CC-1.0 and {FIXTURE_LICENSE_ID} with 389-exception or Beerware"]) +def test_valid_license_expression(expression_string): + document: Document = document_fixture() + license_expression: LicenseExpression = get_spdx_licensing().parse(expression_string) + validation_messages: List[ValidationMessage] = validate_license_expression(license_expression, document, + parent_id="SPDXRef-File") + + assert validation_messages == [] + + [email protected]("expression", [SpdxNone(), SpdxNoAssertion()]) +def test_none_and_no_assertion(expression): + document: Document = document_fixture() + validation_messages: List[ValidationMessage] = validate_license_expression(expression, document, + parent_id="SPDXRef-File") + assert validation_messages == [] + + [email protected]("expression_list", + [SpdxNone(), SpdxNoAssertion(), + [get_spdx_licensing().parse("MIT and GPL-3.0-only"), + get_spdx_licensing().parse(FIXTURE_LICENSE_ID)] + ]) +def test_valid_license_expressions(expression_list): + document: Document = document_fixture() + validation_messages: List[ValidationMessage] = validate_license_expressions(expression_list, document, + parent_id="SPDXRef-File") assert validation_messages == [] + + [email protected]("expression_string, unknown_symbols", + [(f"{FIXTURE_LICENSE_ID} or LicenseRef-22", ["LicenseRef-22"]), + ("nope with 389-exception and _.- or LicenseRef-10", ["nope", "_.-", "LicenseRef-10"]) + ]) +def test_invalid_license_expression_with_unknown_symbols(expression_string, unknown_symbols): + document: Document = document_fixture() + license_expression: LicenseExpression = get_spdx_licensing().parse(expression_string) + parent_id = "SPDXRef-File" + context = ValidationContext(parent_id=parent_id, element_type=SpdxElementType.LICENSE_EXPRESSION, + full_element=license_expression) + + validation_messages: List[ValidationMessage] = validate_license_expression(license_expression, document, parent_id) + expected_messages = [ValidationMessage( + f"Unrecognized license reference: {symbol}. 
license_expression must only use IDs from the license list or extracted licensing info, but is: {license_expression}", + context + ) for symbol in unknown_symbols] + + TestCase().assertCountEqual(validation_messages, expected_messages) + + [email protected]("expression_string, expected_message", + [("MIT with MIT", + 'A plain license symbol cannot be used as an exception in a "WITH symbol" statement. for token: "MIT" at position: 9. for license_expression: MIT WITH MIT'), + (f"GPL-2.0-or-later and {FIXTURE_LICENSE_ID} with {FIXTURE_LICENSE_ID}", + f'A plain license symbol cannot be used as an exception in a "WITH symbol" statement. for token: "{FIXTURE_LICENSE_ID}" at position: 39. for license_expression: GPL-2.0-or-later AND {FIXTURE_LICENSE_ID} WITH {FIXTURE_LICENSE_ID}'), + (f"GPL-2.0-or-later with MIT and {FIXTURE_LICENSE_ID} with GPL-2.0-or-later", + f'A plain license symbol cannot be used as an exception in a "WITH symbol" statement. for token: "MIT" at position: 22. for license_expression: GPL-2.0-or-later WITH MIT AND {FIXTURE_LICENSE_ID} WITH GPL-2.0-or-later'), + ("389-exception with 389-exception", + 'A license exception symbol can only be used as an exception in a "WITH exception" statement. for token: "389-exception". for license_expression: 389-exception WITH 389-exception'), + ("389-exception with MIT", + 'A license exception symbol can only be used as an exception in a "WITH exception" statement. for token: "389-exception". for license_expression: 389-exception WITH MIT'), + ]) +def test_invalid_license_expression_with_invalid_exceptions(expression_string, expected_message): + document: Document = document_fixture() + license_expression: LicenseExpression = get_spdx_licensing().parse(expression_string) + parent_id = "SPDXRef-File" + context = ValidationContext(parent_id=parent_id, element_type=SpdxElementType.LICENSE_EXPRESSION, + full_element=license_expression) + + validation_messages: List[ValidationMessage] = validate_license_expression(license_expression, document, parent_id) + expected_messages = [ValidationMessage(expected_message, context)] + + assert validation_messages == expected_messages
Implement license expression validation. Can be done after #10; part of #307. Also add more tests for this.
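The patch drives the validation through the `license-expression` package. A minimal usage sketch of the two calls it relies on (the example expressions are illustrative, not taken from the dataset):

```python
from license_expression import get_spdx_licensing

licensing = get_spdx_licensing()

# validate() reports symbols that are not on the SPDX license list; the
# patch checks these against the document's extracted licensing info ids.
info = licensing.validate("MIT AND not-a-real-license")
print(info.invalid_symbols)  # e.g. ['not-a-real-license']

# parse() builds a normalized LicenseExpression object.
expression = licensing.parse("MIT OR Apache-2.0")
print(expression)  # MIT OR Apache-2.0
```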
0.0
c8fd2d7ac7547e1db1c4aef5556238fa9c430d48
[ "tests/spdx/validation/test_license_expression_validator.py::test_valid_license_expression[MIT]", "tests/spdx/validation/test_license_expression_validator.py::test_valid_license_expression[LicenseRef-1]", "tests/spdx/validation/test_license_expression_validator.py::test_valid_license_expression[GPL-2.0-only", "tests/spdx/validation/test_license_expression_validator.py::test_none_and_no_assertion[expression0]", "tests/spdx/validation/test_license_expression_validator.py::test_none_and_no_assertion[expression1]", "tests/spdx/validation/test_license_expression_validator.py::test_valid_license_expressions[expression_list0]", "tests/spdx/validation/test_license_expression_validator.py::test_valid_license_expressions[expression_list1]", "tests/spdx/validation/test_license_expression_validator.py::test_valid_license_expressions[expression_list2]", "tests/spdx/validation/test_license_expression_validator.py::test_invalid_license_expression_with_unknown_symbols[LicenseRef-1", "tests/spdx/validation/test_license_expression_validator.py::test_invalid_license_expression_with_unknown_symbols[nope", "tests/spdx/validation/test_license_expression_validator.py::test_invalid_license_expression_with_invalid_exceptions[MIT", "tests/spdx/validation/test_license_expression_validator.py::test_invalid_license_expression_with_invalid_exceptions[GPL-2.0-or-later", "tests/spdx/validation/test_license_expression_validator.py::test_invalid_license_expression_with_invalid_exceptions[389-exception" ]
[]
{ "failed_lite_validators": [ "has_short_problem_statement", "has_issue_reference", "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false }
2023-02-23 11:56:32+00:00
apache-2.0
5,631
spdx__tools-python-494
diff --git a/src/spdx/clitools/pyspdxtools.py b/src/spdx/clitools/pyspdxtools.py index ed32dbb..44a4a3b 100644 --- a/src/spdx/clitools/pyspdxtools.py +++ b/src/spdx/clitools/pyspdxtools.py @@ -10,6 +10,7 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. +import logging import sys from typing import List @@ -26,8 +27,11 @@ from spdx.writer.write_anything import write_file @click.command() @click.option("--infile", "-i", help="The file containing the document to be validated or converted.") [email protected]("--outfile", "-o", help="The file to write the converted document to (write a dash for output to stdout or omit for no conversion).") [email protected]("--version", help='The SPDX version to be used during parsing and validation ("SPDX-2.2" or "SPDX-2.3"). Will be read from the document if not provided.', default=None) [email protected]("--outfile", "-o", + help="The file to write the converted document to (write a dash for output to stdout or omit for no conversion).") [email protected]("--version", + help='The SPDX version to be used during parsing and validation ("SPDX-2.2" or "SPDX-2.3"). Will be read from the document if not provided.', + default=None) @click.option("--novalidation", is_flag=True, help="Don't validate the provided document.") def main(infile: str, outfile: str, version: str, novalidation: bool): """ @@ -46,34 +50,34 @@ def main(infile: str, outfile: str, version: str, novalidation: bool): version = document.creation_info.spdx_version if not version in ["SPDX-2.2", "SPDX-2.3"]: - print(f"This tool only supports SPDX versions SPDX-2.2 and SPDX-2.3, but got: {version}", - file=sys.stderr) + logging.error(f"This tool only supports SPDX versions SPDX-2.2 and SPDX-2.3, but got: {version}") sys.exit(1) validation_messages: List[ValidationMessage] = validate_full_spdx_document(document, version) if validation_messages: - print("The document is invalid. The following issues have been found:", file=sys.stderr) - for message in validation_messages: - print(message.validation_message, file=sys.stderr) + log_string = "\n".join( + ["The document is invalid. 
The following issues have been found:"] + + [message.validation_message for message in validation_messages]) + logging.error(log_string) sys.exit(1) else: - print("The document is valid.", file=sys.stderr) + logging.info("The document is valid.") if outfile and outfile != "-": write_file(document, outfile, validate=False) except NotImplementedError as err: - print(err.args[0], file=sys.stderr) - print("Please note that this project is currently undergoing a major refactoring and therefore missing " - "a few features which will be added in time (refer to https://github.com/spdx/tools-python/issues " - "for insights into the current status).\n" - "In the meantime, please use the PyPI release version 0.7.0.", file=sys.stderr) + logging.error(err.args[0] + + "\nPlease note that this project is currently undergoing a major refactoring and therefore missing " + "a few features which will be added in time (refer to https://github.com/spdx/tools-python/issues " + "for insights into the current status).\n" + "In the meantime, please use the current PyPI release version.") sys.exit(1) except SPDXParsingError as err: - print("There have been issues while parsing the provided document:", file=sys.stderr) - for message in err.get_messages(): - print(message, file=sys.stderr) + log_string = "\n".join(["There have been issues while parsing the provided document:"] + + [message for message in err.get_messages()]) + logging.error(log_string) sys.exit(1) diff --git a/src/spdx/parser/rdf/creation_info_parser.py b/src/spdx/parser/rdf/creation_info_parser.py index 522b21e..7ca2215 100644 --- a/src/spdx/parser/rdf/creation_info_parser.py +++ b/src/spdx/parser/rdf/creation_info_parser.py @@ -8,6 +8,7 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. +import logging import sys from typing import Tuple from urllib.parse import urldefrag @@ -72,19 +73,23 @@ def parse_namespace_and_spdx_id(graph: Graph) -> (str, str): try: subject = graph.value(predicate=RDF.type, object=SPDX_NAMESPACE.SpdxDocument, any=False) except UniquenessError: - sys.exit("Multiple SpdxDocuments found, can't parse rdf file.") + logging.error("Multiple SpdxDocuments found, can't parse rdf file.") + sys.exit(1) if not subject: - sys.exit("No SpdxDocument found, can't parse rdf file.") + logging.error("No SpdxDocument found, can't parse rdf file.") + sys.exit(1) if not "#" in subject: - sys.exit("No '#' found in the URI of SpdxDocument, " - "the URI for the SpdxDocument should be the namespace appended by '#SPDXRef-DOCUMENT.") + logging.error("No '#' found in the URI of SpdxDocument, " + "the URI for the SpdxDocument should be the namespace appended by '#SPDXRef-DOCUMENT.") + sys.exit(1) namespace, spdx_id = urldefrag(subject) if not namespace: - sys.exit( + logging.error( "No namespace found, the URI for the SpdxDocument should be the namespace appended by '#SPDXRef-DOCUMENT.") + sys.exit(1) if not spdx_id: spdx_id = None diff --git a/src/spdx/writer/rdf/writer_utils.py b/src/spdx/writer/rdf/writer_utils.py index 772f80f..1498b75 100644 --- a/src/spdx/writer/rdf/writer_utils.py +++ b/src/spdx/writer/rdf/writer_utils.py @@ -8,7 +8,7 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
-import sys +import logging from datetime import datetime from typing import Any, Optional, Dict @@ -59,8 +59,7 @@ def add_namespace_to_spdx_id(spdx_id: str, doc_namespace: str, external_doc_name if ":" in spdx_id: external_doc_ref_id = spdx_id.split(":")[0] if external_doc_ref_id not in external_doc_namespaces.keys(): - print(f"No namespace for external document reference with id {external_doc_ref_id} provided.", - file=sys.stderr) + logging.warning(f"No namespace for external document reference with id {external_doc_ref_id} provided.") return spdx_id return f"{external_doc_namespaces[external_doc_ref_id]}#{spdx_id.split(':')[1]}"
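The core pattern of this patch — collecting all messages first and emitting them through `logging` instead of repeated `print(..., file=sys.stderr)` calls — can be sketched in isolation; the message strings below are hypothetical stand-ins, not the tool's real validation output:

```python
import logging

# Hypothetical stand-ins for ValidationMessage.validation_message values.
validation_messages = ["SPDXID must only contain letters", "checksum has invalid length"]

if validation_messages:
    # Joining everything into one log record, as the patch does, keeps the
    # whole report together in a single logging call.
    log_string = "\n".join(
        ["The document is invalid. The following issues have been found:"]
        + validation_messages
    )
    logging.error(log_string)
```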
spdx/tools-python
9315e42d8ea064bae47e69fe07b79934ef6dddb0
diff --git a/tests/spdx/parser/rdf/test_creation_info_parser.py b/tests/spdx/parser/rdf/test_creation_info_parser.py index 516b0ff..71ee6cc 100644 --- a/tests/spdx/parser/rdf/test_creation_info_parser.py +++ b/tests/spdx/parser/rdf/test_creation_info_parser.py @@ -60,14 +60,16 @@ def test_parse_namespace_and_spdx_id(): ([(URIRef("docNamespace1"), RDF.type, SPDX_NAMESPACE.SpdxDocument), (URIRef("docNamespace2"), RDF.type, SPDX_NAMESPACE.SpdxDocument)], "Multiple SpdxDocuments found")]) -def test_parse_namespace_and_spdx_id_with_system_exit(triples: List[Tuple[Node, Node, Node]], error_message: str): +def test_parse_namespace_and_spdx_id_with_system_exit(triples: List[Tuple[Node, Node, Node]], error_message: str, caplog): graph = Graph() for triple in triples: graph = graph.add(triple) - with pytest.raises(SystemExit, match=error_message): + with pytest.raises(SystemExit): parse_namespace_and_spdx_id(graph) + assert error_message in caplog.text + def test_parse_external_document_refs(): graph = Graph().parse(os.path.join(os.path.dirname(__file__), "data/file_to_test_rdf_parser.rdf.xml"))
Add a proper logging framework

Making sure that `stdout` is not cluttered with unwanted output that could instead go to `stderr`.
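For context, the separation requested here is what the standard-library `logging` module provides out of the box: its default handler writes to `sys.stderr`, leaving `stdout` free for machine-readable output. A minimal sketch (the document text is a made-up placeholder, not the tool's real output):

```python
import logging
import sys

# basicConfig's default StreamHandler targets sys.stderr, so diagnostics
# never interleave with data that is written to stdout.
logging.basicConfig(level=logging.INFO, format="%(levelname)s: %(message)s")

document_text = "SPDXVersion: SPDX-2.3"  # placeholder for a converted document
logging.info("The document is valid.")   # -> stderr
sys.stdout.write(document_text + "\n")   # -> stdout, safe to pipe
```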
0.0
9315e42d8ea064bae47e69fe07b79934ef6dddb0
[ "tests/spdx/parser/rdf/test_creation_info_parser.py::test_parse_namespace_and_spdx_id_with_system_exit[triples0-No", "tests/spdx/parser/rdf/test_creation_info_parser.py::test_parse_namespace_and_spdx_id_with_system_exit[triples1-No", "tests/spdx/parser/rdf/test_creation_info_parser.py::test_parse_namespace_and_spdx_id_with_system_exit[triples2-No", "tests/spdx/parser/rdf/test_creation_info_parser.py::test_parse_namespace_and_spdx_id_with_system_exit[triples3-Multiple" ]
[ "tests/spdx/parser/rdf/test_creation_info_parser.py::test_parse_creation_info", "tests/spdx/parser/rdf/test_creation_info_parser.py::test_parse_namespace_and_spdx_id", "tests/spdx/parser/rdf/test_creation_info_parser.py::test_parse_external_document_refs" ]
{ "failed_lite_validators": [ "has_short_problem_statement", "has_many_modified_files", "has_many_hunks", "has_pytest_match_arg" ], "has_test_patch": true, "is_lite": false }
2023-02-24 11:31:37+00:00
apache-2.0
5,632
spdx__tools-python-520
diff --git a/spdx/writers/rdf.py b/spdx/writers/rdf.py
index 6622072..c64b36b 100644
--- a/spdx/writers/rdf.py
+++ b/spdx/writers/rdf.py
@@ -793,18 +793,9 @@ class PackageWriter(LicenseWriter):
         Return a Node representing the package.
         Files must have been added to the graph before this method is called.
         """
-        package_node = URIRef("http://www.spdx.org/tools#SPDXRef-Package")
+        package_node = URIRef(f"http://www.spdx.org/tools#{package.spdx_id}")
         type_triple = (package_node, RDF.type, self.spdx_namespace.Package)
         self.graph.add(type_triple)
-        # Package SPDXID
-        if package.spdx_id:
-            pkg_spdx_id = URIRef(package.spdx_id)
-            pkg_spdx_id_triple = (
-                package_node,
-                self.spdx_namespace.Package,
-                pkg_spdx_id,
-            )
-            self.graph.add(pkg_spdx_id_triple)
         # Handle optional fields:
         self.handle_pkg_optional_fields(package, package_node)
         # package name
spdx/tools-python
b0d946b354045ae4e01398f416ee7abbabfe8443
diff --git a/tests/data/doc_write/rdf-simple-plus.json b/tests/data/doc_write/rdf-simple-plus.json index d2b65ae..9a50011 100644 --- a/tests/data/doc_write/rdf-simple-plus.json +++ b/tests/data/doc_write/rdf-simple-plus.json @@ -6,9 +6,6 @@ "ns1:describesPackage": { "ns1:Package": { "@rdf:about": "http://www.spdx.org/tools#SPDXRef-Package", - "ns1:Package": { - "@rdf:resource": "SPDXRef-Package" - }, "ns1:hasFile": { "@rdf:resource": "http://www.spdx.org/files#SPDXRef-File" }, diff --git a/tests/data/doc_write/rdf-simple.json b/tests/data/doc_write/rdf-simple.json index 00064a3..6b1ac2d 100644 --- a/tests/data/doc_write/rdf-simple.json +++ b/tests/data/doc_write/rdf-simple.json @@ -6,9 +6,6 @@ "ns1:describesPackage": { "ns1:Package": { "@rdf:about": "http://www.spdx.org/tools#SPDXRef-Package", - "ns1:Package": { - "@rdf:resource": "SPDXRef-Package" - }, "ns1:hasFile": { "@rdf:resource": "http://www.spdx.org/files#SPDXRef-File" }, diff --git a/tests/test_rdf_writer.py b/tests/test_rdf_writer.py index 9153dba..65b0347 100644 --- a/tests/test_rdf_writer.py +++ b/tests/test_rdf_writer.py @@ -1,4 +1,6 @@ import os +from typing import Optional +from unittest import TestCase import pytest from rdflib import URIRef @@ -62,6 +64,35 @@ def test_external_package_references(temporary_file_path) -> None: assert second_ref.category in parsed_reference_categories +# This test is really clunky since it's hard to isolate features of the rdf writer to test. Should be improved when +# that part is refactored. +def test_multiple_packages_in_one_document(temporary_file_path) -> None: + doc_node = URIRef("http://www.spdx.org/tools#SPDXRef-DOCUMENT") + document = Document() + document.creation_info.set_created_now() + package = Package() + package.spdx_id = "SPDXRef-Package" + package.version = "2.1" + document.add_package(package) + package2 = Package() + package2.spdx_id = "SPDXRef-Another-Package" + package2.version = "2.3" + document.add_package(package2) + + with open(temporary_file_path, "wb") as out: + writer = Writer(document, out) + writer.write(doc_node) + parser = Parser(Builder(), StandardLogger()) + with open(temporary_file_path, "r") as file: + parsed_document: Document = parser.parse(file)[0] + + assert len(parsed_document.packages) == 2 + first_package = get_package_by_spdx_id("SPDXRef-Package", document) + assert first_package.version == "2.1" + second_package = get_package_by_spdx_id("SPDXRef-Another-Package", document) + assert second_package.version == "2.3" + + def minimal_document_with_package() -> Document: document = Document(data_license=License.from_identifier('CC0-1.0')) document.creation_info.set_created_now() @@ -72,7 +103,15 @@ def minimal_document_with_package() -> Document: def minimal_package() -> Package: package = Package() + package.spdx_id = "SPDXRef-Package" package.conc_lics = NoAssert() package.license_declared = NoAssert() package.add_lics_from_file(NoAssert()) return package + + +def get_package_by_spdx_id(package_spdx_id: str, document: Document) -> Optional[Package]: + for package in document.packages: + if package.spdx_id == package_spdx_id: + return package + return None
Incorrect RDF generated in 1 document with 2 packages

I am trying to generate a simple document with two packages. I am not sure whether I am using the library or SPDX properly, so any help would be appreciated. My code creates an SPDX document and then adds two packages to it. Then I try to parse the file with another script. Two packages are indeed detected, but some of the properties are misplaced (they are parsed into the wrong package). Taking a look at the generated XML, I can see only 1 package element, and all attributes are mixed up inside it. Am I making a mistake, or is it a bug in the library?

Here is the generating code:

```python
import codecs
import logging
import os

from spdx.writers.rdf import write_document, InvalidDocumentError
from spdx.parsers.loggers import ErrorMessages


def write_spdx_to_file(doc, path: str):
    with open(path, 'wb') as out:
        try:
            write_document(doc, out)
            logging.info("The document has been saved to: '%s'", path)
        except InvalidDocumentError:
            logging.error('Document is Invalid')
            messages = ErrorMessages()
            doc.validate(messages)
            logging.error('\n'.join(messages))
            # Error handling can be customized here, e.g. mail sending


def generate_sample_spdx_3():
    """
    A more specific example of an SPDX document with 2 packages to illustrate
    python SPDX tools problems to the community.
    """
    doc = Document()
    doc.spdx_id = "Abc-SPDXRef-DOCUMENT"
    doc.version = Version(2, 2)
    doc.data_license = License.from_identifier('CC0-1.0')
    doc.namespace = "com.example"
    doc.name = "http://example.com/documents/abc"
    doc.comment = 'Example Document'
    doc.creation_info.add_creator(Person('John Smith', '[email protected]'))
    doc.creation_info.add_creator(Organization("Example Ltd.", "[email protected]"))
    doc.creation_info.add_creator(Tool("def"))
    doc.creation_info.set_created_now()

    # Package 1
    package = Package()
    package.name = 'TagWriteTest'
    package.spdx_id = "TagWriteTest"
    package.version = '1.2'
    package.file_name = 'twt.jar'
    package.download_location = 'http://www.tagwritetest.test/download'
    package.homepage = SPDXNone()
    package.verif_code = '4e3211c67a2d28fced849ee1bb76e7391b93feba'
    license_set = LicenseConjunction(License.from_identifier('Apache-2.0'),
                                     License.from_identifier('BSD-2-Clause'))
    package.conc_lics = license_set
    package.license_declared = license_set
    package.add_lics_from_file(License.from_identifier('Apache-2.0'))
    package.add_lics_from_file(License.from_identifier('BSD-2-Clause'))
    package.cr_text = NoAssert()
    package.summary = 'Simple package 1.'
    package.description = 'Really simple package 1.'
    doc.add_package(package)

    # Package 2
    package = Package()
    package.name = 'FooTest'
    package.spdx_id = "FooTest"
    package.version = '3.4'
    package.file_name = 'foo.jar'
    package.download_location = 'http://www.footest.test/download'
    package.homepage = SPDXNone()
    package.verif_code = '4e3211c67a2d28fced849ee1bb76e7391b93feb1'
    license_set = LicenseConjunction(License.from_identifier('Apache-2.0'),
                                     License.from_identifier('BSD-2-Clause'))
    package.conc_lics = license_set
    package.license_declared = license_set
    package.add_lics_from_file(License.from_identifier('Apache-2.0'))
    package.add_lics_from_file(License.from_identifier('BSD-2-Clause'))
    package.cr_text = NoAssert()
    package.summary = 'Simple package 2.'
    package.description = 'Really simple package 2.'
    doc.add_package(package)

    write_spdx_to_file(doc, "/tmp/abc.rdf")


if __name__ == '__main__':
    logging.basicConfig(level=logging.DEBUG)
    generate_sample_spdx_3()
```

The parsing is done like so:

```python
import logging

import spdx
from spdx.parsers.parse_anything import parse_file


def test_parse_spdx(input_file: str):
    doc, errors = parse_file(input_file)
    if errors:
        logging.warning("Errors while parsing SPDX document: errors=%s, file='%s'", errors, input_file)
    logging.debug("SPDX document: %s", doc.name)
    print(len(doc.packages))
    for package in doc.packages:
        print(package.name)
        print(package.version)
        print(package.summary)
        print(package.description)


if __name__ == '__main__':
    logging.basicConfig(level=logging.DEBUG)
    test_parse_spdx("/tmp/abc.rdf")
```

This produces the following output (notice that the first package's version, summary, and description are None, while the second package's values actually belong to the first package and are shown in the wrong place):

```
More than one Package version info defined.
More than one Package file name defined.
More than one Package download location defined.
More than one package verification code defined.
More than one package http://spdx.org/rdf/terms#licenseConcluded defined.
More than one package http://spdx.org/rdf/terms#licenseDeclared defined.
More than one package summary defined.
More than one package description defined.
WARNING:root:Errors while parsing SPDX document: errors=True, file='/tmp/abc.rdf'
DEBUG:root:SPDX document: http://example.com/documents/abc
2
TagWriteTest
None
None
None
FooTest
1.2
Simple package 2.
Really simple package 1.
```

And finally, here is the generated XML:

```xml
<?xml version="1.0" encoding="utf-8"?>
<rdf:RDF
  xmlns:ns1="http://spdx.org/rdf/terms#"
  xmlns:ns2="http://usefulinc.com/ns/doap#"
  xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#"
>
  <ns1:SpdxDocument rdf:about="http://www.spdx.org/tools#SPDXRef-DOCUMENT">
    <ns1:specVersion>SPDX-2.2</ns1:specVersion>
    <ns1:name rdf:resource="http://example.com/documents/abc"/>
    <ns1:dataLicense rdf:resource="http://spdx.org/licenses/CC0-1.0"/>
    <ns1:describesPackage>
      <ns1:Package rdf:about="http://www.spdx.org/tools#SPDXRef-Package">
        <ns2:homepage rdf:resource="http://spdx.org/rdf/terms#none"/>
        <ns1:licenseDeclared>
          <ns1:ConjunctiveLicenseSet rdf:nodeID="N803aaddeb84a40c194f448b61db20a18">
            <ns1:member rdf:resource="http://spdx.org/licenses/BSD-2-Clause"/>
            <ns1:member rdf:resource="http://spdx.org/licenses/Apache-2.0"/>
          </ns1:ConjunctiveLicenseSet>
        </ns1:licenseDeclared>
        <ns1:licenseDeclared>
          <ns1:ConjunctiveLicenseSet rdf:nodeID="Nbc56a48570a34e11af70d50c96a650b6">
            <ns1:member rdf:resource="http://spdx.org/licenses/Apache-2.0"/>
            <ns1:member rdf:resource="http://spdx.org/licenses/BSD-2-Clause"/>
          </ns1:ConjunctiveLicenseSet>
        </ns1:licenseDeclared>
        <ns1:name>TagWriteTest</ns1:name>
        <ns1:name>FooTest</ns1:name>
        <ns1:licenseInfoFromFiles rdf:resource="http://spdx.org/licenses/Apache-2.0"/>
        <ns1:licenseInfoFromFiles rdf:resource="http://spdx.org/licenses/BSD-2-Clause"/>
        <ns1:packageVerificationCode>
          <ns1:PackageVerificationCode rdf:nodeID="N6ce231f395d749fbb38fbe4db70c55ea">
            <ns1:packageVerificationCodeValue>4e3211c67a2d28fced849ee1bb76e7391b93feba</ns1:packageVerificationCodeValue>
          </ns1:PackageVerificationCode>
        </ns1:packageVerificationCode>
        <ns1:packageVerificationCode>
          <ns1:PackageVerificationCode rdf:nodeID="N8759686ff5f344d7a7efa06c2565377d">
            <ns1:packageVerificationCodeValue>4e3211c67a2d28fced849ee1bb76e7391b93feb1</ns1:packageVerificationCodeValue>
          </ns1:PackageVerificationCode>
        </ns1:packageVerificationCode>
        <ns1:summary>Simple package 2.</ns1:summary>
        <ns1:summary>Simple package 1.</ns1:summary>
        <ns1:packageFileName>twt.jar</ns1:packageFileName>
        <ns1:packageFileName>foo.jar</ns1:packageFileName>
        <ns1:versionInfo>1.2</ns1:versionInfo>
        <ns1:versionInfo>3.4</ns1:versionInfo>
        <ns1:downloadLocation>http://www.footest.test/download</ns1:downloadLocation>
        <ns1:downloadLocation>http://www.tagwritetest.test/download</ns1:downloadLocation>
        <ns1:description>Really simple package 1.</ns1:description>
        <ns1:description>Really simple package 2.</ns1:description>
        <ns1:licenseConcluded>
          <ns1:ConjunctiveLicenseSet rdf:nodeID="N091584493e0e4624bd54e640cc5bb3ed">
            <ns1:member rdf:resource="http://spdx.org/licenses/BSD-2-Clause"/>
            <ns1:member rdf:resource="http://spdx.org/licenses/Apache-2.0"/>
          </ns1:ConjunctiveLicenseSet>
        </ns1:licenseConcluded>
        <ns1:licenseConcluded>
          <ns1:ConjunctiveLicenseSet rdf:nodeID="N35334f5df81f4e6facf7fe32a49936c2">
            <ns1:member rdf:resource="http://spdx.org/licenses/Apache-2.0"/>
            <ns1:member rdf:resource="http://spdx.org/licenses/BSD-2-Clause"/>
          </ns1:ConjunctiveLicenseSet>
        </ns1:licenseConcluded>
        <ns1:Package rdf:resource="TagWriteTest"/>
        <ns1:Package rdf:resource="FooTest"/>
        <ns1:copyrightText rdf:resource="http://spdx.org/rdf/terms#noassertion"/>
      </ns1:Package>
    </ns1:describesPackage>
    <ns1:creationInfo>
      <ns1:CreationInfo rdf:nodeID="N38c8160ed5124b269ed54d8cf045f6c0">
        <ns1:creator>Organization: Example Ltd. ([email protected])</ns1:creator>
        <ns1:creator>Person: John Smith ([email protected])</ns1:creator>
        <ns1:creator>Tool: def</ns1:creator>
        <ns1:created>2023-03-09T20:45:07Z</ns1:created>
      </ns1:CreationInfo>
    </ns1:creationInfo>
  </ns1:SpdxDocument>
</rdf:RDF>
```

There is only one Package element, and all attributes are mixed up in it. I expect two Package elements to be generated. What causes this issue?
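The reported symptom follows directly from RDF semantics: all triples that share a subject `URIRef` describe one node, so a writer that hard-codes a single package URI merges every package's properties. A minimal `rdflib` illustration of the effect (the property URI is chosen for the example and is not necessarily the writer's exact vocabulary):

```python
from rdflib import Graph, Literal, URIRef

g = Graph()
# Both packages end up under one hard-coded subject URI, so their
# triples accumulate on a single node instead of two distinct ones.
shared_node = URIRef("http://www.spdx.org/tools#SPDXRef-Package")
name = URIRef("http://spdx.org/rdf/terms#name")

g.add((shared_node, name, Literal("TagWriteTest")))
g.add((shared_node, name, Literal("FooTest")))

# One subject now carries two name triples, which is why the parser warns
# about "More than one ... defined" and mixes the values up.
print(len(set(g.subjects())))  # 1
```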
0.0
b0d946b354045ae4e01398f416ee7abbabfe8443
[ "tests/test_rdf_writer.py::test_multiple_packages_in_one_document" ]
[ "tests/test_rdf_writer.py::test_accept_provided_doc_node", "tests/test_rdf_writer.py::test_external_package_references" ]
{ "failed_lite_validators": [ "has_hyperlinks", "has_git_commit_hash" ], "has_test_patch": true, "is_lite": false }
2023-03-10 15:35:44+00:00
apache-2.0
5,633
spdx__tools-python-543
diff --git a/spdx/writers/rdf.py b/spdx/writers/rdf.py
index c64b36b..ec8b263 100644
--- a/spdx/writers/rdf.py
+++ b/spdx/writers/rdf.py
@@ -248,9 +248,7 @@ class FileWriter(LicenseWriter):
         """
         Create a node for spdx.file.
         """
-        file_node = URIRef(
-            "http://www.spdx.org/files#{id}".format(id=str(doc_file.spdx_id))
-        )
+        file_node = URIRef(f"{self.document.namespace}#{doc_file.spdx_id}")
         type_triple = (file_node, RDF.type, self.spdx_namespace.File)
         self.graph.add(type_triple)
 
@@ -386,7 +384,7 @@ class SnippetWriter(LicenseWriter):
         """
         Return a snippet node.
         """
-        snippet_node = URIRef("http://spdx.org/rdf/terms/Snippet#" + snippet.spdx_id)
+        snippet_node = URIRef(f"{self.document.namespace}#{snippet.spdx_id}")
         type_triple = (snippet_node, RDF.type, self.spdx_namespace.Snippet)
         self.graph.add(type_triple)
 
@@ -793,7 +791,7 @@ class PackageWriter(LicenseWriter):
         Return a Node representing the package.
         Files must have been added to the graph before this method is called.
         """
-        package_node = URIRef(f"http://www.spdx.org/tools#{package.spdx_id}")
+        package_node = URIRef(f"{self.document.namespace}#{package.spdx_id}")
         type_triple = (package_node, RDF.type, self.spdx_namespace.Package)
         self.graph.add(type_triple)
         # Handle optional fields:
@@ -984,7 +982,7 @@
         """
         Add and return the root document node to graph.
         """
-        doc_node = URIRef("http://www.spdx.org/tools#SPDXRef-DOCUMENT")
+        doc_node = URIRef(f"{self.document.namespace}#SPDXRef-DOCUMENT")
         # Doc type
         self.graph.add((doc_node, RDF.type, self.spdx_namespace.SpdxDocument))
         # Version
spdx/tools-python
73f3f462591894cac8ef11e01ebec7e702e33c25
diff --git a/tests/data/doc_write/rdf-mini.json b/tests/data/doc_write/rdf-mini.json index d413c6a..282ee17 100644 --- a/tests/data/doc_write/rdf-mini.json +++ b/tests/data/doc_write/rdf-mini.json @@ -3,7 +3,7 @@ "@xmlns:ns1": "http://spdx.org/rdf/terms#", "@xmlns:rdf": "http://www.w3.org/1999/02/22-rdf-syntax-ns#", "ns1:SpdxDocument": { - "@rdf:about": "http://www.spdx.org/tools#SPDXRef-DOCUMENT", + "@rdf:about": "None#SPDXRef-DOCUMENT", "ns1:specVersion": "SPDX-2.1", "ns1:dataLicense": { "@rdf:resource": "http://spdx.org/licenses/CC0-1.0" diff --git a/tests/data/doc_write/rdf-simple-plus.json b/tests/data/doc_write/rdf-simple-plus.json index 9a50011..cc94dce 100644 --- a/tests/data/doc_write/rdf-simple-plus.json +++ b/tests/data/doc_write/rdf-simple-plus.json @@ -5,9 +5,9 @@ "ns1:SpdxDocument": { "ns1:describesPackage": { "ns1:Package": { - "@rdf:about": "http://www.spdx.org/tools#SPDXRef-Package", + "@rdf:about": "https://spdx.org/spdxdocs/spdx-example-444504E0-4F89-41D3-9A0C-0305E82C3301#SPDXRef-Package", "ns1:hasFile": { - "@rdf:resource": "http://www.spdx.org/files#SPDXRef-File" + "@rdf:resource": "https://spdx.org/spdxdocs/spdx-example-444504E0-4F89-41D3-9A0C-0305E82C3301#SPDXRef-File" }, "ns1:name": "some/path", "ns1:licenseDeclared": { @@ -48,7 +48,7 @@ }, "ns1:referencesFile": { "ns1:File": { - "@rdf:about": "http://www.spdx.org/files#SPDXRef-File", + "@rdf:about": "https://spdx.org/spdxdocs/spdx-example-444504E0-4F89-41D3-9A0C-0305E82C3301#SPDXRef-File", "ns1:fileName": "./some/path/tofile", "ns1:checksum": [ { @@ -96,7 +96,7 @@ } } ], - "@rdf:about": "http://www.spdx.org/tools#SPDXRef-DOCUMENT" + "@rdf:about": "https://spdx.org/spdxdocs/spdx-example-444504E0-4F89-41D3-9A0C-0305E82C3301#SPDXRef-DOCUMENT" } } } diff --git a/tests/data/doc_write/rdf-simple.json b/tests/data/doc_write/rdf-simple.json index 6b1ac2d..e9fb374 100644 --- a/tests/data/doc_write/rdf-simple.json +++ b/tests/data/doc_write/rdf-simple.json @@ -5,9 +5,9 @@ "ns1:SpdxDocument": { "ns1:describesPackage": { "ns1:Package": { - "@rdf:about": "http://www.spdx.org/tools#SPDXRef-Package", + "@rdf:about": "https://spdx.org/spdxdocs/spdx-example-444504E0-4F89-41D3-9A0C-0305E82C3301#SPDXRef-Package", "ns1:hasFile": { - "@rdf:resource": "http://www.spdx.org/files#SPDXRef-File" + "@rdf:resource": "https://spdx.org/spdxdocs/spdx-example-444504E0-4F89-41D3-9A0C-0305E82C3301#SPDXRef-File" }, "ns1:downloadLocation": { "@rdf:resource": "http://spdx.org/rdf/terms#noassertion" @@ -48,7 +48,7 @@ }, "ns1:referencesFile": { "ns1:File": { - "@rdf:about": "http://www.spdx.org/files#SPDXRef-File", + "@rdf:about": "https://spdx.org/spdxdocs/spdx-example-444504E0-4F89-41D3-9A0C-0305E82C3301#SPDXRef-File", "ns1:licenseInfoInFile": { "@rdf:resource": "http://spdx.org/licenses/LGPL-2.1-only" }, @@ -96,7 +96,7 @@ } } ], - "@rdf:about": "http://www.spdx.org/tools#SPDXRef-DOCUMENT" + "@rdf:about": "https://spdx.org/spdxdocs/spdx-example-444504E0-4F89-41D3-9A0C-0305E82C3301#SPDXRef-DOCUMENT" } } }
Document node in RDF output has no unique ID

The RDF writer produces a `SpdxDocument` instance that always has the identifier `http://www.spdx.org/tools#SPDXRef-DOCUMENT`. When merging multiple SPDX documents from across multiple distributions, this means that, e.g., all package dependencies, names, versions, licenses, creationInfo, etc. will be attached to a single node in the RDF graph.

https://github.com/spdx/tools-python/blob/73f3f462591894cac8ef11e01ebec7e702e33c25/spdx/writers/rdf.py#L987

It makes much more sense to use the `name` of the document to construct an identifier, as in this line:

https://github.com/spdx/tools-python/blob/73f3f462591894cac8ef11e01ebec7e702e33c25/spdx/writers/rdf.py#L996
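The fix applied in the patch above derives every identifier from the document's own namespace instead of a hard-coded constant. A minimal sketch of that scheme (the helper name is illustrative, not the writer's actual API):

```python
from rdflib import URIRef

def element_uri(doc_namespace: str, spdx_id: str) -> URIRef:
    # Qualifying each element with its document's namespace keeps nodes
    # from colliding when several documents are merged into one graph.
    return URIRef(f"{doc_namespace}#{spdx_id}")

ns = "https://spdx.org/spdxdocs/spdx-example-444504E0-4F89-41D3-9A0C-0305E82C3301"
# Prints the namespace-qualified document URI ending in "#SPDXRef-DOCUMENT".
print(element_uri(ns, "SPDXRef-DOCUMENT"))
```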
0.0
73f3f462591894cac8ef11e01ebec7e702e33c25
[ "tests/test_document.py::TestWriters::test_write_document_rdf_mini", "tests/test_document.py::TestWriters::test_write_document_rdf_with_or_later_with_validate", "tests/test_document.py::TestWriters::test_write_document_rdf_with_validate" ]
[ "tests/test_builder.py::TestDocumentBuilder::test_comment_cardinality", "tests/test_builder.py::TestDocumentBuilder::test_comment_value", "tests/test_builder.py::TestDocumentBuilder::test_correct_data_comment", "tests/test_builder.py::TestDocumentBuilder::test_correct_data_lics", "tests/test_builder.py::TestDocumentBuilder::test_correct_doc_namespace", "tests/test_builder.py::TestDocumentBuilder::test_correct_name", "tests/test_builder.py::TestDocumentBuilder::test_correct_version", "tests/test_builder.py::TestDocumentBuilder::test_data_lics_cardinality", "tests/test_builder.py::TestDocumentBuilder::test_data_lics_value", "tests/test_builder.py::TestDocumentBuilder::test_doc_namespace_cardinality", "tests/test_builder.py::TestDocumentBuilder::test_doc_namespace_value", "tests/test_builder.py::TestDocumentBuilder::test_name_cardinality", "tests/test_builder.py::TestDocumentBuilder::test_version_cardinality", "tests/test_builder.py::TestDocumentBuilder::test_version_value", "tests/test_builder.py::TestExternalDocumentRefBuilder::test_add_ext_doc_refs", "tests/test_builder.py::TestExternalDocumentRefBuilder::test_checksum", "tests/test_builder.py::TestExternalDocumentRefBuilder::test_external_doc_id", "tests/test_builder.py::TestExternalDocumentRefBuilder::test_spdx_doc_uri", "tests/test_builder.py::TestEntityBuilder::test_org", "tests/test_builder.py::TestEntityBuilder::test_org_value_error", "tests/test_builder.py::TestEntityBuilder::test_org_with_email", "tests/test_builder.py::TestEntityBuilder::test_per", "tests/test_builder.py::TestEntityBuilder::test_per_value_error", "tests/test_builder.py::TestEntityBuilder::test_person_with_email", "tests/test_builder.py::TestEntityBuilder::test_tool", "tests/test_builder.py::TestEntityBuilder::test_tool_value_error", "tests/test_builder.py::TestCreationInfoBuilder::test_add_creator", "tests/test_builder.py::TestCreationInfoBuilder::test_created", "tests/test_builder.py::TestCreationInfoBuilder::test_created_value", "tests/test_builder.py::TestCreationInfoBuilder::test_invalid_creator_type", "tests/test_builder.py::TestCreationInfoBuilder::test_license_list_vers", "tests/test_builder.py::TestCreationInfoBuilder::test_lics_list_ver_card", "tests/test_builder.py::TestCreationInfoBuilder::test_lics_list_ver_value", "tests/test_builder.py::TestCreationInfoBuilder::test_more_than_one_created", "tests/test_builder.py::TestReviewBuilder::test_comment_cardinality", "tests/test_builder.py::TestReviewBuilder::test_comment_reset", "tests/test_builder.py::TestReviewBuilder::test_comment_value", "tests/test_builder.py::TestReviewBuilder::test_comment_without_reviewer", "tests/test_builder.py::TestReviewBuilder::test_date_value", "tests/test_builder.py::TestReviewBuilder::test_reviewed_cardinality", "tests/test_builder.py::TestReviewBuilder::test_reviewed_reset", "tests/test_builder.py::TestReviewBuilder::test_reviewed_without_reviewer", "tests/test_builder.py::TestAnnotationBuilder::test_annotation_cardinality", "tests/test_builder.py::TestAnnotationBuilder::test_annotation_comment_cardinality", "tests/test_builder.py::TestAnnotationBuilder::test_annotation_comment_reset", "tests/test_builder.py::TestAnnotationBuilder::test_annotation_comment_value", "tests/test_builder.py::TestAnnotationBuilder::test_annotation_date_value", "tests/test_builder.py::TestAnnotationBuilder::test_annotation_reset", "tests/test_builder.py::TestAnnotationBuilder::test_annotation_spdx_id_cardinality", "tests/test_builder.py::TestAnnotationBuilder::test_annotation_type_cardinality", 
"tests/test_builder.py::TestAnnotationBuilder::test_annotation_without_annotator", "tests/test_builder.py::TestAnnotationBuilder::test_comment_without_annotator", "tests/test_builder.py::TestAnnotationBuilder::test_correct_annotation_spdx_id", "tests/test_builder.py::TestAnnotationBuilder::test_correct_annotation_type", "tests/test_builder.py::TestAnnotationBuilder::test_incorrect_annotation_type_value", "tests/test_builder.py::TestAnnotationBuilder::test_spdx_id_without_annotator", "tests/test_builder.py::TestAnnotationBuilder::test_type_without_annotator", "tests/test_builder.py::TestRelationshipBuilder::test_correct_relationship", "tests/test_builder.py::TestRelationshipBuilder::test_relationship_comment_reset", "tests/test_builder.py::TestRelationshipBuilder::test_relationship_comment_value", "tests/test_builder.py::TestRelationshipBuilder::test_relationship_comment_without_relationship", "tests/test_builder.py::TestPackageBuilder::test_correct_pkg_attribution_text", "tests/test_builder.py::TestPackageBuilder::test_correct_pkg_comment", "tests/test_builder.py::TestPackageBuilder::test_correct_pkg_ext_comment", "tests/test_builder.py::TestPackageBuilder::test_correct_pkg_ext_ref_category", "tests/test_builder.py::TestPackageBuilder::test_correct_pkg_ext_ref_locator", "tests/test_builder.py::TestPackageBuilder::test_correct_pkg_ext_ref_type", "tests/test_builder.py::TestPackageBuilder::test_correct_pkg_files_analyzed_1", "tests/test_builder.py::TestPackageBuilder::test_correct_pkg_files_analyzed_2", "tests/test_builder.py::TestPackageBuilder::test_correct_pkg_spdx_id", "tests/test_builder.py::TestPackageBuilder::test_file_name_order", "tests/test_builder.py::TestPackageBuilder::test_incorrect_pkg_attribution_text", "tests/test_builder.py::TestPackageBuilder::test_incorrect_pkg_comment", "tests/test_builder.py::TestPackageBuilder::test_incorrect_pkg_ext_ref_category", "tests/test_builder.py::TestPackageBuilder::test_incorrect_pkg_ext_ref_type", "tests/test_builder.py::TestPackageBuilder::test_incorrect_pkg_files_analyzed", "tests/test_builder.py::TestPackageBuilder::test_incorrect_pkg_spdx_id", "tests/test_builder.py::TestPackageBuilder::test_package_cardinality", "tests/test_builder.py::TestPackageBuilder::test_pkg_attribution_text_order", "tests/test_builder.py::TestPackageBuilder::test_pkg_chksum_order", "tests/test_builder.py::TestPackageBuilder::test_pkg_cr_text_order", "tests/test_builder.py::TestPackageBuilder::test_pkg_down_loc_order", "tests/test_builder.py::TestPackageBuilder::test_pkg_ext_comment_without_pkg_ext_ref", "tests/test_builder.py::TestPackageBuilder::test_pkg_ext_ref_without_pkg", "tests/test_builder.py::TestPackageBuilder::test_pkg_home_order", "tests/test_builder.py::TestPackageBuilder::test_pkg_licenses_concluded_order", "tests/test_builder.py::TestPackageBuilder::test_pkg_lics_comment_order", "tests/test_builder.py::TestPackageBuilder::test_pkg_lics_decl_order", "tests/test_builder.py::TestPackageBuilder::test_pkg_lics_from_file_order", "tests/test_builder.py::TestPackageBuilder::test_pkg_originator_order", "tests/test_builder.py::TestPackageBuilder::test_pkg_source_info_order", "tests/test_builder.py::TestPackageBuilder::test_pkg_summary_order", "tests/test_builder.py::TestPackageBuilder::test_pkg_supplier_order", "tests/test_builder.py::TestPackageBuilder::test_pkg_verif_order", "tests/test_builder.py::TestPackageBuilder::test_set_pkg_comment_order", "tests/test_builder.py::TestPackageBuilder::test_set_pkg_desc_order", 
"tests/test_builder.py::TestPackageBuilder::test_set_pkg_files_analyzed_order", "tests/test_builder.py::TestPackageBuilder::test_set_pkg_spdx_id_order", "tests/test_builder.py::TestPackageBuilder::test_vers_order", "tests/test_builder.py::TestSnippetBuilder::test_correct_snippet_attribution_text", "tests/test_builder.py::TestSnippetBuilder::test_create_snippet", "tests/test_builder.py::TestSnippetBuilder::test_incorrect_snippet_attribution_text", "tests/test_builder.py::TestSnippetBuilder::test_incorrect_snippet_spdx_id", "tests/test_builder.py::TestSnippetBuilder::test_snippet_attribution_text_order", "tests/test_builder.py::TestSnippetBuilder::test_snippet_byte_range", "tests/test_builder.py::TestSnippetBuilder::test_snippet_byte_range_order", "tests/test_builder.py::TestSnippetBuilder::test_snippet_byte_range_value", "tests/test_builder.py::TestSnippetBuilder::test_snippet_byte_range_value_wrong_format", "tests/test_builder.py::TestSnippetBuilder::test_snippet_comment", "tests/test_builder.py::TestSnippetBuilder::test_snippet_comment_order", "tests/test_builder.py::TestSnippetBuilder::test_snippet_comment_text_value", "tests/test_builder.py::TestSnippetBuilder::test_snippet_conc_lics", "tests/test_builder.py::TestSnippetBuilder::test_snippet_conc_lics_cardinality", "tests/test_builder.py::TestSnippetBuilder::test_snippet_conc_lics_order", "tests/test_builder.py::TestSnippetBuilder::test_snippet_conc_lics_value", "tests/test_builder.py::TestSnippetBuilder::test_snippet_copyright", "tests/test_builder.py::TestSnippetBuilder::test_snippet_copyright_order", "tests/test_builder.py::TestSnippetBuilder::test_snippet_copyright_text_value", "tests/test_builder.py::TestSnippetBuilder::test_snippet_from_file_spdxid", "tests/test_builder.py::TestSnippetBuilder::test_snippet_from_file_spdxid_cardinality", "tests/test_builder.py::TestSnippetBuilder::test_snippet_from_file_spdxid_order", "tests/test_builder.py::TestSnippetBuilder::test_snippet_from_file_spdxid_value", "tests/test_builder.py::TestSnippetBuilder::test_snippet_lic_comment", "tests/test_builder.py::TestSnippetBuilder::test_snippet_lic_comment_order", "tests/test_builder.py::TestSnippetBuilder::test_snippet_lic_comment_text_value", "tests/test_builder.py::TestSnippetBuilder::test_snippet_lics_info", "tests/test_builder.py::TestSnippetBuilder::test_snippet_lics_info_order", "tests/test_builder.py::TestSnippetBuilder::test_snippet_lics_info_value", "tests/test_builder.py::TestSnippetBuilder::test_snippet_line_range", "tests/test_builder.py::TestSnippetBuilder::test_snippet_line_range_order", "tests/test_builder.py::TestSnippetBuilder::test_snippet_line_range_value", "tests/test_builder.py::TestSnippetBuilder::test_snippet_line_range_value_wrong_format", "tests/test_builder.py::TestSnippetBuilder::test_snippet_name", "tests/test_builder.py::TestSnippetBuilder::test_snippet_name_order", "tests/test_checksum.py::test_checksum_to_rdf[SHA1-checksumAlgorithm_sha1]", "tests/test_checksum.py::test_checksum_to_rdf[SHA224-checksumAlgorithm_sha224]", "tests/test_checksum.py::test_checksum_to_rdf[SHA3_256-checksumAlgorithm_sha3_256]", "tests/test_checksum.py::test_checksum_to_rdf[BLAKE2B_256-checksumAlgorithm_blake2b256]", "tests/test_checksum.py::test_checksum_to_rdf[MD5-checksumAlgorithm_md5]", "tests/test_checksum.py::test_checksum_from_rdf[ChecksumAlgorithm.SHA1-checksumAlgorithm_sha1]", "tests/test_checksum.py::test_checksum_from_rdf[ChecksumAlgorithm.SHA224-checksumAlgorithm_sha224]", 
"tests/test_checksum.py::test_checksum_from_rdf[ChecksumAlgorithm.SHA3_256-checksumAlgorithm_sha3_256]", "tests/test_checksum.py::test_checksum_from_rdf[ChecksumAlgorithm.BLAKE2B_256-checksumAlgorithm_blake2b256]", "tests/test_checksum.py::test_checksum_from_rdf[ChecksumAlgorithm.MD5-checksumAlgorithm_md5]", "tests/test_checksum.py::test_checksum_from_wrong_rdf[_checksumAlgorithm_sha1]", "tests/test_checksum.py::test_checksum_from_wrong_rdf[checksumAlgorithm_sha_224]", "tests/test_checksum.py::test_checksum_from_wrong_rdf[checksumAlgorithm_sha3256]", "tests/test_checksum.py::test_checksum_from_wrong_rdf[checksumAlgorithm_blake2b", "tests/test_checksum.py::test_checksum_from_wrong_rdf[checksumAlgorithm_blake2b-256]", "tests/test_checksum.py::test_checksum_from_wrong_rdf[checksumAlgorithm_bblake2b", "tests/test_checksum.py::test_checksum_from_string[SHA1:", "tests/test_checksum.py::test_checksum_from_string[SHA3-256:", "tests/test_checksum.py::test_checksum_from_string[ADLER32:", "tests/test_checksum.py::test_checksum_from_string[BLAKE3:", "tests/test_checksum.py::test_checksum_from_string[BLAKE2b-256:", "tests/test_checksum.py::test_checksum_from_string[MD5:", "tests/test_checksum.py::test_wrong_checksum_from_string[SHA1:", "tests/test_checksum.py::test_wrong_checksum_from_string[SHA1000:", "tests/test_checksum.py::test_checksum_to_tv[checksum0-SHA1:", "tests/test_checksum.py::test_checksum_to_tv[checksum1-SHA3-256:", "tests/test_checksum.py::test_checksum_to_tv[checksum2-ADLER32:", "tests/test_checksum.py::test_checksum_to_tv[checksum3-BLAKE3:", "tests/test_checksum.py::test_checksum_to_tv[checksum4-BLAKE2b-256:", "tests/test_checksum.py::test_checksum_to_tv[checksum5-MD5:", "tests/test_cli_convertor.py::TestConvertor::test_determine_input_with_invalid_arguments", "tests/test_cli_convertor.py::TestConvertor::test_determine_input_with_known_i_format_unknown_o_format", "tests/test_cli_convertor.py::TestConvertor::test_determine_input_with_known_i_o_format", "tests/test_cli_convertor.py::TestConvertor::test_determine_input_with_unknown_i_format_known_o_format", "tests/test_cli_convertor.py::TestConvertor::test_determine_input_with_unknown_i_o_format", "tests/test_config.py::TestLicenseList::test_config_license_list_version_constant", "tests/test_config.py::TestLicenseList::test_load_exception_list", "tests/test_config.py::TestLicenseList::test_load_license_list", "tests/test_conversion.py::TestConversions::test_json_json", "tests/test_conversion.py::TestConversions::test_json_rdf", "tests/test_conversion.py::TestConversions::test_json_tagvalue", "tests/test_conversion.py::TestConversions::test_json_xml", "tests/test_conversion.py::TestConversions::test_json_yaml", "tests/test_conversion.py::TestConversions::test_rdf_rdf", "tests/test_conversion.py::TestConversions::test_tagvalue_json", "tests/test_conversion.py::TestConversions::test_tagvalue_rdf", "tests/test_conversion.py::TestConversions::test_tagvalue_tagvalue", "tests/test_conversion.py::TestConversions::test_tagvalue_xml", "tests/test_conversion.py::TestConversions::test_tagvalue_yaml", "tests/test_conversion.py::TestConversions::test_xml_json", "tests/test_conversion.py::TestConversions::test_xml_rdf", "tests/test_conversion.py::TestConversions::test_xml_tagvalue", "tests/test_conversion.py::TestConversions::test_xml_xml", "tests/test_conversion.py::TestConversions::test_xml_yaml", "tests/test_conversion.py::TestConversions::test_yaml_json", "tests/test_conversion.py::TestConversions::test_yaml_rdf", 
"tests/test_conversion.py::TestConversions::test_yaml_tagvalue", "tests/test_conversion.py::TestConversions::test_yaml_xml", "tests/test_conversion.py::TestConversions::test_yaml_yaml", "tests/test_creationinfo.py::TestCreationInfo::test_comment", "tests/test_creationinfo.py::TestCreationInfo::test_creators", "tests/test_creationinfo.py::TestCreationInfo::test_iso_format", "tests/test_creationinfo.py::TestCreationInfo::test_license_list_version", "tests/test_creationinfo.py::TestCreationInfo::test_timestamp", "tests/test_document.py::TestVersion::test_comparison", "tests/test_document.py::TestVersion::test_creation", "tests/test_document.py::TestDocument::test_creation", "tests/test_document.py::TestDocument::test_document_is_valid_when_using_or_later_licenses", "tests/test_document.py::TestDocument::test_document_multiple_packages", "tests/test_document.py::TestDocument::test_document_validate_failures_returns_informative_messages", "tests/test_document.py::TestDocument::test_document_without_packages", "tests/test_document.py::TestWriters::test_write_document_json_multi_package_with_validate", "tests/test_document.py::TestWriters::test_write_document_json_with_or_later_with_validate", "tests/test_document.py::TestWriters::test_write_document_json_with_validate", "tests/test_document.py::TestWriters::test_write_document_tv_mini", "tests/test_document.py::TestWriters::test_write_document_tv_with_or_later_with_validate", "tests/test_document.py::TestWriters::test_write_document_tv_with_validate", "tests/test_document.py::TestWriters::test_write_document_xml_multi_package_with_validate", "tests/test_document.py::TestWriters::test_write_document_xml_with_or_later_with_validate", "tests/test_document.py::TestWriters::test_write_document_xml_with_validate", "tests/test_document.py::TestWriters::test_write_document_yaml_multi_package_with_validate", "tests/test_document.py::TestWriters::test_write_document_yaml_with_or_later_with_validate", "tests/test_document.py::TestWriters::test_write_document_yaml_with_validate", "tests/test_document.py::TestLicense::test_from_full_name", "tests/test_document.py::TestLicense::test_from_identifier", "tests/test_document.py::TestLicense::test_license_list", "tests/test_document.py::TestLicense::test_url", "tests/test_document.py::TestException::test_exception_list", "tests/test_error_messages.py::test_error_message_context", "tests/test_jsonyamlxml_parser.py::TestParser::test_json_parser", "tests/test_jsonyamlxml_parser.py::TestParser::test_sbomyaml_parser", "tests/test_jsonyamlxml_parser.py::TestParser::test_xml_parser", "tests/test_jsonyamlxml_parser.py::TestParser::test_yaml_parser", "tests/test_jsonyamlxml_writer.py::test_external_package_references[yaml]", "tests/test_jsonyamlxml_writer.py::test_external_package_references[xml]", "tests/test_jsonyamlxml_writer.py::test_external_package_references[json]", "tests/test_jsonyamlxml_writer.py::test_primary_package_purpose[yaml]", "tests/test_jsonyamlxml_writer.py::test_primary_package_purpose[xml]", "tests/test_jsonyamlxml_writer.py::test_primary_package_purpose[json]", "tests/test_jsonyamlxml_writer.py::test_release_built_valid_until_date[yaml]", "tests/test_jsonyamlxml_writer.py::test_release_built_valid_until_date[xml]", "tests/test_jsonyamlxml_writer.py::test_release_built_valid_until_date[json]", "tests/test_jsonyamlxml_writer.py::test_snippet_byte_range[yaml]", "tests/test_jsonyamlxml_writer.py::test_snippet_byte_range[xml]", "tests/test_jsonyamlxml_writer.py::test_snippet_byte_range[json]", 
"tests/test_jsonyamlxml_writer.py::test_snippet_ranges[yaml]", "tests/test_jsonyamlxml_writer.py::test_snippet_ranges[xml]", "tests/test_jsonyamlxml_writer.py::test_snippet_ranges[json]", "tests/test_jsonyamlxml_writer.py::test_files_without_package[yaml]", "tests/test_jsonyamlxml_writer.py::test_files_without_package[xml]", "tests/test_jsonyamlxml_writer.py::test_files_without_package[json]", "tests/test_jsonyamlxml_writer.py::test_update_dict_item_with_new_item[existing_key-new_value-2]", "tests/test_jsonyamlxml_writer.py::test_update_dict_item_with_new_item[existing_key-existing_value-1]", "tests/test_jsonyamlxml_writer.py::test_update_dict_item_with_new_item[new_key-new_value-1]", "tests/test_package.py::TestPackage::test_package_with_non_sha1_check_sum", "tests/test_parse_anything.py::test_parse_anything[/root/data/temp_dir/tmpktnoe_76/spdx__tools-python__0.0/tests/data/formats/SPDXJsonExampleEmptyArrays.json]", "tests/test_parse_anything.py::test_parse_anything[/root/data/temp_dir/tmpktnoe_76/spdx__tools-python__0.0/tests/data/formats/SPDXYAMLExample-2.3.spdx.yaml]", "tests/test_parse_anything.py::test_parse_anything[/root/data/temp_dir/tmpktnoe_76/spdx__tools-python__0.0/tests/data/formats/SPDXJsonExample.json]", "tests/test_parse_anything.py::test_parse_anything[/root/data/temp_dir/tmpktnoe_76/spdx__tools-python__0.0/tests/data/formats/SPDXTagExample-v2.2.spdx]", "tests/test_parse_anything.py::test_parse_anything[/root/data/temp_dir/tmpktnoe_76/spdx__tools-python__0.0/tests/data/formats/SPDXTagExample.tag]", "tests/test_parse_anything.py::test_parse_anything[/root/data/temp_dir/tmpktnoe_76/spdx__tools-python__0.0/tests/data/formats/SPDXRdfExample.rdf]", "tests/test_parse_anything.py::test_parse_anything[/root/data/temp_dir/tmpktnoe_76/spdx__tools-python__0.0/tests/data/formats/SPDXYAMLExample-2.2.spdx.yaml]", "tests/test_parse_anything.py::test_parse_anything[/root/data/temp_dir/tmpktnoe_76/spdx__tools-python__0.0/tests/data/formats/SPDXTagExample-v2.3.spdx]", "tests/test_parse_anything.py::test_parse_anything[/root/data/temp_dir/tmpktnoe_76/spdx__tools-python__0.0/tests/data/formats/SPDXYamlExample.yaml]", "tests/test_parse_anything.py::test_parse_anything[/root/data/temp_dir/tmpktnoe_76/spdx__tools-python__0.0/tests/data/formats/SPDXXMLExample-v2.2.spdx.xml]", "tests/test_parse_anything.py::test_parse_anything[/root/data/temp_dir/tmpktnoe_76/spdx__tools-python__0.0/tests/data/formats/SPDXJSONExample-v2.3.spdx.json]", "tests/test_parse_anything.py::test_parse_anything[/root/data/temp_dir/tmpktnoe_76/spdx__tools-python__0.0/tests/data/formats/SPDXSBOMExample.tag]", "tests/test_parse_anything.py::test_parse_anything[/root/data/temp_dir/tmpktnoe_76/spdx__tools-python__0.0/tests/data/formats/SPDXJSONExample-v2.2.spdx.json]", "tests/test_parse_anything.py::test_parse_anything[/root/data/temp_dir/tmpktnoe_76/spdx__tools-python__0.0/tests/data/formats/SPDXXmlExample.xml]", "tests/test_parse_anything.py::test_parse_anything[/root/data/temp_dir/tmpktnoe_76/spdx__tools-python__0.0/tests/data/formats/SPDXSimpleTag.tag]", "tests/test_parse_anything.py::test_parse_anything[/root/data/temp_dir/tmpktnoe_76/spdx__tools-python__0.0/tests/data/formats/SPDXXMLExample-v2.3.spdx.xml]", "tests/test_parse_anything.py::test_parse_anything[/root/data/temp_dir/tmpktnoe_76/spdx__tools-python__0.0/tests/data/formats/SPDXSBOMExample.spdx.yml]", "tests/test_parsers_validation.py::TestValidations::test_validate_file_cpyright_does_not_crash_on_NoAssert_or_SPDXNone", 
"tests/test_parsers_validation.py::TestValidations::test_validate_file_cpyright_does_not_crash_on_None", "tests/test_parsers_validation.py::TestValidations::test_validate_pkg_cr_text_does_not_crash_on_NoAssert_or_SPDXNone", "tests/test_parsers_validation.py::TestValidations::test_validate_pkg_cr_text_does_not_crash_on_None", "tests/test_rdf_parser.py::TestParser::test_rdf_parser", "tests/test_rdf_writer.py::test_accept_provided_doc_node", "tests/test_rdf_writer.py::test_external_package_references", "tests/test_rdf_writer.py::test_multiple_packages_in_one_document", "tests/test_tag_value_parser.py::TestLexer::test_annotation", "tests/test_tag_value_parser.py::TestLexer::test_correct_line_number_with_empty_line_between", "tests/test_tag_value_parser.py::TestLexer::test_creation_info", "tests/test_tag_value_parser.py::TestLexer::test_document", "tests/test_tag_value_parser.py::TestLexer::test_external_document_references", "tests/test_tag_value_parser.py::TestLexer::test_package", "tests/test_tag_value_parser.py::TestLexer::test_review_info", "tests/test_tag_value_parser.py::TestLexer::test_snippet", "tests/test_tag_value_parser.py::TestLexer::test_unknown_tag", "tests/test_tag_value_parser.py::TestParser::test_annotation", "tests/test_tag_value_parser.py::TestParser::test_creation_info", "tests/test_tag_value_parser.py::TestParser::test_doc", "tests/test_tag_value_parser.py::TestParser::test_file", "tests/test_tag_value_parser.py::TestParser::test_package", "tests/test_tag_value_parser.py::TestParser::test_review", "tests/test_tag_value_parser.py::TestParser::test_snippet", "tests/test_tag_value_parser.py::TestParser::test_unknown_tag", "tests/test_write_anything.py::test_write_anything_json_yaml_xml_tv[SPDXJsonExampleEmptyArrays.json-yaml]", "tests/test_write_anything.py::test_write_anything_json_yaml_xml_tv[SPDXJsonExampleEmptyArrays.json-xml]", "tests/test_write_anything.py::test_write_anything_json_yaml_xml_tv[SPDXJsonExampleEmptyArrays.json-json]", "tests/test_write_anything.py::test_write_anything_json_yaml_xml_tv[SPDXJsonExampleEmptyArrays.json-tag]", "tests/test_write_anything.py::test_write_anything_json_yaml_xml_tv[SPDXYAMLExample-2.3.spdx.yaml-yaml]", "tests/test_write_anything.py::test_write_anything_json_yaml_xml_tv[SPDXYAMLExample-2.3.spdx.yaml-xml]", "tests/test_write_anything.py::test_write_anything_json_yaml_xml_tv[SPDXYAMLExample-2.3.spdx.yaml-json]", "tests/test_write_anything.py::test_write_anything_json_yaml_xml_tv[SPDXYAMLExample-2.3.spdx.yaml-tag]", "tests/test_write_anything.py::test_write_anything_json_yaml_xml_tv[SPDXJsonExample.json-yaml]", "tests/test_write_anything.py::test_write_anything_json_yaml_xml_tv[SPDXJsonExample.json-xml]", "tests/test_write_anything.py::test_write_anything_json_yaml_xml_tv[SPDXJsonExample.json-json]", "tests/test_write_anything.py::test_write_anything_json_yaml_xml_tv[SPDXJsonExample.json-tag]", "tests/test_write_anything.py::test_write_anything_json_yaml_xml_tv[SPDXTagExample.tag-yaml]", "tests/test_write_anything.py::test_write_anything_json_yaml_xml_tv[SPDXTagExample.tag-xml]", "tests/test_write_anything.py::test_write_anything_json_yaml_xml_tv[SPDXTagExample.tag-json]", "tests/test_write_anything.py::test_write_anything_json_yaml_xml_tv[SPDXTagExample.tag-tag]", "tests/test_write_anything.py::test_write_anything_json_yaml_xml_tv[SPDXYAMLExample-2.2.spdx.yaml-yaml]", "tests/test_write_anything.py::test_write_anything_json_yaml_xml_tv[SPDXYAMLExample-2.2.spdx.yaml-xml]", 
"tests/test_write_anything.py::test_write_anything_json_yaml_xml_tv[SPDXYAMLExample-2.2.spdx.yaml-json]", "tests/test_write_anything.py::test_write_anything_json_yaml_xml_tv[SPDXYAMLExample-2.2.spdx.yaml-tag]", "tests/test_write_anything.py::test_write_anything_json_yaml_xml_tv[SPDXYamlExample.yaml-yaml]", "tests/test_write_anything.py::test_write_anything_json_yaml_xml_tv[SPDXYamlExample.yaml-xml]", "tests/test_write_anything.py::test_write_anything_json_yaml_xml_tv[SPDXYamlExample.yaml-json]", "tests/test_write_anything.py::test_write_anything_json_yaml_xml_tv[SPDXYamlExample.yaml-tag]", "tests/test_write_anything.py::test_write_anything_json_yaml_xml_tv[SPDXXMLExample-v2.2.spdx.xml-yaml]", "tests/test_write_anything.py::test_write_anything_json_yaml_xml_tv[SPDXXMLExample-v2.2.spdx.xml-xml]", "tests/test_write_anything.py::test_write_anything_json_yaml_xml_tv[SPDXXMLExample-v2.2.spdx.xml-json]", "tests/test_write_anything.py::test_write_anything_json_yaml_xml_tv[SPDXXMLExample-v2.2.spdx.xml-tag]", "tests/test_write_anything.py::test_write_anything_json_yaml_xml_tv[SPDXJSONExample-v2.3.spdx.json-yaml]", "tests/test_write_anything.py::test_write_anything_json_yaml_xml_tv[SPDXJSONExample-v2.3.spdx.json-xml]", "tests/test_write_anything.py::test_write_anything_json_yaml_xml_tv[SPDXJSONExample-v2.3.spdx.json-json]", "tests/test_write_anything.py::test_write_anything_json_yaml_xml_tv[SPDXJSONExample-v2.3.spdx.json-tag]", "tests/test_write_anything.py::test_write_anything_json_yaml_xml_tv[SPDXSBOMExample.tag-yaml]", "tests/test_write_anything.py::test_write_anything_json_yaml_xml_tv[SPDXSBOMExample.tag-xml]", "tests/test_write_anything.py::test_write_anything_json_yaml_xml_tv[SPDXSBOMExample.tag-json]", "tests/test_write_anything.py::test_write_anything_json_yaml_xml_tv[SPDXSBOMExample.tag-tag]", "tests/test_write_anything.py::test_write_anything_json_yaml_xml_tv[SPDXJSONExample-v2.2.spdx.json-yaml]", "tests/test_write_anything.py::test_write_anything_json_yaml_xml_tv[SPDXJSONExample-v2.2.spdx.json-xml]", "tests/test_write_anything.py::test_write_anything_json_yaml_xml_tv[SPDXJSONExample-v2.2.spdx.json-json]", "tests/test_write_anything.py::test_write_anything_json_yaml_xml_tv[SPDXJSONExample-v2.2.spdx.json-tag]", "tests/test_write_anything.py::test_write_anything_json_yaml_xml_tv[SPDXXmlExample.xml-yaml]", "tests/test_write_anything.py::test_write_anything_json_yaml_xml_tv[SPDXXmlExample.xml-xml]", "tests/test_write_anything.py::test_write_anything_json_yaml_xml_tv[SPDXXmlExample.xml-json]", "tests/test_write_anything.py::test_write_anything_json_yaml_xml_tv[SPDXXmlExample.xml-tag]", "tests/test_write_anything.py::test_write_anything_json_yaml_xml_tv[SPDXSimpleTag.tag-yaml]", "tests/test_write_anything.py::test_write_anything_json_yaml_xml_tv[SPDXSimpleTag.tag-xml]", "tests/test_write_anything.py::test_write_anything_json_yaml_xml_tv[SPDXSimpleTag.tag-json]", "tests/test_write_anything.py::test_write_anything_json_yaml_xml_tv[SPDXSimpleTag.tag-tag]", "tests/test_write_anything.py::test_write_anything_json_yaml_xml_tv[SPDXXMLExample-v2.3.spdx.xml-yaml]", "tests/test_write_anything.py::test_write_anything_json_yaml_xml_tv[SPDXXMLExample-v2.3.spdx.xml-xml]", "tests/test_write_anything.py::test_write_anything_json_yaml_xml_tv[SPDXXMLExample-v2.3.spdx.xml-json]", "tests/test_write_anything.py::test_write_anything_json_yaml_xml_tv[SPDXXMLExample-v2.3.spdx.xml-tag]", "tests/test_write_anything.py::test_write_anything_rdf[SPDXRdfExample.rdf-rdf]" ]
{ "failed_lite_validators": [ "has_hyperlinks", "has_many_hunks" ], "has_test_patch": true, "is_lite": false }
2023-03-29 14:44:17+00:00
apache-2.0
5,634