repo | pull_number | instance_id | issue_numbers | base_commit | patch | test_patch | problem_statement | hints_text | created_at | PASS_TO_PASS | FAIL_TO_PASS |
---|---|---|---|---|---|---|---|---|---|---|---|
huggingface/datasets | 6,127 | huggingface__datasets-6127 | [
"6126"
] | ef17d9fd6c648bb41d43ba301c3de4d7b6f833d8 | diff --git a/src/datasets/config.py b/src/datasets/config.py
--- a/src/datasets/config.py
+++ b/src/datasets/config.py
@@ -24,6 +24,7 @@
# Hub
HF_ENDPOINT = os.environ.get("HF_ENDPOINT", "https://huggingface.co")
HUB_DATASETS_URL = HF_ENDPOINT + "/datasets/{repo_id}/resolve/{revision}/{path}"
+HUB_DATASETS_HFFS_URL = "hf://datasets/{repo_id}@{revision}/{path}"
HUB_DEFAULT_VERSION = "main"
PY_VERSION = version.parse(platform.python_version())
diff --git a/src/datasets/download/download_config.py b/src/datasets/download/download_config.py
--- a/src/datasets/download/download_config.py
+++ b/src/datasets/download/download_config.py
@@ -92,3 +92,11 @@ def __post_init__(self, use_auth_token):
def copy(self) -> "DownloadConfig":
return self.__class__(**{k: copy.deepcopy(v) for k, v in self.__dict__.items()})
+
+ def __setattr__(self, name, value):
+ if name == "token" and getattr(self, "storage_options", None) is not None:
+ if "hf" not in self.storage_options:
+ self.storage_options["hf"] = {"token": value, "endpoint": config.HF_ENDPOINT}
+ elif getattr(self.storage_options["hf"], "token", None) is None:
+ self.storage_options["hf"]["token"] = value
+ super().__setattr__(name, value)
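The hunk above mirrors an explicitly assigned `token` into the fsspec storage options used for `hf://` paths, so the `HfFileSystem` that resolves data files is authenticated. A minimal sketch of the intended effect (illustrative only, with a placeholder token; not taken from the library's tests):
```python
# Sketch: after this change, assigning `token` on an existing DownloadConfig also
# populates storage_options["hf"] (storage_options defaults to an empty dict).
from datasets import DownloadConfig

download_config = DownloadConfig()
download_config.token = "<MY-TOKEN>"  # triggers the __setattr__ override above
print(download_config.storage_options["hf"])
# expected per the hunk: {'token': '<MY-TOKEN>', 'endpoint': datasets.config.HF_ENDPOINT}
```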
diff --git a/src/datasets/features/audio.py b/src/datasets/features/audio.py
--- a/src/datasets/features/audio.py
+++ b/src/datasets/features/audio.py
@@ -173,8 +173,11 @@ def decode_example(
if file is None:
token_per_repo_id = token_per_repo_id or {}
source_url = path.split("::")[-1]
+ pattern = (
+ config.HUB_DATASETS_URL if source_url.startswith(config.HF_ENDPOINT) else config.HUB_DATASETS_HFFS_URL
+ )
try:
- repo_id = string_to_dict(source_url, config.HUB_DATASETS_URL)["repo_id"]
+ repo_id = string_to_dict(source_url, pattern)["repo_id"]
token = token_per_repo_id[repo_id]
except (ValueError, KeyError):
token = None
diff --git a/src/datasets/features/image.py b/src/datasets/features/image.py
--- a/src/datasets/features/image.py
+++ b/src/datasets/features/image.py
@@ -166,8 +166,13 @@ def decode_example(self, value: dict, token_per_repo_id=None) -> "PIL.Image.Imag
image = PIL.Image.open(path)
else:
source_url = path.split("::")[-1]
+ pattern = (
+ config.HUB_DATASETS_URL
+ if source_url.startswith(config.HF_ENDPOINT)
+ else config.HUB_DATASETS_HFFS_URL
+ )
try:
- repo_id = string_to_dict(source_url, config.HUB_DATASETS_URL)["repo_id"]
+ repo_id = string_to_dict(source_url, pattern)["repo_id"]
token = token_per_repo_id.get(repo_id)
except ValueError:
token = None
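Both decoders above now choose between two URL templates (the resolved HTTP URL and the new `hf://` form) before extracting the `repo_id` used to look up a token. A hedged sketch with made-up URLs (assuming `string_to_dict` is the helper from `datasets.utils.py_utils` used by these modules):
```python
# Sketch: the same repo_id should be recovered from either URL form seen when streaming.
from datasets import config
from datasets.utils.py_utils import string_to_dict

urls = [
    "https://huggingface.co/datasets/user/name/resolve/main/data/sample.wav",  # HTTP resolve URL
    "hf://datasets/user/name@main/data/sample.wav",                            # HfFileSystem path
]
for source_url in urls:
    pattern = config.HUB_DATASETS_URL if source_url.startswith(config.HF_ENDPOINT) else config.HUB_DATASETS_HFFS_URL
    print(string_to_dict(source_url, pattern)["repo_id"])  # expected: "user/name" in both cases
```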
diff --git a/src/datasets/load.py b/src/datasets/load.py
--- a/src/datasets/load.py
+++ b/src/datasets/load.py
@@ -539,6 +539,7 @@ def create_builder_configs_from_metadata_configs(
base_path: Optional[str] = None,
default_builder_kwargs: Dict[str, Any] = None,
allowed_extensions: Optional[List[str]] = None,
+ download_config: Optional[DownloadConfig] = None,
) -> Tuple[List[BuilderConfig], str]:
builder_cls = import_main_class(module_path)
builder_config_cls = builder_cls.BUILDER_CONFIG_CLASS
@@ -560,6 +561,7 @@ def create_builder_configs_from_metadata_configs(
config_patterns,
base_path=config_base_path,
allowed_extensions=ALL_ALLOWED_EXTENSIONS,
+ download_config=download_config,
)
except EmptyDatasetError as e:
raise EmptyDatasetError(
@@ -1070,6 +1072,7 @@ def get_module(self) -> DatasetModule:
base_path=base_path,
supports_metadata=supports_metadata,
default_builder_kwargs=default_builder_kwargs,
+ download_config=self.download_config,
)
else:
builder_configs, default_config_name = None, None
| diff --git a/tests/fixtures/hub.py b/tests/fixtures/hub.py
--- a/tests/fixtures/hub.py
+++ b/tests/fixtures/hub.py
@@ -48,12 +48,8 @@ def hf_api():
@pytest.fixture(scope="session")
-def hf_token(hf_api: HfApi):
- previous_token = HfFolder.get_token()
- HfFolder.save_token(CI_HUB_USER_TOKEN)
+def hf_token():
yield CI_HUB_USER_TOKEN
- if previous_token is not None:
- HfFolder.save_token(previous_token)
@pytest.fixture
diff --git a/tests/test_inspect.py b/tests/test_inspect.py
--- a/tests/test_inspect.py
+++ b/tests/test_inspect.py
@@ -47,6 +47,11 @@ def test_get_dataset_config_info(path, config_name, expected_splits):
assert list(info.splits.keys()) == expected_splits
+def test_get_dataset_config_info_private(hf_token, hf_private_dataset_repo_txt_data):
+ info = get_dataset_config_info(hf_private_dataset_repo_txt_data, config_name="default", token=hf_token)
+ assert list(info.splits.keys()) == ["train"]
+
+
@pytest.mark.parametrize(
"path, config_name, expected_exception",
[
diff --git a/tests/test_load.py b/tests/test_load.py
--- a/tests/test_load.py
+++ b/tests/test_load.py
@@ -38,6 +38,7 @@
PackagedDatasetModuleFactory,
infer_module_for_data_files_list,
infer_module_for_data_files_list_in_archives,
+ load_dataset_builder,
)
from datasets.packaged_modules.audiofolder.audiofolder import AudioFolder, AudioFolderConfig
from datasets.packaged_modules.imagefolder.imagefolder import ImageFolder, ImageFolderConfig
@@ -1223,13 +1224,19 @@ def assert_auth(method, url, *args, headers, **kwargs):
@pytest.mark.integration
def test_load_streaming_private_dataset(hf_token, hf_private_dataset_repo_txt_data):
- ds = load_dataset(hf_private_dataset_repo_txt_data, streaming=True)
+ ds = load_dataset(hf_private_dataset_repo_txt_data, streaming=True, token=hf_token)
assert next(iter(ds)) is not None
[email protected]
+def test_load_dataset_builder_private_dataset(hf_token, hf_private_dataset_repo_txt_data):
+ builder = load_dataset_builder(hf_private_dataset_repo_txt_data, token=hf_token)
+ assert isinstance(builder, DatasetBuilder)
+
+
@pytest.mark.integration
def test_load_streaming_private_dataset_with_zipped_data(hf_token, hf_private_dataset_repo_zipped_txt_data):
- ds = load_dataset(hf_private_dataset_repo_zipped_txt_data, streaming=True)
+ ds = load_dataset(hf_private_dataset_repo_zipped_txt_data, streaming=True, token=hf_token)
assert next(iter(ds)) is not None
@@ -1309,13 +1316,9 @@ def test_load_hub_dataset_without_script_with_metadata_config_in_parallel():
@require_pil
@pytest.mark.integration
[email protected]("implicit_token", [True])
@pytest.mark.parametrize("streaming", [True])
-def test_load_dataset_private_zipped_images(
- hf_private_dataset_repo_zipped_img_data, hf_token, streaming, implicit_token
-):
- token = None if implicit_token else hf_token
- ds = load_dataset(hf_private_dataset_repo_zipped_img_data, split="train", streaming=streaming, token=token)
+def test_load_dataset_private_zipped_images(hf_private_dataset_repo_zipped_img_data, hf_token, streaming):
+ ds = load_dataset(hf_private_dataset_repo_zipped_img_data, split="train", streaming=streaming, token=hf_token)
assert isinstance(ds, IterableDataset if streaming else Dataset)
ds_items = list(ds)
assert len(ds_items) == 2
diff --git a/tests/test_upstream_hub.py b/tests/test_upstream_hub.py
--- a/tests/test_upstream_hub.py
+++ b/tests/test_upstream_hub.py
@@ -33,12 +33,12 @@
@for_all_test_methods(xfail_if_500_502_http_error)
[email protected]("set_ci_hub_access_token", "ci_hfh_hf_hub_url")
[email protected]("ci_hub_config", "ci_hfh_hf_hub_url")
class TestPushToHub:
_api = HfApi(endpoint=CI_HUB_ENDPOINT)
_token = CI_HUB_USER_TOKEN
- def test_push_dataset_dict_to_hub_no_token(self, temporary_repo):
+ def test_push_dataset_dict_to_hub_no_token(self, temporary_repo, set_ci_hub_access_token):
ds = Dataset.from_dict({"x": [1, 2, 3], "y": [4, 5, 6]})
local_ds = DatasetDict({"train": ds})
@@ -778,6 +778,7 @@ def test_push_dataset_to_hub_with_config_no_metadata_configs(self, temporary_rep
path_in_repo="data/train-00000-of-00001.parquet",
repo_id=ds_name,
repo_type="dataset",
+ token=self._token,
)
ds_another_config.push_to_hub(ds_name, "another_config", token=self._token)
ds_builder = load_dataset_builder(ds_name, download_mode="force_redownload")
@@ -811,6 +812,7 @@ def test_push_dataset_dict_to_hub_with_config_no_metadata_configs(self, temporar
path_in_repo="data/random-00000-of-00001.parquet",
repo_id=ds_name,
repo_type="dataset",
+ token=self._token,
)
local_ds_another_config.push_to_hub(ds_name, "another_config", token=self._token)
ds_builder = load_dataset_builder(ds_name, download_mode="force_redownload")
| Private datasets do not load when passing token
### Describe the bug
Since the release of `datasets` 2.14, private/gated datasets do not load when passing `token`: they raise `EmptyDatasetError`.
This is a non-planned backward incompatible breaking change.
Note that private datasets do load if instead `download_config` is passed:
```python
from datasets import DownloadConfig, load_dataset
ds = load_dataset("albertvillanova/tmp-private", split="train", download_config=DownloadConfig(token="<MY-TOKEN>"))
ds
```
gives
```
Dataset({
features: ['text'],
num_rows: 4
})
```
### Steps to reproduce the bug
```python
from datasets import load_dataset
ds = load_dataset("albertvillanova/tmp-private", split="train", token="<MY-TOKEN>")
```
gives
```
---------------------------------------------------------------------------
EmptyDatasetError Traceback (most recent call last)
<ipython-input-2-25b48732107a> in <cell line: 3>()
1 from datasets import load_dataset
2
----> 3 ds = load_dataset("albertvillanova/tmp-private", split="train", token="<MY-TOKEN>")
5 frames
/usr/local/lib/python3.10/dist-packages/datasets/load.py in load_dataset(path, name, data_dir, data_files, split, cache_dir, features, download_config, download_mode, verification_mode, ignore_verifications, keep_in_memory, save_infos, revision, token, use_auth_token, task, streaming, num_proc, storage_options, **config_kwargs)
2107
2108 # Create a dataset builder
-> 2109 builder_instance = load_dataset_builder(
2110 path=path,
2111 name=name,
/usr/local/lib/python3.10/dist-packages/datasets/load.py in load_dataset_builder(path, name, data_dir, data_files, cache_dir, features, download_config, download_mode, revision, token, use_auth_token, storage_options, **config_kwargs)
1793 download_config = download_config.copy() if download_config else DownloadConfig()
1794 download_config.storage_options.update(storage_options)
-> 1795 dataset_module = dataset_module_factory(
1796 path,
1797 revision=revision,
/usr/local/lib/python3.10/dist-packages/datasets/load.py in dataset_module_factory(path, revision, download_config, download_mode, dynamic_modules_path, data_dir, data_files, **download_kwargs)
1484 raise ConnectionError(f"Couldn't reach the Hugging Face Hub for dataset '{path}': {e1}") from None
1485 if isinstance(e1, EmptyDatasetError):
-> 1486 raise e1 from None
1487 if isinstance(e1, FileNotFoundError):
1488 raise FileNotFoundError(
/usr/local/lib/python3.10/dist-packages/datasets/load.py in dataset_module_factory(path, revision, download_config, download_mode, dynamic_modules_path, data_dir, data_files, **download_kwargs)
1474 download_config=download_config,
1475 download_mode=download_mode,
-> 1476 ).get_module()
1477 except (
1478 Exception
/usr/local/lib/python3.10/dist-packages/datasets/load.py in get_module(self)
1030 sanitize_patterns(self.data_files)
1031 if self.data_files is not None
-> 1032 else get_data_patterns(base_path, download_config=self.download_config)
1033 )
1034 data_files = DataFilesDict.from_patterns(
/usr/local/lib/python3.10/dist-packages/datasets/data_files.py in get_data_patterns(base_path, download_config)
457 return _get_data_files_patterns(resolver)
458 except FileNotFoundError:
--> 459 raise EmptyDatasetError(f"The directory at {base_path} doesn't contain any data files") from None
460
461
EmptyDatasetError: The directory at hf://datasets/albertvillanova/tmp-private@79b9e4fe79670a9a050d6ebc385464891915a71d doesn't contain any data files
```
### Expected behavior
The dataset should load.
### Environment info
- `datasets` version: 2.14.3
- Platform: Linux-5.15.109+-x86_64-with-glibc2.35
- Python version: 3.10.12
- Huggingface_hub version: 0.16.4
- PyArrow version: 9.0.0
- Pandas version: 1.5.3
| Our CI did not catch this issue because, with the current implementation, the token stored in `HfFolder` (which always exists) is used by default. | 2023-08-07T15:41:25Z | [] | [] |
huggingface/datasets | 6,175 | huggingface__datasets-6175 | [
"6173"
] | 4566827557acbeba0d4cb66449bb70367e341b05 | diff --git a/src/datasets/table.py b/src/datasets/table.py
--- a/src/datasets/table.py
+++ b/src/datasets/table.py
@@ -1916,7 +1916,7 @@ def _concat_arrays(arrays):
_concat_arrays([array.values for array in arrays]),
)
elif pa.types.is_fixed_size_list(array_type):
- if config.PYARROW_VERSION.major < 13:
+ if config.PYARROW_VERSION.major < 14:
# PyArrow bug: https://github.com/apache/arrow/issues/35360
return pa.FixedSizeListArray.from_arrays(
_concat_arrays([array.values[array.offset * array.type.list_size :] for array in arrays]),
@@ -1993,7 +1993,7 @@ def array_cast(array: pa.Array, pa_type: pa.DataType, allow_number_to_str=True):
return pa.ListArray.from_arrays(array.offsets, _c(array.values, pa_type.value_type))
elif pa.types.is_fixed_size_list(array.type):
array_values = array.values
- if config.PYARROW_VERSION.major < 13:
+ if config.PYARROW_VERSION.major < 14:
# PyArrow bug: https://github.com/apache/arrow/issues/35360
array_values = array.values[array.offset * array.type.list_size :]
if pa.types.is_fixed_size_list(pa_type):
@@ -2109,7 +2109,7 @@ def cast_array_to_feature(array: pa.Array, feature: "FeatureType", allow_number_
elif pa.types.is_fixed_size_list(array.type):
# feature must be either [subfeature] or Sequence(subfeature)
array_values = array.values
- if config.PYARROW_VERSION.major < 13:
+ if config.PYARROW_VERSION.major < 14:
# PyArrow bug: https://github.com/apache/arrow/issues/35360
array_values = array.values[array.offset * array.type.list_size :]
if isinstance(feature, list):
@@ -2216,7 +2216,7 @@ def embed_array_storage(array: pa.Array, feature: "FeatureType"):
elif pa.types.is_fixed_size_list(array.type):
# feature must be either [subfeature] or Sequence(subfeature)
array_values = array.values
- if config.PYARROW_VERSION.major < 13:
+ if config.PYARROW_VERSION.major < 14:
# PyArrow bug: https://github.com/apache/arrow/issues/35360
array_values = array.values[array.offset * array.type.list_size :]
if isinstance(feature, list):
| diff --git a/tests/test_formatting.py b/tests/test_formatting.py
--- a/tests/test_formatting.py
+++ b/tests/test_formatting.py
@@ -76,7 +76,7 @@ def test_numpy_extractor(self):
np.testing.assert_equal(batch, {"a": np.array(_COL_A), "b": np.array(_COL_B)})
def test_numpy_extractor_nested(self):
- pa_table = self._create_dummy_table().drop(["a", "b"])
+ pa_table = self._create_dummy_table().drop(["a", "b", "d"])
extractor = NumpyArrowExtractor()
row = extractor.extract_row(pa_table)
self.assertEqual(row["c"][0].dtype, np.float64)
@@ -109,14 +109,39 @@ def test_pandas_extractor(self):
self.assertIsInstance(row, pd.DataFrame)
pd.testing.assert_series_equal(row["a"], pd.Series(_COL_A, name="a")[:1])
pd.testing.assert_series_equal(row["b"], pd.Series(_COL_B, name="b")[:1])
- pd.testing.assert_series_equal(row["d"], pd.Series(_COL_D, name="d")[:1])
col = extractor.extract_column(pa_table)
pd.testing.assert_series_equal(col, pd.Series(_COL_A, name="a"))
batch = extractor.extract_batch(pa_table)
self.assertIsInstance(batch, pd.DataFrame)
pd.testing.assert_series_equal(batch["a"], pd.Series(_COL_A, name="a"))
pd.testing.assert_series_equal(batch["b"], pd.Series(_COL_B, name="b"))
- pd.testing.assert_series_equal(batch["d"], pd.Series(_COL_D, name="d"))
+
+ def test_pandas_extractor_nested(self):
+ pa_table = self._create_dummy_table().drop(["a", "b", "d"])
+ extractor = PandasArrowExtractor()
+ row = extractor.extract_row(pa_table)
+ self.assertEqual(row["c"][0][0].dtype, np.float64)
+ self.assertEqual(row["c"].dtype, object)
+ col = extractor.extract_column(pa_table)
+ self.assertEqual(col[0][0].dtype, np.float64)
+ self.assertEqual(col[0].dtype, object)
+ self.assertEqual(col.dtype, object)
+ batch = extractor.extract_batch(pa_table)
+ self.assertEqual(batch["c"][0][0].dtype, np.float64)
+ self.assertEqual(batch["c"][0].dtype, object)
+ self.assertEqual(batch["c"].dtype, object)
+
+ def test_pandas_extractor_temporal(self):
+ pa_table = self._create_dummy_table().drop(["a", "b", "c"])
+ extractor = PandasArrowExtractor()
+ row = extractor.extract_row(pa_table)
+ self.assertTrue(pd.api.types.is_datetime64_any_dtype(row["d"].dtype))
+ col = extractor.extract_column(pa_table)
+ self.assertTrue(isinstance(col[0], datetime.datetime))
+ self.assertTrue(pd.api.types.is_datetime64_any_dtype(col.dtype))
+ batch = extractor.extract_batch(pa_table)
+ self.assertTrue(isinstance(batch["d"][0], datetime.datetime))
+ self.assertTrue(pd.api.types.is_datetime64_any_dtype(batch["d"].dtype))
class LazyDictTest(TestCase):
| Fix CI for pyarrow 13.0.0
pyarrow 13.0.0 just came out
```
FAILED tests/test_formatting.py::ArrowExtractorTest::test_pandas_extractor - AssertionError: Attributes of Series are different
Attribute "dtype" are different
[left]: datetime64[us, UTC]
[right]: datetime64[ns, UTC]
```
```
FAILED tests/test_table.py::test_cast_sliced_fixed_size_array_to_features - TypeError: Couldn't cast array of type
fixed_size_list<item: int32>[3]
to
Sequence(feature=Value(dtype='int64', id=None), length=3, id=None)
```
e.g. in https://github.com/huggingface/datasets/actions/runs/5952253963/job/16143847230
The first error may be related to https://github.com/apache/arrow/issues/33321
The second one may be because `feature.length * len(array) == len(array_values)` is no longer satisfied somehow?
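For context, the `array.offset * array.type.list_size` slices guarded by the version checks in the patch compensate for the Arrow issue linked in its comments (`.values` ignoring the slice offset of a fixed-size list). A minimal sketch of the broken invariant (the printed lengths depend on the installed pyarrow version):
```python
# Sketch: on affected pyarrow versions, .values of a sliced FixedSizeListArray still
# spans the parent array, so list_size * len(array) != len(array.values).
import pyarrow as pa

arr = pa.array([[0, 1, 2], [3, 4, 5], [6, 7, 8]], pa.list_(pa.int32(), 3))
sliced = arr[1:]  # offset 1, length 2
print(len(sliced), len(sliced.values))  # 2 and 9 when the offset is ignored, 2 and 6 once fixed upstream
```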
| 2023-08-23T15:45:53Z | [] | [] |
|
huggingface/datasets | 6,198 | huggingface__datasets-6198 | [
"6196"
] | 0f8c58002481568eb1aa4f6f86c4509cf476800a | diff --git a/src/datasets/data_files.py b/src/datasets/data_files.py
--- a/src/datasets/data_files.py
+++ b/src/datasets/data_files.py
@@ -682,17 +682,6 @@ def from_patterns(
)
return out
- def __reduce__(self):
- """
- To make sure the order of the keys doesn't matter when pickling and hashing:
-
- >>> from datasets.data_files import DataFilesDict
- >>> from datasets.fingerprint import Hasher
- >>> assert Hasher.hash(DataFilesDict(a=[], b=[])) == Hasher.hash(DataFilesDict(b=[], a=[]))
-
- """
- return DataFilesDict, (dict(sorted(self.items())),)
-
def filter_extensions(self, extensions: List[str]) -> "DataFilesDict":
out = type(self)()
for key, data_files_list in self.items():
| diff --git a/tests/test_builder.py b/tests/test_builder.py
--- a/tests/test_builder.py
+++ b/tests/test_builder.py
@@ -759,10 +759,6 @@ def test_cache_dir_for_data_files(self):
cache_dir=tmp_dir, data_files={"train": [dummy_data1], "test": dummy_data2}
)
self.assertEqual(builder.cache_dir, other_builder.cache_dir)
- other_builder = DummyGeneratorBasedBuilder(
- cache_dir=tmp_dir, data_files={"test": dummy_data2, "train": dummy_data1}
- )
- self.assertEqual(builder.cache_dir, other_builder.cache_dir)
other_builder = DummyGeneratorBasedBuilder(
cache_dir=tmp_dir, data_files={"train": dummy_data1, "validation": dummy_data2}
)
diff --git a/tests/test_data_files.py b/tests/test_data_files.py
--- a/tests/test_data_files.py
+++ b/tests/test_data_files.py
@@ -1,3 +1,4 @@
+import copy
import os
from pathlib import Path, PurePath
from typing import List
@@ -385,6 +386,13 @@ def test_DataFilesList_from_patterns_raises_FileNotFoundError(complex_data_dir):
DataFilesList.from_patterns(["file_that_doesnt_exist.txt"], complex_data_dir)
+class TestDataFilesDict:
+ def test_key_order_after_copy(self):
+ data_files = DataFilesDict({"train": "train.csv", "test": "test.csv"})
+ copied_data_files = copy.deepcopy(data_files)
+ assert list(copied_data_files.keys()) == list(data_files.keys()) # test split order with list()
+
+
@pytest.mark.parametrize("pattern", _TEST_PATTERNS)
def test_DataFilesDict_from_patterns_in_dataset_repository(
hub_dataset_repo_path, hub_dataset_repo_patterns_results, pattern
| Split order is not preserved
I have noticed that in some cases the split order is not preserved.
For example, consider a no-script dataset with configs:
```yaml
configs:
- config_name: default
data_files:
- split: train
path: train.csv
- split: test
path: test.csv
```
- Note the defined split order is [train, test]
Once the dataset is loaded, the split order is not preserved:
```python
In [16]: ds
Out[16]:
DatasetDict({
test: Dataset({
features: ['text', 'label'],
num_rows: 1
})
train: Dataset({
features: ['text', 'label'],
num_rows: 2
})
})
```
- Note the obtained split order is [test, train]
| 2023-08-31T09:00:26Z | [] | [] |
|
huggingface/datasets | 6,208 | huggingface__datasets-6208 | [
"6207"
] | a1d520a5226268f2c6f0303de3e8bfd72198b082 | diff --git a/src/datasets/packaged_modules/__init__.py b/src/datasets/packaged_modules/__init__.py
--- a/src/datasets/packaged_modules/__init__.py
+++ b/src/datasets/packaged_modules/__init__.py
@@ -60,5 +60,5 @@ def _hash_python_lines(lines: List[str]) -> str:
for _ext, (_module, _) in _EXTENSION_TO_MODULE.items():
_MODULE_TO_EXTENSIONS.setdefault(_module, []).append(_ext)
-_MODULE_TO_EXTENSIONS["imagefolder"].append(".zip")
-_MODULE_TO_EXTENSIONS["audiofolder"].append(".zip")
+for _module in _MODULE_TO_EXTENSIONS:
+ _MODULE_TO_EXTENSIONS[_module].append(".zip")
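The loop above registers `.zip` as a recognized extension for every packaged module instead of only the folder-based ones, so no-script repos that only ship zipped CSV/JSON/text files are routed to the right builder. A quick, hedged way to check the effect (it reads a private constant, so treat it as illustrative):
```python
# Sketch: after the patch, ".zip" is mapped for e.g. the csv and json modules too.
from datasets.packaged_modules import _MODULE_TO_EXTENSIONS

print(".zip" in _MODULE_TO_EXTENSIONS["csv"])   # expected: True
print(".zip" in _MODULE_TO_EXTENSIONS["json"])  # expected: True
```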
| diff --git a/tests/fixtures/files.py b/tests/fixtures/files.py
--- a/tests/fixtures/files.py
+++ b/tests/fixtures/files.py
@@ -289,7 +289,7 @@ def bz2_csv_path(csv_path, tmp_path_factory):
@pytest.fixture(scope="session")
def zip_csv_path(csv_path, csv2_path, tmp_path_factory):
- path = tmp_path_factory.mktemp("data") / "dataset.csv.zip"
+ path = tmp_path_factory.mktemp("zip_csv_path") / "csv-dataset.zip"
with zipfile.ZipFile(path, "w") as f:
f.write(csv_path, arcname=os.path.basename(csv_path))
f.write(csv2_path, arcname=os.path.basename(csv2_path))
diff --git a/tests/test_load.py b/tests/test_load.py
--- a/tests/test_load.py
+++ b/tests/test_load.py
@@ -1458,3 +1458,12 @@ def test_load_dataset_with_storage_options_with_decoding(mockfs, image_file):
ds = load_dataset("imagefolder", data_files=data_files, storage_options=mockfs.storage_options)
assert len(ds["train"]) == 1
assert isinstance(ds["train"][0]["image"], PIL.Image.Image)
+
+
+def test_load_dataset_without_script_with_zip(zip_csv_path):
+ path = str(zip_csv_path.parent)
+ ds = load_dataset(path)
+ assert list(ds.keys()) == ["train"]
+ assert ds["train"].column_names == ["col_1", "col_2", "col_3"]
+ assert ds["train"].num_rows == 8
+ assert ds["train"][0] == {"col_1": 0, "col_2": 0, "col_3": 0.0}
| No-script datasets with ZIP files do not load
While investigating an issue on a Hub dataset, I have discovered that no-script datasets containing ZIP files do not load.
For example, a no-script dataset containing ZIP files raises NonMatchingSplitsSizesError:
```python
In [2]: ds = load_dataset("sidovic/LearningQ-qg")
NonMatchingSplitsSizesError: [
{
'expected': SplitInfo(name='train', num_bytes=0, num_examples=188660, shard_lengths=None, dataset_name=None),
'recorded': SplitInfo(name='train', num_bytes=0, num_examples=0, shard_lengths=None, dataset_name='learning_q-qg')
}, {
'expected': SplitInfo(name='validation', num_bytes=0, num_examples=20630, shard_lengths=None, dataset_name=None),
'recorded': SplitInfo(name='validation', num_bytes=0, num_examples=0, shard_lengths=None, dataset_name='learning_q-qg')
}, {
'expected': SplitInfo(name='test', num_bytes=0, num_examples=18227, shard_lengths=None, dataset_name=None),
'recorded': SplitInfo(name='test', num_bytes=0, num_examples=0, shard_lengths=None, dataset_name='learning_q-qg')
}
]
```
As another example, a no-script dataset containing just a zipped CSV file raises a DatasetGenerationError:
```
> num_examples, num_bytes = writer.finalize()
src/datasets/builder.py:1949:
> raise SchemaInferenceError("Please pass `features` or at least one example when writing data")
E datasets.arrow_writer.SchemaInferenceError: Please pass `features` or at least one example when writing data
src/datasets/arrow_writer.py:598: SchemaInferenceError
The above exception was the direct cause of the following exception:
src/datasets/load.py:2143: in load_dataset
builder_instance.download_and_prepare(
src/datasets/builder.py:954: in download_and_prepare
self._download_and_prepare(
src/datasets/builder.py:1049: in _download_and_prepare
self._prepare_split(split_generator, **prepare_split_kwargs)
src/datasets/builder.py:1813: in _prepare_split
for job_id, done, content in self._prepare_split_single(
> raise DatasetGenerationError("An error occurred while generating the dataset") from e
E datasets.builder.DatasetGenerationError: An error occurred while generating the dataset
src/datasets/builder.py:1958: DatasetGenerationError
```
After investigating, I think this bug was introduced in this PR:
- #5972
Related to:
- https://huggingface.co/datasets/sidovic/LearningQ-qg/discussions/1
CC: @lhoestq
| 2023-09-04T06:07:12Z | [] | [] |
|
huggingface/datasets | 6,243 | huggingface__datasets-6243 | [
"6242"
] | 9b21e181b642bd55b3ef68c1948bfbcd388136d6 | diff --git a/src/datasets/iterable_dataset.py b/src/datasets/iterable_dataset.py
--- a/src/datasets/iterable_dataset.py
+++ b/src/datasets/iterable_dataset.py
@@ -134,7 +134,7 @@ def _convert_to_arrow(
iterator = iter(iterable)
for key, example in iterator:
iterator_batch = islice(iterator, batch_size - 1)
- key_examples_list = [(key, example)] + [(key, example) for key, example in iterator_batch]
+ key_examples_list = [(key, example)] + list(iterator_batch)
if len(key_examples_list) < batch_size and drop_last_batch:
return
keys, examples = zip(*key_examples_list)
@@ -697,7 +697,7 @@ def _iter(self):
if self.batch_size is None or self.batch_size <= 0
else islice(iterator, self.batch_size - 1)
)
- key_examples_list = [(key, example)] + [(key, example) for key, example in iterator_batch]
+ key_examples_list = [(key, example)] + list(iterator_batch)
keys, examples = zip(*key_examples_list)
if (
self.drop_last_batch
@@ -880,7 +880,7 @@ def _iter(self):
if self.batch_size is None or self.batch_size <= 0
else islice(iterator, self.batch_size - 1)
)
- key_examples_list = [(key, example)] + [(key, example) for key, example in iterator_batch]
+ key_examples_list = [(key, example)] + list(iterator_batch)
keys, examples = zip(*key_examples_list)
batch = _examples_to_batch(examples)
batch = format_dict(batch) if format_dict else batch
diff --git a/src/datasets/packaged_modules/folder_based_builder/folder_based_builder.py b/src/datasets/packaged_modules/folder_based_builder/folder_based_builder.py
--- a/src/datasets/packaged_modules/folder_based_builder/folder_based_builder.py
+++ b/src/datasets/packaged_modules/folder_based_builder/folder_based_builder.py
@@ -146,7 +146,7 @@ def analyze(files_or_archives, downloaded_files_or_dirs, split):
datasets.SplitGenerator(
name=split_name,
gen_kwargs={
- "files": [(file, downloaded_file) for file, downloaded_file in zip(files, downloaded_files)]
+ "files": list(zip(files, downloaded_files))
+ [(None, dl_manager.iter_files(downloaded_dir)) for downloaded_dir in downloaded_dirs],
"metadata_files": metadata_files,
"split_name": split_name,
diff --git a/src/datasets/table.py b/src/datasets/table.py
--- a/src/datasets/table.py
+++ b/src/datasets/table.py
@@ -2002,7 +2002,7 @@ def array_cast(array: pa.Array, pa_type: pa.DataType, allow_number_to_str=True):
pa_type.list_size,
)
elif pa.types.is_list(pa_type):
- offsets_arr = pa.array(range(len(array) + 1), pa.int32())
+ offsets_arr = pa.array(np.arange(len(array) + 1) * array.type.list_size, pa.int32())
if array.null_count > 0:
if config.PYARROW_VERSION.major < 10:
warnings.warn(
@@ -2061,6 +2061,7 @@ def cast_array_to_feature(array: pa.Array, feature: "FeatureType", allow_number_
array = array.storage
if hasattr(feature, "cast_storage"):
return feature.cast_storage(array)
+
elif pa.types.is_struct(array.type):
# feature must be a dict or Sequence(subfeatures_dict)
if isinstance(feature, Sequence) and isinstance(feature.feature, dict):
@@ -2126,7 +2127,7 @@ def cast_array_to_feature(array: pa.Array, feature: "FeatureType", allow_number_
if feature.length * len(array) == len(array_values):
return pa.FixedSizeListArray.from_arrays(_c(array_values, feature.feature), feature.length)
else:
- offsets_arr = pa.array(range(len(array) + 1), pa.int32())
+ offsets_arr = pa.array(np.arange(len(array) + 1) * array.type.list_size, pa.int32())
if array.null_count > 0:
if config.PYARROW_VERSION.major < 10:
warnings.warn(
@@ -2233,7 +2234,7 @@ def embed_array_storage(array: pa.Array, feature: "FeatureType"):
if feature.length * len(array) == len(array_values):
return pa.FixedSizeListArray.from_arrays(_e(array_values, feature.feature), feature.length)
else:
- offsets_arr = pa.array(range(len(array) + 1), pa.int32())
+ offsets_arr = pa.array(np.arange(len(array) + 1) * array.type.list_size, pa.int32())
if array.null_count > 0:
if config.PYARROW_VERSION.major < 10:
warnings.warn(
| diff --git a/tests/test_iterable_dataset.py b/tests/test_iterable_dataset.py
--- a/tests/test_iterable_dataset.py
+++ b/tests/test_iterable_dataset.py
@@ -131,7 +131,7 @@ def test_convert_to_arrow(batch_size, drop_last_batch):
num_batches = (num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size
subtables = list(
_convert_to_arrow(
- [(i, example) for i, example in enumerate(examples)],
+ list(enumerate(examples)),
batch_size=batch_size,
drop_last_batch=drop_last_batch,
)
@@ -162,9 +162,7 @@ def test_batch_arrow_tables(tables, batch_size, drop_last_batch):
num_rows = len(full_table) if not drop_last_batch else len(full_table) // batch_size * batch_size
num_batches = (num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size
subtables = list(
- _batch_arrow_tables(
- [(i, table) for i, table in enumerate(tables)], batch_size=batch_size, drop_last_batch=drop_last_batch
- )
+ _batch_arrow_tables(list(enumerate(tables)), batch_size=batch_size, drop_last_batch=drop_last_batch)
)
assert len(subtables) == num_batches
if drop_last_batch:
diff --git a/tests/test_table.py b/tests/test_table.py
--- a/tests/test_table.py
+++ b/tests/test_table.py
@@ -1189,6 +1189,18 @@ def test_cast_array_to_features_sequence_classlabel():
assert cast_array_to_feature(arr, Sequence(ClassLabel(names=["foo", "bar"])))
+def test_cast_fixed_size_array_to_features_sequence():
+ arr = pa.array([[0, 1, 2], [3, 4, 5], [6, 7, 8]], pa.list_(pa.int32(), 3))
+ # Fixed size list
+ casted_array = cast_array_to_feature(arr, Sequence(Value("int64"), length=3))
+ assert casted_array.type == pa.list_(pa.int64(), 3)
+ assert casted_array.to_pylist() == arr.to_pylist()
+ # Variable size list
+ casted_array = cast_array_to_feature(arr, Sequence(Value("int64")))
+ assert casted_array.type == pa.list_(pa.int64())
+ assert casted_array.to_pylist() == arr.to_pylist()
+
+
def test_cast_sliced_fixed_size_array_to_features():
arr = pa.array([[0, 1, 2], [3, 4, 5], [6, 7, 8]], pa.list_(pa.int32(), 3))
casted_array = cast_array_to_feature(arr[1:], Sequence(Value("int64"), length=3))
| Data alteration when loading dataset with unspecified inner sequence length
### Describe the bug
When a dataset saved with a specified inner sequence length is loaded without specifying that length, the original data is altered and becomes inconsistent.
### Steps to reproduce the bug
```python
from datasets import Dataset, Features, Value, Sequence, load_dataset
# Repository ID
repo_id = "my_repo_id"
# Define features with a specific length of 3 for each inner sequence
specified_features = Features({"key": Sequence(Sequence(Value("float32"), length=3))})
# Create a dataset with the specified features
data = [
[[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]],
[[7.0, 8.0, 9.0], [10.0, 11.0, 12.0]],
]
dataset = Dataset.from_dict({"key": data}, features=specified_features)
# Push the dataset to the hub
dataset.push_to_hub(repo_id)
# Define features without specifying the length
unspecified_features = Features({"key": Sequence(Sequence(Value("float32")))})
# Load the dataset from the hub with this new feature definition
dataset = load_dataset(f"qgallouedec/{repo_id}", split="train", features=unspecified_features)
# The obtained data is altered
print(dataset.to_dict()) # {'key': [[[1.0], [2.0]], [[3.0], [4.0]]]}
```
### Expected behavior
```python
print(dataset.to_dict()) # {'key': [[[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]], [[7.0, 8.0, 9.0], [10.0, 11.0, 12.0]]]}
```
### Environment info
- `datasets` version: 2.14.4
- Platform: Linux-6.2.0-32-generic-x86_64-with-glibc2.35
- Python version: 3.9.12
- Huggingface_hub version: 0.15.1
- PyArrow version: 12.0.1
- Pandas version: 2.0.3
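The root cause is visible in the `offsets_arr` lines changed in the patch above: when a fixed-size list array is converted to a variable-size list, the offsets must step by `list_size`; using `range(len(array) + 1)` silently re-chunks the values one element at a time. A small pyarrow sketch with the issue's values:
```python
# Sketch: wrong vs. corrected offsets when turning a fixed-size list into a list array.
import numpy as np
import pyarrow as pa

arr = pa.array([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]], pa.list_(pa.float32(), 3))
bad_offsets = pa.array(range(len(arr) + 1), pa.int32())                            # 0, 1, 2
good_offsets = pa.array(np.arange(len(arr) + 1) * arr.type.list_size, pa.int32())  # 0, 3, 6

print(pa.ListArray.from_arrays(bad_offsets, arr.values).to_pylist())   # [[1.0], [2.0]] -> data altered
print(pa.ListArray.from_arrays(good_offsets, arr.values).to_pylist())  # [[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]]
```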
| While this issue may seem specific, it led to a silent problem in my workflow that took days to diagnose. If this feature is not intended to be supported, an error should be raised when encountering this configuration to prevent such issues. | 2023-09-15T14:23:33Z | [] | [] |
huggingface/datasets | 6,244 | huggingface__datasets-6244 | [
"6214"
] | a1e1867e932f14233244fb25713f3c94c46ff50a | diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -126,7 +126,7 @@
"multiprocess",
# to save datasets locally or on any filesystem
# minimum 2023.1.0 to support protocol=kwargs in fsspec's `open`, `get_fs_token_paths`, etc.: see https://github.com/fsspec/filesystem_spec/pull/1143
- "fsspec[http]>=2023.1.0,<2023.9.0", # Temporary pin
+ "fsspec[http]>=2023.1.0",
# for data streaming via http
"aiohttp",
# To get datasets from the Datasets Hub on huggingface.co
diff --git a/src/datasets/config.py b/src/datasets/config.py
--- a/src/datasets/config.py
+++ b/src/datasets/config.py
@@ -36,6 +36,7 @@
# Imports
DILL_VERSION = version.parse(importlib.metadata.version("dill"))
+FSSPEC_VERSION = version.parse(importlib.metadata.version("fsspec"))
PANDAS_VERSION = version.parse(importlib.metadata.version("pandas"))
PYARROW_VERSION = version.parse(importlib.metadata.version("pyarrow"))
diff --git a/src/datasets/data_files.py b/src/datasets/data_files.py
--- a/src/datasets/data_files.py
+++ b/src/datasets/data_files.py
@@ -9,6 +9,7 @@
from fsspec import get_fs_token_paths
from fsspec.implementations.http import HTTPFileSystem
from huggingface_hub import HfFileSystem
+from packaging import version
from tqdm.contrib.concurrent import thread_map
from . import config
@@ -42,23 +43,17 @@ class EmptyDatasetError(FileNotFoundError):
Split.TEST: ["test", "testing", "eval", "evaluation"],
}
NON_WORDS_CHARS = "-._ 0-9"
-KEYWORDS_IN_FILENAME_BASE_PATTERNS = ["**[{sep}/]{keyword}[{sep}]*", "{keyword}[{sep}]*"]
-KEYWORDS_IN_DIR_NAME_BASE_PATTERNS = ["{keyword}[{sep}/]**", "**[{sep}/]{keyword}[{sep}/]**"]
+if config.FSSPEC_VERSION < version.parse("2023.9.0"):
+ KEYWORDS_IN_PATH_NAME_BASE_PATTERNS = ["{keyword}[{sep}/]**", "**[{sep}/]{keyword}[{sep}/]**"]
+else:
+ KEYWORDS_IN_PATH_NAME_BASE_PATTERNS = ["{keyword}[{sep}/]**", "**/*[{sep}/]{keyword}[{sep}/]**"]
DEFAULT_SPLITS = [Split.TRAIN, Split.VALIDATION, Split.TEST]
-DEFAULT_PATTERNS_SPLIT_IN_FILENAME = {
+DEFAULT_PATTERNS_SPLIT_IN_PATH_NAME = {
split: [
pattern.format(keyword=keyword, sep=NON_WORDS_CHARS)
for keyword in SPLIT_KEYWORDS[split]
- for pattern in KEYWORDS_IN_FILENAME_BASE_PATTERNS
- ]
- for split in DEFAULT_SPLITS
-}
-DEFAULT_PATTERNS_SPLIT_IN_DIR_NAME = {
- split: [
- pattern.format(keyword=keyword, sep=NON_WORDS_CHARS)
- for keyword in SPLIT_KEYWORDS[split]
- for pattern in KEYWORDS_IN_DIR_NAME_BASE_PATTERNS
+ for pattern in KEYWORDS_IN_PATH_NAME_BASE_PATTERNS
]
for split in DEFAULT_SPLITS
}
@@ -69,16 +64,21 @@ class EmptyDatasetError(FileNotFoundError):
ALL_SPLIT_PATTERNS = [SPLIT_PATTERN_SHARDED]
ALL_DEFAULT_PATTERNS = [
- DEFAULT_PATTERNS_SPLIT_IN_DIR_NAME,
- DEFAULT_PATTERNS_SPLIT_IN_FILENAME,
+ DEFAULT_PATTERNS_SPLIT_IN_PATH_NAME,
DEFAULT_PATTERNS_ALL,
]
-METADATA_PATTERNS = [
- "metadata.csv",
- "**/metadata.csv",
- "metadata.jsonl",
- "**/metadata.jsonl",
-] # metadata file for ImageFolder and AudioFolder
+if config.FSSPEC_VERSION < version.parse("2023.9.0"):
+ METADATA_PATTERNS = [
+ "metadata.csv",
+ "**/metadata.csv",
+ "metadata.jsonl",
+ "**/metadata.jsonl",
+ ] # metadata file for ImageFolder and AudioFolder
+else:
+ METADATA_PATTERNS = [
+ "**/metadata.csv",
+ "**/metadata.jsonl",
+ ] # metadata file for ImageFolder and AudioFolder
WILDCARD_CHARACTERS = "*[]"
FILES_TO_IGNORE = [
"README.md",
@@ -297,10 +297,10 @@ def resolve_pattern(
- data/** to match all the files inside "data" and its subdirectories
The patterns are resolved using the fsspec glob.
- Here are some behaviors specific to fsspec glob that are different from glob.glob, Path.glob, Path.match or fnmatch:
- - '*' matches only first level items
- - '**' matches all items
- - '**/*' matches all at least second level items
+
+ glob.glob, Path.glob, Path.match or fnmatch do not support ** with a prefix/suffix other than a forward slash /.
+ For instance, this means **.json is the same as *.json. On the contrary, the fsspec glob has no limits regarding the ** prefix/suffix,
+ resulting in **.json being equivalent to **/*.json.
More generally:
- '*' matches any character except a forward-slash (to match just the file or directory name)
@@ -417,7 +417,8 @@ def get_data_patterns(base_path: str, download_config: Optional[DownloadConfig]
Output:
- {"train": ["**train*"], "test": ["**test*"]}
+ {'train': ['train[-._ 0-9/]**', '**/*[-._ 0-9/]train[-._ 0-9/]**', 'training[-._ 0-9/]**', '**/*[-._ 0-9/]training[-._ 0-9/]**'],
+ 'test': ['test[-._ 0-9/]**', '**/*[-._ 0-9/]test[-._ 0-9/]**', 'testing[-._ 0-9/]**', '**/*[-._ 0-9/]testing[-._ 0-9/]**', ...]}
Input:
@@ -435,7 +436,8 @@ def get_data_patterns(base_path: str, download_config: Optional[DownloadConfig]
Output:
- {"train": ["**train*/**"], "test": ["**test*/**"]}
+ {'train': ['train[-._ 0-9/]**', '**/*[-._ 0-9/]train[-._ 0-9/]**', 'training[-._ 0-9/]**', '**/*[-._ 0-9/]training[-._ 0-9/]**'],
+ 'test': ['test[-._ 0-9/]**', '**/*[-._ 0-9/]test[-._ 0-9/]**', 'testing[-._ 0-9/]**', '**/*[-._ 0-9/]testing[-._ 0-9/]**', ...]}
Input:
@@ -452,11 +454,9 @@ def get_data_patterns(base_path: str, download_config: Optional[DownloadConfig]
Output:
- {
- "train": ["data/train-[0-9][0-9][0-9][0-9][0-9]-of-[0-9][0-9][0-9][0-9][0-9].*"],
- "test": ["data/test-[0-9][0-9][0-9][0-9][0-9]-of-[0-9][0-9][0-9][0-9][0-9].*"],
- "random": ["data/random-[0-9][0-9][0-9][0-9][0-9]-of-[0-9][0-9][0-9][0-9][0-9].*"],
- }
+ {'train': ['data/train-[0-9][0-9][0-9][0-9][0-9]-of-[0-9][0-9][0-9][0-9][0-9]*.*'],
+ 'test': ['data/test-[0-9][0-9][0-9][0-9][0-9]-of-[0-9][0-9][0-9][0-9][0-9]*.*'],
+ 'random': ['data/random-[0-9][0-9][0-9][0-9][0-9]-of-[0-9][0-9][0-9][0-9][0-9]*.*']}
In order, it first tests if SPLIT_PATTERN_SHARDED works, otherwise it tests the patterns in ALL_DEFAULT_PATTERNS.
"""
| diff --git a/tests/test_data_files.py b/tests/test_data_files.py
--- a/tests/test_data_files.py
+++ b/tests/test_data_files.py
@@ -1,6 +1,6 @@
import copy
import os
-from pathlib import Path, PurePath
+from pathlib import Path
from typing import List
from unittest.mock import patch
@@ -493,7 +493,8 @@ def mock_fs(file_paths: List[str]):
Example:
```py
- >>> fs = mock_fs(["data/train.txt", "data.test.txt"])
+ >>> DummyTestFS = mock_fs(["data/train.txt", "data.test.txt"])
+ >>> fs = DummyTestFS()
>>> assert fsspec.get_filesystem_class("mock").__name__ == "DummyTestFS"
>>> assert type(fs).__name__ == "DummyTestFS"
>>> print(fs.glob("**"))
@@ -501,7 +502,7 @@ def mock_fs(file_paths: List[str]):
```
"""
- dir_paths = {file_path.rsplit("/")[0] for file_path in file_paths if "/" in file_path}
+ dir_paths = {file_path.rsplit("/", 1)[0] for file_path in file_paths if "/" in file_path}
fs_contents = [{"name": dir_path, "type": "directory"} for dir_path in dir_paths] + [
{"name": file_path, "type": "file", "size": 10} for file_path in file_paths
]
@@ -619,16 +620,17 @@ def resolver(pattern):
["metadata.jsonl"],
["metadata.csv"],
# nested metadata files
- ["data/metadata.jsonl", "data/train/metadata.jsonl"],
- ["data/metadata.csv", "data/train/metadata.csv"],
+ ["metadata.jsonl", "data/metadata.jsonl"],
+ ["metadata.csv", "data/metadata.csv"],
],
)
def test_get_metadata_files_patterns(metadata_files):
+ DummyTestFS = mock_fs(metadata_files)
+ fs = DummyTestFS()
+
def resolver(pattern):
- return [PurePath(path) for path in set(metadata_files) if PurePath(path).match(pattern)]
+ return [file_path for file_path in fs.glob(pattern) if fs.isfile(file_path)]
patterns = _get_metadata_files_patterns(resolver)
- matched = [path for path in metadata_files for pattern in patterns if PurePath(path).match(pattern)]
- # Use set to remove the difference between in behavior between PurePath.match and mathcing via fsspec.glob
- assert len(set(matched)) == len(metadata_files)
- assert sorted(set(matched)) == sorted(metadata_files)
+ matched = [file_path for pattern in patterns for file_path in resolver(pattern)]
+ assert sorted(matched) == sorted(metadata_files)
| Unpin fsspec < 2023.9.0
Once the root issue is fixed, remove the temporary pin of fsspec < 2023.9.0 introduced by:
- #6210
Related to issue:
- #6209
After investigation, I think the root issue is related to the new glob behavior with the double asterisk `**` introduced in:
- https://github.com/fsspec/filesystem_spec/pull/1329
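The accompanying patch gates the default split-detection patterns on the installed fsspec version; a quick sketch to see which set is active (it only reads constants introduced by that patch, so it assumes the patch is applied):
```python
# Sketch: inspect the fsspec-version-dependent defaults added in this PR.
from packaging import version
from datasets import config
from datasets.data_files import KEYWORDS_IN_PATH_NAME_BASE_PATTERNS

print(config.FSSPEC_VERSION, config.FSSPEC_VERSION >= version.parse("2023.9.0"))
print(KEYWORDS_IN_PATH_NAME_BASE_PATTERNS)
# fsspec <  2023.9.0: ['{keyword}[{sep}/]**', '**[{sep}/]{keyword}[{sep}/]**']
# fsspec >= 2023.9.0: ['{keyword}[{sep}/]**', '**/*[{sep}/]{keyword}[{sep}/]**']
```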
| 2023-09-15T17:58:25Z | [] | [] |
|
huggingface/datasets | 6,283 | huggingface__datasets-6283 | [
"6280",
"6360"
] | f9975f636542df7f95c27065ea93147440d690b7 | diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -113,8 +113,8 @@
# We use numpy>=1.17 to have np.random.Generator (Dataset shuffling)
"numpy>=1.17",
# Backend and serialization.
- # Minimum 8.0.0 to be able to use .to_reader()
- "pyarrow>=8.0.0",
+ # Minimum 12.0.0 to be able to concatenate extension arrays
+ "pyarrow>=12.0.0",
# As long as we allow pyarrow < 14.0.1, to fix vulnerability CVE-2023-47248
"pyarrow-hotfix",
# For smart caching dataset processing
@@ -166,7 +166,7 @@
"pytest-datadir",
"pytest-xdist",
# optional dependencies
- "apache-beam>=2.26.0,<2.44.0;python_version<'3.10'", # doesn't support recent dill versions for recent python versions
+ "apache-beam>=2.26.0; sys_platform != 'win32' and python_version<'3.10'", # doesn't support recent dill versions for recent python versions and on windows requires pyarrow<12.0.0
"elasticsearch<8.0.0", # 8.0 asks users to provide hosts or cloud_id when instantiating ElasticSearch()
"faiss-cpu>=1.6.4",
"jax>=0.3.14; sys_platform != 'win32'",
@@ -233,7 +233,7 @@
EXTRAS_REQUIRE = {
"audio": AUDIO_REQUIRE,
"vision": VISION_REQUIRE,
- "apache-beam": ["apache-beam>=2.26.0,<2.44.0"],
+ "apache-beam": ["apache-beam>=2.26.0"],
"tensorflow": [
"tensorflow>=2.2.0,!=2.6.0,!=2.6.1; sys_platform != 'darwin' or platform_machine != 'arm64'",
"tensorflow-macos; sys_platform == 'darwin' and platform_machine == 'arm64'",
diff --git a/src/datasets/arrow_writer.py b/src/datasets/arrow_writer.py
--- a/src/datasets/arrow_writer.py
+++ b/src/datasets/arrow_writer.py
@@ -39,7 +39,7 @@
from .filesystems import is_remote_filesystem
from .info import DatasetInfo
from .keyhash import DuplicatedKeysError, KeyHasher
-from .table import array_cast, array_concat, cast_array_to_feature, embed_table_storage, table_cast
+from .table import array_cast, cast_array_to_feature, embed_table_storage, table_cast
from .utils import logging
from .utils import tqdm as hf_tqdm
from .utils.file_utils import hash_url_to_filename
@@ -441,7 +441,12 @@ def write_examples_on_file(self):
# This can happen in `.map()` when we want to re-write the same Arrow data
if all(isinstance(row[0][col], (pa.Array, pa.ChunkedArray)) for row in self.current_examples):
arrays = [row[0][col] for row in self.current_examples]
- batch_examples[col] = array_concat(arrays)
+ arrays = [
+ chunk
+ for array in arrays
+ for chunk in (array.chunks if isinstance(array, pa.ChunkedArray) else [array])
+ ]
+ batch_examples[col] = pa.concat_arrays(arrays)
else:
batch_examples[col] = [
row[0][col].to_pylist()[0] if isinstance(row[0][col], (pa.Array, pa.ChunkedArray)) else row[0][col]
@@ -669,33 +674,23 @@ def finalize(self, metrics_query_result: dict):
metrics_query_result: `dict` obtained from pipeline_results.metrics().query(m_filter). Make sure
that the filter keeps only the metrics for the considered split, under the namespace `split_name`.
"""
- import apache_beam as beam
-
- from .utils import beam_utils
# Beam FileSystems require the system's path separator in the older versions
fs, _, [parquet_path] = fsspec.get_fs_token_paths(self._parquet_path)
parquet_path = str(Path(parquet_path)) if not is_remote_filesystem(fs) else fs.unstrip_protocol(parquet_path)
- shards_metadata = list(beam.io.filesystems.FileSystems.match([parquet_path + "*.parquet"])[0].metadata_list)
- shards = [metadata.path for metadata in shards_metadata]
- num_bytes = sum([metadata.size_in_bytes for metadata in shards_metadata])
+ shards = fs.glob(parquet_path + "*.parquet")
+ num_bytes = sum(fs.sizes(shards))
shard_lengths = get_parquet_lengths(shards)
# Convert to arrow
if self._path.endswith(".arrow"):
logger.info(f"Converting parquet files {self._parquet_path} to arrow {self._path}")
- shards = [
- metadata.path
- for metadata in beam.io.filesystems.FileSystems.match([parquet_path + "*.parquet"])[0].metadata_list
- ]
try: # stream conversion
num_bytes = 0
for shard in hf_tqdm(shards, unit="shards"):
- with beam.io.filesystems.FileSystems.open(shard) as source:
- with beam.io.filesystems.FileSystems.create(
- shard.replace(".parquet", ".arrow")
- ) as destination:
+ with fs.open(shard, "rb") as source:
+ with fs.open(shard.replace(".parquet", ".arrow"), "wb") as destination:
shard_num_bytes, _ = parquet_to_arrow(source, destination)
num_bytes += shard_num_bytes
except OSError as e: # broken pipe can happen if the connection is unstable, do local conversion instead
@@ -709,12 +704,12 @@ def finalize(self, metrics_query_result: dict):
num_bytes = 0
for shard in hf_tqdm(shards, unit="shards"):
local_parquet_path = os.path.join(local_convert_dir, hash_url_to_filename(shard) + ".parquet")
- beam_utils.download_remote_to_local(shard, local_parquet_path)
+ fs.download(shard, local_parquet_path)
local_arrow_path = local_parquet_path.replace(".parquet", ".arrow")
shard_num_bytes, _ = parquet_to_arrow(local_parquet_path, local_arrow_path)
num_bytes += shard_num_bytes
remote_arrow_path = shard.replace(".parquet", ".arrow")
- beam_utils.upload_local_to_remote(local_arrow_path, remote_arrow_path)
+ fs.upload(local_arrow_path, remote_arrow_path)
# Save metrics
counters_dict = {metric.key.metric.name: metric.result for metric in metrics_query_result["counters"]}
@@ -735,8 +730,9 @@ def get_parquet_lengths(sources) -> List[int]:
def parquet_to_arrow(source, destination) -> List[int]:
"""Convert parquet file to arrow file. Inputs can be str paths or file-like objects"""
stream = None if isinstance(destination, str) else destination
- with ArrowWriter(path=destination, stream=stream) as writer:
- parquet_file = pa.parquet.ParquetFile(source)
+ parquet_file = pa.parquet.ParquetFile(source)
+ # Beam can create empty Parquet files, so we need to pass the source Parquet file's schema
+ with ArrowWriter(schema=parquet_file.schema_arrow, path=destination, stream=stream) as writer:
for record_batch in parquet_file.iter_batches():
pa_table = pa.Table.from_batches([record_batch])
writer.write_table(pa_table)
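The writer change above flattens any `ChunkedArray` inputs before calling `pa.concat_arrays`, which only accepts plain arrays (the pyarrow>=12 bump makes the plain concat work for extension types as well). A small sketch of that flattening step in isolation:
```python
# Sketch: unpack chunks so pa.concat_arrays (which rejects ChunkedArray) can be used.
import pyarrow as pa

arrays = [pa.chunked_array([[1, 2], [3]]), pa.array([4, 5])]
flat = [chunk for array in arrays for chunk in (array.chunks if isinstance(array, pa.ChunkedArray) else [array])]
print(pa.concat_arrays(flat).to_pylist())  # [1, 2, 3, 4, 5]
```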
diff --git a/src/datasets/table.py b/src/datasets/table.py
--- a/src/datasets/table.py
+++ b/src/datasets/table.py
@@ -1,6 +1,5 @@
import copy
import os
-import warnings
from functools import partial
from itertools import groupby
from typing import TYPE_CHECKING, Callable, Iterator, List, Optional, Tuple, TypeVar, Union
@@ -8,6 +7,7 @@
import numpy as np
import pyarrow as pa
import pyarrow.compute as pc
+import pyarrow.types
from . import config
from .utils.logging import get_logger
@@ -239,11 +239,7 @@ def to_pylist(self, *args, **kwargs):
Returns:
`list`
"""
- try:
- return self.table.to_pylist(*args, **kwargs)
- except AttributeError: # pyarrow <7 does not have to_pylist, so we use to_pydict
- pydict = self.table.to_pydict(*args, **kwargs)
- return [{k: pydict[k][i] for k in pydict} for i in range(len(self.table))]
+ return self.table.to_pylist(*args, **kwargs)
def to_pandas(self, *args, **kwargs):
"""
@@ -1801,102 +1797,35 @@ def wrapper(array, *args, **kwargs):
return wrapper
-def _is_extension_type(pa_type: pa.DataType) -> bool:
- """
- Check (recursively) if a pyarrow type is an extension type.
- """
- if isinstance(pa_type, pa.StructType):
- return any(_is_extension_type(field.type) for field in pa_type)
- elif isinstance(pa_type, (pa.ListType, pa.FixedSizeListType, pa.LargeListType)):
- return _is_extension_type(pa_type.value_type)
- elif isinstance(pa_type, pa.ExtensionType):
- return True
- else:
- return False
-
-
-def array_concat(arrays: List[pa.Array]):
- """Improved version of pa.concat_arrays
+def _are_list_values_of_length(array: pa.ListArray, length: int) -> bool:
+ """Check if all the sub-lists of a `pa.ListArray` have the specified length."""
+ return pc.all(pc.equal(array.value_lengths(), length)).as_py() or array.null_count == len(array)
- It supports concatenating pa.ExtensionArray objects by concatenating the underlying storages.
- Args:
- arrays (List[pa.Array]): List of arrays to contatenate
-
- Raises:
- pa.ArrowInvalid: if the arrow array concatenation fails
- ValueError: if the list of arrays is empty
- TypeError: if the arrays to be concatenated have different types
+def _combine_list_array_offsets_with_mask(array: pa.ListArray) -> pa.Array:
+ """Add the null bitmap to the offsets of a `pa.ListArray`."""
+ offsets = array.offsets
+ if array.null_count > 0:
+ offsets = pa.concat_arrays(
+ [
+ pc.replace_with_mask(offsets[:-1], array.is_null(), pa.nulls(len(array), pa.int32())),
+ offsets[-1:],
+ ]
+ )
+ return offsets
- Returns:
- array (:obj:`pyarrow.Array`): the concatenated array
- """
- arrays = list(arrays)
- array_types = {array.type for array in arrays}
-
- if not array_types:
- raise ValueError("Couldn't concatenate empty list of arrays")
- if len(array_types) > 1:
- array_types = list(array_types)
- raise TypeError(f"Couldn't concatenate arrays with different types {array_types[0]} and {array_types[1]}")
-
- array_type = arrays[0].type
- arrays = [chunk for arr in arrays for chunk in (arr.chunks if isinstance(arr, pa.ChunkedArray) else (arr,))]
-
- if not _is_extension_type(array_type):
- return pa.concat_arrays(arrays)
-
- def _offsets_concat(offsets):
- offset = offsets[0]
- concatenated_offsets = offset
- for offset in offsets[1:]:
- offset = pc.subtract(offset, offset[0])
- offset = pc.add(offset[1:], concatenated_offsets[-1])
- concatenated_offsets = pa.concat_arrays([concatenated_offsets, offset])
- return concatenated_offsets
-
- def _concat_arrays(arrays):
- array_type = arrays[0].type
- if isinstance(array_type, pa.ExtensionType):
- return array_type.wrap_array(_concat_arrays([array.storage for array in arrays]))
- elif pa.types.is_struct(array_type):
- return pa.StructArray.from_arrays(
- [_concat_arrays([array.field(field.name) for array in arrays]) for field in array_type],
- fields=list(array_type),
- mask=pa.concat_arrays([array.is_null() for array in arrays]),
- )
- elif pa.types.is_list(array_type):
- if any(array.null_count > 0 for array in arrays):
- if config.PYARROW_VERSION.major < 10:
- warnings.warn(
- "None values are converted to empty lists in `pyarrow<10.0.0` when concatenating list arrays with None values. Install `pyarrow>=10.0.0` to avoid this behavior. More info: https://github.com/huggingface/datasets/issues/3676."
- )
- else:
- return pa.ListArray.from_arrays(
- _offsets_concat([array.offsets for array in arrays]),
- _concat_arrays([array.values for array in arrays]),
- mask=pa.concat_arrays([array.is_null() for array in arrays]),
- )
- return pa.ListArray.from_arrays(
- _offsets_concat([array.offsets for array in arrays]),
- _concat_arrays([array.values for array in arrays]),
- )
- elif pa.types.is_fixed_size_list(array_type):
- if config.PYARROW_VERSION.major < 15:
- # PyArrow bug: https://github.com/apache/arrow/issues/35360
- return pa.FixedSizeListArray.from_arrays(
- _concat_arrays([array.values[array.offset * array.type.list_size :] for array in arrays]),
- array_type.list_size,
- )
- else:
- return pa.FixedSizeListArray.from_arrays(
- _concat_arrays([array.values for array in arrays]),
- array_type.value_type,
- array_type.list_size,
- )
- return pa.concat_arrays(arrays)
- return _concat_arrays(arrays)
+def _storage_type(type: pa.DataType) -> pa.DataType:
+ """Convert a (possibly nested) `pa.ExtensionType` to its storage type."""
+ if isinstance(type, pa.ExtensionType):
+ return _storage_type(type.storage_type)
+ elif isinstance(type, pa.StructType):
+ return pa.struct([pa.field(field.name, _storage_type(field.type)) for field in type])
+ elif isinstance(type, pa.ListType):
+ return pa.list_(_storage_type(type.value_type))
+ elif isinstance(type, pa.FixedSizeListType):
+ return pa.list_(_storage_type(type.value_type), type.list_size)
+ return type
@_wrap_for_chunked_arrays
@@ -1941,44 +1870,59 @@ def array_cast(array: pa.Array, pa_type: pa.DataType, allow_number_to_str=True):
return pa.StructArray.from_arrays(arrays, fields=list(pa_type), mask=array.is_null())
elif pa.types.is_list(array.type):
if pa.types.is_fixed_size_list(pa_type):
- if pa_type.list_size * len(array) == len(array.values):
- return pa.FixedSizeListArray.from_arrays(
- _c(array.values, pa_type.value_type),
- pa_type.list_size,
- )
- elif pa.types.is_list(pa_type):
- if array.null_count > 0:
- if config.PYARROW_VERSION.major < 10:
- warnings.warn(
- f"None values are converted to empty lists in `pyarrow<10.0.0` when converting array to {pa_type}. Install `pyarrow>=10.0.0` to avoid this behavior. More info: https://github.com/huggingface/datasets/issues/3676."
- )
+ if _are_list_values_of_length(array, pa_type.list_size):
+ if array.null_count > 0:
+ # Ensure each null value in the array translates to [null] * pa_type.list_size in the array's values array
+ array_type = array.type
+ storage_type = _storage_type(array_type)
+ if array_type != storage_type:
+ # Temporarily convert to the storage type to support extension types in the slice operation
+ array = _c(array, storage_type)
+ array = pc.list_slice(array, 0, pa_type.list_size, return_fixed_size_list=True)
+ array = _c(array, array_type)
+ else:
+ array = pc.list_slice(array, 0, pa_type.list_size, return_fixed_size_list=True)
+ array_values = array.values
+ if config.PYARROW_VERSION.major < 15:
+ return pa.Array.from_buffers(
+ pa_type,
+ len(array),
+ [array.is_valid().buffers()[1]],
+ children=[_c(array_values, pa_type.value_type)],
+ )
+ else:
+ return pa.FixedSizeListArray.from_arrays(
+ _c(array_values, pa_type.value_type), pa_type.list_size, mask=array.is_null()
+ )
else:
- return pa.ListArray.from_arrays(
- array.offsets, _c(array.values, pa_type.value_type), mask=array.is_null()
- )
- return pa.ListArray.from_arrays(array.offsets, _c(array.values, pa_type.value_type))
+ array_values = array.values[
+ array.offset * pa_type.length : (array.offset + len(array)) * pa_type.length
+ ]
+ return pa.FixedSizeListArray.from_arrays(_c(array_values, pa_type.value_type), pa_type.list_size)
+ elif pa.types.is_list(pa_type):
+ # Merge offsets with the null bitmap to avoid the "Null bitmap with offsets slice not supported" ArrowNotImplementedError
+ array_offsets = _combine_list_array_offsets_with_mask(array)
+ return pa.ListArray.from_arrays(array_offsets, _c(array.values, pa_type.value_type))
elif pa.types.is_fixed_size_list(array.type):
- array_values = array.values
- if config.PYARROW_VERSION.major < 15:
- # PyArrow bug: https://github.com/apache/arrow/issues/35360
- array_values = array.values[array.offset * array.type.list_size :]
if pa.types.is_fixed_size_list(pa_type):
- return pa.FixedSizeListArray.from_arrays(
- _c(array_values, pa_type.value_type),
- pa_type.list_size,
- )
- elif pa.types.is_list(pa_type):
- offsets_arr = pa.array(np.arange(len(array) + 1) * array.type.list_size, pa.int32())
- if array.null_count > 0:
- if config.PYARROW_VERSION.major < 10:
- warnings.warn(
- f"None values are converted to empty lists in `pyarrow<10.0.0` when converting array to {pa_type}. Install `pyarrow>=10.0.0` to avoid this behavior. More info: https://github.com/huggingface/datasets/issues/3676."
+ if pa_type.list_size == array.type.list_size:
+ array_values = array.values[
+ array.offset * array.type.list_size : (array.offset + len(array)) * array.type.list_size
+ ]
+ if config.PYARROW_VERSION.major < 15:
+ return pa.Array.from_buffers(
+ pa_type,
+ len(array),
+ [array.is_valid().buffers()[1]],
+ children=[_c(array_values, pa_type.value_type)],
)
else:
- return pa.ListArray.from_arrays(
- offsets_arr, _c(array_values, pa_type.value_type), mask=array.is_null()
+ return pa.FixedSizeListArray.from_arrays(
+ _c(array_values, pa_type.value_type), pa_type.list_size, mask=array.is_null()
)
- return pa.ListArray.from_arrays(offsets_arr, _c(array_values, pa_type.value_type))
+ elif pa.types.is_list(pa_type):
+ array_offsets = (np.arange(len(array) + 1) + array.offset) * array.type.list_size
+ return pa.ListArray.from_arrays(array_offsets, _c(array.values, pa_type.value_type), mask=array.is_null())
else:
if (
not allow_number_to_str
@@ -2042,69 +1986,79 @@ def cast_array_to_feature(array: pa.Array, feature: "FeatureType", allow_number_
elif pa.types.is_list(array.type):
# feature must be either [subfeature] or Sequence(subfeature)
if isinstance(feature, list):
- casted_values = _c(array.values, feature[0])
- if casted_values.type == array.values.type:
+ casted_array_values = _c(array.values, feature[0])
+ if casted_array_values.type == array.values.type:
return array
else:
- if array.null_count > 0:
- if config.PYARROW_VERSION.major < 10:
- warnings.warn(
- f"None values are converted to empty lists in `pyarrow<10.0.0` when converting array to {feature}. Install `pyarrow>=10.0.0` to avoid this behavior. More info: https://github.com/huggingface/datasets/issues/3676."
- )
- else:
- return pa.ListArray.from_arrays(array.offsets, casted_values, mask=array.is_null())
- return pa.ListArray.from_arrays(array.offsets, casted_values)
+ # Merge offsets with the null bitmap to avoid the "Null bitmap with offsets slice not supported" ArrowNotImplementedError
+ array_offsets = _combine_list_array_offsets_with_mask(array)
+ return pa.ListArray.from_arrays(array_offsets, casted_array_values)
elif isinstance(feature, Sequence):
if feature.length > -1:
- if feature.length * len(array) == len(array.values):
- return pa.FixedSizeListArray.from_arrays(_c(array.values, feature.feature), feature.length)
- else:
- casted_values = _c(array.values, feature.feature)
- if casted_values.type == array.values.type:
- return array
- else:
+ if _are_list_values_of_length(array, feature.length):
if array.null_count > 0:
- if config.PYARROW_VERSION.major < 10:
- warnings.warn(
- f"None values are converted to empty lists in `pyarrow<10.0.0` when converting array to {feature}. Install `pyarrow>=10.0.0` to avoid this behavior. More info: https://github.com/huggingface/datasets/issues/3676."
+ # Ensure each null value in the array translates to [null] * pa_type.list_size in the array's values array
+ array_type = array.type
+ storage_type = _storage_type(array_type)
+ if array_type != storage_type:
+ # Temporarily convert to the storage type to support extension types in the slice operation
+ array = array_cast(array, storage_type, allow_number_to_str=allow_number_to_str)
+ array = pc.list_slice(array, 0, feature.length, return_fixed_size_list=True)
+ array = array_cast(array, array_type, allow_number_to_str=allow_number_to_str)
+ else:
+ array = pc.list_slice(array, 0, feature.length, return_fixed_size_list=True)
+ array_values = array.values
+ casted_array_values = _c(array_values, feature.feature)
+ if config.PYARROW_VERSION.major < 15:
+ return pa.Array.from_buffers(
+ pa.list_(casted_array_values.type, feature.length),
+ len(array),
+ [array.is_valid().buffers()[1]],
+ children=[casted_array_values],
)
else:
- return pa.ListArray.from_arrays(
- array.offsets, _c(array.values, feature.feature), mask=array.is_null()
+ return pa.FixedSizeListArray.from_arrays(
+ casted_array_values, feature.length, mask=array.is_null()
)
- return pa.ListArray.from_arrays(array.offsets, _c(array.values, feature.feature))
+ else:
+ array_values = array.values[
+ array.offset * feature.length : (array.offset + len(array)) * feature.length
+ ]
+ return pa.FixedSizeListArray.from_arrays(_c(array_values, feature.feature), feature.length)
+ else:
+ casted_array_values = _c(array.values, feature.feature)
+ if casted_array_values.type == array.values.type:
+ return array
+ else:
+ # Merge offsets with the null bitmap to avoid the "Null bitmap with offsets slice not supported" ArrowNotImplementedError
+ array_offsets = _combine_list_array_offsets_with_mask(array)
+ return pa.ListArray.from_arrays(array_offsets, casted_array_values)
elif pa.types.is_fixed_size_list(array.type):
# feature must be either [subfeature] or Sequence(subfeature)
- array_values = array.values
if isinstance(feature, list):
- if array.null_count > 0:
- if config.PYARROW_VERSION.major < 10:
- warnings.warn(
- f"None values are converted to empty lists when converting array to {feature}. Install `pyarrow>=10.0.0` to avoid this behavior. More info: https://github.com/huggingface/datasets/issues/3676. This will raise an error in a future major version of `datasets`"
- )
- else:
- return pa.ListArray.from_arrays(array.offsets, _c(array_values, feature[0]), mask=array.is_null())
- return pa.ListArray.from_arrays(array.offsets, _c(array_values, feature[0]))
+ array_offsets = (np.arange(len(array) + 1) + array.offset) * array.type.list_size
+ return pa.ListArray.from_arrays(array_offsets, _c(array.values, feature[0]), mask=array.is_null())
elif isinstance(feature, Sequence):
if feature.length > -1:
- if array.offset and feature.length * len(array) != len(array_values):
+ if feature.length == array.type.list_size:
array_values = array.values[
array.offset * array.type.list_size : (array.offset + len(array)) * array.type.list_size
]
- if feature.length * len(array) == len(array_values):
- return pa.FixedSizeListArray.from_arrays(_c(array_values, feature.feature), feature.length)
- else:
- offsets_arr = pa.array(np.arange(len(array) + 1) * array.type.list_size, pa.int32())
- if array.null_count > 0:
- if config.PYARROW_VERSION.major < 10:
- warnings.warn(
- f"None values are converted to empty lists when converting array to {feature}. Install `pyarrow>=10.0.0` to avoid this behavior. More info: https://github.com/huggingface/datasets/issues/3676. This will raise an error in a future major version of `datasets`"
+ casted_array_values = _c(array_values, feature.feature)
+ if config.PYARROW_VERSION.major < 15:
+ return pa.Array.from_buffers(
+ pa.list_(casted_array_values.type, feature.length),
+ len(array),
+ [array.is_valid().buffers()[1]],
+ children=[casted_array_values],
)
else:
- return pa.ListArray.from_arrays(
- offsets_arr, _c(array_values, feature.feature), mask=array.is_null()
+ return pa.FixedSizeListArray.from_arrays(
+ casted_array_values, feature.length, mask=array.is_null()
)
- return pa.ListArray.from_arrays(offsets_arr, _c(array_values, feature.feature))
+ else:
+ array_offsets = (np.arange(len(array) + 1) + array.offset) * array.type.list_size
+ return pa.ListArray.from_arrays(array_offsets, _c(array.values, feature.feature), mask=array.is_null())
if pa.types.is_null(array.type):
return array_cast(array, get_nested_type(feature), allow_number_to_str=allow_number_to_str)
elif not isinstance(feature, (Sequence, dict, list, tuple)):
@@ -2116,7 +2070,7 @@ def cast_array_to_feature(array: pa.Array, feature: "FeatureType", allow_number_
def embed_array_storage(array: pa.Array, feature: "FeatureType"):
"""Embed data into an arrays's storage.
For custom features like Audio or Image, it takes into account the "embed_storage" methods
- they defined to enable embedding external data (e.g. an image file) into an other arrow types.
+ they define to embed external data (e.g. an image file) into an array.
<Added version="2.4.0"/>
@@ -2153,65 +2107,28 @@ def embed_array_storage(array: pa.Array, feature: "FeatureType"):
return pa.StructArray.from_arrays(arrays, names=list(feature), mask=array.is_null())
elif pa.types.is_list(array.type):
# feature must be either [subfeature] or Sequence(subfeature)
+ # Merge offsets with the null bitmap to avoid the "Null bitmap with offsets slice not supported" ArrowNotImplementedError
+ array_offsets = _combine_list_array_offsets_with_mask(array)
if isinstance(feature, list):
- if array.null_count > 0:
- if config.PYARROW_VERSION.major < 10:
- warnings.warn(
- f"None values are converted to empty lists when embedding array storage with {feature}. Install `pyarrow>=10.0.0` to avoid this behavior. More info: https://github.com/huggingface/datasets/issues/3676. This will raise an error in a future major version of `datasets`"
- )
- else:
- return pa.ListArray.from_arrays(array.offsets, _e(array.values, feature[0]), mask=array.is_null())
- return pa.ListArray.from_arrays(array.offsets, _e(array.values, feature[0]))
- elif isinstance(feature, Sequence):
- if feature.length > -1:
- if feature.length * len(array) == len(array.values):
- return pa.FixedSizeListArray.from_arrays(_e(array.values, feature.feature), feature.length)
- else:
- casted_values = _e(array.values, feature.feature)
- if casted_values.type == array.values.type:
- return array
- else:
- if array.null_count > 0:
- if config.PYARROW_VERSION.major < 10:
- warnings.warn(
- f"None values are converted to empty lists when embedding array storage with {feature}. Install `pyarrow>=10.0.0` to avoid this behavior. More info: https://github.com/huggingface/datasets/issues/3676. This will raise an error in a future major version of `datasets`"
- )
- else:
- return pa.ListArray.from_arrays(
- array.offsets, _e(array.values, feature.feature), mask=array.is_null()
- )
- return pa.ListArray.from_arrays(array.offsets, _e(array.values, feature.feature))
+ return pa.ListArray.from_arrays(array_offsets, _e(array.values, feature[0]))
+ if isinstance(feature, Sequence) and feature.length == -1:
+ return pa.ListArray.from_arrays(array_offsets, _e(array.values, feature.feature))
elif pa.types.is_fixed_size_list(array.type):
- # feature must be either [subfeature] or Sequence(subfeature)
- array_values = array.values
- if config.PYARROW_VERSION.major < 15:
- # PyArrow bug: https://github.com/apache/arrow/issues/35360
- array_values = array.values[array.offset * array.type.list_size :]
- if isinstance(feature, list):
- if array.null_count > 0:
- if config.PYARROW_VERSION.major < 10:
- warnings.warn(
- f"None values are converted to empty lists when embedding array storage with {feature}. Install `pyarrow>=10.0.0` to avoid this behavior. More info: https://github.com/huggingface/datasets/issues/3676. This will raise an error in a future major version of `datasets`"
- )
- else:
- return pa.ListArray.from_arrays(array.offsets, _e(array_values, feature[0]), mask=array.is_null())
- return pa.ListArray.from_arrays(array.offsets, _e(array_values, feature[0]))
- elif isinstance(feature, Sequence):
- if feature.length > -1:
- if feature.length * len(array) == len(array_values):
- return pa.FixedSizeListArray.from_arrays(_e(array_values, feature.feature), feature.length)
+ # feature must be Sequence(subfeature)
+ if isinstance(feature, Sequence) and feature.length > -1:
+ array_values = array.values[
+ array.offset * array.type.list_size : (array.offset + len(array)) * array.type.list_size
+ ]
+ embedded_array_values = _e(array_values, feature.feature)
+ if config.PYARROW_VERSION.major < 15:
+ return pa.Array.from_buffers(
+ pa.list_(array_values.type, feature.length),
+ len(array),
+ [array.is_valid().buffers()[1]],
+ children=[embedded_array_values],
+ )
else:
- offsets_arr = pa.array(np.arange(len(array) + 1) * array.type.list_size, pa.int32())
- if array.null_count > 0:
- if config.PYARROW_VERSION.major < 10:
- warnings.warn(
- f"None values are converted to empty lists when embedding array storage with {feature}. Install `pyarrow>=10.0.0` to avoid this behavior. More info: https://github.com/huggingface/datasets/issues/3676. This will raise an error in a future major version of `datasets`"
- )
- else:
- return pa.ListArray.from_arrays(
- offsets_arr, _e(array_values, feature.feature), mask=array.is_null()
- )
- return pa.ListArray.from_arrays(offsets_arr, _e(array_values, feature.feature))
+ return pa.FixedSizeListArray.from_arrays(embedded_array_values, feature.length, mask=array.is_null())
if not isinstance(feature, (Sequence, dict, list, tuple)):
return array
raise TypeError(f"Couldn't embed array of type\n{array.type}\nwith\n{feature}")
| diff --git a/tests/test_beam.py b/tests/test_beam.py
--- a/tests/test_beam.py
+++ b/tests/test_beam.py
@@ -105,7 +105,7 @@ def test_download_and_prepare_sharded(self):
self.assertTrue(
os.path.exists(
os.path.join(
- tmp_cache_dir, builder.name, "default", "0.0.0", f"{builder.name}-train-00000-of-00002.arrow"
+ tmp_cache_dir, builder.name, "default", "0.0.0", f"{builder.name}-train-00001-of-00002.arrow"
)
)
)
diff --git a/tests/test_table.py b/tests/test_table.py
--- a/tests/test_table.py
+++ b/tests/test_table.py
@@ -1,15 +1,13 @@
import copy
import pickle
-import warnings
from typing import List, Union
import numpy as np
import pyarrow as pa
import pytest
-import datasets
from datasets import Sequence, Value
-from datasets.features.features import Array2D, Array2DExtensionType, ClassLabel, Features, Image
+from datasets.features.features import Array2D, Array2DExtensionType, ClassLabel, Features, Image, get_nested_type
from datasets.table import (
ConcatenationTable,
InMemoryTable,
@@ -19,9 +17,7 @@
_in_memory_arrow_table_from_buffer,
_in_memory_arrow_table_from_file,
_interpolation_search,
- _is_extension_type,
_memory_mapped_arrow_table_from_file,
- array_concat,
cast_array_to_feature,
concat_tables,
embed_array_storage,
@@ -1081,35 +1077,6 @@ def test_indexed_table_mixin():
assert table.fast_slice(2, 13) == pa_table.slice(2, 13)
[email protected](
- "arrays",
- [
- [pa.array([[1, 2, 3, 4]]), pa.array([[10, 2]])],
- [
- pa.array([[[1, 2], [3]]], pa.list_(pa.list_(pa.int32()), 2)),
- pa.array([[[10, 2, 3], [2]]], pa.list_(pa.list_(pa.int32()), 2)),
- ],
- [pa.array([[[1, 2, 3]], [[2, 3], [20, 21]], [[4]]]).slice(1), pa.array([[[1, 2, 3]]])],
- ],
-)
-def test_concat_arrays(arrays):
- assert array_concat(arrays) == pa.concat_arrays(arrays)
-
-
-def test_concat_arrays_nested_with_nulls():
- arrays = [pa.array([{"a": 21, "b": [[1, 2], [3]]}]), pa.array([{"a": 100, "b": [[1], None]}])]
- concatenated_arrays = array_concat(arrays)
- assert concatenated_arrays == pa.array([{"a": 21, "b": [[1, 2], [3]]}, {"a": 100, "b": [[1], None]}])
-
-
-def test_concat_extension_arrays():
- arrays = [pa.array([[[1, 2], [3, 4]]]), pa.array([[[10, 2], [3, 4]]])]
- extension_type = Array2DExtensionType((2, 2), "int64")
- assert array_concat([extension_type.wrap_array(array) for array in arrays]) == extension_type.wrap_array(
- pa.concat_arrays(arrays)
- )
-
-
def test_cast_array_to_features():
arr = pa.array([[0, 1]])
assert cast_array_to_feature(arr, Sequence(Value("string"))).type == pa.list_(pa.string())
@@ -1130,28 +1097,17 @@ def test_cast_array_to_features_to_nested_with_no_fields():
assert cast_array_to_feature(arr, {}).to_pylist() == arr.to_pylist()
-def test_cast_array_to_features_nested_with_null_values():
+def test_cast_array_to_features_nested_with_nulls():
# same type
arr = pa.array([{"foo": [None, [0]]}], pa.struct({"foo": pa.list_(pa.list_(pa.int64()))}))
casted_array = cast_array_to_feature(arr, {"foo": [[Value("int64")]]})
assert casted_array.type == pa.struct({"foo": pa.list_(pa.list_(pa.int64()))})
assert casted_array.to_pylist() == arr.to_pylist()
-
# different type
arr = pa.array([{"foo": [None, [0]]}], pa.struct({"foo": pa.list_(pa.list_(pa.int64()))}))
- if datasets.config.PYARROW_VERSION.major < 10:
- with pytest.warns(UserWarning, match="None values are converted to empty lists.+"):
- casted_array = cast_array_to_feature(arr, {"foo": [[Value("int32")]]})
- assert casted_array.type == pa.struct({"foo": pa.list_(pa.list_(pa.int32()))})
- assert casted_array.to_pylist() == [
- {"foo": [[], [0]]}
- ] # empty list because of https://github.com/huggingface/datasets/issues/3676
- else:
- with warnings.catch_warnings():
- warnings.simplefilter("error")
- casted_array = cast_array_to_feature(arr, {"foo": [[Value("int32")]]})
- assert casted_array.type == pa.struct({"foo": pa.list_(pa.list_(pa.int32()))})
- assert casted_array.to_pylist() == [{"foo": [None, [0]]}]
+ casted_array = cast_array_to_feature(arr, {"foo": [[Value("int32")]]})
+ assert casted_array.type == pa.struct({"foo": pa.list_(pa.list_(pa.int32()))})
+ assert casted_array.to_pylist() == [{"foo": [None, [0]]}]
def test_cast_array_to_features_to_null_type():
@@ -1199,23 +1155,67 @@ def test_cast_array_to_features_sequence_classlabel():
assert cast_array_to_feature(arr, Sequence(ClassLabel(names=["foo", "bar"])))
-def test_cast_fixed_size_array_to_features_sequence():
- arr = pa.array([[0, 1, 2], [3, 4, 5], [6, 7, 8]], pa.list_(pa.int32(), 3))
[email protected](
+ "arr",
+ [
+ pa.array([[0, 1, 2], [3, None, 5], None, [6, 7, 8], None], pa.list_(pa.int32(), 3)),
+ ],
+)
[email protected]("slice", [None, slice(1, None), slice(-1), slice(1, 3), slice(2, 3), slice(1, 1)])
[email protected]("target_value_feature", [Value("int64")])
+def test_cast_fixed_size_list_array_to_features_sequence(arr, slice, target_value_feature):
+ arr = arr if slice is None else arr[slice]
# Fixed size list
- casted_array = cast_array_to_feature(arr, Sequence(Value("int64"), length=3))
- assert casted_array.type == pa.list_(pa.int64(), 3)
+ casted_array = cast_array_to_feature(arr, Sequence(target_value_feature, length=arr.type.list_size))
+ assert casted_array.type == get_nested_type(Sequence(target_value_feature, length=arr.type.list_size))
+ assert casted_array.to_pylist() == arr.to_pylist()
+ with pytest.raises(TypeError):
+ cast_array_to_feature(arr, Sequence(target_value_feature, length=arr.type.list_size + 1))
+ # Variable size list
+ casted_array = cast_array_to_feature(arr, Sequence(target_value_feature))
+ assert casted_array.type == get_nested_type(Sequence(target_value_feature))
+ assert casted_array.to_pylist() == arr.to_pylist()
+ casted_array = cast_array_to_feature(arr, [target_value_feature])
+ assert casted_array.type == get_nested_type([target_value_feature])
assert casted_array.to_pylist() == arr.to_pylist()
+
+
[email protected](
+ "arr",
+ [
+ pa.array([[0, 1, 2], [3, None, 5], None, [6, 7, 8], None], pa.list_(pa.int32())),
+ ],
+)
[email protected]("slice", [None, slice(1, None), slice(-1), slice(1, 3), slice(2, 3), slice(1, 1)])
[email protected]("target_value_feature", [Value("int64")])
+def test_cast_list_array_to_features_sequence(arr, slice, target_value_feature):
+ arr = arr if slice is None else arr[slice]
# Variable size list
- casted_array = cast_array_to_feature(arr, Sequence(Value("int64")))
- assert casted_array.type == pa.list_(pa.int64())
+ casted_array = cast_array_to_feature(arr, Sequence(target_value_feature))
+ assert casted_array.type == get_nested_type(Sequence(target_value_feature))
+ assert casted_array.to_pylist() == arr.to_pylist()
+ casted_array = cast_array_to_feature(arr, [target_value_feature])
+ assert casted_array.type == get_nested_type([target_value_feature])
+ assert casted_array.to_pylist() == arr.to_pylist()
+ # Fixed size list
+ list_size = arr.value_lengths().drop_null()[0].as_py() if arr.value_lengths().drop_null() else 2
+ casted_array = cast_array_to_feature(arr, Sequence(target_value_feature, length=list_size))
+ assert casted_array.type == get_nested_type(Sequence(target_value_feature, length=list_size))
assert casted_array.to_pylist() == arr.to_pylist()
-def test_cast_sliced_fixed_size_array_to_features():
- arr = pa.array([[0, 1, 2], [3, 4, 5], [6, 7, 8]], pa.list_(pa.int32(), 3))
- casted_array = cast_array_to_feature(arr[1:], Sequence(Value("int64"), length=3))
- assert casted_array.type == pa.list_(pa.int64(), 3)
- assert casted_array.to_pylist() == arr[1:].to_pylist()
+def test_cast_array_xd_to_features_sequence():
+ arr = np.random.randint(0, 10, size=(8, 2, 3)).tolist()
+ arr = Array2DExtensionType(shape=(2, 3), dtype="int64").wrap_array(pa.array(arr, pa.list_(pa.list_(pa.int64()))))
+ arr = pa.ListArray.from_arrays([0, None, 4, 8], arr)
+ # Variable size list
+ casted_array = cast_array_to_feature(arr, Sequence(Array2D(shape=(2, 3), dtype="int32")))
+ assert casted_array.type == get_nested_type(Sequence(Array2D(shape=(2, 3), dtype="int32")))
+ assert casted_array.to_pylist() == arr.to_pylist()
+ # Fixed size list
+ casted_array = cast_array_to_feature(arr, Sequence(Array2D(shape=(2, 3), dtype="int32"), length=4))
+ assert casted_array.type == get_nested_type(Sequence(Array2D(shape=(2, 3), dtype="int32"), length=4))
+ assert casted_array.to_pylist() == arr.to_pylist()
def test_embed_array_storage(image_file):
@@ -1268,17 +1268,3 @@ def test_table_iter(table, batch_size, drop_last_batch):
if num_rows > 0:
reloaded = pa.concat_tables(subtables)
assert table.slice(0, num_rows).to_pydict() == reloaded.to_pydict()
-
-
[email protected](
- "pa_type, expected",
- [
- (pa.int8(), False),
- (pa.struct({"col1": pa.int8(), "col2": pa.int64()}), False),
- (pa.struct({"col1": pa.list_(pa.int8()), "col2": Array2DExtensionType((1, 3), "int64")}), True),
- (pa.list_(pa.int8()), False),
- (pa.list_(Array2DExtensionType((1, 3), "int64"), 4), True),
- ],
-)
-def test_is_extension_type(pa_type, expected):
- assert _is_extension_type(pa_type) == expected
| Couldn't cast array of type fixed_size_list to Sequence(Value(float64))
### Describe the bug
I have a dataset with an embedding column; when I try to map that dataset, I get the following exception:
```
Traceback (most recent call last):
File "/Users/jmif/.virtualenvs/llm-training/lib/python3.10/site-packages/datasets/arrow_dataset.py", line 3189, in map
for rank, done, content in iflatmap_unordered(
File "/Users/jmif/.virtualenvs/llm-training/lib/python3.10/site-packages/datasets/utils/py_utils.py", line 1387, in iflatmap_unordered
[async_result.get(timeout=0.05) for async_result in async_results]
File "/Users/jmif/.virtualenvs/llm-training/lib/python3.10/site-packages/datasets/utils/py_utils.py", line 1387, in <listcomp>
[async_result.get(timeout=0.05) for async_result in async_results]
File "/Users/jmif/.virtualenvs/llm-training/lib/python3.10/site-packages/multiprocess/pool.py", line 774, in get
raise self._value
TypeError: Couldn't cast array of type
fixed_size_list<item: float>[2]
to
Sequence(feature=Value(dtype='float32', id=None), length=2, id=None)
```
### Steps to reproduce the bug
Here's a simple repro script:
```
from datasets import Features, Value, Sequence, ClassLabel, Dataset
dataset_features = Features({
'text': Value('string'),
'embedding': Sequence(Value('double'), length=2),
'categories': Sequence(ClassLabel(names=sorted([
'one',
'two',
'three'
]))),
})
dataset = Dataset.from_dict(
{
'text': ['A'] * 10000,
'embedding': [[0.0, 0.1]] * 10000,
'categories': [[0]] * 10000,
},
features=dataset_features
)
def test_mapper(r):
r['text'] = list(map(lambda t: t + ' b', r['text']))
return r
dataset = dataset.map(test_mapper, batched=True, batch_size=10, features=dataset_features, num_proc=2)
```
Removing the embedding column fixes the issue!
### Expected behavior
The mapping completes successfully.
### Environment info
- `datasets` version: 2.14.4
- Platform: macOS-14.0-arm64-arm-64bit
- Python version: 3.10.12
- Huggingface_hub version: 0.17.1
- PyArrow version: 13.0.0
- Pandas version: 2.0.3
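For reference, here is a minimal sketch that isolates the cast exercised by the regression tests above; the values and the slice are illustrative, not taken from a real dataset:
```python
import pyarrow as pa
from datasets import Sequence, Value
from datasets.table import cast_array_to_feature

# a sliced fixed_size_list<item: float>[2] chunk containing nulls, similar to a shard seen during map()
arr = pa.array([[0.0, 0.1], [0.2, None], None, [0.4, 0.5]], pa.list_(pa.float32(), 2))[1:]
casted = cast_array_to_feature(arr, Sequence(Value("float64"), length=2))
print(casted.type)        # fixed_size_list<item: double>[2]
print(casted.to_pylist())
```
Inputs like this (offsets plus nulls) previously could fail with the `TypeError` shown in the traceback above.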
Add support for `Sequence(Audio/Image)` feature in `push_to_hub`
### Feature request
Allow for `Sequence` of `Image` (or `Audio`) to be embedded inside the shards.
### Motivation
Currently, thanks to #3685, when `embed_external_files` is set to True (which is the default) in `push_to_hub`, features of type `Image` and `Audio` are embedded inside the arrow/parquet shards, instead of only storing paths to the files.
I've noticed that this behavior does not extend to `Sequence` of `Image`, when working with a [dataset of timelapse images](https://huggingface.co/datasets/1aurent/Human-Embryo-Timelapse).
### Your contribution
I'll submit a PR if I find a way to add this feature
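For illustration, a hedged sketch of the intended usage; the repo id and file names below are made up, and the paths are assumed to exist locally:
```python
from datasets import Dataset, Features, Image, Sequence

features = Features({"frames": Sequence(Image())})
ds = Dataset.from_dict({"frames": [["frame_000.png", "frame_001.png"]]}, features=features)
# With the default embed_external_files=True, the image bytes should be embedded in the
# uploaded parquet shards, mirroring what already happens for a plain Image() column.
ds.push_to_hub("username/timelapse-demo")
```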
| 2023-10-05T15:24:05Z | [] | [] |
|
huggingface/datasets | 6,297 | huggingface__datasets-6297 | [
"6291"
] | f2d9fcc0840f9d94f63635e9b40a1a7f11b34ea2 | diff --git a/src/datasets/table.py b/src/datasets/table.py
--- a/src/datasets/table.py
+++ b/src/datasets/table.py
@@ -1964,7 +1964,7 @@ def array_cast(array: pa.Array, pa_type: pa.DataType, allow_number_to_str=True):
if isinstance(array, pa.ExtensionArray):
array = array.storage
if isinstance(pa_type, pa.ExtensionType):
- return pa_type.wrap_array(array)
+ return pa_type.wrap_array(_c(array, pa_type.storage_type))
elif array.type == pa_type:
return array
elif pa.types.is_struct(array.type):
| diff --git a/tests/test_table.py b/tests/test_table.py
--- a/tests/test_table.py
+++ b/tests/test_table.py
@@ -9,7 +9,7 @@
import datasets
from datasets import Sequence, Value
-from datasets.features.features import Array2DExtensionType, ClassLabel, Features, Image
+from datasets.features.features import Array2D, Array2DExtensionType, ClassLabel, Features, Image
from datasets.table import (
ConcatenationTable,
InMemoryTable,
@@ -1165,6 +1165,16 @@ def test_cast_array_to_features_to_null_type():
cast_array_to_feature(arr, Sequence(Value("null")))
+def test_cast_array_to_features_array_xd():
+ # same storage type
+ arr = pa.array([[[0, 1], [2, 3]], [[4, 5], [6, 7]]], pa.list_(pa.list_(pa.int32(), 2), 2))
+ casted_array = cast_array_to_feature(arr, Array2D(shape=(2, 2), dtype="int32"))
+ assert casted_array.type == Array2DExtensionType(shape=(2, 2), dtype="int32")
+ # different storage type
+ casted_array = cast_array_to_feature(arr, Array2D(shape=(2, 2), dtype="float32"))
+ assert casted_array.type == Array2DExtensionType(shape=(2, 2), dtype="float32")
+
+
def test_cast_array_to_features_sequence_classlabel():
arr = pa.array([[], [1], [0, 1]], pa.list_(pa.int64()))
assert cast_array_to_feature(arr, Sequence(ClassLabel(names=["foo", "bar"]))).type == pa.list_(pa.int64())
| Casting type from Array2D int to Array2D float crashes
### Describe the bug
I am on a school project and the initial type for feature annotations are `Array2D(shape=(None, 4))`. I am trying to cast this type to a `float64` and pyarrow gives me this error :
```
Traceback (most recent call last):
File "/home/alan/dev/ClassezDesImagesAvecDesAlgorithmesDeDeeplearning/src/sdd/data/dataset.py", line 141, in <module>
dataset = StanfordDogsDataset(size, 5).original(True).demo()
File "<attrs generated init __main__.StanfordDogsDataset>", line 4, in __init__
File "/home/alan/dev/ClassezDesImagesAvecDesAlgorithmesDeDeeplearning/src/sdd/data/dataset.py", line 33, in __attrs_post_init__
self.dataset = self.dataset.cast_column(
File "/home/alan/.cache/pypoetry/virtualenvs/sdd-2XWLAjSi-py3.10/lib/python3.10/site-packages/datasets/fingerprint.py", line 511, in wrapper
out = func(dataset, *args, **kwargs)
File "/home/alan/.cache/pypoetry/virtualenvs/sdd-2XWLAjSi-py3.10/lib/python3.10/site-packages/datasets/arrow_dataset.py", line 2110, in cast_column
return self.cast(features)
File "/home/alan/.cache/pypoetry/virtualenvs/sdd-2XWLAjSi-py3.10/lib/python3.10/site-packages/datasets/arrow_dataset.py", line 2055, in cast
dataset = dataset.map(
File "/home/alan/.cache/pypoetry/virtualenvs/sdd-2XWLAjSi-py3.10/lib/python3.10/site-packages/datasets/arrow_dataset.py", line 592, in wrapper
out: Union["Dataset", "DatasetDict"] = func(self, *args, **kwargs)
File "/home/alan/.cache/pypoetry/virtualenvs/sdd-2XWLAjSi-py3.10/lib/python3.10/site-packages/datasets/arrow_dataset.py", line 557, in wrapper
out: Union["Dataset", "DatasetDict"] = func(self, *args, **kwargs)
File "/home/alan/.cache/pypoetry/virtualenvs/sdd-2XWLAjSi-py3.10/lib/python3.10/site-packages/datasets/arrow_dataset.py", line 3097, in map
for rank, done, content in Dataset._map_single(**dataset_kwargs):
File "/home/alan/.cache/pypoetry/virtualenvs/sdd-2XWLAjSi-py3.10/lib/python3.10/site-packages/datasets/arrow_dataset.py", line 3474, in _map_single
batch = apply_function_on_filtered_inputs(
File "/home/alan/.cache/pypoetry/virtualenvs/sdd-2XWLAjSi-py3.10/lib/python3.10/site-packages/datasets/arrow_dataset.py", line 3353, in apply_function_on_filtered_inputs
processed_inputs = function(*fn_args, *additional_args, **fn_kwargs)
File "/home/alan/.cache/pypoetry/virtualenvs/sdd-2XWLAjSi-py3.10/lib/python3.10/site-packages/datasets/table.py", line 2328, in table_cast
return cast_table_to_schema(table, schema)
File "/home/alan/.cache/pypoetry/virtualenvs/sdd-2XWLAjSi-py3.10/lib/python3.10/site-packages/datasets/table.py", line 2287, in cast_table_to_schema
arrays = [cast_array_to_feature(table[name], feature) for name, feature in features.items()]
File "/home/alan/.cache/pypoetry/virtualenvs/sdd-2XWLAjSi-py3.10/lib/python3.10/site-packages/datasets/table.py", line 2287, in <listcomp>
arrays = [cast_array_to_feature(table[name], feature) for name, feature in features.items()]
File "/home/alan/.cache/pypoetry/virtualenvs/sdd-2XWLAjSi-py3.10/lib/python3.10/site-packages/datasets/table.py", line 1831, in wrapper
return pa.chunked_array([func(chunk, *args, **kwargs) for chunk in array.chunks])
File "/home/alan/.cache/pypoetry/virtualenvs/sdd-2XWLAjSi-py3.10/lib/python3.10/site-packages/datasets/table.py", line 1831, in <listcomp>
return pa.chunked_array([func(chunk, *args, **kwargs) for chunk in array.chunks])
File "/home/alan/.cache/pypoetry/virtualenvs/sdd-2XWLAjSi-py3.10/lib/python3.10/site-packages/datasets/table.py", line 2143, in cast_array_to_feature
return array_cast(array, feature(), allow_number_to_str=allow_number_to_str)
File "/home/alan/.cache/pypoetry/virtualenvs/sdd-2XWLAjSi-py3.10/lib/python3.10/site-packages/datasets/table.py", line 1833, in wrapper
return func(array, *args, **kwargs)
File "/home/alan/.cache/pypoetry/virtualenvs/sdd-2XWLAjSi-py3.10/lib/python3.10/site-packages/datasets/table.py", line 1967, in array_cast
return pa_type.wrap_array(array)
File "pyarrow/types.pxi", line 1369, in pyarrow.lib.BaseExtensionType.wrap_array
TypeError: Incompatible storage type for extension<arrow.py_extension_type<Array2DExtensionType>>: expected list<item: list<item: double>>, got list<item: list<item: int32>>
```
### Steps to reproduce the bug
```python
dataset = datasets.load_dataset("Alanox/stanford-dogs", split="full")
dataset = dataset.cast_column("annotations", Array2D((None, 4), "float64"))
```
### Expected behavior
It should simply cast the column feature type to a `float64` without error
### Environment info
datasets == 2.14.5
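A minimal sketch of the underlying cast, mirroring the regression test added above; the integer values are illustrative:
```python
import pyarrow as pa
from datasets import Array2D
from datasets.table import cast_array_to_feature

arr = pa.array([[[0, 1], [2, 3]], [[4, 5], [6, 7]]], pa.list_(pa.list_(pa.int32(), 2), 2))
casted = cast_array_to_feature(arr, Array2D(shape=(2, 2), dtype="float64"))
print(casted.type)  # an Array2D extension type with float64 storage, instead of the TypeError above
```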
| 2023-10-11T21:14:59Z | [] | [] |
|
huggingface/datasets | 6,309 | huggingface__datasets-6309 | [
"6305"
] | f77539cbd88d00ec1ab2b9d4edfd01d5a58ef88a | diff --git a/src/datasets/data_files.py b/src/datasets/data_files.py
--- a/src/datasets/data_files.py
+++ b/src/datasets/data_files.py
@@ -227,7 +227,9 @@ def _is_unrequested_hidden_file_or_is_inside_unrequested_hidden_dir(matched_rel_
return len(hidden_directories_in_path) != len(hidden_directories_in_pattern)
-def _get_data_files_patterns(pattern_resolver: Callable[[str], List[str]]) -> Dict[str, List[str]]:
+def _get_data_files_patterns(
+ pattern_resolver: Callable[[str], List[str]], base_path: str = ""
+) -> Dict[str, List[str]]:
"""
Get the default pattern from a directory or repository by testing all the supported patterns.
The first patterns to return a non-empty list of data files is returned.
@@ -242,7 +244,8 @@ def _get_data_files_patterns(pattern_resolver: Callable[[str], List[str]]) -> Di
except FileNotFoundError:
continue
if len(data_files) > 0:
- splits: Set[str] = {string_to_dict(p, glob_pattern_to_regex(split_pattern))["split"] for p in data_files}
+ pattern = base_path + ("/" if base_path else "") + split_pattern
+ splits: Set[str] = {string_to_dict(p, glob_pattern_to_regex(pattern))["split"] for p in data_files}
sorted_splits = [str(split) for split in DEFAULT_SPLITS if split in splits] + sorted(
splits - set(DEFAULT_SPLITS)
)
@@ -462,7 +465,7 @@ def get_data_patterns(base_path: str, download_config: Optional[DownloadConfig]
"""
resolver = partial(resolve_pattern, base_path=base_path, download_config=download_config)
try:
- return _get_data_files_patterns(resolver)
+ return _get_data_files_patterns(resolver, base_path=base_path)
except FileNotFoundError:
raise EmptyDatasetError(f"The directory at {base_path} doesn't contain any data files") from None
| diff --git a/tests/test_data_files.py b/tests/test_data_files.py
--- a/tests/test_data_files.py
+++ b/tests/test_data_files.py
@@ -16,6 +16,7 @@
_get_metadata_files_patterns,
_is_inside_unrequested_special_dir,
_is_unrequested_hidden_file_or_is_inside_unrequested_hidden_dir,
+ get_data_patterns,
resolve_pattern,
)
from datasets.fingerprint import Hasher
@@ -634,3 +635,13 @@ def resolver(pattern):
patterns = _get_metadata_files_patterns(resolver)
matched = [file_path for pattern in patterns for file_path in resolver(pattern)]
assert sorted(matched) == sorted(metadata_files)
+
+
+def test_get_data_patterns_from_directory_with_the_word_data_twice(tmp_path):
+ repo_dir = tmp_path / "directory-name-ending-with-the-word-data" # parent directory contains the word "data/"
+ data_dir = repo_dir / "data"
+ data_dir.mkdir(parents=True)
+ data_file = data_dir / "train-00001-of-00009.parquet"
+ data_file.touch()
+ data_file_patterns = get_data_patterns(repo_dir.as_posix())
+ assert data_file_patterns == {"train": ["data/train-[0-9][0-9][0-9][0-9][0-9]-of-[0-9][0-9][0-9][0-9][0-9]*.*"]}
| Cannot load dataset with `2.14.5`: `FileNotFound` error
### Describe the bug
I'm trying to load [`piuba-bigdata/articles_and_comments`](https://huggingface.co/datasets/piuba-bigdata/articles_and_comments) and I'm running into this error on `2.14.5`. However, this works on `2.10.0`.
### Steps to reproduce the bug
[Colab link](https://colab.research.google.com/drive/1SAftFMQnFE708ikRnJJHIXZV7R5IBOCE#scrollTo=r2R2ipCCDmsg)
```python
Downloading readme: 100%
1.19k/1.19k [00:00<00:00, 30.9kB/s]
---------------------------------------------------------------------------
FileNotFoundError Traceback (most recent call last)
<ipython-input-2-807c3583d297> in <cell line: 3>()
1 from datasets import load_dataset
2
----> 3 load_dataset("piuba-bigdata/articles_and_comments", split="train")
2 frames
/usr/local/lib/python3.10/dist-packages/datasets/load.py in load_dataset(path, name, data_dir, data_files, split, cache_dir, features, download_config, download_mode, verification_mode, ignore_verifications, keep_in_memory, save_infos, revision, token, use_auth_token, task, streaming, num_proc, storage_options, **config_kwargs)
2127
2128 # Create a dataset builder
-> 2129 builder_instance = load_dataset_builder(
2130 path=path,
2131 name=name,
/usr/local/lib/python3.10/dist-packages/datasets/load.py in load_dataset_builder(path, name, data_dir, data_files, cache_dir, features, download_config, download_mode, revision, token, use_auth_token, storage_options, **config_kwargs)
1813 download_config = download_config.copy() if download_config else DownloadConfig()
1814 download_config.storage_options.update(storage_options)
-> 1815 dataset_module = dataset_module_factory(
1816 path,
1817 revision=revision,
/usr/local/lib/python3.10/dist-packages/datasets/load.py in dataset_module_factory(path, revision, download_config, download_mode, dynamic_modules_path, data_dir, data_files, **download_kwargs)
1506 raise e1 from None
1507 if isinstance(e1, FileNotFoundError):
-> 1508 raise FileNotFoundError(
1509 f"Couldn't find a dataset script at {relative_to_absolute_path(combined_path)} or any data file in the same directory. "
1510 f"Couldn't find '{path}' on the Hugging Face Hub either: {type(e1).__name__}: {e1}"
FileNotFoundError: Couldn't find a dataset script at /content/piuba-bigdata/articles_and_comments/articles_and_comments.py or any data file in the same directory. Couldn't find 'piuba-bigdata/articles_and_comments' on the Hugging Face Hub either: FileNotFoundError: No (supported) data files or dataset script found in piuba-bigdata/articles_and_comments.
```
### Expected behavior
It should load normally.
### Environment info
```
- `datasets` version: 2.14.5
- Platform: Linux-5.15.120+-x86_64-with-glibc2.35
- Python version: 3.10.12
- Huggingface_hub version: 0.18.0
- PyArrow version: 9.0.0
- Pandas version: 1.5.3
```
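A local sketch mirroring the regression test added above: a parent directory whose name contains the word "data" used to break split inference. Directory and file names are illustrative.
```python
from pathlib import Path
from datasets.data_files import get_data_patterns

repo_dir = Path("directory-name-ending-with-the-word-data")  # parent directory name contains "data"
(repo_dir / "data").mkdir(parents=True, exist_ok=True)
(repo_dir / "data" / "train-00001-of-00009.parquet").touch()
print(get_data_patterns(repo_dir.as_posix()))
# expected: {'train': ['data/train-[0-9][0-9][0-9][0-9][0-9]-of-[0-9][0-9][0-9][0-9][0-9]*.*']}
```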
| Thanks for reporting, @finiteautomata.
We are investigating it. | 2023-10-17T09:00:39Z | [] | [] |
huggingface/datasets | 6,316 | huggingface__datasets-6316 | [
"6315"
] | 3aeb078ba1afd713e901df43343c160877403d07 | diff --git a/src/datasets/packaged_modules/folder_based_builder/folder_based_builder.py b/src/datasets/packaged_modules/folder_based_builder/folder_based_builder.py
--- a/src/datasets/packaged_modules/folder_based_builder/folder_based_builder.py
+++ b/src/datasets/packaged_modules/folder_based_builder/folder_based_builder.py
@@ -164,15 +164,15 @@ def analyze(files_or_archives, downloaded_files_or_dirs, split):
# Check that all metadata files share the same format
metadata_ext = {
- os.path.splitext(downloaded_metadata_file)[1][1:]
- for _, downloaded_metadata_file in itertools.chain.from_iterable(metadata_files.values())
+ os.path.splitext(original_metadata_file)[-1]
+ for original_metadata_file, _ in itertools.chain.from_iterable(metadata_files.values())
}
if len(metadata_ext) > 1:
raise ValueError(f"Found metadata files with different extensions: {list(metadata_ext)}")
metadata_ext = metadata_ext.pop()
for _, downloaded_metadata_file in itertools.chain.from_iterable(metadata_files.values()):
- pa_metadata_table = self._read_metadata(downloaded_metadata_file)
+ pa_metadata_table = self._read_metadata(downloaded_metadata_file, metadata_ext=metadata_ext)
features_per_metadata_file.append(
(downloaded_metadata_file, datasets.Features.from_arrow_schema(pa_metadata_table.schema))
)
@@ -236,9 +236,8 @@ def _split_files_and_archives(self, data_files):
archives.append(data_file)
return files, archives
- def _read_metadata(self, metadata_file):
- metadata_file_ext = os.path.splitext(metadata_file)[1][1:]
- if metadata_file_ext == "csv":
+ def _read_metadata(self, metadata_file, metadata_ext: str = ""):
+ if metadata_ext == ".csv":
# Use `pd.read_csv` (although slower) instead of `pyarrow.csv.read_csv` for reading CSV files for consistency with the CSV packaged module
return pa.Table.from_pandas(pd.read_csv(metadata_file))
else:
@@ -255,10 +254,10 @@ def _generate_examples(self, files, metadata_files, split_name, add_metadata, ad
metadata_dict = None
downloaded_metadata_file = None
+ metadata_ext = ""
if split_metadata_files:
metadata_ext = {
- os.path.splitext(downloaded_metadata_file)[1][1:]
- for _, downloaded_metadata_file in split_metadata_files
+ os.path.splitext(original_metadata_file)[-1] for original_metadata_file, _ in split_metadata_files
}
metadata_ext = metadata_ext.pop()
@@ -290,7 +289,9 @@ def _generate_examples(self, files, metadata_files, split_name, add_metadata, ad
_, metadata_file, downloaded_metadata_file = min(
metadata_file_candidates, key=lambda x: count_path_segments(x[0])
)
- pa_metadata_table = self._read_metadata(downloaded_metadata_file)
+ pa_metadata_table = self._read_metadata(
+ downloaded_metadata_file, metadata_ext=metadata_ext
+ )
pa_file_name_array = pa_metadata_table["file_name"]
pa_metadata_table = pa_metadata_table.drop(["file_name"])
metadata_dir = os.path.dirname(metadata_file)
@@ -302,7 +303,7 @@ def _generate_examples(self, files, metadata_files, split_name, add_metadata, ad
}
else:
raise ValueError(
- f"One or several metadata.{metadata_ext} were found, but not in the same directory or in a parent directory of {downloaded_file_or_dir}."
+ f"One or several metadata{metadata_ext} were found, but not in the same directory or in a parent directory of {downloaded_file_or_dir}."
)
if metadata_dir is not None and downloaded_metadata_file is not None:
file_relpath = os.path.relpath(original_file, metadata_dir)
@@ -314,7 +315,7 @@ def _generate_examples(self, files, metadata_files, split_name, add_metadata, ad
sample_metadata = metadata_dict[file_relpath]
else:
raise ValueError(
- f"One or several metadata.{metadata_ext} were found, but not in the same directory or in a parent directory of {downloaded_file_or_dir}."
+ f"One or several metadata{metadata_ext} were found, but not in the same directory or in a parent directory of {downloaded_file_or_dir}."
)
else:
sample_metadata = {}
@@ -356,7 +357,9 @@ def _generate_examples(self, files, metadata_files, split_name, add_metadata, ad
_, metadata_file, downloaded_metadata_file = min(
metadata_file_candidates, key=lambda x: count_path_segments(x[0])
)
- pa_metadata_table = self._read_metadata(downloaded_metadata_file)
+ pa_metadata_table = self._read_metadata(
+ downloaded_metadata_file, metadata_ext=metadata_ext
+ )
pa_file_name_array = pa_metadata_table["file_name"]
pa_metadata_table = pa_metadata_table.drop(["file_name"])
metadata_dir = os.path.dirname(downloaded_metadata_file)
@@ -368,7 +371,7 @@ def _generate_examples(self, files, metadata_files, split_name, add_metadata, ad
}
else:
raise ValueError(
- f"One or several metadata.{metadata_ext} were found, but not in the same directory or in a parent directory of {downloaded_dir_file}."
+ f"One or several metadata{metadata_ext} were found, but not in the same directory or in a parent directory of {downloaded_dir_file}."
)
if metadata_dir is not None and downloaded_metadata_file is not None:
downloaded_dir_file_relpath = os.path.relpath(downloaded_dir_file, metadata_dir)
@@ -380,7 +383,7 @@ def _generate_examples(self, files, metadata_files, split_name, add_metadata, ad
sample_metadata = metadata_dict[downloaded_dir_file_relpath]
else:
raise ValueError(
- f"One or several metadata.{metadata_ext} were found, but not in the same directory or in a parent directory of {downloaded_dir_file}."
+ f"One or several metadata{metadata_ext} were found, but not in the same directory or in a parent directory of {downloaded_dir_file}."
)
else:
sample_metadata = {}
| diff --git a/tests/test_upstream_hub.py b/tests/test_upstream_hub.py
--- a/tests/test_upstream_hub.py
+++ b/tests/test_upstream_hub.py
@@ -1,7 +1,9 @@
import fnmatch
import gc
import os
+import shutil
import tempfile
+import textwrap
import time
import unittest
from io import BytesIO
@@ -17,6 +19,7 @@
ClassLabel,
Dataset,
DatasetDict,
+ DownloadManager,
Features,
Image,
Value,
@@ -24,6 +27,10 @@
load_dataset_builder,
)
from datasets.config import METADATA_CONFIGS_FIELD
+from datasets.packaged_modules.folder_based_builder.folder_based_builder import (
+ FolderBasedBuilder,
+ FolderBasedBuilderConfig,
+)
from datasets.utils.file_utils import cached_path
from datasets.utils.hub import hf_hub_url
from tests.fixtures.hub import CI_HUB_ENDPOINT, CI_HUB_USER, CI_HUB_USER_TOKEN
@@ -813,3 +820,67 @@ def test_push_dataset_dict_to_hub_with_config_no_metadata_configs(self, temporar
ds_another_config_builder.config.data_files["random"][0],
"*/another_config/random-00000-of-00001.parquet",
)
+
+
+class DummyFolderBasedBuilder(FolderBasedBuilder):
+ BASE_FEATURE = dict
+ BASE_COLUMN_NAME = "base"
+ BUILDER_CONFIG_CLASS = FolderBasedBuilderConfig
+ EXTENSIONS = [".txt"]
+ # CLASSIFICATION_TASK = TextClassification(text_column="base", label_column="label")
+
+
[email protected](params=[".jsonl", ".csv"])
+def text_file_with_metadata(request, tmp_path, text_file):
+ metadata_filename_extension = request.param
+ data_dir = tmp_path / "data_dir"
+ data_dir.mkdir()
+ text_file_path = data_dir / "file.txt"
+ shutil.copyfile(text_file, text_file_path)
+ metadata_file_path = data_dir / f"metadata{metadata_filename_extension}"
+ metadata = textwrap.dedent(
+ """\
+ {"file_name": "file.txt", "additional_feature": "Dummy file"}
+ """
+ if metadata_filename_extension == ".jsonl"
+ else """\
+ file_name,additional_feature
+ file.txt,Dummy file
+ """
+ )
+ with open(metadata_file_path, "w", encoding="utf-8") as f:
+ f.write(metadata)
+ return text_file_path, metadata_file_path
+
+
+@for_all_test_methods(xfail_if_500_502_http_error)
[email protected]("ci_hub_config", "ci_hfh_hf_hub_url")
+class TestLoadFromHub:
+ _api = HfApi(endpoint=CI_HUB_ENDPOINT)
+ _token = CI_HUB_USER_TOKEN
+
+ def test_load_dataset_with_metadata_file(self, temporary_repo, text_file_with_metadata, tmp_path):
+ text_file_path, metadata_file_path = text_file_with_metadata
+ data_dir_path = text_file_path.parent
+ cache_dir_path = tmp_path / ".cache"
+ cache_dir_path.mkdir()
+ with temporary_repo() as repo_id:
+ self._api.create_repo(repo_id, token=self._token, repo_type="dataset")
+ self._api.upload_folder(
+ folder_path=str(data_dir_path),
+ repo_id=repo_id,
+ repo_type="dataset",
+ token=self._token,
+ )
+ data_files = [
+ f"hf://datasets/{repo_id}/{text_file_path.name}",
+ f"hf://datasets/{repo_id}/{metadata_file_path.name}",
+ ]
+ builder = DummyFolderBasedBuilder(
+ dataset_name=repo_id.split("/")[-1], data_files=data_files, cache_dir=str(cache_dir_path)
+ )
+ download_manager = DownloadManager()
+ gen_kwargs = builder._split_generators(download_manager)[0].gen_kwargs
+ generator = builder._generate_examples(**gen_kwargs)
+ result = [example for _, example in generator]
+ assert len(result) == 1
| Hub datasets with CSV metadata raise ArrowInvalid: JSON parse error: Invalid value. in row 0
When trying to load a Hub dataset that contains a CSV metadata file, `datasets` raises an `ArrowInvalid` error:
```
E pyarrow.lib.ArrowInvalid: JSON parse error: Invalid value. in row 0
pyarrow/error.pxi:100: ArrowInvalid
```
See: https://huggingface.co/datasets/lukarape/public_small_papers/discussions/1
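A hedged sketch of the failing call, using the repository from the linked discussion. Reading the patch above, the metadata reader was previously picked from the extension of the *downloaded* (cache-renamed) metadata file, so a `metadata.csv` fell through to the JSON reader:
```python
from datasets import load_dataset

ds = load_dataset("lukarape/public_small_papers")  # raised ArrowInvalid before this fix
```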
| 2023-10-19T10:21:34Z | [] | [] |
|
huggingface/datasets | 6,368 | huggingface__datasets-6368 | [
"6366"
] | c9c1166e1cf81d38534020f9c167b326585339e5 | diff --git a/src/datasets/formatting/formatting.py b/src/datasets/formatting/formatting.py
--- a/src/datasets/formatting/formatting.py
+++ b/src/datasets/formatting/formatting.py
@@ -624,7 +624,7 @@ def format_table(
else:
pa_table = table
query_type = key_to_query_type(key)
- python_formatter = PythonFormatter(features=None)
+ python_formatter = PythonFormatter(features=formatter.features)
if format_columns is None:
return formatter(pa_table, query_type=query_type)
elif query_type == "column":
| diff --git a/tests/test_arrow_dataset.py b/tests/test_arrow_dataset.py
--- a/tests/test_arrow_dataset.py
+++ b/tests/test_arrow_dataset.py
@@ -4551,6 +4551,19 @@ def test_dataset_to_iterable_dataset(dataset: Dataset):
dataset.with_format("torch").to_iterable_dataset()
+@require_pil
+def test_dataset_format_with_unformatted_image():
+ import PIL
+
+ ds = Dataset.from_dict(
+ {"a": [np.arange(4 * 4 * 3).reshape(4, 4, 3)] * 10, "b": [[0, 1]] * 10},
+ Features({"a": Image(), "b": Sequence(Value("int64"))}),
+ )
+ ds.set_format("np", columns=["b"], output_all_columns=True)
+ assert isinstance(ds[0]["a"], PIL.Image.Image)
+ assert isinstance(ds[0]["b"], np.ndarray)
+
+
@pytest.mark.parametrize("batch_size", [1, 4])
@require_torch
def test_dataset_with_torch_dataloader(dataset, batch_size):
| with_format() function returns bytes instead of PIL images even when image column is not part of "columns"
### Describe the bug
When using the `with_format()` function on a dataset containing images, the image column is returned as bytes instead of decoded `PIL` images, even when it is not included in the `columns` argument passed to the function.
Here is a minimal reproduction of the bug:
https://colab.research.google.com/drive/1hyaOspgyhB41oiR1-tXE3k_gJCdJUQCf?usp=sharing
### Steps to reproduce the bug
1. Load the image dataset
2. apply with_format(columns=["text"])
3. Check the type of images in the "image" column before and after applying with_format
### Expected behavior
The type should stay the same, but it does not
### Environment info
datasets==2.14.6
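A minimal inline sketch of the reported behaviour, mirroring the regression test added above; the array contents and column names are arbitrary:
```python
import numpy as np
from datasets import Dataset, Features, Image, Value

ds = Dataset.from_dict(
    {"image": [np.arange(4 * 4 * 3).reshape(4, 4, 3)] * 2, "text": ["some text"] * 2},
    features=Features({"image": Image(), "text": Value("string")}),
)
ds = ds.with_format("np", columns=["text"], output_all_columns=True)
print(type(ds[0]["image"]))  # expected <class 'PIL.Image.Image'>; came back as undecoded storage before the fix
```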
| 2023-10-31T19:48:08Z | [] | [] |
|
huggingface/datasets | 6,431 | huggingface__datasets-6431 | [
"6397"
] | 1a1e7416892dcb71097b47120bc9b26b3d90f06a | diff --git a/src/datasets/exceptions.py b/src/datasets/exceptions.py
--- a/src/datasets/exceptions.py
+++ b/src/datasets/exceptions.py
@@ -5,10 +5,23 @@
class DatasetsError(Exception):
"""Base class for exceptions in this library."""
- pass
-
class DefunctDatasetError(DatasetsError):
"""The dataset has been defunct."""
- pass
+
+class FileNotFoundDatasetsError(DatasetsError, FileNotFoundError):
+ """FileNotFoundError raised by this library."""
+
+
+class DataFilesNotFoundError(FileNotFoundDatasetsError):
+ """No (supported) data files found."""
+
+
+class DatasetNotFoundError(FileNotFoundDatasetsError):
+ """Dataset not found.
+
+ Raised when trying to access:
+ - a missing dataset, or
+ - a private/gated dataset and the user is not authenticated.
+ """
diff --git a/src/datasets/load.py b/src/datasets/load.py
--- a/src/datasets/load.py
+++ b/src/datasets/load.py
@@ -48,6 +48,7 @@
from .download.download_config import DownloadConfig
from .download.download_manager import DownloadMode
from .download.streaming_download_manager import StreamingDownloadManager, xbasename, xglob, xjoin
+from .exceptions import DataFilesNotFoundError, DatasetNotFoundError
from .features import Features
from .fingerprint import Hasher
from .info import DatasetInfo, DatasetInfosDict
@@ -494,9 +495,10 @@ def infer_module_for_data_files(
"""Infer module (and builder kwargs) from data files. Raise if module names for different splits don't match.
Args:
- data_files (DataFilesDict): List of data files.
- path (str, optional): Dataset name or path.
- DownloadConfig (bool or str, optional): for authenticate on the Hugging Face Hub for private remote files.
+ data_files ([`DataFilesDict`]): Dict of list of data files.
+ path (str, *optional*): Dataset name or path.
+ download_config ([`DownloadConfig`], *optional*):
+ Specific download configuration parameters to authenticate on the Hugging Face Hub for private remote files.
Returns:
tuple[str, dict[str, Any]]: Tuple with
@@ -511,8 +513,7 @@ def infer_module_for_data_files(
if any((module_name, default_builder_kwargs) != split_module for split_module in split_modules.values()):
raise ValueError(f"Couldn't infer the same data file format for all splits. Got {split_modules}")
if not module_name:
- path = f" in {path}. " if path else ". "
- raise FileNotFoundError(f"No (supported) data files or dataset script found{path}")
+ raise DataFilesNotFoundError("No (supported) data files found" + (f" in {path}" if path else ""))
return module_name, default_builder_kwargs
@@ -1471,7 +1472,7 @@ def dataset_module_factory(
elif "401" in str(e):
msg = f"Dataset '{path}' doesn't exist on the Hub"
msg = msg + f" at revision '{revision}'" if revision else msg
- raise FileNotFoundError(
+ raise DatasetNotFoundError(
msg + ". If the repo is private or gated, make sure to log in with `huggingface-cli login`."
)
else:
@@ -1493,13 +1494,15 @@ def dataset_module_factory(
download_config=download_config,
download_mode=download_mode,
).get_module()
- except Exception as e1: # noqa all the attempts failed, before raising the error we should check if the module is already cached.
+ except Exception as e1:
+ # All the attempts failed, before raising the error we should check if the module is already cached
try:
return CachedDatasetModuleFactory(path, dynamic_modules_path=dynamic_modules_path).get_module()
- except Exception: # noqa if it's not in the cache, then it doesn't exist.
+ except Exception:
+ # If it's not in the cache, then it doesn't exist.
if isinstance(e1, OfflineModeIsEnabled):
raise ConnectionError(f"Couldn't reach the Hugging Face Hub for dataset '{path}': {e1}") from None
- if isinstance(e1, EmptyDatasetError):
+ if isinstance(e1, (DataFilesNotFoundError, DatasetNotFoundError, EmptyDatasetError)):
raise e1 from None
if isinstance(e1, FileNotFoundError):
raise FileNotFoundError(
| diff --git a/tests/test_load.py b/tests/test_load.py
--- a/tests/test_load.py
+++ b/tests/test_load.py
@@ -24,6 +24,7 @@
from datasets.data_files import DataFilesDict
from datasets.dataset_dict import DatasetDict, IterableDatasetDict
from datasets.download.download_config import DownloadConfig
+from datasets.exceptions import DatasetNotFoundError
from datasets.features import Features, Value
from datasets.iterable_dataset import IterableDataset
from datasets.load import (
@@ -819,7 +820,9 @@ def test_dataset_module_factory(self):
# missing module
for offline_simulation_mode in list(OfflineSimulationMode):
with offline(offline_simulation_mode):
- with self.assertRaises((FileNotFoundError, ConnectionError, requests.exceptions.ConnectionError)):
+ with self.assertRaises(
+ (DatasetNotFoundError, ConnectionError, requests.exceptions.ConnectionError)
+ ):
datasets.load.dataset_module_factory(
"__missing_dummy_module_name__", dynamic_modules_path=self.dynamic_modules_path
)
@@ -850,13 +853,13 @@ def test_offline_dataset_module_factory(self):
self.assertIn("Using the latest cached version of the module", self._caplog.text)
def test_load_dataset_from_hub(self):
- with self.assertRaises(FileNotFoundError) as context:
+ with self.assertRaises(DatasetNotFoundError) as context:
datasets.load_dataset("_dummy")
self.assertIn(
"Dataset '_dummy' doesn't exist on the Hub",
str(context.exception),
)
- with self.assertRaises(FileNotFoundError) as context:
+ with self.assertRaises(DatasetNotFoundError) as context:
datasets.load_dataset("_dummy", revision="0.0.0")
self.assertIn(
"Dataset '_dummy' doesn't exist on the Hub",
@@ -877,7 +880,7 @@ def test_load_dataset_from_hub(self):
)
def test_load_dataset_namespace(self):
- with self.assertRaises(FileNotFoundError) as context:
+ with self.assertRaises(DatasetNotFoundError) as context:
datasets.load_dataset("hf-internal-testing/_dummy")
self.assertIn(
"hf-internal-testing/_dummy",
@@ -1018,7 +1021,7 @@ def test_load_dataset_builder_for_community_dataset_without_script():
def test_load_dataset_builder_fail():
- with pytest.raises(FileNotFoundError):
+ with pytest.raises(DatasetNotFoundError):
datasets.load_dataset_builder("blabla")
@@ -1037,10 +1040,9 @@ def test_load_dataset_local(dataset_loading_script_dir, data_dir, keep_in_memory
dataset = datasets.load_dataset(DATASET_LOADING_SCRIPT_NAME, data_dir=data_dir)
assert len(dataset) == 2
assert "Using the latest cached version of the module" in caplog.text
- with pytest.raises(FileNotFoundError) as exc_info:
+ with pytest.raises(DatasetNotFoundError) as exc_info:
datasets.load_dataset(SAMPLE_DATASET_NAME_THAT_DOESNT_EXIST)
assert f"Dataset '{SAMPLE_DATASET_NAME_THAT_DOESNT_EXIST}' doesn't exist on the Hub" in str(exc_info.value)
- assert os.path.abspath(SAMPLE_DATASET_NAME_THAT_DOESNT_EXIST) in str(exc_info.value)
def test_load_dataset_streaming(dataset_loading_script_dir, data_dir):
| Raise a different exception for a nonexistent dataset vs. files without a known extension
See https://github.com/huggingface/datasets-server/issues/2082#issuecomment-1805716557
We have the same error for:
- https://huggingface.co/datasets/severo/a_dataset_that_does_not_exist: a dataset that does not exist
- https://huggingface.co/datasets/severo/test_files_without_extension: a dataset with files without a known extension
```
>>> import datasets
>>> datasets.get_dataset_config_names('severo/a_dataset_that_does_not_exist')
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/home/slesage/hf/datasets-server/services/worker/.venv/lib/python3.9/site-packages/datasets/inspect.py", line 351, in get_dataset_config_names
dataset_module = dataset_module_factory(
File "/home/slesage/hf/datasets-server/services/worker/.venv/lib/python3.9/site-packages/datasets/load.py", line 1508, in dataset_module_factory
raise FileNotFoundError(
FileNotFoundError: Couldn't find a dataset script at /home/slesage/hf/datasets-server/services/worker/severo/a_dataset_that_does_not_exist/a_dataset_that_does_not_exist.py or any data file in the same directory. Couldn't find 'severo/a_dataset_that_does_not_exist' on the Hugging Face Hub either: FileNotFoundError: Dataset 'severo/a_dataset_that_does_not_exist' doesn't exist on the Hub. If the repo is private or gated, make sure to log in with `huggingface-cli login`.
>>> datasets.get_dataset_config_names('severo/test_files_without_extension')
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/home/slesage/hf/datasets-server/services/worker/.venv/lib/python3.9/site-packages/datasets/inspect.py", line 351, in get_dataset_config_names
dataset_module = dataset_module_factory(
File "/home/slesage/hf/datasets-server/services/worker/.venv/lib/python3.9/site-packages/datasets/load.py", line 1508, in dataset_module_factory
raise FileNotFoundError(
FileNotFoundError: Couldn't find a dataset script at /home/slesage/hf/datasets-server/services/worker/severo/test_files_without_extension/test_files_without_extension.py or any data file in the same directory. Couldn't find 'severo/test_files_without_extension' on the Hugging Face Hub either: FileNotFoundError: No (supported) data files or dataset script found in severo/test_files_without_extension.
```
To differentiate, we must parse the error message (only the end is different). We should have a different exception for these two errors.
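For illustration, a minimal sketch of how a caller could tell the two failures apart once a dedicated exception exists. It assumes the new class is exposed as `datasets.exceptions.DatasetNotFoundError` (as the updated tests above suggest) and that it remains a `FileNotFoundError` subclass, so it must be caught first; the repo ids are the example datasets from this issue.
```
import datasets
from datasets.exceptions import DatasetNotFoundError

def config_names_or_reason(repo_id):
    try:
        return datasets.get_dataset_config_names(repo_id)
    except DatasetNotFoundError:
        # the dataset repository itself is missing (or private/gated)
        return f"{repo_id} doesn't exist on the Hub (or is private/gated)"
    except FileNotFoundError:
        # the repository exists but no supported data files or script were found
        return f"{repo_id} exists but has no supported data files or dataset script"

print(config_names_or_reason("severo/a_dataset_that_does_not_exist"))
print(config_names_or_reason("severo/test_files_without_extension"))
```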
| 2023-11-16T16:02:55Z | [] | [] |
|
huggingface/datasets | 6,433 | huggingface__datasets-6433 | [
"6409"
] | 1a1e7416892dcb71097b47120bc9b26b3d90f06a | diff --git a/src/datasets/arrow_dataset.py b/src/datasets/arrow_dataset.py
--- a/src/datasets/arrow_dataset.py
+++ b/src/datasets/arrow_dataset.py
@@ -111,6 +111,7 @@
)
from .tasks import TaskTemplate
from .utils import logging
+from .utils import tqdm as hf_tqdm
from .utils.deprecation_utils import deprecated
from .utils.file_utils import _retry, estimate_dataset_size
from .utils.info_utils import is_small_dataset
@@ -1494,8 +1495,7 @@ def save_to_disk(
dataset_info = asdict(self._info)
shards_done = 0
- pbar = logging.tqdm(
- disable=not logging.is_progress_bar_enabled(),
+ pbar = hf_tqdm(
unit=" examples",
total=len(self),
desc=f"Saving the dataset ({shards_done}/{num_shards} shards)",
@@ -3080,8 +3080,7 @@ def load_processed_shard_from_cache(shard_kwargs):
except NonExistentDatasetError:
pass
if transformed_dataset is None:
- with logging.tqdm(
- disable=not logging.is_progress_bar_enabled(),
+ with hf_tqdm(
unit=" examples",
total=pbar_total,
desc=desc or "Map",
@@ -3173,8 +3172,7 @@ def format_new_fingerprint(new_fingerprint: str, rank: int) -> str:
with Pool(len(kwargs_per_job)) as pool:
os.environ = prev_env
logger.info(f"Spawning {num_proc} processes")
- with logging.tqdm(
- disable=not logging.is_progress_bar_enabled(),
+ with hf_tqdm(
unit=" examples",
total=pbar_total,
desc=(desc or "Map") + f" (num_proc={num_proc})",
@@ -5195,11 +5193,10 @@ def shards_with_embedded_external_files(shards):
uploaded_size = 0
additions = []
- for index, shard in logging.tqdm(
+ for index, shard in hf_tqdm(
enumerate(shards),
desc="Uploading the dataset shards",
total=num_shards,
- disable=not logging.is_progress_bar_enabled(),
):
shard_path_in_repo = f"{data_dir}/{split}-{index:05d}-of-{num_shards:05d}.parquet"
buffer = BytesIO()
diff --git a/src/datasets/arrow_writer.py b/src/datasets/arrow_writer.py
--- a/src/datasets/arrow_writer.py
+++ b/src/datasets/arrow_writer.py
@@ -41,6 +41,7 @@
from .keyhash import DuplicatedKeysError, KeyHasher
from .table import array_cast, array_concat, cast_array_to_feature, embed_table_storage, table_cast
from .utils import logging
+from .utils import tqdm as hf_tqdm
from .utils.file_utils import hash_url_to_filename
from .utils.py_utils import asdict, first_non_null_value
@@ -689,9 +690,8 @@ def finalize(self, metrics_query_result: dict):
for metadata in beam.io.filesystems.FileSystems.match([parquet_path + "*.parquet"])[0].metadata_list
]
try: # stream conversion
- disable = not logging.is_progress_bar_enabled()
num_bytes = 0
- for shard in logging.tqdm(shards, unit="shards", disable=disable):
+ for shard in hf_tqdm(shards, unit="shards"):
with beam.io.filesystems.FileSystems.open(shard) as source:
with beam.io.filesystems.FileSystems.create(
shard.replace(".parquet", ".arrow")
@@ -706,9 +706,8 @@ def finalize(self, metrics_query_result: dict):
)
local_convert_dir = os.path.join(self._cache_dir, "beam_convert")
os.makedirs(local_convert_dir, exist_ok=True)
- disable = not logging.is_progress_bar_enabled()
num_bytes = 0
- for shard in logging.tqdm(shards, unit="shards", disable=disable):
+ for shard in hf_tqdm(shards, unit="shards"):
local_parquet_path = os.path.join(local_convert_dir, hash_url_to_filename(shard) + ".parquet")
beam_utils.download_remote_to_local(shard, local_parquet_path)
local_arrow_path = local_parquet_path.replace(".parquet", ".arrow")
@@ -727,8 +726,7 @@ def finalize(self, metrics_query_result: dict):
def get_parquet_lengths(sources) -> List[int]:
shard_lengths = []
- disable = not logging.is_progress_bar_enabled()
- for source in logging.tqdm(sources, unit="parquet files", disable=disable):
+ for source in hf_tqdm(sources, unit="parquet files"):
parquet_file = pa.parquet.ParquetFile(source)
shard_lengths.append(parquet_file.metadata.num_rows)
return shard_lengths
diff --git a/src/datasets/builder.py b/src/datasets/builder.py
--- a/src/datasets/builder.py
+++ b/src/datasets/builder.py
@@ -65,6 +65,7 @@
from .splits import Split, SplitDict, SplitGenerator, SplitInfo
from .streaming import extend_dataset_builder_for_streaming
from .utils import logging
+from .utils import tqdm as hf_tqdm
from .utils.file_utils import cached_path, is_remote_url
from .utils.filelock import FileLock
from .utils.info_utils import VerificationMode, get_size_checksum_dict, verify_checksums, verify_splits
@@ -1526,8 +1527,7 @@ def _prepare_split(
)
num_proc = num_input_shards
- pbar = logging.tqdm(
- disable=not logging.is_progress_bar_enabled(),
+ pbar = hf_tqdm(
unit=" examples",
total=split_info.num_examples,
desc=f"Generating {split_info.name} split",
@@ -1784,8 +1784,7 @@ def _prepare_split(
)
num_proc = num_input_shards
- pbar = logging.tqdm(
- disable=not logging.is_progress_bar_enabled(),
+ pbar = hf_tqdm(
unit=" examples",
total=split_info.num_examples,
desc=f"Generating {split_info.name} split",
diff --git a/src/datasets/config.py b/src/datasets/config.py
--- a/src/datasets/config.py
+++ b/src/datasets/config.py
@@ -1,15 +1,15 @@
import importlib
import importlib.metadata
+import logging
import os
import platform
from pathlib import Path
+from typing import Optional
from packaging import version
-from .utils.logging import get_logger
-
-logger = get_logger(__name__)
+logger = logging.getLogger(__name__.split(".", 1)[0]) # to avoid circular import from .utils.logging
# Datasets
S3_DATASETS_BUCKET_PREFIX = "https://s3.amazonaws.com/datasets.huggingface.co/datasets/datasets"
@@ -192,6 +192,18 @@
# Offline mode
HF_DATASETS_OFFLINE = os.environ.get("HF_DATASETS_OFFLINE", "AUTO").upper() in ENV_VARS_TRUE_VALUES
+# Here, `True` will disable progress bars globally without possibility of enabling it
+# programmatically. `False` will enable them without possibility of disabling them.
+# If environment variable is not set (None), then the user is free to enable/disable
+# them programmatically.
+# TL;DR: env variable has priority over code
+__HF_DATASETS_DISABLE_PROGRESS_BARS = os.environ.get("HF_DATASETS_DISABLE_PROGRESS_BARS")
+HF_DATASETS_DISABLE_PROGRESS_BARS: Optional[bool] = (
+ __HF_DATASETS_DISABLE_PROGRESS_BARS.upper() in ENV_VARS_TRUE_VALUES
+ if __HF_DATASETS_DISABLE_PROGRESS_BARS is not None
+ else None
+)
+
# In-memory
DEFAULT_IN_MEMORY_MAX_SIZE = 0 # Disabled
IN_MEMORY_MAX_SIZE = float(os.environ.get("HF_DATASETS_IN_MEMORY_MAX_SIZE", DEFAULT_IN_MEMORY_MAX_SIZE))
diff --git a/src/datasets/data_files.py b/src/datasets/data_files.py
--- a/src/datasets/data_files.py
+++ b/src/datasets/data_files.py
@@ -17,6 +17,7 @@
from .download.streaming_download_manager import _prepare_path_and_storage_options, xbasename, xjoin
from .splits import Split
from .utils import logging
+from .utils import tqdm as hf_tqdm
from .utils.file_utils import is_local_path, is_relative_path
from .utils.py_utils import glob_pattern_to_regex, string_to_dict
@@ -515,9 +516,9 @@ def _get_origin_metadata(
partial(_get_single_origin_metadata, download_config=download_config),
data_files,
max_workers=max_workers,
- tqdm_class=logging.tqdm,
+ tqdm_class=hf_tqdm,
desc="Resolving data files",
- disable=len(data_files) <= 16 or not logging.is_progress_bar_enabled(),
+ disable=len(data_files) <= 16,
)
diff --git a/src/datasets/download/download_manager.py b/src/datasets/download/download_manager.py
--- a/src/datasets/download/download_manager.py
+++ b/src/datasets/download/download_manager.py
@@ -28,10 +28,11 @@
from typing import Callable, Dict, Generator, Iterable, List, Optional, Tuple, Union
from .. import config
+from ..utils import tqdm as hf_tqdm
from ..utils.deprecation_utils import DeprecatedEnum, deprecated
from ..utils.file_utils import cached_path, get_from_cache, hash_url_to_filename, is_relative_path, url_or_path_join
from ..utils.info_utils import get_size_checksum_dict
-from ..utils.logging import get_logger, is_progress_bar_enabled, tqdm
+from ..utils.logging import get_logger
from ..utils.py_utils import NestedDataStructure, map_nested, size_str
from .download_config import DownloadConfig
@@ -327,18 +328,16 @@ def upload(local_file_path):
uploaded_path_or_paths = map_nested(
lambda local_file_path: upload(local_file_path),
downloaded_path_or_paths,
- disable_tqdm=not is_progress_bar_enabled(),
)
return uploaded_path_or_paths
def _record_sizes_checksums(self, url_or_urls: NestedDataStructure, downloaded_path_or_paths: NestedDataStructure):
"""Record size/checksum of downloaded files."""
delay = 5
- for url, path in tqdm(
+ for url, path in hf_tqdm(
list(zip(url_or_urls.flatten(), downloaded_path_or_paths.flatten())),
delay=delay,
desc="Computing checksums",
- disable=not is_progress_bar_enabled(),
):
# call str to support PathLike objects
self._recorded_sizes_checksums[str(url)] = get_size_checksum_dict(
@@ -373,9 +372,7 @@ def download_custom(self, url_or_urls, custom_download):
def url_to_downloaded_path(url):
return os.path.join(cache_dir, hash_url_to_filename(url))
- downloaded_path_or_paths = map_nested(
- url_to_downloaded_path, url_or_urls, disable_tqdm=not is_progress_bar_enabled()
- )
+ downloaded_path_or_paths = map_nested(url_to_downloaded_path, url_or_urls)
url_or_urls = NestedDataStructure(url_or_urls)
downloaded_path_or_paths = NestedDataStructure(downloaded_path_or_paths)
for url, path in zip(url_or_urls.flatten(), downloaded_path_or_paths.flatten()):
@@ -426,7 +423,6 @@ def download(self, url_or_urls):
url_or_urls,
map_tuple=True,
num_proc=download_config.num_proc,
- disable_tqdm=not is_progress_bar_enabled(),
desc="Downloading data files",
)
duration = datetime.now() - start_time
@@ -534,7 +530,6 @@ def extract(self, path_or_paths, num_proc="deprecated"):
partial(cached_path, download_config=download_config),
path_or_paths,
num_proc=download_config.num_proc,
- disable_tqdm=not is_progress_bar_enabled(),
desc="Extracting data files",
)
path_or_paths = NestedDataStructure(path_or_paths)
diff --git a/src/datasets/io/csv.py b/src/datasets/io/csv.py
--- a/src/datasets/io/csv.py
+++ b/src/datasets/io/csv.py
@@ -5,7 +5,7 @@
from .. import Dataset, Features, NamedSplit, config
from ..formatting import query_table
from ..packaged_modules.csv.csv import Csv
-from ..utils import logging
+from ..utils import tqdm as hf_tqdm
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
@@ -117,10 +117,9 @@ def _write(self, file_obj: BinaryIO, header, index, **to_csv_kwargs) -> int:
written = 0
if self.num_proc is None or self.num_proc == 1:
- for offset in logging.tqdm(
+ for offset in hf_tqdm(
range(0, len(self.dataset), self.batch_size),
unit="ba",
- disable=not logging.is_progress_bar_enabled(),
desc="Creating CSV from Arrow format",
):
csv_str = self._batch_csv((offset, header, index, to_csv_kwargs))
@@ -129,14 +128,13 @@ def _write(self, file_obj: BinaryIO, header, index, **to_csv_kwargs) -> int:
else:
num_rows, batch_size = len(self.dataset), self.batch_size
with multiprocessing.Pool(self.num_proc) as pool:
- for csv_str in logging.tqdm(
+ for csv_str in hf_tqdm(
pool.imap(
self._batch_csv,
[(offset, header, index, to_csv_kwargs) for offset in range(0, num_rows, batch_size)],
),
total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size,
unit="ba",
- disable=not logging.is_progress_bar_enabled(),
desc="Creating CSV from Arrow format",
):
written += file_obj.write(csv_str)
diff --git a/src/datasets/io/json.py b/src/datasets/io/json.py
--- a/src/datasets/io/json.py
+++ b/src/datasets/io/json.py
@@ -7,7 +7,7 @@
from .. import Dataset, Features, NamedSplit, config
from ..formatting import query_table
from ..packaged_modules.json.json import Json
-from ..utils import logging
+from ..utils import tqdm as hf_tqdm
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
@@ -139,10 +139,9 @@ def _write(
written = 0
if self.num_proc is None or self.num_proc == 1:
- for offset in logging.tqdm(
+ for offset in hf_tqdm(
range(0, len(self.dataset), self.batch_size),
unit="ba",
- disable=not logging.is_progress_bar_enabled(),
desc="Creating json from Arrow format",
):
json_str = self._batch_json((offset, orient, lines, to_json_kwargs))
@@ -150,14 +149,13 @@ def _write(
else:
num_rows, batch_size = len(self.dataset), self.batch_size
with multiprocessing.Pool(self.num_proc) as pool:
- for json_str in logging.tqdm(
+ for json_str in hf_tqdm(
pool.imap(
self._batch_json,
[(offset, orient, lines, to_json_kwargs) for offset in range(0, num_rows, batch_size)],
),
total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size,
unit="ba",
- disable=not logging.is_progress_bar_enabled(),
desc="Creating json from Arrow format",
):
written += file_obj.write(json_str)
diff --git a/src/datasets/io/parquet.py b/src/datasets/io/parquet.py
--- a/src/datasets/io/parquet.py
+++ b/src/datasets/io/parquet.py
@@ -9,7 +9,7 @@
from ..formatting import query_table
from ..packaged_modules import _PACKAGED_DATASETS_MODULES
from ..packaged_modules.parquet.parquet import Parquet
-from ..utils import logging
+from ..utils import tqdm as hf_tqdm
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
@@ -140,10 +140,9 @@ def _write(self, file_obj: BinaryIO, batch_size: int, **parquet_writer_kwargs) -
writer = pq.ParquetWriter(file_obj, schema=schema, **parquet_writer_kwargs)
- for offset in logging.tqdm(
+ for offset in hf_tqdm(
range(0, len(self.dataset), batch_size),
unit="ba",
- disable=not logging.is_progress_bar_enabled(),
desc="Creating parquet from Arrow format",
):
batch = query_table(
diff --git a/src/datasets/io/sql.py b/src/datasets/io/sql.py
--- a/src/datasets/io/sql.py
+++ b/src/datasets/io/sql.py
@@ -4,7 +4,7 @@
from .. import Dataset, Features, config
from ..formatting import query_table
from ..packaged_modules.sql.sql import Sql
-from ..utils import logging
+from ..utils import tqdm as hf_tqdm
from .abc import AbstractDatasetInputStream
@@ -102,24 +102,22 @@ def _write(self, index, **to_sql_kwargs) -> int:
written = 0
if self.num_proc is None or self.num_proc == 1:
- for offset in logging.tqdm(
+ for offset in hf_tqdm(
range(0, len(self.dataset), self.batch_size),
unit="ba",
- disable=not logging.is_progress_bar_enabled(),
desc="Creating SQL from Arrow format",
):
written += self._batch_sql((offset, index, to_sql_kwargs))
else:
num_rows, batch_size = len(self.dataset), self.batch_size
with multiprocessing.Pool(self.num_proc) as pool:
- for num_rows in logging.tqdm(
+ for num_rows in hf_tqdm(
pool.imap(
self._batch_sql,
[(offset, index, to_sql_kwargs) for offset in range(0, num_rows, batch_size)],
),
total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size,
unit="ba",
- disable=not logging.is_progress_bar_enabled(),
desc="Creating SQL from Arrow format",
):
written += num_rows
diff --git a/src/datasets/search.py b/src/datasets/search.py
--- a/src/datasets/search.py
+++ b/src/datasets/search.py
@@ -8,6 +8,7 @@
import numpy as np
from .utils import logging
+from .utils import tqdm as hf_tqdm
if TYPE_CHECKING:
@@ -150,7 +151,7 @@ def add_documents(self, documents: Union[List[str], "Dataset"], column: Optional
index_config = self.es_index_config
self.es_client.indices.create(index=index_name, body=index_config)
number_of_docs = len(documents)
- progress = logging.tqdm(unit="docs", total=number_of_docs, disable=not logging.is_progress_bar_enabled())
+ progress = hf_tqdm(unit="docs", total=number_of_docs)
successes = 0
def passage_generator():
@@ -301,7 +302,7 @@ def add_vectors(
# Add vectors
logger.info(f"Adding {len(vectors)} vectors to the faiss index")
- for i in logging.tqdm(range(0, len(vectors), batch_size), disable=not logging.is_progress_bar_enabled()):
+ for i in hf_tqdm(range(0, len(vectors), batch_size)):
vecs = vectors[i : i + batch_size] if column is None else vectors[i : i + batch_size][column]
self.faiss_index.add(vecs)
diff --git a/src/datasets/utils/__init__.py b/src/datasets/utils/__init__.py
--- a/src/datasets/utils/__init__.py
+++ b/src/datasets/utils/__init__.py
@@ -14,18 +14,15 @@
# flake8: noqa
# Lint as: python3
-"""Util import."""
-
-__all__ = [
- "VerificationMode",
- "Version",
- "disable_progress_bar",
- "enable_progress_bar",
- "is_progress_bar_enabled",
- "experimental",
-]
+from . import tqdm as _tqdm # _tqdm is the module
from .info_utils import VerificationMode
from .logging import disable_progress_bar, enable_progress_bar, is_progress_bar_enabled
from .version import Version
from .experimental import experimental
+from .tqdm import (
+ disable_progress_bars,
+ enable_progress_bars,
+ are_progress_bars_disabled,
+ tqdm,
+)
diff --git a/src/datasets/utils/file_utils.py b/src/datasets/utils/file_utils.py
--- a/src/datasets/utils/file_utils.py
+++ b/src/datasets/utils/file_utils.py
@@ -24,13 +24,16 @@
import fsspec
import huggingface_hub
import requests
+from fsspec.core import strip_protocol
+from fsspec.utils import can_be_local
from huggingface_hub import HfFolder
from huggingface_hub.utils import insecure_hashlib
from packaging import version
from .. import __version__, config
from ..download.download_config import DownloadConfig
-from . import logging
+from . import _tqdm, logging
+from . import tqdm as hf_tqdm
from .extract import ExtractManager
from .filelock import FileLock
@@ -177,6 +180,10 @@ def cached_path(
if isinstance(url_or_filename, Path):
url_or_filename = str(url_or_filename)
+ # Convert fsspec URL in the format "file://local/path" to "local/path"
+ if can_be_local(url_or_filename):
+ url_or_filename = strip_protocol(url_or_filename)
+
if is_remote_url(url_or_filename):
# URL, so get it from the cache (downloading if necessary)
output_path = get_from_cache(
@@ -348,7 +355,7 @@ def fsspec_head(url, storage_options=None):
class TqdmCallback(fsspec.callbacks.TqdmCallback):
def __init__(self, tqdm_kwargs=None, *args, **kwargs):
super().__init__(tqdm_kwargs, *args, **kwargs)
- self._tqdm = logging # replace tqdm.tqdm by datasets.logging.tqdm
+ self._tqdm = _tqdm # replace tqdm.tqdm by datasets.tqdm.tqdm
def fsspec_get(url, temp_file, storage_options=None, desc=None):
@@ -359,7 +366,6 @@ def fsspec_get(url, temp_file, storage_options=None, desc=None):
callback = TqdmCallback(
tqdm_kwargs={
"desc": desc or "Downloading",
- "disable": not logging.is_progress_bar_enabled(),
"unit": "B",
"unit_scale": True,
}
@@ -408,13 +414,12 @@ def http_get(
return
content_length = response.headers.get("Content-Length")
total = resume_size + int(content_length) if content_length is not None else None
- with logging.tqdm(
+ with hf_tqdm(
unit="B",
unit_scale=True,
total=total,
initial=resume_size,
desc=desc or "Downloading",
- disable=not logging.is_progress_bar_enabled(),
) as progress:
for chunk in response.iter_content(chunk_size=1024):
progress.update(len(chunk))
diff --git a/src/datasets/utils/logging.py b/src/datasets/utils/logging.py
--- a/src/datasets/utils/logging.py
+++ b/src/datasets/utils/logging.py
@@ -27,7 +27,12 @@
)
from typing import Optional
-from tqdm import auto as tqdm_lib
+from .tqdm import ( # noqa: F401 # imported for backward compatibility
+ disable_progress_bar,
+ enable_progress_bar,
+ is_progress_bar_enabled,
+ tqdm,
+)
log_levels = {
@@ -172,76 +177,3 @@ def enable_propagation() -> None:
# Configure the library root logger at the module level (singleton-like)
_configure_library_root_logger()
-
-
-class EmptyTqdm:
- """Dummy tqdm which doesn't do anything."""
-
- def __init__(self, *args, **kwargs): # pylint: disable=unused-argument
- self._iterator = args[0] if args else None
-
- def __iter__(self):
- return iter(self._iterator)
-
- def __getattr__(self, _):
- """Return empty function."""
-
- def empty_fn(*args, **kwargs): # pylint: disable=unused-argument
- return
-
- return empty_fn
-
- def __enter__(self):
- return self
-
- def __exit__(self, type_, value, traceback):
- return
-
-
-_tqdm_active = True
-
-
-class _tqdm_cls:
- def __call__(self, *args, disable=False, **kwargs):
- if _tqdm_active and not disable:
- return tqdm_lib.tqdm(*args, **kwargs)
- else:
- return EmptyTqdm(*args, **kwargs)
-
- def set_lock(self, *args, **kwargs):
- self._lock = None
- if _tqdm_active:
- return tqdm_lib.tqdm.set_lock(*args, **kwargs)
-
- def get_lock(self):
- if _tqdm_active:
- return tqdm_lib.tqdm.get_lock()
-
- def __delattr__(self, attr):
- """fix for https://github.com/huggingface/datasets/issues/6066"""
- try:
- del self.__dict__[attr]
- except KeyError:
- if attr != "_lock":
- raise AttributeError(attr)
-
-
-tqdm = _tqdm_cls()
-
-
-def is_progress_bar_enabled() -> bool:
- """Return a boolean indicating whether tqdm progress bars are enabled."""
- global _tqdm_active
- return bool(_tqdm_active)
-
-
-def enable_progress_bar():
- """Enable tqdm progress bar."""
- global _tqdm_active
- _tqdm_active = True
-
-
-def disable_progress_bar():
- """Disable tqdm progress bar."""
- global _tqdm_active
- _tqdm_active = False
diff --git a/src/datasets/utils/py_utils.py b/src/datasets/utils/py_utils.py
--- a/src/datasets/utils/py_utils.py
+++ b/src/datasets/utils/py_utils.py
@@ -46,6 +46,7 @@
from .. import config
from ..parallel import parallel_map
from . import logging
+from . import tqdm as hf_tqdm
try: # pragma: no branch
@@ -377,7 +378,7 @@ def _single_map_nested(args):
# Loop over single examples or batches and write to buffer/file if examples are to be updated
pbar_iterable = data_struct.items() if isinstance(data_struct, dict) else data_struct
pbar_desc = (desc + " " if desc is not None else "") + "#" + str(rank) if rank is not None else desc
- with logging.tqdm(pbar_iterable, disable=disable_tqdm, position=rank, unit="obj", desc=pbar_desc) as pbar:
+ with hf_tqdm(pbar_iterable, disable=disable_tqdm, position=rank, unit="obj", desc=pbar_desc) as pbar:
if isinstance(data_struct, dict):
return {k: _single_map_nested((function, v, types, None, True, None)) for k, v in pbar}
else:
@@ -455,7 +456,6 @@ def map_nested(
if not isinstance(data_struct, dict) and not isinstance(data_struct, types):
return function(data_struct)
- disable_tqdm = disable_tqdm or not logging.is_progress_bar_enabled()
iterable = list(data_struct.values()) if isinstance(data_struct, dict) else data_struct
if num_proc is None:
@@ -463,7 +463,7 @@ def map_nested(
if num_proc != -1 and num_proc <= 1 or len(iterable) < parallel_min_length:
mapped = [
_single_map_nested((function, obj, types, None, True, None))
- for obj in logging.tqdm(iterable, disable=disable_tqdm, desc=desc)
+ for obj in hf_tqdm(iterable, disable=disable_tqdm, desc=desc)
]
else:
with warnings.catch_warnings():
diff --git a/src/datasets/utils/tqdm.py b/src/datasets/utils/tqdm.py
new file mode 100644
--- /dev/null
+++ b/src/datasets/utils/tqdm.py
@@ -0,0 +1,130 @@
+"""Utility helpers to handle progress bars in `datasets`.
+
+Example:
+ 1. Use `datasets.utils.tqdm` as you would use `tqdm.tqdm` or `tqdm.auto.tqdm`.
+ 2. To disable progress bars, either use `disable_progress_bars()` helper or set the
+ environment variable `HF_DATASETS_DISABLE_PROGRESS_BARS` to 1.
+ 3. To re-enable progress bars, use `enable_progress_bars()`.
+ 4. To check whether progress bars are disabled, use `are_progress_bars_disabled()`.
+
+NOTE: Environment variable `HF_DATASETS_DISABLE_PROGRESS_BARS` has the priority.
+
+Example:
+ ```py
+ from datasets.utils import (
+ are_progress_bars_disabled,
+ disable_progress_bars,
+ enable_progress_bars,
+ tqdm,
+ )
+
+ # Disable progress bars globally
+ disable_progress_bars()
+
+ # Use as normal `tqdm`
+ for _ in tqdm(range(5)):
+ do_something()
+
+ # Still not showing progress bars, as `disable=False` is overwritten to `True`.
+ for _ in tqdm(range(5), disable=False):
+ do_something()
+
+ are_progress_bars_disabled() # True
+
+ # Re-enable progress bars globally
+ enable_progress_bars()
+
+ # Progress bar will be shown !
+ for _ in tqdm(range(5)):
+ do_something()
+ ```
+"""
+import warnings
+
+from tqdm.auto import tqdm as old_tqdm
+
+from ..config import HF_DATASETS_DISABLE_PROGRESS_BARS
+
+
+# `HF_DATASETS_DISABLE_PROGRESS_BARS` is `Optional[bool]` while `_hf_datasets_progress_bars_disabled`
+# is a `bool`. If `HF_DATASETS_DISABLE_PROGRESS_BARS` is set to True or False, it has priority.
+# If `HF_DATASETS_DISABLE_PROGRESS_BARS` is None, it means the user have not set the
+# environment variable and is free to enable/disable progress bars programmatically.
+# TL;DR: env variable has priority over code.
+#
+# By default, progress bars are enabled.
+_hf_datasets_progress_bars_disabled: bool = HF_DATASETS_DISABLE_PROGRESS_BARS or False
+
+
+def disable_progress_bars() -> None:
+ """
+ Disable globally progress bars used in `datasets` except if `HF_DATASETS_DISABLE_PROGRESS_BAR` environment
+ variable has been set.
+
+ Use [`~utils.enable_progress_bars`] to re-enable them.
+ """
+ if HF_DATASETS_DISABLE_PROGRESS_BARS is False:
+ warnings.warn(
+ "Cannot disable progress bars: environment variable `HF_DATASETS_DISABLE_PROGRESS_BAR=0` is set and has"
+ " priority."
+ )
+ return
+ global _hf_datasets_progress_bars_disabled
+ _hf_datasets_progress_bars_disabled = True
+
+
+def enable_progress_bars() -> None:
+ """
+ Enable globally progress bars used in `datasets` except if `HF_DATASETS_DISABLE_PROGRESS_BAR` environment
+ variable has been set.
+
+ Use [`~utils.disable_progress_bars`] to disable them.
+ """
+ if HF_DATASETS_DISABLE_PROGRESS_BARS is True:
+ warnings.warn(
+ "Cannot enable progress bars: environment variable `HF_DATASETS_DISABLE_PROGRESS_BAR=1` is set and has"
+ " priority."
+ )
+ return
+ global _hf_datasets_progress_bars_disabled
+ _hf_datasets_progress_bars_disabled = False
+
+
+def are_progress_bars_disabled() -> bool:
+ """Return whether progress bars are globally disabled or not.
+
+ Progress bars used in `datasets` can be enable or disabled globally using [`~utils.enable_progress_bars`]
+ and [`~utils.disable_progress_bars`] or by setting `HF_DATASETS_DISABLE_PROGRESS_BAR` as environment variable.
+ """
+ global _hf_datasets_progress_bars_disabled
+ return _hf_datasets_progress_bars_disabled
+
+
+class tqdm(old_tqdm):
+ """
+ Class to override `disable` argument in case progress bars are globally disabled.
+
+ Taken from https://github.com/tqdm/tqdm/issues/619#issuecomment-619639324.
+ """
+
+ def __init__(self, *args, **kwargs):
+ if are_progress_bars_disabled():
+ kwargs["disable"] = True
+ super().__init__(*args, **kwargs)
+
+ def __delattr__(self, attr: str) -> None:
+ """Fix for https://github.com/huggingface/datasets/issues/6066"""
+ try:
+ super().__delattr__(attr)
+ except AttributeError:
+ if attr != "_lock":
+ raise
+
+
+# backward compatibility
+enable_progress_bar = enable_progress_bars
+disable_progress_bar = disable_progress_bars
+
+
+def is_progress_bar_enabled():
+ return not are_progress_bars_disabled()
| diff --git a/tests/test_file_utils.py b/tests/test_file_utils.py
--- a/tests/test_file_utils.py
+++ b/tests/test_file_utils.py
@@ -80,12 +80,14 @@ def test_extracted_datasets_path(default_extracted, default_cache_dir, xz_file,
def test_cached_path_local(text_file):
- # absolute path
- text_file = str(Path(text_file).resolve())
- assert cached_path(text_file) == text_file
- # relative path
- text_file = str(Path(__file__).resolve().relative_to(Path(os.getcwd())))
- assert cached_path(text_file) == text_file
+ # input absolute path -> output absolute path
+ text_file_abs = str(Path(text_file).resolve())
+ assert os.path.samefile(cached_path(text_file_abs), text_file_abs)
+ # input relative path -> output absolute path
+ text_file = __file__
+ text_file_abs = str(Path(text_file).resolve())
+ text_file_rel = str(Path(text_file).resolve().relative_to(Path(os.getcwd())))
+ assert os.path.samefile(cached_path(text_file_rel), text_file_abs)
def test_cached_path_missing_local(tmp_path):
diff --git a/tests/test_logging.py b/tests/test_logging.py
deleted file mode 100644
--- a/tests/test_logging.py
+++ /dev/null
@@ -1,19 +0,0 @@
-from unittest.mock import patch
-
-import datasets
-from datasets import Dataset
-
-
-def test_enable_disable_progress_bar():
- dset = Dataset.from_dict({"col_1": [3, 2, 0, 1]})
-
- with patch("tqdm.auto.tqdm") as mock_tqdm:
- datasets.disable_progress_bar()
- dset.map(lambda x: {"col_2": x["col_1"] + 1})
- mock_tqdm.assert_not_called()
-
- mock_tqdm.reset_mock()
-
- datasets.enable_progress_bar()
- dset.map(lambda x: {"col_2": x["col_1"] + 1})
- mock_tqdm.assert_called()
diff --git a/tests/test_tqdm.py b/tests/test_tqdm.py
new file mode 100644
--- /dev/null
+++ b/tests/test_tqdm.py
@@ -0,0 +1,116 @@
+import unittest
+from unittest.mock import patch
+
+import pytest
+from pytest import CaptureFixture
+
+from datasets.utils import (
+ are_progress_bars_disabled,
+ disable_progress_bars,
+ enable_progress_bars,
+ tqdm,
+)
+
+
+class TestTqdmUtils(unittest.TestCase):
+ @pytest.fixture(autouse=True)
+ def capsys(self, capsys: CaptureFixture) -> None:
+ """Workaround to make capsys work in unittest framework.
+
+ Capsys is a convenient pytest fixture to capture stdout.
+ See https://waylonwalker.com/pytest-capsys/.
+
+ Taken from https://github.com/pytest-dev/pytest/issues/2504#issuecomment-309475790.
+ """
+ self.capsys = capsys
+
+ def setUp(self) -> None:
+ """Get verbosity to set it back after the tests."""
+ self._previous_are_progress_bars_disabled = are_progress_bars_disabled()
+ return super().setUp()
+
+ def tearDown(self) -> None:
+ """Set back progress bars verbosity as before testing."""
+ if self._previous_are_progress_bars_disabled:
+ disable_progress_bars()
+ else:
+ enable_progress_bars()
+
+ @patch("datasets.utils._tqdm.HF_DATASETS_DISABLE_PROGRESS_BARS", None)
+ def test_tqdm_helpers(self) -> None:
+ """Test helpers to enable/disable progress bars."""
+ disable_progress_bars()
+ self.assertTrue(are_progress_bars_disabled())
+
+ enable_progress_bars()
+ self.assertFalse(are_progress_bars_disabled())
+
+ @patch("datasets.utils._tqdm.HF_DATASETS_DISABLE_PROGRESS_BARS", True)
+ def test_cannot_enable_tqdm_when_env_variable_is_set(self) -> None:
+ """
+ Test helpers cannot enable/disable progress bars when
+ `HF_DATASETS_DISABLE_PROGRESS_BARS` is set.
+ """
+ disable_progress_bars()
+ self.assertTrue(are_progress_bars_disabled())
+
+ with self.assertWarns(UserWarning):
+ enable_progress_bars()
+ self.assertTrue(are_progress_bars_disabled()) # Still disabled !
+
+ @patch("datasets.utils._tqdm.HF_DATASETS_DISABLE_PROGRESS_BARS", False)
+ def test_cannot_disable_tqdm_when_env_variable_is_set(self) -> None:
+ """
+ Test helpers cannot enable/disable progress bars when
+ `HF_DATASETS_DISABLE_PROGRESS_BARS` is set.
+ """
+ enable_progress_bars()
+ self.assertFalse(are_progress_bars_disabled())
+
+ with self.assertWarns(UserWarning):
+ disable_progress_bars()
+ self.assertFalse(are_progress_bars_disabled()) # Still enabled !
+
+ @patch("datasets.utils._tqdm.HF_DATASETS_DISABLE_PROGRESS_BARS", None)
+ def test_tqdm_disabled(self) -> None:
+ """Test TQDM not outputting anything when globally disabled."""
+ disable_progress_bars()
+ for _ in tqdm(range(10)):
+ pass
+
+ captured = self.capsys.readouterr()
+ self.assertEqual(captured.out, "")
+ self.assertEqual(captured.err, "")
+
+ @patch("datasets.utils._tqdm.HF_DATASETS_DISABLE_PROGRESS_BARS", None)
+ def test_tqdm_disabled_cannot_be_forced(self) -> None:
+ """Test TQDM cannot be forced when globally disabled."""
+ disable_progress_bars()
+ for _ in tqdm(range(10), disable=False):
+ pass
+
+ captured = self.capsys.readouterr()
+ self.assertEqual(captured.out, "")
+ self.assertEqual(captured.err, "")
+
+ @patch("datasets.utils._tqdm.HF_DATASETS_DISABLE_PROGRESS_BARS", None)
+ def test_tqdm_can_be_disabled_when_globally_enabled(self) -> None:
+ """Test TQDM can still be locally disabled even when globally enabled."""
+ enable_progress_bars()
+ for _ in tqdm(range(10), disable=True):
+ pass
+
+ captured = self.capsys.readouterr()
+ self.assertEqual(captured.out, "")
+ self.assertEqual(captured.err, "")
+
+ @patch("datasets.utils._tqdm.HF_DATASETS_DISABLE_PROGRESS_BARS", None)
+ def test_tqdm_enabled(self) -> None:
+ """Test TQDM work normally when globally enabled."""
+ enable_progress_bars()
+ for _ in tqdm(range(10)):
+ pass
+
+ captured = self.capsys.readouterr()
+ self.assertEqual(captured.out, "")
+ self.assertIn("10/10", captured.err) # tqdm log
| Using DownloadManager to download from the local filesystem with disable_progress_bar, an exception is raised
### Describe the bug
I'm using `datasets.download.download_manager.DownloadManager` to download files like "file:///a/b/c.txt", and I call `disable_progress_bar()` to disable the progress bar. This raises the following exception:
`AttributeError: 'function' object has no attribute 'close'
Exception ignored in: <function TqdmCallback.__del__ at 0x7fa8683d84c0>
Traceback (most recent call last):
File "/home/protoss.gao/.local/lib/python3.9/site-packages/fsspec/callbacks.py", line 233, in __del__
self.tqdm.close()`
I checked your source code: in `datasets/utils/file_utils.py:348` you define `TqdmCallback` derived from `fsspec.callbacks.TqdmCallback`,
but in the newest fsspec code (https://github.com/fsspec/filesystem_spec/blob/master/fsspec/callbacks.py), line 146, `_DEFAULT_CALLBACK` takes effect in this case, and line 234 calls the `close()` function, which `_DEFAULT_CALLBACK` does not have.
So I think the `TqdmCallback` class in `datasets/utils/file_utils.py` could override the `__del__` function, or this bug could be reported to fsspec.
### Steps to reproduce the bug
As I said above.
### Expected behavior
No exception is raised.
### Environment info
datasets: 2.14.4
python: 3.9
platform: x86_64
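For reference, a minimal sketch of the guard suggested above (overriding `__del__` so the callback tolerates a dummy tqdm object). This is a hypothetical workaround, not the fix that was merged; the patch above instead routes progress bars through a dedicated `datasets.utils.tqdm` module.
```
import fsspec


class SafeTqdmCallback(fsspec.callbacks.TqdmCallback):
    def __del__(self):
        try:
            # fsspec's __del__ calls self.tqdm.close()
            super().__del__()
        except AttributeError:
            # self.tqdm may be a plain function without close() when progress bars are disabled
            pass
```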
| 2023-11-17T15:45:15Z | [] | [] |
|
huggingface/datasets | 6,449 | huggingface__datasets-6449 | [
"6442"
] | 05ec66cc1abc20bd13d02c681b7be372ae084a4f | diff --git a/src/datasets/load.py b/src/datasets/load.py
--- a/src/datasets/load.py
+++ b/src/datasets/load.py
@@ -869,7 +869,7 @@ def get_module(self) -> DatasetModule:
data_files = data_files.filter_extensions(_MODULE_TO_EXTENSIONS[module_name])
# Collect metadata files if the module supports them
supports_metadata = module_name in _MODULE_SUPPORTS_METADATA
- if self.data_files is None and supports_metadata and patterns != DEFAULT_PATTERNS_ALL:
+ if self.data_files is None and supports_metadata:
try:
metadata_patterns = get_metadata_patterns(base_path)
except FileNotFoundError:
@@ -1059,7 +1059,7 @@ def get_module(self) -> DatasetModule:
data_files = data_files.filter_extensions(_MODULE_TO_EXTENSIONS[module_name])
# Collect metadata files if the module supports them
supports_metadata = module_name in _MODULE_SUPPORTS_METADATA
- if self.data_files is None and supports_metadata and patterns != DEFAULT_PATTERNS_ALL:
+ if self.data_files is None and supports_metadata:
try:
metadata_patterns = get_metadata_patterns(base_path)
except FileNotFoundError:
| diff --git a/tests/test_load.py b/tests/test_load.py
--- a/tests/test_load.py
+++ b/tests/test_load.py
@@ -86,6 +86,7 @@ def _generate_examples(self, filepath, **kwargs):
SAMPLE_DATASET_IDENTIFIER2 = "hf-internal-testing/dataset_with_data_files" # only has data files
SAMPLE_DATASET_IDENTIFIER3 = "hf-internal-testing/multi_dir_dataset" # has multiple data directories
SAMPLE_DATASET_IDENTIFIER4 = "hf-internal-testing/imagefolder_with_metadata" # imagefolder with a metadata file outside of the train/test directories
+SAMPLE_DATASET_IDENTIFIER5 = "hf-internal-testing/imagefolder_with_metadata_no_splits" # imagefolder with a metadata file and no default split names in data files
SAMPLE_NOT_EXISTING_DATASET_IDENTIFIER = "hf-internal-testing/_dummy"
SAMPLE_DATASET_NAME_THAT_DOESNT_EXIST = "_dummy"
SAMPLE_DATASET_NO_CONFIGS_IN_METADATA = "hf-internal-testing/audiofolder_no_configs_in_metadata"
@@ -630,6 +631,22 @@ def test_HubDatasetModuleFactoryWithoutScript_with_metadata(self):
for data_file in module_factory_result.builder_kwargs["data_files"]["test"]
)
+ factory = HubDatasetModuleFactoryWithoutScript(
+ SAMPLE_DATASET_IDENTIFIER5, download_config=self.download_config
+ )
+ module_factory_result = factory.get_module()
+ assert importlib.import_module(module_factory_result.module_path) is not None
+ assert module_factory_result.builder_kwargs["base_path"].startswith(config.HF_ENDPOINT)
+ assert (
+ module_factory_result.builder_kwargs["data_files"] is not None
+ and len(module_factory_result.builder_kwargs["data_files"]) == 1
+ and len(module_factory_result.builder_kwargs["data_files"]["train"]) > 0
+ )
+ assert any(
+ Path(data_file).name == "metadata.jsonl"
+ for data_file in module_factory_result.builder_kwargs["data_files"]["train"]
+ )
+
@pytest.mark.integration
def test_HubDatasetModuleFactoryWithoutScript_with_one_default_config_in_metadata(self):
factory = HubDatasetModuleFactoryWithoutScript(
| Trouble loading image folder with additional features - metadata file ignored
### Describe the bug
Loading an image folder with a caption column using `load_dataset(<image_folder_path>)` doesn't load the captions.
When loading a local image folder with captions using `datasets==2.13.0`
```
from datasets import load_dataset
data = load_dataset(<image_folder_path>)
data.column_names
```
yields
`{'train': ['image', 'prompt']}`
but when using `datasets==2.15.0`
yields
`{'train': ['image']}`
Putting the images and `metadata.jsonl` file into a nested `train` folder **or** loading with `load_dataset("imagefolder", data_dir=<image_folder_path>)` solves the issue and
yields
`{'train': ['image', 'prompt']}`
### Steps to reproduce the bug
1. Create a folder `<image_folder_path>` that contains images and a metadata file with additional features - e.g. "prompt"
2. run:
```
from datasets import load_dataset
data = load_dataset("<image_folder_path>")
data.column_names
```
### Expected behavior
`{'train': ['image', 'prompt']}`
### Environment info
- `datasets` version: 2.15.0
- Platform: Linux-5.15.120+-x86_64-with-glibc2.35
- Python version: 3.10.12
- `huggingface_hub` version: 0.19.4
- PyArrow version: 9.0.0
- Pandas version: 1.5.3
- `fsspec` version: 2023.6.0
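For reference, step 1 of the reproduction above assumes a flat layout with a `metadata.jsonl` next to the images; a hypothetical sketch that builds it (`file_name` is the key `imagefolder` expects, the file names and prompts are made up):
```
import json
from pathlib import Path

folder = Path("image_folder_path")
folder.mkdir(exist_ok=True)
# one JSON line per image; the extra "prompt" column is the feature that gets dropped in 2.15.0
with open(folder / "metadata.jsonl", "w") as f:
    for file_name, prompt in [("0001.png", "a photo of a cat"), ("0002.png", "a photo of a dog")]:
        f.write(json.dumps({"file_name": file_name, "prompt": prompt}) + "\n")
# the corresponding image files are expected at folder / "0001.png", folder / "0002.png"
```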
| I reproduced it too:
- root: metadata file is ignored (https://huggingface.co/datasets/severo/doc-image-3)
- data/ dir: metadata file is ignored (https://huggingface.co/datasets/severo/doc-image-4)
- train/ dir: works (https://huggingface.co/datasets/severo/doc-image-5) | 2023-11-23T17:35:02Z | [] | [] |
huggingface/datasets | 6,477 | huggingface__datasets-6477 | [
"6476"
] | f7721021e284859ea0952444bae6300a0d00794f | diff --git a/src/datasets/dataset_dict.py b/src/datasets/dataset_dict.py
--- a/src/datasets/dataset_dict.py
+++ b/src/datasets/dataset_dict.py
@@ -56,6 +56,17 @@ def _check_values_features(self):
f"All datasets in `DatasetDict` should have the same features but features for '{item_a[0]}' and '{item_b[0]}' don't match: {item_a[1].features} != {item_b[1].features}"
)
+ def __enter__(self):
+ return self
+
+ def __exit__(self, exc_type, exc_val, exc_tb):
+ # Here `del` is used to del the pyarrow tables. This properly closes the files used for memory mapped tables
+ for dataset in self.values():
+ if hasattr(dataset, "_data"):
+ del dataset._data
+ if hasattr(dataset, "_indices"):
+ del dataset._indices
+
def __getitem__(self, k) -> Dataset:
if isinstance(k, (str, NamedSplit)) or len(self) == 0:
return super().__getitem__(k)
| diff --git a/tests/test_load.py b/tests/test_load.py
--- a/tests/test_load.py
+++ b/tests/test_load.py
@@ -1313,10 +1313,9 @@ def test_load_dataset_with_unsupported_extensions(text_dir_with_unsupported_exte
@pytest.mark.integration
def test_loading_from_the_datasets_hub():
with tempfile.TemporaryDirectory() as tmp_dir:
- dataset = load_dataset(SAMPLE_DATASET_IDENTIFIER, cache_dir=tmp_dir)
- assert len(dataset["train"]) == 2
- assert len(dataset["validation"]) == 3
- del dataset
+ with load_dataset(SAMPLE_DATASET_IDENTIFIER, cache_dir=tmp_dir) as dataset:
+ assert len(dataset["train"]) == 2
+ assert len(dataset["validation"]) == 3
@pytest.mark.integration
| CI on Windows is broken: PermissionError
See: https://github.com/huggingface/datasets/actions/runs/7104781624/job/19340572394
```
FAILED tests/test_load.py::test_loading_from_the_datasets_hub - NotADirectoryError: [WinError 267] The directory name is invalid: 'C:\\Users\\RUNNER~1\\AppData\\Local\\Temp\\tmpfcnps56i\\hf-internal-testing___dataset_with_script\\default\\0.0.0\\c240e2be3370bdbd\\dataset_with_script-train.arrow'
```
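The patch above addresses this by letting `DatasetDict` act as a context manager that drops its memory-mapped Arrow tables on exit, so the temporary cache directory can be deleted on Windows. A minimal usage sketch, mirroring the failing test (the repo id is taken from the error above):
```
import tempfile

from datasets import load_dataset

with tempfile.TemporaryDirectory() as tmp_dir:
    # releasing the memory-mapped Arrow files on exit avoids the Windows error
    # when the temporary cache directory is removed afterwards
    with load_dataset("hf-internal-testing/dataset_with_script", cache_dir=tmp_dir) as dataset:
        print(len(dataset["train"]), len(dataset["validation"]))
```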
| 2023-12-06T08:34:53Z | [] | [] |
|
huggingface/datasets | 6,500 | huggingface__datasets-6500 | [
"6497"
] | ef0f986518bd252c5314a7e3a419dedcbb166630 | diff --git a/src/datasets/arrow_dataset.py b/src/datasets/arrow_dataset.py
--- a/src/datasets/arrow_dataset.py
+++ b/src/datasets/arrow_dataset.py
@@ -5225,6 +5225,7 @@ def push_to_hub(
self,
repo_id: str,
config_name: str = "default",
+ set_default: Optional[bool] = None,
split: Optional[str] = None,
commit_message: Optional[str] = None,
private: Optional[bool] = False,
@@ -5250,6 +5251,9 @@ def push_to_hub(
of the logged-in user.
config_name (`str`, defaults to "default"):
The configuration name (or subset) of a dataset. Defaults to "default".
+ set_default (`bool`, *optional*):
+ Whether to set this configuration as the default one. Otherwise, the default configuration is the one
+ named "default".
split (`str`, *optional*):
The name of the split that will be given to that dataset. Defaults to `self.split`.
commit_message (`str`, *optional*):
@@ -5275,13 +5279,14 @@ def push_to_hub(
</Deprecated>
create_pr (`bool`, *optional*, defaults to `False`):
- Whether or not to create a PR with the uploaded files or directly commit.
+ Whether to create a PR with the uploaded files or directly commit.
<Added version="2.15.0"/>
max_shard_size (`int` or `str`, *optional*, defaults to `"500MB"`):
The maximum size of the dataset shards to be uploaded to the hub. If expressed as a string, needs to be digits followed by
a unit (like `"5MB"`).
- num_shards (`int`, *optional*): Number of shards to write. By default the number of shards depends on `max_shard_size`.
+ num_shards (`int`, *optional*):
+ Number of shards to write. By default, the number of shards depends on `max_shard_size`.
<Added version="2.8.0"/>
embed_external_files (`bool`, defaults to `True`):
@@ -5485,6 +5490,17 @@ def push_to_hub(
}
else:
metadata_config_to_dump = {"data_files": [{"split": split, "path": f"{data_dir}/{split}-*"}]}
+ if set_default and config_name != "default":
+ if metadata_configs:
+ default_config_name = metadata_configs.get_default_config_name()
+ if default_config_name == "default":
+ raise ValueError(
+ "There exists a configuration named 'default'. To set a different configuration as default, "
+ "rename the 'default' one first."
+ )
+ else:
+ _ = metadata_configs[default_config_name].pop("default")
+ metadata_config_to_dump["default"] = True
# push to the deprecated dataset_infos.json
if repo_with_dataset_infos:
dataset_infos_path = api.hf_hub_download(
diff --git a/src/datasets/dataset_dict.py b/src/datasets/dataset_dict.py
--- a/src/datasets/dataset_dict.py
+++ b/src/datasets/dataset_dict.py
@@ -1556,6 +1556,7 @@ def push_to_hub(
self,
repo_id,
config_name: str = "default",
+ set_default: Optional[bool] = None,
commit_message: Optional[str] = None,
private: Optional[bool] = False,
token: Optional[str] = None,
@@ -1582,6 +1583,9 @@ def push_to_hub(
of the logged-in user.
config_name (`str`):
Configuration name of a dataset. Defaults to "default".
+ set_default (`bool`, *optional*):
+ Whether to set this configuration as the default one. Otherwise, the default configuration is the one
+ named "default".
commit_message (`str`, *optional*):
Message to commit while pushing. Will default to `"Upload dataset"`.
private (`bool`, *optional*):
@@ -1605,14 +1609,14 @@ def push_to_hub(
</Deprecated>
create_pr (`bool`, *optional*, defaults to `False`):
- Whether or not to create a PR with the uploaded files or directly commit.
+ Whether to create a PR with the uploaded files or directly commit.
<Added version="2.15.0"/>
max_shard_size (`int` or `str`, *optional*, defaults to `"500MB"`):
The maximum size of the dataset shards to be uploaded to the hub. If expressed as a string, needs to be digits followed by a unit
(like `"500MB"` or `"1GB"`).
num_shards (`Dict[str, int]`, *optional*):
- Number of shards to write. By default the number of shards depends on `max_shard_size`.
+ Number of shards to write. By default, the number of shards depends on `max_shard_size`.
Use a dictionary to define a different num_shards for each split.
<Added version="2.8.0"/>
@@ -1712,10 +1716,6 @@ def push_to_hub(
info_to_dump.dataset_size = total_dataset_nbytes
info_to_dump.size_in_bytes = total_uploaded_size + total_dataset_nbytes
- metadata_config_to_dump = {
- "data_files": [{"split": split, "path": f"{data_dir}/{split}-*"} for split in self.keys()],
- }
-
# Check if the repo already has a README.md and/or a dataset_infos.json to update them with the new split info (size and pattern)
# and delete old split shards (if they exist)
repo_with_dataset_card, repo_with_dataset_infos = False, False
@@ -1763,6 +1763,20 @@ def push_to_hub(
"data_files": [{"split": split, "path": f"data/{split}-*"} for split in repo_splits]
}
MetadataConfigs({"default": default_metadata_configs_to_dump}).to_dataset_card_data(dataset_card_data)
+ metadata_config_to_dump = {
+ "data_files": [{"split": split, "path": f"{data_dir}/{split}-*"} for split in self.keys()],
+ }
+ if set_default and config_name != "default":
+ if metadata_configs:
+ default_config_name = metadata_configs.get_default_config_name()
+ if default_config_name == "default":
+ raise ValueError(
+ "There exists a configuration named 'default'. To set a different configuration as default, "
+ "rename the 'default' one first."
+ )
+ else:
+ _ = metadata_configs[default_config_name].pop("default")
+ metadata_config_to_dump["default"] = True
# push to the deprecated dataset_infos.json
if repo_with_dataset_infos:
dataset_infos_path = api.hf_hub_download(
| diff --git a/tests/test_upstream_hub.py b/tests/test_upstream_hub.py
--- a/tests/test_upstream_hub.py
+++ b/tests/test_upstream_hub.py
@@ -591,13 +591,19 @@ def test_push_multiple_dataset_configs_to_hub_load_dataset(self, temporary_repo)
with pytest.raises(ValueError): # no config 'config3'
load_dataset(ds_name, "config3", download_mode="force_redownload")
- def test_push_multiple_dataset_configs_to_hub_readme_metadata_content(self, temporary_repo):
+ @pytest.mark.parametrize("specific_default_config_name", [False, True])
+ def test_push_multiple_dataset_configs_to_hub_readme_metadata_content(
+ self, specific_default_config_name, temporary_repo
+ ):
ds_default = Dataset.from_dict({"a": [0], "b": [2]})
ds_config1 = Dataset.from_dict({"x": [1, 2, 3], "y": [4, 5, 6]})
ds_config2 = Dataset.from_dict({"foo": [1, 2], "bar": [4, 5]})
with temporary_repo() as ds_name:
- ds_default.push_to_hub(ds_name, token=self._token)
+ if specific_default_config_name:
+ ds_default.push_to_hub(ds_name, config_name="config0", set_default=True, token=self._token)
+ else:
+ ds_default.push_to_hub(ds_name, token=self._token)
ds_config1.push_to_hub(ds_name, "config1", token=self._token)
ds_config2.push_to_hub(ds_name, "config2", token=self._token)
@@ -606,7 +612,19 @@ def test_push_multiple_dataset_configs_to_hub_readme_metadata_content(self, temp
dataset_card_data = DatasetCard.load(ds_readme_path).data
assert METADATA_CONFIGS_FIELD in dataset_card_data
assert isinstance(dataset_card_data[METADATA_CONFIGS_FIELD], list)
- assert sorted(dataset_card_data[METADATA_CONFIGS_FIELD], key=lambda x: x["config_name"]) == [
+ assert sorted(dataset_card_data[METADATA_CONFIGS_FIELD], key=lambda x: x["config_name"]) == (
+ [
+ {
+ "config_name": "config0",
+ "data_files": [
+ {"split": "train", "path": "config0/train-*"},
+ ],
+ "default": True,
+ },
+ ]
+ if specific_default_config_name
+ else []
+ ) + [
{
"config_name": "config1",
"data_files": [
@@ -619,13 +637,18 @@ def test_push_multiple_dataset_configs_to_hub_readme_metadata_content(self, temp
{"split": "train", "path": "config2/train-*"},
],
},
- {
- "config_name": "default",
- "data_files": [
- {"split": "train", "path": "data/train-*"},
- ],
- },
- ]
+ ] + (
+ []
+ if specific_default_config_name
+ else [
+ {
+ "config_name": "default",
+ "data_files": [
+ {"split": "train", "path": "data/train-*"},
+ ],
+ },
+ ]
+ )
def test_push_multiple_dataset_dict_configs_to_hub_load_dataset_builder(self, temporary_repo):
ds_default = Dataset.from_dict({"a": [0], "b": [1]})
@@ -714,7 +737,10 @@ def test_push_multiple_dataset_dict_configs_to_hub_load_dataset(self, temporary_
with pytest.raises(ValueError): # no config 'config3'
load_dataset(ds_name, "config3", download_mode="force_redownload")
- def test_push_multiple_dataset_dict_configs_to_hub_readme_metadata_content(self, temporary_repo):
+ @pytest.mark.parametrize("specific_default_config_name", [False, True])
+ def test_push_multiple_dataset_dict_configs_to_hub_readme_metadata_content(
+ self, specific_default_config_name, temporary_repo
+ ):
ds_default = Dataset.from_dict({"a": [0], "b": [1]})
ds_config1 = Dataset.from_dict({"x": [1, 2, 3], "y": [4, 5, 6]})
ds_config2 = Dataset.from_dict({"foo": [1, 2], "bar": [4, 5]})
@@ -723,7 +749,10 @@ def test_push_multiple_dataset_dict_configs_to_hub_readme_metadata_content(self,
ds_config2 = DatasetDict({"train": ds_config2, "random": ds_config2})
with temporary_repo() as ds_name:
- ds_default.push_to_hub(ds_name, token=self._token)
+ if specific_default_config_name:
+ ds_default.push_to_hub(ds_name, config_name="config0", set_default=True, token=self._token)
+ else:
+ ds_default.push_to_hub(ds_name, token=self._token)
ds_config1.push_to_hub(ds_name, "config1", token=self._token)
ds_config2.push_to_hub(ds_name, "config2", token=self._token)
@@ -732,7 +761,20 @@ def test_push_multiple_dataset_dict_configs_to_hub_readme_metadata_content(self,
dataset_card_data = DatasetCard.load(ds_readme_path).data
assert METADATA_CONFIGS_FIELD in dataset_card_data
assert isinstance(dataset_card_data[METADATA_CONFIGS_FIELD], list)
- assert sorted(dataset_card_data[METADATA_CONFIGS_FIELD], key=lambda x: x["config_name"]) == [
+ assert sorted(dataset_card_data[METADATA_CONFIGS_FIELD], key=lambda x: x["config_name"]) == (
+ [
+ {
+ "config_name": "config0",
+ "data_files": [
+ {"split": "train", "path": "config0/train-*"},
+ {"split": "random", "path": "config0/random-*"},
+ ],
+ "default": True,
+ },
+ ]
+ if specific_default_config_name
+ else []
+ ) + [
{
"config_name": "config1",
"data_files": [
@@ -747,14 +789,19 @@ def test_push_multiple_dataset_dict_configs_to_hub_readme_metadata_content(self,
{"split": "random", "path": "config2/random-*"},
],
},
- {
- "config_name": "default",
- "data_files": [
- {"split": "train", "path": "data/train-*"},
- {"split": "random", "path": "data/random-*"},
- ],
- },
- ]
+ ] + (
+ []
+ if specific_default_config_name
+ else [
+ {
+ "config_name": "default",
+ "data_files": [
+ {"split": "train", "path": "data/train-*"},
+ {"split": "random", "path": "data/random-*"},
+ ],
+ },
+ ]
+ )
def test_push_dataset_to_hub_with_config_no_metadata_configs(self, temporary_repo):
ds = Dataset.from_dict({"x": [1, 2, 3], "y": [4, 5, 6]})
| Support setting a default config name in push_to_hub
In order to convert script-datasets to no-script datasets, we need to support setting a default config name for those scripts that set one.
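A minimal usage sketch of the `set_default` flag added in the patch above; the repo id, config name, and data are placeholders:
```
from datasets import Dataset

ds = Dataset.from_dict({"text": ["hello", "world"]})
# push a named configuration and mark it as the default one instead of "default"
ds.push_to_hub("username/my_dataset", config_name="en", set_default=True)
```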
| 2023-12-15T09:17:41Z | [] | [] |
|
huggingface/datasets | 6,526 | huggingface__datasets-6526 | [
"6521"
] | 2afbf785f8d0551cdd65a81c5c3228e469613724 | diff --git a/src/datasets/utils/metadata.py b/src/datasets/utils/metadata.py
--- a/src/datasets/utils/metadata.py
+++ b/src/datasets/utils/metadata.py
@@ -179,26 +179,37 @@ def _from_exported_parquet_files_and_dataset_infos(
exported_parquet_files: List[Dict[str, Any]],
dataset_infos: DatasetInfosDict,
) -> "MetadataConfigs":
- return cls(
- {
+ metadata_configs = {
+ config_name: {
+ "data_files": [
+ {
+ "split": split_name,
+ "path": [
+ parquet_file["url"].replace("refs%2Fconvert%2Fparquet", revision)
+ for parquet_file in parquet_files_for_split
+ ],
+ }
+ for split_name, parquet_files_for_split in groupby(parquet_files_for_config, itemgetter("split"))
+ ],
+ "version": str(dataset_infos.get(config_name, DatasetInfo()).version or "0.0.0"),
+ }
+ for config_name, parquet_files_for_config in groupby(exported_parquet_files, itemgetter("config"))
+ }
+ if dataset_infos:
+ # Preserve order of configs and splits
+ metadata_configs = {
config_name: {
"data_files": [
- {
- "split": split_name,
- "path": [
- parquet_file["url"].replace("refs%2Fconvert%2Fparquet", revision)
- for parquet_file in parquet_files_for_split
- ],
- }
- for split_name, parquet_files_for_split in groupby(
- parquet_files_for_config, itemgetter("split")
- )
+ data_file
+ for split_name in dataset_info.splits
+ for data_file in metadata_configs[config_name]["data_files"]
+ if data_file["split"] == split_name
],
- "version": str(dataset_infos.get(config_name, DatasetInfo()).version or "0.0.0"),
+ "version": metadata_configs[config_name]["version"],
}
- for config_name, parquet_files_for_config in groupby(exported_parquet_files, itemgetter("config"))
+ for config_name, dataset_info in dataset_infos.items()
}
- )
+ return cls(metadata_configs)
@classmethod
def from_dataset_card_data(cls, dataset_card_data: DatasetCardData) -> "MetadataConfigs":
| diff --git a/tests/test_metadata_util.py b/tests/test_metadata_util.py
--- a/tests/test_metadata_util.py
+++ b/tests/test_metadata_util.py
@@ -9,6 +9,7 @@
from huggingface_hub import DatasetCard, DatasetCardData
from datasets.config import METADATA_CONFIGS_FIELD
+from datasets.info import DatasetInfo
from datasets.utils.metadata import MetadataConfigs
@@ -249,3 +250,85 @@ def test_metadata_configs_incorrect_yaml():
dataset_card_data = DatasetCard.load(path).data
with pytest.raises(ValueError):
_ = MetadataConfigs.from_dataset_card_data(dataset_card_data)
+
+
+def test_split_order_in_metadata_configs_from_exported_parquet_files_and_dataset_infos():
+ exported_parquet_files = [
+ {
+ "dataset": "beans",
+ "config": "default",
+ "split": "test",
+ "url": "https://huggingface.co/datasets/beans/resolve/refs%2Fconvert%2Fparquet/default/test/0000.parquet",
+ "filename": "0000.parquet",
+ "size": 17707203,
+ },
+ {
+ "dataset": "beans",
+ "config": "default",
+ "split": "train",
+ "url": "https://huggingface.co/datasets/beans/resolve/refs%2Fconvert%2Fparquet/default/train/0000.parquet",
+ "filename": "0000.parquet",
+ "size": 143780164,
+ },
+ {
+ "dataset": "beans",
+ "config": "default",
+ "split": "validation",
+ "url": "https://huggingface.co/datasets/beans/resolve/refs%2Fconvert%2Fparquet/default/validation/0000.parquet",
+ "filename": "0000.parquet",
+ "size": 18500862,
+ },
+ ]
+ dataset_infos = {
+ "default": DatasetInfo(
+ dataset_name="beans",
+ config_name="default",
+ version="0.0.0",
+ splits={
+ "train": {
+ "name": "train",
+ "num_bytes": 143996486,
+ "num_examples": 1034,
+ "shard_lengths": None,
+ "dataset_name": "beans",
+ },
+ "validation": {
+ "name": "validation",
+ "num_bytes": 18525985,
+ "num_examples": 133,
+ "shard_lengths": None,
+ "dataset_name": "beans",
+ },
+ "test": {
+ "name": "test",
+ "num_bytes": 17730506,
+ "num_examples": 128,
+ "shard_lengths": None,
+ "dataset_name": "beans",
+ },
+ },
+ download_checksums={
+ "https://huggingface.co/datasets/beans/resolve/main/data/train.zip": {
+ "num_bytes": 143812152,
+ "checksum": None,
+ },
+ "https://huggingface.co/datasets/beans/resolve/main/data/validation.zip": {
+ "num_bytes": 18504213,
+ "checksum": None,
+ },
+ "https://huggingface.co/datasets/beans/resolve/main/data/test.zip": {
+ "num_bytes": 17708541,
+ "checksum": None,
+ },
+ },
+ download_size=180024906,
+ post_processing_size=None,
+ dataset_size=180252977,
+ size_in_bytes=360277883,
+ )
+ }
+ metadata_configs = MetadataConfigs._from_exported_parquet_files_and_dataset_infos(
+ "123", exported_parquet_files, dataset_infos
+ )
+ split_names = [data_file["split"] for data_file in metadata_configs["default"]["data_files"]]
+ assert split_names == ["train", "validation", "test"]
| The order of the splits is not preserved
We had a regression: the order of the splits is not preserved. They are sorted alphabetically instead of preserving the original "train", "validation", "test" order.
Check on branch "main":
```python
In [9]: dataset = load_dataset("adversarial_qa", "adversarialQA")
In [10]: dataset
Out[10]:
DatasetDict({
test: Dataset({
features: ['id', 'title', 'context', 'question', 'answers', 'metadata'],
num_rows: 3000
})
train: Dataset({
features: ['id', 'title', 'context', 'question', 'answers', 'metadata'],
num_rows: 30000
})
validation: Dataset({
features: ['id', 'title', 'context', 'question', 'answers', 'metadata'],
num_rows: 3000
})
})
```
Before (2.15.0) it was:
```python
DatasetDict({
train: Dataset({
features: ['id', 'title', 'context', 'question', 'answers', 'metadata'],
num_rows: 30000
})
validation: Dataset({
features: ['id', 'title', 'context', 'question', 'answers', 'metadata'],
num_rows: 3000
})
test: Dataset({
features: ['id', 'title', 'context', 'question', 'answers', 'metadata'],
num_rows: 3000
})
})
```
See issues:
- https://huggingface.co/datasets/adversarial_qa/discussions/3
- https://huggingface.co/datasets/beans/discussions/4
This is a regression because it was previously fixed. See:
- #6196
- #5728
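A quick check of the expected behavior, reusing the reproduction above (the split order should follow the original declaration again instead of being sorted):
```python
from datasets import load_dataset

dataset = load_dataset("adversarial_qa", "adversarialQA")
print(list(dataset.keys()))  # expected after the fix: ['train', 'validation', 'test']
```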
| 2023-12-22T10:35:56Z | [] | [] |
|
huggingface/datasets | 6,587 | huggingface__datasets-6587 | [
"6466"
] | 9d6d16117a30ba345b0236407975f701c5b288d4 | diff --git a/src/datasets/features/features.py b/src/datasets/features/features.py
--- a/src/datasets/features/features.py
+++ b/src/datasets/features/features.py
@@ -2126,7 +2126,10 @@ def _align_features(features_list: List[Features]) -> List[Features]:
name2feature = {}
for features in features_list:
for k, v in features.items():
- if k not in name2feature or (isinstance(name2feature[k], Value) and name2feature[k].dtype == "null"):
+ if k in name2feature and isinstance(v, dict):
+ # Recursively align features.
+ name2feature[k] = _align_features([name2feature[k], v])[0]
+ elif k not in name2feature or (isinstance(name2feature[k], Value) and name2feature[k].dtype == "null"):
name2feature[k] = v
return [Features({k: name2feature[k] for k in features.keys()}) for features in features_list]
@@ -2145,7 +2148,10 @@ def _check_if_features_can_be_aligned(features_list: List[Features]):
for features in features_list:
for k, v in features.items():
- if not (isinstance(v, Value) and v.dtype == "null") and name2feature[k] != v:
+ if isinstance(v, dict) and isinstance(name2feature[k], dict):
+ # Deep checks for structure.
+ _check_if_features_can_be_aligned([name2feature[k], v])
+ elif not (isinstance(v, Value) and v.dtype == "null") and name2feature[k] != v:
raise ValueError(
f'The features can\'t be aligned because the key {k} of features {features} has unexpected type - {v} (expected either {name2feature[k]} or Value("null").'
)
| diff --git a/tests/features/test_features.py b/tests/features/test_features.py
--- a/tests/features/test_features.py
+++ b/tests/features/test_features.py
@@ -1,4 +1,5 @@
import datetime
+from typing import List, Tuple
from unittest import TestCase
from unittest.mock import patch
@@ -11,8 +12,10 @@
from datasets.arrow_dataset import Dataset
from datasets.features import Audio, ClassLabel, Features, Image, Sequence, Value
from datasets.features.features import (
+ _align_features,
_arrow_to_datasets_dtype,
_cast_to_python_objects,
+ _check_if_features_can_be_aligned,
cast_to_python_objects,
encode_nested_example,
generate_from_dict,
@@ -637,3 +640,42 @@ def test_features_to_arrow_schema(features: Features):
assert isinstance(arrow_schema, pa.Schema)
reloaded = Features.from_arrow_schema(arrow_schema)
assert features == reloaded
+
+
+NESTED_COMPARISON = [
+ [
+ [Features({"email": Value(dtype="string", id=None)}), Features({"email": Value(dtype="string", id=None)})],
+ [Features({"email": Value(dtype="string", id=None)}), Features({"email": Value(dtype="string", id=None)})],
+ ],
+ [
+ [Features({"email": Value(dtype="string", id=None)}), Features({"email": Value(dtype="null", id=None)})],
+ [Features({"email": Value(dtype="string", id=None)}), Features({"email": Value(dtype="string", id=None)})],
+ ],
+ [
+ [
+ Features({"speaker": {"email": Value(dtype="string", id=None)}}),
+ Features({"speaker": {"email": Value(dtype="string", id=None)}}),
+ ],
+ [
+ Features({"speaker": {"email": Value(dtype="string", id=None)}}),
+ Features({"speaker": {"email": Value(dtype="string", id=None)}}),
+ ],
+ ],
+ [
+ [
+ Features({"speaker": {"email": Value(dtype="string", id=None)}}),
+ Features({"speaker": {"email": Value(dtype="null", id=None)}}),
+ ],
+ [
+ Features({"speaker": {"email": Value(dtype="string", id=None)}}),
+ Features({"speaker": {"email": Value(dtype="string", id=None)}}),
+ ],
+ ],
+]
+
+
[email protected]("features", NESTED_COMPARISON)
+def test_features_alignment(features: Tuple[List[Features], Features]):
+ inputs, expected = features
+ _check_if_features_can_be_aligned(inputs) # Check that we can align, will raise otherwise.
+ assert _align_features(inputs) == expected
| Can't align optional features of struct
### Describe the bug
Hello!
I'm currently experiencing an issue where I can't concatenate datasets if an inner field of a Feature is Optional.
I have a column named `speaker`, and this holds some information about a speaker.
```python
@dataclass
class Speaker:
name: str
email: Optional[str]
```
If I have two datasets and one of them happens to have `email` always None, then I get `The features can't be aligned because the key email of features`
### Steps to reproduce the bug
You can run the following script:
```python
ds = Dataset.from_dict({'speaker': [{'name': 'Ben', 'email': None}]})
ds2 = Dataset.from_dict({'speaker': [{'name': 'Fred', 'email': '[email protected]'}]})
concatenate_datasets([ds, ds2])
>>>The features can't be aligned because the key speaker of features {'speaker': {'email': Value(dtype='string', id=None), 'name': Value(dtype='string', id=None)}} has unexpected type - {'email': Value(dtype='string', id=None), 'name': Value(dtype='string', id=None)} (expected either {'email': Value(dtype='null', id=None), 'name': Value(dtype='string', id=None)} or Value("null").
```
### Expected behavior
I think this should work; if two top-level columns were in the same situation it would properly cast to `string`.
```python
ds = Dataset.from_dict({'email': [None, None]})
ds2 = Dataset.from_dict({'email': ['[email protected]', '[email protected]']})
concatenate_datasets([ds, ds2])
>>> # Works!
```
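For completeness, a minimal sketch of the nested case I would expect to behave the same way (desired behavior, not the current one):
```python
from datasets import Dataset, concatenate_datasets

ds = Dataset.from_dict({"speaker": [{"name": "Ben", "email": None}]})
ds2 = Dataset.from_dict({"speaker": [{"name": "Fred", "email": "[email protected]"}]})
combined = concatenate_datasets([ds, ds2])
print(combined.features)  # inner "email" aligned to string, the missing value kept as None
```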
### Environment info
- `datasets` version: 2.15.1.dev0
- Platform: Linux-5.15.0-89-generic-x86_64-with-glibc2.35
- Python version: 3.9.13
- `huggingface_hub` version: 0.19.4
- PyArrow version: 9.0.0
- Pandas version: 1.4.4
- `fsspec` version: 2023.6.0
I would be happy to fix this issue.
| Friendly bump, I would be happy to work on this issue once I get the go-ahead from the dev team. | 2024-01-13T15:33:20Z | [] | [] |
huggingface/datasets | 6,608 | huggingface__datasets-6608 | [
"6564"
] | bb8497b9dec2a3807c887b8184f902d1d8d7c25a | diff --git a/src/datasets/arrow_dataset.py b/src/datasets/arrow_dataset.py
--- a/src/datasets/arrow_dataset.py
+++ b/src/datasets/arrow_dataset.py
@@ -3539,7 +3539,8 @@ def init_buffer_and_writer():
def filter(
self,
function: Optional[Callable] = None,
- with_indices=False,
+ with_indices: bool = False,
+ with_rank: bool = False,
input_columns: Optional[Union[str, List[str]]] = None,
batched: bool = False,
batch_size: Optional[int] = 1000,
@@ -3559,14 +3560,18 @@ def filter(
Args:
function (`Callable`): Callable with one of the following signatures:
- - `function(example: Dict[str, Any]) -> bool` if `with_indices=False, batched=False`
- - `function(example: Dict[str, Any], indices: int) -> bool` if `with_indices=True, batched=False`
- - `function(example: Dict[str, List]) -> List[bool]` if `with_indices=False, batched=True`
- - `function(example: Dict[str, List], indices: List[int]) -> List[bool]` if `with_indices=True, batched=True`
+ - `function(example: Dict[str, Any]) -> bool` if `batched=False` and `with_indices=False` and `with_rank=False`
+ - `function(example: Dict[str, Any], *extra_args) -> bool` if `batched=False` and `with_indices=True` and/or `with_rank=True` (one extra arg for each)
+ - `function(batch: Dict[str, List]) -> List[bool]` if `batched=True` and `with_indices=False` and `with_rank=False`
+ - `function(batch: Dict[str, List], *extra_args) -> List[bool]` if `batched=True` and `with_indices=True` and/or `with_rank=True` (one extra arg for each)
If no function is provided, defaults to an always `True` function: `lambda x: True`.
with_indices (`bool`, defaults to `False`):
- Provide example indices to `function`. Note that in this case the signature of `function` should be `def function(example, idx): ...`.
+ Provide example indices to `function`. Note that in this case the
+ signature of `function` should be `def function(example, idx[, rank]): ...`.
+ with_rank (`bool`, defaults to `False`):
+ Provide process rank to `function`. Note that in this case the
+ signature of `function` should be `def function(example[, idx], rank): ...`.
input_columns (`str` or `List[str]`, *optional*):
The columns to be passed into `function` as
positional arguments. If `None`, a `dict` mapping to all formatted columns is passed as one argument.
@@ -3629,9 +3634,16 @@ def filter(
indices = self.map(
function=partial(
- get_indices_from_mask_function, function, batched, with_indices, input_columns, self._indices
+ get_indices_from_mask_function,
+ function,
+ batched,
+ with_indices,
+ with_rank,
+ input_columns,
+ self._indices,
),
with_indices=True,
+ with_rank=True,
features=Features({"indices": Value("uint64")}),
batched=True,
batch_size=batch_size,
@@ -6200,22 +6212,25 @@ def get_indices_from_mask_function(
function: Callable,
batched: bool,
with_indices: bool,
+ with_rank: bool,
input_columns: Optional[Union[str, List[str]]],
indices_mapping: Optional[Table] = None,
*args,
**fn_kwargs,
):
if batched:
- # we extract indices from args
- *inputs, indices = args
+ # we extract indices and rank from args
+ *inputs, indices, rank = args
+ additional_args = ()
if with_indices:
- mask = function(*inputs, indices, **fn_kwargs)
- else:
- mask = function(*inputs, **fn_kwargs)
+ additional_args += (indices,)
+ if with_rank:
+ additional_args += (rank,)
+ mask = function(*inputs, *additional_args, **fn_kwargs)
else:
# we get batched data (to do less look-ups) but `function` only accepts one example
# therefore we need to call `function` on each example of the batch to get the mask
- *inputs, indices = args
+ *inputs, indices, rank = args
mask = []
if input_columns is None:
# inputs only contains a batch of examples
@@ -6223,18 +6238,24 @@ def get_indices_from_mask_function(
num_examples = len(batch[next(iter(batch.keys()))])
for i in range(num_examples):
example = {key: batch[key][i] for key in batch}
- mask.append(
- function(example, indices[i], **fn_kwargs) if with_indices else function(example, **fn_kwargs)
- )
+ additional_args = ()
+ if with_indices:
+ additional_args += (indices[i],)
+ if with_rank:
+ additional_args += (rank,)
+ mask.append(function(example, *additional_args, **fn_kwargs))
else:
# inputs is a list of columns
columns: List[List] = inputs
num_examples = len(columns[0])
for i in range(num_examples):
input = [column[i] for column in columns]
- mask.append(
- function(*input, indices[i], **fn_kwargs) if with_indices else function(*input, **fn_kwargs)
- )
+ additional_args = ()
+ if with_indices:
+ additional_args += (indices[i],)
+ if with_rank:
+ additional_args += (rank,)
+ mask.append(function(*input, *additional_args, **fn_kwargs))
indices_array = [i for i, to_keep in zip(indices, mask) if to_keep]
if indices_mapping is not None:
indices_array = pa.array(indices_array, type=pa.uint64())
diff --git a/src/datasets/dataset_dict.py b/src/datasets/dataset_dict.py
--- a/src/datasets/dataset_dict.py
+++ b/src/datasets/dataset_dict.py
@@ -891,8 +891,9 @@ def map(
def filter(
self,
- function,
- with_indices=False,
+ function: Optional[Callable] = None,
+ with_indices: bool = False,
+ with_rank: bool = False,
input_columns: Optional[Union[str, List[str]]] = None,
batched: bool = False,
batch_size: Optional[int] = 1000,
@@ -909,14 +910,20 @@ def filter(
The transformation is applied to all the datasets of the dataset dictionary.
Args:
- function (`callable`):
- With one of the following signature:
- - `function(example: Dict[str, Any]) -> bool` if `with_indices=False, batched=False`
- - `function(example: Dict[str, Any], indices: int) -> bool` if `with_indices=True, batched=False`
- - `function(example: Dict[str, List]) -> List[bool]` if `with_indices=False, batched=True`
- - `function(example: Dict[str, List], indices: List[int]) -> List[bool]` if ``with_indices=True, batched=True`
+ function (`Callable`): Callable with one of the following signatures:
+
+ - `function(example: Dict[str, Any]) -> bool` if `batched=False` and `with_indices=False` and `with_rank=False`
+ - `function(example: Dict[str, Any], *extra_args) -> bool` if `batched=False` and `with_indices=True` and/or `with_rank=True` (one extra arg for each)
+ - `function(batch: Dict[str, List]) -> List[bool]` if `batched=True` and `with_indices=False` and `with_rank=False`
+ - `function(batch: Dict[str, List], *extra_args) -> List[bool]` if `batched=True` and `with_indices=True` and/or `with_rank=True` (one extra arg for each)
+
+ If no function is provided, defaults to an always `True` function: `lambda x: True`.
with_indices (`bool`, defaults to `False`):
- Provide example indices to `function`. Note that in this case the signature of `function` should be `def function(example, idx): ...`.
+ Provide example indices to `function`. Note that in this case the
+ signature of `function` should be `def function(example, idx[, rank]): ...`.
+ with_rank (`bool`, defaults to `False`):
+ Provide process rank to `function`. Note that in this case the
+ signature of `function` should be `def function(example[, idx], rank): ...`.
input_columns (`[Union[str, List[str]]]`, *optional*, defaults to `None`):
The columns to be passed into `function` as
positional arguments. If `None`, a dict mapping to all formatted columns is passed as one argument.
@@ -976,6 +983,7 @@ def filter(
k: dataset.filter(
function=function,
with_indices=with_indices,
+ with_rank=with_rank,
input_columns=input_columns,
batched=batched,
batch_size=batch_size,
| diff --git a/tests/test_arrow_dataset.py b/tests/test_arrow_dataset.py
--- a/tests/test_arrow_dataset.py
+++ b/tests/test_arrow_dataset.py
@@ -97,6 +97,10 @@ def picklable_filter_function(x):
return int(x["filename"].split("_")[-1]) < 10
+def picklable_filter_function_with_rank(x, r):
+ return r == 0
+
+
def assert_arrow_metadata_are_synced_with_dataset_features(dataset: Dataset):
assert dataset.data.schema.metadata is not None
assert b"huggingface" in dataset.data.schema.metadata
@@ -1774,6 +1778,18 @@ def test_filter_multiprocessing(self, in_memory):
self.assertEqual(len(dset_filter_first_ten.cache_files), 0 if in_memory else 2)
self.assertNotEqual(dset_filter_first_ten._fingerprint, fingerprint)
+ with tempfile.TemporaryDirectory() as tmp_dir: # with_rank
+ with self._create_dummy_dataset(in_memory, tmp_dir) as dset:
+ fingerprint = dset._fingerprint
+ with dset.filter(
+ picklable_filter_function_with_rank, num_proc=2, with_rank=True
+ ) as dset_filter_first_rank:
+ self.assertEqual(len(dset_filter_first_rank), min(len(dset) // 2, len(dset)))
+ self.assertDictEqual(dset.features, Features({"filename": Value("string")}))
+ self.assertDictEqual(dset_filter_first_rank.features, Features({"filename": Value("string")}))
+ self.assertEqual(len(dset_filter_first_rank.cache_files), 0 if in_memory else 2)
+ self.assertNotEqual(dset_filter_first_rank._fingerprint, fingerprint)
+
def test_filter_caching(self, in_memory):
with tempfile.TemporaryDirectory() as tmp_dir:
self._caplog.clear()
| `Dataset.filter` missing `with_rank` parameter
### Describe the bug
The linked issue should be reopened: https://github.com/huggingface/datasets/issues/6435
When I try to pass `with_rank` to `Dataset.filter()`, I get this:
`Dataset.filter() got an unexpected keyword argument 'with_rank'`
### Steps to reproduce the bug
Run notebook:
https://colab.research.google.com/drive/1WUNKph8BdP0on5ve3gQnh_PE0cFLQqTn?usp=sharing
### Expected behavior
Should work?
### Environment info
NVIDIA RTX 4090
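For reference, a minimal sketch of the call I would expect to work, mirroring how `map` already supports `with_rank` (the process rank is passed as an extra argument to the predicate):
```python
from datasets import Dataset

def keep_rank_zero(example, rank):
    return rank == 0

ds = Dataset.from_dict({"idx": list(range(100))})
filtered = ds.filter(keep_rank_zero, with_rank=True, num_proc=2)
# roughly half of the rows remain: those assigned to the worker with rank 0
```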
| 2024-01-22T15:19:16Z | [] | [] |
|
huggingface/datasets | 6,629 | huggingface__datasets-6629 | [
"6597"
] | bb8497b9dec2a3807c887b8184f902d1d8d7c25a | diff --git a/src/datasets/arrow_dataset.py b/src/datasets/arrow_dataset.py
--- a/src/datasets/arrow_dataset.py
+++ b/src/datasets/arrow_dataset.py
@@ -5365,13 +5365,14 @@ def push_to_hub(
api = HfApi(endpoint=config.HF_ENDPOINT, token=token)
- _ = api.create_repo(
+ repo_url = api.create_repo(
repo_id,
token=token,
repo_type="dataset",
private=private,
exist_ok=True,
)
+ repo_id = repo_url.repo_id
if revision is not None:
api.create_branch(repo_id, branch=revision, token=token, repo_type="dataset", exist_ok=True)
diff --git a/src/datasets/dataset_dict.py b/src/datasets/dataset_dict.py
--- a/src/datasets/dataset_dict.py
+++ b/src/datasets/dataset_dict.py
@@ -1685,13 +1685,14 @@ def push_to_hub(
api = HfApi(endpoint=config.HF_ENDPOINT, token=token)
- _ = api.create_repo(
+ repo_url = api.create_repo(
repo_id,
token=token,
repo_type="dataset",
private=private,
exist_ok=True,
)
+ repo_id = repo_url.repo_id
if revision is not None:
api.create_branch(repo_id, branch=revision, token=token, repo_type="dataset", exist_ok=True)
| diff --git a/tests/test_upstream_hub.py b/tests/test_upstream_hub.py
--- a/tests/test_upstream_hub.py
+++ b/tests/test_upstream_hub.py
@@ -13,7 +13,6 @@
import numpy as np
import pytest
from huggingface_hub import DatasetCard, HfApi
-from huggingface_hub.utils import RepositoryNotFoundError
from datasets import (
Audio,
@@ -71,9 +70,16 @@ def test_push_dataset_dict_to_hub_name_without_namespace(self, temporary_repo):
local_ds = DatasetDict({"train": ds})
with temporary_repo() as ds_name:
- # cannot create a repo without namespace
- with pytest.raises(RepositoryNotFoundError):
- local_ds.push_to_hub(ds_name.split("/")[-1], token=self._token)
+ local_ds.push_to_hub(ds_name.split("/")[-1], token=self._token)
+ hub_ds = load_dataset(ds_name, download_mode="force_redownload")
+
+ assert local_ds.column_names == hub_ds.column_names
+ assert list(local_ds["train"].features.keys()) == list(hub_ds["train"].features.keys())
+ assert local_ds["train"].features == hub_ds["train"].features
+
+ # Ensure that there is a single file on the repository that has the correct name
+ files = sorted(self._api.list_repo_files(ds_name, repo_type="dataset"))
+ assert files == [".gitattributes", "README.md", "data/train-00000-of-00001.parquet"]
def test_push_dataset_dict_to_hub_datasets_with_different_features(self, cleanup_repo):
ds_train = Dataset.from_dict({"x": [1, 2, 3], "y": [4, 5, 6]})
| Dataset.push_to_hub of a canonical dataset creates an additional dataset under the user namespace
While using `Dataset.push_to_hub` of a canonical dataset, an additional dataset was created under my user namespace.
## Steps to reproduce the bug
The command:
```python
commit_info = ds.push_to_hub(
"caner",
config_name="default",
commit_message="Convert dataset to Parquet",
commit_description="Convert dataset to Parquet.",
create_pr=True,
token=token,
)
```
creates the additional dataset `albertvillanova/caner`.
| It is caused by these code lines: https://github.com/huggingface/datasets/blob/9d6d16117a30ba345b0236407975f701c5b288d4/src/datasets/dataset_dict.py#L1688-L1694
Also note the information in the docstring: https://github.com/huggingface/datasets/blob/9d6d16117a30ba345b0236407975f701c5b288d4/src/datasets/dataset_dict.py#L1582-L1585
> Also accepts `<dataset_name>`, which will default to the namespace of the logged-in user.
This behavior was "reverted" by the PR:
- #6519
We have therefore contradictory requirements. We should decide:
- whether to support passing `dataset_name` without a user/org namespace, defaulting to the logged-in user (and not support canonical datasets)
- or vice-versa, to support canonical datasets and not support passing only dataset_name
As canonical datasets are "deprecated" (and will eventually disappear), I would choose the first option. However, if so, the Space to convert datasets to Parquet will not work for canonical datasets: https://huggingface.co/spaces/albertvillanova/convert-dataset-to-parquet
IIUC, this could also be "fixed" by `create_repo("dataset_name")` not defaulting to `create_repo("user/dataset_name")` (when the user's token is available), which would be consistent with the rest of the `HfApi` ops used in the `push_to_hub` implementation. This is a (small) breaking change for `huggingface_hub`, but justified to make the API more consistent.
I tag @Wauplin to have his opinion as well.
Hmm, creating repo with implicit namespace (e.g. `create_repo("dataset_name")`) is a convenient feature used in a lot of integrations. It is not consistent with other HfApi methods specifically because it is the method to create repos. Once the repo is created, the return value provides the explicit repo_id (`namespace/repo_name`) that has to be passed to every `HfApi` method. Otherwise, libraries/scripts would often need to do a `whoami` call to get the namespace before creating a repo.
Another solution for https://github.com/huggingface/datasets/issues/6597#issuecomment-1893746690 could be that implicit namespace is allowed (same as today) except if the `repo_id` is in a hard-coded list of canonical datasets. This list can be maintained automatically and should be slowly decreasing. **Caveat:** as a normal user I wouldn't be able to implicitly push to `imagenet-1k` if I wanted to push to `Wauplin/imagenet-1k`. Shouldn't be too problematic, no? Worst case, we would need to add a `whoami` call and allow implicit-canonical-name for non-HF users for instance (a bit too over-engineered IMO but doable).
As canonical datasets are going to disappear in the following couple of months, I would not make any effort on their support.
I propose reverting #6519, so that the behavior of `push_to_hub` is aligned with the one described in its docstring: "Also accepts `<dataset_name>`, which will default to the namespace of the logged-in user."
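For illustration, a minimal sketch of how the implicit namespace gets resolved from `create_repo`'s return value (assuming `huggingface_hub`'s `RepoUrl` return type and a logged-in user; the repo name is a placeholder):
```python
from huggingface_hub import HfApi

api = HfApi()  # token picked up from the environment or a previous login
repo_url = api.create_repo("my_dataset", repo_type="dataset", exist_ok=True)
repo_id = repo_url.repo_id  # e.g. "my-username/my_dataset", namespace filled in by the Hub
```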
I'm opening a PR. | 2024-01-29T15:36:52Z | [] | [] |
huggingface/datasets | 6,687 | huggingface__datasets-6687 | [
"6645"
] | b02be21047087c5ffc11cf1c072a5aceab517eba | diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -131,7 +131,7 @@
"multiprocess",
# to save datasets locally or on any filesystem
# minimum 2023.1.0 to support protocol=kwargs in fsspec's `open`, `get_fs_token_paths`, etc.: see https://github.com/fsspec/filesystem_spec/pull/1143
- "fsspec[http]>=2023.1.0,<=2023.10.0",
+ "fsspec[http]>=2023.1.0,<=2024.2.0",
# for data streaming via http
"aiohttp",
# To get datasets from the Datasets Hub on huggingface.co
diff --git a/src/datasets/data_files.py b/src/datasets/data_files.py
--- a/src/datasets/data_files.py
+++ b/src/datasets/data_files.py
@@ -47,8 +47,17 @@ class EmptyDatasetError(FileNotFoundError):
NON_WORDS_CHARS = "-._ 0-9"
if config.FSSPEC_VERSION < version.parse("2023.9.0"):
KEYWORDS_IN_PATH_NAME_BASE_PATTERNS = ["{keyword}[{sep}/]**", "**[{sep}/]{keyword}[{sep}/]**"]
-else:
+elif config.FSSPEC_VERSION < version.parse("2023.12.0"):
KEYWORDS_IN_PATH_NAME_BASE_PATTERNS = ["{keyword}[{sep}/]**", "**/*[{sep}/]{keyword}[{sep}/]**"]
+else:
+ KEYWORDS_IN_PATH_NAME_BASE_PATTERNS = [
+ "**/{keyword}[{sep}]*",
+ "**/{keyword}/**",
+ "**/*[{sep}]{keyword}[{sep}]*",
+ "**/*[{sep}]{keyword}[{sep}]*/**",
+ "**/{keyword}[{sep}]*/**",
+ "**/*[{sep}]{keyword}/**",
+ ]
DEFAULT_SPLITS = [Split.TRAIN, Split.VALIDATION, Split.TEST]
DEFAULT_PATTERNS_SPLIT_IN_PATH_NAME = {
@@ -303,11 +312,9 @@ def resolve_pattern(
- data/* to match all the files inside "data"
- data/** to match all the files inside "data" and its subdirectories
- The patterns are resolved using the fsspec glob.
-
- glob.glob, Path.glob, Path.match or fnmatch do not support ** with a prefix/suffix other than a forward slash /.
- For instance, this means **.json is the same as *.json. On the contrary, the fsspec glob has no limits regarding the ** prefix/suffix,
- resulting in **.json being equivalent to **/*.json.
+ The patterns are resolved using the fsspec glob. In fsspec>=2023.12.0 this is equivalent to
+ Python's glob.glob, Path.glob, Path.match and fnmatch where ** is unsupported with a prefix/suffix
+ other than a forward slash /.
More generally:
- '*' matches any character except a forward-slash (to match just the file or directory name)
| diff --git a/tests/test_data_files.py b/tests/test_data_files.py
--- a/tests/test_data_files.py
+++ b/tests/test_data_files.py
@@ -415,8 +415,6 @@ def test_DataFilesDict_from_patterns_in_dataset_repository(
("**", 4, None, "train"),
("**", 4, "data", "train"),
("**", 2, "data/subdir", "train"),
- ("**train*", 1, "data/subdir", "train"),
- ("**test*", 1, "data/subdir", "test"),
("**", 0, "data/subdir2", "train"),
],
)
@@ -452,14 +450,6 @@ def test_DataFilesDict_from_patterns_in_dataset_repository_hashing(hub_dataset_r
data_files2 = DataFilesDict(sorted(data_files1.items(), reverse=True))
assert Hasher.hash(data_files1) == Hasher.hash(data_files2)
- patterns2 = {"train": ["data/**train.txt"], "test": ["data/**test.txt"]}
- data_files2 = DataFilesDict.from_patterns(patterns2, hub_dataset_repo_path)
- assert Hasher.hash(data_files1) == Hasher.hash(data_files2)
-
- patterns2 = {"train": ["data/**train.txt"], "test": ["data/**train.txt"]}
- data_files2 = DataFilesDict.from_patterns(patterns2, hub_dataset_repo_path)
- assert Hasher.hash(data_files1) != Hasher.hash(data_files2)
-
# the tmpfs used to mock the hub repo is based on a local directory
# therefore os.stat is used to get the mtime of the data files
with patch("os.stat", return_value=os.stat(__file__)):
@@ -609,6 +599,18 @@ def ls(self, path, detail=True, refresh=True, **kwargs):
{"test": "data/my_test_file.txt"},
{"validation": "my_validation_dir/dataset.txt"},
{"validation": "data/my_validation_file.txt"},
+ {"train": "train_dir/dataset.txt"},
+ {"train": "data/train_file.txt"},
+ {"test": "test_dir/dataset.txt"},
+ {"test": "data/test_file.txt"},
+ {"validation": "validation_dir/dataset.txt"},
+ {"validation": "data/validation_file.txt"},
+ {"train": "my_train/dataset.txt"},
+ {"train": "data/my_train.txt"},
+ {"test": "my_test/dataset.txt"},
+ {"test": "data/my_test.txt"},
+ {"validation": "my_validation/dataset.txt"},
+ {"validation": "data/my_validation.txt"},
# With test<>eval aliases
{"test": "eval.txt"},
{"test": "data/eval.txt"},
@@ -631,6 +633,7 @@ def ls(self, path, detail=True, refresh=True, **kwargs):
{"test": "my-test-file.txt"},
{"test": "my_test_file.txt"},
{"test": "my test file.txt"},
+ {"test": "my-test_file.txt"},
{"test": "test00001.txt"},
],
)
| Support fsspec 2024.2
Support fsspec 2024.2.
First, we should address:
- #6644
| I'd be very grateful. This upper bound banished me straight into dependency hell today. :( | 2024-02-22T08:59:32Z | [] | [] |
huggingface/datasets | 6,696 | huggingface__datasets-6696 | [
"6695"
] | ad5b221c01a183a66cbf52a6d708f94e0cff0b53 | diff --git a/src/datasets/packaged_modules/json/json.py b/src/datasets/packaged_modules/json/json.py
--- a/src/datasets/packaged_modules/json/json.py
+++ b/src/datasets/packaged_modules/json/json.py
@@ -145,12 +145,20 @@ def _generate_tables(self, files):
except json.JSONDecodeError:
logger.error(f"Failed to read file '{file}' with error {type(e)}: {e}")
raise e
- # If possible, parse the file as a list of json objects and exit the loop
+ # If possible, parse the file as a list of json objects/strings and exit the loop
if isinstance(dataset, list): # list is the only sequence type supported in JSON
try:
- keys = set().union(*[row.keys() for row in dataset])
- mapping = {col: [row.get(col) for row in dataset] for col in keys}
- pa_table = pa.Table.from_pydict(mapping)
+ if dataset and isinstance(dataset[0], str):
+ pa_table_names = (
+ list(self.config.features)
+ if self.config.features is not None
+ else ["text"]
+ )
+ pa_table = pa.Table.from_arrays([pa.array(dataset)], names=pa_table_names)
+ else:
+ keys = set().union(*[row.keys() for row in dataset])
+ mapping = {col: [row.get(col) for row in dataset] for col in keys}
+ pa_table = pa.Table.from_pydict(mapping)
except (pa.ArrowInvalid, AttributeError) as e:
logger.error(f"Failed to read file '{file}' with error {type(e)}: {e}")
raise ValueError(f"Not able to read records in the JSON file at {file}.") from None
| diff --git a/tests/packaged_modules/test_json.py b/tests/packaged_modules/test_json.py
--- a/tests/packaged_modules/test_json.py
+++ b/tests/packaged_modules/test_json.py
@@ -54,6 +54,23 @@ def json_file_with_list_of_dicts(tmp_path):
return str(filename)
[email protected]
+def json_file_with_list_of_strings(tmp_path):
+ filename = tmp_path / "file_with_list_of_strings.json"
+ data = textwrap.dedent(
+ """\
+ [
+ "First text.",
+ "Second text.",
+ "Third text."
+ ]
+ """
+ )
+ with open(filename, "w") as f:
+ f.write(data)
+ return str(filename)
+
+
@pytest.fixture
def json_file_with_list_of_dicts_field(tmp_path):
filename = tmp_path / "file_with_list_of_dicts_field.json"
@@ -82,13 +99,18 @@ def json_file_with_list_of_dicts_field(tmp_path):
("jsonl_file_utf16_encoded", {"encoding": "utf-16"}),
("json_file_with_list_of_dicts", {}),
("json_file_with_list_of_dicts_field", {"field": "field3"}),
+ ("json_file_with_list_of_strings", {}),
],
)
def test_json_generate_tables(file_fixture, config_kwargs, request):
json = Json(**config_kwargs)
generator = json._generate_tables([[request.getfixturevalue(file_fixture)]])
pa_table = pa.concat_tables([table for _, table in generator])
- assert pa_table.to_pydict() == {"col_1": [-1, 1, 10], "col_2": [None, 2, 20]}
+ if file_fixture == "json_file_with_list_of_strings":
+ expected = {"text": ["First text.", "Second text.", "Third text."]}
+ else:
+ expected = {"col_1": [-1, 1, 10], "col_2": [None, 2, 20]}
+ assert pa_table.to_pydict() == expected
@pytest.mark.parametrize(
| Support JSON file with an array of strings
Support loading a dataset from a JSON file with an array of strings.
See: https://huggingface.co/datasets/CausalLM/Refined-Anime-Text/discussions/1
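For example, a minimal sketch of what this would enable (assuming a local `data.json` whose top level is a JSON array of strings):
```python
from datasets import load_dataset

# data.json contains: ["First text.", "Second text.", "Third text."]
ds = load_dataset("json", data_files="data.json", split="train")
print(ds[0])  # {'text': 'First text.'}
```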
| 2024-02-26T13:18:31Z | [] | [] |
|
huggingface/datasets | 6,743 | huggingface__datasets-6743 | [
"6738"
] | 19b40860acf3b3ba8db727fcf3b1b99ebb8d7e33 | diff --git a/src/datasets/features/features.py b/src/datasets/features/features.py
--- a/src/datasets/features/features.py
+++ b/src/datasets/features/features.py
@@ -1937,7 +1937,7 @@ def encode_column(self, column, column_name: str):
`list[Any]`
"""
column = cast_to_python_objects(column)
- return [encode_nested_example(self[column_name], obj) for obj in column]
+ return [encode_nested_example(self[column_name], obj, level=1) for obj in column]
def encode_batch(self, batch):
"""
@@ -1955,7 +1955,7 @@ def encode_batch(self, batch):
raise ValueError(f"Column mismatch between batch {set(batch)} and features {set(self)}")
for key, column in batch.items():
column = cast_to_python_objects(column)
- encoded_batch[key] = [encode_nested_example(self[key], obj) for obj in column]
+ encoded_batch[key] = [encode_nested_example(self[key], obj, level=1) for obj in column]
return encoded_batch
def decode_example(self, example: dict, token_per_repo_id: Optional[Dict[str, Union[str, bool, None]]] = None):
| diff --git a/tests/features/test_features.py b/tests/features/test_features.py
--- a/tests/features/test_features.py
+++ b/tests/features/test_features.py
@@ -405,6 +405,16 @@ def test_encode_batch_with_example_with_empty_first_elem():
assert encoded_batch == {"x": [[[0], [1]], [[], [1]]]}
+def test_encode_column_dict_with_none():
+ features = Features(
+ {
+ "x": {"a": ClassLabel(names=["a", "b"]), "b": Value("int32")},
+ }
+ )
+ encoded_column = features.encode_column([{"a": "a", "b": 1}, None], "x")
+ assert encoded_column == [{"a": 0, "b": 1}, None]
+
+
@pytest.mark.parametrize(
"feature",
[
| Dict feature is non-nullable while nested dict feature is
When I try to create a `Dataset` object with None values inside a dict column, like this:
```python
from datasets import Dataset, Features, Value
Dataset.from_dict(
{
"dict": [{"a": 0, "b": 0}, None],
}, features=Features(
{"dict": {"a": Value("int16"), "b": Value("int16")}}
)
)
```
I get `ValueError: Got None but expected a dictionary instead`.
At the same time, having None in a _nested_ dict feature works; for example, this doesn't throw any errors:
```python
from datasets import Dataset, Features, Value, Sequence
dataset = Dataset.from_dict(
{
"list_dict": [[{"a": 0, "b": 0}], None],
"sequence_dict": [[{"a": 0, "b": 0}], None],
}, features=Features({
"list_dict": [{"a": Value("int16"), "b": Value("int16")}],
"sequence_dict": Sequence({"a": Value("int16"), "b": Value("int16")}),
})
)
```
Other types of features also seem to be nullable (but I haven't checked all of them).
The version of `datasets` is the latest at the moment (2.18.0).
Is this expected behavior or a bug?
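At a lower level, a minimal sketch of what I would expect from feature encoding (a `None` standing in for the whole dict should simply be kept):
```python
from datasets import ClassLabel, Features, Value

features = Features({"x": {"a": ClassLabel(names=["a", "b"]), "b": Value("int32")}})
print(features.encode_column([{"a": "a", "b": 1}, None], "x"))
# expected: [{'a': 0, 'b': 1}, None]
```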
| It looks like a bug, by default every feature should be nullable. | 2024-03-19T16:54:22Z | [] | [] |
huggingface/datasets | 6,803 | huggingface__datasets-6803 | [
"6791"
] | 873b7c8e354bfbd1873272a03d1392550d2cac39 | diff --git a/src/datasets/search.py b/src/datasets/search.py
--- a/src/datasets/search.py
+++ b/src/datasets/search.py
@@ -6,6 +6,7 @@
import numpy as np
+from .features import Sequence
from .utils import logging
@@ -262,6 +263,11 @@ def add_vectors(
"""
import faiss # noqa: F811
+ if column and not isinstance(vectors.features[column], Sequence):
+ raise ValueError(
+ f"Wrong feature type for column '{column}'. Expected 1d array, got {vectors.features[column]}"
+ )
+
# Create index
if self.faiss_index is None:
size = len(vectors[0]) if column is None else len(vectors[0][column])
| diff --git a/tests/test_search.py b/tests/test_search.py
--- a/tests/test_search.py
+++ b/tests/test_search.py
@@ -34,6 +34,13 @@ def test_add_faiss_index(self):
self.assertEqual(examples["filename"][0], "my_name-train_29")
dset.drop_index("vecs")
+ def test_add_faiss_index_errors(self):
+ import faiss
+
+ dset: Dataset = self._create_dummy_dataset()
+ with pytest.raises(ValueError, match="Wrong feature type for column 'filename'"):
+ _ = dset.add_faiss_index("filename", batch_size=100, metric_type=faiss.METRIC_INNER_PRODUCT)
+
def test_add_faiss_index_from_external_arrays(self):
import faiss
| `add_faiss_index` raises ValueError: not enough values to unpack (expected 2, got 1)
### Describe the bug
Calling `add_faiss_index` on a `Dataset` with a column argument raises a ValueError. The following is the trace
```python
214 def replacement_add(self, x):
215 """Adds vectors to the index.
216 The index must be trained before vectors can be added to it.
217 The vectors are implicitly numbered in sequence. When `n` vectors are
(...)
224 `dtype` must be float32.
225 """
--> 227 n, d = x.shape
228 assert d == self.d
229 x = np.ascontiguousarray(x, dtype='float32')
ValueError: not enough values to unpack (expected 2, got 1)
```
### Steps to reproduce the bug
1. Load any dataset like `ds = datasets.load_dataset("wikimedia/wikipedia", "20231101.en")["train"]`
2. Add an FAISS index on any column `ds.add_faiss_index('title')`
### Expected behavior
The index should be created
### Environment info
- `datasets` version: 2.18.0
- Platform: Linux-6.5.0-26-generic-x86_64-with-glibc2.35
- Python version: 3.9.19
- `huggingface_hub` version: 0.22.2
- PyArrow version: 15.0.2
- Pandas version: 2.2.1
- `fsspec` version: 2024.2.0
- `faiss-cpu` version: 1.8.0
| I realized I was passing a string column to this instead of float. Is it possible to add a warning or error to prevent users from falsely believing there's a bug?
Hello!
I agree that we could add some safeguards around the type of `ds[column]`. At least for FAISS, we need the column to be made of embeddings as FAISS doesn't perform the embeddings itself.
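For illustration, a minimal sketch of the intended usage, where the indexed column holds fixed-size float vectors rather than strings (requires `faiss-cpu`; the embeddings below are random placeholders):
```python
import numpy as np
from datasets import Dataset

ds = Dataset.from_dict({
    "title": ["first doc", "second doc"],
    "embeddings": [np.random.rand(8).astype("float32") for _ in range(2)],
})
ds.add_faiss_index(column="embeddings")  # works: each row is a 1d float array
# ds.add_faiss_index(column="title")     # a string column should instead raise a clear ValueError
```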
I can propose a PR sometime this week.
@Dref360 thanks for the initiative! | 2024-04-11T14:54:30Z | [] | [] |
huggingface/datasets | 6,820 | huggingface__datasets-6820 | [
"6810"
] | 22bf5388748611a9255d8e17218d36d2f799f182 | diff --git a/src/datasets/commands/datasets_cli.py b/src/datasets/commands/datasets_cli.py
--- a/src/datasets/commands/datasets_cli.py
+++ b/src/datasets/commands/datasets_cli.py
@@ -3,6 +3,7 @@
from datasets.commands.convert import ConvertCommand
from datasets.commands.convert_to_parquet import ConvertToParquetCommand
+from datasets.commands.delete_from_hub import DeleteFromHubCommand
from datasets.commands.dummy_data import DummyDataCommand
from datasets.commands.env import EnvironmentCommand
from datasets.commands.run_beam import RunBeamCommand
@@ -28,6 +29,7 @@ def main():
RunBeamCommand.register_subcommand(commands_parser)
DummyDataCommand.register_subcommand(commands_parser)
ConvertToParquetCommand.register_subcommand(commands_parser)
+ DeleteFromHubCommand.register_subcommand(commands_parser)
# Parse args
args, unknown_args = parser.parse_known_args()
diff --git a/src/datasets/commands/delete_from_hub.py b/src/datasets/commands/delete_from_hub.py
new file mode 100644
--- /dev/null
+++ b/src/datasets/commands/delete_from_hub.py
@@ -0,0 +1,42 @@
+from argparse import ArgumentParser
+from typing import Optional
+
+from datasets.commands import BaseDatasetsCLICommand
+from datasets.hub import delete_from_hub
+
+
+def _command_factory(args):
+ return DeleteFromHubCommand(
+ args.dataset_id,
+ args.config_name,
+ args.token,
+ args.revision,
+ )
+
+
+class DeleteFromHubCommand(BaseDatasetsCLICommand):
+ @staticmethod
+ def register_subcommand(parser):
+ parser: ArgumentParser = parser.add_parser("delete_from_hub", help="Delete dataset config from the Hub")
+ parser.add_argument(
+ "dataset_id", help="source dataset ID, e.g. USERNAME/DATASET_NAME or ORGANIZATION/DATASET_NAME"
+ )
+ parser.add_argument("config_name", help="config name to delete")
+ parser.add_argument("--token", help="access token to the Hugging Face Hub")
+ parser.add_argument("--revision", help="source revision")
+ parser.set_defaults(func=_command_factory)
+
+ def __init__(
+ self,
+ dataset_id: str,
+ config_name: str,
+ token: Optional[str],
+ revision: Optional[str],
+ ):
+ self._dataset_id = dataset_id
+ self._config_name = config_name
+ self._token = token
+ self._revision = revision
+
+ def run(self) -> None:
+ _ = delete_from_hub(self._dataset_id, self._config_name, revision=self._revision, token=self._token)
diff --git a/src/datasets/hub.py b/src/datasets/hub.py
new file mode 100644
--- /dev/null
+++ b/src/datasets/hub.py
@@ -0,0 +1,87 @@
+from itertools import chain
+from typing import Optional, Union
+
+from huggingface_hub import (
+ CommitInfo,
+ CommitOperationAdd,
+ CommitOperationDelete,
+ DatasetCard,
+ DatasetCardData,
+ HfApi,
+ HfFileSystem,
+)
+
+from datasets import config
+from datasets.info import DatasetInfosDict
+from datasets.load import load_dataset_builder
+from datasets.utils.metadata import MetadataConfigs
+
+
+def delete_from_hub(
+ repo_id: str,
+ config_name: str,
+ revision: Optional[str] = None,
+ token: Optional[Union[bool, str]] = None,
+) -> CommitInfo:
+ """Delete a dataset configuration from a [data-only dataset](repository_structure) on the Hub.
+
+ Args:
+ repo_id (`str`): ID of the Hub dataset repository, in the following format: `<user>/<dataset_name>` or
+ `<org>/<dataset_name>`.
+ config_name (`str`): Name of the dataset configuration.
+ revision (`str`, *optional*): Branch to delete the configuration from. Defaults to the `"main"` branch.
+ token (`bool` or `str`, *optional*): Authentication token for the Hugging Face Hub.
+
+ Returns:
+ huggingface_hub.CommitInfo
+ """
+ operations = []
+ # data_files
+ fs = HfFileSystem(endpoint=config.HF_ENDPOINT, token=token)
+ builder = load_dataset_builder(repo_id, config_name, revision=revision, token=token, trust_remote_code=False)
+ for data_file in chain(*builder.config.data_files.values()):
+ data_file_resolved_path = fs.resolve_path(data_file)
+ if data_file_resolved_path.repo_id == repo_id:
+ operations.append(CommitOperationDelete(path_in_repo=data_file_resolved_path.path_in_repo))
+ # README.md
+ dataset_card = DatasetCard.load(repo_id)
+ # config_names
+ if dataset_card.data.get("config_names", None) and config_name in dataset_card.data["config_names"]:
+ dataset_card.data["config_names"].remove(config_name)
+ # metadata_configs
+ metadata_configs = MetadataConfigs.from_dataset_card_data(dataset_card.data)
+ if metadata_configs:
+ _ = metadata_configs.pop(config_name, None)
+ dataset_card_data = DatasetCardData()
+ metadata_configs.to_dataset_card_data(dataset_card_data)
+ if config.METADATA_CONFIGS_FIELD in dataset_card_data:
+ dataset_card.data[config.METADATA_CONFIGS_FIELD] = dataset_card_data[config.METADATA_CONFIGS_FIELD]
+ else:
+ _ = dataset_card.data.pop(config.METADATA_CONFIGS_FIELD, None)
+ # dataset_info
+ dataset_infos: DatasetInfosDict = DatasetInfosDict.from_dataset_card_data(dataset_card.data)
+ if dataset_infos:
+ _ = dataset_infos.pop(config_name, None)
+ dataset_card_data = DatasetCardData()
+ dataset_infos.to_dataset_card_data(dataset_card_data)
+ if "dataset_info" in dataset_card_data:
+ dataset_card.data["dataset_info"] = dataset_card_data["dataset_info"]
+ else:
+ _ = dataset_card.data.pop("dataset_info", None)
+ # Commit
+ operations.append(
+ CommitOperationAdd(path_in_repo=config.REPOCARD_FILENAME, path_or_fileobj=str(dataset_card).encode())
+ )
+ api = HfApi(endpoint=config.HF_ENDPOINT, token=token)
+ commit_info = api.create_commit(
+ repo_id,
+ operations=operations,
+ commit_message=f"Delete '{config_name}' config",
+ commit_description=f"Delete '{config_name}' config.",
+ token=token,
+ repo_type="dataset",
+ revision=revision,
+ create_pr=True,
+ )
+ print(f"You can find your PR to delete the dataset config at: {commit_info.pr_url}")
+ return commit_info
| diff --git a/tests/test_hub.py b/tests/test_hub.py
--- a/tests/test_hub.py
+++ b/tests/test_hub.py
@@ -1,7 +1,14 @@
+from textwrap import dedent
+from types import SimpleNamespace
+from unittest.mock import patch
from urllib.parse import quote
import pytest
+from huggingface_hub import CommitOperationAdd, CommitOperationDelete
+import datasets
+from datasets.config import METADATA_CONFIGS_FIELD
+from datasets.hub import delete_from_hub
from datasets.utils.hub import hf_dataset_url
@@ -11,3 +18,63 @@
def test_dataset_url(repo_id, filename, revision):
url = hf_dataset_url(repo_id=repo_id, filename=filename, revision=revision)
assert url == f"https://huggingface.co/datasets/{repo_id}/resolve/{revision or 'main'}/{quote(filename)}"
+
+
+def test_delete_from_hub(
+ temporary_repo, hf_api, hf_token, csv_path, tmp_path, ci_hub_config, ci_hfh_hf_hub_url
+) -> None:
+ with temporary_repo() as repo_id:
+ hf_api.create_repo(repo_id, token=hf_token, repo_type="dataset")
+ hf_api.upload_file(
+ path_or_fileobj=str(csv_path),
+ path_in_repo="cats/train/0000.csv",
+ repo_id=repo_id,
+ repo_type="dataset",
+ token=hf_token,
+ )
+ hf_api.upload_file(
+ path_or_fileobj=str(csv_path),
+ path_in_repo="dogs/train/0000.csv",
+ repo_id=repo_id,
+ repo_type="dataset",
+ token=hf_token,
+ )
+ readme_path = tmp_path / "README.md"
+ readme_path.write_text(
+ dedent(f"""\
+ ---
+ {METADATA_CONFIGS_FIELD}:
+ - config_name: cats
+ data_files:
+ - split: train
+ path: cats/train/*
+ - config_name: dogs
+ data_files:
+ - split: train
+ path: dogs/train/*
+ ---
+ """)
+ )
+ hf_api.upload_file(
+ token=hf_token,
+ path_or_fileobj=str(readme_path),
+ path_in_repo="README.md",
+ repo_id=repo_id,
+ repo_type="dataset",
+ )
+ commit_info = SimpleNamespace(
+ pr_url="https:///hub-ci.huggingface.co/datasets/__DUMMY_USER__/__DUMMY_DATASET__/refs%2Fpr%2F1"
+ )
+ with patch.object(datasets.hub.HfApi, "create_commit", return_value=commit_info) as mock_method:
+ delete_from_hub(repo_id, "dogs")
+ assert mock_method.called
+ assert mock_method.call_args.kwargs.get("commit_message") == "Delete 'dogs' config"
+ assert mock_method.call_args.kwargs.get("create_pr")
+ expected_operations = [
+ CommitOperationDelete(path_in_repo="dogs/train/0000.csv", is_folder=False),
+ CommitOperationAdd(
+ path_in_repo="README.md",
+ path_or_fileobj=b"---\nconfigs:\n- config_name: cats\n data_files:\n - split: train\n path: cats/train/*\n---\n",
+ ),
+ ]
+ assert mock_method.call_args.kwargs.get("operations") == expected_operations
| Allow deleting a subset/config from a no-script dataset
As proposed by @BramVanroy, it would be neat to have this functionality through the API.
| Probably best to implement this as a CLI command?
Thanks for your comment, @mariosasko. Or maybe both (in Python and as CLI command)? The Python command would be just the reverse of `push_to_hub`...
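For illustration, a rough sketch of what the two entry points could look like (the function and command names follow the patch above; the repo id, config name, and token are placeholders):
```python
from datasets.hub import delete_from_hub

# opens a pull request on the Hub that removes the config's data files and metadata entries
delete_from_hub("USERNAME/DATASET_NAME", "config_to_remove", token="hf_...")
```
On the command line, the equivalent would be `datasets-cli delete_from_hub USERNAME/DATASET_NAME config_to_remove --token <token>`.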
I am working on a draft implementation, so we can discuss the API and UX. | 2024-04-17T14:41:12Z | [] | [] |
huggingface/datasets | 6,871 | huggingface__datasets-6871 | [
"6869",
"6850"
] | a0bdb664436fad1d82c7988d5b413c76207f5037 | diff --git a/src/datasets/utils/py_utils.py b/src/datasets/utils/py_utils.py
--- a/src/datasets/utils/py_utils.py
+++ b/src/datasets/utils/py_utils.py
@@ -367,12 +367,15 @@ def _single_map_nested(args):
# Singleton first to spare some computation
if not isinstance(data_struct, dict) and not isinstance(data_struct, types):
- return function(data_struct)
+ if batched:
+ return function([data_struct])[0]
+ else:
+ return function(data_struct)
if (
batched
and not isinstance(data_struct, dict)
and isinstance(data_struct, types)
- and all(not isinstance(v, types) for v in data_struct)
+ and all(not isinstance(v, (dict, types)) for v in data_struct)
):
return [mapped_item for batch in iter_batched(data_struct, batch_size) for mapped_item in function(batch)]
@@ -450,11 +453,11 @@ def map_nested(
<Added version="2.5.0"/>
batched (`bool`, defaults to `False`):
Provide batch of items to `function`.
- <Added version="2.18.1"/>
+ <Added version="2.19.0"/>
batch_size (`int`, *optional*, defaults to `1000`):
Number of items per batch provided to `function` if `batched=True`.
If `batch_size <= 0` or `batch_size == None`, provide the full iterable as a single batch to `function`.
- <Added version="2.18.1"/>
+ <Added version="2.19.0"/>
types (`tuple`, *optional*): Additional types (besides `dict` values) to apply `function` recursively to their
elements.
disable_tqdm (`bool`, default `True`): Whether to disable the tqdm progressbar.
| diff --git a/tests/test_download_manager.py b/tests/test_download_manager.py
--- a/tests/test_download_manager.py
+++ b/tests/test_download_manager.py
@@ -8,6 +8,7 @@
from datasets.download.download_manager import DownloadManager
from datasets.download.streaming_download_manager import StreamingDownloadManager
from datasets.utils.file_utils import hash_url_to_filename, xopen
+from datasets.utils.py_utils import NestedDataStructure
URL = "http://www.mocksite.com/file1.txt"
@@ -28,19 +29,15 @@ def mock_request(*args, **kwargs):
return MockResponse()
[email protected]("urls_type", [str, list, dict])
[email protected]("urls_type", ["str", "list", "dict", "dict_of_dict"])
def test_download_manager_download(urls_type, tmp_path, monkeypatch):
import requests
monkeypatch.setattr(requests, "request", mock_request)
url = URL
- if issubclass(urls_type, str):
- urls = url
- elif issubclass(urls_type, list):
- urls = [url]
- elif issubclass(urls_type, dict):
- urls = {"train": url}
+ urls_types = {"str": url, "list": [url], "dict": {"train": url}, "dict_of_dict": {"train": {"en": url}}}
+ urls = urls_types[urls_type]
dataset_name = "dummy"
cache_subdir = "downloads"
cache_dir_root = tmp_path
@@ -50,29 +47,29 @@ def test_download_manager_download(urls_type, tmp_path, monkeypatch):
)
dl_manager = DownloadManager(dataset_name=dataset_name, download_config=download_config)
downloaded_paths = dl_manager.download(urls)
- input_urls = urls
- for downloaded_paths in [downloaded_paths]:
- if isinstance(urls, str):
- downloaded_paths = [downloaded_paths]
- input_urls = [urls]
- elif isinstance(urls, dict):
- assert "train" in downloaded_paths.keys()
- downloaded_paths = downloaded_paths.values()
- input_urls = urls.values()
- assert downloaded_paths
- for downloaded_path, input_url in zip(downloaded_paths, input_urls):
- assert downloaded_path == dl_manager.downloaded_paths[input_url]
- downloaded_path = Path(downloaded_path)
- parts = downloaded_path.parts
- assert parts[-1] == HASH
- assert parts[-2] == cache_subdir
- assert downloaded_path.exists()
- content = downloaded_path.read_text()
- assert content == CONTENT
- metadata_downloaded_path = downloaded_path.with_suffix(".json")
- assert metadata_downloaded_path.exists()
- metadata_content = json.loads(metadata_downloaded_path.read_text())
- assert metadata_content == {"url": URL, "etag": None}
+ assert isinstance(downloaded_paths, type(urls))
+ if "urls_type".startswith("list"):
+ assert len(downloaded_paths) == len(urls)
+ elif "urls_type".startswith("dict"):
+ assert downloaded_paths.keys() == urls.keys()
+ if "urls_type" == "dict_of_dict":
+ key = list(urls.keys())[0]
+ assert isinstance(downloaded_paths[key], dict)
+ assert downloaded_paths[key].keys() == urls[key].keys()
+ for downloaded_path, url in zip(
+ NestedDataStructure(downloaded_paths).flatten(), NestedDataStructure(urls).flatten()
+ ):
+ downloaded_path = Path(downloaded_path)
+ parts = downloaded_path.parts
+ assert parts[-1] == HASH
+ assert parts[-2] == cache_subdir
+ assert downloaded_path.exists()
+ content = downloaded_path.read_text()
+ assert content == CONTENT
+ metadata_downloaded_path = downloaded_path.with_suffix(".json")
+ assert metadata_downloaded_path.exists()
+ metadata_content = json.loads(metadata_downloaded_path.read_text())
+ assert metadata_content == {"url": URL, "etag": None}
@pytest.mark.parametrize("paths_type", [str, list, dict])
diff --git a/tests/test_py_utils.py b/tests/test_py_utils.py
--- a/tests/test_py_utils.py
+++ b/tests/test_py_utils.py
@@ -29,49 +29,39 @@ def add_one(i): # picklable for multiprocessing
return i + 1
+def add_one_to_batch(batch): # picklable for multiprocessing
+ return [i + 1 for i in batch]
+
+
@dataclass
class A:
x: int
y: str
[email protected]("batched, function", [(False, add_one), (True, add_one_to_batch)])
[email protected]("num_proc", [None, 2])
[email protected](
+ "data_struct, expected_result",
+ [
+ ({}, {}),
+ ([], []),
+ (1, 2),
+ ([1, 2], [2, 3]),
+ ({"a": 1, "b": 2}, {"a": 2, "b": 3}),
+ ({"a": [1, 2], "b": [3, 4]}, {"a": [2, 3], "b": [4, 5]}),
+ ({"a": {"1": 1}, "b": {"2": 2}}, {"a": {"1": 2}, "b": {"2": 3}}),
+ ({"a": 1, "b": [2, 3], "c": {"1": 4}}, {"a": 2, "b": [3, 4], "c": {"1": 5}}),
+ ({"a": 1, "b": 2, "c": 3, "d": 4}, {"a": 2, "b": 3, "c": 4, "d": 5}),
+ ],
+)
+def test_map_nested(data_struct, expected_result, num_proc, batched, function):
+ assert map_nested(function, data_struct, num_proc=num_proc, batched=batched) == expected_result
+
+
class PyUtilsTest(TestCase):
def test_map_nested(self):
- s1 = {}
- s2 = []
- s3 = 1
- s4 = [1, 2]
- s5 = {"a": 1, "b": 2}
- s6 = {"a": [1, 2], "b": [3, 4]}
- s7 = {"a": {"1": 1}, "b": 2}
- s8 = {"a": 1, "b": 2, "c": 3, "d": 4}
- expected_map_nested_s1 = {}
- expected_map_nested_s2 = []
- expected_map_nested_s3 = 2
- expected_map_nested_s4 = [2, 3]
- expected_map_nested_s5 = {"a": 2, "b": 3}
- expected_map_nested_s6 = {"a": [2, 3], "b": [4, 5]}
- expected_map_nested_s7 = {"a": {"1": 2}, "b": 3}
- expected_map_nested_s8 = {"a": 2, "b": 3, "c": 4, "d": 5}
- self.assertEqual(map_nested(add_one, s1), expected_map_nested_s1)
- self.assertEqual(map_nested(add_one, s2), expected_map_nested_s2)
- self.assertEqual(map_nested(add_one, s3), expected_map_nested_s3)
- self.assertEqual(map_nested(add_one, s4), expected_map_nested_s4)
- self.assertEqual(map_nested(add_one, s5), expected_map_nested_s5)
- self.assertEqual(map_nested(add_one, s6), expected_map_nested_s6)
- self.assertEqual(map_nested(add_one, s7), expected_map_nested_s7)
- self.assertEqual(map_nested(add_one, s8), expected_map_nested_s8)
-
num_proc = 2
- self.assertEqual(map_nested(add_one, s1, num_proc=num_proc), expected_map_nested_s1)
- self.assertEqual(map_nested(add_one, s2, num_proc=num_proc), expected_map_nested_s2)
- self.assertEqual(map_nested(add_one, s3, num_proc=num_proc), expected_map_nested_s3)
- self.assertEqual(map_nested(add_one, s4, num_proc=num_proc), expected_map_nested_s4)
- self.assertEqual(map_nested(add_one, s5, num_proc=num_proc), expected_map_nested_s5)
- self.assertEqual(map_nested(add_one, s6, num_proc=num_proc), expected_map_nested_s6)
- self.assertEqual(map_nested(add_one, s7, num_proc=num_proc), expected_map_nested_s7)
- self.assertEqual(map_nested(add_one, s8, num_proc=num_proc), expected_map_nested_s8)
-
sn1 = {"a": np.eye(2), "b": np.zeros(3), "c": np.ones(2)}
expected_map_nested_sn1_sum = {"a": 2, "b": 0, "c": 2}
expected_map_nested_sn1_int = {
| Download is broken for dict of dicts: FileNotFoundError
It seems there is a bug when downloading a dict of dicts of URLs introduced by:
- #6794
## Steps to reproduce the bug:
```python
from datasets import DownloadManager
dl_manager = DownloadManager()
paths = dl_manager.download({"train": {"frr": "hf://datasets/wikimedia/wikipedia/20231101.frr/train-00000-of-00001.parquet"}})
```
Stack trace:
```
---------------------------------------------------------------------------
FileNotFoundError Traceback (most recent call last)
<ipython-input-7-0e0d76d25b09> in <module>
----> 1 paths = dl_manager.download({"train": {"frr": "hf://datasets/wikimedia/wikipedia/20231101.frr/train-00000-of-00001.parquet"}})
.../huggingface/datasets/src/datasets/download/download_manager.py in download(self, url_or_urls)
255 start_time = datetime.now()
256 with stack_multiprocessing_download_progress_bars():
--> 257 downloaded_path_or_paths = map_nested(
258 download_func,
259 url_or_urls,
.../huggingface/datasets/src/datasets/utils/py_utils.py in map_nested(function, data_struct, dict_only, map_list, map_tuple, map_numpy, num_proc, parallel_min_length, batched, batch_size, types, disable_tqdm, desc)
506 batch_size = max(len(iterable) // num_proc + int(len(iterable) % num_proc > 0), 1)
507 iterable = list(iter_batched(iterable, batch_size))
--> 508 mapped = [
509 _single_map_nested((function, obj, batched, batch_size, types, None, True, None))
510 for obj in hf_tqdm(iterable, disable=disable_tqdm, desc=desc)
.../huggingface/datasets/src/datasets/utils/py_utils.py in <listcomp>(.0)
507 iterable = list(iter_batched(iterable, batch_size))
508 mapped = [
--> 509 _single_map_nested((function, obj, batched, batch_size, types, None, True, None))
510 for obj in hf_tqdm(iterable, disable=disable_tqdm, desc=desc)
511 ]
.../huggingface/datasets/src/datasets/utils/py_utils.py in _single_map_nested(args)
375 and all(not isinstance(v, types) for v in data_struct)
376 ):
--> 377 return [mapped_item for batch in iter_batched(data_struct, batch_size) for mapped_item in function(batch)]
378
379 # Reduce logging to keep things readable in multiprocessing with tqdm
.../huggingface/datasets/src/datasets/utils/py_utils.py in <listcomp>(.0)
375 and all(not isinstance(v, types) for v in data_struct)
376 ):
--> 377 return [mapped_item for batch in iter_batched(data_struct, batch_size) for mapped_item in function(batch)]
378
379 # Reduce logging to keep things readable in multiprocessing with tqdm
.../huggingface/datasets/src/datasets/download/download_manager.py in _download_batched(self, url_or_filenames, download_config)
311 )
312 else:
--> 313 return [
314 self._download_single(url_or_filename, download_config=download_config)
315 for url_or_filename in url_or_filenames
.../huggingface/datasets/src/datasets/download/download_manager.py in <listcomp>(.0)
312 else:
313 return [
--> 314 self._download_single(url_or_filename, download_config=download_config)
315 for url_or_filename in url_or_filenames
316 ]
.../huggingface/datasets/src/datasets/download/download_manager.py in _download_single(self, url_or_filename, download_config)
321 # append the relative path to the base_path
322 url_or_filename = url_or_path_join(self._base_path, url_or_filename)
--> 323 out = cached_path(url_or_filename, download_config=download_config)
324 out = tracked_str(out)
325 out.set_origin(url_or_filename)
.../huggingface/datasets/src/datasets/utils/file_utils.py in cached_path(url_or_filename, download_config, **download_kwargs)
220 elif is_local_path(url_or_filename):
221 # File, but it doesn't exist.
--> 222 raise FileNotFoundError(f"Local file {url_or_filename} doesn't exist")
223 else:
224 # Something unknown
FileNotFoundError: Local file .../huggingface/datasets/{'frr': 'hf:/datasets/wikimedia/wikipedia/20231101.frr/train-00000-of-00001.parquet'} doesn't exist
```
Related to:
- #6850
Problem loading voxpopuli dataset
### Describe the bug
```
Exception has occurred: FileNotFoundError
Couldn't find file at https://huggingface.co/datasets/facebook/voxpopuli/resolve/main/{'en': 'data/en/asr_train.tsv'}
```
There is an error in the logic for building the link URL. The link should be https://huggingface.co/datasets/facebook/voxpopuli/resolve/main/data/en/asr_train.tsv
Basically, there should be links directly under ```metadata["train"]```, not under ```metadata["train"][self.config.languages[0]]```.
The same applies to the audio URLs.
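A rough illustration (plain Python, not the actual loader code) of how the malformed URL above can arise: when the per-language dict is passed through un-mapped, it gets stringified into the joined path.
```python
# Hypothetical reconstruction of the failure mode, using the values from the
# error message above.
base = "https://huggingface.co/datasets/facebook/voxpopuli/resolve/main/"
metadata = {"en": "data/en/asr_train.tsv"}  # should have been unpacked per language

print(base + str(metadata))
# https://huggingface.co/datasets/facebook/voxpopuli/resolve/main/{'en': 'data/en/asr_train.tsv'}
```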
### Steps to reproduce the bug
```
from datasets import load_dataset
dataset = load_dataset("facebook/voxpopuli","en")
```
### Expected behavior
Dataset should be loaded successfully.
### Environment info
- `datasets` version: 2.19.0
- Platform: Linux-5.15.0-1041-aws-x86_64-with-glibc2.31
- Python version: 3.10.13
- `huggingface_hub` version: 0.22.2
- PyArrow version: 16.0.0
- Pandas version: 2.2.0
- `fsspec` version: 2023.12.2
|
Version 2.18 works without problem.
@Namangarg110 @mohsen-goodarzi The bug appears because the number of URLs is less than 16 and the algorithm is meant to work in the previously created single-URL mode, as stated on line 314: https://github.com/huggingface/datasets/blob/1bf8a46cc7b096d5c547ea3794f6a4b6c31ea762/src/datasets/download/download_manager.py#L314
In addition, the `map_nested` function was previously supported without batching, and that was meant to be the default behavior.
One of the shortest workarounds would be changing this part of the download manager to the following:
```
if len(url_or_urls) >= 16:
download_func = partial(self._download_batched, download_config=download_config)
else:
download_func = partial(self._download_single, download_config=download_config)
start_time = datetime.now()
with stack_multiprocessing_download_progress_bars():
downloaded_path_or_paths = map_nested(
download_func,
url_or_urls,
map_tuple=True,
num_proc=download_config.num_proc,
desc="Downloading data files",
batched=True if len(url_or_urls) >= 16 else False,
batch_size=-1,
)
```
I would suggest checking other datasets for similar issues and making a pull request.
Thanks for reporting @Namangarg110 and thanks for the investigation @MilanaShhanukova.
Apparently, there is an issue with the download functionality.
I am proposing a fix. | 2024-05-06T06:06:52Z | [] | [] |
huggingface/datasets | 6,914 | huggingface__datasets-6914 | [
"6913"
] | 60d21efbc01e15d0b596ac1072750cbecd91548a | diff --git a/src/datasets/packaged_modules/json/json.py b/src/datasets/packaged_modules/json/json.py
--- a/src/datasets/packaged_modules/json/json.py
+++ b/src/datasets/packaged_modules/json/json.py
@@ -15,6 +15,14 @@
logger = datasets.utils.logging.get_logger(__name__)
+def ujson_dumps(*args, **kwargs):
+ try:
+ return pd.io.json.ujson_dumps(*args, **kwargs)
+ except AttributeError:
+ # Before pandas-2.2.0, ujson_dumps was renamed to dumps: import ujson_dumps as dumps
+ return pd.io.json.dumps(*args, **kwargs)
+
+
def ujson_loads(*args, **kwargs):
try:
return pd.io.json.ujson_loads(*args, **kwargs)
@@ -85,21 +93,16 @@ def _cast_table(self, pa_table: pa.Table) -> pa.Table:
def _generate_tables(self, files):
for file_idx, file in enumerate(itertools.chain.from_iterable(files)):
- # If the file is one json object and if we need to look at the list of items in one specific field
+ # If the file is one json object and if we need to look at the items in one specific field
if self.config.field is not None:
with open(file, encoding=self.config.encoding, errors=self.config.encoding_errors) as f:
dataset = ujson_loads(f.read())
-
# We keep only the field we are interested in
dataset = dataset[self.config.field]
-
- # We accept two format: a list of dicts or a dict of lists
- if isinstance(dataset, (list, tuple)):
- keys = set().union(*[row.keys() for row in dataset])
- mapping = {col: [row.get(col) for row in dataset] for col in keys}
- else:
- mapping = dataset
- pa_table = pa.Table.from_pydict(mapping)
+ df = pd.read_json(io.StringIO(ujson_dumps(dataset)), dtype_backend="pyarrow")
+ if df.columns.tolist() == [0]:
+ df.columns = list(self.config.features) if self.config.features else ["text"]
+ pa_table = pa.Table.from_pandas(df, preserve_index=False)
yield file_idx, self._cast_table(pa_table)
# If the file has one json object per line
@@ -150,39 +153,22 @@ def _generate_tables(self, files):
with open(
file, encoding=self.config.encoding, errors=self.config.encoding_errors
) as f:
- dataset = ujson_loads(f.read())
+ df = pd.read_json(f, dtype_backend="pyarrow")
except ValueError:
- logger.error(f"Failed to read file '{file}' with error {type(e)}: {e}")
+ logger.error(f"Failed to load JSON from file '{file}' with error {type(e)}: {e}")
raise e
- # If possible, parse the file as a list of json objects/strings and exit the loop
- if isinstance(dataset, list): # list is the only sequence type supported in JSON
- try:
- if dataset and isinstance(dataset[0], str):
- pa_table_names = (
- list(self.config.features)
- if self.config.features is not None
- else ["text"]
- )
- pa_table = pa.Table.from_arrays([pa.array(dataset)], names=pa_table_names)
- else:
- keys = set().union(*[row.keys() for row in dataset])
- mapping = {col: [row.get(col) for row in dataset] for col in keys}
- pa_table = pa.Table.from_pydict(mapping)
- except (pa.ArrowInvalid, AttributeError) as e:
- logger.error(f"Failed to read file '{file}' with error {type(e)}: {e}")
- raise ValueError(f"Not able to read records in the JSON file at {file}.") from None
- yield file_idx, self._cast_table(pa_table)
- break
- else:
- logger.error(f"Failed to read file '{file}' with error {type(e)}: {e}")
+ if df.columns.tolist() == [0]:
+ df.columns = list(self.config.features) if self.config.features else ["text"]
+ try:
+ pa_table = pa.Table.from_pandas(df, preserve_index=False)
+ except pa.ArrowInvalid as e:
+ logger.error(
+ f"Failed to convert pandas DataFrame to Arrow Table from file '{file}' with error {type(e)}: {e}"
+ )
raise ValueError(
- f"Not able to read records in the JSON file at {file}. "
- f"You should probably indicate the field of the JSON file containing your records. "
- f"This JSON file contain the following fields: {str(list(dataset.keys()))}. "
- f"Select the correct one and provide it as `field='XXX'` to the dataset loading method. "
+ f"Failed to convert pandas DataFrame to Arrow Table from file {file}."
) from None
- # Uncomment for debugging (will print the Arrow table size and elements)
- # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
- # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
+ yield file_idx, self._cast_table(pa_table)
+ break
yield (file_idx, batch_idx), self._cast_table(pa_table)
batch_idx += 1
| diff --git a/tests/packaged_modules/test_json.py b/tests/packaged_modules/test_json.py
--- a/tests/packaged_modules/test_json.py
+++ b/tests/packaged_modules/test_json.py
@@ -92,6 +92,85 @@ def json_file_with_list_of_dicts_field(tmp_path):
return str(filename)
[email protected]
+def json_file_with_list_of_strings_field(tmp_path):
+ path = tmp_path / "file.json"
+ data = textwrap.dedent(
+ """\
+ {
+ "field1": 1,
+ "field2": "aabb",
+ "field3": [
+ "First text.",
+ "Second text.",
+ "Third text."
+ ]
+ }
+ """
+ )
+ with open(path, "w") as f:
+ f.write(data)
+ return str(path)
+
+
[email protected]
+def json_file_with_dict_of_lists_field(tmp_path):
+ path = tmp_path / "file.json"
+ data = textwrap.dedent(
+ """\
+ {
+ "field1": 1,
+ "field2": "aabb",
+ "field3": {
+ "col_1": [-1, 1, 10],
+ "col_2": [null, 2, 20]
+ }
+ }
+ """
+ )
+ with open(path, "w") as f:
+ f.write(data)
+ return str(path)
+
+
[email protected]
+def json_file_with_list_of_dicts_with_sorted_columns(tmp_path):
+ path = tmp_path / "file.json"
+ data = textwrap.dedent(
+ """\
+ [
+ {"ID": 0, "Language": "Language-0", "Topic": "Topic-0"},
+ {"ID": 1, "Language": "Language-1", "Topic": "Topic-1"},
+ {"ID": 2, "Language": "Language-2", "Topic": "Topic-2"}
+ ]
+ """
+ )
+ with open(path, "w") as f:
+ f.write(data)
+ return str(path)
+
+
[email protected]
+def json_file_with_list_of_dicts_with_sorted_columns_field(tmp_path):
+ path = tmp_path / "file.json"
+ data = textwrap.dedent(
+ """\
+ {
+ "field1": 1,
+ "field2": "aabb",
+ "field3": [
+ {"ID": 0, "Language": "Language-0", "Topic": "Topic-0"},
+ {"ID": 1, "Language": "Language-1", "Topic": "Topic-1"},
+ {"ID": 2, "Language": "Language-2", "Topic": "Topic-2"}
+ ]
+ }
+ """
+ )
+ with open(path, "w") as f:
+ f.write(data)
+ return str(path)
+
+
@pytest.mark.parametrize(
"file_fixture, config_kwargs",
[
@@ -100,13 +179,15 @@ def json_file_with_list_of_dicts_field(tmp_path):
("json_file_with_list_of_dicts", {}),
("json_file_with_list_of_dicts_field", {"field": "field3"}),
("json_file_with_list_of_strings", {}),
+ ("json_file_with_list_of_strings_field", {"field": "field3"}),
+ ("json_file_with_dict_of_lists_field", {"field": "field3"}),
],
)
def test_json_generate_tables(file_fixture, config_kwargs, request):
json = Json(**config_kwargs)
generator = json._generate_tables([[request.getfixturevalue(file_fixture)]])
pa_table = pa.concat_tables([table for _, table in generator])
- if file_fixture == "json_file_with_list_of_strings":
+ if "list_of_strings" in file_fixture:
expected = {"text": ["First text.", "Second text.", "Third text."]}
else:
expected = {"col_1": [-1, 1, 10], "col_2": [None, 2, 20]}
@@ -140,3 +221,17 @@ def test_json_generate_tables_with_missing_features(file_fixture, config_kwargs,
generator = json._generate_tables([[request.getfixturevalue(file_fixture)]])
pa_table = pa.concat_tables([table for _, table in generator])
assert pa_table.to_pydict() == {"col_1": [-1, 1, 10], "col_2": [None, 2, 20], "missing_col": [None, None, None]}
+
+
[email protected](
+ "file_fixture, config_kwargs",
+ [
+ ("json_file_with_list_of_dicts_with_sorted_columns", {}),
+ ("json_file_with_list_of_dicts_with_sorted_columns_field", {"field": "field3"}),
+ ],
+)
+def test_json_generate_tables_with_sorted_columns(file_fixture, config_kwargs, request):
+ builder = Json(**config_kwargs)
+ generator = builder._generate_tables([[request.getfixturevalue(file_fixture)]])
+ pa_table = pa.concat_tables([table for _, table in generator])
+ assert pa_table.column_names == ["ID", "Language", "Topic"]
| Column order is nondeterministic when loading from JSON
As reported by @meg-huggingface, the order of the JSON object keys is not preserved while loading a dataset from a JSON file with a list of objects.
For example, when loading a JSON files with a list of objects, each with the following ordered keys:
- [ID, Language, Topic],
the resulting dataset may have columns:
- [ID, Topic, Language], or
- [Topic, Language, ID], or
- [Topic, ID, Language],...
This issue is caused by the use of a Python set (which does not preserve the order):
https://github.com/huggingface/datasets/blob/60d21efbc01e15d0b596ac1072750cbecd91548a/src/datasets/packaged_modules/json/json.py#L168
introduced in
- #5772
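A minimal, standalone sketch (plain Python, not the builder code itself) of why the set-based key collection can scramble column order while an insertion-ordered collection keeps it:
```python
rows = [
    {"ID": 0, "Language": "Language-0", "Topic": "Topic-0"},
    {"ID": 1, "Language": "Language-1", "Topic": "Topic-1"},
]

# Set-based collection: iteration order is arbitrary, so the columns may come
# out as e.g. ["Topic", "ID", "Language"].
keys_from_set = set().union(*[row.keys() for row in rows])

# Dict-based collection: dicts preserve insertion order (Python 3.7+), so the
# original order ["ID", "Language", "Topic"] is kept.
keys_in_order = {key: None for row in rows for key in row}

print(list(keys_from_set), list(keys_in_order))
```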
| 2024-05-22T09:58:54Z | [] | [] |
|
huggingface/datasets | 6,925 | huggingface__datasets-6925 | [
"6918",
"6939"
] | b442aa2d3efc83ba0dc369adaa63cc496e3d9836 | diff --git a/src/datasets/load.py b/src/datasets/load.py
--- a/src/datasets/load.py
+++ b/src/datasets/load.py
@@ -1235,7 +1235,12 @@ def get_module(self) -> DatasetModule:
pass
metadata_configs = MetadataConfigs.from_dataset_card_data(dataset_card_data)
dataset_infos = DatasetInfosDict.from_dataset_card_data(dataset_card_data)
- if config.USE_PARQUET_EXPORT: # maybe don't use the infos from the parquet export
+ # Use the infos from the parquet export except in some cases:
+ if self.data_dir or self.data_files or (self.revision and self.revision != "main"):
+ use_exported_dataset_infos = False
+ else:
+ use_exported_dataset_infos = True
+ if config.USE_PARQUET_EXPORT and use_exported_dataset_infos:
try:
exported_dataset_infos = _dataset_viewer.get_exported_dataset_infos(
dataset=self.name, revision=self.revision, token=self.download_config.token
| diff --git a/tests/test_load.py b/tests/test_load.py
--- a/tests/test_load.py
+++ b/tests/test_load.py
@@ -1267,6 +1267,21 @@ def test_load_dataset_cached_local_script(dataset_loading_script_dir, data_dir,
assert f"Dataset '{SAMPLE_DATASET_NAME_THAT_DOESNT_EXIST}' doesn't exist on the Hub" in str(exc_info.value)
[email protected]
[email protected](
+ "kwargs, expected_train_num_rows, expected_test_num_rows",
+ [
+ ({}, 2, 2),
+ ({"data_dir": "data1"}, 1, 1), # GH-6918: NonMatchingSplitsSizesError
+ ({"data_files": "data1/train.txt"}, 1, None), # GH-6939: ExpectedMoreSplits
+ ],
+)
+def test_load_dataset_without_script_from_hub(kwargs, expected_train_num_rows, expected_test_num_rows):
+ dataset = load_dataset(SAMPLE_DATASET_IDENTIFIER3, **kwargs)
+ assert dataset["train"].num_rows == expected_train_num_rows
+ assert (dataset["test"].num_rows == expected_test_num_rows) if expected_test_num_rows else ("test" not in dataset)
+
+
@pytest.mark.integration
@pytest.mark.parametrize("stream_from_cache, ", [False, True])
def test_load_dataset_cached_from_hub(stream_from_cache, caplog):
| NonMatchingSplitsSizesError when using data_dir
### Describe the bug
Loading a dataset with a data_dir argument generates a NonMatchingSplitsSizesError if there are multiple directories in the dataset.
This appears to happen because the expected split is calculated based on the data in all the directories whereas the recorded split is calculated based on the data in the directory specified using the data_dir argument.
This is recent behavior: until the past few weeks, loading with the data_dir argument worked without any issue.
### Steps to reproduce the bug
Simple test dataset available here: https://huggingface.co/datasets/srehaag/hf-bug-temp
The dataset contains two directories "data1" and "data2", each with a file called "train.parquet" with a 2 x 5 table.
from datasets import load_dataset
dataset = load_dataset("srehaag/hf-bug-temp", data_dir = "data1")
Generates:
---------------------------------------------------------------------------
NonMatchingSplitsSizesError Traceback (most recent call last)
Cell In[3], line 2
1 from datasets import load_dataset
----> 2 dataset = load_dataset("srehaag/hf-bug-temp", data_dir = "data1")
File ~/.python/current/lib/python3.10/site-packages/datasets/load.py:2609, in load_dataset(path, name, data_dir, data_files, split, cache_dir, features, download_config, download_mode, verification_mode, ignore_verifications, keep_in_memory, save_infos, revision, token, use_auth_token, task, streaming, num_proc, storage_options, trust_remote_code, **config_kwargs)
2606 return builder_instance.as_streaming_dataset(split=split)
2608 # Download and prepare data
-> 2609 builder_instance.download_and_prepare(
2610 download_config=download_config,
2611 download_mode=download_mode,
2612 verification_mode=verification_mode,
2613 num_proc=num_proc,
2614 storage_options=storage_options,
2615 )
2617 # Build dataset for splits
2618 keep_in_memory = (
2619 keep_in_memory if keep_in_memory is not None else is_small_dataset(builder_instance.info.dataset_size)
2620 )
File ~/.python/current/lib/python3.10/site-packages/datasets/builder.py:1027, in DatasetBuilder.download_and_prepare(self, output_dir, download_config, download_mode, verification_mode, ignore_verifications, try_from_hf_gcs, dl_manager, base_path, use_auth_token, file_format, max_shard_size, num_proc, storage_options, **download_and_prepare_kwargs)
1025 if num_proc is not None:
1026 prepare_split_kwargs["num_proc"] = num_proc
-> 1027 self._download_and_prepare(
1028 dl_manager=dl_manager,
1029 verification_mode=verification_mode,
1030 **prepare_split_kwargs,
1031 **download_and_prepare_kwargs,
1032 )
1033 # Sync info
1034 self.info.dataset_size = sum(split.num_bytes for split in self.info.splits.values())
File ~/.python/current/lib/python3.10/site-packages/datasets/builder.py:1140, in DatasetBuilder._download_and_prepare(self, dl_manager, verification_mode, **prepare_split_kwargs)
1137 dl_manager.manage_extracted_files()
1139 if verification_mode == VerificationMode.BASIC_CHECKS or verification_mode == VerificationMode.ALL_CHECKS:
-> 1140 verify_splits(self.info.splits, split_dict)
1142 # Update the info object with the splits.
1143 self.info.splits = split_dict
File ~/.python/current/lib/python3.10/site-packages/datasets/utils/info_utils.py:101, in verify_splits(expected_splits, recorded_splits)
95 bad_splits = [
96 {"expected": expected_splits[name], "recorded": recorded_splits[name]}
97 for name in expected_splits
98 if expected_splits[name].num_examples != recorded_splits[name].num_examples
99 ]
100 if len(bad_splits) > 0:
--> 101 raise NonMatchingSplitsSizesError(str(bad_splits))
102 logger.info("All the splits matched successfully.")
NonMatchingSplitsSizesError: [{'expected': SplitInfo(name='train', num_bytes=212, num_examples=10, shard_lengths=None, dataset_name=None), 'recorded': SplitInfo(name='train', num_bytes=106, num_examples=5, shard_lengths=None, dataset_name='hf-bug-temp')}]
__________
By contrast, this loads the data from both data1/train.parquet and data2/train.parquet without any error message:
from datasets import load_dataset
dataset = load_dataset("srehaag/hf-bug-temp")
### Expected behavior
Should load the 5 x 2 table from data1/train.parquet without error message.
### Environment info
Used Codespaces to simplify environment (see details below), but bug is present across various configurations.
- `datasets` version: 2.19.1
- Platform: Linux-6.5.0-1021-azure-x86_64-with-glibc2.31
- Python version: 3.10.13
- `huggingface_hub` version: 0.23.1
- PyArrow version: 16.1.0
- Pandas version: 2.2.2
- `fsspec` version: 2024.3.1
ExpectedMoreSplits error when using data_dir
As reported by @regisss, an `ExpectedMoreSplits` error is raised when passing `data_dir`:
```python
from datasets import load_dataset
dataset = load_dataset(
"lvwerra/stack-exchange-paired",
split="train",
cache_dir=None,
data_dir="data/rl",
)
```
```
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/usr/local/lib/python3.10/dist-packages/datasets/load.py", line 2609, in load_dataset
builder_instance.download_and_prepare(
File "/usr/local/lib/python3.10/dist-packages/datasets/builder.py", line 1027, in download_and_prepare
self._download_and_prepare(
File "/usr/local/lib/python3.10/dist-packages/datasets/builder.py", line 1140, in _download_and_prepare
verify_splits(self.info.splits, split_dict)
File "/usr/local/lib/python3.10/dist-packages/datasets/utils/info_utils.py", line 92, in verify_splits
raise ExpectedMoreSplits(str(set(expected_splits) - set(recorded_splits)))
datasets.utils.info_utils.ExpectedMoreSplits: {'test'}
```
| Thanks for reporting, @srehaag.
We are investigating this issue.
I confirm there is a bug for data-based Hub datasets when the user passes `data_dir`, which was introduced by PR:
- #6714
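Until the fix is released, one possible interim workaround (it only skips the split checks rather than fixing the root cause; `verification_mode` is available in `datasets` >= 2.9) is:
```python
from datasets import load_dataset

# Skips verify_splits(), so neither NonMatchingSplitsSizesError nor
# ExpectedMoreSplits is raised for the reproducers above.
dataset = load_dataset("srehaag/hf-bug-temp", data_dir="data1", verification_mode="no_checks")
```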
| 2024-05-28T13:33:38Z | [] | [] |
koxudaxi/datamodel-code-generator | 382 | koxudaxi__datamodel-code-generator-382 | [
"381"
] | 3f7975a5a239f33533a12d68a01d08668ba86b1b | diff --git a/datamodel_code_generator/__main__.py b/datamodel_code_generator/__main__.py
--- a/datamodel_code_generator/__main__.py
+++ b/datamodel_code_generator/__main__.py
@@ -249,17 +249,6 @@ def validate_url(cls, value: Any) -> Optional[ParseResult]:
f'This protocol doesn\'t support only http/https. --input={value}'
) # pragma: no cover
- @root_validator
- def validate_literal_option(cls, values: Dict[str, Any]) -> Dict[str, Any]:
- if values.get('enum_field_as_literal'):
- target_python_version: PythonVersion = values['target_python_version']
- if not target_python_version.has_literal_type:
- raise Error(
- f"`--enum-field-as-literal` isn't compatible with `--target-python-version {target_python_version.value}`.\n" # type: ignore
- f"You have to set `--target-python-version {target_python_version.PY_38.value}` or later version."
- )
- return values
-
@root_validator
def validate_use_generic_container_types(
cls, values: Dict[str, Any]
diff --git a/datamodel_code_generator/imports.py b/datamodel_code_generator/imports.py
--- a/datamodel_code_generator/imports.py
+++ b/datamodel_code_generator/imports.py
@@ -65,6 +65,7 @@ def append(self, imports: Union[Import, Iterable[Import], None]) -> None:
IMPORT_UNION = Import.from_full_path('typing.Union')
IMPORT_OPTIONAL = Import.from_full_path('typing.Optional')
IMPORT_LITERAL = Import.from_full_path('typing.Literal')
+IMPORT_LITERAL_BACKPORT = Import.from_full_path('typing_extensions.Literal')
IMPORT_SEQUENCE = Import.from_full_path('typing.Sequence')
IMPORT_MAPPING = Import.from_full_path('typing.Mapping')
IMPORT_ABC_SEQUENCE = Import.from_full_path('collections.abc.Sequence')
diff --git a/datamodel_code_generator/parser/jsonschema.py b/datamodel_code_generator/parser/jsonschema.py
--- a/datamodel_code_generator/parser/jsonschema.py
+++ b/datamodel_code_generator/parser/jsonschema.py
@@ -520,7 +520,8 @@ def parse_object_fields(
field.additionalProperties
):
additional_properties_type = self.data_type(
- literals=field.additionalProperties.enum
+ literals=field.additionalProperties.enum,
+ python_version=self.target_python_version,
)
else:
additional_properties_type = self.parse_enum(
@@ -566,7 +567,9 @@ def parse_object_fields(
field_type = self.data_type_manager.get_data_type(Types.object)
elif field.enum:
if self.should_parse_enum_as_literal(field):
- field_type = self.data_type(literals=field.enum)
+ field_type = self.data_type(
+ literals=field.enum, python_version=self.target_python_version
+ )
else:
field_type = self.parse_enum(field_name, field, [*path, field_name])
else:
@@ -661,7 +664,9 @@ def parse_field(index: int, item: JsonSchemaObject) -> DataType:
)
elif item.enum:
if self.should_parse_enum_as_literal(item):
- return self.data_type(literals=item.enum)
+ return self.data_type(
+ literals=item.enum, python_version=self.target_python_version
+ )
else:
return self.parse_enum(name, item, field_path, singular_name=True)
elif item.is_array:
@@ -814,6 +819,7 @@ def parse_enum(
else type(enum_part).__name__
)
field_name = f'{prefix}_{enum_part}'
+
enum_fields.append(
self.data_model_field_type(
name=self.model_resolver.get_valid_name(field_name),
diff --git a/datamodel_code_generator/types.py b/datamodel_code_generator/types.py
--- a/datamodel_code_generator/types.py
+++ b/datamodel_code_generator/types.py
@@ -24,6 +24,7 @@
IMPORT_DICT,
IMPORT_LIST,
IMPORT_LITERAL,
+ IMPORT_LITERAL_BACKPORT,
IMPORT_MAPPING,
IMPORT_OPTIONAL,
IMPORT_SEQUENCE,
@@ -156,8 +157,18 @@ def __init__(self, **values: Any) -> None:
imports: Tuple[Tuple[bool, Import], ...] = (
(self.is_optional, IMPORT_OPTIONAL),
(len(self.data_types) > 1, IMPORT_UNION),
- (any(self.literals), IMPORT_LITERAL),
)
+ if any(self.literals):
+ import_literal = (
+ IMPORT_LITERAL
+ if self.python_version.has_literal_type
+ else IMPORT_LITERAL_BACKPORT
+ )
+ imports = (
+ *imports,
+ (any(self.literals), import_literal),
+ )
+
if self.use_generic_container:
if self.use_standard_collections:
imports = (
| diff --git a/tests/data/expected/main/main_openapi_enum_models_as_literal_py37/output.py b/tests/data/expected/main/main_openapi_enum_models_as_literal_py37/output.py
new file mode 100644
--- /dev/null
+++ b/tests/data/expected/main/main_openapi_enum_models_as_literal_py37/output.py
@@ -0,0 +1,83 @@
+# generated by datamodel-codegen:
+# filename: enum_models.yaml
+# timestamp: 2019-07-26T00:00:00+00:00
+
+from __future__ import annotations
+
+from enum import Enum
+from typing import List, Optional, Union
+
+from pydantic import BaseModel, Field
+from typing_extensions import Literal
+
+
+class Pet(BaseModel):
+ id: int
+ name: str
+ tag: Optional[str] = None
+ kind: Optional[Literal['dog', 'cat']] = None
+ type: Optional[Literal['animal']] = None
+
+
+class Pets(BaseModel):
+ __root__: List[Pet]
+
+
+class Animal(BaseModel):
+ kind: Optional[Literal['snake', 'rabbit']] = None
+
+
+class Error(BaseModel):
+ code: int
+ message: str
+
+
+class EnumObject(BaseModel):
+ type: Optional[Literal['a', 'b']] = None
+
+
+class EnumRoot(Enum):
+ a = 'a'
+ b = 'b'
+
+
+class IntEnum(Enum):
+ number_1 = 1
+ number_2 = 2
+
+
+class AliasEnum(Enum):
+ a = 1
+ b = 2
+ c = 3
+
+
+class MultipleTypeEnum(Enum):
+ red = 'red'
+ amber = 'amber'
+ green = 'green'
+ NoneType_None = None
+ int_42 = 42
+
+
+class SingleEnum(Enum):
+ pet = 'pet'
+
+
+class ArrayEnum(BaseModel):
+ __root__: List[Union[Literal['cat'], Literal['dog']]]
+
+
+class VersionEnum(Enum):
+ RC1 = 'RC1'
+ RC1N = 'RC1N'
+ RC2 = 'RC2'
+ RC2N = 'RC2N'
+ RC3 = 'RC3'
+ RC4 = 'RC4'
+
+
+class Version(BaseModel):
+ __root__: Optional[VersionEnum] = Field(
+ 'RC1', description='nullable enum', example='RC2'
+ )
diff --git a/tests/test_main.py b/tests/test_main.py
--- a/tests/test_main.py
+++ b/tests/test_main.py
@@ -1761,13 +1761,17 @@ def test_main_openapi_enum_models_as_literal_py37(capsys):
]
)
- captured = capsys.readouterr()
- assert return_code == Exit.ERROR
+ assert return_code == Exit.OK
assert (
- captured.err
- == "`--enum-field-as-literal` isn't compatible with `--target-python-version 3.7`.\n"
- "You have to set `--target-python-version 3.8` or later version.\n"
+ output_file.read_text()
+ == (
+ EXPECTED_MAIN_PATH
+ / 'main_openapi_enum_models_as_literal_py37'
+ / 'output.py'
+ ).read_text()
)
+ with pytest.raises(SystemExit):
+ main()
@freeze_time('2019-07-26')
| Allow enum as Literal in python 3.7 via typing_extensions backport
**Is your feature request related to a problem? Please describe.**
I want to use both options `--target-python-version 3.7` and `--enum-field-as-literal all` at the same time.
This is currently not allowed via this [line](https://github.com/koxudaxi/datamodel-code-generator/blob/3f7975a5a239f33533a12d68a01d08668ba86b1b/datamodel_code_generator/__main__.py#L256).
**Describe the solution you'd like**
Import `Literal` from `typing_extensions` if the target Python version is below 3.8.
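A minimal sketch of what generated 3.7-compatible output could look like (assuming `typing_extensions` is installed; the model and field names here are illustrative only):
```python
from typing import Optional

from pydantic import BaseModel
from typing_extensions import Literal  # backport of typing.Literal for Python 3.7


class Pet(BaseModel):
    kind: Optional[Literal["dog", "cat"]] = None
```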
| 2021-03-21T07:33:22Z | [] | [] |
|
koxudaxi/datamodel-code-generator | 421 | koxudaxi__datamodel-code-generator-421 | [
"420"
] | d57b73895cf749118e93891e9bdf7070bede73fe | diff --git a/datamodel_code_generator/model/pydantic/imports.py b/datamodel_code_generator/model/pydantic/imports.py
--- a/datamodel_code_generator/model/pydantic/imports.py
+++ b/datamodel_code_generator/model/pydantic/imports.py
@@ -17,8 +17,8 @@
IMPORT_UUID4 = Import.from_full_path('pydantic.UUID4')
IMPORT_UUID5 = Import.from_full_path('pydantic.UUID5')
IMPORT_ANYURL = Import.from_full_path('pydantic.AnyUrl')
-IMPORT_IPV4ADDRESS = Import.from_full_path('pydantic.IPv4Address')
-IMPORT_IPV6ADDRESS = Import.from_full_path('pydantic.IPv6Address')
+IMPORT_IPV4ADDRESS = Import.from_full_path('ipaddress.IPv4Address')
+IMPORT_IPV6ADDRESS = Import.from_full_path('ipaddress.IPv6Address')
IMPORT_EXTRA = Import.from_full_path('pydantic.Extra')
IMPORT_FIELD = Import.from_full_path('pydantic.Field')
IMPORT_STRICT_INT = Import.from_full_path('pydantic.StrictInt')
| diff --git a/tests/parser/test_jsonschema.py b/tests/parser/test_jsonschema.py
--- a/tests/parser/test_jsonschema.py
+++ b/tests/parser/test_jsonschema.py
@@ -354,8 +354,8 @@ def test_parse_nested_array():
('string', 'uuid3', 'UUID3', 'pydantic', 'UUID3'),
('string', 'uuid4', 'UUID4', 'pydantic', 'UUID4'),
('string', 'uuid5', 'UUID5', 'pydantic', 'UUID5'),
- ('string', 'ipv4', 'IPv4Address', 'pydantic', 'IPv4Address'),
- ('string', 'ipv6', 'IPv6Address', 'pydantic', 'IPv6Address'),
+ ('string', 'ipv4', 'IPv4Address', 'ipaddress', 'IPv4Address'),
+ ('string', 'ipv6', 'IPv6Address', 'ipaddress', 'IPv6Address'),
('string', 'unknown-type', 'str', None, None),
],
)
| IPv4Address doesn't import from pydantic.validators
**Describe the bug**
When using `format: ipv4`, the following import is added to the output:
```py
from pydantic import IPv4Address
```
This isn't a valid import.
**To Reproduce**
Example schema:
```yaml
openapi: 3.0.0
info:
version: 0.0.1
title: Foo API
paths:
/foo:
get:
responses:
"200":
description: Success
components:
schemas:
Foo:
type: object
properties:
ip:
type: string
format: ipv4
```
Used commandline:
```
$ datamodel-codegen --input openapi.yaml
```
**Expected behavior**
When using `format: ipv4`, the following import is added to the output:
```py
from pydantic.validators import IPv4Address
```
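For reference, a small standalone check (independent of the generator) showing that pydantic v1 validates the stdlib `ipaddress` type directly, which is what the patch above switches the import to:
```python
from ipaddress import IPv4Address

from pydantic import BaseModel


class Foo(BaseModel):
    ip: IPv4Address


foo = Foo(ip="127.0.0.1")
print(type(foo.ip))  # <class 'ipaddress.IPv4Address'>
```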
**Version:**
- OS: MacOS
- Python version: `3.9.2`
- datamodel-code-generator version: `0.8.2`
**Additional context**
None
| 2021-04-29T00:53:11Z | [] | [] |
|
koxudaxi/datamodel-code-generator | 689 | koxudaxi__datamodel-code-generator-689 | [
"688"
] | e2dcb199fc6da3c22aa5df4dd209721f1e71507e | diff --git a/datamodel_code_generator/types.py b/datamodel_code_generator/types.py
--- a/datamodel_code_generator/types.py
+++ b/datamodel_code_generator/types.py
@@ -75,7 +75,7 @@ class Config:
is_dict: bool = False
is_list: bool = False
is_custom_type: bool = False
- literals: List[Union[int, str]] = []
+ literals: 'List[Union[int, str]]' = []
use_standard_collections: bool = False
use_generic_container: bool = False
alias: Optional[str] = None
| diff --git a/tests/data/expected/main/main_openapi_enum_models_all/output.py b/tests/data/expected/main/main_openapi_enum_models_all/output.py
--- a/tests/data/expected/main/main_openapi_enum_models_all/output.py
+++ b/tests/data/expected/main/main_openapi_enum_models_all/output.py
@@ -16,6 +16,7 @@ class Pet(BaseModel):
tag: Optional[str] = None
kind: Optional[Literal['dog', 'cat']] = None
type: Optional[Literal['animal']] = None
+ number: Literal[1]
class Pets(BaseModel):
diff --git a/tests/data/expected/main/main_openapi_enum_models_as_literal_py37/output.py b/tests/data/expected/main/main_openapi_enum_models_as_literal_py37/output.py
--- a/tests/data/expected/main/main_openapi_enum_models_as_literal_py37/output.py
+++ b/tests/data/expected/main/main_openapi_enum_models_as_literal_py37/output.py
@@ -17,6 +17,7 @@ class Pet(BaseModel):
tag: Optional[str] = None
kind: Optional[Literal['dog', 'cat']] = None
type: Optional[Literal['animal']] = None
+ number: Literal[1]
class Pets(BaseModel):
diff --git a/tests/data/expected/main/main_openapi_enum_models_one/output.py b/tests/data/expected/main/main_openapi_enum_models_one/output.py
--- a/tests/data/expected/main/main_openapi_enum_models_one/output.py
+++ b/tests/data/expected/main/main_openapi_enum_models_one/output.py
@@ -21,6 +21,7 @@ class Pet(BaseModel):
tag: Optional[str] = None
kind: Optional[Kind] = None
type: Optional[Literal['animal']] = None
+ number: Literal[1]
class Pets(BaseModel):
diff --git a/tests/data/expected/parser/openapi/openapi_parser_parse_enum_models/output_py36.py b/tests/data/expected/parser/openapi/openapi_parser_parse_enum_models/output_py36.py
--- a/tests/data/expected/parser/openapi/openapi_parser_parse_enum_models/output_py36.py
+++ b/tests/data/expected/parser/openapi/openapi_parser_parse_enum_models/output_py36.py
@@ -13,12 +13,17 @@ class Type(Enum):
animal = 'animal'
+class Number(Enum):
+ integer_1 = 1
+
+
class Pet(BaseModel):
id: int
name: str
tag: Optional[str] = None
kind: Optional['Kind'] = None
type: Optional['Type'] = None
+ number: 'Number'
class Pets(BaseModel):
diff --git a/tests/data/expected/parser/openapi/openapi_parser_parse_enum_models/output_py37.py b/tests/data/expected/parser/openapi/openapi_parser_parse_enum_models/output_py37.py
--- a/tests/data/expected/parser/openapi/openapi_parser_parse_enum_models/output_py37.py
+++ b/tests/data/expected/parser/openapi/openapi_parser_parse_enum_models/output_py37.py
@@ -15,12 +15,17 @@ class Type(Enum):
animal = 'animal'
+class Number(Enum):
+ integer_1 = 1
+
+
class Pet(BaseModel):
id: int
name: str
tag: Optional[str] = None
kind: Optional[Kind] = None
type: Optional[Type] = None
+ number: Number
class Pets(BaseModel):
diff --git a/tests/data/openapi/enum_models.yaml b/tests/data/openapi/enum_models.yaml
--- a/tests/data/openapi/enum_models.yaml
+++ b/tests/data/openapi/enum_models.yaml
@@ -51,6 +51,7 @@ components:
required:
- id
- name
+ - number
properties:
id:
type: integer
@@ -65,6 +66,10 @@ components:
type:
type: string
enum: [ 'animal' ]
+ number:
+ type: integer
+ enum: [ 1 ]
+
Pets:
type: array
items:
diff --git a/tests/parser/test_openapi.py b/tests/parser/test_openapi.py
--- a/tests/parser/test_openapi.py
+++ b/tests/parser/test_openapi.py
@@ -2,6 +2,7 @@
from pathlib import Path
from typing import List, Optional
+import pydantic
import pytest
from datamodel_code_generator import PythonVersion
@@ -378,6 +379,9 @@ class UnknownTypeNumber(Enum):
)
[email protected](
+ pydantic.VERSION < '1.9.0', reason='Require Pydantic version 1.9.0 or later '
+)
def test_openapi_parser_parse_enum_models():
parser = OpenAPIParser(
Path(DATA_PATH / 'enum_models.yaml').read_text(),
diff --git a/tests/test_main.py b/tests/test_main.py
--- a/tests/test_main.py
+++ b/tests/test_main.py
@@ -5,6 +5,7 @@
from unittest.mock import call
import isort
+import pydantic
import pytest
from _pytest.capture import CaptureFixture
from _pytest.tmpdir import TempdirFactory
@@ -1824,6 +1825,9 @@ def test_main_jsonschema_multiple_files_json_pointer():
main()
[email protected](
+ pydantic.VERSION < '1.9.0', reason='Require Pydantic version 1.9.0 or later '
+)
@freeze_time('2019-07-26')
def test_main_openapi_enum_models_as_literal_one():
with TemporaryDirectory() as output_dir:
@@ -1853,6 +1857,9 @@ def test_main_openapi_enum_models_as_literal_one():
main()
[email protected](
+ pydantic.VERSION < '1.9.0', reason='Require Pydantic version 1.9.0 or later '
+)
@freeze_time('2019-07-26')
def test_main_openapi_enum_models_as_literal_all():
with TemporaryDirectory() as output_dir:
@@ -1882,6 +1889,9 @@ def test_main_openapi_enum_models_as_literal_all():
main()
[email protected](
+ pydantic.VERSION < '1.9.0', reason='Require Pydantic version 1.9.0 or later '
+)
@freeze_time('2019-07-26')
def test_main_openapi_enum_models_as_literal_py37(capsys):
with TemporaryDirectory() as output_dir:
| --enum-field-as-literal=one converts integer to string - still
**Describe the bug**
When using `--enum-field-as-literal=one`, literal integers get converted to strings, depending on the exact versions of `datamodel-code-generator`'s dependencies which are installed. For details, see the bottom of the description. This is highly problematic when using `datamodel-code-generator` as a library, as its output changes unpredictably depending on which exact versions of other dependencies are installed.
This is not a duplicate of https://github.com/koxudaxi/datamodel-code-generator/issues/440 .
**To Reproduce**
Example schema:
```json
{
"title": "SomeModel",
"type": "object",
"properties": {
"attribute": {
"title": "Attribute",
"enum": [
1
],
"type": "integer"
}
},
"required": [
"attribute"
]
}
```
Used commandline:
```
$ datamodel-codegen --input file.json --enum-field-as-literal=one
```
**Expected behavior**
I expected the result to look something like
```
class SomeModel(BaseModel):
attribute: Literal[1] = Field(..., title='Attribute')
```
instead it looks like
```
class SomeModel(BaseModel):
attribute: Literal['1'] = Field(..., title='Attribute')
```
**Version:**
- OS: Linux
- Python version: 3.8.0
- datamodel-code-generator version: 0.11.16
**Additional context**
The problem seems to lie in https://github.com/koxudaxi/datamodel-code-generator/blob/e2dcb199fc6da3c22aa5df4dd209721f1e71507e/datamodel_code_generator/types.py#L78
Python caches specified generics - see also https://bugs.python.org/issue45679 -, which means that if
```
List[Union[str, int]]
```
was used in some dependency _before_ python parses this part, `List[Union[int, str]]` magically becomes `List[Union[str, int]]`. This is turn makes pydantic parse `[1]` to `['1']`. Whether or not `List[Union[str, int]]` was parsed by python before parsing `types.py` depends on the exact version of the dependencies which are installed.
For an example of this type caching, the following code runs without error in python 3.8:
```
from typing import List, Union
List[Union[str, int]]
assert str(List[Union[int, str]]) == "typing.List[typing.Union[str, int]]"
```
To see how this can confuse pydantic, the following code also runs without error in python 3.8 with pydantic version 1.9.0:
```
from pydantic import BaseModel
from typing import List, Literal, Union
List[Union[str, int]]
class SomeModel(BaseModel):
literals: List[Union[int, str]]
my_instance = SomeModel(literals=[1])
assert type(my_instance.literals[0]) == str
```
See also the warning in https://pydantic-docs.helpmanual.io/usage/types/#unions
| Using the new pydantic 1.9 feature https://pydantic-docs.helpmanual.io/usage/model_config/#smart-union in `DataType` fixes the problem.
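A minimal sketch of that `smart_union` approach (requires pydantic >= 1.9), mirroring the example above:
```python
from typing import List, Union

from pydantic import BaseModel

List[Union[str, int]]  # trigger the generic-caching behaviour described above


class SomeModel(BaseModel):
    class Config:
        smart_union = True  # try exact type matches before any coercion

    literals: List[Union[int, str]]


# The int survives even though the cached generic reordered the union members.
assert type(SomeModel(literals=[1]).literals[0]) is int
```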
As an easier solution, one can just put the type in quotes to make the error disappear. I'll send an MR. | 2022-01-20T14:15:12Z | [] | [] |
koxudaxi/datamodel-code-generator | 701 | koxudaxi__datamodel-code-generator-701 | [
"700"
] | cf4b361b5c2796a46acb88bd30a2377fffe4d22f | diff --git a/datamodel_code_generator/__main__.py b/datamodel_code_generator/__main__.py
--- a/datamodel_code_generator/__main__.py
+++ b/datamodel_code_generator/__main__.py
@@ -41,7 +41,11 @@
enable_debug_message,
generate,
)
-from datamodel_code_generator.format import PythonVersion, is_supported_in_black
+from datamodel_code_generator.format import (
+ PythonVersion,
+ black_find_project_root,
+ is_supported_in_black,
+)
from datamodel_code_generator.parser import LiteralType
from datamodel_code_generator.reference import is_url
from datamodel_code_generator.types import StrictTypes
@@ -435,7 +439,7 @@ def main(args: Optional[Sequence[str]] = None) -> Exit:
print(version)
exit(0)
- root = black.find_project_root((Path().resolve(),))
+ root = black_find_project_root((Path().resolve(),))
pyproject_toml_path = root / "pyproject.toml"
if pyproject_toml_path.is_file():
pyproject_toml: Dict[str, Any] = {
diff --git a/datamodel_code_generator/format.py b/datamodel_code_generator/format.py
--- a/datamodel_code_generator/format.py
+++ b/datamodel_code_generator/format.py
@@ -1,6 +1,6 @@
from enum import Enum
from pathlib import Path
-from typing import TYPE_CHECKING, Any, Dict, Optional
+from typing import TYPE_CHECKING, Any, Dict, Optional, Sequence
from warnings import warn
import black
@@ -19,17 +19,42 @@ def has_literal_type(self) -> bool:
return self.value >= self.PY_38.value # type: ignore
-BLACK_PYTHON_VERSION: Dict[PythonVersion, black.TargetVersion] = {
- v: getattr(black.TargetVersion, f'PY{v.name.split("_")[-1]}')
- for v in PythonVersion
- if hasattr(black.TargetVersion, f'PY{v.name.split("_")[-1]}')
-}
+if TYPE_CHECKING:
+
+ class _TargetVersion(Enum):
+ ...
+
+ BLACK_PYTHON_VERSION: Dict[PythonVersion, _TargetVersion]
+else:
+ BLACK_PYTHON_VERSION: Dict[PythonVersion, black.TargetVersion] = {
+ v: getattr(black.TargetVersion, f'PY{v.name.split("_")[-1]}')
+ for v in PythonVersion
+ if hasattr(black.TargetVersion, f'PY{v.name.split("_")[-1]}')
+ }
def is_supported_in_black(python_version: PythonVersion) -> bool: # pragma: no cover
return python_version in BLACK_PYTHON_VERSION
+def black_find_project_root(sources: Sequence[Path]) -> Path:
+ if TYPE_CHECKING:
+ from typing import Iterable, Tuple, Union
+
+ def _find_project_root(
+ srcs: Union[Sequence[str], Iterable[str]]
+ ) -> Union[Tuple[Path, str], Path]:
+ ...
+
+ else:
+ from black import find_project_root as _find_project_root
+ project_root = _find_project_root(tuple(str(s) for s in sources))
+ if isinstance(project_root, tuple):
+ return project_root[0]
+ else:
+ return project_root
+
+
class CodeFormatter:
def __init__(
self,
@@ -40,7 +65,7 @@ def __init__(
if not settings_path:
settings_path = Path().resolve()
- root = black.find_project_root((settings_path,))
+ root = black_find_project_root((settings_path,))
path = root / "pyproject.toml"
if path.is_file():
value = str(path)
| diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml
--- a/.github/workflows/test.yml
+++ b/.github/workflows/test.yml
@@ -13,6 +13,7 @@ jobs:
python-version: ["3.6", "3.7", "3.8", "3.9", "3.10"]
os: [ubuntu-latest, windows-latest, macos-latest]
isort-version: [4.3.21, 5.6.4]
+ black-version: [22.1.0, defualt]
pydantic-version: [1.5.1, default]
defaults:
run:
@@ -50,6 +51,10 @@ jobs:
if: matrix.pydantic-version == '1.5.1' && matrix.python-version != '3.9' && matrix.python-version != '3.10'
run: |
poetry run pip install pydantic=="1.5.1"
+ - name: Install Black 22.1.0
+ if: matrix.black-version == '22.1.0'
+ run: |
+ poetry run pip install black=="22.1.0"
- name: Lint
run: |
source $VENV
| Support black 22.1.0
**Is your feature request related to a problem? Please describe.**
`black` released a new version 22.1.0 https://github.com/psf/black/releases/tag/22.1.0
**Describe the solution you'd like**
Being able to use `black` 22.1.0 and the latest `datamodel-code-generator` in the same project
**Describe alternatives you've considered**
Pin black to an earlier for now
**Additional context**
In particular, this new version changed the return value of `find_project_root`, which is used by the `datamodel-code-generator` package.
* 22.1.0 https://github.com/psf/black/blob/d038a24ca200da9dacc1dcb05090c9e5b45b7869/src/black/files.py#L34
* 21.12b0 https://github.com/psf/black/blob/f1d4e742c91dd5179d742b0db9293c4472b765f8/src/black/files.py#L34
Using `black` 22.1.0 with `datamodel-code-generator` 0.11.17 causes this kind of error:
```
File "/opt/hostedtoolcache/Python/3.8.12/x64/lib/python3.8/site-packages/datamodel_code_generator/format.py", line 44, in __init__
path = root / "pyproject.toml"
TypeError: unsupported operand type(s) for /: 'tuple' and 'str'
```
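To make the API change concrete, a minimal compatibility shim over the two return shapes (essentially what the wrapper added in the patch above does):
```python
from pathlib import Path

import black

result = black.find_project_root((str(Path.cwd()),))
# black < 22.1.0 returns a Path; black >= 22.1.0 returns (Path, reason: str)
root = result[0] if isinstance(result, tuple) else result
pyproject_toml_path = root / "pyproject.toml"
```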
| 2022-01-31T20:21:48Z | [] | [] |
|
koxudaxi/datamodel-code-generator | 726 | koxudaxi__datamodel-code-generator-726 | [
"724"
] | ca976e8e43815600b9cc3b3ca3e30c45464d2839 | diff --git a/datamodel_code_generator/format.py b/datamodel_code_generator/format.py
--- a/datamodel_code_generator/format.py
+++ b/datamodel_code_generator/format.py
@@ -51,7 +51,7 @@ def _find_project_root(
project_root = _find_project_root(tuple(str(s) for s in sources))
if isinstance(project_root, tuple):
return project_root[0]
- else:
+ else: # pragma: no cover
return project_root
diff --git a/datamodel_code_generator/parser/jsonschema.py b/datamodel_code_generator/parser/jsonschema.py
--- a/datamodel_code_generator/parser/jsonschema.py
+++ b/datamodel_code_generator/parser/jsonschema.py
@@ -670,7 +670,7 @@ def parse_item(
is_dict=True,
dict_key=self.data_type_manager.get_data_type(
Types.string,
- pattern=k,
+ pattern=k if not self.field_constraints else None,
),
)
for k, v in item.patternProperties.items()
| diff --git a/tests/data/expected/main/main_pattern_properties_field_constraints/output.py b/tests/data/expected/main/main_pattern_properties_field_constraints/output.py
new file mode 100644
--- /dev/null
+++ b/tests/data/expected/main/main_pattern_properties_field_constraints/output.py
@@ -0,0 +1,17 @@
+# generated by datamodel-codegen:
+# filename: pattern_properties.json
+# timestamp: 2019-07-26T00:00:00+00:00
+
+from __future__ import annotations
+
+from typing import Dict, Optional
+
+from pydantic import BaseModel
+
+
+class Bar(BaseModel):
+ name: Optional[str] = None
+
+
+class Foo(BaseModel):
+ bar: Dict[str, Bar]
diff --git a/tests/test_main.py b/tests/test_main.py
--- a/tests/test_main.py
+++ b/tests/test_main.py
@@ -3480,6 +3480,34 @@ def test_jsonschema_pattern_properties():
main()
+@freeze_time('2019-07-26')
+def test_jsonschema_pattern_properties_field_constraints():
+ with TemporaryDirectory() as output_dir:
+ output_file: Path = Path(output_dir) / 'output.py'
+ return_code: Exit = main(
+ [
+ '--input',
+ str(JSON_SCHEMA_DATA_PATH / 'pattern_properties.json'),
+ '--output',
+ str(output_file),
+ '--input-file-type',
+ 'jsonschema',
+ '--field-constraints',
+ ]
+ )
+ assert return_code == Exit.OK
+ assert (
+ output_file.read_text()
+ == (
+ EXPECTED_MAIN_PATH
+ / 'main_pattern_properties_field_constraints'
+ / 'output.py'
+ ).read_text()
+ )
+ with pytest.raises(SystemExit):
+ main()
+
+
@freeze_time('2019-07-26')
def test_jsonschema_titles():
with TemporaryDirectory() as output_dir:
| Optional command to generate code without constr
**Is your feature request related to a problem? Please describe.**
When the schema contains regular-expression constraints, the generated data model includes `constr`, which can cause the generated code to fail mypy and flake8 checks.
Specifically, with `https://github.com/compose-spec/compose-spec/blob/master/schema/compose-spec.json` as the target file, the following command attempts to generate the code.
````
datamodel-codegen --input compose-spec.json --input-file-type jsonschema --output model.py --field-constraints --target-python-version 3.8
````
Part of the generated result contains the following code
```
services: Optional[Dict[constr(regex=r'^[a-zA-Z0-9._-]+$'), Service]] = None
````
This fails on mypy and flake8 checks.
**Describe the solution you'd like**
The reason for the failure seems to be the use of constr, so I would like an optional command that outputs in a format that does not contain constr (nor conint).
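For the example above, the requested constr-free output would look something like this hand-written sketch (the model and field names are illustrative only, not real generator output):
```python
from typing import Dict, Optional

from pydantic import BaseModel


class Service(BaseModel):
    image: Optional[str] = None  # illustrative field, not the full compose-spec model


class ComposeSpecification(BaseModel):
    # key pattern ^[a-zA-Z0-9._-]+$ dropped in favour of plain str
    services: Optional[Dict[str, Service]] = None
```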
**Describe alternatives you've considered**
If there is already an optional command that accomplishes this kind of functionality, it would be great to know.
I have already tried the `--field-constraints`, `-use-annotated` option commands, etc., but they did not solve this problem.
**Additional context**
If it is difficult to express a constraint by regular expression without using constr, I would like to see a behavior that replaces constr with str and outputs a warning.
Thank you for creating such a great package.
| 2022-03-09T15:23:35Z | [] | [] |
|
koxudaxi/datamodel-code-generator | 767 | koxudaxi__datamodel-code-generator-767 | [
"766"
] | fc1c25756a0b41ff68ce541c644456363cd1418b | diff --git a/datamodel_code_generator/parser/base.py b/datamodel_code_generator/parser/base.py
--- a/datamodel_code_generator/parser/base.py
+++ b/datamodel_code_generator/parser/base.py
@@ -159,6 +159,16 @@ def sort_data_models(
unresolved_model = (
model.reference_classes - {model.path} - set(sorted_data_models)
)
+ base_models = [
+ getattr(s.reference, "path", None) for s in model.base_classes
+ ]
+ update_action_parent = set(require_update_action_models).intersection(
+ base_models
+ )
+ if not unresolved_model and update_action_parent:
+ sorted_data_models[model.path] = model
+ require_update_action_models.append(model.path)
+ continue
if not unresolved_model:
sorted_data_models[model.path] = model
continue
| diff --git a/tests/data/expected/main/main_inheritance_forward_ref/output.py b/tests/data/expected/main/main_inheritance_forward_ref/output.py
new file mode 100644
--- /dev/null
+++ b/tests/data/expected/main/main_inheritance_forward_ref/output.py
@@ -0,0 +1,50 @@
+# generated by datamodel-codegen:
+# filename: inheritance_forward_ref.json
+# timestamp: 2019-07-26T00:00:00+00:00
+
+from __future__ import annotations
+
+from typing import List, Optional
+
+from pydantic import BaseModel, Field
+
+
+class DogBase(BaseModel):
+ name: Optional[str] = Field(None, title='Name')
+ woof: Optional[bool] = Field(True, title='Woof')
+
+
+class PersonBase(BaseModel):
+ name: Optional[str] = Field(None, title='Name')
+
+
+class PersonsBestFriend(BaseModel):
+ people: Optional[List[Person]] = Field(None, title='People')
+ dogs: Optional[List[Dog]] = Field(None, title='Dogs')
+ dog_base: Optional[DogBase] = None
+ dog_relationships: Optional[DogRelationships] = None
+ person_base: Optional[PersonBase] = None
+ person_relationships: Optional[PersonRelationships] = None
+
+
+class DogRelationships(BaseModel):
+ people: Optional[List[Person]] = Field(None, title='People')
+
+
+class PersonRelationships(BaseModel):
+ people: Optional[List[Person]] = Field(None, title='People')
+
+
+class Dog(DogBase, DogRelationships):
+ pass
+
+
+class Person(PersonBase, PersonRelationships):
+ pass
+
+
+PersonsBestFriend.update_forward_refs()
+DogRelationships.update_forward_refs()
+PersonRelationships.update_forward_refs()
+Dog.update_forward_refs()
+Person.update_forward_refs()
diff --git a/tests/data/jsonschema/inheritance_forward_ref.json b/tests/data/jsonschema/inheritance_forward_ref.json
new file mode 100644
--- /dev/null
+++ b/tests/data/jsonschema/inheritance_forward_ref.json
@@ -0,0 +1,100 @@
+{
+ "title": "PersonsBestFriend",
+ "description": "This is the main model.",
+ "type": "object",
+ "properties": {
+ "people": {
+ "title": "People",
+ "type": "array",
+ "items": {
+ "$ref": "#/definitions/Person"
+ }
+ },
+ "dogs": {
+ "title": "Dogs",
+ "type": "array",
+ "items": {
+ "$ref": "#/definitions/Dog"
+ }
+ },
+ "dog_base": {
+ "$ref": "#/definitions/DogBase"
+ },
+ "dog_relationships": {
+ "$ref": "#/definitions/DogRelationships"
+ },
+ "person_base": {
+ "$ref": "#/definitions/PersonBase"
+ },
+ "person_relationships": {
+ "$ref": "#/definitions/PersonRelationships"
+ }
+ },
+ "definitions": {
+ "Person": {
+ "title": "Person",
+ "allOf": [
+ {"$ref": "#/definitions/PersonBase"},
+ {"$ref": "#/definitions/PersonRelationships"}
+ ]
+ },
+ "Dog": {
+ "title": "Dog",
+ "allOf": [
+ {"$ref": "#/definitions/DogBase"},
+ {"$ref": "#/definitions/DogRelationships"}
+ ]
+ },
+ "DogBase": {
+ "title": "DogBase",
+ "type": "object",
+ "properties": {
+ "name": {
+ "title": "Name",
+ "type": "string"
+ },
+ "woof": {
+ "title": "Woof",
+ "default": true,
+ "type": "boolean"
+ }
+ }
+ },
+ "DogRelationships": {
+ "title": "DogRelationships",
+ "type": "object",
+ "properties": {
+ "people": {
+ "title": "People",
+ "type": "array",
+ "items": {
+ "$ref": "#/definitions/Person"
+ }
+ }
+ }
+ },
+ "PersonBase": {
+ "title": "PersonBase",
+ "type": "object",
+ "properties": {
+ "name": {
+ "title": "Name",
+ "type": "string"
+ }
+ }
+ },
+ "PersonRelationships": {
+ "title": "PersonRelationships",
+ "type": "object",
+ "properties": {
+ "people": {
+ "title": "People",
+ "type": "array",
+ "items": {
+ "$ref": "#/definitions/Person"
+ }
+ }
+ }
+ }
+ }
+ }
diff --git a/tests/test_main.py b/tests/test_main.py
--- a/tests/test_main.py
+++ b/tests/test_main.py
@@ -30,6 +30,31 @@
TIMESTAMP = '1985-10-26T01:21:00-07:00'
+@freeze_time('2019-07-26')
+def test_main_inheritance_forward_ref():
+ with TemporaryDirectory() as output_dir:
+ output_file: Path = Path(output_dir) / 'output.py'
+ shutil.copy(DATA_PATH / 'pyproject.toml', Path(output_dir) / 'pyproject.toml')
+ return_code: Exit = main(
+ [
+ '--input',
+ str(JSON_SCHEMA_DATA_PATH / 'inheritance_forward_ref.json'),
+ '--output',
+ str(output_file),
+ ]
+ )
+ assert return_code == Exit.OK
+ assert (
+ output_file.read_text()
+ == (
+ EXPECTED_MAIN_PATH / 'main_inheritance_forward_ref' / 'output.py'
+ ).read_text()
+ )
+
+ with pytest.raises(SystemExit):
+ main()
+
+
@freeze_time('2019-07-26')
def test_main():
with TemporaryDirectory() as output_dir:
| `update_forward_refs()` is not created for all necessary models
**Describe the bug**
`update_forward_refs()` is not called for all necessary models if I use inheritance in the models.
**To Reproduce**
Example schema:
```json
{
"title": "PersonsBestFriend",
"description": "This is the main model.",
"type": "object",
"properties": {
"people": {
"title": "People",
"type": "array",
"items": {
"$ref": "#/definitions/Person"
}
},
"dogs": {
"title": "Dogs",
"type": "array",
"items": {
"$ref": "#/definitions/Dog"
}
},
"dog_base": {
"$ref": "#/definitions/DogBase"
},
"dog_relationships": {
"$ref": "#/definitions/DogRelationships"
},
"person_base": {
"$ref": "#/definitions/PersonBase"
},
"person_relationships": {
"$ref": "#/definitions/PersonRelationships"
}
},
"definitions": {
"Person": {
"title": "Person",
"allOf": [
{"$ref": "#/definitions/PersonBase"},
{"$ref": "#/definitions/PersonRelationships"}
]
},
"Dog": {
"title": "Dog",
"allOf": [
{"$ref": "#/definitions/DogBase"},
{"$ref": "#/definitions/DogRelationships"}
]
},
"DogBase": {
"title": "DogBase",
"type": "object",
"properties": {
"name": {
"title": "Name",
"type": "string"
},
"woof": {
"title": "Woof",
"default": true,
"type": "boolean"
}
}
},
"DogRelationships": {
"title": "DogRelationships",
"type": "object",
"properties": {
"people": {
"title": "People",
"type": "array",
"items": {
"$ref": "#/definitions/Person"
}
}
}
},
"PersonBase": {
"title": "PersonBase",
"type": "object",
"properties": {
"name": {
"title": "Name",
"type": "string"
}
}
},
"PersonRelationships": {
"title": "PersonRelationships",
"type": "object",
"properties": {
"people": {
"title": "People",
"type": "array",
"items": {
"$ref": "#/definitions/Person"
}
}
}
}
}
}
```
Used commandline:
```
$ datamodel-codegen --input pbf.json --output pbf.py
```
generates:
```python
# generated by datamodel-codegen:
# filename: mbf.json
# timestamp: 2022-05-12T21:56:35+00:00
from __future__ import annotations
from typing import List, Optional
from pydantic import BaseModel, Field
class DogBase(BaseModel):
name: Optional[str] = Field(None, title='Name')
woof: Optional[bool] = Field(True, title='Woof')
class PersonBase(BaseModel):
name: Optional[str] = Field(None, title='Name')
class PersonsBestFriend(BaseModel):
people: Optional[List[Person]] = Field(None, title='People')
dogs: Optional[List[Dog]] = Field(None, title='Dogs')
dog_base: Optional[DogBase] = None
dog_relationships: Optional[DogRelationships] = None
person_base: Optional[PersonBase] = None
person_relationships: Optional[PersonRelationships] = None
class DogRelationships(BaseModel):
people: Optional[List[Person]] = Field(None, title='People')
class PersonRelationships(BaseModel):
people: Optional[List[Person]] = Field(None, title='People')
class Dog(DogBase, DogRelationships):
pass
class Person(PersonBase, PersonRelationships):
pass
PersonsBestFriend.update_forward_refs()
DogRelationships.update_forward_refs()
PersonRelationships.update_forward_refs()
```
but if I call `PersonsBestFriend.schema()` I get:
```
Traceback (most recent call last):
File "<string>", line 1, in <module>
File "pydantic/main.py", line 647, in pydantic.main.BaseModel.schema
File "pydantic/schema.py", line 185, in pydantic.schema.model_schema
File "pydantic/schema.py", line 617, in pydantic.schema.model_process_schema
File "pydantic/schema.py", line 658, in pydantic.schema.model_type_schema
File "pydantic/schema.py", line 258, in pydantic.schema.field_schema
File "pydantic/schema.py", line 498, in pydantic.schema.field_type_schema
File "pydantic/schema.py", line 925, in pydantic.schema.field_singleton_schema
File "pydantic/schema.py", line 617, in pydantic.schema.model_process_schema
File "pydantic/schema.py", line 658, in pydantic.schema.model_type_schema
File "pydantic/schema.py", line 258, in pydantic.schema.field_schema
File "pydantic/schema.py", line 563, in pydantic.schema.field_type_schema
File "pydantic/schema.py", line 922, in pydantic.schema.field_singleton_schema
File "/usr/lib/python3.8/abc.py", line 102, in __subclasscheck__
return _abc_subclasscheck(cls, subclass)
TypeError: issubclass() arg 1 must be a class
```
**Expected behavior**
If I add `Dog.update_forward_refs()`, then I get the expected behavior.
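For illustration, a minimal sketch of that manual workaround, appended to the end of the generated module shown above (the `Person` call is an assumption added by analogy; the reporter only needed the `Dog` call):
```python
# Resolve forward references on the inherited models as well;
# these calls are missing from the generated output.
Dog.update_forward_refs()
Person.update_forward_refs()  # assumed: added by analogy with Dog
```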
**Version:**
- OS: Linux
- Python version: 3.7
- datamodel-code-generator version: 0.12.0
| 2022-05-12T22:23:45Z | [] | [] |
|
koxudaxi/datamodel-code-generator | 933 | koxudaxi__datamodel-code-generator-933 | [
"638"
] | 7e43bcd699ca4962bdca2c712a0497cbb5eb9f08 | diff --git a/datamodel_code_generator/__init__.py b/datamodel_code_generator/__init__.py
--- a/datamodel_code_generator/__init__.py
+++ b/datamodel_code_generator/__init__.py
@@ -246,6 +246,7 @@ def generate(
original_field_name_delimiter: Optional[str] = None,
use_double_quotes: bool = False,
use_union_operator: bool = False,
+ collapse_root_models: bool = False,
special_field_name_prefix: Optional[str] = None,
) -> None:
remote_text_cache: DefaultPutDict[str, str] = DefaultPutDict()
@@ -372,6 +373,7 @@ def get_header_and_first_line(csv_file: IO[str]) -> Dict[str, Any]:
original_field_name_delimiter=original_field_name_delimiter,
use_double_quotes=use_double_quotes,
use_union_operator=use_union_operator,
+ collapse_root_models=collapse_root_models,
special_field_name_prefix=special_field_name_prefix,
**kwargs,
)
diff --git a/datamodel_code_generator/__main__.py b/datamodel_code_generator/__main__.py
old mode 100755
new mode 100644
--- a/datamodel_code_generator/__main__.py
+++ b/datamodel_code_generator/__main__.py
@@ -265,6 +265,16 @@ def sig_int_handler(_: int, __: Any) -> None: # pragma: no cover
default=None,
)
+
+arg_parser.add_argument(
+ "--collapse-root-models",
+ action='store_true',
+ default=False,
+ help="Models generated with a root-type field will be merged"
+ "into the models using that root-type model",
+)
+
+
arg_parser.add_argument(
'--enum-field-as-literal',
help='Parse enum field as literal. '
@@ -501,6 +511,7 @@ def _validate_use_union_operator(cls, values: Dict[str, Any]) -> Dict[str, Any]:
use_non_positive_negative_number_constrained_types: bool = False
original_field_name_delimiter: Optional[str] = None
use_double_quotes: bool = False
+ collapse_root_models: bool = False
special_field_name_prefix: Optional[str] = None
def merge_args(self, args: Namespace) -> None:
@@ -647,6 +658,7 @@ def main(args: Optional[Sequence[str]] = None) -> Exit:
use_non_positive_negative_number_constrained_types=config.use_non_positive_negative_number_constrained_types,
original_field_name_delimiter=config.original_field_name_delimiter,
use_double_quotes=config.use_double_quotes,
+ collapse_root_models=config.collapse_root_models,
use_union_operator=config.use_union_operator,
special_field_name_prefix=config.special_field_name_prefix,
)
diff --git a/datamodel_code_generator/parser/base.py b/datamodel_code_generator/parser/base.py
--- a/datamodel_code_generator/parser/base.py
+++ b/datamodel_code_generator/parser/base.py
@@ -311,6 +311,7 @@ def __init__(
use_double_quotes: bool = False,
use_union_operator: bool = False,
allow_responses_without_content: bool = False,
+ collapse_root_models: bool = False,
special_field_name_prefix: Optional[str] = None,
):
self.data_type_manager: DataTypeManager = data_type_manager_type(
@@ -412,6 +413,7 @@ def __init__(
)
self.use_double_quotes = use_double_quotes
self.allow_responses_without_content = allow_responses_without_content
+ self.collapse_root_models = collapse_root_models
@property
def iter_source(self) -> Iterator[Source]:
@@ -692,6 +694,24 @@ def parse(
for duplicate in duplicates:
models.remove(duplicate)
+ if self.collapse_root_models:
+ for model in models:
+ for model_field in model.fields:
+ reference = model_field.data_type.reference
+ if reference and isinstance(
+ reference.source, self.data_model_root_type
+ ):
+ # Use root-type as model_field type
+ root_type_field = reference.source.fields[0]
+ model_field.data_type.remove_reference()
+ model_field.data_type = root_type_field.data_type
+ model_field.data_type.parent = model_field
+ model_field.extras = root_type_field.extras
+ model_field.constraints = root_type_field.constraints
+
+ if not reference.children: # pragma: no cover
+ models.remove(reference.source)
+
if self.set_default_enum_member:
for model in models:
for model_field in model.fields:
diff --git a/datamodel_code_generator/parser/jsonschema.py b/datamodel_code_generator/parser/jsonschema.py
--- a/datamodel_code_generator/parser/jsonschema.py
+++ b/datamodel_code_generator/parser/jsonschema.py
@@ -345,6 +345,7 @@ def __init__(
use_double_quotes: bool = False,
use_union_operator: bool = False,
allow_responses_without_content: bool = False,
+ collapse_root_models: bool = False,
special_field_name_prefix: Optional[str] = None,
):
super().__init__(
@@ -397,6 +398,7 @@ def __init__(
use_double_quotes=use_double_quotes,
use_union_operator=use_union_operator,
allow_responses_without_content=allow_responses_without_content,
+ collapse_root_models=collapse_root_models,
special_field_name_prefix=special_field_name_prefix,
)
diff --git a/datamodel_code_generator/parser/openapi.py b/datamodel_code_generator/parser/openapi.py
--- a/datamodel_code_generator/parser/openapi.py
+++ b/datamodel_code_generator/parser/openapi.py
@@ -194,6 +194,7 @@ def __init__(
use_double_quotes: bool = False,
use_union_operator: bool = False,
allow_responses_without_content: bool = False,
+ collapse_root_models: bool = False,
special_field_name_prefix: Optional[str] = None,
):
super().__init__(
@@ -246,6 +247,7 @@ def __init__(
use_double_quotes=use_double_quotes,
use_union_operator=use_union_operator,
allow_responses_without_content=allow_responses_without_content,
+ collapse_root_models=collapse_root_models,
special_field_name_prefix=special_field_name_prefix,
)
self.open_api_scopes: List[OpenAPIScope] = openapi_scopes or [
diff --git a/datamodel_code_generator/types.py b/datamodel_code_generator/types.py
--- a/datamodel_code_generator/types.py
+++ b/datamodel_code_generator/types.py
@@ -133,16 +133,21 @@ def unresolved_types(self) -> FrozenSet[str]:
| ({self.reference.path} if self.reference else set())
)
- def replace_reference(self, reference: Reference) -> None:
+ def replace_reference(self, reference: Optional[Reference]) -> None:
if not self.reference: # pragma: no cover
raise Exception(
f'`{self.__class__.__name__}.replace_reference()` can\'t be called'
f' when `reference` field is empty.'
)
- self.reference.children.remove(self)
+ while self in self.reference.children:
+ self.reference.children.remove(self)
self.reference = reference
- reference.children.append(self)
+ if reference:
+ reference.children.append(self)
+
+ def remove_reference(self) -> None:
+ self.replace_reference(None)
@property
def module_name(self) -> Optional[str]:
| diff --git a/tests/data/expected/main/main_collapse_root_models/output.py b/tests/data/expected/main/main_collapse_root_models/output.py
new file mode 100644
--- /dev/null
+++ b/tests/data/expected/main/main_collapse_root_models/output.py
@@ -0,0 +1,17 @@
+# generated by datamodel-codegen:
+# filename: not_real_string.json
+# timestamp: 2019-07-26T00:00:00+00:00
+
+from __future__ import annotations
+
+from typing import Optional
+
+from pydantic import BaseModel, constr
+
+
+class Tweet(BaseModel):
+ author_id: Optional[str] = None
+
+
+class FileRequest(BaseModel):
+ file_hash: constr(regex=r'^[a-fA-F\d]{32}$', min_length=32, max_length=32)
diff --git a/tests/data/expected/main/main_collapse_root_models_field_constraints/output.py b/tests/data/expected/main/main_collapse_root_models_field_constraints/output.py
new file mode 100644
--- /dev/null
+++ b/tests/data/expected/main/main_collapse_root_models_field_constraints/output.py
@@ -0,0 +1,17 @@
+# generated by datamodel-codegen:
+# filename: not_real_string.json
+# timestamp: 2019-07-26T00:00:00+00:00
+
+from __future__ import annotations
+
+from typing import Optional
+
+from pydantic import BaseModel, Field
+
+
+class Tweet(BaseModel):
+ author_id: Optional[str] = None
+
+
+class FileRequest(BaseModel):
+ file_hash: str = Field(..., max_length=32, min_length=32, regex='^[a-fA-F\\d]{32}$')
diff --git a/tests/data/openapi/not_real_string.json b/tests/data/openapi/not_real_string.json
new file mode 100644
--- /dev/null
+++ b/tests/data/openapi/not_real_string.json
@@ -0,0 +1,33 @@
+{
+ "openapi" : "3.0.0",
+ "components" : {
+ "schemas" : {
+ "UserId" : {
+ "type" : "string"
+ },
+ "Tweet" : {
+ "type" : "object",
+ "properties" : {
+ "author_id" : {
+ "$ref" : "#/components/schemas/UserId"
+ }
+ }
+ },
+ "FileHash": {
+ "type": "string",
+ "minLength": 32,
+ "maxLength": 32,
+ "pattern": "^[a-fA-F\\d]{32}$"
+ },
+ "FileRequest": {
+ "type": "object",
+ "required": ["file_hash"],
+ "properties": {
+ "file_hash": {
+ "$ref": "#/components/schemas/FileHash"
+ }
+ }
+ }
+ }
+ }
+}
\ No newline at end of file
diff --git a/tests/test_main.py b/tests/test_main.py
--- a/tests/test_main.py
+++ b/tests/test_main.py
@@ -4080,3 +4080,54 @@ def test_external_relative_ref():
with pytest.raises(SystemExit):
main()
+
+
+@freeze_time('2019-07-26')
+def test_main_collapse_root_models():
+ with TemporaryDirectory() as output_dir:
+ output_file: Path = Path(output_dir) / 'output.py'
+ return_code: Exit = main(
+ [
+ '--input',
+ str(OPEN_API_DATA_PATH / 'not_real_string.json'),
+ '--output',
+ str(output_file),
+ "--collapse-root-models",
+ ]
+ )
+ assert return_code == Exit.OK
+ assert (
+ output_file.read_text()
+ == (
+ EXPECTED_MAIN_PATH / 'main_collapse_root_models' / 'output.py'
+ ).read_text()
+ )
+ with pytest.raises(SystemExit):
+ main()
+
+
+@freeze_time('2019-07-26')
+def test_main_collapse_root_models_field_constraints():
+ with TemporaryDirectory() as output_dir:
+ output_file: Path = Path(output_dir) / 'output.py'
+ return_code: Exit = main(
+ [
+ '--input',
+ str(OPEN_API_DATA_PATH / 'not_real_string.json'),
+ '--output',
+ str(output_file),
+ "--collapse-root-models",
+ '--field-constraints',
+ ]
+ )
+ assert return_code == Exit.OK
+ assert (
+ output_file.read_text()
+ == (
+ EXPECTED_MAIN_PATH
+ / 'main_collapse_root_models_field_constraints'
+ / 'output.py'
+ ).read_text()
+ )
+ with pytest.raises(SystemExit):
+ main()
| Referenced string is not a real string (cannot be used as a dict key, does not compare equal to a string)
**Describe the bug**
**To Reproduce**
Example schema:
```yaml
openapi: "3.0.0"
info:
version: 1.0.0
title: File endpoint
paths:
get_file:
parameters:
- $ref: "#/components/schemas/FileRequest"
components:
schemas:
FileHash:
type: string
min_length: 32
max_length: 32
regex: "^[a-fA-F\d]{32}$"
FileRequest:
type: object
required:
- file_hash
properties:
file_hash:
$ref: "#/components/schemas/FileHash"
```
Used commandline:
```sh
$ datamodel-codegen file.yml
```
Generated python code:
```py
class FileHash(BaseModel):
__root__: str = Field(..., max_length=32, min_length=32, regex="^[0-9a-fA-F]+$")
class FileRequest(BaseModel):
file_hash: FileHash
```
Python usage
```py
file_hash = "a"*32
f = FileRequest(file_hash=file_hash)
assert f.file_hash == file_hash  # does not equal
d = dict()
d[f.file_hash] = "asdf" # throws TypeError: unhashable type: 'FileHash'
```
**Expected behavior**
- the field compares equal to the original string
- the field can be used as a dict key (see the sketch below)
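For reference, the expected fixture added in the test diff above (for the new `--collapse-root-models` flag) implies output along these lines for this schema — a sketch, not the exact generated file:
```python
from pydantic import BaseModel, constr


class FileRequest(BaseModel):
    # the FileHash root model is collapsed into the field's own constrained type
    file_hash: constr(regex=r'^[a-fA-F\d]{32}$', min_length=32, max_length=32)
```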
**Version:**
- OS: linux
- Python version: 3.9
- datamodel-code-generator version: 0.11.14
| @adaamz
I'm sorry for the slow response.
We should use the `__root__` attribute for this use case.
```
assert f.file_hash.__root__ == file_hash # does not equal
d = dict()
d[f.file_hash.__root__] = "asdf" # throws TypeError: unhashable type: 'FileHash'
```
Yeah, I know this "workaround"... but is it possible to use the property as a standard string? 🤔
If the code-generator provides the workaround...
```python
class FileRequest(BaseModel):
file_hash: str = Field(..., max_length=32, min_length=32, regex="^[0-9a-fA-F]+$")
```
Or
```python
FileHash = str
class FileRequest(BaseModel):
file_hash: FileHash
```
We should choose between `validation` and a `Defined Type`.
If I could choose a combination of both, that would probably be best 😁
```py
FileHash = str
class FileRequest(BaseModel):
file_hash: FileHash = Field(...)
```
@koxudaxi is this something you are looking at introducing? A feature that
effectively collapses single primitive types into their "base" type on the field, in a parent class.
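(For context, this is what the second test fixture in the diff above produces when the new flag is combined with `--field-constraints`; a sketch based on that fixture:)
```python
from pydantic import BaseModel, Field


class FileRequest(BaseModel):
    # root-type constraints are inlined onto the field itself
    file_hash: str = Field(..., max_length=32, min_length=32, regex='^[a-fA-F\\d]{32}$')
```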
@rbuckland
> effectively collapsing single primitive types, into their "base" type on the field, in a parent class
Could you explain the way? | 2022-12-06T15:32:02Z | [] | [] |
koxudaxi/datamodel-code-generator | 962 | koxudaxi__datamodel-code-generator-962 | [
"917"
] | 9a64326891da7cd8b26444d4ddbfa894f0245fa8 | diff --git a/datamodel_code_generator/reference.py b/datamodel_code_generator/reference.py
--- a/datamodel_code_generator/reference.py
+++ b/datamodel_code_generator/reference.py
@@ -171,10 +171,14 @@ def get_valid_name(
):
name = snake_to_upper_camel(name, delimiter=self.original_delimiter)
- # TODO: when first character is a number
name = re.sub(r'[¹²³⁴⁵⁶⁷⁸⁹]|\W', '_', name)
if name[0].isnumeric():
- name = f'field_{name}'
+ name = f'_{name}'
+
+ # We should avoid having a field begin with an underscore, as it
+ # causes pydantic to consider it as private
+ if name.startswith('_'):
+ name = f'field{name}'
if self.snake_case_field and not ignore_snake_case_field:
name = camel_to_snake(name)
count = 1
| diff --git a/tests/data/expected/main/main_invalid_enum_name/output.py b/tests/data/expected/main/main_invalid_enum_name/output.py
--- a/tests/data/expected/main/main_invalid_enum_name/output.py
+++ b/tests/data/expected/main/main_invalid_enum_name/output.py
@@ -9,7 +9,7 @@
class InvalidEnum(Enum):
field_1_value = '1 value'
- _space = ' space'
- ___special = '*- special'
+ field_space = ' space'
+ field___special = '*- special'
schema = 'schema'
mro_ = 'mro'
diff --git a/tests/data/expected/main/main_jsonschema_field_extras_field_include_all_keys/output.py b/tests/data/expected/main/main_jsonschema_field_extras_field_include_all_keys/output.py
--- a/tests/data/expected/main/main_jsonschema_field_extras_field_include_all_keys/output.py
+++ b/tests/data/expected/main/main_jsonschema_field_extras_field_include_all_keys/output.py
@@ -12,10 +12,10 @@
class Extras(BaseModel):
name: Optional[str] = Field(
None,
- _comment='comment',
- _exclude=123,
- _invalid_key_2='efg',
description='normal key',
+ field_comment='comment',
+ field_exclude=123,
+ field_invalid_key_2='efg',
invalid_key_1='abc',
key1=123,
key2=456,
diff --git a/tests/data/expected/main/main_jsonschema_special_enum/output.py b/tests/data/expected/main/main_jsonschema_special_enum/output.py
--- a/tests/data/expected/main/main_jsonschema_special_enum/output.py
+++ b/tests/data/expected/main/main_jsonschema_special_enum/output.py
@@ -13,12 +13,12 @@
class ModelEnum(Enum):
True_ = True
False_ = False
- _ = ''
- __1 = '\n'
- __ = '\r\n'
- __2 = '\t'
- __3 = '\b'
- __4 = '\\'
+ field_ = ''
+ field__1 = '\n'
+ field__ = '\r\n'
+ field__2 = '\t'
+ field__3 = '\b'
+ field__4 = '\\'
class Model(BaseModel):
diff --git a/tests/data/expected/main/main_jsonschema_special_enum_empty_enum_field_name/output.py b/tests/data/expected/main/main_jsonschema_special_enum_empty_enum_field_name/output.py
--- a/tests/data/expected/main/main_jsonschema_special_enum_empty_enum_field_name/output.py
+++ b/tests/data/expected/main/main_jsonschema_special_enum_empty_enum_field_name/output.py
@@ -14,11 +14,11 @@ class ModelEnum(Enum):
True_ = True
False_ = False
empty = ''
- _ = '\n'
- __ = '\r\n'
- __1 = '\t'
- __2 = '\b'
- __3 = '\\'
+ field_ = '\n'
+ field__ = '\r\n'
+ field__1 = '\t'
+ field__2 = '\b'
+ field__3 = '\\'
class Model(BaseModel):
diff --git a/tests/data/expected/main/main_jsonschema_special_field_name/output.py b/tests/data/expected/main/main_jsonschema_special_field_name/output.py
--- a/tests/data/expected/main/main_jsonschema_special_field_name/output.py
+++ b/tests/data/expected/main/main_jsonschema_special_field_name/output.py
@@ -15,4 +15,4 @@ class SpecialField(BaseModel):
class_: Optional[int] = Field(None, alias='class')
class_s: Optional[int] = Field(None, alias="class's")
class_s_1: Optional[str] = Field(None, alias='class-s')
- _: Optional[str] = Field(None, alias='#')
+ field_: Optional[str] = Field(None, alias='#')
diff --git a/tests/test_resolver.py b/tests/test_resolver.py
new file mode 100644
--- /dev/null
+++ b/tests/test_resolver.py
@@ -0,0 +1,16 @@
+import pytest
+
+from datamodel_code_generator.reference import FieldNameResolver
+
+
[email protected](
+ 'name,expected_resolved',
+ [
+ ('3a', 'field_3a'),
+ ('$in', 'field_in'),
+ ('field', 'field'),
+ ],
+)
+def test_get_valid_field_name(name: str, expected_resolved: str) -> None:
+ resolver = FieldNameResolver()
+ assert expected_resolved == resolver.get_valid_name(name)
| @ symbol in JSON keys produces private variable
**Describe the bug**
We have JSON that has `@id` and `@type` which are reflected in the schema below. When we generate the python model using the datamodel-codegen library it produces variables that we can't access. This is likely due to [pydantic hiding variables](https://pydantic-docs.helpmanual.io/usage/models/#automatically-excluded-attributes) starting with an underscore.
**To Reproduce**
Example schema:
```json
{
"$id": "schema_v2.json",
"$schema": "http://json-schema.org/schema#",
"type": "object",
"properties": {
"@id": {
"type": "string",
"format": "uri",
"pattern": "^http.*$",
"title": "Id must be presesnt and must be a URI"
},
"@type": { "type": "string" },
"profile": { "type": "string" }
},
"required": ["@id", "@type"]
}
```
Produces the following model:
```
class Model(BaseModel):
_id: AnyUrl = Field(..., alias='@id', title='Id must be presesnt and must be a URI')
_type: str = Field(..., alias='@type')
profile: Optional[str] = None
```
and if we try to use it, we can't access `id` or `type`:
```
$ python
Python 3.9.10 (main, Jan 15 2022, 11:48:04)
>>> from model2 import Model
>>> v2 = Model(_id="http://example.com/iiif/2", _type="service")
>>> print (v2)
profile=None
```
Used commandline:
```
$ datamodel-codegen --input schema_v2.json --input-file-type jsonschema --output model2.py
```
**Expected behavior**
Ideally we would like the following conversion, where the leading `_` is removed:
```
class Model(BaseModel):
id: AnyUrl = Field(..., alias='@id', title='Id must be presesnt and must be a URI')
type: str = Field(..., alias='@type')
profile: Optional[str] = None
```
**Version:**
- OS: Mac OS X Monterey 12.6
- Python version: 3.9.10
- datamodel-code-generator version: 0.13.5
**Additional context**
JSON keys `@id`, `@type` and `@context` are used in [JSON-LD](https://json-ld.org/)
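Worth noting: the fix in the diff above takes a slightly different route — rather than dropping the underscore, it prefixes such names with `field`. For this schema the generated model would presumably end up along these lines (a sketch inferred from the patch, not an actual fixture):
```python
from typing import Optional

from pydantic import AnyUrl, BaseModel, Field


class Model(BaseModel):
    field_id: AnyUrl = Field(..., alias='@id', title='Id must be present and must be a URI')
    field_type: str = Field(..., alias='@type')
    profile: Optional[str] = None
```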
| I have a different use case, but the same problem surfaces.
I'm consuming an API where one of the models has a field that starts with `_`. The field makes it into the generated code, but Pydantic ignores the field upon model instantiation, thinking I want it to be private.
It would be great if there was a CLI flag that makes it so the appropriate option is included in the generated models where I can use the `_` fields just like any other fields. | 2022-12-23T20:41:35Z | [] | [] |
koxudaxi/datamodel-code-generator | 968 | koxudaxi__datamodel-code-generator-968 | [
"834"
] | bb9266c008fc88daa0587e9daeb560613ef854d7 | diff --git a/datamodel_code_generator/parser/jsonschema.py b/datamodel_code_generator/parser/jsonschema.py
--- a/datamodel_code_generator/parser/jsonschema.py
+++ b/datamodel_code_generator/parser/jsonschema.py
@@ -516,6 +516,7 @@ def parse_all_of(
)
# ignore an undetected object
if ignore_duplicate_model and not fields and len(base_classes) == 1:
+ self.model_resolver.delete(path)
return self.data_type(reference=base_classes[0])
if self.use_title_as_name and obj.title:
name = obj.title
diff --git a/datamodel_code_generator/reference.py b/datamodel_code_generator/reference.py
--- a/datamodel_code_generator/reference.py
+++ b/datamodel_code_generator/reference.py
@@ -550,6 +550,9 @@ def add(
def get(self, path: Union[Sequence[str], str]) -> Optional[Reference]:
return self.references.get(self.resolve_ref(path))
+ def delete(self, path: Union[Sequence[str], str]) -> None:
+ del self.references[self.resolve_ref(path)]
+
def default_class_name_generator(self, name: str) -> str:
# TODO: create a validate for class name
return self.field_name_resolvers[ModelType.CLASS].get_valid_name(
| diff --git a/tests/data/expected/parser/openapi/openapi_parser_parse_allof_same_prefix_with_ref/output.py b/tests/data/expected/parser/openapi/openapi_parser_parse_allof_same_prefix_with_ref/output.py
new file mode 100644
--- /dev/null
+++ b/tests/data/expected/parser/openapi/openapi_parser_parse_allof_same_prefix_with_ref/output.py
@@ -0,0 +1,17 @@
+from __future__ import annotations
+
+from typing import Optional
+
+from pydantic import BaseModel
+
+
+class FooBar(BaseModel):
+ id: Optional[int] = None
+
+
+class FooBarBaz(BaseModel):
+ id: Optional[int] = None
+
+
+class Foo(BaseModel):
+ foo_bar: Optional[FooBarBaz] = None
diff --git a/tests/data/openapi/allof_same_prefix_with_ref.yaml b/tests/data/openapi/allof_same_prefix_with_ref.yaml
new file mode 100644
--- /dev/null
+++ b/tests/data/openapi/allof_same_prefix_with_ref.yaml
@@ -0,0 +1,30 @@
+openapi: 3.0.3
+info:
+ title: Foo
+ version: "1.0"
+paths:
+ /:
+ get:
+ responses:
+ '200':
+ description: ''
+components:
+ schemas:
+ Foo:
+ type: object
+ properties:
+ foo_bar:
+ allOf:
+ - $ref: '#/components/schemas/FooBarBaz'
+
+ FooBar:
+ type: object
+ properties:
+ id:
+ type: integer
+
+ FooBarBaz:
+ type: object
+ properties:
+ id:
+ type: integer
diff --git a/tests/parser/test_openapi.py b/tests/parser/test_openapi.py
--- a/tests/parser/test_openapi.py
+++ b/tests/parser/test_openapi.py
@@ -421,6 +421,20 @@ def test_openapi_parser_parse_nested_oneof():
)
+def test_openapi_parser_parse_allof_ref():
+ parser = OpenAPIParser(
+ Path(DATA_PATH / 'allof_same_prefix_with_ref.yaml'),
+ )
+ assert (
+ parser.parse()
+ == (
+ EXPECTED_OPEN_API_PATH
+ / 'openapi_parser_parse_allof_same_prefix_with_ref'
+ / 'output.py'
+ ).read_text()
+ )
+
+
def test_openapi_parser_parse_allof():
parser = OpenAPIParser(
Path(DATA_PATH / 'allof.yaml'),
| generator appends "1" to two components
### Discussed in https://github.com/koxudaxi/datamodel-code-generator/discussions/827
Originally posted by **azatoth**, August 21, 2022:
When I run the generator on https://ll.thespacedevs.com/2.2.0/schema (no option flags supplied to the binary), it appends a "1" to the class names of the components `Launcher` and `LauncherConfig`.
I don't see any apparent conflicts in the spec, so I'm a bit lost as to why this happens. It doesn't happen to any of the other components in the spec file.
Any input would be appreciated.
I've boiled down the issue to following minimal spec:
```yaml
openapi: 3.0.3
info:
title: Foo
version: "1.0"
paths:
/:
get:
responses:
'200':
description: ''
components:
schemas:
Foo:
type: object
properties:
foo_bar:
allOf:
- $ref: '#/components/schemas/FooBarOther'
FooBar:
type: object
properties:
id:
type: integer
FooBarOther:
type: object
properties:
id:
type: integer
```
That results in the following Python code:
```python
from __future__ import annotations
from typing import Optional
from pydantic import BaseModel
class FooBar1(BaseModel):
id: Optional[int] = None
class FooBarOther(BaseModel):
id: Optional[int] = None
class Foo(BaseModel):
foo_bar: Optional[FooBarOther] = None
```
I notice that if I move the `$ref` out of the `allOf`, it does not generate a numbered class name.
The issue also only appears when the property name is the snake-case form of an existing model's name.
I'm positive this is not the intended result, so I'm filing this issue.
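For comparison, the de-duplicated output implied by the fix (mirroring the expected fixture added in the test diff above) keeps the original class name — a sketch for the minimal spec above:
```python
from __future__ import annotations

from typing import Optional

from pydantic import BaseModel


class FooBar(BaseModel):
    id: Optional[int] = None


class FooBarOther(BaseModel):
    id: Optional[int] = None


class Foo(BaseModel):
    foo_bar: Optional[FooBarOther] = None
```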
| 2022-12-27T10:56:20Z | [] | [] |
|
koxudaxi/datamodel-code-generator | 1248 | koxudaxi__datamodel-code-generator-1248 | [
"1220"
] | e10f1bcce5f0135458a96e4d0e3d4e6ab7e54c3d | diff --git a/datamodel_code_generator/__init__.py b/datamodel_code_generator/__init__.py
--- a/datamodel_code_generator/__init__.py
+++ b/datamodel_code_generator/__init__.py
@@ -2,6 +2,7 @@
import contextlib
import os
+import sys
from datetime import datetime, timezone
from enum import Enum
from pathlib import Path
@@ -308,6 +309,10 @@ def generate(
if is_openapi(input_text_) # type: ignore
else InputFileType.JsonSchema
)
+ print(
+ f'The input file type was determined to be: {input_file_type.value}',
+ file=sys.stderr,
+ )
except: # noqa
raise Error('Invalid file format')
| diff --git a/tests/test_main.py b/tests/test_main.py
--- a/tests/test_main.py
+++ b/tests/test_main.py
@@ -588,7 +588,7 @@ def test_main_no_file(capsys: CaptureFixture) -> None:
assert (
captured.out == (EXPECTED_MAIN_PATH / 'main_no_file' / 'output.py').read_text()
)
- assert not captured.err
+ assert captured.err == 'The input file type was determined to be: openapi\n'
def test_main_extra_template_data_config(capsys: CaptureFixture) -> None:
@@ -614,7 +614,7 @@ def test_main_extra_template_data_config(capsys: CaptureFixture) -> None:
EXPECTED_MAIN_PATH / 'main_extra_template_data_config' / 'output.py'
).read_text()
)
- assert not captured.err
+ assert captured.err == 'The input file type was determined to be: openapi\n'
def test_main_custom_template_dir_old_style(capsys: CaptureFixture) -> None:
@@ -641,7 +641,7 @@ def test_main_custom_template_dir_old_style(capsys: CaptureFixture) -> None:
captured.out
== (EXPECTED_MAIN_PATH / 'main_custom_template_dir' / 'output.py').read_text()
)
- assert not captured.err
+ assert captured.err == 'The input file type was determined to be: openapi\n'
def test_main_custom_template_dir(capsys: CaptureFixture) -> None:
@@ -668,7 +668,7 @@ def test_main_custom_template_dir(capsys: CaptureFixture) -> None:
captured.out
== (EXPECTED_MAIN_PATH / 'main_custom_template_dir' / 'output.py').read_text()
)
- assert not captured.err
+ assert captured.err == 'The input file type was determined to be: openapi\n'
@freeze_time('2019-07-26')
diff --git a/tests/test_main_kr.py b/tests/test_main_kr.py
--- a/tests/test_main_kr.py
+++ b/tests/test_main_kr.py
@@ -146,7 +146,7 @@ def test_main_no_file(capsys: CaptureFixture) -> None:
== (EXPECTED_MAIN_KR_PATH / 'main_no_file' / 'output.py').read_text()
)
- assert not captured.err
+ assert captured.err == 'The input file type was determined to be: openapi\n'
def test_main_custom_template_dir(capsys: CaptureFixture) -> None:
@@ -175,7 +175,7 @@ def test_main_custom_template_dir(capsys: CaptureFixture) -> None:
EXPECTED_MAIN_KR_PATH / 'main_custom_template_dir' / 'output.py'
).read_text()
)
- assert not captured.err
+ assert captured.err == 'The input file type was determined to be: openapi\n'
@freeze_time('2019-07-26')
| (🎁) Log input file type when `--input-file-type` is `auto`
I was left a little confused when my json file was silently detected as jsonschema instead of json.
| I agree. the CLI should show the detail of the error. | 2023-04-15T04:09:13Z | [] | [] |
koxudaxi/datamodel-code-generator | 1249 | koxudaxi__datamodel-code-generator-1249 | [
"1221"
] | 083691c6fea8fabc5000466c40c16298c7a4b463 | diff --git a/datamodel_code_generator/__init__.py b/datamodel_code_generator/__init__.py
--- a/datamodel_code_generator/__init__.py
+++ b/datamodel_code_generator/__init__.py
@@ -168,6 +168,37 @@ def is_openapi(text: str) -> bool:
return 'openapi' in load_yaml(text)
+JSON_SCHEMA_URLS: Tuple[str, ...] = (
+ 'http://json-schema.org/',
+ 'https://json-schema.org/',
+)
+
+
+def is_schema(text: str) -> bool:
+ data = load_yaml(text)
+ if not isinstance(data, dict):
+ return False
+ schema = data.get('$schema')
+ if isinstance(schema, str) and any(
+ schema.startswith(u) for u in JSON_SCHEMA_URLS
+ ): # pragma: no cover
+ return True
+ if isinstance(data.get('type'), str):
+ return True
+ if any(
+ isinstance(data.get(o), list)
+ for o in (
+ 'allOf',
+ 'anyOf',
+ 'oneOf',
+ )
+ ):
+ return True
+ if isinstance(data.get('properties'), dict):
+ return True
+ return False
+
+
class InputFileType(Enum):
Auto = 'auto'
OpenAPI = 'openapi'
@@ -304,11 +335,8 @@ def generate(
if isinstance(input_, Path)
else input_text
)
- input_file_type = (
- InputFileType.OpenAPI
- if is_openapi(input_text_) # type: ignore
- else InputFileType.JsonSchema
- )
+ assert isinstance(input_text_, str)
+ input_file_type = infer_input_type(input_text_)
print(
inferred_message.format(input_file_type.value),
file=sys.stderr,
@@ -483,6 +511,14 @@ def get_header_and_first_line(csv_file: IO[str]) -> Dict[str, Any]:
file.close()
+def infer_input_type(text: str) -> InputFileType:
+ if is_openapi(text):
+ return InputFileType.OpenAPI
+ elif is_schema(text):
+ return InputFileType.JsonSchema
+ return InputFileType.Json
+
+
inferred_message = (
'The input file type was determined to be: {}\nThis can be specificied explicitly with the '
'`--input-file-type` option.'
| diff --git a/tests/data/jsonschema/items_boolean.json b/tests/data/jsonschema/items_boolean.json
--- a/tests/data/jsonschema/items_boolean.json
+++ b/tests/data/jsonschema/items_boolean.json
@@ -1,5 +1,5 @@
{
- "$schema ": "http://json-schema.org/draft-07/schema#",
+ "$schema": "http://json-schema.org/draft-07/schema#",
"type": "object",
"properties": {
"example": {
diff --git a/tests/data/jsonschema/root_id.json b/tests/data/jsonschema/root_id.json
--- a/tests/data/jsonschema/root_id.json
+++ b/tests/data/jsonschema/root_id.json
@@ -1,6 +1,6 @@
{
"$id": "https://example.com/root_id.json",
- "$schema ": "http://json-schema.org/draft-07/schema#",
+ "$schema": "http://json-schema.org/draft-07/schema#",
"definitions": {
"Person": {
"$ref": "person.json"
diff --git a/tests/data/jsonschema/root_id_absolute_url.json b/tests/data/jsonschema/root_id_absolute_url.json
--- a/tests/data/jsonschema/root_id_absolute_url.json
+++ b/tests/data/jsonschema/root_id_absolute_url.json
@@ -1,6 +1,6 @@
{
"$id": "https://example.com/root_id.json",
- "$schema ": "http://json-schema.org/draft-07/schema#",
+ "$schema": "http://json-schema.org/draft-07/schema#",
"definitions": {
"Person": {
"$ref": "person.json"
diff --git a/tests/data/jsonschema/root_id_ref.json b/tests/data/jsonschema/root_id_ref.json
--- a/tests/data/jsonschema/root_id_ref.json
+++ b/tests/data/jsonschema/root_id_ref.json
@@ -1,5 +1,5 @@
{
- "$schema ": "http://json-schema.org/draft-07/schema#",
+ "$schema": "http://json-schema.org/draft-07/schema#",
"definitions": {
"Person": {
"$ref": "root_id.json#/definitions/Person"
diff --git a/tests/data/jsonschema/root_id_self_ref.json b/tests/data/jsonschema/root_id_self_ref.json
--- a/tests/data/jsonschema/root_id_self_ref.json
+++ b/tests/data/jsonschema/root_id_self_ref.json
@@ -1,6 +1,6 @@
{
"$id": "https://example.com/root_id_self_ref.json",
- "$schema ": "http://json-schema.org/draft-07/schema#",
+ "$schema": "http://json-schema.org/draft-07/schema#",
"definitions": {
"Person": {
"$ref": "person.json"
diff --git a/tests/data/openapi/complex_reference.json b/tests/data/openapi/complex_reference.json
--- a/tests/data/openapi/complex_reference.json
+++ b/tests/data/openapi/complex_reference.json
@@ -1,4 +1,5 @@
{
+ "openapi": "3.0.0",
"components": {
"schemas": {
"A": {
diff --git a/tests/data/openapi/datetime.yaml b/tests/data/openapi/datetime.yaml
--- a/tests/data/openapi/datetime.yaml
+++ b/tests/data/openapi/datetime.yaml
@@ -1,3 +1,4 @@
+openapi: "3.0.0"
components:
schemas:
InventoryItem:
diff --git a/tests/data/openapi/definitions.yaml b/tests/data/openapi/definitions.yaml
--- a/tests/data/openapi/definitions.yaml
+++ b/tests/data/openapi/definitions.yaml
@@ -1,3 +1,4 @@
+openapi: "3.0.0"
schemas:
Problem:
properties:
diff --git a/tests/data/openapi/discriminator.yaml b/tests/data/openapi/discriminator.yaml
--- a/tests/data/openapi/discriminator.yaml
+++ b/tests/data/openapi/discriminator.yaml
@@ -1,3 +1,4 @@
+openapi: "3.0.0"
components:
schemas:
ObjectBase:
diff --git a/tests/data/openapi/override_required_all_of.yaml b/tests/data/openapi/override_required_all_of.yaml
--- a/tests/data/openapi/override_required_all_of.yaml
+++ b/tests/data/openapi/override_required_all_of.yaml
@@ -1,3 +1,4 @@
+openapi: "3.0.0"
components:
schemas:
ObjectBase:
diff --git a/tests/data/openapi/x_enum_varnames.yaml b/tests/data/openapi/x_enum_varnames.yaml
--- a/tests/data/openapi/x_enum_varnames.yaml
+++ b/tests/data/openapi/x_enum_varnames.yaml
@@ -1,4 +1,4 @@
-openapi: 3.0
+openapi: "3.0.0"
components:
schemas:
string:
diff --git a/tests/root_id.json b/tests/root_id.json
--- a/tests/root_id.json
+++ b/tests/root_id.json
@@ -1,6 +1,6 @@
{
"$id": "https://example.com/root_id.json",
- "$schema ": "http://json-schema.org/draft-07/schema#",
+ "$schema": "http://json-schema.org/draft-07/schema#",
"definitions": {
"Person": {
"$ref": "person.json"
diff --git a/tests/test_infer_input_type.py b/tests/test_infer_input_type.py
new file mode 100644
--- /dev/null
+++ b/tests/test_infer_input_type.py
@@ -0,0 +1,46 @@
+from pathlib import Path
+
+from datamodel_code_generator import InputFileType, infer_input_type
+
+DATA_PATH: Path = Path(__file__).parent / 'data'
+
+
+def test_infer_input_type():
+ def assert_infer_input_type(file: Path, raw_data_type: InputFileType) -> None:
+ __tracebackhide__ = True
+ if file.is_dir():
+ return
+ if file.suffix not in ('.yaml', '.json'):
+ return
+ result = infer_input_type(file.read_text())
+ assert result == raw_data_type, f'{file} was the wrong type!'
+
+ for file in (DATA_PATH / 'json').rglob('*'):
+ if str(file).endswith('broken.json'):
+ continue
+ assert_infer_input_type(file, InputFileType.Json)
+ for file in (DATA_PATH / 'jsonschema').rglob('*'):
+ if str(file).endswith(('external_child.json', 'external_child.yaml')):
+ continue
+ if 'reference_same_hierarchy_directory' in str(file):
+ continue
+ assert_infer_input_type(file, InputFileType.JsonSchema)
+ for file in (DATA_PATH / 'openapi').rglob('*'):
+ if str(file).endswith(
+ (
+ 'aliases.json',
+ 'extra_data.json',
+ 'invalid.yaml',
+ 'list.json',
+ 'empty_data.json',
+ 'root_model.yaml',
+ 'json_pointer.yaml',
+ 'const.json',
+ )
+ ):
+ continue
+
+ if str(file).endswith('not.json'):
+ assert_infer_input_type(file, InputFileType.Json)
+ continue
+ assert_infer_input_type(file, InputFileType.OpenAPI)
| (🎁) Can we use heuristics to automatically detect the input type of json files?
It would be convenient if JSON files could be automatically detected as either schema files or plain data files.
input:
```json
{
"a": 1
}
```
```
👉 datamodel_code_generator --input test.json
# generated by datamodel-codegen:
# filename: test.json
# timestamp: 2023-04-15T04:05:21+00:00
from __future__ import annotations
from typing import Any
from pydantic import BaseModel
class Model(BaseModel):
__root__: Any
```
| @KotlinIsland
Do you have an example?
The CLI tries to parse the JSON file as JSON Schema.
https://github.com/koxudaxi/datamodel-code-generator/blob/b48cb94edc76851eca0ff637bf81831330ea5808/datamodel_code_generator/__init__.py#L301-L310
So here we check whether the file is an OpenAPI document — but could we do an additional check to see if the file is not a schema at all, but just a normal JSON file? (See the sketch below.)
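For reference, the diff above adds exactly this kind of heuristic; assuming the helper it introduces is exposed as `infer_input_type` (as the new test's imports suggest), the behaviour would roughly be:
```python
from datamodel_code_generator import InputFileType, infer_input_type

# Plain data: no $schema, type, properties, or allOf/anyOf/oneOf keywords -> treated as JSON data
assert infer_input_type('{"a": 1}') == InputFileType.Json

# A schema-looking document is still detected as JSON Schema
assert infer_input_type('{"type": "object", "properties": {}}') == InputFileType.JsonSchema
```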
@KotlinIsland
How can we recognize whether a file is JSON Schema or plain JSON data?
If we apply a strict rule, the current behavior will be broken.
I think some users rely on `auto` to convert rough JSON Schema :thinking:
Yes, I'm not certain if it's possible to determine if a file is schema or data. | 2023-04-15T07:50:00Z | [] | [] |
koxudaxi/datamodel-code-generator | 1422 | koxudaxi__datamodel-code-generator-1422 | [
"1421"
] | 9a6814fd6c6b32399b215fd59c3b7492c038776e | diff --git a/datamodel_code_generator/__init__.py b/datamodel_code_generator/__init__.py
--- a/datamodel_code_generator/__init__.py
+++ b/datamodel_code_generator/__init__.py
@@ -225,6 +225,7 @@ def generate(
output_model_type: DataModelType = DataModelType.PydanticBaseModel,
target_python_version: PythonVersion = PythonVersion.PY_37,
base_class: str = DEFAULT_BASE_CLASS,
+ additional_imports: Optional[List[str]] = None,
custom_template_dir: Optional[Path] = None,
extra_template_data: Optional[DefaultDict[str, Dict[str, Any]]] = None,
validation: bool = False,
@@ -369,6 +370,7 @@ def get_header_and_first_line(csv_file: IO[str]) -> Dict[str, Any]:
data_model_field_type=data_model_types.field_model,
data_type_manager_type=data_model_types.data_type_manager,
base_class=base_class,
+ additional_imports=additional_imports,
custom_template_dir=custom_template_dir,
extra_template_data=extra_template_data,
target_python_version=target_python_version,
diff --git a/datamodel_code_generator/parser/base.py b/datamodel_code_generator/parser/base.py
--- a/datamodel_code_generator/parser/base.py
+++ b/datamodel_code_generator/parser/base.py
@@ -314,6 +314,7 @@ def __init__(
data_type_manager_type: Type[DataTypeManager] = pydantic_model.DataTypeManager,
data_model_field_type: Type[DataModelFieldBase] = pydantic_model.DataModelField,
base_class: Optional[str] = None,
+ additional_imports: Optional[List[str]] = None,
custom_template_dir: Optional[Path] = None,
extra_template_data: Optional[DefaultDict[str, Dict[str, Any]]] = None,
target_python_version: PythonVersion = PythonVersion.PY_37,
@@ -380,7 +381,10 @@ def __init__(
self.data_model_type: Type[DataModel] = data_model_type
self.data_model_root_type: Type[DataModel] = data_model_root_type
self.data_model_field_type: Type[DataModelFieldBase] = data_model_field_type
+
self.imports: Imports = Imports()
+ self._append_additional_imports(additional_imports=additional_imports)
+
self.base_class: Optional[str] = base_class
self.target_python_version: PythonVersion = target_python_version
self.results: List[DataModel] = []
@@ -504,6 +508,16 @@ def iter_source(self) -> Iterator[Source]:
),
)
+ def _append_additional_imports(
+ self, additional_imports: Optional[List[str]]
+ ) -> None:
+ if additional_imports is None:
+ additional_imports = []
+
+ for additional_import_string in additional_imports:
+ new_import = Import.from_full_path(additional_import_string)
+ self.imports.append(new_import)
+
def _get_text_from_url(self, url: str) -> str:
from datamodel_code_generator.http import get_body
diff --git a/datamodel_code_generator/parser/jsonschema.py b/datamodel_code_generator/parser/jsonschema.py
--- a/datamodel_code_generator/parser/jsonschema.py
+++ b/datamodel_code_generator/parser/jsonschema.py
@@ -371,6 +371,7 @@ def __init__(
data_type_manager_type: Type[DataTypeManager] = pydantic_model.DataTypeManager,
data_model_field_type: Type[DataModelFieldBase] = pydantic_model.DataModelField,
base_class: Optional[str] = None,
+ additional_imports: Optional[List[str]] = None,
custom_template_dir: Optional[Path] = None,
extra_template_data: Optional[DefaultDict[str, Dict[str, Any]]] = None,
target_python_version: PythonVersion = PythonVersion.PY_37,
@@ -432,6 +433,7 @@ def __init__(
data_type_manager_type=data_type_manager_type,
data_model_field_type=data_model_field_type,
base_class=base_class,
+ additional_imports=additional_imports,
custom_template_dir=custom_template_dir,
extra_template_data=extra_template_data,
target_python_version=target_python_version,
diff --git a/datamodel_code_generator/parser/openapi.py b/datamodel_code_generator/parser/openapi.py
--- a/datamodel_code_generator/parser/openapi.py
+++ b/datamodel_code_generator/parser/openapi.py
@@ -161,6 +161,7 @@ def __init__(
data_type_manager_type: Type[DataTypeManager] = pydantic_model.DataTypeManager,
data_model_field_type: Type[DataModelFieldBase] = pydantic_model.DataModelField,
base_class: Optional[str] = None,
+ additional_imports: Optional[List[str]] = None,
custom_template_dir: Optional[Path] = None,
extra_template_data: Optional[DefaultDict[str, Dict[str, Any]]] = None,
target_python_version: PythonVersion = PythonVersion.PY_37,
@@ -223,6 +224,7 @@ def __init__(
data_type_manager_type=data_type_manager_type,
data_model_field_type=data_model_field_type,
base_class=base_class,
+ additional_imports=additional_imports,
custom_template_dir=custom_template_dir,
extra_template_data=extra_template_data,
target_python_version=target_python_version,
| diff --git a/tests/parser/test_base.py b/tests/parser/test_base.py
--- a/tests/parser/test_base.py
+++ b/tests/parser/test_base.py
@@ -214,3 +214,21 @@ def __init__(self, filename: str, data: str, fields: List[DataModelFieldBase]):
def render(self) -> str:
return self._data
+
+
+def test_additional_imports():
+ """Test that additional imports are inside imports container."""
+ new_parser = C(
+ source='',
+ additional_imports=['collections.deque'],
+ )
+ assert len(new_parser.imports) == 1
+ assert new_parser.imports['collections'] == {'deque'}
+
+
+def test_no_additional_imports():
+ """Test that not additional imports are not affecting imports container."""
+ new_parser = C(
+ source='',
+ )
+ assert len(new_parser.imports) == 0
diff --git a/tests/parser/test_jsonschema.py b/tests/parser/test_jsonschema.py
--- a/tests/parser/test_jsonschema.py
+++ b/tests/parser/test_jsonschema.py
@@ -439,3 +439,18 @@ def test_get_data_type_array(schema_types, result_types):
],
is_optional='null' in schema_types,
)
+
+
+def test_additional_imports():
+ """Test that additional imports are inside imports container."""
+ new_parser = JsonSchemaParser(source='', additional_imports=['collections.deque'])
+ assert len(new_parser.imports) == 1
+ assert new_parser.imports['collections'] == {'deque'}
+
+
+def test_no_additional_imports():
+ """Test that not additional imports are not affecting imports container."""
+ new_parser = JsonSchemaParser(
+ source='',
+ )
+ assert len(new_parser.imports) == 0
diff --git a/tests/parser/test_openapi.py b/tests/parser/test_openapi.py
--- a/tests/parser/test_openapi.py
+++ b/tests/parser/test_openapi.py
@@ -729,3 +729,18 @@ def test_openapi_parser_with_query_parameters():
/ 'output.py'
).read_text()
)
+
+
+def test_additional_imports():
+ """Test that additional imports are inside imports container."""
+ new_parser = OpenAPIParser(source='', additional_imports=['collections.deque'])
+ assert len(new_parser.imports) == 1
+ assert new_parser.imports['collections'] == {'deque'}
+
+
+def test_no_additional_imports():
+ """Test that not additional imports are not affecting imports container."""
+ new_parser = OpenAPIParser(
+ source='',
+ )
+ assert len(new_parser.imports) == 0
| Ability to add custom imports
**Is your feature request related to a problem? Please describe.**
I can't customize my template to add new imports.
**Describe the solution you'd like**
A new `additional_imports` argument on the `generate` function, which adds extra imports to the final rendered template (see the sketch below).
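A sketch of how the argument added in the diff above would be used — the input and output paths here are illustrative:
```python
from pathlib import Path

from datamodel_code_generator import InputFileType, generate

generate(
    input_=Path('schema.json'),                # illustrative input file
    input_file_type=InputFileType.JsonSchema,
    output=Path('model.py'),
    additional_imports=['collections.deque'],  # dotted paths appended to the generated imports
)
```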
**Describe alternatives you've considered**
-
**Additional context**
-
| 2023-07-12T09:34:36Z | [] | [] |
|
koxudaxi/datamodel-code-generator | 1432 | koxudaxi__datamodel-code-generator-1432 | [
"1419"
] | fab9f575bfec9975f598a02d71fe1fb6b7d87376 | diff --git a/datamodel_code_generator/parser/base.py b/datamodel_code_generator/parser/base.py
--- a/datamodel_code_generator/parser/base.py
+++ b/datamodel_code_generator/parser/base.py
@@ -868,7 +868,9 @@ def __collapse_root_models(
data_type.remove_reference()
root_type_model.reference.children = [
- c for c in root_type_model.reference.children if c.parent
+ c
+ for c in root_type_model.reference.children
+ if getattr(c, 'parent', None)
]
if not root_type_model.reference.children:
| diff --git a/tests/data/expected/main/main_collapse_root_models_with_references_to_flat_types/output.py b/tests/data/expected/main/main_collapse_root_models_with_references_to_flat_types/output.py
new file mode 100644
--- /dev/null
+++ b/tests/data/expected/main/main_collapse_root_models_with_references_to_flat_types/output.py
@@ -0,0 +1,13 @@
+# generated by datamodel-codegen:
+# filename: flat_type.jsonschema
+# timestamp: 2019-07-26T00:00:00+00:00
+
+from __future__ import annotations
+
+from typing import Optional
+
+from pydantic import BaseModel
+
+
+class FooModel(BaseModel):
+ foo: Optional[str] = None
diff --git a/tests/data/openapi/flat_type.jsonschema b/tests/data/openapi/flat_type.jsonschema
new file mode 100644
--- /dev/null
+++ b/tests/data/openapi/flat_type.jsonschema
@@ -0,0 +1,16 @@
+{
+ "title": "Foo",
+ "$schema": "http://json-schema.org/schema#",
+ "description": "",
+ "type": "object",
+ "properties": {
+ "foo": {
+ "$ref": "#/definitions/foo"
+ }
+ },
+ "definitions": {
+ "foo": {
+ "type": "string"
+ }
+ }
+}
diff --git a/tests/test_main.py b/tests/test_main.py
--- a/tests/test_main.py
+++ b/tests/test_main.py
@@ -4339,6 +4339,31 @@ def test_main_collapse_root_models_field_constraints():
)
+@freeze_time('2019-07-26')
+def test_main_collapse_root_models_with_references_to_flat_types():
+ with TemporaryDirectory() as output_dir:
+ output_file: Path = Path(output_dir) / 'output.py'
+ return_code: Exit = main(
+ [
+ '--input',
+ str(OPEN_API_DATA_PATH / 'flat_type.jsonschema'),
+ '--output',
+ str(output_file),
+ '--collapse-root-models',
+ ]
+ )
+
+ assert return_code == Exit.OK
+ assert (
+ output_file.read_text()
+ == (
+ EXPECTED_MAIN_PATH
+ / 'main_collapse_root_models_with_references_to_flat_types'
+ / 'output.py'
+ ).read_text()
+ )
+
+
@freeze_time('2019-07-26')
def test_main_openapi_max_items_enum():
with TemporaryDirectory() as output_dir:
| AttributeError raised when using --collapse-root-models flag
**Describe the bug**
When using the `--collapse-root-models` flag, this error is raised:
```shell
Traceback (most recent call last):
File ".venv/lib/python3.10/site-packages/datamodel_code_generator/__main__.py", line 767, in main
generate(
File ".venv/lib/python3.10/site-packages/datamodel_code_generator/__init__.py", line 431, in generate
results = parser.parse()
File ".venv/lib/python3.10/site-packages/datamodel_code_generator/parser/base.py", line 1083, in parse
self.__collapse_root_models(models, unused_models)
File ".venv/lib/python3.10/site-packages/datamodel_code_generator/parser/base.py", line 856, in __collapse_root_models
root_type_model.reference.children = [
File ".venv/lib/python3.10/site-packages/datamodel_code_generator/parser/base.py", line 857, in <listcomp>
c for c in root_type_model.reference.children if c.parent
AttributeError: 'BaseModel' object has no attribute 'parent'
```
**To Reproduce**
Input schema: https://github.com/CycloneDX/specification/blob/1.5/schema/bom-1.5.schema.json
pyproject.toml config:
```toml
[tool.datamodel-codegen]
base-class = ".base.BaseModel"
collapse-root-models = true
disable-warnings = true
enum-field-as-literal = "one"
field-include-all-keys = true
input-file-type = "jsonschema"
reuse-model = true
set-default-enum-member = true
snake-case-field = true
target-python-version = "3.7"
use-annotated = true
use-default-kwarg = true
use-double-quotes = true
use-schema-description = true
use-standard-collections = true
use-subclass-enum = true
use-title-as-name = true
wrap-string-literal = true
```
Base class code:
```python
from pydantic import BaseModel as _BaseModel
from pydantic import Extra
class BaseModel(_BaseModel):
class Config:
allow_population_by_field_name = True
arbitrary_types_allowed = True
extra = Extra.allow
use_enum_values = True
def __hash__(self) -> int:
return hash(repr(self))
```
**Expected behavior**
Collapse root models without raising an exception
**Version:**
- OS: MacOS 13.4.1
- Python version: 3.10
- datamodel-code-generator version: 0.21.1
It seems to work (at least for my use case; I don't know if it breaks others) if that line is changed to:
```python
c for c in root_type_model.reference.children if getattr(c, "parent", None)
``` | 2023-07-18T09:48:04Z | [] | [] |
koxudaxi/datamodel-code-generator | 1448 | koxudaxi__datamodel-code-generator-1448 | [
"1435",
"1435"
] | e8d600886f894c100d6c4c0277b91a67f89cae48 | diff --git a/datamodel_code_generator/__main__.py b/datamodel_code_generator/__main__.py
--- a/datamodel_code_generator/__main__.py
+++ b/datamodel_code_generator/__main__.py
@@ -36,8 +36,10 @@
import toml
from pydantic import BaseModel
+if TYPE_CHECKING:
+ from typing_extensions import Self
+
from datamodel_code_generator import (
- DEFAULT_BASE_CLASS,
DataModelType,
Error,
InputFileType,
@@ -470,9 +472,6 @@ def get(self, item: str) -> Any:
def __getitem__(self, item: str) -> Any:
return self.get(item)
- def __setitem__(self, key: str, value: Any) -> None:
- setattr(self, key, value)
-
if TYPE_CHECKING:
@classmethod
@@ -580,23 +579,21 @@ def validate_each_item(each_item: Any) -> Tuple[str, str]:
return [validate_each_item(each_item) for each_item in value]
return value # pragma: no cover
- @model_validator(mode='after')
- def validate_root(cls, values: Dict[str, Any]) -> Dict[str, Any]:
- values = cls._validate_use_annotated(values)
- return cls._validate_base_class(values)
-
- @classmethod
- def _validate_use_annotated(cls, values: Dict[str, Any]) -> Dict[str, Any]:
- if values.get('use_annotated'):
- values['field_constraints'] = True
- return values
+ if PYDANTIC_V2:
- @classmethod
- def _validate_base_class(cls, values: Dict[str, Any]) -> Dict[str, Any]:
- if 'base_class' not in values and 'output_model_type' in values:
- if values['output_model_type'] != DataModelType.PydanticBaseModel.value:
- values['base_class'] = ''
- return values
+ @model_validator(mode='after') # type: ignore
+ def validate_root(self: Self) -> Self:
+ if self.use_annotated:
+ self.field_constraints = True
+ return self
+
+ else:
+
+ @model_validator(mode='after')
+ def validate_root(cls, values: Any) -> Any:
+ if values.get('use_annotated'):
+ values['field_constraints'] = True
+ return values
input: Optional[Union[Path, str]] = None
input_file_type: InputFileType = InputFileType.Auto
@@ -605,7 +602,7 @@ def _validate_base_class(cls, values: Dict[str, Any]) -> Dict[str, Any]:
debug: bool = False
disable_warnings: bool = False
target_python_version: PythonVersion = PythonVersion.PY_37
- base_class: str = DEFAULT_BASE_CLASS
+ base_class: str = ''
custom_template_dir: Optional[Path] = None
extra_template_data: Optional[TextIOBase] = None
validation: bool = False
@@ -666,9 +663,11 @@ def merge_args(self, args: Namespace) -> None:
for f in self.get_fields()
if getattr(args, f) is not None
}
- set_args = self._validate_use_annotated(set_args)
- set_args = self._validate_base_class(set_args)
- parsed_args = self.parse_obj(set_args)
+
+ if set_args.get('use_annotated'):
+ set_args['field_constraints'] = True
+
+ parsed_args = Config.parse_obj(set_args)
for field_name in set_args:
setattr(self, field_name, getattr(parsed_args, field_name))
| diff --git a/tests/data/expected/main/main_openapi_custom_id_pydantic_v2/output.py b/tests/data/expected/main/main_openapi_custom_id_pydantic_v2/output.py
new file mode 100644
--- /dev/null
+++ b/tests/data/expected/main/main_openapi_custom_id_pydantic_v2/output.py
@@ -0,0 +1,18 @@
+# generated by datamodel-codegen:
+# filename: custom_id.yaml
+# timestamp: 2019-07-26T00:00:00+00:00
+
+from __future__ import annotations
+
+from typing import Optional
+from uuid import UUID
+
+from pydantic import BaseModel, Field, RootModel
+
+
+class CustomId(RootModel):
+ root: UUID = Field(..., description='My custom ID')
+
+
+class Model(BaseModel):
+ custom_id: Optional[CustomId] = None
diff --git a/tests/data/expected/main/main_openapi_custom_id_pydantic_v2_custom_base/output.py b/tests/data/expected/main/main_openapi_custom_id_pydantic_v2_custom_base/output.py
new file mode 100644
--- /dev/null
+++ b/tests/data/expected/main/main_openapi_custom_id_pydantic_v2_custom_base/output.py
@@ -0,0 +1,20 @@
+# generated by datamodel-codegen:
+# filename: custom_id.yaml
+# timestamp: 2019-07-26T00:00:00+00:00
+
+from __future__ import annotations
+
+from typing import Optional
+from uuid import UUID
+
+from pydantic import Field, RootModel
+
+from custom_base import Base
+
+
+class CustomId(RootModel):
+ root: UUID = Field(..., description='My custom ID')
+
+
+class Model(Base):
+ custom_id: Optional[CustomId] = None
diff --git a/tests/data/openapi/custom_id.yaml b/tests/data/openapi/custom_id.yaml
new file mode 100644
--- /dev/null
+++ b/tests/data/openapi/custom_id.yaml
@@ -0,0 +1,12 @@
+openapi: 3.0.0
+components:
+ schemas:
+ CustomId:
+ description: My custom ID
+ type: string
+ format: uuid
+ Model:
+ type: object
+ properties:
+ custom_id:
+ $ref: "#/components/schemas/CustomId"
\ No newline at end of file
diff --git a/tests/test_main.py b/tests/test_main.py
--- a/tests/test_main.py
+++ b/tests/test_main.py
@@ -5527,3 +5527,57 @@ def test_main_pydantic_v2():
output_file.read_text()
== (EXPECTED_MAIN_PATH / 'main_pydantic_v2' / 'output.py').read_text()
)
+
+
+@freeze_time('2019-07-26')
+def test_main_openapi_custom_id_pydantic_v2():
+ with TemporaryDirectory() as output_dir:
+ output_file: Path = Path(output_dir) / 'output.py'
+ return_code: Exit = main(
+ [
+ '--input',
+ str(OPEN_API_DATA_PATH / 'custom_id.yaml'),
+ '--output',
+ str(output_file),
+ '--output-model-type',
+ 'pydantic_v2.BaseModel',
+ ]
+ )
+ assert return_code == Exit.OK
+ assert (
+ output_file.read_text()
+ == (
+ EXPECTED_MAIN_PATH / 'main_openapi_custom_id_pydantic_v2' / 'output.py'
+ ).read_text()
+ )
+
+
[email protected](
+ not isort.__version__.startswith('4.'),
+ reason="isort 5.x don't sort pydantic modules",
+)
+@freeze_time('2019-07-26')
+def test_main_openapi_custom_id_pydantic_v2_custom_base():
+ with TemporaryDirectory() as output_dir:
+ output_file: Path = Path(output_dir) / 'output.py'
+ return_code: Exit = main(
+ [
+ '--input',
+ str(OPEN_API_DATA_PATH / 'custom_id.yaml'),
+ '--output',
+ str(output_file),
+ '--output-model-type',
+ 'pydantic_v2.BaseModel',
+ '--base-class',
+ 'custom_base.Base',
+ ]
+ )
+ assert return_code == Exit.OK
+ assert (
+ output_file.read_text()
+ == (
+ EXPECTED_MAIN_PATH
+ / 'main_openapi_custom_id_pydantic_v2_custom_base'
+ / 'output.py'
+ ).read_text()
+ )
| Wrong parent class for pydantic V2 root models
**Describe the bug**
The generator uses `pydantic.BaseModel` as the parent class for root models instead of `pydantic.RootModel`.
Example schema (`custom_id.yaml`):
```yaml
openapi: 3.0.0
components:
schemas:
CustomId:
description: My custom ID
type: string
format: uuid
```
Used commandline:
```
$ datamodel-codegen --input custom_id.yaml --output-model-type pydantic_v2.BaseModel --output model.py
```
Contents of `model.py`:
```python
from __future__ import annotations
from uuid import UUID
from pydantic import BaseModel, Field
class CustomId(BaseModel):
root: UUID = Field(..., description='My custom ID')
```
**Expected behavior**
```python
from __future__ import annotations
from uuid import UUID
from pydantic import RootModel, Field
class CustomId(RootModel):
root: UUID = Field(..., description='My custom ID')
```
**Version:**
- OS: [e.g. iOS]
- Python version: 3.10.8
- datamodel-code-generator version: 0.21.1
| @andreyLetenkovWefox
Thank you for creating the issue.
I have tested your schema. But I can't reproduce the issue.
Do you use a custom template for root model?
> @andreyLetenkovWefox Thank you for creating the issue. I have tested your schema. But I can't reproduce the issue. Do you use a custom template for root model?
Sorry, I thought the problem could be reproduced via a CLI generator call, but it really only appears when using the python package:
```python
from pathlib import Path
from datamodel_code_generator import DataModelType, generate
generate(
input_=Path("custom_id.yaml"),
output=Path("model.py"),
output_model_type=DataModelType.PydanticV2BaseModel,
)
```
| 2023-07-24T20:40:51Z | [] | [] |
koxudaxi/datamodel-code-generator | 1,477 | koxudaxi__datamodel-code-generator-1477 | [
"1435"
] | 3cbc02cfe5424b71674be8602cb7e89a6f9b5c9a | diff --git a/datamodel_code_generator/model/pydantic_v2/root_model.py b/datamodel_code_generator/model/pydantic_v2/root_model.py
--- a/datamodel_code_generator/model/pydantic_v2/root_model.py
+++ b/datamodel_code_generator/model/pydantic_v2/root_model.py
@@ -1,6 +1,6 @@
from __future__ import annotations
-from typing import ClassVar
+from typing import Any, ClassVar
from datamodel_code_generator.model.pydantic_v2.base_model import BaseModel
@@ -8,3 +8,14 @@
class RootModel(BaseModel):
TEMPLATE_FILE_PATH: ClassVar[str] = 'pydantic_v2/RootModel.jinja2'
BASE_CLASS: ClassVar[str] = 'pydantic.RootModel'
+
+ def __init__(
+ self,
+ **kwargs: Any,
+ ) -> None:
+ # Remove custom_base_class for Pydantic V2 models; behaviour is different from Pydantic V1 as it will not
+ # be treated as a root model. custom_base_class cannot both implement BaseModel and RootModel!
+ if 'custom_base_class' in kwargs:
+ kwargs.pop('custom_base_class')
+
+ super().__init__(**kwargs)
| diff --git a/tests/model/pydantic_v2/__init__.py b/tests/model/pydantic_v2/__init__.py
new file mode 100644
diff --git a/tests/model/pydantic_v2/test_root_model.py b/tests/model/pydantic_v2/test_root_model.py
new file mode 100644
--- /dev/null
+++ b/tests/model/pydantic_v2/test_root_model.py
@@ -0,0 +1,67 @@
+from datamodel_code_generator.model import DataModelFieldBase
+from datamodel_code_generator.model.pydantic_v2.root_model import RootModel
+from datamodel_code_generator.reference import Reference
+from datamodel_code_generator.types import DataType
+
+
+def test_root_model():
+ root_model = RootModel(
+ fields=[
+ DataModelFieldBase(
+ name='a',
+ data_type=DataType(type='str'),
+ default='abc',
+ required=False,
+ )
+ ],
+ reference=Reference(name='TestRootModel', path='test_root_model'),
+ )
+
+ assert root_model.name == 'TestRootModel'
+ assert root_model.fields == [
+ DataModelFieldBase(
+ name='a',
+ data_type=DataType(type='str'),
+ default='abc',
+ required=False,
+ parent=root_model,
+ )
+ ]
+
+ assert root_model.base_class == 'RootModel'
+ assert root_model.custom_base_class is None
+ assert root_model.render() == (
+ 'class TestRootModel(RootModel):\n' " root: Optional[str] = 'abc'"
+ )
+
+
+def test_root_model_custom_base_class():
+ root_model = RootModel(
+ custom_base_class='test.Test',
+ fields=[
+ DataModelFieldBase(
+ name='a',
+ data_type=DataType(type='str'),
+ default='abc',
+ required=False,
+ )
+ ],
+ reference=Reference(name='TestRootModel', path='test_root_model'),
+ )
+
+ assert root_model.name == 'TestRootModel'
+ assert root_model.fields == [
+ DataModelFieldBase(
+ name='a',
+ data_type=DataType(type='str'),
+ default='abc',
+ required=False,
+ parent=root_model,
+ )
+ ]
+
+ assert root_model.base_class == 'RootModel'
+ assert root_model.custom_base_class is None
+ assert root_model.render() == (
+ 'class TestRootModel(RootModel):\n' " root: Optional[str] = 'abc'"
+ )
| Wrong parent class for pydantic V2 root models
**Describe the bug**
Generator uses `pydantic.BaseModel` as parent class for root model instead of `pydantic.RootModel`
Example schema (`custom_id.yaml`):
```yaml
openapi: 3.0.0
components:
schemas:
CustomId:
description: My custom ID
type: string
format: uuid
```
Used commandline:
```
$ datamodel-codegen --input custom_id.yaml --output-model-type pydantic_v2.BaseModel --output model.py
```
Contents of `model.py`:
```python
from __future__ import annotations
from uuid import UUID
from pydantic import BaseModel, Field
class CustomId(BaseModel):
root: UUID = Field(..., description='My custom ID')
```
**Expected behavior**
```python
from __future__ import annotations
from uuid import UUID
from pydantic import RootModel, Field
class CustomId(RootModel):
root: UUID = Field(..., description='My custom ID')
```
**Version:**
- OS: [e.g. iOS]
- Python version: 3.10.8
- datamodel-code-generator version: 0.21.1
| @andreyLetenkovWefox
Thank you for creating the issue.
I have tested your schema. But I can't reproduce the issue.
Do you use a custom template for root model?
> @andreyLetenkovWefox Thank you for creating the issue. I have tested your schema. But I can't reproduce the issue. Do you use a custom template for root model?
Sorry, I thought the problem could be reproduced via a CLI generator call, but it really only appears when using the python package:
```python
from pathlib import Path
from datamodel_code_generator import DataModelType, generate
generate(
input_=Path("custom_id.yaml"),
output=Path("model.py"),
output_model_type=DataModelType.PydanticV2BaseModel,
)
```
We hit this issue in a slightly different setting: if `output_data_model = "pydantic_v2.BaseModel"` is read from `pyproject.toml` and not passed on the command line, root models have the wrong base class. However, if `--output_data_model "pydantic_v2.BaseModel"` is passed on the command line, the root model base class is correct.
I drafted a fix in #1448 but it still needs quite some work (mostly because it only works with Pydantic v2 at the moment, and relies on using a `Config` instance as input to `generate`, so it won't fix @andreyLetenkovWefox 's example in https://github.com/koxudaxi/datamodel-code-generator/issues/1435#issuecomment-1646081300).
@koxudaxi would you like to see further work in the direction of #1448? or do you have a simpler idea for the fix? It seems that the handling of `base_class` could be simplified in the parser stack, but this looks quite intrusive to me.
> TL;DR: The issue comes from `base_model` incorrectly overriding `RootModel`.
I was also running into this issue, but not in the same way as @andreyLetenkovWefox. Apparently, if you set the `base_class` parameter and use Pydantic V2, it will incorrectly set the Base Model for Root Models to your custom Base Class.
See the following example:
```openapi
openapi: 3.0.0
components:
schemas:
CustomId:
description: My custom ID
type: string
format: uuid
```
```bash
datamodel-codegen --input custom_id.yaml --base-class "utils.dto_class.BaseDtoModel" --output-model-type pydantic_v2.BaseModel
```
This will output:
``` python
from __future__ import annotations
from uuid import UUID
from pydantic import Field
from utils.dto_class import BaseDtoModel
class CustomId(BaseDtoModel):
root: UUID = Field(..., description='My custom ID')
```
This, for Pydantic V2, is wrong as it will be interpreted as a normal model with a literal `root` field (and not a Root Model). In Pydantic V1 this was not an issue because it was being populated through the `__root__` field, which uses `BaseModel` as the base class (just like a normal model).
But here arises another question: **Should `base_class` actually override `RootModel` as well?** In Pydantic V1 this made sense, but for Pydantic V2 probably RootModel should never be overwritten.
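To make the trade-off concrete, here is a minimal hand-written sketch (an illustration of Pydantic v2 behaviour in general, not code from this project) of why a plain `BaseModel`-derived custom base cannot simply stand in for `RootModel`:
```python
from uuid import UUID

from pydantic import BaseModel, RootModel


class CustomIdRoot(RootModel[UUID]):
    # Accepts a bare value, e.g. CustomIdRoot('123e4567-e89b-12d3-a456-426614174000').
    pass


class CustomIdPlain(BaseModel):
    # Same field name, but an ordinary model: it only accepts a mapping like
    # {"root": "..."} and gains none of RootModel's root-value handling.
    root: UUID


CustomIdRoot('123e4567-e89b-12d3-a456-426614174000')        # ok: validates the bare UUID
CustomIdPlain(root='123e4567-e89b-12d3-a456-426614174000')  # ok, but needs the wrapper key
```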
IMO, `RootModel` should not be overwritten even with a custom `base_class` as it implements several methods which are not available in normal models. However, a developer may want to define their custom `RootModel` as well, so not sure what's the correct procedure for this... | 2023-08-06T16:31:38Z | [] | [] |
koxudaxi/datamodel-code-generator | 1,606 | koxudaxi__datamodel-code-generator-1606 | [
"1605"
] | 62c34cdb7a90238b0c960c68bb7f65788ec3d857 | diff --git a/datamodel_code_generator/model/msgspec.py b/datamodel_code_generator/model/msgspec.py
--- a/datamodel_code_generator/model/msgspec.py
+++ b/datamodel_code_generator/model/msgspec.py
@@ -1,6 +1,8 @@
from pathlib import Path
from typing import Any, ClassVar, DefaultDict, Dict, List, Optional, Set, Tuple
+from pydantic import Field
+
from datamodel_code_generator.imports import Import
from datamodel_code_generator.model import DataModel, DataModelFieldBase
from datamodel_code_generator.model.base import UNDEFINED
@@ -10,10 +12,12 @@
IMPORT_MSGSPEC_META,
IMPORT_MSGSPEC_STRUCT,
)
-from datamodel_code_generator.model.pydantic.base_model import Constraints
+from datamodel_code_generator.model.pydantic.base_model import (
+ Constraints as _Constraints,
+)
from datamodel_code_generator.model.rootmodel import RootModel
from datamodel_code_generator.reference import Reference
-from datamodel_code_generator.types import chain_as_tuple
+from datamodel_code_generator.types import chain_as_tuple, get_optional_type
def _has_field_assignment(field: DataModelFieldBase) -> bool:
@@ -71,6 +75,12 @@ def imports(self) -> Tuple[Import, ...]:
return chain_as_tuple(super().imports, extra_imports)
+class Constraints(_Constraints):
+ # To override existing pattern alias
+ regex: Optional[str] = Field(None, alias='regex')
+ pattern: Optional[str] = Field(None, alias='pattern')
+
+
class DataModelField(DataModelFieldBase):
_FIELD_KEYS: ClassVar[Set[str]] = {
'default',
@@ -88,7 +98,7 @@ class DataModelField(DataModelFieldBase):
# 'max_items', # not supported by msgspec
'min_length',
'max_length',
- 'regex',
+ 'pattern',
# 'unique_items', # not supported by msgspec
}
_PARSE_METHOD = 'convert'
@@ -197,6 +207,10 @@ def annotated(self) -> Optional[str]:
meta = f'Meta({", ".join(meta_arguments)})'
+ if not self.required:
+ type_hint = self.data_type.type_hint
+ annotated_type = f'Annotated[{type_hint}, {meta}]'
+ return get_optional_type(annotated_type, self.data_type.use_union_operator)
return f'Annotated[{self.type_hint}, {meta}]'
def _get_default_as_struct_model(self) -> Optional[str]:
| diff --git a/tests/data/expected/main/main_msgspec_struct/output.py b/tests/data/expected/main/main_msgspec_struct/output.py
--- a/tests/data/expected/main/main_msgspec_struct/output.py
+++ b/tests/data/expected/main/main_msgspec_struct/output.py
@@ -40,17 +40,17 @@ class Error(Struct):
class Api(Struct):
- apiKey: Annotated[
- Optional[str], Meta(description='To be used as a dataset parameter value')
+ apiKey: Optional[
+ Annotated[str, Meta(description='To be used as a dataset parameter value')]
] = None
- apiVersionNumber: Annotated[
- Optional[str], Meta(description='To be used as a version parameter value')
+ apiVersionNumber: Optional[
+ Annotated[str, Meta(description='To be used as a version parameter value')]
] = None
- apiUrl: Annotated[
- Optional[str], Meta(description="The URL describing the dataset's fields")
+ apiUrl: Optional[
+ Annotated[str, Meta(description="The URL describing the dataset's fields")]
] = None
- apiDocumentationUrl: Annotated[
- Optional[str], Meta(description='A URL to the API console for each API')
+ apiDocumentationUrl: Optional[
+ Annotated[str, Meta(description='A URL to the API console for each API')]
] = None
diff --git a/tests/data/expected/main/main_pattern_msgspec/output.py b/tests/data/expected/main/main_pattern_msgspec/output.py
new file mode 100644
--- /dev/null
+++ b/tests/data/expected/main/main_pattern_msgspec/output.py
@@ -0,0 +1,25 @@
+# generated by datamodel-codegen:
+# filename: pattern.json
+# timestamp: 2019-07-26T00:00:00+00:00
+
+from __future__ import annotations
+
+from typing import Annotated, Optional
+
+from msgspec import Meta, Struct
+
+
+class Info(Struct):
+ hostName: Optional[str] = None
+ arn: Optional[
+ Annotated[
+ str,
+ Meta(pattern='(^arn:([^:]*):([^:]*):([^:]*):(|\\*|[\\d]{12}):(.+)$)|^\\*$'),
+ ]
+ ] = None
+ tel: Optional[
+ Annotated[str, Meta(pattern='^(\\([0-9]{3}\\))?[0-9]{3}-[0-9]{4}$')]
+ ] = None
+ comment: Optional[
+ Annotated[str, Meta(pattern='[^\\b\\f\\n\\r\\t\\\\a+.?\'"|()]+$')]
+ ] = None
diff --git a/tests/data/expected/main/main_use_annotated_with_msgspec_meta_constraints/output.py b/tests/data/expected/main/main_use_annotated_with_msgspec_meta_constraints/output.py
--- a/tests/data/expected/main/main_use_annotated_with_msgspec_meta_constraints/output.py
+++ b/tests/data/expected/main/main_use_annotated_with_msgspec_meta_constraints/output.py
@@ -12,7 +12,7 @@
class Pet(Struct):
id: Annotated[int, Meta(ge=0, le=9223372036854775807)]
name: Annotated[str, Meta(max_length=256)]
- tag: Annotated[Optional[str], Meta(max_length=64)] = None
+ tag: Optional[Annotated[str, Meta(max_length=64)]] = None
Pets = List[Pet]
@@ -31,13 +31,13 @@ class User(Struct):
id: Annotated[int, Meta(ge=0)]
name: Annotated[str, Meta(max_length=256)]
uid: UID
- tag: Annotated[Optional[str], Meta(max_length=64)] = None
+ tag: Optional[Annotated[str, Meta(max_length=64)]] = None
phones: Optional[List[Phone]] = None
fax: Optional[List[FaxItem]] = None
- height: Annotated[Optional[Union[int, float]], Meta(ge=1.0, le=300.0)] = None
- weight: Annotated[Optional[Union[float, int]], Meta(ge=1.0, le=1000.0)] = None
- age: Annotated[Optional[int], Meta(gt=0, le=200)] = None
- rating: Annotated[Optional[float], Meta(gt=0.0, le=5.0)] = None
+ height: Optional[Annotated[Union[int, float], Meta(ge=1.0, le=300.0)]] = None
+ weight: Optional[Annotated[Union[float, int], Meta(ge=1.0, le=1000.0)]] = None
+ age: Optional[Annotated[int, Meta(gt=0, le=200)]] = None
+ rating: Optional[Annotated[float, Meta(gt=0.0, le=5.0)]] = None
Users = List[User]
@@ -55,17 +55,17 @@ class Error(Struct):
class Api(Struct):
- apiKey: Annotated[
- Optional[str], Meta(description='To be used as a dataset parameter value')
+ apiKey: Optional[
+ Annotated[str, Meta(description='To be used as a dataset parameter value')]
] = None
- apiVersionNumber: Annotated[
- Optional[str], Meta(description='To be used as a version parameter value')
+ apiVersionNumber: Optional[
+ Annotated[str, Meta(description='To be used as a version parameter value')]
] = None
- apiUrl: Annotated[
- Optional[str], Meta(description="The URL describing the dataset's fields")
+ apiUrl: Optional[
+ Annotated[str, Meta(description="The URL describing the dataset's fields")]
] = None
- apiDocumentationUrl: Annotated[
- Optional[str], Meta(description='A URL to the API console for each API')
+ apiDocumentationUrl: Optional[
+ Annotated[str, Meta(description='A URL to the API console for each API')]
] = None
diff --git a/tests/data/expected/main/main_with_aliases_msgspec/output.py b/tests/data/expected/main/main_with_aliases_msgspec/output.py
--- a/tests/data/expected/main/main_with_aliases_msgspec/output.py
+++ b/tests/data/expected/main/main_with_aliases_msgspec/output.py
@@ -39,17 +39,17 @@ class Error(Struct):
class Api(Struct):
- apiKey: Annotated[
- Optional[str], Meta(description='To be used as a dataset parameter value')
+ apiKey: Optional[
+ Annotated[str, Meta(description='To be used as a dataset parameter value')]
] = None
- apiVersionNumber: Annotated[
- Optional[str], Meta(description='To be used as a version parameter value')
+ apiVersionNumber: Optional[
+ Annotated[str, Meta(description='To be used as a version parameter value')]
] = None
- apiUrl: Annotated[
- Optional[str], Meta(description="The URL describing the dataset's fields")
+ apiUrl: Optional[
+ Annotated[str, Meta(description="The URL describing the dataset's fields")]
] = None
- apiDocumentationUrl: Annotated[
- Optional[str], Meta(description='A URL to the API console for each API')
+ apiDocumentationUrl: Optional[
+ Annotated[str, Meta(description='A URL to the API console for each API')]
] = None
diff --git a/tests/test_main.py b/tests/test_main.py
--- a/tests/test_main.py
+++ b/tests/test_main.py
@@ -2688,6 +2688,10 @@ def test_main_openapi_nullable_strict_nullable():
'pydantic_v2.BaseModel',
'main_pattern_pydantic_v2',
),
+ (
+ 'msgspec.Struct',
+ 'main_pattern_msgspec',
+ ),
],
)
@freeze_time('2019-07-26')
@@ -2702,6 +2706,8 @@ def test_main_openapi_pattern(output_model, expected_output):
str(output_file),
'--input-file-type',
'openapi',
+ '--target-python',
+ '3.9',
'--output-model-type',
output_model,
]
| msgspec output uses regex instead of pattern in the Meta constraint
Strings with a pattern constraint are output as `Meta(regex='....')`; however, they should be output as `Meta(pattern='...')`.
**To Reproduce**
Example schema:
```yaml
openapi: "3.0.0"
info:
version: 1.0.0
title: Swagger Petstore
license:
name: MIT
servers:
- url: http://petstore.swagger.io/v1
components:
schemas:
info:
type: object
properties:
hostName:
type: string
format: hostname
arn:
type: string
pattern: '(^arn:([^:]*):([^:]*):([^:]*):(|\*|[\d]{12}):(.+)$)|^\*$'
```
Used commandline:
```bash
$ datamodel-codegen --input pattern.yaml --output output_file.py --input-file-type 'openapi' \
--target-python '3.9' \
--output-model-type msgspec.Struct
```
**Expected behavior**
The regex argument to the Meta annotation should be `pattern`
```python
class Info(Struct):
hostName: Optional[str] = None
arn: Optional[
Annotated[
str,
Meta(pattern='(^arn:([^:]*):([^:]*):([^:]*):(|\\*|[\\d]{12}):(.+)$)|^\\*$'),
]
] = None
```
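As a sanity check, here is a small hand-written snippet (my own sketch against msgspec's public API, not generator output) showing that `Meta` takes `pattern` and that the constraint is enforced on conversion:
```python
import msgspec
from msgspec import Meta, Struct
from typing_extensions import Annotated


class Contact(Struct):
    tel: Annotated[str, Meta(pattern=r'^\(\d{3}\)\d{3}-\d{4}$')]


msgspec.convert({'tel': '(123)456-7890'}, Contact)  # ok
msgspec.convert({'tel': 'nope'}, Contact)           # raises msgspec.ValidationError
```
Passing `regex=` instead is rejected outright, since `Meta` has no such argument.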
**Version:**
- OS: macOS
- Python version: 3.11.4
- datamodel-code-generator version: 0.22
| 2023-10-06T10:15:35Z | [] | [] |
|
koxudaxi/datamodel-code-generator | 1,678 | koxudaxi__datamodel-code-generator-1678 | [
"1677"
] | 8ca47052e88b07f8677ddd07d9aa35d0917c2380 | diff --git a/datamodel_code_generator/parser/jsonschema.py b/datamodel_code_generator/parser/jsonschema.py
--- a/datamodel_code_generator/parser/jsonschema.py
+++ b/datamodel_code_generator/parser/jsonschema.py
@@ -1027,6 +1027,7 @@ def parse_item(
)
elif item.allOf:
all_of_path = get_special_path('allOf', path)
+ all_of_path = [self.model_resolver.resolve_ref(all_of_path)]
return self.parse_all_of(
self.model_resolver.add(
all_of_path, name, singular_name=singular_name, class_name=True
| diff --git a/tests/data/expected/main/main_all_of_ref_self/output.py b/tests/data/expected/main/main_all_of_ref_self/output.py
new file mode 100644
--- /dev/null
+++ b/tests/data/expected/main/main_all_of_ref_self/output.py
@@ -0,0 +1,17 @@
+# generated by datamodel-codegen:
+# filename: all_of_ref_self.json
+# timestamp: 2019-07-26T00:00:00+00:00
+
+from __future__ import annotations
+
+from typing import Optional
+
+from pydantic import BaseModel
+
+
+class Version(BaseModel):
+ __root__: None
+
+
+class Model(BaseModel):
+ version: Optional[Version] = None
diff --git a/tests/data/jsonschema/all_of_ref_self.json b/tests/data/jsonschema/all_of_ref_self.json
new file mode 100644
--- /dev/null
+++ b/tests/data/jsonschema/all_of_ref_self.json
@@ -0,0 +1,17 @@
+{
+ "$schema": "https://json-schema.org/draft/2020-12/schema",
+ "$id": "https://example.com/all_of_ref_self.json",
+ "type": "object",
+ "properties": {
+ "version": {
+ "allOf": [
+ { "$ref": "#/$defs/version" }
+ ]
+ }
+ },
+ "$defs": {
+ "version": {
+ "type": "null"
+ }
+ }
+}
diff --git a/tests/test_main.py b/tests/test_main.py
--- a/tests/test_main.py
+++ b/tests/test_main.py
@@ -5957,3 +5957,24 @@ def test_main_dataclass_default():
EXPECTED_MAIN_PATH / 'main_dataclass_field_default' / 'output.py'
).read_text()
)
+
+
+@freeze_time('2019-07-26')
+def test_main_all_of_ref_self():
+ with TemporaryDirectory() as output_dir:
+ output_file: Path = Path(output_dir) / 'output.py'
+ return_code: Exit = main(
+ [
+ '--input',
+ str(JSON_SCHEMA_DATA_PATH / 'all_of_ref_self.json'),
+ '--output',
+ str(output_file),
+ '--input-file-type',
+ 'jsonschema',
+ ]
+ )
+ assert return_code == Exit.OK
+ assert (
+ output_file.read_text()
+ == (EXPECTED_MAIN_PATH / 'main_all_of_ref_self' / 'output.py').read_text()
+ )
| json schema files making use of `allOf` with a reference and a `$id` throw a `KeyError`
**Describe the bug**
JSON schema files making use of `allOf` with a reference and a `$id` throw a `KeyError`.
My understanding of the issue is that the `references` dictionary contains
non-resolved paths as keys for `allOf` items, and therefore the reference can't
be deleted when we do resolved lookup.
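In other words (a purely illustrative sketch with invented keys, not the actual resolver data structures):
```python
# The allOf item gets registered under its raw, non-resolved special path...
references = {
    'tmp.json/version#-datamodel-code-generator-#-allOf-#-special-#': 'reference',
}

# ...but the later delete() resolves the path against the schema's $id first,
# so the lookup key no longer matches what was stored and a KeyError is raised.
resolved_key = 'https://example.com/tmp.json/version#-datamodel-code-generator-#-allOf-#-special-#'
del references[resolved_key]  # KeyError
```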
**To Reproduce**
Example schema:
```json
{
"$schema": "https://json-schema.org/draft/2020-12/schema",
"$id": "https://example.com/all_of_ref_self.json",
"type": "object",
"properties": {
"version": {
"allOf": [
{ "$ref": "#/$defs/version" }
]
}
},
"$defs": {
"version": {
"type": "null"
}
}
}
```
Used commandline:
```
$ datamodel-codegen --input-file-type jsonschema --input tmp.json
Traceback (most recent call last):
File "/home/pim/Shadow/dev/virtu/vmdesc/env/lib/python3.11/site-packages/datamodel_code_generator/__main__.py", line 386, in main
generate(
File "/home/pim/Shadow/dev/virtu/vmdesc/env/lib/python3.11/site-packages/datamodel_code_generator/__init__.py", line 446, in generate
results = parser.parse()
^^^^^^^^^^^^^^
File "/home/pim/Shadow/dev/virtu/vmdesc/env/lib/python3.11/site-packages/datamodel_code_generator/parser/base.py", line 1136, in parse
self.parse_raw()
File "/home/pim/Shadow/dev/virtu/vmdesc/env/lib/python3.11/site-packages/datamodel_code_generator/parser/jsonschema.py", line 1603, in parse_raw
self._parse_file(self.raw_obj, obj_name, path_parts)
File "/home/pim/Shadow/dev/virtu/vmdesc/env/lib/python3.11/site-packages/datamodel_code_generator/parser/jsonschema.py", line 1687, in _parse_file
self.parse_obj(obj_name, root_obj, path_parts or ['#'])
File "/home/pim/Shadow/dev/virtu/vmdesc/env/lib/python3.11/site-packages/datamodel_code_generator/parser/jsonschema.py", line 1555, in parse_obj
self.parse_object(name, obj, path)
File "/home/pim/Shadow/dev/virtu/vmdesc/env/lib/python3.11/site-packages/datamodel_code_generator/parser/jsonschema.py", line 912, in parse_object
fields = self.parse_object_fields(obj, path, get_module_name(class_name, None))
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/home/pim/Shadow/dev/virtu/vmdesc/env/lib/python3.11/site-packages/datamodel_code_generator/parser/jsonschema.py", line 867, in parse_object_fields
field_type = self.parse_item(modular_name, field, [*path, field_name])
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/home/pim/Shadow/dev/virtu/vmdesc/env/lib/python3.11/site-packages/datamodel_code_generator/parser/jsonschema.py", line 1030, in parse_item
return self.parse_all_of(
^^^^^^^^^^^^^^^^^^
File "/home/pim/Shadow/dev/virtu/vmdesc/env/lib/python3.11/site-packages/datamodel_code_generator/parser/jsonschema.py", line 782, in parse_all_of
return self._parse_object_common_part(
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/home/pim/Shadow/dev/virtu/vmdesc/env/lib/python3.11/site-packages/datamodel_code_generator/parser/jsonschema.py", line 677, in _parse_object_common_part
self.model_resolver.delete(path)
File "/home/pim/Shadow/dev/virtu/vmdesc/env/lib/python3.11/site-packages/datamodel_code_generator/reference.py", line 643, in delete
del self.references[self.resolve_ref(path)]
~~~~~~~~~~~~~~~^^^^^^^^^^^^^^^^^^^^^^^^
KeyError: 'https://abc.com/tmp.json/version#-datamodel-code-generator-#-allOf-#-special-#'
```
**Expected behavior**
I would expect this schema to be parsed successfully (no exceptions).
**Version:**
- OS: Linux (Archlinux, 6.5.7-arch1-1)
- Python version: 3.11.5
- datamodel-code-generator version: 0.17.2 to 0.23.0
**Additional context**
This behaviour seemed to appear between 0.17.1 (works) and 0.17.2 (doesn't work). In particular, with commit https://github.com/koxudaxi/datamodel-code-generator/commit/0187b4aa7955adfc5b42090274e146546a132715
This pattern seems to be parsed correctly by other validators, e.g. https://www.jsonschemavalidator.net/s/FwyNnGC3
Real world jsonschemas also have this pattern, and show the same issue, for example with [stripe/stripe-apps](https://github.com/stripe/stripe-apps/blob/954a21bc807671e72a126ac04d72e2ac88063f5d/schema/stripe-app.schema.json):
```
$ cd $(mktemp -d)
$ python -m venv env
$ . env/bin/activate
$ pip install datamodel-code-generator
$ curl https://raw.githubusercontent.com/stripe/stripe-apps/main/schema/stripe-app.schema.json -o stripe-app.schema.json
$ datamodel-codegen --input stripe-app.schema.json
The input file type was determined to be: jsonschema
This can be specificied explicitly with the `--input-file-type` option.
Traceback (most recent call last):
File "/tmp/tmp.8CqYazcKUp/env/lib/python3.11/site-packages/datamodel_code_generator/__main__.py", line 388, in main
generate(
File "/tmp/tmp.8CqYazcKUp/env/lib/python3.11/site-packages/datamodel_code_generator/__init__.py", line 435, in generate
results = parser.parse()
^^^^^^^^^^^^^^
File "/tmp/tmp.8CqYazcKUp/env/lib/python3.11/site-packages/datamodel_code_generator/parser/base.py", line 1135, in parse
self.parse_raw()
File "/tmp/tmp.8CqYazcKUp/env/lib/python3.11/site-packages/datamodel_code_generator/parser/jsonschema.py", line 1603, in parse_raw
self._parse_file(self.raw_obj, obj_name, path_parts)
File "/tmp/tmp.8CqYazcKUp/env/lib/python3.11/site-packages/datamodel_code_generator/parser/jsonschema.py", line 1687, in _parse_file
self.parse_obj(obj_name, root_obj, path_parts or ['#'])
File "/tmp/tmp.8CqYazcKUp/env/lib/python3.11/site-packages/datamodel_code_generator/parser/jsonschema.py", line 1555, in parse_obj
self.parse_object(name, obj, path)
File "/tmp/tmp.8CqYazcKUp/env/lib/python3.11/site-packages/datamodel_code_generator/parser/jsonschema.py", line 912, in parse_object
fields = self.parse_object_fields(obj, path, get_module_name(class_name, None))
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/tmp/tmp.8CqYazcKUp/env/lib/python3.11/site-packages/datamodel_code_generator/parser/jsonschema.py", line 867, in parse_object_fields
field_type = self.parse_item(modular_name, field, [*path, field_name])
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/tmp/tmp.8CqYazcKUp/env/lib/python3.11/site-packages/datamodel_code_generator/parser/jsonschema.py", line 1024, in parse_item
data_types=self.parse_one_of(
^^^^^^^^^^^^^^^^^^
File "/tmp/tmp.8CqYazcKUp/env/lib/python3.11/site-packages/datamodel_code_generator/parser/jsonschema.py", line 641, in parse_one_of
data_types = self.parse_list_item(name, obj.oneOf, path, obj)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/tmp/tmp.8CqYazcKUp/env/lib/python3.11/site-packages/datamodel_code_generator/parser/jsonschema.py", line 1075, in parse_list_item
return [
^
File "/tmp/tmp.8CqYazcKUp/env/lib/python3.11/site-packages/datamodel_code_generator/parser/jsonschema.py", line 1076, in <listcomp>
self.parse_item(
File "/tmp/tmp.8CqYazcKUp/env/lib/python3.11/site-packages/datamodel_code_generator/parser/jsonschema.py", line 1041, in parse_item
return self.parse_object(
^^^^^^^^^^^^^^^^^^
File "/tmp/tmp.8CqYazcKUp/env/lib/python3.11/site-packages/datamodel_code_generator/parser/jsonschema.py", line 912, in parse_object
fields = self.parse_object_fields(obj, path, get_module_name(class_name, None))
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/tmp/tmp.8CqYazcKUp/env/lib/python3.11/site-packages/datamodel_code_generator/parser/jsonschema.py", line 867, in parse_object_fields
field_type = self.parse_item(modular_name, field, [*path, field_name])
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/tmp/tmp.8CqYazcKUp/env/lib/python3.11/site-packages/datamodel_code_generator/parser/jsonschema.py", line 1030, in parse_item
return self.parse_all_of(
^^^^^^^^^^^^^^^^^^
File "/tmp/tmp.8CqYazcKUp/env/lib/python3.11/site-packages/datamodel_code_generator/parser/jsonschema.py", line 782, in parse_all_of
return self._parse_object_common_part(
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/tmp/tmp.8CqYazcKUp/env/lib/python3.11/site-packages/datamodel_code_generator/parser/jsonschema.py", line 677, in _parse_object_common_part
self.model_resolver.delete(path)
File "/tmp/tmp.8CqYazcKUp/env/lib/python3.11/site-packages/datamodel_code_generator/reference.py", line 643, in delete
del self.references[self.resolve_ref(path)]
~~~~~~~~~~~~~~~^^^^^^^^^^^^^^^^^^^^^^^^
KeyError: 'https://stripe.com/stripe-app.schema.json/post_install_action#-datamodel-code-generator-#-oneOf-#-special-#/1#-datamodel-code-generator-#-object-#-special-#/type#-datamodel-code-generator-#-allOf-#-special-#'
```
| 2023-11-09T22:21:12Z | [] | [] |
|
koxudaxi/datamodel-code-generator | 1,767 | koxudaxi__datamodel-code-generator-1767 | [
"1759"
] | 38bf2b9e62bfb8176ad3c49514461437f954c851 | diff --git a/datamodel_code_generator/model/pydantic_v2/base_model.py b/datamodel_code_generator/model/pydantic_v2/base_model.py
--- a/datamodel_code_generator/model/pydantic_v2/base_model.py
+++ b/datamodel_code_generator/model/pydantic_v2/base_model.py
@@ -87,10 +87,9 @@ def process_const(self) -> None:
self.const = True
self.nullable = False
const = self.extras['const']
- if self.data_type.type == 'str' and isinstance(
- const, str
- ): # pragma: no cover # Literal supports only str
- self.data_type = self.data_type.__class__(literals=[const])
+ self.data_type = self.data_type.__class__(literals=[const])
+ if not self.default:
+ self.default = const
def _process_data_in_str(self, data: Dict[str, Any]) -> None:
if self.const:
@@ -103,7 +102,7 @@ def _process_data_in_str(self, data: Dict[str, Any]) -> None:
def _process_annotated_field_arguments(
self, field_arguments: List[str]
) -> List[str]:
- if not self.required:
+ if not self.required or self.const:
if self.use_default_kwarg:
return [
f'default={repr(self.default)}',
| diff --git a/tests/data/expected/main/use_default_with_const/output.py b/tests/data/expected/main/use_default_with_const/output.py
new file mode 100644
--- /dev/null
+++ b/tests/data/expected/main/use_default_with_const/output.py
@@ -0,0 +1,12 @@
+# generated by datamodel-codegen:
+# filename: use_default_with_const.json
+# timestamp: 2019-07-26T00:00:00+00:00
+
+from __future__ import annotations
+
+from pydantic import BaseModel
+from typing_extensions import Literal
+
+
+class UseDefaultWithConst(BaseModel):
+ foo: Literal['foo'] = 'foo'
diff --git a/tests/data/jsonschema/use_default_with_const.json b/tests/data/jsonschema/use_default_with_const.json
new file mode 100644
--- /dev/null
+++ b/tests/data/jsonschema/use_default_with_const.json
@@ -0,0 +1,10 @@
+{
+ "$schema": "http://json-schema.org/draft-07/schema#",
+ "type": "object",
+ "title": "Use default with const",
+ "properties": {
+ "foo": {
+ "const": "foo"
+ }
+ }
+}
diff --git a/tests/test_main.py b/tests/test_main.py
--- a/tests/test_main.py
+++ b/tests/test_main.py
@@ -1264,6 +1264,28 @@ def test_force_optional():
)
+@freeze_time('2019-07-26')
+def test_use_default_pydantic_v2_with_json_schema_const():
+ with TemporaryDirectory() as output_dir:
+ output_file: Path = Path(output_dir) / 'output.py'
+ return_code: Exit = main(
+ [
+ '--input',
+ str(JSON_SCHEMA_DATA_PATH / 'use_default_with_const.json'),
+ '--output',
+ str(output_file),
+ '--output-model-type',
+ 'pydantic_v2.BaseModel',
+ '--use-default',
+ ]
+ )
+ assert return_code == Exit.OK
+ assert (
+ output_file.read_text()
+ == (EXPECTED_MAIN_PATH / 'use_default_with_const' / 'output.py').read_text()
+ )
+
+
@freeze_time('2019-07-26')
def test_main_with_exclusive():
with TemporaryDirectory() as output_dir:
| JSON Schema `const` value and type ignored when setting defaults for Pydantic V2
**Describe the bug**
Use of `--force-optional` clobbers `--use-one-literal-as-default`. In my opinion `--force-optional` should use defaults where they exist and only fall back to assigning to `None` where they don't exist.
### Input
```json
{
"$schema": "http://json-schema.org/draft-07/schema#",
"type": "object",
"title": "Force optional demo",
"properties": {
"foo": {
"const": "foo"
}
}
}
```
### Used command
```
datamodel-codegen --input force-optional-demo.json --output-model-type pydantic_v2.BaseModel --force-optional --use-one-literal-as-default
```
### Actual output
```python
# generated by datamodel-codegen:
# filename: force-optional-demo.json
# timestamp: 2023-12-05T15:06:01+00:00
from __future__ import annotations
from pydantic import BaseModel
from typing_extensions import Literal
class ForceOptionalDemo(BaseModel):
foo: Literal['foo'] = None
```
### Expected output
```python
# generated by datamodel-codegen:
# filename: force-optional-demo.json
# timestamp: 2023-12-05T15:06:01+00:00
from __future__ import annotations
from pydantic import BaseModel
from typing_extensions import Literal
class ForceOptionalDemo(BaseModel):
foo: Literal['foo'] = 'foo'
```
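For what it's worth, the actual output is not just cosmetically off: a quick hand-written check (assuming Pydantic v2's standard behaviour) shows the `None` default contradicts the annotation and only goes unnoticed because defaults are not validated unless explicitly requested:
```python
from pydantic import BaseModel, Field
from typing_extensions import Literal


class Demo(BaseModel):
    foo: Literal['foo'] = Field(None, validate_default=True)


Demo()  # raises ValidationError because None does not satisfy Literal['foo']
```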
| 2023-12-08T10:00:27Z | [] | [] |
|
koxudaxi/datamodel-code-generator | 1,829 | koxudaxi__datamodel-code-generator-1829 | [
"1821"
] | 1320fcbf0a67d3afa2210b16d5093c915cc33960 | diff --git a/datamodel_code_generator/format.py b/datamodel_code_generator/format.py
--- a/datamodel_code_generator/format.py
+++ b/datamodel_code_generator/format.py
@@ -7,6 +7,7 @@
from warnings import warn
import black
+import black.mode
import isort
from datamodel_code_generator.util import cached_property, load_toml
@@ -131,9 +132,15 @@ def __init__(
if wrap_string_literal is not None:
experimental_string_processing = wrap_string_literal
else:
- experimental_string_processing = config.get(
- 'experimental-string-processing'
- )
+ if black.__version__ < '24.1.0': # type: ignore
+ experimental_string_processing = config.get(
+ 'experimental-string-processing'
+ )
+ else:
+ experimental_string_processing = config.get('preview', False) and (
+ config.get('unstable', False)
+ or 'string_processing' in config.get('enable-unstable-feature', [])
+ )
if experimental_string_processing is not None: # pragma: no cover
if black.__version__.startswith('19.'): # type: ignore
@@ -141,10 +148,16 @@ def __init__(
f"black doesn't support `experimental-string-processing` option" # type: ignore
f' for wrapping string literal in {black.__version__}'
)
- else:
+ elif black.__version__ < '24.1.0': # type: ignore
black_kwargs[
'experimental_string_processing'
] = experimental_string_processing
+ elif experimental_string_processing:
+ black_kwargs['preview'] = True
+ black_kwargs['unstable'] = config.get('unstable', False)
+ black_kwargs['enabled_features'] = {
+ black.mode.Preview.string_processing
+ }
if TYPE_CHECKING:
self.black_mode: black.FileMode
| diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml
--- a/.github/workflows/test.yml
+++ b/.github/workflows/test.yml
@@ -21,6 +21,16 @@ jobs:
black-version: default
python-version: 3.8
pydantic-version: 1.8.2
+ - os: ubuntu-latest
+ isort-version: 5.6.4
+ black-version: 24.1.0
+ python-version: 3.12
+ pydantic-version: 2.4.2
+ - os: ubuntu-latest
+ isort-version: 5.6.4
+ black-version: 23.12.1
+ python-version: 3.12
+ pydantic-version: 2.4.2
exclude:
- os: windows-latest
black-version: 22.1.0
@@ -91,10 +101,10 @@ jobs:
if: matrix.pydantic-version != 'default'
run: |
poetry run pip install pydantic=="${{ matrix.pydantic-version }}"
- - name: Install Black 22.1.0
- if: matrix.black-version == '22.1.0'
+ - name: Install Black ${{ matrix.black-version }}
+ if: matrix.black-version != 'default'
run: |
- poetry run pip install black=="22.1.0"
+ poetry run pip install black=="${{ matrix.black-version }}"
- name: Lint
if: matrix.pydantic-version == 'default'
run: |
diff --git a/tests/parser/test_openapi.py b/tests/parser/test_openapi.py
--- a/tests/parser/test_openapi.py
+++ b/tests/parser/test_openapi.py
@@ -2,6 +2,7 @@
from pathlib import Path
from typing import List, Optional
+import black
import pydantic
import pytest
from packaging import version
@@ -713,6 +714,10 @@ def test_openapi_parser_responses_with_tag():
)
[email protected](
+ black.__version__.split('.')[0] >= '24',
+ reason="Installed black doesn't support the old style",
+)
def test_openapi_parser_with_query_parameters():
parser = OpenAPIParser(
data_model_field_type=DataModelFieldBase,
diff --git a/tests/test_main.py b/tests/test_main.py
--- a/tests/test_main.py
+++ b/tests/test_main.py
@@ -703,6 +703,10 @@ def test_main_custom_template_dir(capsys: CaptureFixture) -> None:
assert captured.err == inferred_message.format('openapi') + '\n'
[email protected](
+ black.__version__.split('.')[0] >= '24',
+ reason="Installed black doesn't support the old style",
+)
@freeze_time('2019-07-26')
def test_pyproject():
if platform.system() == 'Windows':
@@ -1755,6 +1759,10 @@ def test_main_use_standard_collections(tmpdir_factory: TempdirFactory) -> None:
assert result == path.read_text()
[email protected](
+ black.__version__.split('.')[0] >= '24',
+ reason="Installed black doesn't support the old style",
+)
def test_main_use_generic_container_types(tmpdir_factory: TempdirFactory) -> None:
output_directory = Path(tmpdir_factory.mktemp('output'))
@@ -1781,6 +1789,10 @@ def test_main_use_generic_container_types(tmpdir_factory: TempdirFactory) -> Non
assert result == path.read_text()
[email protected](
+ black.__version__.split('.')[0] >= '24',
+ reason="Installed black doesn't support the old style",
+)
@pytest.mark.benchmark
def test_main_use_generic_container_types_standard_collections(
tmpdir_factory: TempdirFactory,
@@ -2366,6 +2378,10 @@ def test_main_openapi_use_one_literal_as_default():
version.parse(pydantic.VERSION) < version.parse('1.9.0'),
reason='Require Pydantic version 1.9.0 or later ',
)
[email protected](
+ black.__version__.split('.')[0] >= '24',
+ reason="Installed black doesn't support the old style",
+)
@freeze_time('2019-07-26')
def test_main_openapi_enum_models_as_literal_all():
with TemporaryDirectory() as output_dir:
@@ -2397,6 +2413,10 @@ def test_main_openapi_enum_models_as_literal_all():
version.parse(pydantic.VERSION) < version.parse('1.9.0'),
reason='Require Pydantic version 1.9.0 or later ',
)
[email protected](
+ black.__version__.split('.')[0] >= '24',
+ reason="Installed black doesn't support the old style",
+)
@freeze_time('2019-07-26')
def test_main_openapi_enum_models_as_literal_py37(capsys):
with TemporaryDirectory() as output_dir:
@@ -2687,6 +2707,10 @@ def test_main_all_of_with_object():
)
[email protected](
+ black.__version__.split('.')[0] >= '24',
+ reason="Installed black doesn't support the old style",
+)
@freeze_time('2019-07-26')
def test_main_combined_array():
with TemporaryDirectory() as output_dir:
@@ -3359,6 +3383,10 @@ def test_main_strict_types():
)
[email protected](
+ black.__version__.split('.')[0] >= '24',
+ reason="Installed black doesn't support the old style",
+)
@freeze_time('2019-07-26')
def test_main_strict_types_all():
with TemporaryDirectory() as output_dir:
diff --git a/tests/test_main_kr.py b/tests/test_main_kr.py
--- a/tests/test_main_kr.py
+++ b/tests/test_main_kr.py
@@ -3,6 +3,7 @@
from pathlib import Path
from tempfile import TemporaryDirectory
+import black
import pytest
from freezegun import freeze_time
@@ -180,6 +181,10 @@ def test_main_custom_template_dir(capsys: CaptureFixture) -> None:
assert captured.err == inferred_message.format('openapi') + '\n'
[email protected](
+ black.__version__.split('.')[0] >= '24',
+ reason="Installed black doesn't support the old style",
+)
@freeze_time('2019-07-26')
def test_pyproject():
with TemporaryDirectory() as output_dir:
| Black 24.1.0 breaks code formatting if wrap-string-literal is set
**Describe the bug**
Black [24.1.0](https://github.com/psf/black/releases/tag/24.1.0) was just released and removes support for the deprecated `--experimental-string-processing` flag (psf/black#4096). This breaks the code in [`format.py`](https://github.com/koxudaxi/datamodel-code-generator/blob/acc6bf604b13626f22fc123d72ae08ff0a114155/datamodel_code_generator/format.py#L146) that uses this option:
```
Traceback (most recent call last):
File ".../python3.11/site-packages/datamodel_code_generator/__main__.py", line 429, in main
generate(
File ".../python3.11/site-packages/datamodel_code_generator/__init__.py", line 463, in generate
results = parser.parse()
^^^^^^^^^^^^^^
File ".../python3.11/site-packages/datamodel_code_generator/parser/base.py", line 1156, in parse
code_formatter: Optional[CodeFormatter] = CodeFormatter(
^^^^^^^^^^^^^^
File ".../python3.11/site-packages/datamodel_code_generator/format.py", line 152, in __init__
self.black_mode = black.FileMode(
^^^^^^^^^^^^^^^
TypeError: Mode.__init__() got an unexpected keyword argument 'experimental_string_processing'
```
**Expected behavior**
No crash.
**Version:**
- OS: Linux
- Python version: 3.11
- datamodel-code-generator version: 0.25.2
- black version: 24.1.0
**Additional context**
Possible mitigation:
- add a temporary upper bound to the `black` version spec in [pyproject.toml](https://github.com/koxudaxi/datamodel-code-generator/blob/acc6bf604b13626f22fc123d72ae08ff0a114155/pyproject.toml#L54)
- same, but in user environment definitions
- use `--preview --enable-unstable-feature string_processing` instead (as suggested by the black release notes).
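A minimal sketch of that last mitigation when driving black programmatically (my assumption of the equivalent API calls for black >= 24.1.0, mirroring `--preview --enable-unstable-feature string_processing` and the fix applied above):
```python
import black
import black.mode

# experimental_string_processing is gone in black 24.1.0; string processing is
# now opted into through the preview feature set instead.
mode = black.FileMode(
    preview=True,
    enabled_features={black.mode.Preview.string_processing},
)
formatted = black.format_str('x = "some very long string literal " * 2', mode=mode)
```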
| @airwoodix
Thank you for creating the issue.
OK, we should fix it. | 2024-02-01T07:23:54Z | [] | [] |
koxudaxi/datamodel-code-generator | 1,942 | koxudaxi__datamodel-code-generator-1942 | [
"1920"
] | 01dd102c4f577a9993ff30946de639051c4b83c9 | diff --git a/datamodel_code_generator/model/msgspec.py b/datamodel_code_generator/model/msgspec.py
--- a/datamodel_code_generator/model/msgspec.py
+++ b/datamodel_code_generator/model/msgspec.py
@@ -33,7 +33,7 @@
def _has_field_assignment(field: DataModelFieldBase) -> bool:
- return bool(field.field) or not (
+ return not (
field.required
or (field.represented_default == 'None' and field.strip_default_none)
)
@@ -48,7 +48,9 @@ def import_extender(cls: Type[DataModelFieldBaseT]) -> Type[DataModelFieldBaseT]
@wraps(original_imports.fget) # type: ignore
def new_imports(self: DataModelFieldBaseT) -> Tuple[Import, ...]:
extra_imports = []
- if self.field:
+ field = self.field
+ # TODO: Improve field detection
+ if field and field.startswith('field('):
extra_imports.append(IMPORT_MSGSPEC_FIELD)
if self.field and 'lambda: convert' in self.field:
extra_imports.append(IMPORT_MSGSPEC_CONVERT)
@@ -177,6 +179,8 @@ def __str__(self) -> str:
if self.default != UNDEFINED and self.default is not None:
data['default'] = self.default
+ elif not self.required:
+ data['default'] = None
if self.required:
data = {
| diff --git a/tests/data/expected/main/main_msgspec_struct_snake_case/output.py b/tests/data/expected/main/main_msgspec_struct_snake_case/output.py
new file mode 100644
--- /dev/null
+++ b/tests/data/expected/main/main_msgspec_struct_snake_case/output.py
@@ -0,0 +1,66 @@
+# generated by datamodel-codegen:
+# filename: api_ordered_required_fields.yaml
+# timestamp: 2019-07-26T00:00:00+00:00
+
+from __future__ import annotations
+
+from typing import List, Optional
+
+from msgspec import Meta, Struct, field
+from typing_extensions import Annotated
+
+
+class Pet(Struct):
+ id: int
+ name: str
+ before_tag: str = field(name='beforeTag')
+ tag: Optional[str] = None
+
+
+Pets = List[Pet]
+
+
+class User(Struct):
+ id: int
+ name: str
+ tag: Optional[str] = None
+
+
+Users = List[User]
+
+
+Id = str
+
+
+Rules = List[str]
+
+
+class Error(Struct):
+ code: int
+ message: str
+
+
+class Api(Struct):
+ api_key: Optional[
+ Annotated[str, Meta(description='To be used as a dataset parameter value')]
+ ] = field(name='apiKey', default=None)
+ api_version_number: Optional[
+ Annotated[str, Meta(description='To be used as a version parameter value')]
+ ] = field(name='apiVersionNumber', default=None)
+ api_url: Optional[
+ Annotated[str, Meta(description="The URL describing the dataset's fields")]
+ ] = field(name='apiUrl', default=None)
+ api_documentation_url: Optional[
+ Annotated[str, Meta(description='A URL to the API console for each API')]
+ ] = field(name='apiDocumentationUrl', default=None)
+
+
+Apis = List[Api]
+
+
+class Event(Struct):
+ name: Optional[str] = None
+
+
+class Result(Struct):
+ event: Optional[Event] = None
diff --git a/tests/data/expected/main/main_with_aliases_msgspec/output.py b/tests/data/expected/main/main_with_aliases_msgspec/output.py
--- a/tests/data/expected/main/main_with_aliases_msgspec/output.py
+++ b/tests/data/expected/main/main_with_aliases_msgspec/output.py
@@ -57,7 +57,7 @@ class Api(Struct):
class Event(Struct):
- name_: Optional[str] = field(name='name')
+ name_: Optional[str] = field(name='name', default=None)
class Result(Struct):
diff --git a/tests/data/openapi/api_ordered_required_fields.yaml b/tests/data/openapi/api_ordered_required_fields.yaml
new file mode 100644
--- /dev/null
+++ b/tests/data/openapi/api_ordered_required_fields.yaml
@@ -0,0 +1,182 @@
+openapi: "3.0.0"
+info:
+ version: 1.0.0
+ title: Swagger Petstore
+ license:
+ name: MIT
+servers:
+ - url: http://petstore.swagger.io/v1
+paths:
+ /pets:
+ get:
+ summary: List all pets
+ operationId: listPets
+ tags:
+ - pets
+ parameters:
+ - name: limit
+ in: query
+ description: How many items to return at one time (max 100)
+ required: false
+ schema:
+ type: integer
+ format: int32
+ responses:
+ '200':
+ description: A paged array of pets
+ headers:
+ x-next:
+ description: A link to the next page of responses
+ schema:
+ type: string
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/Pets"
+ default:
+ description: unexpected error
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/Error"
+ x-amazon-apigateway-integration:
+ uri:
+ Fn::Sub: arn:aws:apigateway:${AWS::Region}:lambda:path/2015-03-31/functions/${PythonVersionFunction.Arn}/invocations
+ passthroughBehavior: when_no_templates
+ httpMethod: POST
+ type: aws_proxy
+ post:
+ summary: Create a pet
+ operationId: createPets
+ tags:
+ - pets
+ responses:
+ '201':
+ description: Null response
+ default:
+ description: unexpected error
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/Error"
+ x-amazon-apigateway-integration:
+ uri:
+ Fn::Sub: arn:aws:apigateway:${AWS::Region}:lambda:path/2015-03-31/functions/${PythonVersionFunction.Arn}/invocations
+ passthroughBehavior: when_no_templates
+ httpMethod: POST
+ type: aws_proxy
+ /pets/{petId}:
+ get:
+ summary: Info for a specific pet
+ operationId: showPetById
+ tags:
+ - pets
+ parameters:
+ - name: petId
+ in: path
+ required: true
+ description: The id of the pet to retrieve
+ schema:
+ type: string
+ responses:
+ '200':
+ description: Expected response to a valid request
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/Pets"
+ default:
+ description: unexpected error
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/Error"
+ x-amazon-apigateway-integration:
+ uri:
+ Fn::Sub: arn:aws:apigateway:${AWS::Region}:lambda:path/2015-03-31/functions/${PythonVersionFunction.Arn}/invocations
+ passthroughBehavior: when_no_templates
+ httpMethod: POST
+ type: aws_proxy
+components:
+ schemas:
+ Pet:
+ required:
+ - id
+ - name
+ - beforeTag
+ properties:
+ id:
+ type: integer
+ format: int64
+ default: 1
+ name:
+ type: string
+ beforeTag:
+ type: string
+ tag:
+ type: string
+ Pets:
+ type: array
+ items:
+ $ref: "#/components/schemas/Pet"
+ Users:
+ type: array
+ items:
+ required:
+ - id
+ - name
+ properties:
+ id:
+ type: integer
+ format: int64
+ name:
+ type: string
+ tag:
+ type: string
+ Id:
+ type: string
+ Rules:
+ type: array
+ items:
+ type: string
+ Error:
+ description: error result
+ required:
+ - code
+ - message
+ properties:
+ code:
+ type: integer
+ format: int32
+ message:
+ type: string
+ apis:
+ type: array
+ items:
+ type: object
+ properties:
+ apiKey:
+ type: string
+ description: To be used as a dataset parameter value
+ apiVersionNumber:
+ type: string
+ description: To be used as a version parameter value
+ apiUrl:
+ type: string
+ format: uri
+ description: "The URL describing the dataset's fields"
+ apiDocumentationUrl:
+ type: string
+ format: uri
+ description: A URL to the API console for each API
+ Event:
+ type: object
+ description: Event object
+ properties:
+ name:
+ type: string
+ Result:
+ type: object
+ properties:
+ event:
+ $ref: '#/components/schemas/Event'
diff --git a/tests/test_main.py b/tests/test_main.py
--- a/tests/test_main.py
+++ b/tests/test_main.py
@@ -6139,6 +6139,33 @@ def test_main_msgspec_struct():
)
+@freeze_time('2019-07-26')
+def test_main_msgspec_struct_snake_case():
+ with TemporaryDirectory() as output_dir:
+ output_file: Path = Path(output_dir) / 'output.py'
+ return_code: Exit = main(
+ [
+ '--input',
+ str(OPEN_API_DATA_PATH / 'api_ordered_required_fields.yaml'),
+ '--output',
+ str(output_file),
+ # min msgspec python version is 3.8
+ '--target-python-version',
+ '3.8',
+ '--snake-case-field',
+ '--output-model-type',
+ 'msgspec.Struct',
+ ]
+ )
+ assert return_code == Exit.OK
+ assert (
+ output_file.read_text()
+ == (
+ EXPECTED_MAIN_PATH / 'main_msgspec_struct_snake_case' / 'output.py'
+ ).read_text()
+ )
+
+
@freeze_time('2019-07-26')
@pytest.mark.skipif(
black.__version__.split('.')[0] == '19',
| msgspec: Optional fields are missing a default when using `--snake-case-field`
### Steps to reproduce
1. Download the NVD CVE [schema][schema]
2. Generate a msgspec model:
```sh
datamodel-codegen \
--input $schema_json \
--input-file-type jsonschema \
--output-model-type 'msgspec.Struct' \
# This is important I think
--snake-case-field \
--output "."
```
3. (Ignore the circular imports #836)
4. (Ignore wrong field ordering #1919)
5. Look at the `class CpeMatch` (and most other classes as well).
```python
class CpeMatch(Struct, kw_only=True):
vulnerable: bool
criteria: str
match_criteria_id: str = field(name='matchCriteriaId')
version_start_excluding: Optional[str] = field(name='versionStartExcluding')
version_start_including: Optional[str] = field(name='versionStartIncluding')
version_end_excluding: Optional[str] = field(name='versionEndExcluding')
version_end_including: Optional[str] = field(name='versionEndIncluding')
```
vs
```json
"cpe_match": {
"description": "CPE match string or range",
"type": "object",
"properties": {
"vulnerable": {"type": "boolean"},
"criteria": {"type": "string"},
"matchCriteriaId": {"type": "string", "format": "uuid"},
"versionStartExcluding": {"type": "string"},
"versionStartIncluding": {"type": "string"},
"versionEndExcluding": {"type": "string"},
"versionEndIncluding": {"type": "string"}
},
"required": ["vulnerable", "criteria", "matchCriteriaId"],
"additionalProperties": false
},
```
Note that the optional fields are missing the `default=None` parameter in the `field` call.
[schema]: https://csrc.nist.gov/schema/nvd/api/2.0/cve_api_json_2.0.schema
### Expected behavior
The field should have a default of value `None`.
### Workaround
Do not use `--snake-case-field`.
### Setup
```sh
$ datamodel-codegen --version
0.25.5
$ python --version
Python 3.11.8
```
| 2024-05-01T20:21:48Z | [] | [] |
|
koxudaxi/datamodel-code-generator | 1,962 | koxudaxi__datamodel-code-generator-1962 | [
"1910"
] | 5bab6270de86a83ff70358d87ca55cba7954b59f | diff --git a/datamodel_code_generator/parser/jsonschema.py b/datamodel_code_generator/parser/jsonschema.py
--- a/datamodel_code_generator/parser/jsonschema.py
+++ b/datamodel_code_generator/parser/jsonschema.py
@@ -1311,6 +1311,10 @@ def parse_root_type(
data_type = self.data_type_manager.get_data_type_from_full_path(
obj.custom_type_path, is_custom_type=True
)
+ elif obj.is_array:
+ data_type = self.parse_array_fields(
+ name, obj, get_special_path('array', path)
+ ).data_type
elif obj.anyOf or obj.oneOf:
reference = self.model_resolver.add(
path, name, loaded=True, class_name=True
| diff --git a/tests/data/expected/main/main_one_of_with_sub_schema_array_item/output.py b/tests/data/expected/main/main_one_of_with_sub_schema_array_item/output.py
new file mode 100644
--- /dev/null
+++ b/tests/data/expected/main/main_one_of_with_sub_schema_array_item/output.py
@@ -0,0 +1,17 @@
+# generated by datamodel-codegen:
+# filename: one_of_with_sub_schema_array_item.json
+# timestamp: 2019-07-26T00:00:00+00:00
+
+from __future__ import annotations
+
+from typing import List, Optional, Union
+
+from pydantic import AnyUrl, BaseModel, Field
+
+
+class SpatialPlan(BaseModel):
+ officialDocument: Optional[Union[str, List[AnyUrl]]] = Field(
+ None,
+ description='Link to the official documents that relate to the spatial plan.',
+ title='officialDocument',
+ )
diff --git a/tests/data/jsonschema/one_of_with_sub_schema_array_item.json b/tests/data/jsonschema/one_of_with_sub_schema_array_item.json
new file mode 100644
--- /dev/null
+++ b/tests/data/jsonschema/one_of_with_sub_schema_array_item.json
@@ -0,0 +1,25 @@
+{
+ "$schema": "http://json-schema.org/draft-04/schema#",
+ "title": "SpatialPlan",
+ "type": "object",
+ "properties": {
+ "officialDocument": {
+ "title": "officialDocument",
+ "description": "Link to the official documents that relate to the spatial plan.",
+ "oneOf": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "array",
+ "minItems": 1,
+ "items": {
+ "type": "string",
+ "format": "uri"
+ },
+ "uniqueItems": true
+ }
+ ]
+ }
+ }
+}
diff --git a/tests/test_main.py b/tests/test_main.py
--- a/tests/test_main.py
+++ b/tests/test_main.py
@@ -6798,3 +6798,30 @@ def test_main_root_one_of():
path.relative_to(expected_directory)
).read_text()
assert result == path.read_text()
+
+
+@freeze_time('2019-07-26')
+def test_one_of_with_sub_schema_array_item():
+ with TemporaryDirectory() as output_dir:
+ output_file: Path = Path(output_dir) / 'output.py'
+ return_code: Exit = main(
+ [
+ '--input',
+ str(JSON_SCHEMA_DATA_PATH / 'one_of_with_sub_schema_array_item.json'),
+ '--output',
+ str(output_file),
+ '--input-file-type',
+ 'jsonschema',
+ '--output-model-type',
+ 'pydantic_v2.BaseModel',
+ ]
+ )
+ assert return_code == Exit.OK
+ assert (
+ output_file.read_text()
+ == (
+ EXPECTED_MAIN_PATH
+ / 'main_one_of_with_sub_schema_array_item'
+ / 'output.py'
+ ).read_text()
+ )
| oneOf with subschema array items not incorporated/generated as Any for pydantic.v2
**Describe the bug**
When using a JSON schema as input with a oneOf construct where one option is an array with items defined in a subschema, the resulting pydantic v2 model does not incorporate the subschema definition, but rather uses `list[Any]`
**To Reproduce**
The following JSON schema snippet:
```json
"SpatialPlan": {
"type": "object",
"properties": {
"officialDocument": {
"title": "officialDocument",
"description": "Link to the official documents that relate to the spatial plan.",
"oneOf": [
{
"$ref": "definitions/voidable.json#/definitions/Voidable"
},
{
"type": "array",
"minItems": 1,
"items": {
"$ref": "definitions/ref.json#/definitions/FeatureRef"
},
"uniqueItems": true
}
]
}
```
leads to the pydantic v2 model:
```python
class OfficialDocument(RootModel[list[Any]]):
root: Annotated[
list[Any],
Field(
description='Link to the official documents that relate to the spatial plan.',
min_length=1,
title='officialDocument',
),
]
class SpatialPlan(BaseModel):
officialDocument: Annotated[
Voidable | OfficialDocument,
Field(
description='Link to the official documents that relate to the spatial plan.',
title='officialDocument',
),
]
```
Used commandline:
```
$ datamodel-codegen --target-python-version 3.10 --use-union-operator --use-standard-collections --use-schema-description --use-annotated --collapse-root-models --output-model-type pydantic_v2.BaseModel --input input.json --output output.py
```
**Expected behavior**
The resulting pydantic model should look like this:
```python
class OfficialDocument(RootModel[list[FeatureRef]]):
root: Annotated[
list[FeatureRef],
Field(
description="Link to the official documents that relate to the spatial plan.",
min_length=1,
title="officialDocument",
),
]
```
Or, maybe even more preferably, the additional RootModel definition should be dropped as a whole:
```python
class SpatialPlan(BaseModel):
officialDocument: Annotated[
list[FeatureRef] | Voidable,
Field(
description="Link to the official documents that relate to the spatial plan.",
min_length=1,
title="officialDocument",
),
]
```
**Version:**
- OS: Ubuntu 22.04 (WSL)
- Python version: 3.10
- datamodel-code-generator version: 0.25.5
| Is this related to: https://github.com/koxudaxi/datamodel-code-generator/blob/fcab9a4d555d4b96d64bb277f974bb7507982fb2/datamodel_code_generator/parser/jsonschema.py#L681-L694
If so - or if you can provide another hint - maybe we can have a look and work on a PR. This issue is really hampering our use case.
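For anyone who wants to poke at that code path, a minimal reproduction sketch is below. It simply drives the published CLI with the same flags as the report (the input/output paths are placeholders, not files from this repo):

```python
# Reproduction sketch: run datamodel-codegen against the SpatialPlan schema
# from the report and inspect the generated module for the list[Any] collapse.
# "input.json" and "output.py" are placeholder paths.
import subprocess
from pathlib import Path

schema = Path("input.json")   # the SpatialPlan schema snippet from the report
output = Path("output.py")

subprocess.run(
    [
        "datamodel-codegen",
        "--input", str(schema),
        "--input-file-type", "jsonschema",
        "--output", str(output),
        "--output-model-type", "pydantic_v2.BaseModel",
        "--use-annotated",
        "--collapse-root-models",
    ],
    check=True,
)
print(output.read_text())     # look for list[Any] in OfficialDocument
```

Toggling `--use-annotated` in and out of the argument list should show the `list[Any]` collapse appearing and disappearing, which would match the observation in the next comment.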
I've been looking into a similar issue on my project - so far I think it may be related to enabling the `--field-constraints` option, which is also enabled by using the `--use-annotated` option.
I'm working off of a very slightly modified version of the [CycloneDX 1.5 schema](https://cyclonedx.org/docs/1.5/json/#tab-pane_components_items_licenses_oneOf_i1), where the `licenses` field here is changed from an `array` to `object` type (due to some other issue with datamodel-code-generator parsing the schema). I expect to get a Python class somewhere that includes the `expression` and `bom-ref` fields. Here's what I'm seeing using datamodel-codegen 0.25.6, with the command
`datamodel-codegen --input ~/temp/modified-bom-1.5.schema.json --output output-license-obj-annotated --use-annotated`:
```
class LicenseChoice1(BaseModel):
__root__: Annotated[
List[Any],
Field(
description='A tuple of exactly one SPDX License Expression.',
max_items=1,
min_items=1,
title='SPDX License Expression',
),
]
class LicenseChoice(BaseModel):
__root__: Annotated[
Union[List[LicenseChoiceItem], LicenseChoice1],
Field(
description='EITHER (list of SPDX licenses and/or named licenses) OR (tuple of one SPDX License Expression)',
title='License Choice',
),
]
```
When I remove `--use-annotated`, I get something more like what I expect:
```
class LicenseChoiceItem1(BaseModel):
class Config:
extra = Extra.forbid
expression: str = Field(
...,
examples=[
'Apache-2.0 AND (MIT OR GPL-2.0-only)',
'GPL-3.0-only WITH Classpath-exception-2.0',
],
title='SPDX License Expression',
)
bom_ref: Optional[RefType] = Field(
None,
alias='bom-ref',
description='An optional identifier which can be used to reference the license elsewhere in the BOM. Every bom-ref MUST be unique within the BOM.',
title='BOM Reference',
)
class LicenseChoice(BaseModel):
__root__: Union[List[LicenseChoiceItem], List[LicenseChoiceItem1]] = Field(
...,
description='EITHER (list of SPDX licenses and/or named licenses) OR (tuple of one SPDX License Expression)',
title='License Choice',
)
```
I'll keep digging, but for now it appears that using annotations/field constraints ends up dropping type information somewhere down that path. | 2024-05-15T16:07:22Z | [] | [] |
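To make the "dropped type information" concrete, here is a hand-written pydantic v2 sketch (not generator output; `FeatureRef` is a hypothetical stand-in for the model that would be generated from `definitions/ref.json`) contrasting the reported output with the expected one:

```python
from typing import Annotated, Any

from pydantic import BaseModel, Field, RootModel


class FeatureRef(BaseModel):  # hypothetical stand-in for the generated model
    href: str


# What the generator currently emits: the array item type collapses to Any.
class OfficialDocumentActual(RootModel[list[Any]]):
    root: Annotated[list[Any], Field(min_length=1)]


# What the issue expects: the subschema item type survives the oneOf branch.
class OfficialDocumentExpected(RootModel[list[FeatureRef]]):
    root: Annotated[list[FeatureRef], Field(min_length=1)]


print(OfficialDocumentExpected([{"href": "https://example.org/plan/doc-1"}]))
```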
blakeblackshear/frigate | 329 | blakeblackshear__frigate-329 | [
"575"
] | a1cc9ad1f0a3ea3e3a1c8163c5ff3c27065a9be4 | diff --git a/detect_objects.py b/detect_objects.py
deleted file mode 100644
--- a/detect_objects.py
+++ /dev/null
@@ -1,441 +0,0 @@
-import faulthandler; faulthandler.enable()
-import os
-import signal
-import sys
-import traceback
-import signal
-import cv2
-import time
-import datetime
-import queue
-import yaml
-import json
-import threading
-import multiprocessing as mp
-import subprocess as sp
-import numpy as np
-import logging
-from flask import Flask, Response, make_response, jsonify, request
-import paho.mqtt.client as mqtt
-
-from frigate.video import capture_camera, track_camera, get_ffmpeg_input, get_frame_shape, CameraCapture, start_or_restart_ffmpeg
-from frigate.object_processing import TrackedObjectProcessor
-from frigate.events import EventProcessor
-from frigate.util import EventsPerSecond
-from frigate.edgetpu import EdgeTPUProcess
-
-FRIGATE_VARS = {k: v for k, v in os.environ.items() if k.startswith('FRIGATE_')}
-
-CONFIG_FILE = os.environ.get('CONFIG_FILE', '/config/config.yml')
-
-if CONFIG_FILE.endswith(".yml"):
- with open(CONFIG_FILE) as f:
- CONFIG = yaml.safe_load(f)
-elif CONFIG_FILE.endswith(".json"):
- with open(CONFIG_FILE) as f:
- CONFIG = json.load(f)
-
-CACHE_DIR = CONFIG.get('save_clips', {}).get('cache_dir', '/cache')
-CLIPS_DIR = CONFIG.get('save_clips', {}).get('clips_dir', '/clips')
-
-if not os.path.exists(CACHE_DIR) and not os.path.islink(CACHE_DIR):
- os.makedirs(CACHE_DIR)
-if not os.path.exists(CLIPS_DIR) and not os.path.islink(CLIPS_DIR):
- os.makedirs(CLIPS_DIR)
-
-MQTT_HOST = CONFIG['mqtt']['host']
-MQTT_PORT = CONFIG.get('mqtt', {}).get('port', 1883)
-MQTT_TOPIC_PREFIX = CONFIG.get('mqtt', {}).get('topic_prefix', 'frigate')
-MQTT_USER = CONFIG.get('mqtt', {}).get('user')
-MQTT_PASS = CONFIG.get('mqtt', {}).get('password')
-if not MQTT_PASS is None:
- MQTT_PASS = MQTT_PASS.format(**FRIGATE_VARS)
-MQTT_CLIENT_ID = CONFIG.get('mqtt', {}).get('client_id', 'frigate')
-
-# Set the default FFmpeg config
-FFMPEG_CONFIG = CONFIG.get('ffmpeg', {})
-FFMPEG_DEFAULT_CONFIG = {
- 'global_args': FFMPEG_CONFIG.get('global_args',
- ['-hide_banner','-loglevel','panic']),
- 'hwaccel_args': FFMPEG_CONFIG.get('hwaccel_args',
- []),
- 'input_args': FFMPEG_CONFIG.get('input_args',
- ['-avoid_negative_ts', 'make_zero',
- '-fflags', 'nobuffer',
- '-flags', 'low_delay',
- '-strict', 'experimental',
- '-fflags', '+genpts+discardcorrupt',
- '-rtsp_transport', 'tcp',
- '-stimeout', '5000000',
- '-use_wallclock_as_timestamps', '1']),
- 'output_args': FFMPEG_CONFIG.get('output_args',
- ['-f', 'rawvideo',
- '-pix_fmt', 'yuv420p'])
-}
-
-GLOBAL_OBJECT_CONFIG = CONFIG.get('objects', {})
-
-WEB_PORT = CONFIG.get('web_port', 5000)
-DETECTORS = CONFIG.get('detectors', {'coral': {'type': 'edgetpu', 'device': 'usb'}})
-
-class FrigateWatchdog(threading.Thread):
- def __init__(self, camera_processes, config, detectors, detection_queue, out_events, tracked_objects_queue, stop_event):
- threading.Thread.__init__(self)
- self.camera_processes = camera_processes
- self.config = config
- self.detectors = detectors
- self.detection_queue = detection_queue
- self.out_events = out_events
- self.tracked_objects_queue = tracked_objects_queue
- self.stop_event = stop_event
-
- def run(self):
- time.sleep(10)
- while True:
- # wait a bit before checking
- time.sleep(10)
-
- if self.stop_event.is_set():
- print(f"Exiting watchdog...")
- break
-
- now = datetime.datetime.now().timestamp()
-
- # check the detection processes
- for detector in self.detectors.values():
- detection_start = detector.detection_start.value
- if (detection_start > 0.0 and
- now - detection_start > 10):
- print("Detection appears to be stuck. Restarting detection process")
- detector.start_or_restart()
- elif not detector.detect_process.is_alive():
- print("Detection appears to have stopped. Restarting detection process")
- detector.start_or_restart()
-
- # check the camera processes
- for name, camera_process in self.camera_processes.items():
- process = camera_process['process']
- if not process.is_alive():
- print(f"Track process for {name} is not alive. Starting again...")
- camera_process['camera_fps'].value = 0.0
- camera_process['process_fps'].value = 0.0
- camera_process['detection_fps'].value = 0.0
- camera_process['read_start'].value = 0.0
- process = mp.Process(target=track_camera, args=(name, self.config,
- self.detection_queue, self.out_events[name], self.tracked_objects_queue, camera_process, self.stop_event))
- process.daemon = True
- camera_process['process'] = process
- process.start()
- print(f"Track process started for {name}: {process.pid}")
-
-def main():
- stop_event = threading.Event()
- # connect to mqtt and setup last will
- def on_connect(client, userdata, flags, rc):
- print("On connect called")
- if rc != 0:
- if rc == 3:
- print ("MQTT Server unavailable")
- elif rc == 4:
- print ("MQTT Bad username or password")
- elif rc == 5:
- print ("MQTT Not authorized")
- else:
- print ("Unable to connect to MQTT: Connection refused. Error code: " + str(rc))
- # publish a message to signal that the service is running
- client.publish(MQTT_TOPIC_PREFIX+'/available', 'online', retain=True)
- client = mqtt.Client(client_id=MQTT_CLIENT_ID)
- client.on_connect = on_connect
- client.will_set(MQTT_TOPIC_PREFIX+'/available', payload='offline', qos=1, retain=True)
- if not MQTT_USER is None:
- client.username_pw_set(MQTT_USER, password=MQTT_PASS)
- client.connect(MQTT_HOST, MQTT_PORT, 60)
- client.loop_start()
-
- ##
- # Setup config defaults for cameras
- ##
- for name, config in CONFIG['cameras'].items():
- config['snapshots'] = {
- 'show_timestamp': config.get('snapshots', {}).get('show_timestamp', True),
- 'draw_zones': config.get('snapshots', {}).get('draw_zones', False),
- 'draw_bounding_boxes': config.get('snapshots', {}).get('draw_bounding_boxes', True)
- }
- config['zones'] = config.get('zones', {})
-
- # Queue for cameras to push tracked objects to
- tracked_objects_queue = mp.Queue(maxsize=len(CONFIG['cameras'].keys())*2)
-
- # Queue for clip processing
- event_queue = mp.Queue()
-
- # create the detection pipes and shms
- out_events = {}
- camera_shms = []
- for name in CONFIG['cameras'].keys():
- out_events[name] = mp.Event()
- shm_in = mp.shared_memory.SharedMemory(name=name, create=True, size=300*300*3)
- shm_out = mp.shared_memory.SharedMemory(name=f"out-{name}", create=True, size=20*6*4)
- camera_shms.append(shm_in)
- camera_shms.append(shm_out)
-
- detection_queue = mp.Queue()
-
- detectors = {}
- for name, detector in DETECTORS.items():
- if detector['type'] == 'cpu':
- detectors[name] = EdgeTPUProcess(detection_queue, out_events=out_events, tf_device='cpu')
- if detector['type'] == 'edgetpu':
- detectors[name] = EdgeTPUProcess(detection_queue, out_events=out_events, tf_device=detector['device'])
-
- # create the camera processes
- camera_process_info = {}
- for name, config in CONFIG['cameras'].items():
- # Merge the ffmpeg config with the global config
- ffmpeg = config.get('ffmpeg', {})
- ffmpeg_input = get_ffmpeg_input(ffmpeg['input'])
- ffmpeg_global_args = ffmpeg.get('global_args', FFMPEG_DEFAULT_CONFIG['global_args'])
- ffmpeg_hwaccel_args = ffmpeg.get('hwaccel_args', FFMPEG_DEFAULT_CONFIG['hwaccel_args'])
- ffmpeg_input_args = ffmpeg.get('input_args', FFMPEG_DEFAULT_CONFIG['input_args'])
- ffmpeg_output_args = ffmpeg.get('output_args', FFMPEG_DEFAULT_CONFIG['output_args'])
- if not config.get('fps') is None:
- ffmpeg_output_args = ["-r", str(config.get('fps'))] + ffmpeg_output_args
- if config.get('save_clips', {}).get('enabled', False):
- ffmpeg_output_args = [
- "-f",
- "segment",
- "-segment_time",
- "10",
- "-segment_format",
- "mp4",
- "-reset_timestamps",
- "1",
- "-strftime",
- "1",
- "-c",
- "copy",
- "-an",
- "-map",
- "0",
- f"{os.path.join(CACHE_DIR, name)}-%Y%m%d%H%M%S.mp4"
- ] + ffmpeg_output_args
- ffmpeg_cmd = (['ffmpeg'] +
- ffmpeg_global_args +
- ffmpeg_hwaccel_args +
- ffmpeg_input_args +
- ['-i', ffmpeg_input] +
- ffmpeg_output_args +
- ['pipe:'])
-
- config['ffmpeg_cmd'] = ffmpeg_cmd
-
- if 'width' in config and 'height' in config:
- frame_shape = (config['height'], config['width'], 3)
- else:
- frame_shape = get_frame_shape(ffmpeg_input)
-
- config['frame_shape'] = frame_shape
- config['take_frame'] = config.get('take_frame', 1)
-
- camera_process_info[name] = {
- 'camera_fps': mp.Value('d', 0.0),
- 'skipped_fps': mp.Value('d', 0.0),
- 'process_fps': mp.Value('d', 0.0),
- 'detection_fps': mp.Value('d', 0.0),
- 'detection_frame': mp.Value('d', 0.0),
- 'read_start': mp.Value('d', 0.0),
- 'ffmpeg_pid': mp.Value('i', 0),
- 'frame_queue': mp.Queue(maxsize=2)
- }
-
- # merge global object config into camera object config
- camera_objects_config = config.get('objects', {})
- # get objects to track for camera
- objects_to_track = camera_objects_config.get('track', GLOBAL_OBJECT_CONFIG.get('track', ['person']))
- # get object filters
- object_filters = camera_objects_config.get('filters', GLOBAL_OBJECT_CONFIG.get('filters', {}))
- config['objects'] = {
- 'track': objects_to_track,
- 'filters': object_filters
- }
-
- capture_process = mp.Process(target=capture_camera, args=(name, config,
- camera_process_info[name], stop_event))
- capture_process.daemon = True
- camera_process_info[name]['capture_process'] = capture_process
-
- camera_process = mp.Process(target=track_camera, args=(name, config,
- detection_queue, out_events[name], tracked_objects_queue, camera_process_info[name], stop_event))
- camera_process.daemon = True
- camera_process_info[name]['process'] = camera_process
-
- # start the camera_processes
- for name, camera_process in camera_process_info.items():
- camera_process['capture_process'].start()
- print(f"Camera capture process started for {name}: {camera_process['capture_process'].pid}")
- camera_process['process'].start()
- print(f"Camera process started for {name}: {camera_process['process'].pid}")
-
- event_processor = EventProcessor(CONFIG, camera_process_info, CACHE_DIR, CLIPS_DIR, event_queue, stop_event)
- event_processor.start()
-
- object_processor = TrackedObjectProcessor(CONFIG['cameras'], client, MQTT_TOPIC_PREFIX, tracked_objects_queue, event_queue, stop_event)
- object_processor.start()
-
- frigate_watchdog = FrigateWatchdog(camera_process_info, CONFIG['cameras'], detectors, detection_queue, out_events, tracked_objects_queue, stop_event)
- frigate_watchdog.start()
-
- def receiveSignal(signalNumber, frame):
- print('Received:', signalNumber)
- stop_event.set()
- event_processor.join()
- object_processor.join()
- frigate_watchdog.join()
-
- for detector in detectors.values():
- detector.stop()
- for shm in camera_shms:
- shm.close()
- shm.unlink()
- sys.exit()
-
- signal.signal(signal.SIGTERM, receiveSignal)
- signal.signal(signal.SIGINT, receiveSignal)
-
- # create a flask app that encodes frames a mjpeg on demand
- app = Flask(__name__)
- log = logging.getLogger('werkzeug')
- log.setLevel(logging.ERROR)
-
- @app.route('/')
- def ishealthy():
- # return a healh
- return "Frigate is running. Alive and healthy!"
-
- @app.route('/debug/stack')
- def processor_stack():
- frame = sys._current_frames().get(object_processor.ident, None)
- if frame:
- return "<br>".join(traceback.format_stack(frame)), 200
- else:
- return "no frame found", 200
-
- @app.route('/debug/print_stack')
- def print_stack():
- pid = int(request.args.get('pid', 0))
- if pid == 0:
- return "missing pid", 200
- else:
- os.kill(pid, signal.SIGUSR1)
- return "check logs", 200
-
- @app.route('/debug/stats')
- def stats():
- stats = {}
-
- total_detection_fps = 0
-
- for name, camera_stats in camera_process_info.items():
- total_detection_fps += camera_stats['detection_fps'].value
- stats[name] = {
- 'camera_fps': round(camera_stats['camera_fps'].value, 2),
- 'process_fps': round(camera_stats['process_fps'].value, 2),
- 'skipped_fps': round(camera_stats['skipped_fps'].value, 2),
- 'detection_fps': round(camera_stats['detection_fps'].value, 2),
- 'pid': camera_stats['process'].pid,
- 'capture_pid': camera_stats['capture_process'].pid,
- 'frame_info': {
- 'detect': camera_stats['detection_frame'].value,
- 'process': object_processor.camera_data[name]['current_frame_time']
- }
- }
-
- stats['detectors'] = {}
- for name, detector in detectors.items():
- stats['detectors'][name] = {
- 'inference_speed': round(detector.avg_inference_speed.value*1000, 2),
- 'detection_start': detector.detection_start.value,
- 'pid': detector.detect_process.pid
- }
- stats['detection_fps'] = round(total_detection_fps, 2)
-
- return jsonify(stats)
-
- @app.route('/<camera_name>/<label>/best.jpg')
- def best(camera_name, label):
- if camera_name in CONFIG['cameras']:
- best_object = object_processor.get_best(camera_name, label)
- best_frame = best_object.get('frame')
- if best_frame is None:
- best_frame = np.zeros((720,1280,3), np.uint8)
- else:
- best_frame = cv2.cvtColor(best_frame, cv2.COLOR_YUV2BGR_I420)
-
- crop = bool(request.args.get('crop', 0, type=int))
- if crop:
- region = best_object.get('region', [0,0,300,300])
- best_frame = best_frame[region[1]:region[3], region[0]:region[2]]
-
- height = int(request.args.get('h', str(best_frame.shape[0])))
- width = int(height*best_frame.shape[1]/best_frame.shape[0])
-
- best_frame = cv2.resize(best_frame, dsize=(width, height), interpolation=cv2.INTER_AREA)
- ret, jpg = cv2.imencode('.jpg', best_frame)
- response = make_response(jpg.tobytes())
- response.headers['Content-Type'] = 'image/jpg'
- return response
- else:
- return "Camera named {} not found".format(camera_name), 404
-
- @app.route('/<camera_name>')
- def mjpeg_feed(camera_name):
- fps = int(request.args.get('fps', '3'))
- height = int(request.args.get('h', '360'))
- if camera_name in CONFIG['cameras']:
- # return a multipart response
- return Response(imagestream(camera_name, fps, height),
- mimetype='multipart/x-mixed-replace; boundary=frame')
- else:
- return "Camera named {} not found".format(camera_name), 404
-
- @app.route('/<camera_name>/latest.jpg')
- def latest_frame(camera_name):
- if camera_name in CONFIG['cameras']:
- # max out at specified FPS
- frame = object_processor.get_current_frame(camera_name)
- if frame is None:
- frame = np.zeros((720,1280,3), np.uint8)
-
- height = int(request.args.get('h', str(frame.shape[0])))
- width = int(height*frame.shape[1]/frame.shape[0])
-
- frame = cv2.resize(frame, dsize=(width, height), interpolation=cv2.INTER_AREA)
-
- ret, jpg = cv2.imencode('.jpg', frame)
- response = make_response(jpg.tobytes())
- response.headers['Content-Type'] = 'image/jpg'
- return response
- else:
- return "Camera named {} not found".format(camera_name), 404
-
- def imagestream(camera_name, fps, height):
- while True:
- # max out at specified FPS
- time.sleep(1/fps)
- frame = object_processor.get_current_frame(camera_name, draw=True)
- if frame is None:
- frame = np.zeros((height,int(height*16/9),3), np.uint8)
-
- width = int(height*frame.shape[1]/frame.shape[0])
- frame = cv2.resize(frame, dsize=(width, height), interpolation=cv2.INTER_LINEAR)
-
- ret, jpg = cv2.imencode('.jpg', frame)
- yield (b'--frame\r\n'
- b'Content-Type: image/jpeg\r\n\r\n' + jpg.tobytes() + b'\r\n\r\n')
-
- app.run(host='0.0.0.0', port=WEB_PORT, debug=False)
-
- object_processor.join()
-
-if __name__ == '__main__':
- main()
diff --git a/frigate/__main__.py b/frigate/__main__.py
new file mode 100644
--- /dev/null
+++ b/frigate/__main__.py
@@ -0,0 +1,15 @@
+import faulthandler; faulthandler.enable()
+import sys
+import threading
+
+threading.current_thread().name = "frigate"
+
+from frigate.app import FrigateApp
+
+cli = sys.modules['flask.cli']
+cli.show_server_banner = lambda *x: None
+
+if __name__ == '__main__':
+ frigate_app = FrigateApp()
+
+ frigate_app.start()
diff --git a/frigate/app.py b/frigate/app.py
new file mode 100644
--- /dev/null
+++ b/frigate/app.py
@@ -0,0 +1,262 @@
+import json
+import logging
+import multiprocessing as mp
+import os
+from logging.handlers import QueueHandler
+from typing import Dict, List
+import sys
+import signal
+
+import yaml
+from peewee_migrate import Router
+from playhouse.sqlite_ext import SqliteExtDatabase
+from playhouse.sqliteq import SqliteQueueDatabase
+
+from frigate.config import FrigateConfig
+from frigate.const import RECORD_DIR, CLIPS_DIR, CACHE_DIR
+from frigate.edgetpu import EdgeTPUProcess
+from frigate.events import EventProcessor, EventCleanup
+from frigate.http import create_app
+from frigate.log import log_process, root_configurer
+from frigate.models import Event
+from frigate.mqtt import create_mqtt_client
+from frigate.object_processing import TrackedObjectProcessor
+from frigate.record import RecordingMaintainer
+from frigate.stats import StatsEmitter, stats_init
+from frigate.video import capture_camera, track_camera
+from frigate.watchdog import FrigateWatchdog
+from frigate.zeroconf import broadcast_zeroconf
+
+logger = logging.getLogger(__name__)
+
+class FrigateApp():
+ def __init__(self):
+ self.stop_event = mp.Event()
+ self.config: FrigateConfig = None
+ self.detection_queue = mp.Queue()
+ self.detectors: Dict[str, EdgeTPUProcess] = {}
+ self.detection_out_events: Dict[str, mp.Event] = {}
+ self.detection_shms: List[mp.shared_memory.SharedMemory] = []
+ self.log_queue = mp.Queue()
+ self.camera_metrics = {}
+
+ def set_environment_vars(self):
+ for key, value in self.config.environment_vars.items():
+ os.environ[key] = value
+
+ def ensure_dirs(self):
+ for d in [RECORD_DIR, CLIPS_DIR, CACHE_DIR]:
+ if not os.path.exists(d) and not os.path.islink(d):
+ logger.info(f"Creating directory: {d}")
+ os.makedirs(d)
+ else:
+ logger.debug(f"Skipping directory: {d}")
+
+ tmpfs_size = self.config.clips.tmpfs_cache_size
+ if tmpfs_size:
+ logger.info(f"Creating tmpfs of size {tmpfs_size}")
+ rc = os.system(f"mount -t tmpfs -o size={tmpfs_size} tmpfs {CACHE_DIR}")
+ if rc != 0:
+ logger.error(f"Failed to create tmpfs, error code: {rc}")
+
+ def init_logger(self):
+ self.log_process = mp.Process(target=log_process, args=(self.log_queue,), name='log_process')
+ self.log_process.daemon = True
+ self.log_process.start()
+ root_configurer(self.log_queue)
+
+ def init_config(self):
+ config_file = os.environ.get('CONFIG_FILE', '/config/config.yml')
+ self.config = FrigateConfig(config_file=config_file)
+
+ for camera_name in self.config.cameras.keys():
+ # create camera_metrics
+ self.camera_metrics[camera_name] = {
+ 'camera_fps': mp.Value('d', 0.0),
+ 'skipped_fps': mp.Value('d', 0.0),
+ 'process_fps': mp.Value('d', 0.0),
+ 'detection_enabled': mp.Value('i', self.config.cameras[camera_name].detect.enabled),
+ 'detection_fps': mp.Value('d', 0.0),
+ 'detection_frame': mp.Value('d', 0.0),
+ 'read_start': mp.Value('d', 0.0),
+ 'ffmpeg_pid': mp.Value('i', 0),
+ 'frame_queue': mp.Queue(maxsize=2),
+ }
+
+ def check_config(self):
+ for name, camera in self.config.cameras.items():
+ assigned_roles = list(set([r for i in camera.ffmpeg.inputs for r in i.roles]))
+ if not camera.clips.enabled and 'clips' in assigned_roles:
+ logger.warning(f"Camera {name} has clips assigned to an input, but clips is not enabled.")
+ elif camera.clips.enabled and not 'clips' in assigned_roles:
+ logger.warning(f"Camera {name} has clips enabled, but clips is not assigned to an input.")
+
+ if not camera.record.enabled and 'record' in assigned_roles:
+ logger.warning(f"Camera {name} has record assigned to an input, but record is not enabled.")
+ elif camera.record.enabled and not 'record' in assigned_roles:
+ logger.warning(f"Camera {name} has record enabled, but record is not assigned to an input.")
+
+ if not camera.rtmp.enabled and 'rtmp' in assigned_roles:
+ logger.warning(f"Camera {name} has rtmp assigned to an input, but rtmp is not enabled.")
+ elif camera.rtmp.enabled and not 'rtmp' in assigned_roles:
+ logger.warning(f"Camera {name} has rtmp enabled, but rtmp is not assigned to an input.")
+
+ def set_log_levels(self):
+ logging.getLogger().setLevel(self.config.logger.default)
+ for log, level in self.config.logger.logs.items():
+ logging.getLogger(log).setLevel(level)
+
+ if not 'werkzeug' in self.config.logger.logs:
+ logging.getLogger('werkzeug').setLevel('ERROR')
+
+ def init_queues(self):
+ # Queues for clip processing
+ self.event_queue = mp.Queue()
+ self.event_processed_queue = mp.Queue()
+
+ # Queue for cameras to push tracked objects to
+ self.detected_frames_queue = mp.Queue(maxsize=len(self.config.cameras.keys())*2)
+
+ def init_database(self):
+ migrate_db = SqliteExtDatabase(self.config.database.path)
+
+ # Run migrations
+ del(logging.getLogger('peewee_migrate').handlers[:])
+ router = Router(migrate_db)
+ router.run()
+
+ migrate_db.close()
+
+ self.db = SqliteQueueDatabase(self.config.database.path)
+ models = [Event]
+ self.db.bind(models)
+
+ def init_stats(self):
+ self.stats_tracking = stats_init(self.camera_metrics, self.detectors)
+
+ def init_web_server(self):
+ self.flask_app = create_app(self.config, self.db, self.stats_tracking, self.detected_frames_processor)
+
+ def init_mqtt(self):
+ self.mqtt_client = create_mqtt_client(self.config, self.camera_metrics)
+
+ def start_detectors(self):
+ model_shape = (self.config.model.height, self.config.model.width)
+ for name in self.config.cameras.keys():
+ self.detection_out_events[name] = mp.Event()
+ shm_in = mp.shared_memory.SharedMemory(name=name, create=True, size=self.config.model.height*self.config.model.width*3)
+ shm_out = mp.shared_memory.SharedMemory(name=f"out-{name}", create=True, size=20*6*4)
+ self.detection_shms.append(shm_in)
+ self.detection_shms.append(shm_out)
+
+ for name, detector in self.config.detectors.items():
+ if detector.type == 'cpu':
+ self.detectors[name] = EdgeTPUProcess(name, self.detection_queue, self.detection_out_events, model_shape, 'cpu', detector.num_threads)
+ if detector.type == 'edgetpu':
+ self.detectors[name] = EdgeTPUProcess(name, self.detection_queue, self.detection_out_events, model_shape, detector.device, detector.num_threads)
+
+ def start_detected_frames_processor(self):
+ self.detected_frames_processor = TrackedObjectProcessor(self.config, self.mqtt_client, self.config.mqtt.topic_prefix,
+ self.detected_frames_queue, self.event_queue, self.event_processed_queue, self.stop_event)
+ self.detected_frames_processor.start()
+
+ def start_camera_processors(self):
+ model_shape = (self.config.model.height, self.config.model.width)
+ for name, config in self.config.cameras.items():
+ camera_process = mp.Process(target=track_camera, name=f"camera_processor:{name}", args=(name, config, model_shape,
+ self.detection_queue, self.detection_out_events[name], self.detected_frames_queue,
+ self.camera_metrics[name]))
+ camera_process.daemon = True
+ self.camera_metrics[name]['process'] = camera_process
+ camera_process.start()
+ logger.info(f"Camera processor started for {name}: {camera_process.pid}")
+
+ def start_camera_capture_processes(self):
+ for name, config in self.config.cameras.items():
+ capture_process = mp.Process(target=capture_camera, name=f"camera_capture:{name}", args=(name, config,
+ self.camera_metrics[name]))
+ capture_process.daemon = True
+ self.camera_metrics[name]['capture_process'] = capture_process
+ capture_process.start()
+ logger.info(f"Capture process started for {name}: {capture_process.pid}")
+
+ def start_event_processor(self):
+ self.event_processor = EventProcessor(self.config, self.camera_metrics, self.event_queue, self.event_processed_queue, self.stop_event)
+ self.event_processor.start()
+
+ def start_event_cleanup(self):
+ self.event_cleanup = EventCleanup(self.config, self.stop_event)
+ self.event_cleanup.start()
+
+ def start_recording_maintainer(self):
+ self.recording_maintainer = RecordingMaintainer(self.config, self.stop_event)
+ self.recording_maintainer.start()
+
+ def start_stats_emitter(self):
+ self.stats_emitter = StatsEmitter(self.config, self.stats_tracking, self.mqtt_client, self.config.mqtt.topic_prefix, self.stop_event)
+ self.stats_emitter.start()
+
+ def start_watchdog(self):
+ self.frigate_watchdog = FrigateWatchdog(self.detectors, self.stop_event)
+ self.frigate_watchdog.start()
+
+ def start(self):
+ self.init_logger()
+ try:
+ try:
+ self.init_config()
+ except Exception as e:
+ print(f"Error parsing config: {e}")
+ self.log_process.terminate()
+ sys.exit(1)
+ self.set_environment_vars()
+ self.ensure_dirs()
+ self.check_config()
+ self.set_log_levels()
+ self.init_queues()
+ self.init_database()
+ self.init_mqtt()
+ except Exception as e:
+ print(e)
+ self.log_process.terminate()
+ sys.exit(1)
+ self.start_detectors()
+ self.start_detected_frames_processor()
+ self.start_camera_processors()
+ self.start_camera_capture_processes()
+ self.init_stats()
+ self.init_web_server()
+ self.start_event_processor()
+ self.start_event_cleanup()
+ self.start_recording_maintainer()
+ self.start_stats_emitter()
+ self.start_watchdog()
+ # self.zeroconf = broadcast_zeroconf(self.config.mqtt.client_id)
+
+ def receiveSignal(signalNumber, frame):
+ self.stop()
+ sys.exit()
+
+ signal.signal(signal.SIGTERM, receiveSignal)
+
+ self.flask_app.run(host='127.0.0.1', port=5001, debug=False)
+ self.stop()
+
+ def stop(self):
+ logger.info(f"Stopping...")
+ self.stop_event.set()
+
+ self.detected_frames_processor.join()
+ self.event_processor.join()
+ self.event_cleanup.join()
+ self.recording_maintainer.join()
+ self.stats_emitter.join()
+ self.frigate_watchdog.join()
+
+ for detector in self.detectors.values():
+ detector.stop()
+
+ while len(self.detection_shms) > 0:
+ shm = self.detection_shms.pop()
+ shm.close()
+ shm.unlink()
diff --git a/frigate/config.py b/frigate/config.py
new file mode 100644
--- /dev/null
+++ b/frigate/config.py
@@ -0,0 +1,1072 @@
+import base64
+import json
+import logging
+import os
+from typing import Dict
+
+import cv2
+import matplotlib.pyplot as plt
+import numpy as np
+import voluptuous as vol
+import yaml
+
+from frigate.const import RECORD_DIR, CLIPS_DIR, CACHE_DIR
+from frigate.util import create_mask
+
+logger = logging.getLogger(__name__)
+
+DEFAULT_TRACKED_OBJECTS = ['person']
+
+DETECTORS_SCHEMA = vol.Schema(
+ {
+ vol.Required(str): {
+ vol.Required('type', default='edgetpu'): vol.In(['cpu', 'edgetpu']),
+ vol.Optional('device', default='usb'): str,
+ vol.Optional('num_threads', default=3): int
+ }
+ }
+)
+
+DEFAULT_DETECTORS = {
+ 'coral': {
+ 'type': 'edgetpu',
+ 'device': 'usb'
+ }
+}
+
+MQTT_SCHEMA = vol.Schema(
+ {
+ vol.Required('host'): str,
+ vol.Optional('port', default=1883): int,
+ vol.Optional('topic_prefix', default='frigate'): str,
+ vol.Optional('client_id', default='frigate'): str,
+ vol.Optional('stats_interval', default=60): int,
+ 'user': str,
+ 'password': str
+ }
+)
+
+RETAIN_SCHEMA = vol.Schema(
+ {
+ vol.Required('default',default=10): int,
+ 'objects': {
+ str: int
+ }
+ }
+)
+
+CLIPS_SCHEMA = vol.Schema(
+ {
+ vol.Optional('max_seconds', default=300): int,
+ 'tmpfs_cache_size': str,
+ vol.Optional('retain', default={}): RETAIN_SCHEMA
+ }
+)
+
+FFMPEG_GLOBAL_ARGS_DEFAULT = ['-hide_banner','-loglevel','fatal']
+FFMPEG_INPUT_ARGS_DEFAULT = ['-avoid_negative_ts', 'make_zero',
+ '-fflags', '+genpts+discardcorrupt',
+ '-rtsp_transport', 'tcp',
+ '-stimeout', '5000000',
+ '-use_wallclock_as_timestamps', '1']
+DETECT_FFMPEG_OUTPUT_ARGS_DEFAULT = ['-f', 'rawvideo',
+ '-pix_fmt', 'yuv420p']
+RTMP_FFMPEG_OUTPUT_ARGS_DEFAULT = ["-c", "copy", "-f", "flv"]
+SAVE_CLIPS_FFMPEG_OUTPUT_ARGS_DEFAULT = ["-f", "segment", "-segment_time",
+ "10", "-segment_format", "mp4", "-reset_timestamps", "1", "-strftime",
+ "1", "-c", "copy", "-an"]
+RECORD_FFMPEG_OUTPUT_ARGS_DEFAULT = ["-f", "segment", "-segment_time",
+ "60", "-segment_format", "mp4", "-reset_timestamps", "1", "-strftime",
+ "1", "-c", "copy", "-an"]
+
+GLOBAL_FFMPEG_SCHEMA = vol.Schema(
+ {
+ vol.Optional('global_args', default=FFMPEG_GLOBAL_ARGS_DEFAULT): vol.Any(str, [str]),
+ vol.Optional('hwaccel_args', default=[]): vol.Any(str, [str]),
+ vol.Optional('input_args', default=FFMPEG_INPUT_ARGS_DEFAULT): vol.Any(str, [str]),
+ vol.Optional('output_args', default={}): {
+ vol.Optional('detect', default=DETECT_FFMPEG_OUTPUT_ARGS_DEFAULT): vol.Any(str, [str]),
+ vol.Optional('record', default=RECORD_FFMPEG_OUTPUT_ARGS_DEFAULT): vol.Any(str, [str]),
+ vol.Optional('clips', default=SAVE_CLIPS_FFMPEG_OUTPUT_ARGS_DEFAULT): vol.Any(str, [str]),
+ vol.Optional('rtmp', default=RTMP_FFMPEG_OUTPUT_ARGS_DEFAULT): vol.Any(str, [str]),
+ }
+ }
+)
+
+MOTION_SCHEMA = vol.Schema(
+ {
+ 'mask': vol.Any(str, [str]),
+ 'threshold': vol.Range(min=1, max=255),
+ 'contour_area': int,
+ 'delta_alpha': float,
+ 'frame_alpha': float,
+ 'frame_height': int
+ }
+)
+
+DETECT_SCHEMA = vol.Schema(
+ {
+ 'max_disappeared': int
+ }
+)
+
+FILTER_SCHEMA = vol.Schema(
+ {
+ str: {
+ 'min_area': int,
+ 'max_area': int,
+ 'threshold': float,
+ }
+ }
+)
+
+def filters_for_all_tracked_objects(object_config):
+ for tracked_object in object_config.get('track', DEFAULT_TRACKED_OBJECTS):
+ if not 'filters' in object_config:
+ object_config['filters'] = {}
+ if not tracked_object in object_config['filters']:
+ object_config['filters'][tracked_object] = {}
+ return object_config
+
+OBJECTS_SCHEMA = vol.Schema(vol.All(filters_for_all_tracked_objects,
+ {
+ 'track': [str],
+ vol.Optional('filters', default = {}): FILTER_SCHEMA.extend(
+ {
+ str: {
+ 'min_score': float,
+ 'mask': vol.Any(str, [str]),
+ }
+ })
+ }
+))
+
+def each_role_used_once(inputs):
+ roles = [role for i in inputs for role in i['roles']]
+ roles_set = set(roles)
+ if len(roles) > len(roles_set):
+ raise ValueError
+ return inputs
+
+def detect_is_required(inputs):
+ roles = [role for i in inputs for role in i['roles']]
+ if not 'detect' in roles:
+ raise ValueError
+ return inputs
+
+CAMERA_FFMPEG_SCHEMA = vol.Schema(
+ {
+ vol.Required('inputs'): vol.All([{
+ vol.Required('path'): str,
+ vol.Required('roles'): ['detect', 'clips', 'record', 'rtmp'],
+ 'global_args': vol.Any(str, [str]),
+ 'hwaccel_args': vol.Any(str, [str]),
+ 'input_args': vol.Any(str, [str]),
+ }], vol.Msg(each_role_used_once, msg="Each input role may only be used once"),
+ vol.Msg(detect_is_required, msg="The detect role is required")),
+ 'output_args': {
+ vol.Optional('detect', default=DETECT_FFMPEG_OUTPUT_ARGS_DEFAULT): vol.Any(str, [str]),
+ vol.Optional('record', default=RECORD_FFMPEG_OUTPUT_ARGS_DEFAULT): vol.Any(str, [str]),
+ vol.Optional('clips', default=SAVE_CLIPS_FFMPEG_OUTPUT_ARGS_DEFAULT): vol.Any(str, [str]),
+ vol.Optional('rtmp', default=RTMP_FFMPEG_OUTPUT_ARGS_DEFAULT): vol.Any(str, [str]),
+ }
+ }
+)
+
+def ensure_zones_and_cameras_have_different_names(cameras):
+ zones = [zone for camera in cameras.values() for zone in camera['zones'].keys()]
+ for zone in zones:
+ if zone in cameras.keys():
+ raise ValueError
+ return cameras
+
+CAMERAS_SCHEMA = vol.Schema(vol.All(
+ {
+ str: {
+ vol.Required('ffmpeg'): CAMERA_FFMPEG_SCHEMA,
+ vol.Required('height'): int,
+ vol.Required('width'): int,
+ 'fps': int,
+ vol.Optional('best_image_timeout', default=60): int,
+ vol.Optional('zones', default={}): {
+ str: {
+ vol.Required('coordinates'): vol.Any(str, [str]),
+ vol.Optional('filters', default={}): FILTER_SCHEMA
+ }
+ },
+ vol.Optional('clips', default={}): {
+ vol.Optional('enabled', default=False): bool,
+ vol.Optional('pre_capture', default=5): int,
+ vol.Optional('post_capture', default=5): int,
+ 'objects': [str],
+ vol.Optional('retain', default={}): RETAIN_SCHEMA,
+ },
+ vol.Optional('record', default={}): {
+ 'enabled': bool,
+ 'retain_days': int,
+ },
+ vol.Optional('rtmp', default={}): {
+ vol.Required('enabled', default=True): bool,
+ },
+ vol.Optional('snapshots', default={}): {
+ vol.Optional('enabled', default=False): bool,
+ vol.Optional('timestamp', default=False): bool,
+ vol.Optional('bounding_box', default=False): bool,
+ vol.Optional('crop', default=False): bool,
+ 'height': int,
+ vol.Optional('retain', default={}): RETAIN_SCHEMA,
+ },
+ vol.Optional('mqtt', default={}): {
+ vol.Optional('enabled', default=True): bool,
+ vol.Optional('timestamp', default=True): bool,
+ vol.Optional('bounding_box', default=True): bool,
+ vol.Optional('crop', default=True): bool,
+ vol.Optional('height', default=270): int
+ },
+ vol.Optional('objects', default={}): OBJECTS_SCHEMA,
+ vol.Optional('motion', default={}): MOTION_SCHEMA,
+ vol.Optional('detect', default={}): DETECT_SCHEMA.extend({
+ vol.Optional('enabled', default=True): bool
+ })
+ }
+ }, vol.Msg(ensure_zones_and_cameras_have_different_names, msg='Zones cannot share names with cameras'))
+)
+
+FRIGATE_CONFIG_SCHEMA = vol.Schema(
+ {
+ vol.Optional('database', default={}): {
+ vol.Optional('path', default=os.path.join(CLIPS_DIR, 'frigate.db')): str
+ },
+ vol.Optional('model', default={'width': 320, 'height': 320}): {
+ vol.Required('width'): int,
+ vol.Required('height'): int
+ },
+ vol.Optional('detectors', default=DEFAULT_DETECTORS): DETECTORS_SCHEMA,
+ 'mqtt': MQTT_SCHEMA,
+ vol.Optional('logger', default={'default': 'info', 'logs': {}}): {
+ vol.Optional('default', default='info'): vol.In(['info', 'debug', 'warning', 'error', 'critical']),
+ vol.Optional('logs', default={}): {str: vol.In(['info', 'debug', 'warning', 'error', 'critical']) }
+ },
+ vol.Optional('snapshots', default={}): {
+ vol.Optional('retain', default={}): RETAIN_SCHEMA
+ },
+ vol.Optional('clips', default={}): CLIPS_SCHEMA,
+ vol.Optional('record', default={}): {
+ vol.Optional('enabled', default=False): bool,
+ vol.Optional('retain_days', default=30): int,
+ },
+ vol.Optional('ffmpeg', default={}): GLOBAL_FFMPEG_SCHEMA,
+ vol.Optional('objects', default={}): OBJECTS_SCHEMA,
+ vol.Optional('motion', default={}): MOTION_SCHEMA,
+ vol.Optional('detect', default={}): DETECT_SCHEMA,
+ vol.Required('cameras', default={}): CAMERAS_SCHEMA,
+ vol.Optional('environment_vars', default={}): { str: str }
+ }
+)
+
+class DatabaseConfig():
+ def __init__(self, config):
+ self._path = config['path']
+
+ @property
+ def path(self):
+ return self._path
+
+ def to_dict(self):
+ return {
+ 'path': self.path
+ }
+
+class ModelConfig():
+ def __init__(self, config):
+ self._width = config['width']
+ self._height = config['height']
+
+ @property
+ def width(self):
+ return self._width
+
+ @property
+ def height(self):
+ return self._height
+
+ def to_dict(self):
+ return {
+ 'width': self.width,
+ 'height': self.height
+ }
+
+class DetectorConfig():
+ def __init__(self, config):
+ self._type = config['type']
+ self._device = config['device']
+ self._num_threads = config['num_threads']
+
+ @property
+ def type(self):
+ return self._type
+
+ @property
+ def device(self):
+ return self._device
+
+ @property
+ def num_threads(self):
+ return self._num_threads
+
+ def to_dict(self):
+ return {
+ 'type': self.type,
+ 'device': self.device,
+ 'num_threads': self.num_threads
+ }
+
+class LoggerConfig():
+ def __init__(self, config):
+ self._default = config['default'].upper()
+ self._logs = {k: v.upper() for k, v in config['logs'].items()}
+
+ @property
+ def default(self):
+ return self._default
+
+ @property
+ def logs(self):
+ return self._logs
+
+ def to_dict(self):
+ return {
+ 'default': self.default,
+ 'logs': self.logs
+ }
+
+class MqttConfig():
+ def __init__(self, config):
+ self._host = config['host']
+ self._port = config['port']
+ self._topic_prefix = config['topic_prefix']
+ self._client_id = config['client_id']
+ self._user = config.get('user')
+ self._password = config.get('password')
+ self._stats_interval = config.get('stats_interval')
+
+ @property
+ def host(self):
+ return self._host
+
+ @property
+ def port(self):
+ return self._port
+
+ @property
+ def topic_prefix(self):
+ return self._topic_prefix
+
+ @property
+ def client_id(self):
+ return self._client_id
+
+ @property
+ def user(self):
+ return self._user
+
+ @property
+ def password(self):
+ return self._password
+
+ @property
+ def stats_interval(self):
+ return self._stats_interval
+
+ def to_dict(self):
+ return {
+ 'host': self.host,
+ 'port': self.port,
+ 'topic_prefix': self.topic_prefix,
+ 'client_id': self.client_id,
+ 'user': self.user,
+ 'stats_interval': self.stats_interval
+ }
+
+class CameraInput():
+ def __init__(self, global_config, ffmpeg_input):
+ self._path = ffmpeg_input['path']
+ self._roles = ffmpeg_input['roles']
+ self._global_args = ffmpeg_input.get('global_args', global_config['global_args'])
+ self._hwaccel_args = ffmpeg_input.get('hwaccel_args', global_config['hwaccel_args'])
+ self._input_args = ffmpeg_input.get('input_args', global_config['input_args'])
+
+ @property
+ def path(self):
+ return self._path
+
+ @property
+ def roles(self):
+ return self._roles
+
+ @property
+ def global_args(self):
+ return self._global_args if isinstance(self._global_args, list) else self._global_args.split(' ')
+
+ @property
+ def hwaccel_args(self):
+ return self._hwaccel_args if isinstance(self._hwaccel_args, list) else self._hwaccel_args.split(' ')
+
+ @property
+ def input_args(self):
+ return self._input_args if isinstance(self._input_args, list) else self._input_args.split(' ')
+
+class CameraFfmpegConfig():
+ def __init__(self, global_config, config):
+ self._inputs = [CameraInput(global_config, i) for i in config['inputs']]
+ self._output_args = config.get('output_args', global_config['output_args'])
+
+ @property
+ def inputs(self):
+ return self._inputs
+
+ @property
+ def output_args(self):
+ return {k: v if isinstance(v, list) else v.split(' ') for k, v in self._output_args.items()}
+
+class RetainConfig():
+ def __init__(self, global_config, config):
+ self._default = config.get('default', global_config.get('default'))
+ self._objects = config.get('objects', global_config.get('objects', {}))
+
+ @property
+ def default(self):
+ return self._default
+
+ @property
+ def objects(self):
+ return self._objects
+
+ def to_dict(self):
+ return {
+ 'default': self.default,
+ 'objects': self.objects
+ }
+
+class ClipsConfig():
+ def __init__(self, config):
+ self._max_seconds = config['max_seconds']
+ self._tmpfs_cache_size = config.get('tmpfs_cache_size', '').strip()
+ self._retain = RetainConfig(config['retain'], config['retain'])
+
+ @property
+ def max_seconds(self):
+ return self._max_seconds
+
+ @property
+ def tmpfs_cache_size(self):
+ return self._tmpfs_cache_size
+
+ @property
+ def retain(self):
+ return self._retain
+
+ def to_dict(self):
+ return {
+ 'max_seconds': self.max_seconds,
+ 'tmpfs_cache_size': self.tmpfs_cache_size,
+ 'retain': self.retain.to_dict()
+ }
+
+class SnapshotsConfig():
+ def __init__(self, config):
+ self._retain = RetainConfig(config['retain'], config['retain'])
+
+ @property
+ def retain(self):
+ return self._retain
+
+ def to_dict(self):
+ return {
+ 'retain': self.retain.to_dict()
+ }
+
+class RecordConfig():
+ def __init__(self, global_config, config):
+ self._enabled = config.get('enabled', global_config['enabled'])
+ self._retain_days = config.get('retain_days', global_config['retain_days'])
+
+ @property
+ def enabled(self):
+ return self._enabled
+
+ @property
+ def retain_days(self):
+ return self._retain_days
+
+ def to_dict(self):
+ return {
+ 'enabled': self.enabled,
+ 'retain_days': self.retain_days,
+ }
+
+class FilterConfig():
+ def __init__(self, global_config, config, frame_shape=None):
+ self._min_area = config.get('min_area', global_config.get('min_area', 0))
+ self._max_area = config.get('max_area', global_config.get('max_area', 24000000))
+ self._threshold = config.get('threshold', global_config.get('threshold', 0.7))
+ self._min_score = config.get('min_score', global_config.get('min_score', 0.5))
+ self._raw_mask = config.get('mask')
+ self._mask = create_mask(frame_shape, self._raw_mask) if self._raw_mask else None
+
+ @property
+ def min_area(self):
+ return self._min_area
+
+ @property
+ def max_area(self):
+ return self._max_area
+
+ @property
+ def threshold(self):
+ return self._threshold
+
+ @property
+ def min_score(self):
+ return self._min_score
+
+ @property
+ def mask(self):
+ return self._mask
+
+ def to_dict(self):
+ return {
+ 'min_area': self.min_area,
+ 'max_area': self.max_area,
+ 'threshold': self.threshold,
+ 'min_score': self.min_score,
+ 'mask': self._raw_mask
+ }
+
+class ObjectConfig():
+ def __init__(self, global_config, config, frame_shape):
+ self._track = config.get('track', global_config.get('track', DEFAULT_TRACKED_OBJECTS))
+ self._filters = { name: FilterConfig(global_config.get('filters').get(name, {}), config.get('filters').get(name, {}), frame_shape) for name in self._track }
+
+ @property
+ def track(self):
+ return self._track
+
+ @property
+ def filters(self) -> Dict[str, FilterConfig]:
+ return self._filters
+
+ def to_dict(self):
+ return {
+ 'track': self.track,
+ 'filters': { k: f.to_dict() for k, f in self.filters.items() }
+ }
+
+class CameraSnapshotsConfig():
+ def __init__(self, global_config, config):
+ self._enabled = config['enabled']
+ self._timestamp = config['timestamp']
+ self._bounding_box = config['bounding_box']
+ self._crop = config['crop']
+ self._height = config.get('height')
+ self._retain = RetainConfig(global_config['snapshots']['retain'], config['retain'])
+
+ @property
+ def enabled(self):
+ return self._enabled
+
+ @property
+ def timestamp(self):
+ return self._timestamp
+
+ @property
+ def bounding_box(self):
+ return self._bounding_box
+
+ @property
+ def crop(self):
+ return self._crop
+
+ @property
+ def height(self):
+ return self._height
+
+ @property
+ def retain(self):
+ return self._retain
+
+ def to_dict(self):
+ return {
+ 'enabled': self.enabled,
+ 'timestamp': self.timestamp,
+ 'bounding_box': self.bounding_box,
+ 'crop': self.crop,
+ 'height': self.height,
+ 'retain': self.retain.to_dict()
+ }
+
+class CameraMqttConfig():
+ def __init__(self, config):
+ self._enabled = config['enabled']
+ self._timestamp = config['timestamp']
+ self._bounding_box = config['bounding_box']
+ self._crop = config['crop']
+ self._height = config.get('height')
+
+ @property
+ def enabled(self):
+ return self._enabled
+
+ @property
+ def timestamp(self):
+ return self._timestamp
+
+ @property
+ def bounding_box(self):
+ return self._bounding_box
+
+ @property
+ def crop(self):
+ return self._crop
+
+ @property
+ def height(self):
+ return self._height
+
+ def to_dict(self):
+ return {
+ 'enabled': self.enabled,
+ 'timestamp': self.timestamp,
+ 'bounding_box': self.bounding_box,
+ 'crop': self.crop,
+ 'height': self.height
+ }
+
+class CameraClipsConfig():
+ def __init__(self, global_config, config):
+ self._enabled = config['enabled']
+ self._pre_capture = config['pre_capture']
+ self._post_capture = config['post_capture']
+ self._objects = config.get('objects')
+ self._retain = RetainConfig(global_config['clips']['retain'], config['retain'])
+
+ @property
+ def enabled(self):
+ return self._enabled
+
+ @property
+ def pre_capture(self):
+ return self._pre_capture
+
+ @property
+ def post_capture(self):
+ return self._post_capture
+
+ @property
+ def objects(self):
+ return self._objects
+
+ @property
+ def retain(self):
+ return self._retain
+
+ def to_dict(self):
+ return {
+ 'enabled': self.enabled,
+ 'pre_capture': self.pre_capture,
+ 'post_capture': self.post_capture,
+ 'objects': self.objects,
+ 'retain': self.retain.to_dict()
+ }
+
+class CameraRtmpConfig():
+ def __init__(self, global_config, config):
+ self._enabled = config['enabled']
+
+ @property
+ def enabled(self):
+ return self._enabled
+
+ def to_dict(self):
+ return {
+ 'enabled': self.enabled,
+ }
+
+class MotionConfig():
+ def __init__(self, global_config, config, frame_shape):
+ self._raw_mask = config.get('mask')
+ if self._raw_mask:
+ self._mask = create_mask(frame_shape, self._raw_mask)
+ else:
+ default_mask = np.zeros(frame_shape, np.uint8)
+ default_mask[:] = 255
+ self._mask = default_mask
+ self._threshold = config.get('threshold', global_config.get('threshold', 25))
+ self._contour_area = config.get('contour_area', global_config.get('contour_area', 100))
+ self._delta_alpha = config.get('delta_alpha', global_config.get('delta_alpha', 0.2))
+ self._frame_alpha = config.get('frame_alpha', global_config.get('frame_alpha', 0.2))
+ self._frame_height = config.get('frame_height', global_config.get('frame_height', frame_shape[0]//6))
+
+ @property
+ def mask(self):
+ return self._mask
+
+ @property
+ def threshold(self):
+ return self._threshold
+
+ @property
+ def contour_area(self):
+ return self._contour_area
+
+ @property
+ def delta_alpha(self):
+ return self._delta_alpha
+
+ @property
+ def frame_alpha(self):
+ return self._frame_alpha
+
+ @property
+ def frame_height(self):
+ return self._frame_height
+
+ def to_dict(self):
+ return {
+ 'mask': self._raw_mask,
+ 'threshold': self.threshold,
+ 'contour_area': self.contour_area,
+ 'delta_alpha': self.delta_alpha,
+ 'frame_alpha': self.frame_alpha,
+ 'frame_height': self.frame_height,
+ }
+
+
+
+class DetectConfig():
+ def __init__(self, global_config, config, camera_fps):
+ self._enabled = config['enabled']
+ self._max_disappeared = config.get('max_disappeared', global_config.get('max_disappeared', camera_fps*2))
+
+ @property
+ def enabled(self):
+ return self._enabled
+
+ @property
+ def max_disappeared(self):
+ return self._max_disappeared
+
+ def to_dict(self):
+ return {
+ 'enabled': self.enabled,
+ 'max_disappeared': self._max_disappeared,
+ }
+
+class ZoneConfig():
+ def __init__(self, name, config):
+ self._coordinates = config['coordinates']
+ self._filters = { name: FilterConfig(c, c) for name, c in config['filters'].items() }
+
+ if isinstance(self._coordinates, list):
+ self._contour = np.array([[int(p.split(',')[0]), int(p.split(',')[1])] for p in self._coordinates])
+ elif isinstance(self._coordinates, str):
+ points = self._coordinates.split(',')
+ self._contour = np.array([[int(points[i]), int(points[i+1])] for i in range(0, len(points), 2)])
+ else:
+ print(f"Unable to parse zone coordinates for {name}")
+ self._contour = np.array([])
+
+ self._color = (0,0,0)
+
+ @property
+ def coordinates(self):
+ return self._coordinates
+
+ @property
+ def contour(self):
+ return self._contour
+
+ @contour.setter
+ def contour(self, val):
+ self._contour = val
+
+ @property
+ def color(self):
+ return self._color
+
+ @color.setter
+ def color(self, val):
+ self._color = val
+
+ @property
+ def filters(self):
+ return self._filters
+
+ def to_dict(self):
+ return {
+ 'filters': {k: f.to_dict() for k, f in self.filters.items()},
+ 'coordinates': self._coordinates
+ }
+
+class CameraConfig():
+ def __init__(self, name, config, global_config):
+ self._name = name
+ self._ffmpeg = CameraFfmpegConfig(global_config['ffmpeg'], config['ffmpeg'])
+ self._height = config.get('height')
+ self._width = config.get('width')
+ self._frame_shape = (self._height, self._width)
+ self._frame_shape_yuv = (self._frame_shape[0]*3//2, self._frame_shape[1])
+ self._fps = config.get('fps')
+ self._best_image_timeout = config['best_image_timeout']
+ self._zones = { name: ZoneConfig(name, z) for name, z in config['zones'].items() }
+ self._clips = CameraClipsConfig(global_config, config['clips'])
+ self._record = RecordConfig(global_config['record'], config['record'])
+ self._rtmp = CameraRtmpConfig(global_config, config['rtmp'])
+ self._snapshots = CameraSnapshotsConfig(global_config, config['snapshots'])
+ self._mqtt = CameraMqttConfig(config['mqtt'])
+ self._objects = ObjectConfig(global_config['objects'], config.get('objects', {}), self._frame_shape)
+ self._motion = MotionConfig(global_config['motion'], config['motion'], self._frame_shape)
+ self._detect = DetectConfig(global_config['detect'], config['detect'], config.get('fps', 5))
+
+ self._ffmpeg_cmds = []
+ for ffmpeg_input in self._ffmpeg.inputs:
+ ffmpeg_cmd = self._get_ffmpeg_cmd(ffmpeg_input)
+ if ffmpeg_cmd is None:
+ continue
+
+ self._ffmpeg_cmds.append({
+ 'roles': ffmpeg_input.roles,
+ 'cmd': ffmpeg_cmd
+ })
+
+
+ self._set_zone_colors(self._zones)
+
+ def _get_ffmpeg_cmd(self, ffmpeg_input):
+ ffmpeg_output_args = []
+ if 'detect' in ffmpeg_input.roles:
+ ffmpeg_output_args = self.ffmpeg.output_args['detect'] + ffmpeg_output_args + ['pipe:']
+ if self.fps:
+ ffmpeg_output_args = ["-r", str(self.fps)] + ffmpeg_output_args
+ if 'rtmp' in ffmpeg_input.roles and self.rtmp.enabled:
+ ffmpeg_output_args = self.ffmpeg.output_args['rtmp'] + [
+ f"rtmp://127.0.0.1/live/{self.name}"
+ ] + ffmpeg_output_args
+ if 'clips' in ffmpeg_input.roles:
+ ffmpeg_output_args = self.ffmpeg.output_args['clips'] + [
+ f"{os.path.join(CACHE_DIR, self.name)}-%Y%m%d%H%M%S.mp4"
+ ] + ffmpeg_output_args
+ if 'record' in ffmpeg_input.roles and self.record.enabled:
+ ffmpeg_output_args = self.ffmpeg.output_args['record'] + [
+ f"{os.path.join(RECORD_DIR, self.name)}-%Y%m%d%H%M%S.mp4"
+ ] + ffmpeg_output_args
+
+ # if there arent any outputs enabled for this input
+ if len(ffmpeg_output_args) == 0:
+ return None
+
+ cmd = (['ffmpeg'] +
+ ffmpeg_input.global_args +
+ ffmpeg_input.hwaccel_args +
+ ffmpeg_input.input_args +
+ ['-i', ffmpeg_input.path] +
+ ffmpeg_output_args)
+
+ return [part for part in cmd if part != '']
+
+ def _set_zone_colors(self, zones: Dict[str, ZoneConfig]):
+ # set colors for zones
+ all_zone_names = zones.keys()
+ zone_colors = {}
+ colors = plt.cm.get_cmap('tab10', len(all_zone_names))
+ for i, zone in enumerate(all_zone_names):
+ zone_colors[zone] = tuple(int(round(255 * c)) for c in colors(i)[:3])
+
+ for name, zone in zones.items():
+ zone.color = zone_colors[name]
+
+ @property
+ def name(self):
+ return self._name
+
+ @property
+ def ffmpeg(self):
+ return self._ffmpeg
+
+ @property
+ def height(self):
+ return self._height
+
+ @property
+ def width(self):
+ return self._width
+
+ @property
+ def fps(self):
+ return self._fps
+
+ @property
+ def best_image_timeout(self):
+ return self._best_image_timeout
+
+ @property
+ def zones(self)-> Dict[str, ZoneConfig]:
+ return self._zones
+
+ @property
+ def clips(self):
+ return self._clips
+
+ @property
+ def record(self):
+ return self._record
+
+ @property
+ def rtmp(self):
+ return self._rtmp
+
+ @property
+ def snapshots(self):
+ return self._snapshots
+
+ @property
+ def mqtt(self):
+ return self._mqtt
+
+ @property
+ def objects(self):
+ return self._objects
+
+ @property
+ def motion(self):
+ return self._motion
+
+ @property
+ def detect(self):
+ return self._detect
+
+ @property
+ def frame_shape(self):
+ return self._frame_shape
+
+ @property
+ def frame_shape_yuv(self):
+ return self._frame_shape_yuv
+
+ @property
+ def ffmpeg_cmds(self):
+ return self._ffmpeg_cmds
+
+ def to_dict(self):
+ return {
+ 'name': self.name,
+ 'height': self.height,
+ 'width': self.width,
+ 'fps': self.fps,
+ 'best_image_timeout': self.best_image_timeout,
+ 'zones': {k: z.to_dict() for k, z in self.zones.items()},
+ 'clips': self.clips.to_dict(),
+ 'record': self.record.to_dict(),
+ 'rtmp': self.rtmp.to_dict(),
+ 'snapshots': self.snapshots.to_dict(),
+ 'mqtt': self.mqtt.to_dict(),
+ 'objects': self.objects.to_dict(),
+ 'motion': self.motion.to_dict(),
+ 'detect': self.detect.to_dict(),
+ 'frame_shape': self.frame_shape,
+ 'ffmpeg_cmds': [{'roles': c['roles'], 'cmd': ' '.join(c['cmd'])} for c in self.ffmpeg_cmds],
+ }
+
+
+class FrigateConfig():
+ def __init__(self, config_file=None, config=None):
+ if config is None and config_file is None:
+ raise ValueError('config or config_file must be defined')
+ elif not config_file is None:
+ config = self._load_file(config_file)
+
+ config = FRIGATE_CONFIG_SCHEMA(config)
+
+ config = self._sub_env_vars(config)
+
+ self._database = DatabaseConfig(config['database'])
+ self._model = ModelConfig(config['model'])
+ self._detectors = { name: DetectorConfig(d) for name, d in config['detectors'].items() }
+ self._mqtt = MqttConfig(config['mqtt'])
+ self._clips = ClipsConfig(config['clips'])
+ self._snapshots = SnapshotsConfig(config['snapshots'])
+ self._cameras = { name: CameraConfig(name, c, config) for name, c in config['cameras'].items() }
+ self._logger = LoggerConfig(config['logger'])
+ self._environment_vars = config['environment_vars']
+
+ def _sub_env_vars(self, config):
+ frigate_env_vars = {k: v for k, v in os.environ.items() if k.startswith('FRIGATE_')}
+
+ if 'password' in config['mqtt']:
+ config['mqtt']['password'] = config['mqtt']['password'].format(**frigate_env_vars)
+
+ for camera in config['cameras'].values():
+ for i in camera['ffmpeg']['inputs']:
+ i['path'] = i['path'].format(**frigate_env_vars)
+
+ return config
+
+ def _load_file(self, config_file):
+ with open(config_file) as f:
+ raw_config = f.read()
+
+ if config_file.endswith(".yml"):
+ config = yaml.safe_load(raw_config)
+ elif config_file.endswith(".json"):
+ config = json.loads(raw_config)
+
+ return config
+
+ def to_dict(self):
+ return {
+ 'database': self.database.to_dict(),
+ 'model': self.model.to_dict(),
+ 'detectors': {k: d.to_dict() for k, d in self.detectors.items()},
+ 'mqtt': self.mqtt.to_dict(),
+ 'clips': self.clips.to_dict(),
+ 'snapshots': self.snapshots.to_dict(),
+ 'cameras': {k: c.to_dict() for k, c in self.cameras.items()},
+ 'logger': self.logger.to_dict(),
+ 'environment_vars': self._environment_vars
+ }
+
+ @property
+ def database(self):
+ return self._database
+
+ @property
+ def model(self):
+ return self._model
+
+ @property
+ def detectors(self) -> Dict[str, DetectorConfig]:
+ return self._detectors
+
+ @property
+ def logger(self):
+ return self._logger
+
+ @property
+ def mqtt(self):
+ return self._mqtt
+
+ @property
+ def clips(self):
+ return self._clips
+
+ @property
+ def snapshots(self):
+ return self._snapshots
+
+ @property
+ def cameras(self) -> Dict[str, CameraConfig]:
+ return self._cameras
+
+ @property
+ def environment_vars(self):
+ return self._environment_vars
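+
+# Illustrative usage (a sketch, not part of the runtime path): the config can be
+# built either from an in-memory dict or from a yml/json file on disk, e.g.
+#   config = FrigateConfig(config_file='/config/config.yml')
+#   print(config.cameras['back'].ffmpeg_cmds)
+# where '/config/config.yml' and the camera name 'back' are placeholders.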
diff --git a/frigate/const.py b/frigate/const.py
new file mode 100644
--- /dev/null
+++ b/frigate/const.py
@@ -0,0 +1,3 @@
+CLIPS_DIR = '/media/frigate/clips'
+RECORD_DIR = '/media/frigate/recordings'
+CACHE_DIR = '/tmp/cache'
\ No newline at end of file
diff --git a/frigate/edgetpu.py b/frigate/edgetpu.py
--- a/frigate/edgetpu.py
+++ b/frigate/edgetpu.py
@@ -1,15 +1,23 @@
-import os
import datetime
import hashlib
+import logging
import multiprocessing as mp
+import os
import queue
-from multiprocessing.connection import Connection
+import threading
+import signal
from abc import ABC, abstractmethod
+from multiprocessing.connection import Connection
+from setproctitle import setproctitle
from typing import Dict
+
import numpy as np
import tflite_runtime.interpreter as tflite
from tflite_runtime.interpreter import load_delegate
-from frigate.util import EventsPerSecond, listen, SharedMemoryFrameManager
+
+from frigate.util import EventsPerSecond, SharedMemoryFrameManager, listen
+
+logger = logging.getLogger(__name__)
def load_labels(path, encoding='utf-8'):
"""Loads labels from file (with or without index numbers).
@@ -36,7 +44,7 @@ def detect(self, tensor_input, threshold = .4):
pass
class LocalObjectDetector(ObjectDetector):
- def __init__(self, tf_device=None, labels=None):
+ def __init__(self, tf_device=None, num_threads=3, labels=None):
self.fps = EventsPerSecond()
if labels is None:
self.labels = {}
@@ -51,19 +59,18 @@ def __init__(self, tf_device=None, labels=None):
if tf_device != 'cpu':
try:
- print(f"Attempting to load TPU as {device_config['device']}")
+ logger.info(f"Attempting to load TPU as {device_config['device']}")
edge_tpu_delegate = load_delegate('libedgetpu.so.1.0', device_config)
- print("TPU found")
+ logger.info("TPU found")
+ self.interpreter = tflite.Interpreter(
+ model_path='/edgetpu_model.tflite',
+ experimental_delegates=[edge_tpu_delegate])
except ValueError:
- print("No EdgeTPU detected. Falling back to CPU.")
-
- if edge_tpu_delegate is None:
- self.interpreter = tflite.Interpreter(
- model_path='/cpu_model.tflite')
+ logger.info("No EdgeTPU detected.")
+ raise
else:
self.interpreter = tflite.Interpreter(
- model_path='/edgetpu_model.tflite',
- experimental_delegates=[edge_tpu_delegate])
+ model_path='/cpu_model.tflite', num_threads=num_threads)
self.interpreter.allocate_tensors()
@@ -99,11 +106,22 @@ def detect_raw(self, tensor_input):
return detections
-def run_detector(detection_queue, out_events: Dict[str, mp.Event], avg_speed, start, tf_device):
- print(f"Starting detection process: {os.getpid()}")
+def run_detector(name: str, detection_queue: mp.Queue, out_events: Dict[str, mp.Event], avg_speed, start, model_shape, tf_device, num_threads):
+ threading.current_thread().name = f"detector:{name}"
+ logger = logging.getLogger(f"detector.{name}")
+ logger.info(f"Starting detection process: {os.getpid()}")
+ setproctitle(f"frigate.detector.{name}")
listen()
+
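+    # a local stop_event flipped by the SIGTERM/SIGINT handlers below lets the
+    # detection loop exit cleanly instead of being killed mid-inference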
+ stop_event = mp.Event()
+ def receiveSignal(signalNumber, frame):
+ stop_event.set()
+
+ signal.signal(signal.SIGTERM, receiveSignal)
+ signal.signal(signal.SIGINT, receiveSignal)
+
frame_manager = SharedMemoryFrameManager()
- object_detector = LocalObjectDetector(tf_device=tf_device)
+ object_detector = LocalObjectDetector(tf_device=tf_device, num_threads=num_threads)
outputs = {}
for name in out_events.keys():
@@ -115,8 +133,14 @@ def run_detector(detection_queue, out_events: Dict[str, mp.Event], avg_speed, st
}
while True:
- connection_id = detection_queue.get()
- input_frame = frame_manager.get(connection_id, (1,300,300,3))
+ if stop_event.is_set():
+ break
+
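+        # use a timed get so the loop wakes up regularly to re-check stop_event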
+ try:
+ connection_id = detection_queue.get(timeout=5)
+ except queue.Empty:
+ continue
+ input_frame = frame_manager.get(connection_id, (1,model_shape[0],model_shape[1],3))
if input_frame is None:
continue
@@ -132,21 +156,24 @@ def run_detector(detection_queue, out_events: Dict[str, mp.Event], avg_speed, st
avg_speed.value = (avg_speed.value*9 + duration)/10
class EdgeTPUProcess():
- def __init__(self, detection_queue, out_events, tf_device=None):
+ def __init__(self, name, detection_queue, out_events, model_shape, tf_device=None, num_threads=3):
+ self.name = name
self.out_events = out_events
self.detection_queue = detection_queue
self.avg_inference_speed = mp.Value('d', 0.01)
self.detection_start = mp.Value('d', 0.0)
self.detect_process = None
+ self.model_shape = model_shape
self.tf_device = tf_device
+ self.num_threads = num_threads
self.start_or_restart()
def stop(self):
self.detect_process.terminate()
- print("Waiting for detection process to exit gracefully...")
+ logging.info("Waiting for detection process to exit gracefully...")
self.detect_process.join(timeout=30)
if self.detect_process.exitcode is None:
- print("Detection process didnt exit. Force killing...")
+ logging.info("Detection process didnt exit. Force killing...")
self.detect_process.kill()
self.detect_process.join()
@@ -154,19 +181,19 @@ def start_or_restart(self):
self.detection_start.value = 0.0
if (not self.detect_process is None) and self.detect_process.is_alive():
self.stop()
- self.detect_process = mp.Process(target=run_detector, args=(self.detection_queue, self.out_events, self.avg_inference_speed, self.detection_start, self.tf_device))
+ self.detect_process = mp.Process(target=run_detector, name=f"detector:{self.name}", args=(self.name, self.detection_queue, self.out_events, self.avg_inference_speed, self.detection_start, self.model_shape, self.tf_device, self.num_threads))
self.detect_process.daemon = True
self.detect_process.start()
class RemoteObjectDetector():
- def __init__(self, name, labels, detection_queue, event):
+ def __init__(self, name, labels, detection_queue, event, model_shape):
self.labels = load_labels(labels)
self.name = name
self.fps = EventsPerSecond()
self.detection_queue = detection_queue
self.event = event
self.shm = mp.shared_memory.SharedMemory(name=self.name, create=False)
- self.np_shm = np.ndarray((1,300,300,3), dtype=np.uint8, buffer=self.shm.buf)
+ self.np_shm = np.ndarray((1,model_shape[0],model_shape[1],3), dtype=np.uint8, buffer=self.shm.buf)
self.out_shm = mp.shared_memory.SharedMemory(name=f"out-{self.name}", create=False)
self.out_np_shm = np.ndarray((20,6), dtype=np.float32, buffer=self.out_shm.buf)
@@ -196,4 +223,4 @@ def detect(self, tensor_input, threshold=.4):
def cleanup(self):
self.shm.unlink()
- self.out_shm.unlink()
\ No newline at end of file
+ self.out_shm.unlink()
diff --git a/frigate/events.py b/frigate/events.py
--- a/frigate/events.py
+++ b/frigate/events.py
@@ -1,36 +1,49 @@
+import datetime
+import json
+import logging
import os
-import time
-import psutil
+import queue
+import subprocess as sp
import threading
+import time
from collections import defaultdict
-import json
-import datetime
-import subprocess as sp
-import queue
+from pathlib import Path
+
+import psutil
+
+from frigate.config import FrigateConfig
+from frigate.const import RECORD_DIR, CLIPS_DIR, CACHE_DIR
+from frigate.models import Event
+
+from peewee import fn
+
+logger = logging.getLogger(__name__)
class EventProcessor(threading.Thread):
- def __init__(self, config, camera_processes, cache_dir, clip_dir, event_queue, stop_event):
+ def __init__(self, config, camera_processes, event_queue, event_processed_queue, stop_event):
threading.Thread.__init__(self)
+ self.name = 'event_processor'
self.config = config
self.camera_processes = camera_processes
- self.cache_dir = cache_dir
- self.clip_dir = clip_dir
self.cached_clips = {}
self.event_queue = event_queue
+ self.event_processed_queue = event_processed_queue
self.events_in_process = {}
self.stop_event = stop_event
def refresh_cache(self):
- cached_files = os.listdir(self.cache_dir)
+ cached_files = os.listdir(CACHE_DIR)
files_in_use = []
- for process_data in self.camera_processes.values():
+ for process in psutil.process_iter():
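+            # rather than tracking ffmpeg pids per camera, scan every running
+            # ffmpeg process and collect the cache files it still has open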
try:
- ffmpeg_process = psutil.Process(pid=process_data['ffmpeg_pid'].value)
- flist = ffmpeg_process.open_files()
+ if process.name() != 'ffmpeg':
+ continue
+
+ flist = process.open_files()
if flist:
for nt in flist:
- if nt.path.startswith(self.cache_dir):
+ if nt.path.startswith(CACHE_DIR):
files_in_use.append(nt.path.split('/')[-1])
except:
continue
@@ -50,7 +63,7 @@ def refresh_cache(self):
'format=duration',
'-of',
'default=noprint_wrappers=1:nokey=1',
- f"{os.path.join(self.cache_dir,f)}"
+ f"{os.path.join(CACHE_DIR,f)}"
])
p = sp.Popen(ffprobe_cmd, stdout=sp.PIPE, shell=True)
(output, err) = p.communicate()
@@ -58,8 +71,8 @@ def refresh_cache(self):
if p_status == 0:
duration = float(output.decode('utf-8').strip())
else:
- print(f"bad file: {f}")
- os.remove(os.path.join(self.cache_dir,f))
+ logger.info(f"bad file: {f}")
+ os.remove(os.path.join(CACHE_DIR,f))
continue
self.cached_clips[f] = {
@@ -75,27 +88,35 @@ def refresh_cache(self):
earliest_event = datetime.datetime.now().timestamp()
# if the earliest event exceeds the max seconds, cap it
- max_seconds = self.config.get('save_clips', {}).get('max_seconds', 300)
+ max_seconds = self.config.clips.max_seconds
if datetime.datetime.now().timestamp()-earliest_event > max_seconds:
earliest_event = datetime.datetime.now().timestamp()-max_seconds
for f, data in list(self.cached_clips.items()):
if earliest_event-90 > data['start_time']+data['duration']:
del self.cached_clips[f]
- os.remove(os.path.join(self.cache_dir,f))
+ logger.debug(f"Cleaning up cached file {f}")
+ os.remove(os.path.join(CACHE_DIR,f))
- def create_clip(self, camera, event_data, pre_capture):
+ def create_clip(self, camera, event_data, pre_capture, post_capture):
# get all clips from the camera with the event sorted
sorted_clips = sorted([c for c in self.cached_clips.values() if c['camera'] == camera], key = lambda i: i['start_time'])
- while sorted_clips[-1]['start_time'] + sorted_clips[-1]['duration'] < event_data['end_time']:
+        # if there are no clips in the cache, or we are still waiting on a needed file, check every 5 seconds
+ wait_count = 0
+ while len(sorted_clips) == 0 or sorted_clips[-1]['start_time'] + sorted_clips[-1]['duration'] < event_data['end_time']+post_capture:
+ if wait_count > 4:
+ logger.warning(f"Unable to create clip for {camera} and event {event_data['id']}. There were no cache files for this event.")
+ return False
+ logger.debug(f"No cache clips for {camera}. Waiting...")
time.sleep(5)
self.refresh_cache()
# get all clips from the camera with the event sorted
sorted_clips = sorted([c for c in self.cached_clips.values() if c['camera'] == camera], key = lambda i: i['start_time'])
+ wait_count += 1
playlist_start = event_data['start_time']-pre_capture
- playlist_end = event_data['end_time']+5
+ playlist_end = event_data['end_time']+post_capture
playlist_lines = []
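+        # build a playlist in ffmpeg concat-demuxer format; an inpoint entry trims
+        # the first clip so the output starts at the requested pre-capture offset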
for clip in sorted_clips:
# clip ends before playlist start time, skip
@@ -104,7 +125,7 @@ def create_clip(self, camera, event_data, pre_capture):
# clip starts after playlist ends, finish
if clip['start_time'] > playlist_end:
break
- playlist_lines.append(f"file '{os.path.join(self.cache_dir,clip['path'])}'")
+ playlist_lines.append(f"file '{os.path.join(CACHE_DIR,clip['path'])}'")
# if this is the starting clip, add an inpoint
if clip['start_time'] < playlist_start:
playlist_lines.append(f"inpoint {int(playlist_start-clip['start_time'])}")
@@ -126,21 +147,21 @@ def create_clip(self, camera, event_data, pre_capture):
'-',
'-c',
'copy',
- f"{os.path.join(self.clip_dir, clip_name)}.mp4"
+ '-movflags',
+ '+faststart',
+ f"{os.path.join(CLIPS_DIR, clip_name)}.mp4"
]
p = sp.run(ffmpeg_cmd, input="\n".join(playlist_lines), encoding='ascii', capture_output=True)
if p.returncode != 0:
- print(p.stderr)
- return
-
- with open(f"{os.path.join(self.clip_dir, clip_name)}.json", 'w') as outfile:
- json.dump(event_data, outfile)
+ logger.error(p.stderr)
+ return False
+ return True
def run(self):
while True:
if self.stop_event.is_set():
- print(f"Exiting event processor...")
+ logger.info(f"Exiting event processor...")
break
try:
@@ -150,25 +171,143 @@ def run(self):
self.refresh_cache()
continue
+ logger.debug(f"Event received: {event_type} {camera} {event_data['id']}")
self.refresh_cache()
- save_clips_config = self.config['cameras'][camera].get('save_clips', {})
-
- # if save clips is not enabled for this camera, just continue
- if not save_clips_config.get('enabled', False):
- continue
-
- # if specific objects are listed for this camera, only save clips for them
- if 'objects' in save_clips_config:
- if not event_data['label'] in save_clips_config['objects']:
- continue
-
if event_type == 'start':
self.events_in_process[event_data['id']] = event_data
if event_type == 'end':
- if len(self.cached_clips) > 0 and not event_data['false_positive']:
- self.create_clip(camera, event_data, save_clips_config.get('pre_capture', 30))
+ clips_config = self.config.cameras[camera].clips
+
+ if not event_data['false_positive']:
+ clip_created = False
+ if clips_config.enabled and (clips_config.objects is None or event_data['label'] in clips_config.objects):
+ clip_created = self.create_clip(camera, event_data, clips_config.pre_capture, clips_config.post_capture)
+
+ Event.create(
+ id=event_data['id'],
+ label=event_data['label'],
+ camera=camera,
+ start_time=event_data['start_time'],
+ end_time=event_data['end_time'],
+ top_score=event_data['top_score'],
+ false_positive=event_data['false_positive'],
+ zones=list(event_data['entered_zones']),
+ thumbnail=event_data['thumbnail'],
+ has_clip=clip_created,
+ has_snapshot=event_data['has_snapshot'],
+ )
del self.events_in_process[event_data['id']]
+ self.event_processed_queue.put((event_data['id'], camera))
+
+class EventCleanup(threading.Thread):
+ def __init__(self, config: FrigateConfig, stop_event):
+ threading.Thread.__init__(self)
+ self.name = 'event_cleanup'
+ self.config = config
+ self.stop_event = stop_event
+ self.camera_keys = list(self.config.cameras.keys())
+
+ def expire(self, media):
+ ## Expire events from unlisted cameras based on the global config
+ if media == 'clips':
+ retain_config = self.config.clips.retain
+ file_extension = 'mp4'
+ update_params = {'has_clip': False}
+ else:
+ retain_config = self.config.snapshots.retain
+ file_extension = 'jpg'
+ update_params = {'has_snapshot': False}
+
+ distinct_labels = (Event.select(Event.label)
+ .where(Event.camera.not_in(self.camera_keys))
+ .distinct())
+
+ # loop over object types in db
+ for l in distinct_labels:
+ # get expiration time for this label
+ expire_days = retain_config.objects.get(l.label, retain_config.default)
+ expire_after = (datetime.datetime.now() - datetime.timedelta(days=expire_days)).timestamp()
+            # grab all events that started before the expiration cutoff
+ expired_events = (
+ Event.select()
+ .where(Event.camera.not_in(self.camera_keys),
+ Event.start_time < expire_after,
+ Event.label == l.label)
+ )
+ # delete the media from disk
+ for event in expired_events:
+ media_name = f"{event.camera}-{event.id}"
+ media = Path(f"{os.path.join(CLIPS_DIR, media_name)}.{file_extension}")
+ media.unlink(missing_ok=True)
+            # update the has_clip/has_snapshot attribute for the db entry
+ update_query = (
+ Event.update(update_params)
+ .where(Event.camera.not_in(self.camera_keys),
+ Event.start_time < expire_after,
+ Event.label == l.label)
+ )
+ update_query.execute()
+
+ ## Expire events from cameras based on the camera config
+ for name, camera in self.config.cameras.items():
+ if media == 'clips':
+ retain_config = camera.clips.retain
+ else:
+ retain_config = camera.snapshots.retain
+ # get distinct objects in database for this camera
+ distinct_labels = (Event.select(Event.label)
+ .where(Event.camera == name)
+ .distinct())
+
+ # loop over object types in db
+ for l in distinct_labels:
+ # get expiration time for this label
+ expire_days = retain_config.objects.get(l.label, retain_config.default)
+ expire_after = (datetime.datetime.now() - datetime.timedelta(days=expire_days)).timestamp()
+                # grab all events that started before the expiration cutoff
+ expired_events = (
+ Event.select()
+ .where(Event.camera == name,
+ Event.start_time < expire_after,
+ Event.label == l.label)
+ )
+                # delete the media from disk
+ for event in expired_events:
+ media_name = f"{event.camera}-{event.id}"
+ media = Path(f"{os.path.join(CLIPS_DIR, media_name)}.{file_extension}")
+ media.unlink(missing_ok=True)
+                # update the has_clip/has_snapshot attribute for the db entry
+ update_query = (
+ Event.update(update_params)
+ .where( Event.camera == name,
+ Event.start_time < expire_after,
+ Event.label == l.label)
+ )
+ update_query.execute()
+
+ def run(self):
+ counter = 0
+        while True:
+ if self.stop_event.is_set():
+ logger.info(f"Exiting event cleanup...")
+ break
+
+ # only expire events every 10 minutes, but check for stop events every 10 seconds
+ time.sleep(10)
+ counter = counter + 1
+ if counter < 60:
+ continue
+ counter = 0
+
+ self.expire('clips')
+ self.expire('snapshots')
-
\ No newline at end of file
+ # drop events from db where has_clip and has_snapshot are false
+ delete_query = (
+ Event.delete()
+ .where( Event.has_clip == False,
+ Event.has_snapshot == False)
+ )
+ delete_query.execute()
diff --git a/frigate/http.py b/frigate/http.py
new file mode 100644
--- /dev/null
+++ b/frigate/http.py
@@ -0,0 +1,301 @@
+import base64
+import datetime
+import logging
+import os
+import time
+from functools import reduce
+
+import cv2
+import numpy as np
+from flask import (Blueprint, Flask, Response, current_app, jsonify,
+ make_response, request)
+from peewee import SqliteDatabase, operator, fn, DoesNotExist
+from playhouse.shortcuts import model_to_dict
+
+from frigate.const import CLIPS_DIR
+from frigate.models import Event
+from frigate.stats import stats_snapshot
+from frigate.util import calculate_region
+from frigate.version import VERSION
+
+logger = logging.getLogger(__name__)
+
+bp = Blueprint('frigate', __name__)
+
+def create_app(frigate_config, database: SqliteDatabase, stats_tracking, detected_frames_processor):
+ app = Flask(__name__)
+
+ @app.before_request
+ def _db_connect():
+ database.connect()
+
+ @app.teardown_request
+ def _db_close(exc):
+ if not database.is_closed():
+ database.close()
+
+ app.frigate_config = frigate_config
+ app.stats_tracking = stats_tracking
+ app.detected_frames_processor = detected_frames_processor
+
+ app.register_blueprint(bp)
+
+ return app
+
[email protected]('/')
+def is_healthy():
+ return "Frigate is running. Alive and healthy!"
+
[email protected]('/events/summary')
+def events_summary():
+ has_clip = request.args.get('has_clip', type=int)
+ has_snapshot = request.args.get('has_snapshot', type=int)
+
+ clauses = []
+
+ if not has_clip is None:
+ clauses.append((Event.has_clip == has_clip))
+
+ if not has_snapshot is None:
+ clauses.append((Event.has_snapshot == has_snapshot))
+
+ if len(clauses) == 0:
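+        # no filters were supplied; append an always-true clause so that
+        # reduce(operator.and_, clauses) below still has something to combine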
+ clauses.append((1 == 1))
+
+ groups = (
+ Event
+ .select(
+ Event.camera,
+ Event.label,
+ fn.strftime('%Y-%m-%d', fn.datetime(Event.start_time, 'unixepoch', 'localtime')).alias('day'),
+ Event.zones,
+ fn.COUNT(Event.id).alias('count')
+ )
+ .where(reduce(operator.and_, clauses))
+ .group_by(
+ Event.camera,
+ Event.label,
+ fn.strftime('%Y-%m-%d', fn.datetime(Event.start_time, 'unixepoch', 'localtime')),
+ Event.zones
+ )
+ )
+
+ return jsonify([e for e in groups.dicts()])
+
[email protected]('/events/<id>')
+def event(id):
+ try:
+ return model_to_dict(Event.get(Event.id == id))
+ except DoesNotExist:
+ return "Event not found", 404
+
[email protected]('/events/<id>/thumbnail.jpg')
+def event_thumbnail(id):
+ format = request.args.get('format', 'ios')
+ thumbnail_bytes = None
+ try:
+ event = Event.get(Event.id == id)
+ thumbnail_bytes = base64.b64decode(event.thumbnail)
+ except DoesNotExist:
+ # see if the object is currently being tracked
+ try:
+ for camera_state in current_app.detected_frames_processor.camera_states.values():
+ if id in camera_state.tracked_objects:
+ tracked_obj = camera_state.tracked_objects.get(id)
+ if not tracked_obj is None:
+ thumbnail_bytes = tracked_obj.get_thumbnail()
+ except:
+ return "Event not found", 404
+
+ if thumbnail_bytes is None:
+ return "Event not found", 404
+
+ # android notifications prefer a 2:1 ratio
+ if format == 'android':
+ jpg_as_np = np.frombuffer(thumbnail_bytes, dtype=np.uint8)
+ img = cv2.imdecode(jpg_as_np, flags=1)
+ thumbnail = cv2.copyMakeBorder(img, 0, 0, int(img.shape[1]*0.5), int(img.shape[1]*0.5), cv2.BORDER_CONSTANT, (0,0,0))
+ ret, jpg = cv2.imencode('.jpg', thumbnail)
+ thumbnail_bytes = jpg.tobytes()
+
+ response = make_response(thumbnail_bytes)
+ response.headers['Content-Type'] = 'image/jpg'
+ return response
+
[email protected]('/events/<id>/snapshot.jpg')
+def event_snapshot(id):
+ jpg_bytes = None
+ try:
+ event = Event.get(Event.id == id)
+ if not event.has_snapshot:
+ return "Snapshot not available", 404
+ # read snapshot from disk
+ with open(os.path.join(CLIPS_DIR, f"{event.camera}-{id}.jpg"), 'rb') as image_file:
+ jpg_bytes = image_file.read()
+ except DoesNotExist:
+ # see if the object is currently being tracked
+ try:
+ for camera_state in current_app.detected_frames_processor.camera_states.values():
+ if id in camera_state.tracked_objects:
+ tracked_obj = camera_state.tracked_objects.get(id)
+ if not tracked_obj is None:
+ jpg_bytes = tracked_obj.get_jpg_bytes(
+ timestamp=request.args.get('timestamp', type=int),
+ bounding_box=request.args.get('bbox', type=int),
+ crop=request.args.get('crop', type=int),
+ height=request.args.get('h', type=int)
+ )
+ except:
+ return "Event not found", 404
+ except:
+ return "Event not found", 404
+
+ response = make_response(jpg_bytes)
+ response.headers['Content-Type'] = 'image/jpg'
+ return response
+
[email protected]('/events')
+def events():
+ limit = request.args.get('limit', 100)
+ camera = request.args.get('camera')
+ label = request.args.get('label')
+ zone = request.args.get('zone')
+ after = request.args.get('after', type=int)
+ before = request.args.get('before', type=int)
+ has_clip = request.args.get('has_clip', type=int)
+ has_snapshot = request.args.get('has_snapshot', type=int)
+
+ clauses = []
+
+ if camera:
+ clauses.append((Event.camera == camera))
+
+ if label:
+ clauses.append((Event.label == label))
+
+ if zone:
+ clauses.append((Event.zones.cast('text') % f"*\"{zone}\"*"))
+
+ if after:
+ clauses.append((Event.start_time >= after))
+
+ if before:
+ clauses.append((Event.start_time <= before))
+
+ if not has_clip is None:
+ clauses.append((Event.has_clip == has_clip))
+
+ if not has_snapshot is None:
+ clauses.append((Event.has_snapshot == has_snapshot))
+
+ if len(clauses) == 0:
+ clauses.append((1 == 1))
+
+ events = (Event.select()
+ .where(reduce(operator.and_, clauses))
+ .order_by(Event.start_time.desc())
+ .limit(limit))
+
+ return jsonify([model_to_dict(e) for e in events])
+
[email protected]('/config')
+def config():
+ return jsonify(current_app.frigate_config.to_dict())
+
[email protected]('/version')
+def version():
+ return VERSION
+
[email protected]('/stats')
+def stats():
+ stats = stats_snapshot(current_app.stats_tracking)
+ return jsonify(stats)
+
[email protected]('/<camera_name>/<label>/best.jpg')
+def best(camera_name, label):
+ if camera_name in current_app.frigate_config.cameras:
+ best_object = current_app.detected_frames_processor.get_best(camera_name, label)
+ best_frame = best_object.get('frame')
+ if best_frame is None:
+ best_frame = np.zeros((720,1280,3), np.uint8)
+ else:
+ best_frame = cv2.cvtColor(best_frame, cv2.COLOR_YUV2BGR_I420)
+
+ crop = bool(request.args.get('crop', 0, type=int))
+ if crop:
+ box = best_object.get('box', (0,0,300,300))
+ region = calculate_region(best_frame.shape, box[0], box[1], box[2], box[3], 1.1)
+ best_frame = best_frame[region[1]:region[3], region[0]:region[2]]
+
+ height = int(request.args.get('h', str(best_frame.shape[0])))
+ width = int(height*best_frame.shape[1]/best_frame.shape[0])
+
+ best_frame = cv2.resize(best_frame, dsize=(width, height), interpolation=cv2.INTER_AREA)
+ ret, jpg = cv2.imencode('.jpg', best_frame)
+ response = make_response(jpg.tobytes())
+ response.headers['Content-Type'] = 'image/jpg'
+ return response
+ else:
+ return "Camera named {} not found".format(camera_name), 404
+
[email protected]('/<camera_name>')
+def mjpeg_feed(camera_name):
+ fps = int(request.args.get('fps', '3'))
+ height = int(request.args.get('h', '360'))
+ draw_options = {
+ 'bounding_boxes': request.args.get('bbox', type=int),
+ 'timestamp': request.args.get('timestamp', type=int),
+ 'zones': request.args.get('zones', type=int),
+ 'mask': request.args.get('mask', type=int),
+ 'motion_boxes': request.args.get('motion', type=int),
+ 'regions': request.args.get('regions', type=int),
+ }
+ if camera_name in current_app.frigate_config.cameras:
+ # return a multipart response
+ return Response(imagestream(current_app.detected_frames_processor, camera_name, fps, height, draw_options),
+ mimetype='multipart/x-mixed-replace; boundary=frame')
+ else:
+ return "Camera named {} not found".format(camera_name), 404
+
[email protected]('/<camera_name>/latest.jpg')
+def latest_frame(camera_name):
+ draw_options = {
+ 'bounding_boxes': request.args.get('bbox', type=int),
+ 'timestamp': request.args.get('timestamp', type=int),
+ 'zones': request.args.get('zones', type=int),
+ 'mask': request.args.get('mask', type=int),
+ 'motion_boxes': request.args.get('motion', type=int),
+ 'regions': request.args.get('regions', type=int),
+ }
+ if camera_name in current_app.frigate_config.cameras:
+        # grab the latest processed frame with the requested annotations
+ frame = current_app.detected_frames_processor.get_current_frame(camera_name, draw_options)
+ if frame is None:
+ frame = np.zeros((720,1280,3), np.uint8)
+
+ height = int(request.args.get('h', str(frame.shape[0])))
+ width = int(height*frame.shape[1]/frame.shape[0])
+
+ frame = cv2.resize(frame, dsize=(width, height), interpolation=cv2.INTER_AREA)
+
+ ret, jpg = cv2.imencode('.jpg', frame)
+ response = make_response(jpg.tobytes())
+ response.headers['Content-Type'] = 'image/jpg'
+ return response
+ else:
+ return "Camera named {} not found".format(camera_name), 404
+
+def imagestream(detected_frames_processor, camera_name, fps, height, draw_options):
+ while True:
+ # max out at specified FPS
+ time.sleep(1/fps)
+ frame = detected_frames_processor.get_current_frame(camera_name, draw_options)
+ if frame is None:
+ frame = np.zeros((height,int(height*16/9),3), np.uint8)
+
+ width = int(height*frame.shape[1]/frame.shape[0])
+ frame = cv2.resize(frame, dsize=(width, height), interpolation=cv2.INTER_LINEAR)
+
+ ret, jpg = cv2.imencode('.jpg', frame)
+ yield (b'--frame\r\n'
+ b'Content-Type: image/jpeg\r\n\r\n' + jpg.tobytes() + b'\r\n\r\n')
diff --git a/frigate/log.py b/frigate/log.py
new file mode 100644
--- /dev/null
+++ b/frigate/log.py
@@ -0,0 +1,77 @@
+# adapted from https://medium.com/@jonathonbao/python3-logging-with-multiprocessing-f51f460b8778
+import logging
+import threading
+import os
+import signal
+import queue
+import multiprocessing as mp
+from logging import handlers
+from setproctitle import setproctitle
+
+
+def listener_configurer():
+ root = logging.getLogger()
+ console_handler = logging.StreamHandler()
+ formatter = logging.Formatter('%(name)-30s %(levelname)-8s: %(message)s')
+ console_handler.setFormatter(formatter)
+ root.addHandler(console_handler)
+ root.setLevel(logging.INFO)
+
+def root_configurer(queue):
+ h = handlers.QueueHandler(queue)
+ root = logging.getLogger()
+ root.addHandler(h)
+ root.setLevel(logging.INFO)
+
+def log_process(log_queue):
+ stop_event = mp.Event()
+ def receiveSignal(signalNumber, frame):
+ stop_event.set()
+
+ signal.signal(signal.SIGTERM, receiveSignal)
+ signal.signal(signal.SIGINT, receiveSignal)
+
+ threading.current_thread().name = f"logger"
+ setproctitle("frigate.logger")
+ listener_configurer()
+ while True:
+ if stop_event.is_set() and log_queue.empty():
+ break
+ try:
+ record = log_queue.get(timeout=5)
+ except queue.Empty:
+ continue
+ logger = logging.getLogger(record.name)
+ logger.handle(record)
+
+# based on https://codereview.stackexchange.com/a/17959
+class LogPipe(threading.Thread):
+ def __init__(self, log_name, level):
+ """Setup the object with a logger and a loglevel
+ and start the thread
+ """
+ threading.Thread.__init__(self)
+ self.daemon = False
+ self.logger = logging.getLogger(log_name)
+ self.level = level
+ self.fdRead, self.fdWrite = os.pipe()
+ self.pipeReader = os.fdopen(self.fdRead)
+ self.start()
+
+ def fileno(self):
+ """Return the write file descriptor of the pipe
+ """
+ return self.fdWrite
+
+ def run(self):
+ """Run the thread, logging everything.
+ """
+ for line in iter(self.pipeReader.readline, ''):
+ self.logger.log(self.level, line.strip('\n'))
+
+ self.pipeReader.close()
+
+ def close(self):
+ """Close the write end of the pipe.
+ """
+ os.close(self.fdWrite)
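+
+# Illustrative use (a sketch; the callers live elsewhere): a LogPipe can be handed
+# to a subprocess as its stderr so that output is routed through the logger, e.g.
+#   logpipe = LogPipe('ffmpeg.front_door', logging.ERROR)
+#   sp.Popen(ffmpeg_cmd, stderr=logpipe)
+#   ...
+#   logpipe.close()
+# 'ffmpeg.front_door' and ffmpeg_cmd are placeholder names here.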
diff --git a/frigate/models.py b/frigate/models.py
new file mode 100644
--- /dev/null
+++ b/frigate/models.py
@@ -0,0 +1,16 @@
+from peewee import *
+from playhouse.sqlite_ext import *
+
+
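+# one row per tracked object event, including its base64 thumbnail and flags for
+# whether a clip/snapshot exists on disk; this model backs the /events HTTP API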
+class Event(Model):
+ id = CharField(null=False, primary_key=True, max_length=30)
+ label = CharField(index=True, max_length=20)
+ camera = CharField(index=True, max_length=20)
+ start_time = DateTimeField()
+ end_time = DateTimeField()
+ top_score = FloatField()
+ false_positive = BooleanField()
+ zones = JSONField()
+ thumbnail = TextField()
+ has_clip = BooleanField(default=True)
+ has_snapshot = BooleanField(default=True)
diff --git a/frigate/motion.py b/frigate/motion.py
--- a/frigate/motion.py
+++ b/frigate/motion.py
@@ -1,17 +1,20 @@
import cv2
import imutils
import numpy as np
+from frigate.config import MotionConfig
+
class MotionDetector():
- def __init__(self, frame_shape, mask, resize_factor=4):
+ def __init__(self, frame_shape, config: MotionConfig):
+ self.config = config
self.frame_shape = frame_shape
- self.resize_factor = resize_factor
- self.motion_frame_size = (int(frame_shape[0]/resize_factor), int(frame_shape[1]/resize_factor))
+ self.resize_factor = frame_shape[0]/config.frame_height
+ self.motion_frame_size = (config.frame_height, config.frame_height*frame_shape[1]//frame_shape[0])
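+        # motion detection runs on a frame downscaled to config.frame_height;
+        # resize_factor maps the resulting motion boxes back to full-resolution coordinates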
self.avg_frame = np.zeros(self.motion_frame_size, np.float)
self.avg_delta = np.zeros(self.motion_frame_size, np.float)
self.motion_frame_count = 0
self.frame_counter = 0
- resized_mask = cv2.resize(mask, dsize=(self.motion_frame_size[1], self.motion_frame_size[0]), interpolation=cv2.INTER_LINEAR)
+ resized_mask = cv2.resize(config.mask, dsize=(self.motion_frame_size[1], self.motion_frame_size[0]), interpolation=cv2.INTER_LINEAR)
self.mask = np.where(resized_mask==[0])
def detect(self, frame):
@@ -22,6 +25,8 @@ def detect(self, frame):
# resize frame
resized_frame = cv2.resize(gray, dsize=(self.motion_frame_size[1], self.motion_frame_size[0]), interpolation=cv2.INTER_LINEAR)
+ # TODO: can I improve the contrast of the grayscale image here?
+
# convert to grayscale
# resized_frame = cv2.cvtColor(resized_frame, cv2.COLOR_BGR2GRAY)
@@ -37,22 +42,21 @@ def detect(self, frame):
frameDelta = cv2.absdiff(resized_frame, cv2.convertScaleAbs(self.avg_frame))
# compute the average delta over the past few frames
- # the alpha value can be modified to configure how sensitive the motion detection is.
         # higher values mean the current frame impacts the delta a lot, and a single raindrop may
         # register as motion; too low and a fast-moving person won't be detected as motion
- # this also assumes that a person is in the same location across more than a single frame
- cv2.accumulateWeighted(frameDelta, self.avg_delta, 0.2)
+ cv2.accumulateWeighted(frameDelta, self.avg_delta, self.config.delta_alpha)
# compute the threshold image for the current frame
- current_thresh = cv2.threshold(frameDelta, 25, 255, cv2.THRESH_BINARY)[1]
+ # TODO: threshold
+ current_thresh = cv2.threshold(frameDelta, self.config.threshold, 255, cv2.THRESH_BINARY)[1]
         # black out everything in the avg_delta where there isn't motion in the current frame
avg_delta_image = cv2.convertScaleAbs(self.avg_delta)
- avg_delta_image[np.where(current_thresh==[0])] = [0]
+ avg_delta_image = cv2.bitwise_and(avg_delta_image, current_thresh)
# then look for deltas above the threshold, but only in areas where there is a delta
# in the current frame. this prevents deltas from previous frames from being included
- thresh = cv2.threshold(avg_delta_image, 25, 255, cv2.THRESH_BINARY)[1]
+ thresh = cv2.threshold(avg_delta_image, self.config.threshold, 255, cv2.THRESH_BINARY)[1]
# dilate the thresholded image to fill in holes, then find contours
# on thresholded image
@@ -64,19 +68,18 @@ def detect(self, frame):
for c in cnts:
# if the contour is big enough, count it as motion
contour_area = cv2.contourArea(c)
- if contour_area > 100:
+ if contour_area > self.config.contour_area:
x, y, w, h = cv2.boundingRect(c)
- motion_boxes.append((x*self.resize_factor, y*self.resize_factor, (x+w)*self.resize_factor, (y+h)*self.resize_factor))
+ motion_boxes.append((int(x*self.resize_factor), int(y*self.resize_factor), int((x+w)*self.resize_factor), int((y+h)*self.resize_factor)))
if len(motion_boxes) > 0:
self.motion_frame_count += 1
- # TODO: this really depends on FPS
if self.motion_frame_count >= 10:
- # only average in the current frame if the difference persists for at least 3 frames
- cv2.accumulateWeighted(resized_frame, self.avg_frame, 0.2)
+ # only average in the current frame if the difference persists for a bit
+ cv2.accumulateWeighted(resized_frame, self.avg_frame, self.config.frame_alpha)
else:
# when no motion, just keep averaging the frames together
- cv2.accumulateWeighted(resized_frame, self.avg_frame, 0.2)
+ cv2.accumulateWeighted(resized_frame, self.avg_frame, self.config.frame_alpha)
self.motion_frame_count = 0
- return motion_boxes
\ No newline at end of file
+ return motion_boxes
diff --git a/frigate/mqtt.py b/frigate/mqtt.py
new file mode 100644
--- /dev/null
+++ b/frigate/mqtt.py
@@ -0,0 +1,125 @@
+import logging
+import threading
+
+import paho.mqtt.client as mqtt
+
+from frigate.config import FrigateConfig
+
+logger = logging.getLogger(__name__)
+
+def create_mqtt_client(config: FrigateConfig, camera_metrics):
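+    # builds the paho client, registers per-camera clips/snapshots/detect command
+    # callbacks, publishes the initial ON/OFF states and subscribes to the .../set topics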
+ mqtt_config = config.mqtt
+
+ def on_clips_command(client, userdata, message):
+ payload = message.payload.decode()
+ logger.debug(f"on_clips_toggle: {message.topic} {payload}")
+
+ camera_name = message.topic.split('/')[-3]
+
+ clips_settings = config.cameras[camera_name].clips
+
+ if payload == 'ON':
+ if not clips_settings.enabled:
+ logger.info(f"Turning on clips for {camera_name} via mqtt")
+ clips_settings._enabled = True
+ elif payload == 'OFF':
+ if clips_settings.enabled:
+ logger.info(f"Turning off clips for {camera_name} via mqtt")
+ clips_settings._enabled = False
+ else:
+ logger.warning(f"Received unsupported value at {message.topic}: {payload}")
+
+ state_topic = f"{message.topic[:-4]}/state"
+ client.publish(state_topic, payload, retain=True)
+
+ def on_snapshots_command(client, userdata, message):
+ payload = message.payload.decode()
+ logger.debug(f"on_snapshots_toggle: {message.topic} {payload}")
+
+ camera_name = message.topic.split('/')[-3]
+
+ snapshots_settings = config.cameras[camera_name].snapshots
+
+ if payload == 'ON':
+ if not snapshots_settings.enabled:
+ logger.info(f"Turning on snapshots for {camera_name} via mqtt")
+ snapshots_settings._enabled = True
+ elif payload == 'OFF':
+ if snapshots_settings.enabled:
+ logger.info(f"Turning off snapshots for {camera_name} via mqtt")
+ snapshots_settings._enabled = False
+ else:
+ logger.warning(f"Received unsupported value at {message.topic}: {payload}")
+
+ state_topic = f"{message.topic[:-4]}/state"
+ client.publish(state_topic, payload, retain=True)
+
+ def on_detect_command(client, userdata, message):
+ payload = message.payload.decode()
+ logger.debug(f"on_detect_toggle: {message.topic} {payload}")
+
+ camera_name = message.topic.split('/')[-3]
+
+ detect_settings = config.cameras[camera_name].detect
+
+ if payload == 'ON':
+ if not camera_metrics[camera_name]["detection_enabled"].value:
+ logger.info(f"Turning on detection for {camera_name} via mqtt")
+ camera_metrics[camera_name]["detection_enabled"].value = True
+ detect_settings._enabled = True
+ elif payload == 'OFF':
+ if camera_metrics[camera_name]["detection_enabled"].value:
+ logger.info(f"Turning off detection for {camera_name} via mqtt")
+ camera_metrics[camera_name]["detection_enabled"].value = False
+ detect_settings._enabled = False
+ else:
+ logger.warning(f"Received unsupported value at {message.topic}: {payload}")
+
+ state_topic = f"{message.topic[:-4]}/state"
+ client.publish(state_topic, payload, retain=True)
+
+ def on_connect(client, userdata, flags, rc):
+ threading.current_thread().name = "mqtt"
+ if rc != 0:
+ if rc == 3:
+ logger.error("MQTT Server unavailable")
+ elif rc == 4:
+ logger.error("MQTT Bad username or password")
+ elif rc == 5:
+ logger.error("MQTT Not authorized")
+ else:
+ logger.error("Unable to connect to MQTT: Connection refused. Error code: " + str(rc))
+
+ logger.info("MQTT connected")
+ client.publish(mqtt_config.topic_prefix+'/available', 'online', retain=True)
+
+ client = mqtt.Client(client_id=mqtt_config.client_id)
+ client.on_connect = on_connect
+ client.will_set(mqtt_config.topic_prefix+'/available', payload='offline', qos=1, retain=True)
+
+ # register callbacks
+ for name in config.cameras.keys():
+ client.message_callback_add(f"{mqtt_config.topic_prefix}/{name}/clips/set", on_clips_command)
+ client.message_callback_add(f"{mqtt_config.topic_prefix}/{name}/snapshots/set", on_snapshots_command)
+ client.message_callback_add(f"{mqtt_config.topic_prefix}/{name}/detect/set", on_detect_command)
+
+ if not mqtt_config.user is None:
+ client.username_pw_set(mqtt_config.user, password=mqtt_config.password)
+ try:
+ client.connect(mqtt_config.host, mqtt_config.port, 60)
+ except Exception as e:
+ logger.error(f"Unable to connect to MQTT server: {e}")
+ raise
+
+ client.loop_start()
+
+ for name in config.cameras.keys():
+ client.publish(f"{mqtt_config.topic_prefix}/{name}/clips/state", 'ON' if config.cameras[name].clips.enabled else 'OFF', retain=True)
+ client.publish(f"{mqtt_config.topic_prefix}/{name}/snapshots/state", 'ON' if config.cameras[name].snapshots.enabled else 'OFF', retain=True)
+ client.publish(f"{mqtt_config.topic_prefix}/{name}/detect/state", 'ON' if config.cameras[name].detect.enabled else 'OFF', retain=True)
+
+ client.subscribe(f"{mqtt_config.topic_prefix}/+/clips/set")
+ client.subscribe(f"{mqtt_config.topic_prefix}/+/snapshots/set")
+ client.subscribe(f"{mqtt_config.topic_prefix}/+/detect/set")
+
+ return client
diff --git a/frigate/object_processing.py b/frigate/object_processing.py
--- a/frigate/object_processing.py
+++ b/frigate/object_processing.py
@@ -1,20 +1,28 @@
-import json
-import hashlib
-import datetime
-import time
import copy
-import cv2
-import threading
+import base64
+import datetime
+import hashlib
+import itertools
+import json
+import logging
+import os
import queue
-import copy
-import numpy as np
+import threading
+import time
from collections import Counter, defaultdict
-import itertools
+from statistics import mean, median
+from typing import Callable, Dict
+
+import cv2
import matplotlib.pyplot as plt
-from frigate.util import draw_box_with_label, SharedMemoryFrameManager
+import numpy as np
+
+from frigate.config import FrigateConfig, CameraConfig
+from frigate.const import RECORD_DIR, CLIPS_DIR, CACHE_DIR
from frigate.edgetpu import load_labels
-from typing import Callable, Dict
-from statistics import mean, median
+from frigate.util import SharedMemoryFrameManager, draw_box_with_label, calculate_region
+
+logger = logging.getLogger(__name__)
PATH_TO_LABELS = '/labelmap.txt'
@@ -25,26 +33,216 @@
for key, val in LABELS.items():
COLOR_MAP[val] = tuple(int(round(255 * c)) for c in cmap(key)[:3])
-def zone_filtered(obj, object_config):
- object_name = obj['label']
+def on_edge(box, frame_shape):
+ if (
+ box[0] == 0 or
+ box[1] == 0 or
+ box[2] == frame_shape[1]-1 or
+ box[3] == frame_shape[0]-1
+ ):
+ return True
+
+def is_better_thumbnail(current_thumb, new_obj, frame_shape) -> bool:
+ # larger is better
+ # cutoff images are less ideal, but they should also be smaller?
+ # better scores are obviously better too
+
+ # if the new_thumb is on an edge, and the current thumb is not
+ if on_edge(new_obj['box'], frame_shape) and not on_edge(current_thumb['box'], frame_shape):
+ return False
+
+ # if the score is better by more than 5%
+ if new_obj['score'] > current_thumb['score']+.05:
+ return True
+
+ # if the area is 10% larger
+ if new_obj['area'] > current_thumb['area']*1.1:
+ return True
+
+ return False
+
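+# wraps the raw detection data for a single tracked object and maintains its score
+# history, false-positive status, best thumbnail and zone membership over time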
+class TrackedObject():
+ def __init__(self, camera, camera_config: CameraConfig, frame_cache, obj_data):
+ self.obj_data = obj_data
+ self.camera = camera
+ self.camera_config = camera_config
+ self.frame_cache = frame_cache
+ self.current_zones = []
+ self.entered_zones = set()
+ self.false_positive = True
+ self.top_score = self.computed_score = 0.0
+ self.thumbnail_data = None
+ self.last_updated = 0
+ self.last_published = 0
+ self.frame = None
+ self.previous = self.to_dict()
+
+ # start the score history
+ self.score_history = [self.obj_data['score']]
+
+ def _is_false_positive(self):
+ # once a true positive, always a true positive
+ if not self.false_positive:
+ return False
+
+ threshold = self.camera_config.objects.filters[self.obj_data['label']].threshold
+ if self.computed_score < threshold:
+ return True
+ return False
+
+ def compute_score(self):
+ scores = self.score_history[:]
+        # pad with zeros if you don't have at least 3 scores
+ if len(scores) < 3:
+ scores += [0.0]*(3 - len(scores))
+ return median(scores)
+
+ def update(self, current_frame_time, obj_data):
+ significant_update = False
+ self.obj_data.update(obj_data)
+ # if the object is not in the current frame, add a 0.0 to the score history
+ if self.obj_data['frame_time'] != current_frame_time:
+ self.score_history.append(0.0)
+ else:
+ self.score_history.append(self.obj_data['score'])
+ # only keep the last 10 scores
+ if len(self.score_history) > 10:
+ self.score_history = self.score_history[-10:]
+
+ # calculate if this is a false positive
+ self.computed_score = self.compute_score()
+ if self.computed_score > self.top_score:
+ self.top_score = self.computed_score
+ self.false_positive = self._is_false_positive()
+
+ if not self.false_positive:
+ # determine if this frame is a better thumbnail
+ if (
+ self.thumbnail_data is None
+ or is_better_thumbnail(self.thumbnail_data, self.obj_data, self.camera_config.frame_shape)
+ ):
+ self.thumbnail_data = {
+ 'frame_time': self.obj_data['frame_time'],
+ 'box': self.obj_data['box'],
+ 'area': self.obj_data['area'],
+ 'region': self.obj_data['region'],
+ 'score': self.obj_data['score']
+ }
+ significant_update = True
+
+ # check zones
+ current_zones = []
+ bottom_center = (self.obj_data['centroid'][0], self.obj_data['box'][3])
+ # check each zone
+ for name, zone in self.camera_config.zones.items():
+ contour = zone.contour
+ # check if the object is in the zone
+ if (cv2.pointPolygonTest(contour, bottom_center, False) >= 0):
+                # if the object passed the filters once, don't apply again
+ if name in self.current_zones or not zone_filtered(self, zone.filters):
+ current_zones.append(name)
+ self.entered_zones.add(name)
+
+ # if the zones changed, signal an update
+ if not self.false_positive and set(self.current_zones) != set(current_zones):
+ significant_update = True
+
+ self.current_zones = current_zones
+ return significant_update
+
+ def to_dict(self, include_thumbnail: bool = False):
+ return {
+ 'id': self.obj_data['id'],
+ 'camera': self.camera,
+ 'frame_time': self.obj_data['frame_time'],
+ 'label': self.obj_data['label'],
+ 'top_score': self.top_score,
+ 'false_positive': self.false_positive,
+ 'start_time': self.obj_data['start_time'],
+ 'end_time': self.obj_data.get('end_time', None),
+ 'score': self.obj_data['score'],
+ 'box': self.obj_data['box'],
+ 'area': self.obj_data['area'],
+ 'region': self.obj_data['region'],
+ 'current_zones': self.current_zones.copy(),
+ 'entered_zones': list(self.entered_zones).copy(),
+ 'thumbnail': base64.b64encode(self.get_thumbnail()).decode('utf-8') if include_thumbnail else None
+ }
+
+ def get_thumbnail(self):
+        # fall back to a blank thumbnail if the backing frame is no longer cached
+        if self.thumbnail_data is None or not self.thumbnail_data['frame_time'] in self.frame_cache:
+            ret, jpg = cv2.imencode('.jpg', np.zeros((175,175,3), np.uint8))
+            return jpg.tobytes()
+
+ jpg_bytes = self.get_jpg_bytes(timestamp=False, bounding_box=False, crop=True, height=175)
+
+ if jpg_bytes:
+ return jpg_bytes
+ else:
+ ret, jpg = cv2.imencode('.jpg', np.zeros((175,175,3), np.uint8))
+ return jpg.tobytes()
+
+ def get_jpg_bytes(self, timestamp=False, bounding_box=False, crop=False, height=None):
+ if self.thumbnail_data is None:
+ return None
+
+ try:
+ best_frame = cv2.cvtColor(self.frame_cache[self.thumbnail_data['frame_time']], cv2.COLOR_YUV2BGR_I420)
+ except KeyError:
+ logger.warning(f"Unable to create jpg because frame {self.thumbnail_data['frame_time']} is not in the cache")
+ return None
+
+ if bounding_box:
+ thickness = 2
+ color = COLOR_MAP[self.obj_data['label']]
+
+ # draw the bounding boxes on the frame
+ box = self.thumbnail_data['box']
+ draw_box_with_label(best_frame, box[0], box[1], box[2], box[3], self.obj_data['label'], f"{int(self.thumbnail_data['score']*100)}% {int(self.thumbnail_data['area'])}", thickness=thickness, color=color)
+
+ if crop:
+ box = self.thumbnail_data['box']
+ region = calculate_region(best_frame.shape, box[0], box[1], box[2], box[3], 1.1)
+ best_frame = best_frame[region[1]:region[3], region[0]:region[2]]
+
+ if height:
+ width = int(height*best_frame.shape[1]/best_frame.shape[0])
+ best_frame = cv2.resize(best_frame, dsize=(width, height), interpolation=cv2.INTER_AREA)
+
+ if timestamp:
+ time_to_show = datetime.datetime.fromtimestamp(self.thumbnail_data['frame_time']).strftime("%m/%d/%Y %H:%M:%S")
+ size = cv2.getTextSize(time_to_show, cv2.FONT_HERSHEY_SIMPLEX, fontScale=1, thickness=2)
+ text_width = size[0][0]
+ desired_size = max(150, 0.33*best_frame.shape[1])
+ font_scale = desired_size/text_width
+ cv2.putText(best_frame, time_to_show, (5, best_frame.shape[0]-7), cv2.FONT_HERSHEY_SIMPLEX,
+ fontScale=font_scale, color=(255, 255, 255), thickness=2)
+
+ ret, jpg = cv2.imencode('.jpg', best_frame)
+ if ret:
+ return jpg.tobytes()
+ else:
+ return None
+
+def zone_filtered(obj: TrackedObject, object_config):
+ object_name = obj.obj_data['label']
if object_name in object_config:
obj_settings = object_config[object_name]
# if the min area is larger than the
# detected object, don't add it to detected objects
- if obj_settings.get('min_area',-1) > obj['area']:
+ if obj_settings.min_area > obj.obj_data['area']:
return True
-
+
# if the detected object is larger than the
# max area, don't add it to detected objects
- if obj_settings.get('max_area', 24000000) < obj['area']:
+ if obj_settings.max_area < obj.obj_data['area']:
return True
# if the score is lower than the threshold, skip
- if obj_settings.get('threshold', 0) > obj['computed_score']:
+ if obj_settings.threshold > obj.computed_score:
return True
-
+
return False
# Maintains the state of a camera
@@ -52,32 +250,37 @@ class CameraState():
def __init__(self, name, config, frame_manager):
self.name = name
self.config = config
+ self.camera_config = config.cameras[name]
self.frame_manager = frame_manager
-
- self.best_objects = {}
- self.object_status = defaultdict(lambda: 'OFF')
- self.tracked_objects = {}
+ self.best_objects: Dict[str, TrackedObject] = {}
+ self.object_counts = defaultdict(lambda: 0)
+ self.tracked_objects: Dict[str, TrackedObject] = {}
+ self.frame_cache = {}
self.zone_objects = defaultdict(lambda: [])
- self._current_frame = np.zeros((self.config['frame_shape'][0]*3//2, self.config['frame_shape'][1]), np.uint8)
+ self._current_frame = np.zeros(self.camera_config.frame_shape_yuv, np.uint8)
self.current_frame_lock = threading.Lock()
self.current_frame_time = 0.0
+ self.motion_boxes = []
+ self.regions = []
self.previous_frame_id = None
self.callbacks = defaultdict(lambda: [])
- def get_current_frame(self, draw=False):
+ def get_current_frame(self, draw_options={}):
with self.current_frame_lock:
frame_copy = np.copy(self._current_frame)
frame_time = self.current_frame_time
- tracked_objects = copy.deepcopy(self.tracked_objects)
-
+ tracked_objects = {k: v.to_dict() for k,v in self.tracked_objects.items()}
+ motion_boxes = self.motion_boxes.copy()
+ regions = self.regions.copy()
+
frame_copy = cv2.cvtColor(frame_copy, cv2.COLOR_YUV2BGR_I420)
# draw on the frame
- if draw:
+ if draw_options.get('bounding_boxes'):
# draw the bounding boxes on the frame
for obj in tracked_objects.values():
thickness = 2
color = COLOR_MAP[obj['label']]
-
+
if obj['frame_time'] != frame_time:
thickness = 1
color = (255,0,0)
@@ -85,156 +288,135 @@ def get_current_frame(self, draw=False):
# draw the bounding boxes on the frame
box = obj['box']
draw_box_with_label(frame_copy, box[0], box[1], box[2], box[3], obj['label'], f"{int(obj['score']*100)}% {int(obj['area'])}", thickness=thickness, color=color)
- # draw the regions on the frame
- region = obj['region']
- cv2.rectangle(frame_copy, (region[0], region[1]), (region[2], region[3]), (0,255,0), 1)
-
- if self.config['snapshots']['show_timestamp']:
- time_to_show = datetime.datetime.fromtimestamp(frame_time).strftime("%m/%d/%Y %H:%M:%S")
- cv2.putText(frame_copy, time_to_show, (10, 30), cv2.FONT_HERSHEY_SIMPLEX, fontScale=.8, color=(255, 255, 255), thickness=2)
-
- if self.config['snapshots']['draw_zones']:
- for name, zone in self.config['zones'].items():
- thickness = 8 if any([name in obj['zones'] for obj in tracked_objects.values()]) else 2
- cv2.drawContours(frame_copy, [zone['contour']], -1, zone['color'], thickness)
-
- return frame_copy
- def false_positive(self, obj):
- # once a true positive, always a true positive
- if not obj.get('false_positive', True):
- return False
+ if draw_options.get('regions'):
+ for region in regions:
+ cv2.rectangle(frame_copy, (region[0], region[1]), (region[2], region[3]), (0,255,0), 2)
- threshold = self.config['objects'].get('filters', {}).get(obj['label'], {}).get('threshold', 0.85)
- if obj['computed_score'] < threshold:
- return True
- return False
+ if draw_options.get('zones'):
+ for name, zone in self.camera_config.zones.items():
+ thickness = 8 if any([name in obj['current_zones'] for obj in tracked_objects.values()]) else 2
+ cv2.drawContours(frame_copy, [zone.contour], -1, zone.color, thickness)
- def compute_score(self, obj):
- scores = obj['score_history'][:]
- # pad with zeros if you dont have at least 3 scores
- if len(scores) < 3:
- scores += [0.0]*(3 - len(scores))
- return median(scores)
+ if draw_options.get('mask'):
+ mask_overlay = np.where(self.camera_config.motion.mask==[0])
+ frame_copy[mask_overlay] = [0,0,0]
+
+ if draw_options.get('motion_boxes'):
+ for m_box in motion_boxes:
+ cv2.rectangle(frame_copy, (m_box[0], m_box[1]), (m_box[2], m_box[3]), (0,0,255), 2)
+
+ if draw_options.get('timestamp'):
+ time_to_show = datetime.datetime.fromtimestamp(frame_time).strftime("%m/%d/%Y %H:%M:%S")
+ cv2.putText(frame_copy, time_to_show, (10, 30), cv2.FONT_HERSHEY_SIMPLEX, fontScale=.8, color=(255, 255, 255), thickness=2)
+
+ return frame_copy
+
+ def finished(self, obj_id):
+ del self.tracked_objects[obj_id]
def on(self, event_type: str, callback: Callable[[Dict], None]):
self.callbacks[event_type].append(callback)
- def update(self, frame_time, tracked_objects):
+ def update(self, frame_time, current_detections, motion_boxes, regions):
self.current_frame_time = frame_time
- # get the new frame and delete the old frame
+ self.motion_boxes = motion_boxes
+ self.regions = regions
+ # get the new frame
frame_id = f"{self.name}{frame_time}"
- current_frame = self.frame_manager.get(frame_id, (self.config['frame_shape'][0]*3//2, self.config['frame_shape'][1]))
+ current_frame = self.frame_manager.get(frame_id, self.camera_config.frame_shape_yuv)
- current_ids = tracked_objects.keys()
+ current_ids = current_detections.keys()
previous_ids = self.tracked_objects.keys()
removed_ids = list(set(previous_ids).difference(current_ids))
new_ids = list(set(current_ids).difference(previous_ids))
updated_ids = list(set(current_ids).intersection(previous_ids))
for id in new_ids:
- self.tracked_objects[id] = tracked_objects[id]
- self.tracked_objects[id]['zones'] = []
-
- # start the score history
- self.tracked_objects[id]['score_history'] = [self.tracked_objects[id]['score']]
-
- # calculate if this is a false positive
- self.tracked_objects[id]['computed_score'] = self.compute_score(self.tracked_objects[id])
- self.tracked_objects[id]['false_positive'] = self.false_positive(self.tracked_objects[id])
+ new_obj = self.tracked_objects[id] = TrackedObject(self.name, self.camera_config, self.frame_cache, current_detections[id])
# call event handlers
for c in self.callbacks['start']:
- c(self.name, tracked_objects[id])
-
- for id in updated_ids:
- self.tracked_objects[id].update(tracked_objects[id])
+ c(self.name, new_obj, frame_time)
- # if the object is not in the current frame, add a 0.0 to the score history
- if self.tracked_objects[id]['frame_time'] != self.current_frame_time:
- self.tracked_objects[id]['score_history'].append(0.0)
- else:
- self.tracked_objects[id]['score_history'].append(self.tracked_objects[id]['score'])
- # only keep the last 10 scores
- if len(self.tracked_objects[id]['score_history']) > 10:
- self.tracked_objects[id]['score_history'] = self.tracked_objects[id]['score_history'][-10:]
+ for id in updated_ids:
+ updated_obj = self.tracked_objects[id]
+ significant_update = updated_obj.update(frame_time, current_detections[id])
- # calculate if this is a false positive
- self.tracked_objects[id]['computed_score'] = self.compute_score(self.tracked_objects[id])
- self.tracked_objects[id]['false_positive'] = self.false_positive(self.tracked_objects[id])
+ if significant_update:
+ # ensure this frame is stored in the cache
+ if updated_obj.thumbnail_data['frame_time'] == frame_time and frame_time not in self.frame_cache:
+ self.frame_cache[frame_time] = np.copy(current_frame)
+
+ updated_obj.last_updated = frame_time
+
+ # if it has been more than 5 seconds since the last publish
+            # and the last update is more recent than the last publish
+ if frame_time - updated_obj.last_published > 5 and updated_obj.last_updated > updated_obj.last_published:
+ # call event handlers
+ for c in self.callbacks['update']:
+ c(self.name, updated_obj, frame_time)
+ updated_obj.last_published = frame_time
- # call event handlers
- for c in self.callbacks['update']:
- c(self.name, self.tracked_objects[id])
-
for id in removed_ids:
# publish events to mqtt
- self.tracked_objects[id]['end_time'] = frame_time
- for c in self.callbacks['end']:
- c(self.name, self.tracked_objects[id])
- del self.tracked_objects[id]
-
- # check to see if the objects are in any zones
- for obj in self.tracked_objects.values():
- current_zones = []
- bottom_center = (obj['centroid'][0], obj['box'][3])
- # check each zone
- for name, zone in self.config['zones'].items():
- contour = zone['contour']
- # check if the object is in the zone
- if (cv2.pointPolygonTest(contour, bottom_center, False) >= 0):
- # if the object passed the filters once, dont apply again
- if name in obj.get('zones', []) or not zone_filtered(obj, zone.get('filters', {})):
- current_zones.append(name)
-
- obj['zones'] = current_zones
+ removed_obj = self.tracked_objects[id]
+ if not 'end_time' in removed_obj.obj_data:
+ removed_obj.obj_data['end_time'] = frame_time
+ for c in self.callbacks['end']:
+ c(self.name, removed_obj, frame_time)
+        # TODO: can I switch to looking this up and only changing when an event ends?
# maintain best objects
for obj in self.tracked_objects.values():
- object_type = obj['label']
- # if the object wasn't seen on the current frame, skip it
- if obj['frame_time'] != self.current_frame_time or obj['false_positive']:
+ object_type = obj.obj_data['label']
+            # skip false positives and objects whose best thumbnail is not from the current frame
+ if obj.false_positive or obj.thumbnail_data['frame_time'] != self.current_frame_time:
continue
- obj_copy = copy.deepcopy(obj)
if object_type in self.best_objects:
current_best = self.best_objects[object_type]
now = datetime.datetime.now().timestamp()
- # if the object is a higher score than the current best score
+                # if the new thumbnail is better than the current best thumbnail
# or the current object is older than desired, use the new object
- if obj_copy['score'] > current_best['score'] or (now - current_best['frame_time']) > self.config.get('best_image_timeout', 60):
- obj_copy['frame'] = np.copy(current_frame)
- self.best_objects[object_type] = obj_copy
+ if (is_better_thumbnail(current_best.thumbnail_data, obj.thumbnail_data, self.camera_config.frame_shape)
+ or (now - current_best.thumbnail_data['frame_time']) > self.camera_config.best_image_timeout):
+ self.best_objects[object_type] = obj
for c in self.callbacks['snapshot']:
- c(self.name, self.best_objects[object_type])
+ c(self.name, self.best_objects[object_type], frame_time)
else:
- obj_copy['frame'] = np.copy(current_frame)
- self.best_objects[object_type] = obj_copy
+ self.best_objects[object_type] = obj
for c in self.callbacks['snapshot']:
- c(self.name, self.best_objects[object_type])
-
+ c(self.name, self.best_objects[object_type], frame_time)
+
# update overall camera state for each object type
obj_counter = Counter()
for obj in self.tracked_objects.values():
- if not obj['false_positive']:
- obj_counter[obj['label']] += 1
-
+ if not obj.false_positive:
+ obj_counter[obj.obj_data['label']] += 1
+
# report on detected objects
for obj_name, count in obj_counter.items():
- new_status = 'ON' if count > 0 else 'OFF'
- if new_status != self.object_status[obj_name]:
- self.object_status[obj_name] = new_status
+ if count != self.object_counts[obj_name]:
+ self.object_counts[obj_name] = count
for c in self.callbacks['object_status']:
- c(self.name, obj_name, new_status)
+ c(self.name, obj_name, count)
- # expire any objects that are ON and no longer detected
- expired_objects = [obj_name for obj_name, status in self.object_status.items() if status == 'ON' and not obj_name in obj_counter]
+ # expire any objects that are >0 and no longer detected
+ expired_objects = [obj_name for obj_name, count in self.object_counts.items() if count > 0 and not obj_name in obj_counter]
for obj_name in expired_objects:
- self.object_status[obj_name] = 'OFF'
+ self.object_counts[obj_name] = 0
for c in self.callbacks['object_status']:
- c(self.name, obj_name, 'OFF')
+ c(self.name, obj_name, 0)
for c in self.callbacks['snapshot']:
- c(self.name, self.best_objects[obj_name])
-
+ c(self.name, self.best_objects[obj_name], frame_time)
+
+ # cleanup thumbnail frame cache
+ current_thumb_frames = set([obj.thumbnail_data['frame_time'] for obj in self.tracked_objects.values() if not obj.false_positive])
+ current_best_frames = set([obj.thumbnail_data['frame_time'] for obj in self.best_objects.values()])
+ thumb_frames_to_delete = [t for t in self.frame_cache.keys() if not t in current_thumb_frames and not t in current_best_frames]
+ for t in thumb_frames_to_delete:
+ del self.frame_cache[t]
+
with self.current_frame_lock:
self._current_frame = current_frame
if not self.previous_frame_id is None:
@@ -242,68 +424,64 @@ def update(self, frame_time, tracked_objects):
self.previous_frame_id = frame_id
class TrackedObjectProcessor(threading.Thread):
- def __init__(self, camera_config, client, topic_prefix, tracked_objects_queue, event_queue, stop_event):
+ def __init__(self, config: FrigateConfig, client, topic_prefix, tracked_objects_queue, event_queue, event_processed_queue, stop_event):
threading.Thread.__init__(self)
- self.camera_config = camera_config
+ self.name = "detected_frames_processor"
+ self.config = config
self.client = client
self.topic_prefix = topic_prefix
self.tracked_objects_queue = tracked_objects_queue
self.event_queue = event_queue
+ self.event_processed_queue = event_processed_queue
self.stop_event = stop_event
self.camera_states: Dict[str, CameraState] = {}
self.frame_manager = SharedMemoryFrameManager()
- def start(camera, obj):
- # publish events to mqtt
- self.client.publish(f"{self.topic_prefix}/{camera}/events/start", json.dumps(obj), retain=False)
- self.event_queue.put(('start', camera, obj))
-
- def update(camera, obj):
- pass
-
- def end(camera, obj):
- self.client.publish(f"{self.topic_prefix}/{camera}/events/end", json.dumps(obj), retain=False)
- self.event_queue.put(('end', camera, obj))
+ def start(camera, obj: TrackedObject, current_frame_time):
+ self.event_queue.put(('start', camera, obj.to_dict()))
+
+ def update(camera, obj: TrackedObject, current_frame_time):
+ after = obj.to_dict()
+ message = { 'before': obj.previous, 'after': after, 'type': 'new' if obj.previous['false_positive'] else 'update' }
+ self.client.publish(f"{self.topic_prefix}/events", json.dumps(message), retain=False)
+ obj.previous = after
+
+ def end(camera, obj: TrackedObject, current_frame_time):
+ snapshot_config = self.config.cameras[camera].snapshots
+ event_data = obj.to_dict(include_thumbnail=True)
+ event_data['has_snapshot'] = False
+ if not obj.false_positive:
+ message = { 'before': obj.previous, 'after': obj.to_dict(), 'type': 'end' }
+ self.client.publish(f"{self.topic_prefix}/events", json.dumps(message), retain=False)
+ # write snapshot to disk if enabled
+ if snapshot_config.enabled:
+ jpg_bytes = obj.get_jpg_bytes(
+ timestamp=snapshot_config.timestamp,
+ bounding_box=snapshot_config.bounding_box,
+ crop=snapshot_config.crop,
+ height=snapshot_config.height
+ )
+ with open(os.path.join(CLIPS_DIR, f"{camera}-{obj.obj_data['id']}.jpg"), 'wb') as j:
+ j.write(jpg_bytes)
+ event_data['has_snapshot'] = True
+ self.event_queue.put(('end', camera, event_data))
- def snapshot(camera, obj):
- if not 'frame' in obj:
- return
-
- best_frame = cv2.cvtColor(obj['frame'], cv2.COLOR_YUV2BGR_I420)
- if self.camera_config[camera]['snapshots']['draw_bounding_boxes']:
- thickness = 2
- color = COLOR_MAP[obj['label']]
- box = obj['box']
- draw_box_with_label(best_frame, box[0], box[1], box[2], box[3], obj['label'], f"{int(obj['score']*100)}% {int(obj['area'])}", thickness=thickness, color=color)
-
- mqtt_config = self.camera_config[camera].get('mqtt', {'crop_to_region': False})
- if mqtt_config.get('crop_to_region'):
- region = obj['region']
- best_frame = best_frame[region[1]:region[3], region[0]:region[2]]
- if 'snapshot_height' in mqtt_config:
- height = int(mqtt_config['snapshot_height'])
- width = int(height*best_frame.shape[1]/best_frame.shape[0])
- best_frame = cv2.resize(best_frame, dsize=(width, height), interpolation=cv2.INTER_AREA)
-
- if self.camera_config[camera]['snapshots']['show_timestamp']:
- time_to_show = datetime.datetime.fromtimestamp(obj['frame_time']).strftime("%m/%d/%Y %H:%M:%S")
- size = cv2.getTextSize(time_to_show, cv2.FONT_HERSHEY_SIMPLEX, fontScale=1, thickness=2)
- text_width = size[0][0]
- text_height = size[0][1]
- desired_size = max(200, 0.33*best_frame.shape[1])
- font_scale = desired_size/text_width
- cv2.putText(best_frame, time_to_show, (5, best_frame.shape[0]-7), cv2.FONT_HERSHEY_SIMPLEX, fontScale=font_scale, color=(255, 255, 255), thickness=2)
-
- ret, jpg = cv2.imencode('.jpg', best_frame)
- if ret:
- jpg_bytes = jpg.tobytes()
- self.client.publish(f"{self.topic_prefix}/{camera}/{obj['label']}/snapshot", jpg_bytes, retain=True)
+ def snapshot(camera, obj: TrackedObject, current_frame_time):
+ mqtt_config = self.config.cameras[camera].mqtt
+ if mqtt_config.enabled:
+ jpg_bytes = obj.get_jpg_bytes(
+ timestamp=mqtt_config.timestamp,
+ bounding_box=mqtt_config.bounding_box,
+ crop=mqtt_config.crop,
+ height=mqtt_config.height
+ )
+ self.client.publish(f"{self.topic_prefix}/{camera}/{obj.obj_data['label']}/snapshot", jpg_bytes, retain=True)
def object_status(camera, object_name, status):
self.client.publish(f"{self.topic_prefix}/{camera}/{object_name}", status, retain=False)
- for camera in self.camera_config.keys():
- camera_state = CameraState(camera, self.camera_config[camera], self.frame_manager)
+ for camera in self.config.cameras.keys():
+ camera_state = CameraState(camera, self.config, self.frame_manager)
camera_state.on('start', start)
camera_state.on('update', update)
camera_state.on('end', end)
@@ -311,83 +489,71 @@ def object_status(camera, object_name, status):
camera_state.on('object_status', object_status)
self.camera_states[camera] = camera_state
- self.camera_data = defaultdict(lambda: {
- 'best_objects': {},
- 'object_status': defaultdict(lambda: defaultdict(lambda: 'OFF')),
- 'tracked_objects': {},
- 'current_frame': np.zeros((720,1280,3), np.uint8),
- 'current_frame_time': 0.0,
- 'object_id': None
- })
# {
# 'zone_name': {
- # 'person': ['camera_1', 'camera_2']
+ # 'person': {
+ # 'camera_1': 2,
+ # 'camera_2': 1
+ # }
# }
# }
- self.zone_data = defaultdict(lambda: defaultdict(lambda: set()))
-
- # set colors for zones
- all_zone_names = set([zone for config in self.camera_config.values() for zone in config['zones'].keys()])
- zone_colors = {}
- colors = plt.cm.get_cmap('tab10', len(all_zone_names))
- for i, zone in enumerate(all_zone_names):
- zone_colors[zone] = tuple(int(round(255 * c)) for c in colors(i)[:3])
-
- # create zone contours
- for camera_config in self.camera_config.values():
- for zone_name, zone_config in camera_config['zones'].items():
- zone_config['color'] = zone_colors[zone_name]
- coordinates = zone_config['coordinates']
- if isinstance(coordinates, list):
- zone_config['contour'] = np.array([[int(p.split(',')[0]), int(p.split(',')[1])] for p in coordinates])
- elif isinstance(coordinates, str):
- points = coordinates.split(',')
- zone_config['contour'] = np.array([[int(points[i]), int(points[i+1])] for i in range(0, len(points), 2)])
- else:
- print(f"Unable to parse zone coordinates for {zone_name} - {camera}")
-
+ self.zone_data = defaultdict(lambda: defaultdict(lambda: {}))
+
def get_best(self, camera, label):
- best_objects = self.camera_states[camera].best_objects
- if label in best_objects:
- return best_objects[label]
+ # TODO: need a lock here
+ camera_state = self.camera_states[camera]
+ if label in camera_state.best_objects:
+ best_obj = camera_state.best_objects[label]
+ best = best_obj.thumbnail_data.copy()
+ best['frame'] = camera_state.frame_cache.get(best_obj.thumbnail_data['frame_time'])
+ return best
else:
return {}
-
- def get_current_frame(self, camera, draw=False):
- return self.camera_states[camera].get_current_frame(draw)
+
+ def get_current_frame(self, camera, draw_options={}):
+ return self.camera_states[camera].get_current_frame(draw_options)
def run(self):
while True:
if self.stop_event.is_set():
- print(f"Exiting object processor...")
+ logger.info(f"Exiting object processor...")
break
try:
- camera, frame_time, current_tracked_objects = self.tracked_objects_queue.get(True, 10)
+ camera, frame_time, current_tracked_objects, motion_boxes, regions = self.tracked_objects_queue.get(True, 10)
except queue.Empty:
continue
camera_state = self.camera_states[camera]
- camera_state.update(frame_time, current_tracked_objects)
-
- # update zone status for each label
- for zone in camera_state.config['zones'].keys():
- # get labels for current camera and all labels in current zone
- labels_for_camera = set([obj['label'] for obj in camera_state.tracked_objects.values() if zone in obj['zones'] and not obj['false_positive']])
- labels_to_check = labels_for_camera | set(self.zone_data[zone].keys())
- # for each label in zone
- for label in labels_to_check:
- camera_list = self.zone_data[zone][label]
- # remove or add the camera to the list for the current label
- previous_state = len(camera_list) > 0
- if label in labels_for_camera:
- camera_list.add(camera_state.name)
- elif camera_state.name in camera_list:
- camera_list.remove(camera_state.name)
- new_state = len(camera_list) > 0
- # if the value is changing, send over MQTT
- if previous_state == False and new_state == True:
- self.client.publish(f"{self.topic_prefix}/{zone}/{label}", 'ON', retain=False)
- elif previous_state == True and new_state == False:
- self.client.publish(f"{self.topic_prefix}/{zone}/{label}", 'OFF', retain=False)
+ camera_state.update(frame_time, current_tracked_objects, motion_boxes, regions)
+
+ # update zone counts for each label
+ # for each zone in the current camera
+ for zone in self.config.cameras[camera].zones.keys():
+ # count labels for the camera in the zone
+ obj_counter = Counter()
+ for obj in camera_state.tracked_objects.values():
+ if zone in obj.current_zones and not obj.false_positive:
+ obj_counter[obj.obj_data['label']] += 1
+
+ # update counts and publish status
+ for label in set(list(self.zone_data[zone].keys()) + list(obj_counter.keys())):
+ # if we have previously published a count for this zone/label
+ zone_label = self.zone_data[zone][label]
+ if camera in zone_label:
+ current_count = sum(zone_label.values())
+ zone_label[camera] = obj_counter[label] if label in obj_counter else 0
+ new_count = sum(zone_label.values())
+ if new_count != current_count:
+ self.client.publish(f"{self.topic_prefix}/{zone}/{label}", new_count, retain=False)
+ # if this is a new zone/label combo for this camera
+ else:
+ if label in obj_counter:
+ zone_label[camera] = obj_counter[label]
+ self.client.publish(f"{self.topic_prefix}/{zone}/{label}", obj_counter[label], retain=False)
+
+ # cleanup event finished queue
+ while not self.event_processed_queue.empty():
+ event_id, camera = self.event_processed_queue.get()
+ self.camera_states[camera].finished(event_id)
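For reference, a minimal standalone sketch (not part of the patch) of the count-based zone reporting the object_processing.py hunk above switches to: counts are kept per zone, label and camera, and the summed total is only republished when it changes. The zone, camera and label names and the publish callback below are illustrative.

from collections import Counter, defaultdict

# zone -> label -> {camera: count}, mirroring the layout described in the
# zone_data comment in object_processing.py
zone_data = defaultdict(lambda: defaultdict(dict))

def update_zone_counts(zone, camera, labels, publish):
    # count the labels this camera currently sees in the zone
    obj_counter = Counter(labels)
    # walk every label previously tracked for the zone plus any new ones
    for label in set(zone_data[zone]) | set(obj_counter):
        per_camera = zone_data[zone][label]
        previous_total = sum(per_camera.values())
        per_camera[camera] = obj_counter.get(label, 0)
        new_total = sum(per_camera.values())
        # only publish when the summed count across cameras actually changes
        if new_total != previous_total:
            publish(f"{zone}/{label}", new_total)

update_zone_counts("yard", "camera_1", ["person", "person", "car"], print)
update_zone_counts("yard", "camera_2", ["person"], print)  # yard/person rises to 3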
diff --git a/frigate/objects.py b/frigate/objects.py
--- a/frigate/objects.py
+++ b/frigate/objects.py
@@ -1,29 +1,32 @@
-import time
+import copy
import datetime
-import threading
-import cv2
import itertools
-import copy
-import numpy as np
+import multiprocessing as mp
import random
import string
-import multiprocessing as mp
+import threading
+import time
from collections import defaultdict
+
+import cv2
+import numpy as np
from scipy.spatial import distance as dist
-from frigate.util import draw_box_with_label, calculate_region
+
+from frigate.config import DetectConfig
+from frigate.util import draw_box_with_label
+
class ObjectTracker():
- def __init__(self, max_disappeared):
+ def __init__(self, config: DetectConfig):
self.tracked_objects = {}
self.disappeared = {}
- self.max_disappeared = max_disappeared
+ self.max_disappeared = config.max_disappeared
def register(self, index, obj):
rand_id = ''.join(random.choices(string.ascii_lowercase + string.digits, k=6))
id = f"{obj['frame_time']}-{rand_id}"
obj['id'] = id
obj['start_time'] = obj['frame_time']
- obj['top_score'] = obj['score']
self.tracked_objects[id] = obj
self.disappeared[id] = 0
@@ -34,8 +37,6 @@ def deregister(self, id):
def update(self, id, new_obj):
self.disappeared[id] = 0
self.tracked_objects[id].update(new_obj)
- if self.tracked_objects[id]['score'] > self.tracked_objects[id]['top_score']:
- self.tracked_objects[id]['top_score'] = self.tracked_objects[id]['score']
def match_and_update(self, frame_time, new_objects):
# group by name
diff --git a/frigate/process_clip.py b/frigate/process_clip.py
new file mode 100644
--- /dev/null
+++ b/frigate/process_clip.py
@@ -0,0 +1,208 @@
+import datetime
+import json
+import logging
+import multiprocessing as mp
+import os
+import subprocess as sp
+import sys
+from unittest import TestCase, main
+
+import click
+import cv2
+import numpy as np
+
+from frigate.config import FRIGATE_CONFIG_SCHEMA, FrigateConfig
+from frigate.edgetpu import LocalObjectDetector
+from frigate.motion import MotionDetector
+from frigate.object_processing import COLOR_MAP, CameraState
+from frigate.objects import ObjectTracker
+from frigate.util import (DictFrameManager, EventsPerSecond,
+ SharedMemoryFrameManager, draw_box_with_label)
+from frigate.video import (capture_frames, process_frames,
+ start_or_restart_ffmpeg)
+
+logging.basicConfig()
+logging.root.setLevel(logging.DEBUG)
+
+logger = logging.getLogger(__name__)
+
+def get_frame_shape(source):
+ ffprobe_cmd = " ".join([
+ 'ffprobe',
+ '-v',
+ 'panic',
+ '-show_error',
+ '-show_streams',
+ '-of',
+ 'json',
+ '"'+source+'"'
+ ])
+ p = sp.Popen(ffprobe_cmd, stdout=sp.PIPE, shell=True)
+ (output, err) = p.communicate()
+ p_status = p.wait()
+ info = json.loads(output)
+
+ video_info = [s for s in info['streams'] if s['codec_type'] == 'video'][0]
+
+ if video_info['height'] != 0 and video_info['width'] != 0:
+ return (video_info['height'], video_info['width'], 3)
+
+    # fall back to using opencv if ffprobe didn't succeed
+ video = cv2.VideoCapture(source)
+ ret, frame = video.read()
+ frame_shape = frame.shape
+ video.release()
+ return frame_shape
+
+class ProcessClip():
+ def __init__(self, clip_path, frame_shape, config: FrigateConfig):
+ self.clip_path = clip_path
+ self.camera_name = 'camera'
+ self.config = config
+ self.camera_config = self.config.cameras['camera']
+ self.frame_shape = self.camera_config.frame_shape
+ self.ffmpeg_cmd = [c['cmd'] for c in self.camera_config.ffmpeg_cmds if 'detect' in c['roles']][0]
+ self.frame_manager = SharedMemoryFrameManager()
+ self.frame_queue = mp.Queue()
+ self.detected_objects_queue = mp.Queue()
+ self.camera_state = CameraState(self.camera_name, config, self.frame_manager)
+
+ def load_frames(self):
+ fps = EventsPerSecond()
+ skipped_fps = EventsPerSecond()
+ current_frame = mp.Value('d', 0.0)
+ frame_size = self.camera_config.frame_shape_yuv[0] * self.camera_config.frame_shape_yuv[1]
+ ffmpeg_process = start_or_restart_ffmpeg(self.ffmpeg_cmd, logger, sp.DEVNULL, frame_size)
+ capture_frames(ffmpeg_process, self.camera_name, self.camera_config.frame_shape_yuv, self.frame_manager,
+ self.frame_queue, fps, skipped_fps, current_frame)
+ ffmpeg_process.wait()
+ ffmpeg_process.communicate()
+
+ def process_frames(self, objects_to_track=['person'], object_filters={}):
+ mask = np.zeros((self.frame_shape[0], self.frame_shape[1], 1), np.uint8)
+ mask[:] = 255
+ motion_detector = MotionDetector(self.frame_shape, mask, self.camera_config.motion)
+
+ object_detector = LocalObjectDetector(labels='/labelmap.txt')
+ object_tracker = ObjectTracker(self.camera_config.detect)
+ process_info = {
+ 'process_fps': mp.Value('d', 0.0),
+ 'detection_fps': mp.Value('d', 0.0),
+ 'detection_frame': mp.Value('d', 0.0)
+ }
+ stop_event = mp.Event()
+ model_shape = (self.config.model.height, self.config.model.width)
+
+ process_frames(self.camera_name, self.frame_queue, self.frame_shape, model_shape,
+ self.frame_manager, motion_detector, object_detector, object_tracker,
+ self.detected_objects_queue, process_info,
+ objects_to_track, object_filters, mask, stop_event, exit_on_empty=True)
+
+ def top_object(self, debug_path=None):
+ obj_detected = False
+ top_computed_score = 0.0
+ def handle_event(name, obj, frame_time):
+ nonlocal obj_detected
+ nonlocal top_computed_score
+ if obj.computed_score > top_computed_score:
+ top_computed_score = obj.computed_score
+ if not obj.false_positive:
+ obj_detected = True
+ self.camera_state.on('new', handle_event)
+ self.camera_state.on('update', handle_event)
+
+ while(not self.detected_objects_queue.empty()):
+ camera_name, frame_time, current_tracked_objects, motion_boxes, regions = self.detected_objects_queue.get()
+ if not debug_path is None:
+ self.save_debug_frame(debug_path, frame_time, current_tracked_objects.values())
+
+ self.camera_state.update(frame_time, current_tracked_objects, motion_boxes, regions)
+
+ self.frame_manager.delete(self.camera_state.previous_frame_id)
+
+ return {
+ 'object_detected': obj_detected,
+ 'top_score': top_computed_score
+ }
+
+ def save_debug_frame(self, debug_path, frame_time, tracked_objects):
+ current_frame = cv2.cvtColor(self.frame_manager.get(f"{self.camera_name}{frame_time}", self.camera_config.frame_shape_yuv), cv2.COLOR_YUV2BGR_I420)
+ # draw the bounding boxes on the frame
+ for obj in tracked_objects:
+ thickness = 2
+ color = (0,0,175)
+
+ if obj['frame_time'] != frame_time:
+ thickness = 1
+ color = (255,0,0)
+ else:
+ color = (255,255,0)
+
+ # draw the bounding boxes on the frame
+ box = obj['box']
+ draw_box_with_label(current_frame, box[0], box[1], box[2], box[3], obj['id'], f"{int(obj['score']*100)}% {int(obj['area'])}", thickness=thickness, color=color)
+ # draw the regions on the frame
+ region = obj['region']
+ draw_box_with_label(current_frame, region[0], region[1], region[2], region[3], 'region', "", thickness=1, color=(0,255,0))
+
+ cv2.imwrite(f"{os.path.join(debug_path, os.path.basename(self.clip_path))}.{int(frame_time*1000000)}.jpg", current_frame)
+
[email protected]()
[email protected]("-p", "--path", required=True, help="Path to clip or directory to test.")
[email protected]("-l", "--label", default='person', help="Label name to detect.")
[email protected]("-t", "--threshold", default=0.85, help="Threshold value for objects.")
[email protected]("-s", "--scores", default=None, help="File to save csv of top scores")
[email protected]("--debug-path", default=None, help="Path to output frames for debugging.")
+def process(path, label, threshold, scores, debug_path):
+ clips = []
+ if os.path.isdir(path):
+ files = os.listdir(path)
+ files.sort()
+ clips = [os.path.join(path, file) for file in files]
+ elif os.path.isfile(path):
+ clips.append(path)
+
+ json_config = {
+ 'mqtt': {
+ 'host': 'mqtt'
+ },
+ 'cameras': {
+ 'camera': {
+ 'ffmpeg': {
+ 'inputs': [
+ { 'path': 'path.mp4', 'global_args': '', 'input_args': '', 'roles': ['detect'] }
+ ]
+ },
+ 'height': 1920,
+ 'width': 1080
+ }
+ }
+ }
+
+ results = []
+ for c in clips:
+ logger.info(c)
+ frame_shape = get_frame_shape(c)
+
+ json_config['cameras']['camera']['height'] = frame_shape[0]
+ json_config['cameras']['camera']['width'] = frame_shape[1]
+ json_config['cameras']['camera']['ffmpeg']['inputs'][0]['path'] = c
+
+ config = FrigateConfig(config=FRIGATE_CONFIG_SCHEMA(json_config))
+
+ process_clip = ProcessClip(c, frame_shape, config)
+ process_clip.load_frames()
+ process_clip.process_frames(objects_to_track=[label])
+
+ results.append((c, process_clip.top_object(debug_path)))
+
+ if not scores is None:
+ with open(scores, 'w') as writer:
+ for result in results:
+ writer.write(f"{result[0]},{result[1]['top_score']}\n")
+
+ positive_count = sum(1 for result in results if result[1]['object_detected'])
+ print(f"Objects were detected in {positive_count}/{len(results)}({positive_count/len(results)*100:.2f}%) clip(s).")
+
+if __name__ == '__main__':
+ process()
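For reference, a small sketch (not part of the patch) of the ffprobe output that get_frame_shape in the new process_clip.py relies on. The sample payload is invented, but it uses only the fields the function actually reads ('streams', 'codec_type', 'height', 'width').

import json

# invented ffprobe-style payload with one audio and one video stream
sample_output = json.dumps({
    "streams": [
        {"codec_type": "audio"},
        {"codec_type": "video", "height": 720, "width": 1280},
    ]
})

info = json.loads(sample_output)
# pick the first video stream, exactly as get_frame_shape does
video_info = [s for s in info["streams"] if s["codec_type"] == "video"][0]
print((video_info["height"], video_info["width"], 3))  # (720, 1280, 3)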
diff --git a/frigate/record.py b/frigate/record.py
new file mode 100644
--- /dev/null
+++ b/frigate/record.py
@@ -0,0 +1,125 @@
+import datetime
+import json
+import logging
+import os
+import queue
+import subprocess as sp
+import threading
+import time
+from collections import defaultdict
+from pathlib import Path
+
+import psutil
+
+from frigate.config import FrigateConfig
+from frigate.const import RECORD_DIR, CLIPS_DIR, CACHE_DIR
+
+logger = logging.getLogger(__name__)
+
+SECONDS_IN_DAY = 60 * 60 * 24
+
+def remove_empty_directories(directory):
+ # list all directories recursively and sort them by path,
+ # longest first
+ paths = sorted(
+ [x[0] for x in os.walk(RECORD_DIR)],
+ key=lambda p: len(str(p)),
+ reverse=True,
+ )
+ for path in paths:
+ # don't delete the parent
+ if path == RECORD_DIR:
+ continue
+ if len(os.listdir(path)) == 0:
+ os.rmdir(path)
+
+class RecordingMaintainer(threading.Thread):
+ def __init__(self, config: FrigateConfig, stop_event):
+ threading.Thread.__init__(self)
+ self.name = 'recording_maint'
+ self.config = config
+ self.stop_event = stop_event
+
+ def move_files(self):
+ recordings = [d for d in os.listdir(RECORD_DIR) if os.path.isfile(os.path.join(RECORD_DIR, d)) and d.endswith(".mp4")]
+
+ files_in_use = []
+ for process in psutil.process_iter():
+ try:
+ if process.name() != 'ffmpeg':
+ continue
+ flist = process.open_files()
+ if flist:
+ for nt in flist:
+ if nt.path.startswith(RECORD_DIR):
+ files_in_use.append(nt.path.split('/')[-1])
+ except:
+ continue
+
+ for f in recordings:
+ if f in files_in_use:
+ continue
+
+ camera = '-'.join(f.split('-')[:-1])
+ start_time = datetime.datetime.strptime(f.split('-')[-1].split('.')[0], '%Y%m%d%H%M%S')
+
+ ffprobe_cmd = " ".join([
+ 'ffprobe',
+ '-v',
+ 'error',
+ '-show_entries',
+ 'format=duration',
+ '-of',
+ 'default=noprint_wrappers=1:nokey=1',
+ f"{os.path.join(RECORD_DIR,f)}"
+ ])
+ p = sp.Popen(ffprobe_cmd, stdout=sp.PIPE, shell=True)
+ (output, err) = p.communicate()
+ p_status = p.wait()
+ if p_status == 0:
+ duration = float(output.decode('utf-8').strip())
+ else:
+ logger.info(f"bad file: {f}")
+ os.remove(os.path.join(RECORD_DIR,f))
+ continue
+
+ directory = os.path.join(RECORD_DIR, start_time.strftime('%Y-%m/%d/%H'), camera)
+
+ if not os.path.exists(directory):
+ os.makedirs(directory)
+
+ file_name = f"{start_time.strftime('%M.%S.mp4')}"
+
+ os.rename(os.path.join(RECORD_DIR,f), os.path.join(directory,file_name))
+
+ def expire_files(self):
+ delete_before = {}
+ for name, camera in self.config.cameras.items():
+ delete_before[name] = datetime.datetime.now().timestamp() - SECONDS_IN_DAY*camera.record.retain_days
+
+ for p in Path('/media/frigate/recordings').rglob("*.mp4"):
+ if not p.parent.name in delete_before:
+ continue
+ if p.stat().st_mtime < delete_before[p.parent.name]:
+ p.unlink(missing_ok=True)
+
+ def run(self):
+ counter = 0
+ self.expire_files()
+ while(True):
+ if self.stop_event.is_set():
+ logger.info(f"Exiting recording maintenance...")
+ break
+
+ # only expire events every 10 minutes, but check for new files every 10 seconds
+ time.sleep(10)
+ counter = counter + 1
+ if counter > 60:
+ self.expire_files()
+ remove_empty_directories(RECORD_DIR)
+ counter = 0
+
+ self.move_files()
+
+
+
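For reference, a minimal sketch (not part of the patch) of the path mapping move_files performs above: a cache recording named {camera}-{YYYYmmddHHMMSS}.mp4 is filed into a year-month/day/hour/camera directory and renamed to minute.second.mp4. The camera name and timestamp below are made up for illustration.

import datetime
import os

def destination(record_dir, filename):
    # the camera name is everything before the trailing timestamp segment
    camera = '-'.join(filename.split('-')[:-1])
    start_time = datetime.datetime.strptime(filename.split('-')[-1].split('.')[0], '%Y%m%d%H%M%S')
    # recordings are grouped by year-month/day/hour/camera
    directory = os.path.join(record_dir, start_time.strftime('%Y-%m/%d/%H'), camera)
    return os.path.join(directory, start_time.strftime('%M.%S.mp4'))

print(destination('/media/frigate/recordings', 'front_door-20210101123045.mp4'))
# -> /media/frigate/recordings/2021-01/01/12/front_door/30.45.mp4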
diff --git a/frigate/stats.py b/frigate/stats.py
new file mode 100644
--- /dev/null
+++ b/frigate/stats.py
@@ -0,0 +1,70 @@
+import json
+import logging
+import threading
+import time
+
+from frigate.config import FrigateConfig
+from frigate.version import VERSION
+
+logger = logging.getLogger(__name__)
+
+def stats_init(camera_metrics, detectors):
+ stats_tracking = {
+ 'camera_metrics': camera_metrics,
+ 'detectors': detectors,
+ 'started': int(time.time())
+ }
+ return stats_tracking
+
+def stats_snapshot(stats_tracking):
+ camera_metrics = stats_tracking['camera_metrics']
+ stats = {}
+
+ total_detection_fps = 0
+
+ for name, camera_stats in camera_metrics.items():
+ total_detection_fps += camera_stats['detection_fps'].value
+ stats[name] = {
+ 'camera_fps': round(camera_stats['camera_fps'].value, 2),
+ 'process_fps': round(camera_stats['process_fps'].value, 2),
+ 'skipped_fps': round(camera_stats['skipped_fps'].value, 2),
+ 'detection_fps': round(camera_stats['detection_fps'].value, 2),
+ 'pid': camera_stats['process'].pid,
+ 'capture_pid': camera_stats['capture_process'].pid
+ }
+
+ stats['detectors'] = {}
+ for name, detector in stats_tracking["detectors"].items():
+ stats['detectors'][name] = {
+ 'inference_speed': round(detector.avg_inference_speed.value * 1000, 2),
+ 'detection_start': detector.detection_start.value,
+ 'pid': detector.detect_process.pid
+ }
+ stats['detection_fps'] = round(total_detection_fps, 2)
+
+ stats['service'] = {
+ 'uptime': (int(time.time()) - stats_tracking['started']),
+ 'version': VERSION
+ }
+
+ return stats
+
+class StatsEmitter(threading.Thread):
+ def __init__(self, config: FrigateConfig, stats_tracking, mqtt_client, topic_prefix, stop_event):
+ threading.Thread.__init__(self)
+ self.name = 'frigate_stats_emitter'
+ self.config = config
+ self.stats_tracking = stats_tracking
+ self.mqtt_client = mqtt_client
+ self.topic_prefix = topic_prefix
+ self.stop_event = stop_event
+
+ def run(self):
+ time.sleep(10)
+ while True:
+ if self.stop_event.is_set():
+ logger.info(f"Exiting watchdog...")
+ break
+ stats = stats_snapshot(self.stats_tracking)
+ self.mqtt_client.publish(f"{self.topic_prefix}/stats", json.dumps(stats), retain=False)
+ time.sleep(self.config.mqtt.stats_interval)
diff --git a/frigate/util.py b/frigate/util.py
--- a/frigate/util.py
+++ b/frigate/util.py
@@ -1,17 +1,24 @@
-from abc import ABC, abstractmethod
+import collections
import datetime
-import time
+import hashlib
+import json
+import logging
import signal
-import traceback
-import collections
-import numpy as np
-import cv2
+import subprocess as sp
import threading
-import matplotlib.pyplot as plt
-import hashlib
+import time
+import traceback
+from abc import ABC, abstractmethod
from multiprocessing import shared_memory
from typing import AnyStr
+import cv2
+import matplotlib.pyplot as plt
+import numpy as np
+
+logger = logging.getLogger(__name__)
+
+
def draw_box_with_label(frame, x_min, y_min, x_max, y_max, label, info, thickness=2, color=None, position='ul'):
if color is None:
color = (0,0,255)
@@ -43,14 +50,11 @@ def draw_box_with_label(frame, x_min, y_min, x_max, y_max, label, info, thicknes
cv2.putText(frame, display_text, (text_offset_x, text_offset_y + line_height - 3), font, fontScale=font_scale, color=(0, 0, 0), thickness=2)
def calculate_region(frame_shape, xmin, ymin, xmax, ymax, multiplier=2):
- # size is larger than longest edge
- size = int(max(xmax-xmin, ymax-ymin)*multiplier)
+ # size is the longest edge and divisible by 4
+ size = int(max(xmax-xmin, ymax-ymin)//4*4*multiplier)
# dont go any smaller than 300
if size < 300:
size = 300
- # if the size is too big to fit in the frame
- if size > min(frame_shape[0], frame_shape[1]):
- size = min(frame_shape[0], frame_shape[1])
# x_offset is midpoint of bounding box minus half the size
x_offset = int((xmax-xmin)/2.0+xmin-size/2.0)
@@ -58,48 +62,156 @@ def calculate_region(frame_shape, xmin, ymin, xmax, ymax, multiplier=2):
if x_offset < 0:
x_offset = 0
elif x_offset > (frame_shape[1]-size):
- x_offset = (frame_shape[1]-size)
+ x_offset = max(0, (frame_shape[1]-size))
# y_offset is midpoint of bounding box minus half the size
y_offset = int((ymax-ymin)/2.0+ymin-size/2.0)
- # if outside the image
+ # # if outside the image
if y_offset < 0:
y_offset = 0
elif y_offset > (frame_shape[0]-size):
- y_offset = (frame_shape[0]-size)
+ y_offset = max(0, (frame_shape[0]-size))
return (x_offset, y_offset, x_offset+size, y_offset+size)
-def yuv_region_2_rgb(frame, region):
- height = frame.shape[0]//3*2
- width = frame.shape[1]
- # make sure the size is a multiple of 4
- size = (region[3] - region[1])//4*4
+def get_yuv_crop(frame_shape, crop):
+ # crop should be (x1,y1,x2,y2)
+ frame_height = frame_shape[0]//3*2
+ frame_width = frame_shape[1]
+
+ # compute the width/height of the uv channels
+ uv_width = frame_width//2 # width of the uv channels
+ uv_height = frame_height//4 # height of the uv channels
+
+ # compute the offset for upper left corner of the uv channels
+ uv_x_offset = crop[0]//2 # x offset of the uv channels
+ uv_y_offset = crop[1]//4 # y offset of the uv channels
+
+ # compute the width/height of the uv crops
+ uv_crop_width = (crop[2] - crop[0])//2 # width of the cropped uv channels
+ uv_crop_height = (crop[3] - crop[1])//4 # height of the cropped uv channels
+
+ # ensure crop dimensions are multiples of 2 and 4
+ y = (
+ crop[0],
+ crop[1],
+ crop[0] + uv_crop_width*2,
+ crop[1] + uv_crop_height*4
+ )
- x1 = region[0]
- y1 = region[1]
+ u1 = (
+ 0 + uv_x_offset,
+ frame_height + uv_y_offset,
+ 0 + uv_x_offset + uv_crop_width,
+ frame_height + uv_y_offset + uv_crop_height
+ )
- uv_x1 = x1//2
- uv_y1 = y1//4
+ u2 = (
+ uv_width + uv_x_offset,
+ frame_height + uv_y_offset,
+ uv_width + uv_x_offset + uv_crop_width,
+ frame_height + uv_y_offset + uv_crop_height
+ )
- uv_width = size//2
- uv_height = size//4
+ v1 = (
+ 0 + uv_x_offset,
+ frame_height + uv_height + uv_y_offset,
+ 0 + uv_x_offset + uv_crop_width,
+ frame_height + uv_height + uv_y_offset + uv_crop_height
+ )
- u_y_start = height
- v_y_start = height + height//4
- two_x_offset = width//2
+ v2 = (
+ uv_width + uv_x_offset,
+ frame_height + uv_height + uv_y_offset,
+ uv_width + uv_x_offset + uv_crop_width,
+ frame_height + uv_height + uv_y_offset + uv_crop_height
+ )
- yuv_cropped_frame = np.zeros((size+size//2, size), np.uint8)
- # y channel
- yuv_cropped_frame[0:size, 0:size] = frame[y1:y1+size, x1:x1+size]
- # u channel
- yuv_cropped_frame[size:size+uv_height, 0:uv_width] = frame[uv_y1+u_y_start:uv_y1+u_y_start+uv_height, uv_x1:uv_x1+uv_width]
- yuv_cropped_frame[size:size+uv_height, uv_width:size] = frame[uv_y1+u_y_start:uv_y1+u_y_start+uv_height, uv_x1+two_x_offset:uv_x1+two_x_offset+uv_width]
- # v channel
- yuv_cropped_frame[size+uv_height:size+uv_height*2, 0:uv_width] = frame[uv_y1+v_y_start:uv_y1+v_y_start+uv_height, uv_x1:uv_x1+uv_width]
- yuv_cropped_frame[size+uv_height:size+uv_height*2, uv_width:size] = frame[uv_y1+v_y_start:uv_y1+v_y_start+uv_height, uv_x1+two_x_offset:uv_x1+two_x_offset+uv_width]
+ return y, u1, u2, v1, v2
- return cv2.cvtColor(yuv_cropped_frame, cv2.COLOR_YUV2RGB_I420)
+def yuv_region_2_rgb(frame, region):
+ try:
+ height = frame.shape[0]//3*2
+ width = frame.shape[1]
+
+ # get the crop box if the region extends beyond the frame
+ crop_x1 = max(0, region[0])
+ crop_y1 = max(0, region[1])
+ # ensure these are a multiple of 4
+ crop_x2 = min(width, region[2])
+ crop_y2 = min(height, region[3])
+ crop_box = (crop_x1, crop_y1, crop_x2, crop_y2)
+
+ y, u1, u2, v1, v2 = get_yuv_crop(frame.shape, crop_box)
+
+        # if the region starts outside the frame, offset the start point in the cropped frame
+ y_channel_x_offset = abs(min(0, region[0]))
+ y_channel_y_offset = abs(min(0, region[1]))
+
+ uv_channel_x_offset = y_channel_x_offset//2
+ uv_channel_y_offset = y_channel_y_offset//4
+
+ # create the yuv region frame
+ # make sure the size is a multiple of 4
+ size = (region[3] - region[1])//4*4
+ yuv_cropped_frame = np.zeros((size+size//2, size), np.uint8)
+ # fill in black
+ yuv_cropped_frame[:] = 128
+ yuv_cropped_frame[0:size,0:size] = 16
+
+ # copy the y channel
+ yuv_cropped_frame[
+ y_channel_y_offset:y_channel_y_offset + y[3] - y[1],
+ y_channel_x_offset:y_channel_x_offset + y[2] - y[0]
+ ] = frame[
+ y[1]:y[3],
+ y[0]:y[2]
+ ]
+
+ uv_crop_width = u1[2] - u1[0]
+ uv_crop_height = u1[3] - u1[1]
+
+ # copy u1
+ yuv_cropped_frame[
+ size + uv_channel_y_offset:size + uv_channel_y_offset + uv_crop_height,
+ 0 + uv_channel_x_offset:0 + uv_channel_x_offset + uv_crop_width
+ ] = frame[
+ u1[1]:u1[3],
+ u1[0]:u1[2]
+ ]
+
+ # copy u2
+ yuv_cropped_frame[
+ size + uv_channel_y_offset:size + uv_channel_y_offset + uv_crop_height,
+ size//2 + uv_channel_x_offset:size//2 + uv_channel_x_offset + uv_crop_width
+ ] = frame[
+ u2[1]:u2[3],
+ u2[0]:u2[2]
+ ]
+
+ # copy v1
+ yuv_cropped_frame[
+ size+size//4 + uv_channel_y_offset:size+size//4 + uv_channel_y_offset + uv_crop_height,
+ 0 + uv_channel_x_offset:0 + uv_channel_x_offset + uv_crop_width
+ ] = frame[
+ v1[1]:v1[3],
+ v1[0]:v1[2]
+ ]
+
+ # copy v2
+ yuv_cropped_frame[
+ size+size//4 + uv_channel_y_offset:size+size//4 + uv_channel_y_offset + uv_crop_height,
+ size//2 + uv_channel_x_offset:size//2 + uv_channel_x_offset + uv_crop_width
+ ] = frame[
+ v2[1]:v2[3],
+ v2[0]:v2[2]
+ ]
+
+ return cv2.cvtColor(yuv_cropped_frame, cv2.COLOR_YUV2RGB_I420)
+ except:
+ print(f"frame.shape: {frame.shape}")
+ print(f"region: {region}")
+ raise
def intersection(box_a, box_b):
return (
@@ -179,6 +291,24 @@ def print_stack(sig, frame):
def listen():
signal.signal(signal.SIGUSR1, print_stack)
+def create_mask(frame_shape, mask):
+ mask_img = np.zeros(frame_shape, np.uint8)
+ mask_img[:] = 255
+
+ if isinstance(mask, list):
+ for m in mask:
+ add_mask(m, mask_img)
+
+ elif isinstance(mask, str):
+ add_mask(mask, mask_img)
+
+ return mask_img
+
+def add_mask(mask, mask_img):
+ points = mask.split(',')
+ contour = np.array([[int(points[i]), int(points[i+1])] for i in range(0, len(points), 2)])
+ cv2.fillPoly(mask_img, pts=[contour], color=(0))
+
class FrameManager(ABC):
@abstractmethod
def create(self, name, size) -> AnyStr:
@@ -241,4 +371,4 @@ def delete(self, name):
if name in self.shm_store:
self.shm_store[name].close()
self.shm_store[name].unlink()
- del self.shm_store[name]
\ No newline at end of file
+ del self.shm_store[name]
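For reference, the I420 layout arithmetic that get_yuv_crop and yuv_region_2_rgb above assume, worked through for a hypothetical 1280x720 frame (not part of the patch): the frame arrives as a single (height*3//2, width) plane with the full-resolution Y channel on top and the quarter-resolution U and V channels packed below it as two side-by-side halves.

# hypothetical 1280x720 camera frame stored as a single I420 plane
frame_shape = (720 * 3 // 2, 1280)        # (1080, 1280) buffer

frame_height = frame_shape[0] // 3 * 2    # 720 rows of Y at full width
frame_width = frame_shape[1]              # 1280 columns
uv_width = frame_width // 2               # 640: width of each U/V half
uv_height = frame_height // 4             # 180: rows occupied by U (and by V)

# Y fills rows 0..719; U fills rows 720..899 as two 640-wide halves side by
# side; V fills rows 900..1079 in the same split layout
print(frame_height, uv_width, uv_height)  # 720 640 180
print(frame_height + 2 * uv_height)       # 1080, the full buffer height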
diff --git a/frigate/video.py b/frigate/video.py
--- a/frigate/video.py
+++ b/frigate/video.py
@@ -1,59 +1,37 @@
-import os
-import time
-import datetime
-import cv2
-import queue
-import threading
-import ctypes
-import multiprocessing as mp
-import subprocess as sp
-import numpy as np
+import base64
import copy
+import ctypes
+import datetime
import itertools
import json
-import base64
-from typing import Dict, List
+import logging
+import multiprocessing as mp
+import os
+import queue
+import subprocess as sp
+import signal
+import threading
+import time
from collections import defaultdict
-from frigate.util import draw_box_with_label, yuv_region_2_rgb, area, calculate_region, clipped, intersection_over_union, intersection, EventsPerSecond, listen, FrameManager, SharedMemoryFrameManager
-from frigate.objects import ObjectTracker
+from setproctitle import setproctitle
+from typing import Dict, List
+
+import cv2
+import numpy as np
+
+from frigate.config import CameraConfig
from frigate.edgetpu import RemoteObjectDetector
+from frigate.log import LogPipe
from frigate.motion import MotionDetector
+from frigate.objects import ObjectTracker
+from frigate.util import (EventsPerSecond, FrameManager,
+ SharedMemoryFrameManager, area, calculate_region,
+ clipped, draw_box_with_label, intersection,
+ intersection_over_union, listen, yuv_region_2_rgb)
-def get_frame_shape(source):
- ffprobe_cmd = " ".join([
- 'ffprobe',
- '-v',
- 'panic',
- '-show_error',
- '-show_streams',
- '-of',
- 'json',
- '"'+source+'"'
- ])
- print(ffprobe_cmd)
- p = sp.Popen(ffprobe_cmd, stdout=sp.PIPE, shell=True)
- (output, err) = p.communicate()
- p_status = p.wait()
- info = json.loads(output)
- print(info)
-
- video_info = [s for s in info['streams'] if s['codec_type'] == 'video'][0]
-
- if video_info['height'] != 0 and video_info['width'] != 0:
- return (video_info['height'], video_info['width'], 3)
-
- # fallback to using opencv if ffprobe didnt succeed
- video = cv2.VideoCapture(source)
- ret, frame = video.read()
- frame_shape = frame.shape
- video.release()
- return frame_shape
-
-def get_ffmpeg_input(ffmpeg_input):
- frigate_vars = {k: v for k, v in os.environ.items() if k.startswith('FRIGATE_')}
- return ffmpeg_input.format(**frigate_vars)
-
-def filtered(obj, objects_to_track, object_filters, mask=None):
+logger = logging.getLogger(__name__)
+
+def filtered(obj, objects_to_track, object_filters):
object_name = obj[0]
if not object_name in objects_to_track:
@@ -64,63 +42,66 @@ def filtered(obj, objects_to_track, object_filters, mask=None):
# if the min area is larger than the
# detected object, don't add it to detected objects
- if obj_settings.get('min_area',-1) > obj[3]:
+ if obj_settings.min_area > obj[3]:
return True
# if the detected object is larger than the
# max area, don't add it to detected objects
- if obj_settings.get('max_area', 24000000) < obj[3]:
+ if obj_settings.max_area < obj[3]:
return True
# if the score is lower than the min_score, skip
- if obj_settings.get('min_score', 0) > obj[1]:
+ if obj_settings.min_score > obj[1]:
return True
- # compute the coordinates of the object and make sure
- # the location isnt outside the bounds of the image (can happen from rounding)
- y_location = min(int(obj[2][3]), len(mask)-1)
- x_location = min(int((obj[2][2]-obj[2][0])/2.0)+obj[2][0], len(mask[0])-1)
-
- # if the object is in a masked location, don't add it to detected objects
- if (not mask is None) and (mask[y_location][x_location] == 0):
- return True
+ if not obj_settings.mask is None:
+ # compute the coordinates of the object and make sure
+        # the location isn't outside the bounds of the image (can happen from rounding)
+ y_location = min(int(obj[2][3]), len(obj_settings.mask)-1)
+ x_location = min(int((obj[2][2]-obj[2][0])/2.0)+obj[2][0], len(obj_settings.mask[0])-1)
+
+ # if the object is in a masked location, don't add it to detected objects
+ if obj_settings.mask[y_location][x_location] == 0:
+ return True
return False
-def create_tensor_input(frame, region):
+def create_tensor_input(frame, model_shape, region):
cropped_frame = yuv_region_2_rgb(frame, region)
# Resize to 300x300 if needed
- if cropped_frame.shape != (300, 300, 3):
- cropped_frame = cv2.resize(cropped_frame, dsize=(300, 300), interpolation=cv2.INTER_LINEAR)
+ if cropped_frame.shape != (model_shape[0], model_shape[1], 3):
+ cropped_frame = cv2.resize(cropped_frame, dsize=model_shape, interpolation=cv2.INTER_LINEAR)
- # Expand dimensions since the model expects images to have shape: [1, 300, 300, 3]
+ # Expand dimensions since the model expects images to have shape: [1, height, width, 3]
return np.expand_dims(cropped_frame, axis=0)
-def start_or_restart_ffmpeg(ffmpeg_cmd, frame_size, ffmpeg_process=None):
+def stop_ffmpeg(ffmpeg_process, logger):
+ logger.info("Terminating the existing ffmpeg process...")
+ ffmpeg_process.terminate()
+ try:
+ logger.info("Waiting for ffmpeg to exit gracefully...")
+ ffmpeg_process.communicate(timeout=30)
+ except sp.TimeoutExpired:
+ logger.info("FFmpeg didnt exit. Force killing...")
+ ffmpeg_process.kill()
+ ffmpeg_process.communicate()
+ ffmpeg_process = None
+
+def start_or_restart_ffmpeg(ffmpeg_cmd, logger, logpipe: LogPipe, frame_size=None, ffmpeg_process=None):
if not ffmpeg_process is None:
- print("Terminating the existing ffmpeg process...")
- ffmpeg_process.terminate()
- try:
- print("Waiting for ffmpeg to exit gracefully...")
- ffmpeg_process.communicate(timeout=30)
- except sp.TimeoutExpired:
- print("FFmpeg didnt exit. Force killing...")
- ffmpeg_process.kill()
- ffmpeg_process.communicate()
- ffmpeg_process = None
-
- print("Creating ffmpeg process...")
- print(" ".join(ffmpeg_cmd))
- process = sp.Popen(ffmpeg_cmd, stdout = sp.PIPE, stdin = sp.DEVNULL, bufsize=frame_size*10, start_new_session=True)
+ stop_ffmpeg(ffmpeg_process, logger)
+
+ if frame_size is None:
+ process = sp.Popen(ffmpeg_cmd, stdout = sp.DEVNULL, stderr=logpipe, stdin = sp.DEVNULL, start_new_session=True)
+ else:
+ process = sp.Popen(ffmpeg_cmd, stdout = sp.PIPE, stderr=logpipe, stdin = sp.DEVNULL, bufsize=frame_size*10, start_new_session=True)
return process
def capture_frames(ffmpeg_process, camera_name, frame_shape, frame_manager: FrameManager,
- frame_queue, take_frame: int, fps:mp.Value, skipped_fps: mp.Value,
- stop_event: mp.Event, current_frame: mp.Value):
+ frame_queue, fps:mp.Value, skipped_fps: mp.Value, current_frame: mp.Value):
- frame_num = 0
- frame_size = frame_shape[0] * frame_shape[1] * 3 // 2
+ frame_size = frame_shape[0] * frame_shape[1]
frame_rate = EventsPerSecond()
frame_rate.start()
skipped_eps = EventsPerSecond()
@@ -128,33 +109,23 @@ def capture_frames(ffmpeg_process, camera_name, frame_shape, frame_manager: Fram
while True:
fps.value = frame_rate.eps()
skipped_fps = skipped_eps.eps()
- if stop_event.is_set():
- print(f"{camera_name}: stop event set. exiting capture thread...")
- break
current_frame.value = datetime.datetime.now().timestamp()
frame_name = f"{camera_name}{current_frame.value}"
frame_buffer = frame_manager.create(frame_name, frame_size)
try:
- frame_buffer[:] = ffmpeg_process.stdout.read(frame_size)
- except:
- print(f"{camera_name}: ffmpeg sent a broken frame. something is wrong.")
+ frame_buffer[:] = ffmpeg_process.stdout.read(frame_size)
+ except Exception as e:
+ logger.info(f"{camera_name}: ffmpeg sent a broken frame. {e}")
- if ffmpeg_process.poll() != None:
- print(f"{camera_name}: ffmpeg process is not running. exiting capture thread...")
- frame_manager.delete(frame_name)
- break
-
- continue
+ if ffmpeg_process.poll() != None:
+ logger.info(f"{camera_name}: ffmpeg process is not running. exiting capture thread...")
+ frame_manager.delete(frame_name)
+ break
+ continue
frame_rate.update()
- frame_num += 1
- if (frame_num % take_frame) != 0:
- skipped_eps.update()
- frame_manager.delete(frame_name)
- continue
-
# if the queue is full, skip this frame
if frame_queue.full():
skipped_eps.update()
@@ -168,123 +139,139 @@ def capture_frames(ffmpeg_process, camera_name, frame_shape, frame_manager: Fram
frame_queue.put(current_frame.value)
class CameraWatchdog(threading.Thread):
- def __init__(self, name, config, frame_queue, camera_fps, ffmpeg_pid, stop_event):
+ def __init__(self, camera_name, config, frame_queue, camera_fps, ffmpeg_pid, stop_event):
threading.Thread.__init__(self)
- self.name = name
+ self.logger = logging.getLogger(f"watchdog.{camera_name}")
+ self.camera_name = camera_name
self.config = config
self.capture_thread = None
- self.ffmpeg_process = None
- self.stop_event = stop_event
+ self.ffmpeg_detect_process = None
+ self.logpipe = LogPipe(f"ffmpeg.{self.camera_name}.detect", logging.ERROR)
+ self.ffmpeg_other_processes = []
self.camera_fps = camera_fps
self.ffmpeg_pid = ffmpeg_pid
self.frame_queue = frame_queue
- self.frame_shape = self.config['frame_shape']
- self.frame_size = self.frame_shape[0] * self.frame_shape[1] * 3 // 2
+ self.frame_shape = self.config.frame_shape_yuv
+ self.frame_size = self.frame_shape[0] * self.frame_shape[1]
+ self.stop_event = stop_event
def run(self):
- self.start_ffmpeg()
+ self.start_ffmpeg_detect()
+
+ for c in self.config.ffmpeg_cmds:
+ if 'detect' in c['roles']:
+ continue
+ logpipe = LogPipe(f"ffmpeg.{self.camera_name}.{'_'.join(sorted(c['roles']))}", logging.ERROR)
+ self.ffmpeg_other_processes.append({
+ 'cmd': c['cmd'],
+ 'logpipe': logpipe,
+ 'process': start_or_restart_ffmpeg(c['cmd'], self.logger, logpipe)
+ })
+
time.sleep(10)
while True:
if self.stop_event.is_set():
- print(f"Exiting watchdog...")
+ stop_ffmpeg(self.ffmpeg_detect_process, self.logger)
+ for p in self.ffmpeg_other_processes:
+ stop_ffmpeg(p['process'], self.logger)
+ p['logpipe'].close()
+ self.logpipe.close()
break
now = datetime.datetime.now().timestamp()
if not self.capture_thread.is_alive():
- self.start_ffmpeg()
- elif now - self.capture_thread.current_frame.value > 5:
- print(f"No frames received from {self.name} in 5 seconds. Exiting ffmpeg...")
- self.ffmpeg_process.terminate()
+ self.start_ffmpeg_detect()
+ elif now - self.capture_thread.current_frame.value > 20:
+ self.logger.info(f"No frames received from {self.camera_name} in 20 seconds. Exiting ffmpeg...")
+ self.ffmpeg_detect_process.terminate()
try:
- print("Waiting for ffmpeg to exit gracefully...")
- self.ffmpeg_process.communicate(timeout=30)
+ self.logger.info("Waiting for ffmpeg to exit gracefully...")
+ self.ffmpeg_detect_process.communicate(timeout=30)
except sp.TimeoutExpired:
- print("FFmpeg didnt exit. Force killing...")
- self.ffmpeg_process.kill()
- self.ffmpeg_process.communicate()
+ self.logger.info("FFmpeg didnt exit. Force killing...")
+ self.ffmpeg_detect_process.kill()
+ self.ffmpeg_detect_process.communicate()
+
+ for p in self.ffmpeg_other_processes:
+ poll = p['process'].poll()
+ if poll == None:
+ continue
+ p['process'] = start_or_restart_ffmpeg(p['cmd'], self.logger, p['logpipe'], ffmpeg_process=p['process'])
# wait a bit before checking again
time.sleep(10)
- def start_ffmpeg(self):
- self.ffmpeg_process = start_or_restart_ffmpeg(self.config['ffmpeg_cmd'], self.frame_size)
- self.ffmpeg_pid.value = self.ffmpeg_process.pid
- self.capture_thread = CameraCapture(self.name, self.ffmpeg_process, self.frame_shape, self.frame_queue,
- self.config['take_frame'], self.camera_fps, self.stop_event)
- self.capture_thread.start()
+ def start_ffmpeg_detect(self):
+ ffmpeg_cmd = [c['cmd'] for c in self.config.ffmpeg_cmds if 'detect' in c['roles']][0]
+ self.ffmpeg_detect_process = start_or_restart_ffmpeg(ffmpeg_cmd, self.logger, self.logpipe, self.frame_size)
+ self.ffmpeg_pid.value = self.ffmpeg_detect_process.pid
+ self.capture_thread = CameraCapture(self.camera_name, self.ffmpeg_detect_process, self.frame_shape, self.frame_queue,
+ self.camera_fps)
+ self.capture_thread.start()
class CameraCapture(threading.Thread):
- def __init__(self, name, ffmpeg_process, frame_shape, frame_queue, take_frame, fps, stop_event):
+ def __init__(self, camera_name, ffmpeg_process, frame_shape, frame_queue, fps):
threading.Thread.__init__(self)
- self.name = name
+ self.name = f"capture:{camera_name}"
+ self.camera_name = camera_name
self.frame_shape = frame_shape
- self.frame_size = frame_shape[0] * frame_shape[1] * frame_shape[2]
self.frame_queue = frame_queue
- self.take_frame = take_frame
self.fps = fps
self.skipped_fps = EventsPerSecond()
self.frame_manager = SharedMemoryFrameManager()
self.ffmpeg_process = ffmpeg_process
self.current_frame = mp.Value('d', 0.0)
self.last_frame = 0
- self.stop_event = stop_event
def run(self):
self.skipped_fps.start()
- capture_frames(self.ffmpeg_process, self.name, self.frame_shape, self.frame_manager, self.frame_queue, self.take_frame,
- self.fps, self.skipped_fps, self.stop_event, self.current_frame)
+ capture_frames(self.ffmpeg_process, self.camera_name, self.frame_shape, self.frame_manager, self.frame_queue,
+ self.fps, self.skipped_fps, self.current_frame)
+
+def capture_camera(name, config: CameraConfig, process_info):
+ stop_event = mp.Event()
+ def receiveSignal(signalNumber, frame):
+ stop_event.set()
+
+ signal.signal(signal.SIGTERM, receiveSignal)
+ signal.signal(signal.SIGINT, receiveSignal)
-def capture_camera(name, config, process_info, stop_event):
frame_queue = process_info['frame_queue']
camera_watchdog = CameraWatchdog(name, config, frame_queue, process_info['camera_fps'], process_info['ffmpeg_pid'], stop_event)
camera_watchdog.start()
camera_watchdog.join()
-def track_camera(name, config, detection_queue, result_connection, detected_objects_queue, process_info, stop_event):
+def track_camera(name, config: CameraConfig, model_shape, detection_queue, result_connection, detected_objects_queue, process_info):
+ stop_event = mp.Event()
+ def receiveSignal(signalNumber, frame):
+ stop_event.set()
+
+ signal.signal(signal.SIGTERM, receiveSignal)
+ signal.signal(signal.SIGINT, receiveSignal)
+
+ threading.current_thread().name = f"process:{name}"
+ setproctitle(f"frigate.process:{name}")
listen()
frame_queue = process_info['frame_queue']
+ detection_enabled = process_info['detection_enabled']
- frame_shape = config['frame_shape']
-
- # Merge the tracked object config with the global config
- camera_objects_config = config.get('objects', {})
- objects_to_track = camera_objects_config.get('track', [])
- object_filters = camera_objects_config.get('filters', {})
-
- # load in the mask for object detection
- if 'mask' in config:
- if config['mask'].startswith('base64,'):
- img = base64.b64decode(config['mask'][7:])
- npimg = np.fromstring(img, dtype=np.uint8)
- mask = cv2.imdecode(npimg, cv2.IMREAD_GRAYSCALE)
- elif config['mask'].startswith('poly,'):
- points = config['mask'].split(',')[1:]
- contour = np.array([[int(points[i]), int(points[i+1])] for i in range(0, len(points), 2)])
- mask = np.zeros((frame_shape[0], frame_shape[1]), np.uint8)
- mask[:] = 255
- cv2.fillPoly(mask, pts=[contour], color=(0))
- else:
- mask = cv2.imread("/config/{}".format(config['mask']), cv2.IMREAD_GRAYSCALE)
- else:
- mask = None
+ frame_shape = config.frame_shape
+ objects_to_track = config.objects.track
+ object_filters = config.objects.filters
- if mask is None or mask.size == 0:
- mask = np.zeros((frame_shape[0], frame_shape[1]), np.uint8)
- mask[:] = 255
+ motion_detector = MotionDetector(frame_shape, config.motion)
+ object_detector = RemoteObjectDetector(name, '/labelmap.txt', detection_queue, result_connection, model_shape)
- motion_detector = MotionDetector(frame_shape, mask, resize_factor=6)
- object_detector = RemoteObjectDetector(name, '/labelmap.txt', detection_queue, result_connection)
-
- object_tracker = ObjectTracker(10)
+ object_tracker = ObjectTracker(config.detect)
frame_manager = SharedMemoryFrameManager()
- process_frames(name, frame_queue, frame_shape, frame_manager, motion_detector, object_detector,
- object_tracker, detected_objects_queue, process_info, objects_to_track, object_filters, mask, stop_event)
+ process_frames(name, frame_queue, frame_shape, model_shape, frame_manager, motion_detector, object_detector,
+ object_tracker, detected_objects_queue, process_info, objects_to_track, object_filters, detection_enabled, stop_event)
- print(f"{name}: exiting subprocess")
+ logger.info(f"{name}: exiting subprocess")
def reduce_boxes(boxes):
if len(boxes) == 0:
@@ -292,8 +279,8 @@ def reduce_boxes(boxes):
reduced_boxes = cv2.groupRectangles([list(b) for b in itertools.chain(boxes, boxes)], 1, 0.2)[0]
return [tuple(b) for b in reduced_boxes]
-def detect(object_detector, frame, region, objects_to_track, object_filters, mask):
- tensor_input = create_tensor_input(frame, region)
+def detect(object_detector, frame, model_shape, region, objects_to_track, object_filters):
+ tensor_input = create_tensor_input(frame, model_shape, region)
detections = []
region_detections = object_detector.detect(tensor_input)
@@ -310,16 +297,16 @@ def detect(object_detector, frame, region, objects_to_track, object_filters, mas
(x_max-x_min)*(y_max-y_min),
region)
# apply object filters
- if filtered(det, objects_to_track, object_filters, mask):
+ if filtered(det, objects_to_track, object_filters):
continue
detections.append(det)
return detections
-def process_frames(camera_name: str, frame_queue: mp.Queue, frame_shape,
+def process_frames(camera_name: str, frame_queue: mp.Queue, frame_shape, model_shape,
frame_manager: FrameManager, motion_detector: MotionDetector,
object_detector: RemoteObjectDetector, object_tracker: ObjectTracker,
detected_objects_queue: mp.Queue, process_info: Dict,
- objects_to_track: List[str], object_filters: Dict, mask, stop_event: mp.Event,
+ objects_to_track: List[str], object_filters, detection_enabled: mp.Value, stop_event,
exit_on_empty: bool = False):
fps = process_info['process_fps']
@@ -330,9 +317,12 @@ def process_frames(camera_name: str, frame_queue: mp.Queue, frame_shape,
fps_tracker.start()
while True:
- if stop_event.is_set() or (exit_on_empty and frame_queue.empty()):
- print(f"Exiting track_objects...")
- break
+ if stop_event.is_set():
+ break
+
+ if exit_on_empty and frame_queue.empty():
+ logger.info(f"Exiting track_objects...")
+ break
try:
frame_time = frame_queue.get(True, 10)
@@ -344,7 +334,15 @@ def process_frames(camera_name: str, frame_queue: mp.Queue, frame_shape,
frame = frame_manager.get(f"{camera_name}{frame_time}", (frame_shape[0]*3//2, frame_shape[1]))
if frame is None:
- print(f"{camera_name}: frame {frame_time} is not in memory store.")
+ logger.info(f"{camera_name}: frame {frame_time} is not in memory store.")
+ continue
+
+ if not detection_enabled.value:
+ fps.value = fps_tracker.eps()
+ object_tracker.match_and_update(frame_time, [])
+ detected_objects_queue.put((camera_name, frame_time, object_tracker.tracked_objects, [], []))
+ detection_fps.value = object_detector.fps.eps()
+ frame_manager.close(f"{camera_name}{frame_time}")
continue
# look for motion
@@ -369,7 +367,7 @@ def process_frames(camera_name: str, frame_queue: mp.Queue, frame_shape,
# resize regions and detect
detections = []
for region in regions:
- detections.extend(detect(object_detector, frame, region, objects_to_track, object_filters, mask))
+ detections.extend(detect(object_detector, frame, model_shape, region, objects_to_track, object_filters))
#########
# merge objects, check for clipped objects and look again up to 4 times
@@ -401,8 +399,10 @@ def process_frames(camera_name: str, frame_queue: mp.Queue, frame_shape,
region = calculate_region(frame_shape,
box[0], box[1],
box[2], box[3])
+
+ regions.append(region)
- selected_objects.extend(detect(object_detector, frame, region, objects_to_track, object_filters, mask))
+ selected_objects.extend(detect(object_detector, frame, model_shape, region, objects_to_track, object_filters))
refining = True
else:
@@ -419,11 +419,11 @@ def process_frames(camera_name: str, frame_queue: mp.Queue, frame_shape,
# add to the queue if not full
if(detected_objects_queue.full()):
- frame_manager.delete(f"{camera_name}{frame_time}")
- continue
+ frame_manager.delete(f"{camera_name}{frame_time}")
+ continue
else:
- fps_tracker.update()
- fps.value = fps_tracker.eps()
- detected_objects_queue.put((camera_name, frame_time, object_tracker.tracked_objects))
- detection_fps.value = object_detector.fps.eps()
- frame_manager.close(f"{camera_name}{frame_time}")
+ fps_tracker.update()
+ fps.value = fps_tracker.eps()
+ detected_objects_queue.put((camera_name, frame_time, object_tracker.tracked_objects, motion_boxes, regions))
+ detection_fps.value = object_detector.fps.eps()
+ frame_manager.close(f"{camera_name}{frame_time}")
diff --git a/frigate/watchdog.py b/frigate/watchdog.py
new file mode 100644
--- /dev/null
+++ b/frigate/watchdog.py
@@ -0,0 +1,38 @@
+import datetime
+import logging
+import threading
+import time
+import os
+import signal
+
+logger = logging.getLogger(__name__)
+
+class FrigateWatchdog(threading.Thread):
+ def __init__(self, detectors, stop_event):
+ threading.Thread.__init__(self)
+ self.name = 'frigate_watchdog'
+ self.detectors = detectors
+ self.stop_event = stop_event
+
+ def run(self):
+ time.sleep(10)
+ while True:
+ # wait a bit before checking
+ time.sleep(10)
+
+ if self.stop_event.is_set():
+ logger.info(f"Exiting watchdog...")
+ break
+
+ now = datetime.datetime.now().timestamp()
+
+ # check the detection processes
+ for detector in self.detectors.values():
+ detection_start = detector.detection_start.value
+ if (detection_start > 0.0 and
+ now - detection_start > 10):
+ logger.info("Detection appears to be stuck. Restarting detection process")
+ detector.start_or_restart()
+ elif not detector.detect_process.is_alive():
+ logger.info("Detection appears to have stopped. Restarting frigate")
+ os.kill(os.getpid(), signal.SIGTERM)
diff --git a/frigate/zeroconf.py b/frigate/zeroconf.py
new file mode 100644
--- /dev/null
+++ b/frigate/zeroconf.py
@@ -0,0 +1,58 @@
+import logging
+import socket
+
+from zeroconf import (
+ ServiceInfo,
+ NonUniqueNameException,
+ InterfaceChoice,
+ IPVersion,
+ Zeroconf,
+)
+
+logger = logging.getLogger(__name__)
+
+ZEROCONF_TYPE = "_frigate._tcp.local."
+
+# Taken from: http://stackoverflow.com/a/11735897
+def get_local_ip() -> str:
+ """Try to determine the local IP address of the machine."""
+ try:
+ sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
+
+ # Use Google Public DNS server to determine own IP
+ sock.connect(("8.8.8.8", 80))
+
+ return sock.getsockname()[0] # type: ignore
+ except OSError:
+ try:
+ return socket.gethostbyname(socket.gethostname())
+ except socket.gaierror:
+ return "127.0.0.1"
+ finally:
+ sock.close()
+
+def broadcast_zeroconf(frigate_id):
+ zeroconf = Zeroconf(interfaces=InterfaceChoice.Default, ip_version=IPVersion.V4Only)
+
+ host_ip = get_local_ip()
+
+ try:
+ host_ip_pton = socket.inet_pton(socket.AF_INET, host_ip)
+ except OSError:
+ host_ip_pton = socket.inet_pton(socket.AF_INET6, host_ip)
+
+ info = ServiceInfo(
+ ZEROCONF_TYPE,
+ name=f"{frigate_id}.{ZEROCONF_TYPE}",
+ addresses=[host_ip_pton],
+ port=5000,
+ )
+
+ logger.info("Starting Zeroconf broadcast")
+ try:
+ zeroconf.register_service(info)
+ except NonUniqueNameException:
+ logger.error(
+ "Frigate instance with identical name present in the local network"
+ )
+ return zeroconf
\ No newline at end of file
diff --git a/migrations/001_create_events_table.py b/migrations/001_create_events_table.py
new file mode 100644
--- /dev/null
+++ b/migrations/001_create_events_table.py
@@ -0,0 +1,41 @@
+"""Peewee migrations -- 001_create_events_table.py.
+
+Some examples (model - class or model name)::
+
+ > Model = migrator.orm['model_name'] # Return model in current state by name
+
+ > migrator.sql(sql) # Run custom SQL
+ > migrator.python(func, *args, **kwargs) # Run python code
+ > migrator.create_model(Model) # Create a model (could be used as decorator)
+ > migrator.remove_model(model, cascade=True) # Remove a model
+ > migrator.add_fields(model, **fields) # Add fields to a model
+ > migrator.change_fields(model, **fields) # Change fields
+ > migrator.remove_fields(model, *field_names, cascade=True)
+ > migrator.rename_field(model, old_field_name, new_field_name)
+ > migrator.rename_table(model, new_table_name)
+ > migrator.add_index(model, *col_names, unique=False)
+ > migrator.drop_index(model, *col_names)
+ > migrator.add_not_null(model, *field_names)
+ > migrator.drop_not_null(model, *field_names)
+ > migrator.add_default(model, field_name, default)
+
+"""
+
+import datetime as dt
+import peewee as pw
+from decimal import ROUND_HALF_EVEN
+
+try:
+ import playhouse.postgres_ext as pw_pext
+except ImportError:
+ pass
+
+SQL = pw.SQL
+
+def migrate(migrator, database, fake=False, **kwargs):
+ migrator.sql('CREATE TABLE IF NOT EXISTS "event" ("id" VARCHAR(30) NOT NULL PRIMARY KEY, "label" VARCHAR(20) NOT NULL, "camera" VARCHAR(20) NOT NULL, "start_time" DATETIME NOT NULL, "end_time" DATETIME NOT NULL, "top_score" REAL NOT NULL, "false_positive" INTEGER NOT NULL, "zones" JSON NOT NULL, "thumbnail" TEXT NOT NULL)')
+ migrator.sql('CREATE INDEX IF NOT EXISTS "event_label" ON "event" ("label")')
+ migrator.sql('CREATE INDEX IF NOT EXISTS "event_camera" ON "event" ("camera")')
+
+def rollback(migrator, database, fake=False, **kwargs):
+ pass
diff --git a/migrations/002_add_clip_snapshot.py b/migrations/002_add_clip_snapshot.py
new file mode 100644
--- /dev/null
+++ b/migrations/002_add_clip_snapshot.py
@@ -0,0 +1,41 @@
+"""Peewee migrations -- 002_add_clip_snapshot.py.
+
+Some examples (model - class or model name)::
+
+ > Model = migrator.orm['model_name'] # Return model in current state by name
+
+ > migrator.sql(sql) # Run custom SQL
+ > migrator.python(func, *args, **kwargs) # Run python code
+ > migrator.create_model(Model) # Create a model (could be used as decorator)
+ > migrator.remove_model(model, cascade=True) # Remove a model
+ > migrator.add_fields(model, **fields) # Add fields to a model
+ > migrator.change_fields(model, **fields) # Change fields
+ > migrator.remove_fields(model, *field_names, cascade=True)
+ > migrator.rename_field(model, old_field_name, new_field_name)
+ > migrator.rename_table(model, new_table_name)
+ > migrator.add_index(model, *col_names, unique=False)
+ > migrator.drop_index(model, *col_names)
+ > migrator.add_not_null(model, *field_names)
+ > migrator.drop_not_null(model, *field_names)
+ > migrator.add_default(model, field_name, default)
+
+"""
+
+import datetime as dt
+import peewee as pw
+from decimal import ROUND_HALF_EVEN
+from frigate.models import Event
+
+try:
+ import playhouse.postgres_ext as pw_pext
+except ImportError:
+ pass
+
+SQL = pw.SQL
+
+
+def migrate(migrator, database, fake=False, **kwargs):
+ migrator.add_fields(Event, has_clip=pw.BooleanField(default=True), has_snapshot=pw.BooleanField(default=True))
+
+def rollback(migrator, database, fake=False, **kwargs):
+ migrator.remove_fields(Event, ['has_clip', 'has_snapshot'])
diff --git a/process_clip.py b/process_clip.py
deleted file mode 100644
--- a/process_clip.py
+++ /dev/null
@@ -1,152 +0,0 @@
-import sys
-import click
-import os
-import datetime
-from unittest import TestCase, main
-from frigate.video import process_frames, start_or_restart_ffmpeg, capture_frames, get_frame_shape
-from frigate.util import DictFrameManager, SharedMemoryFrameManager, EventsPerSecond, draw_box_with_label
-from frigate.motion import MotionDetector
-from frigate.edgetpu import LocalObjectDetector
-from frigate.objects import ObjectTracker
-import multiprocessing as mp
-import numpy as np
-import cv2
-from frigate.object_processing import COLOR_MAP, CameraState
-
-class ProcessClip():
- def __init__(self, clip_path, frame_shape, config):
- self.clip_path = clip_path
- self.frame_shape = frame_shape
- self.camera_name = 'camera'
- self.frame_manager = DictFrameManager()
- # self.frame_manager = SharedMemoryFrameManager()
- self.frame_queue = mp.Queue()
- self.detected_objects_queue = mp.Queue()
- self.camera_state = CameraState(self.camera_name, config, self.frame_manager)
-
- def load_frames(self):
- fps = EventsPerSecond()
- skipped_fps = EventsPerSecond()
- stop_event = mp.Event()
- detection_frame = mp.Value('d', datetime.datetime.now().timestamp()+100000)
- current_frame = mp.Value('d', 0.0)
- ffmpeg_cmd = f"ffmpeg -hide_banner -loglevel panic -i {self.clip_path} -f rawvideo -pix_fmt rgb24 pipe:".split(" ")
- ffmpeg_process = start_or_restart_ffmpeg(ffmpeg_cmd, self.frame_shape[0]*self.frame_shape[1]*self.frame_shape[2])
- capture_frames(ffmpeg_process, self.camera_name, self.frame_shape, self.frame_manager, self.frame_queue, 1, fps, skipped_fps, stop_event, detection_frame, current_frame)
- ffmpeg_process.wait()
- ffmpeg_process.communicate()
-
- def process_frames(self, objects_to_track=['person'], object_filters={}):
- mask = np.zeros((self.frame_shape[0], self.frame_shape[1], 1), np.uint8)
- mask[:] = 255
- motion_detector = MotionDetector(self.frame_shape, mask)
-
- object_detector = LocalObjectDetector(labels='/labelmap.txt')
- object_tracker = ObjectTracker(10)
- process_fps = mp.Value('d', 0.0)
- detection_fps = mp.Value('d', 0.0)
- current_frame = mp.Value('d', 0.0)
- stop_event = mp.Event()
-
- process_frames(self.camera_name, self.frame_queue, self.frame_shape, self.frame_manager, motion_detector, object_detector, object_tracker, self.detected_objects_queue,
- process_fps, detection_fps, current_frame, objects_to_track, object_filters, mask, stop_event, exit_on_empty=True)
-
- def objects_found(self, debug_path=None):
- obj_detected = False
- top_computed_score = 0.0
- def handle_event(name, obj):
- nonlocal obj_detected
- nonlocal top_computed_score
- if obj['computed_score'] > top_computed_score:
- top_computed_score = obj['computed_score']
- if not obj['false_positive']:
- obj_detected = True
- self.camera_state.on('new', handle_event)
- self.camera_state.on('update', handle_event)
-
- while(not self.detected_objects_queue.empty()):
- camera_name, frame_time, current_tracked_objects = self.detected_objects_queue.get()
- if not debug_path is None:
- self.save_debug_frame(debug_path, frame_time, current_tracked_objects.values())
-
- self.camera_state.update(frame_time, current_tracked_objects)
- for obj in self.camera_state.tracked_objects.values():
- print(f"{frame_time}: {obj['id']} - {obj['computed_score']} - {obj['score_history']}")
-
- self.frame_manager.delete(self.camera_state.previous_frame_id)
-
- return {
- 'object_detected': obj_detected,
- 'top_score': top_computed_score
- }
-
- def save_debug_frame(self, debug_path, frame_time, tracked_objects):
- current_frame = self.frame_manager.get(f"{self.camera_name}{frame_time}", self.frame_shape)
- # draw the bounding boxes on the frame
- for obj in tracked_objects:
- thickness = 2
- color = (0,0,175)
-
- if obj['frame_time'] != frame_time:
- thickness = 1
- color = (255,0,0)
- else:
- color = (255,255,0)
-
- # draw the bounding boxes on the frame
- box = obj['box']
- draw_box_with_label(current_frame, box[0], box[1], box[2], box[3], obj['label'], f"{int(obj['score']*100)}% {int(obj['area'])}", thickness=thickness, color=color)
- # draw the regions on the frame
- region = obj['region']
- draw_box_with_label(current_frame, region[0], region[1], region[2], region[3], 'region', "", thickness=1, color=(0,255,0))
-
- cv2.imwrite(f"{os.path.join(debug_path, os.path.basename(self.clip_path))}.{int(frame_time*1000000)}.jpg", cv2.cvtColor(current_frame, cv2.COLOR_RGB2BGR))
-
[email protected]()
[email protected]("-p", "--path", required=True, help="Path to clip or directory to test.")
[email protected]("-l", "--label", default='person', help="Label name to detect.")
[email protected]("-t", "--threshold", default=0.85, help="Threshold value for objects.")
[email protected]("--debug-path", default=None, help="Path to output frames for debugging.")
-def process(path, label, threshold, debug_path):
- clips = []
- if os.path.isdir(path):
- files = os.listdir(path)
- files.sort()
- clips = [os.path.join(path, file) for file in files]
- elif os.path.isfile(path):
- clips.append(path)
-
- config = {
- 'snapshots': {
- 'show_timestamp': False,
- 'draw_zones': False
- },
- 'zones': {},
- 'objects': {
- 'track': [label],
- 'filters': {
- 'person': {
- 'threshold': threshold
- }
- }
- }
- }
-
- results = []
- for c in clips:
- frame_shape = get_frame_shape(c)
- config['frame_shape'] = frame_shape
- process_clip = ProcessClip(c, frame_shape, config)
- process_clip.load_frames()
- process_clip.process_frames(objects_to_track=config['objects']['track'])
-
- results.append((c, process_clip.objects_found(debug_path)))
-
- for result in results:
- print(f"{result[0]}: {result[1]}")
-
- positive_count = sum(1 for result in results if result[1]['object_detected'])
- print(f"Objects were detected in {positive_count}/{len(results)}({positive_count/len(results)*100:.2f}%) clip(s).")
-
-if __name__ == '__main__':
- process()
\ No newline at end of file
| diff --git a/frigate/test/__init__.py b/frigate/test/__init__.py
new file mode 100644
diff --git a/frigate/test/test_config.py b/frigate/test/test_config.py
new file mode 100644
--- /dev/null
+++ b/frigate/test/test_config.py
@@ -0,0 +1,342 @@
+import json
+from unittest import TestCase, main
+import voluptuous as vol
+from frigate.config import FRIGATE_CONFIG_SCHEMA, FrigateConfig
+
+class TestConfig(TestCase):
+ def setUp(self):
+ self.minimal = {
+ 'mqtt': {
+ 'host': 'mqtt'
+ },
+ 'cameras': {
+ 'back': {
+ 'ffmpeg': {
+ 'inputs': [
+ { 'path': 'rtsp://10.0.0.1:554/video', 'roles': ['detect'] }
+ ]
+ },
+ 'height': 1080,
+ 'width': 1920
+ }
+ }
+ }
+ def test_empty(self):
+ FRIGATE_CONFIG_SCHEMA({})
+
+ def test_minimal(self):
+ FRIGATE_CONFIG_SCHEMA(self.minimal)
+
+ def test_config_class(self):
+ FrigateConfig(config=self.minimal)
+
+ def test_inherit_tracked_objects(self):
+ config = {
+ 'mqtt': {
+ 'host': 'mqtt'
+ },
+ 'objects': {
+ 'track': ['person', 'dog']
+ },
+ 'cameras': {
+ 'back': {
+ 'ffmpeg': {
+ 'inputs': [
+ { 'path': 'rtsp://10.0.0.1:554/video', 'roles': ['detect'] }
+ ]
+ },
+ 'height': 1080,
+ 'width': 1920
+ }
+ }
+ }
+ frigate_config = FrigateConfig(config=config)
+ assert('dog' in frigate_config.cameras['back'].objects.track)
+
+ def test_override_tracked_objects(self):
+ config = {
+ 'mqtt': {
+ 'host': 'mqtt'
+ },
+ 'objects': {
+ 'track': ['person', 'dog']
+ },
+ 'cameras': {
+ 'back': {
+ 'ffmpeg': {
+ 'inputs': [
+ { 'path': 'rtsp://10.0.0.1:554/video', 'roles': ['detect'] }
+ ]
+ },
+ 'height': 1080,
+ 'width': 1920,
+ 'objects': {
+ 'track': ['cat']
+ }
+ }
+ }
+ }
+ frigate_config = FrigateConfig(config=config)
+ assert('cat' in frigate_config.cameras['back'].objects.track)
+
+ def test_default_object_filters(self):
+ config = {
+ 'mqtt': {
+ 'host': 'mqtt'
+ },
+ 'objects': {
+ 'track': ['person', 'dog']
+ },
+ 'cameras': {
+ 'back': {
+ 'ffmpeg': {
+ 'inputs': [
+ { 'path': 'rtsp://10.0.0.1:554/video', 'roles': ['detect'] }
+ ]
+ },
+ 'height': 1080,
+ 'width': 1920
+ }
+ }
+ }
+ frigate_config = FrigateConfig(config=config)
+ assert('dog' in frigate_config.cameras['back'].objects.filters)
+
+ def test_inherit_object_filters(self):
+ config = {
+ 'mqtt': {
+ 'host': 'mqtt'
+ },
+ 'objects': {
+ 'track': ['person', 'dog'],
+ 'filters': {
+ 'dog': {
+ 'threshold': 0.7
+ }
+ }
+ },
+ 'cameras': {
+ 'back': {
+ 'ffmpeg': {
+ 'inputs': [
+ { 'path': 'rtsp://10.0.0.1:554/video', 'roles': ['detect'] }
+ ]
+ },
+ 'height': 1080,
+ 'width': 1920
+ }
+ }
+ }
+ frigate_config = FrigateConfig(config=config)
+ assert('dog' in frigate_config.cameras['back'].objects.filters)
+ assert(frigate_config.cameras['back'].objects.filters['dog'].threshold == 0.7)
+
+ def test_override_object_filters(self):
+ config = {
+ 'mqtt': {
+ 'host': 'mqtt'
+ },
+ 'cameras': {
+ 'back': {
+ 'ffmpeg': {
+ 'inputs': [
+ { 'path': 'rtsp://10.0.0.1:554/video', 'roles': ['detect'] }
+ ]
+ },
+ 'height': 1080,
+ 'width': 1920,
+ 'objects': {
+ 'track': ['person', 'dog'],
+ 'filters': {
+ 'dog': {
+ 'threshold': 0.7
+ }
+ }
+ }
+ }
+ }
+ }
+ frigate_config = FrigateConfig(config=config)
+ assert('dog' in frigate_config.cameras['back'].objects.filters)
+ assert(frigate_config.cameras['back'].objects.filters['dog'].threshold == 0.7)
+
+ def test_ffmpeg_params(self):
+ config = {
+ 'ffmpeg': {
+ 'input_args': ['-re']
+ },
+ 'mqtt': {
+ 'host': 'mqtt'
+ },
+ 'cameras': {
+ 'back': {
+ 'ffmpeg': {
+ 'inputs': [
+ { 'path': 'rtsp://10.0.0.1:554/video', 'roles': ['detect'] }
+ ]
+ },
+ 'height': 1080,
+ 'width': 1920,
+ 'objects': {
+ 'track': ['person', 'dog'],
+ 'filters': {
+ 'dog': {
+ 'threshold': 0.7
+ }
+ }
+ }
+ }
+ }
+ }
+ frigate_config = FrigateConfig(config=config)
+ assert('-re' in frigate_config.cameras['back'].ffmpeg_cmds[0]['cmd'])
+
+ def test_inherit_clips_retention(self):
+ config = {
+ 'mqtt': {
+ 'host': 'mqtt'
+ },
+ 'clips': {
+ 'retain': {
+ 'default': 20,
+ 'objects': {
+ 'person': 30
+ }
+ }
+ },
+ 'cameras': {
+ 'back': {
+ 'ffmpeg': {
+ 'inputs': [
+ { 'path': 'rtsp://10.0.0.1:554/video', 'roles': ['detect'] }
+ ]
+ },
+ 'height': 1080,
+ 'width': 1920
+ }
+ }
+ }
+ frigate_config = FrigateConfig(config=config)
+ assert(frigate_config.cameras['back'].clips.retain.objects['person'] == 30)
+
+ def test_roles_listed_twice_throws_error(self):
+ config = {
+ 'mqtt': {
+ 'host': 'mqtt'
+ },
+ 'clips': {
+ 'retain': {
+ 'default': 20,
+ 'objects': {
+ 'person': 30
+ }
+ }
+ },
+ 'cameras': {
+ 'back': {
+ 'ffmpeg': {
+ 'inputs': [
+ { 'path': 'rtsp://10.0.0.1:554/video', 'roles': ['detect'] },
+ { 'path': 'rtsp://10.0.0.1:554/video2', 'roles': ['detect'] }
+ ]
+ },
+ 'height': 1080,
+ 'width': 1920
+ }
+ }
+ }
+ self.assertRaises(vol.MultipleInvalid, lambda: FrigateConfig(config=config))
+
+ def test_zone_matching_camera_name_throws_error(self):
+ config = {
+ 'mqtt': {
+ 'host': 'mqtt'
+ },
+ 'clips': {
+ 'retain': {
+ 'default': 20,
+ 'objects': {
+ 'person': 30
+ }
+ }
+ },
+ 'cameras': {
+ 'back': {
+ 'ffmpeg': {
+ 'inputs': [
+ { 'path': 'rtsp://10.0.0.1:554/video', 'roles': ['detect'] }
+ ]
+ },
+ 'height': 1080,
+ 'width': 1920,
+ 'zones': {
+ 'back': {
+ 'coordinates': '1,1,1,1,1,1'
+ }
+ }
+ }
+ }
+ }
+ self.assertRaises(vol.MultipleInvalid, lambda: FrigateConfig(config=config))
+
+ def test_clips_should_default_to_global_objects(self):
+ config = {
+ 'mqtt': {
+ 'host': 'mqtt'
+ },
+ 'clips': {
+ 'retain': {
+ 'default': 20,
+ 'objects': {
+ 'person': 30
+ }
+ }
+ },
+ 'objects': {
+ 'track': ['person', 'dog']
+ },
+ 'cameras': {
+ 'back': {
+ 'ffmpeg': {
+ 'inputs': [
+ { 'path': 'rtsp://10.0.0.1:554/video', 'roles': ['detect'] }
+ ]
+ },
+ 'height': 1080,
+ 'width': 1920,
+ 'clips': {
+ 'enabled': True
+ }
+ }
+ }
+ }
+ config = FrigateConfig(config=config)
+ assert(config.cameras['back'].clips.objects is None)
+
+ def test_role_assigned_but_not_enabled(self):
+ json_config = {
+ 'mqtt': {
+ 'host': 'mqtt'
+ },
+ 'cameras': {
+ 'back': {
+ 'ffmpeg': {
+ 'inputs': [
+ { 'path': 'rtsp://10.0.0.1:554/video', 'roles': ['detect', 'rtmp'] },
+ { 'path': 'rtsp://10.0.0.1:554/record', 'roles': ['record'] }
+ ]
+ },
+ 'height': 1080,
+ 'width': 1920
+ }
+ }
+ }
+
+ config = FrigateConfig(config=json_config)
+ ffmpeg_cmds = config.cameras['back'].ffmpeg_cmds
+ assert(len(ffmpeg_cmds) == 1)
+ assert(not 'clips' in ffmpeg_cmds[0]['roles'])
+
+
+if __name__ == '__main__':
+ main(verbosity=2)
diff --git a/frigate/test/test_yuv_region_2_rgb.py b/frigate/test/test_yuv_region_2_rgb.py
new file mode 100644
--- /dev/null
+++ b/frigate/test/test_yuv_region_2_rgb.py
@@ -0,0 +1,39 @@
+import cv2
+import numpy as np
+from unittest import TestCase, main
+from frigate.util import yuv_region_2_rgb
+
+class TestYuvRegion2RGB(TestCase):
+ def setUp(self):
+ self.bgr_frame = np.zeros((100, 200, 3), np.uint8)
+ self.bgr_frame[:] = (0, 0, 255)
+ self.bgr_frame[5:55, 5:55] = (255,0,0)
+ # cv2.imwrite(f"bgr_frame.jpg", self.bgr_frame)
+ self.yuv_frame = cv2.cvtColor(self.bgr_frame, cv2.COLOR_BGR2YUV_I420)
+
+ def test_crop_yuv(self):
+ cropped = yuv_region_2_rgb(self.yuv_frame, (10,10,50,50))
+ # ensure the upper left pixel is blue
+ assert(np.all(cropped[0, 0] == [0, 0, 255]))
+
+ def test_crop_yuv_out_of_bounds(self):
+ cropped = yuv_region_2_rgb(self.yuv_frame, (0,0,200,200))
+ # cv2.imwrite(f"cropped.jpg", cv2.cvtColor(cropped, cv2.COLOR_RGB2BGR))
+ # ensure the upper left pixel is red
+ # the yuv conversion has some noise
+ assert(np.all(cropped[0, 0] == [255, 1, 0]))
+ # ensure the bottom right is black
+ assert(np.all(cropped[199, 199] == [0, 0, 0]))
+
+ def test_crop_yuv_portrait(self):
+ bgr_frame = np.zeros((1920, 1080, 3), np.uint8)
+ bgr_frame[:] = (0, 0, 255)
+ bgr_frame[5:55, 5:55] = (255,0,0)
+ # cv2.imwrite(f"bgr_frame.jpg", self.bgr_frame)
+ yuv_frame = cv2.cvtColor(bgr_frame, cv2.COLOR_BGR2YUV_I420)
+
+ cropped = yuv_region_2_rgb(yuv_frame, (0, 852, 648, 1500))
+ # cv2.imwrite(f"cropped.jpg", cv2.cvtColor(cropped, cv2.COLOR_RGB2BGR))
+
+if __name__ == '__main__':
+ main(verbosity=2)
\ No newline at end of file
| [0.8.0-rc3] /api/events/<id>/thumbnail.jpg - not cropped during event + missing bounding box
**Describe the bug**
When the event is in progress, the thumbnail endpoint shows the full image and is missing the bounding box.
After the event has ended, the image is replaced by a cropped version.
It would be nice if we could also have an option to draw the bounding box, as was possible before with the `/api/events/<id>/snapshot.jpg` endpoint, and show this in the Media browser in HA (guessing it uses the same endpoint).
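To make the comparison easier to reproduce, here is a minimal sketch that polls the endpoint from the title while an event is active, so the images before and after the event ends can be compared side by side (the host, port, and event id are placeholders, not taken from this report):
```python
import time

import requests

FRIGATE = "http://frigate:5000"   # placeholder host/port
EVENT_ID = "<active-event-id>"    # placeholder event id

# Save the thumbnail a few times while the event is in progress and once more
# after it ends, so the crop and bounding box behavior can be compared.
for i in range(5):
    resp = requests.get(f"{FRIGATE}/api/events/{EVENT_ID}/thumbnail.jpg", timeout=10)
    if resp.ok:
        with open(f"thumbnail_{i}.jpg", "wb") as f:
            f.write(resp.content)
    time.sleep(10)
```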
**Version of frigate**
0.8.0-rc3
**Config file**
```yaml
detectors:
coral:
type: edgetpu
device: usb
# Required: mqtt configuration
mqtt:
host: 10.10.1.10
port: 1883
topic_prefix: frigate
client_id: frigate
user: mqtt_frigate
password: mqtt_pass_redacted
# Optional: Global configuration for saving clips
clips:
max_seconds: 300
tmpfs_cache_size: 512m
retain:
default: 7
objects:
person: 7
ffmpeg:
hwaccel_args:
- -hwaccel
- vaapi
- -hwaccel_device
- /dev/dri/renderD128
- -hwaccel_output_format
- yuv420p
motion:
threshold: 35
contour_area: 100
delta_alpha: 0.15
frame_alpha: 0.3
objects:
track:
- person
filters:
person:
min_area: 2000
max_area: 1000000
min_score: 0.55
detect:
max_disappeared: 25
cameras:
# Required: name of the camera
entry:
# Required: ffmpeg settings for the camera
ffmpeg:
inputs:
- path: rtsp://user:[email protected]/third
roles:
- detect
- clips
height: 1080
width: 1920
fps: 5
clips:
enabled: True
pre_capture: 3
post_capture: 3
snapshots:
enabled: True
timestamp: True
bounding_box: True
height: 1080
mqtt:
enabled: True
timestamp: True
bounding_box: True
crop: True
height: 400
rtmp:
enabled: False
motion:
mask: 4,242,6,5,968,5,969,80,654,139
zones:
at_door:
coordinates: 823,890,673,597,686,376,808,241,986,210,1091,37,1232,75,1420,144,1782,309,1656,636,1539,892,1465,973,1056,977
getting_to_door:
coordinates: 75,1068,50,792,65,500,269,344,476,258,664,248,818,225,788,249,683,373,669,607,818,894,1063,983,1273,1078,688,1072,311,1066
objects:
track:
- person
frontyard:
ffmpeg:
inputs:
- path: rtsp://user:[email protected]/third
roles:
- detect
- clips
height: 1080
width: 1920
fps: 5
rtmp:
enabled: False
clips:
enabled: True
pre_capture: 3
post_capture: 3
snapshots:
enabled: True
timestamp: True
bounding_box: True
height: 1080
mqtt:
enabled: True
timestamp: True
bounding_box: True
crop: True
height: 400
motion:
mask: 966,63,969,3,1912,8,1919,236,1756,195,1531,141,1324,95,1166,71
zones:
driveway:
coordinates: 6,1072,18,451,147,365,528,307,835,292,1154,288,1495,293,1848,363,1897,456,1902,529,1916,722,1897,919,1895,1055,1297,1066,523,1066
outside_road:
coordinates: 4,363,8,239,13,124,160,112,428,86,688,58,893,66,1105,78,1241,119,1327,195,1268,258,1034,297,727,298,447,297,276,317
objects:
track:
- person
east:
ffmpeg:
inputs:
- path: rtsp://user:[email protected]/third
roles:
- detect
- clips
height: 1080
width: 1920
fps: 5
rtmp:
enabled: False
clips:
enabled: True
pre_capture: 3
post_capture: 3
snapshots:
enabled: True
timestamp: True
bounding_box: True
height: 1080
mqtt:
enabled: True
timestamp: True
bounding_box: True
crop: True
height: 400
zones:
east_close:
coordinates: 900,1075,913,115,1071,47,1222,0,1354,12,1919,15,1919,463,1919,804,1919,1075,1816,1078,1722,1075,1283,1072
objects:
track:
- person
north:
ffmpeg:
inputs:
- path: rtsp://user:[email protected]/third
roles:
- detect
- clips
height: 1080
width: 1920
fps: 5
motion:
mask: 1,381,4,0,929,0,930,58
rtmp:
enabled: False
clips:
enabled: True
pre_capture: 3
post_capture: 3
snapshots:
enabled: True
timestamp: True
bounding_box: True
height: 1080
mqtt:
enabled: True
timestamp: True
bounding_box: True
crop: True
height: 400
objects:
track:
- person
```
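To double-check the snapshot settings in play here, a minimal sketch that just loads the YAML above and prints each camera's `snapshots` section (the file path is a placeholder):
```python
import yaml

# Placeholder path to the configuration shown above.
with open("config.yml") as f:
    cfg = yaml.safe_load(f)

# Every camera in this config enables snapshots with bounding_box: True,
# which is what makes the missing box on thumbnail.jpg surprising.
for name, camera in cfg["cameras"].items():
    print(name, camera.get("snapshots", {}))
```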
**Logs**
```
Include relevant log output here
```
**Frigate debug stats**
```json
{
detection_fps: 0,
detectors: {
coral: {
detection_start: 0,
inference_speed: 12.64,
pid: 34
}
},
east: {
camera_fps: 5.2,
capture_pid: 46,
detection_fps: 0,
pid: 38,
process_fps: 5.2,
skipped_fps: 0
},
entry: {
camera_fps: 5.1,
capture_pid: 42,
detection_fps: 0,
pid: 36,
process_fps: 5.1,
skipped_fps: 0
},
frontyard: {
camera_fps: 5.2,
capture_pid: 43,
detection_fps: 0,
pid: 37,
process_fps: 5.2,
skipped_fps: 0
},
north: {
camera_fps: 4.9,
capture_pid: 47,
detection_fps: 0,
pid: 40,
process_fps: 4.9,
skipped_fps: 0
},
service: {
uptime: 1573,
version: "0.8.0-7b4e510"
}
}
```
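The stats above can also be pulled programmatically; a sketch, assuming the stats route is served under the same `/api` prefix as the event endpoints (host and port are placeholders):
```python
import requests

FRIGATE = "http://frigate:5000"  # placeholder host/port

# /stats is defined in frigate/http.py; the /api prefix is an assumption
# based on the event endpoints referenced in the title.
stats = requests.get(f"{FRIGATE}/api/stats", timeout=10).json()

for name, value in stats.items():
    if isinstance(value, dict) and "camera_fps" in value:
        print(f"{name}: camera {value['camera_fps']} fps, process {value['process_fps']} fps")
```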
**FFprobe from your camera**
Run the following command and paste output below
```
ffprobe version 4.3.1 Copyright (c) 2007-2020 the FFmpeg developers
built with gcc 9 (Ubuntu 9.3.0-17ubuntu1~20.04)
  configuration: --disable-debug --disable-doc --disable-ffplay --enable-shared --enable-avresample --enable-libopencore-amrnb --enable-libopencore-amrwb --enable-gpl --enable-libfreetype --enable-libvidstab --enable-libmfx --enable-libmp3lame --enable-libopus --enable-libtheora --enable-libvorbis --enable-libvpx --enable-libwebp --enable-libxcb --enable-libx265 --enable-libxvid --enable-libx264 --enable-nonfree --enable-openssl --enable-libfdk_aac --enable-postproc --enable-small --enable-version3 --enable-libzmq --extra-libs=-ldl --prefix=/opt/ffmpeg --enable-libopenjpeg --enable-libkvazaar --enable-libaom --extra-libs=-lpthread --enable-vaapi --extra-cflags=-I/opt/ffmpeg/include --extra-ldflags=-L/opt/ffmpeg/lib
libavutil 56. 51.100 / 56. 51.100
libavcodec 58. 91.100 / 58. 91.100
libavformat 58. 45.100 / 58. 45.100
libavdevice 58. 10.100 / 58. 10.100
libavfilter 7. 85.100 / 7. 85.100
libavresample 4. 0. 0 / 4. 0. 0
libswscale 5. 7.100 / 5. 7.100
libswresample 3. 7.100 / 3. 7.100
libpostproc 55. 7.100 / 55. 7.100
Input #0, rtsp, from 'rtsp://user:[email protected]/third':
Metadata:
title : RTSP/RTP stream from IPNC
comment : third
Duration: N/A, start: 0.000000, bitrate: N/A
Stream #0:0: Video: h264, yuvj420p(pc, bt709, progressive), 1920x1080, 25 tbr, 90k tbn, 180k tbc
Stream #0:1: Audio: pcm_mulaw, 8000 Hz, 1 channels, s16, 64 kb/s
Stream #0:2: Data: none
Unsupported codec with id 0 for input stream 2
```
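The same stream information can be collected as JSON using the ffprobe invocation style of `get_frame_shape()` in `frigate/process_clip.py`; a sketch with a placeholder RTSP URL:
```python
import json
import subprocess as sp

SOURCE = "rtsp://user:pass@camera/third"  # placeholder RTSP URL

# Ask ffprobe for stream metadata as JSON instead of the human-readable dump,
# mirroring the arguments used by get_frame_shape() in frigate/process_clip.py.
cmd = ["ffprobe", "-v", "panic", "-show_error", "-show_streams", "-of", "json", SOURCE]
info = json.loads(sp.run(cmd, capture_output=True, check=True).stdout)

video = next(s for s in info["streams"] if s["codec_type"] == "video")
print(video["codec_name"], f"{video['width']}x{video['height']}")
```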
**Screenshots**
![chrome_2021-01-21_14-03-04](https://user-images.githubusercontent.com/6452373/105356414-a8141100-5bf3-11eb-9ca1-319e64e3c381.jpg)
![chrome_2021-01-21_14-17-17](https://user-images.githubusercontent.com/6452373/105356420-a9ddd480-5bf3-11eb-9a2a-3fad95e3b299.png)
**Computer Hardware**
- OS: Synology DSM 6.2
- Virtualization: Docker directly on DSM
- Coral Version: USB
- Network Setup: Wired
**Camera Info:**
- Manufacturer: Milesight
- Model: MS-C5373-PB
- Resolution: 1080p
- FPS: 5
**Additional context**
| 2020-12-06T18:40:59Z | [] | [] |
|
blakeblackshear/frigate | 408 | blakeblackshear__frigate-408 | [
"322"
] | a54fbc483cec27b0c4680e79dd21fe04adfab3a8 | diff --git a/frigate/app.py b/frigate/app.py
--- a/frigate/app.py
+++ b/frigate/app.py
@@ -103,7 +103,7 @@ def init_queues(self):
self.detected_frames_queue = mp.Queue(maxsize=len(self.config.cameras.keys())*2)
def init_database(self):
- self.db = SqliteExtDatabase(f"/{os.path.join(CLIPS_DIR, 'frigate.db')}")
+ self.db = SqliteExtDatabase(self.config.database.path)
models = [Event]
self.db.bind(models)
self.db.create_tables(models, safe=True)
@@ -125,9 +125,9 @@ def start_detectors(self):
for name, detector in self.config.detectors.items():
if detector.type == 'cpu':
- self.detectors[name] = EdgeTPUProcess(name, self.detection_queue, self.detection_out_events, model_shape, tf_device='cpu')
+ self.detectors[name] = EdgeTPUProcess(name, self.detection_queue, self.detection_out_events, model_shape, 'cpu', detector.num_threads)
if detector.type == 'edgetpu':
- self.detectors[name] = EdgeTPUProcess(name, self.detection_queue, self.detection_out_events, model_shape, tf_device=detector.device)
+ self.detectors[name] = EdgeTPUProcess(name, self.detection_queue, self.detection_out_events, model_shape, detector.device, detector.num_threads)
def start_detected_frames_processor(self):
self.detected_frames_processor = TrackedObjectProcessor(self.config, self.mqtt_client, self.config.mqtt.topic_prefix,
diff --git a/frigate/config.py b/frigate/config.py
--- a/frigate/config.py
+++ b/frigate/config.py
@@ -15,7 +15,8 @@
{
vol.Required(str): {
vol.Required('type', default='edgetpu'): vol.In(['cpu', 'edgetpu']),
- vol.Optional('device', default='usb'): str
+ vol.Optional('device', default='usb'): str,
+ vol.Optional('num_threads', default=3): int
}
}
)
@@ -84,12 +85,28 @@
}
)
+MOTION_SCHEMA = vol.Schema(
+ {
+ 'threshold': vol.Range(min=1, max=255),
+ 'contour_area': int,
+ 'delta_alpha': float,
+ 'frame_alpha': float,
+ 'frame_height': int
+ }
+)
+
+DETECT_SCHEMA = vol.Schema(
+ {
+ 'max_disappeared': int
+ }
+)
+
FILTER_SCHEMA = vol.Schema(
{
str: {
vol.Optional('min_area', default=0): int,
vol.Optional('max_area', default=24000000): int,
- vol.Optional('threshold', default=0.85): float
+ vol.Optional('threshold', default=0.7): float
}
}
)
@@ -109,16 +126,6 @@ def filters_for_all_tracked_objects(object_config):
}
))
-DEFAULT_CAMERA_SAVE_CLIPS = {
- 'enabled': False
-}
-DEFAULT_CAMERA_SNAPSHOTS = {
- 'show_timestamp': True,
- 'draw_zones': False,
- 'draw_bounding_boxes': True,
- 'crop_to_region': True
-}
-
def each_role_used_once(inputs):
roles = [role for i in inputs for role in i['roles']]
roles_set = set(roles)
@@ -158,7 +165,7 @@ def ensure_zones_and_cameras_have_different_names(cameras):
vol.Required('height'): int,
vol.Required('width'): int,
'fps': int,
- 'mask': str,
+ 'mask': vol.Any(str, [str]),
vol.Optional('best_image_timeout', default=60): int,
vol.Optional('zones', default={}): {
str: {
@@ -166,9 +173,10 @@ def ensure_zones_and_cameras_have_different_names(cameras):
vol.Optional('filters', default={}): FILTER_SCHEMA
}
},
- vol.Optional('save_clips', default=DEFAULT_CAMERA_SAVE_CLIPS): {
+ vol.Optional('save_clips', default={}): {
vol.Optional('enabled', default=False): bool,
- vol.Optional('pre_capture', default=30): int,
+ vol.Optional('pre_capture', default=5): int,
+ vol.Optional('post_capture', default=5): int,
'objects': [str],
vol.Optional('retain', default={}): SAVE_CLIPS_RETAIN_SCHEMA,
},
@@ -179,20 +187,25 @@ def ensure_zones_and_cameras_have_different_names(cameras):
vol.Optional('rtmp', default={}): {
vol.Required('enabled', default=True): bool,
},
- vol.Optional('snapshots', default=DEFAULT_CAMERA_SNAPSHOTS): {
+ vol.Optional('snapshots', default={}): {
vol.Optional('show_timestamp', default=True): bool,
vol.Optional('draw_zones', default=False): bool,
vol.Optional('draw_bounding_boxes', default=True): bool,
vol.Optional('crop_to_region', default=True): bool,
vol.Optional('height', default=175): int
},
- 'objects': OBJECTS_SCHEMA
+ 'objects': OBJECTS_SCHEMA,
+ vol.Optional('motion', default={}): MOTION_SCHEMA,
+ vol.Optional('detect', default={}): DETECT_SCHEMA
}
}, vol.Msg(ensure_zones_and_cameras_have_different_names, msg='Zones cannot share names with cameras'))
)
FRIGATE_CONFIG_SCHEMA = vol.Schema(
{
+ vol.Optional('database', default={}): {
+ vol.Optional('path', default=os.path.join(CLIPS_DIR, 'frigate.db')): str
+ },
vol.Optional('model', default={'width': 320, 'height': 320}): {
vol.Required('width'): int,
vol.Required('height'): int
@@ -210,10 +223,25 @@ def ensure_zones_and_cameras_have_different_names(cameras):
},
vol.Optional('ffmpeg', default={}): GLOBAL_FFMPEG_SCHEMA,
vol.Optional('objects', default={}): OBJECTS_SCHEMA,
+ vol.Optional('motion', default={}): MOTION_SCHEMA,
+ vol.Optional('detect', default={}): DETECT_SCHEMA,
vol.Required('cameras', default={}): CAMERAS_SCHEMA
}
)
+class DatabaseConfig():
+ def __init__(self, config):
+ self._path = config['path']
+
+ @property
+ def path(self):
+ return self._path
+
+ def to_dict(self):
+ return {
+ 'path': self.path
+ }
+
class ModelConfig():
def __init__(self, config):
self._width = config['width']
@@ -237,6 +265,7 @@ class DetectorConfig():
def __init__(self, config):
self._type = config['type']
self._device = config['device']
+ self._num_threads = config['num_threads']
@property
def type(self):
@@ -246,10 +275,15 @@ def type(self):
def device(self):
return self._device
+ @property
+ def num_threads(self):
+ return self._num_threads
+
def to_dict(self):
return {
'type': self.type,
- 'device': self.device
+ 'device': self.device,
+ 'num_threads': self.num_threads
}
class LoggerConfig():
@@ -505,6 +539,7 @@ class CameraSaveClipsConfig():
def __init__(self, global_config, config):
self._enabled = config['enabled']
self._pre_capture = config['pre_capture']
+ self._post_capture = config['post_capture']
self._objects = config.get('objects', global_config['objects']['track'])
self._retain = SaveClipsRetainConfig(global_config['save_clips']['retain'], config['retain'])
@@ -515,6 +550,10 @@ def enabled(self):
@property
def pre_capture(self):
return self._pre_capture
+
+ @property
+ def post_capture(self):
+ return self._post_capture
@property
def objects(self):
@@ -528,6 +567,7 @@ def to_dict(self):
return {
'enabled': self.enabled,
'pre_capture': self.pre_capture,
+ 'post_capture': self.post_capture,
'objects': self.objects,
'retain': self.retain.to_dict()
}
@@ -545,6 +585,58 @@ def to_dict(self):
'enabled': self.enabled,
}
+class MotionConfig():
+ def __init__(self, global_config, config, camera_height: int):
+ self._threshold = config.get('threshold', global_config.get('threshold', 25))
+ self._contour_area = config.get('contour_area', global_config.get('contour_area', 100))
+ self._delta_alpha = config.get('delta_alpha', global_config.get('delta_alpha', 0.2))
+ self._frame_alpha = config.get('frame_alpha', global_config.get('frame_alpha', 0.2))
+ self._frame_height = config.get('frame_height', global_config.get('frame_height', camera_height//6))
+
+ @property
+ def threshold(self):
+ return self._threshold
+
+ @property
+ def contour_area(self):
+ return self._contour_area
+
+ @property
+ def delta_alpha(self):
+ return self._delta_alpha
+
+ @property
+ def frame_alpha(self):
+ return self._frame_alpha
+
+ @property
+ def frame_height(self):
+ return self._frame_height
+
+ def to_dict(self):
+ return {
+ 'threshold': self.threshold,
+ 'contour_area': self.contour_area,
+ 'delta_alpha': self.delta_alpha,
+ 'frame_alpha': self.frame_alpha,
+ 'frame_height': self.frame_height,
+ }
+
+
+
+class DetectConfig():
+ def __init__(self, global_config, config, camera_fps):
+ self._max_disappeared = config.get('max_disappeared', global_config.get('max_disappeared', camera_fps*2))
+
+ @property
+ def max_disappeared(self):
+ return self._max_disappeared
+
+ def to_dict(self):
+ return {
+ 'max_disappeared': self._max_disappeared,
+ }
+
class ZoneConfig():
def __init__(self, name, config):
self._coordinates = config['coordinates']
@@ -607,6 +699,8 @@ def __init__(self, name, config, global_config):
self._rtmp = CameraRtmpConfig(global_config, config['rtmp'])
self._snapshots = CameraSnapshotsConfig(config['snapshots'])
self._objects = ObjectConfig(global_config['objects'], config.get('objects', {}))
+ self._motion = MotionConfig(global_config['motion'], config['motion'], self._height)
+ self._detect = DetectConfig(global_config['detect'], config['detect'], config.get('fps', 5))
self._ffmpeg_cmds = []
for ffmpeg_input in self._ffmpeg.inputs:
@@ -623,28 +717,28 @@ def __init__(self, name, config, global_config):
self._set_zone_colors(self._zones)
def _create_mask(self, mask):
- if mask:
- if mask.startswith('base64,'):
- img = base64.b64decode(mask[7:])
- np_img = np.fromstring(img, dtype=np.uint8)
- mask_img = cv2.imdecode(np_img, cv2.IMREAD_GRAYSCALE)
- elif mask.startswith('poly,'):
- points = mask.split(',')[1:]
- contour = np.array([[int(points[i]), int(points[i+1])] for i in range(0, len(points), 2)])
- mask_img = np.zeros(self.frame_shape, np.uint8)
- mask_img[:] = 255
- cv2.fillPoly(mask_img, pts=[contour], color=(0))
- else:
- mask_img = cv2.imread(f"/config/{mask}", cv2.IMREAD_GRAYSCALE)
- else:
- mask_img = None
+ mask_img = np.zeros(self.frame_shape, np.uint8)
+ mask_img[:] = 255
- if mask_img is None or mask_img.size == 0:
- mask_img = np.zeros(self.frame_shape, np.uint8)
- mask_img[:] = 255
+ if isinstance(mask, list):
+ for m in mask:
+ self._add_mask(m, mask_img)
+
+ elif isinstance(mask, str):
+ self._add_mask(mask, mask_img)
return mask_img
-
+
+ def _add_mask(self, mask, mask_img):
+ if mask.startswith('poly,'):
+ points = mask.split(',')[1:]
+ contour = np.array([[int(points[i]), int(points[i+1])] for i in range(0, len(points), 2)])
+ cv2.fillPoly(mask_img, pts=[contour], color=(0))
+ else:
+ mask_file = cv2.imread(f"/config/{mask}", cv2.IMREAD_GRAYSCALE)
+ if not mask_file.size == 0:
+ mask_img[np.where(mask_file==[0])] = [0]
+
def _get_ffmpeg_cmd(self, ffmpeg_input):
ffmpeg_output_args = []
if 'detect' in ffmpeg_input.roles:
@@ -668,12 +762,14 @@ def _get_ffmpeg_cmd(self, ffmpeg_input):
if len(ffmpeg_output_args) == 0:
return None
- return (['ffmpeg'] +
+ cmd = (['ffmpeg'] +
ffmpeg_input.global_args +
ffmpeg_input.hwaccel_args +
ffmpeg_input.input_args +
['-i', ffmpeg_input.path] +
ffmpeg_output_args)
+
+ return [part for part in cmd if part != '']
def _set_zone_colors(self, zones: Dict[str, ZoneConfig]):
# set colors for zones
@@ -737,6 +833,14 @@ def snapshots(self):
@property
def objects(self):
return self._objects
+
+ @property
+ def motion(self):
+ return self._motion
+
+ @property
+ def detect(self):
+ return self._detect
@property
def frame_shape(self):
@@ -763,6 +867,8 @@ def to_dict(self):
'rtmp': self.rtmp.to_dict(),
'snapshots': self.snapshots.to_dict(),
'objects': self.objects.to_dict(),
+ 'motion': self.motion.to_dict(),
+ 'detect': self.detect.to_dict(),
'frame_shape': self.frame_shape,
'ffmpeg_cmds': [{'roles': c['roles'], 'cmd': ' '.join(c['cmd'])} for c in self.ffmpeg_cmds],
}
@@ -779,6 +885,7 @@ def __init__(self, config_file=None, config=None):
config = self._sub_env_vars(config)
+ self._database = DatabaseConfig(config['database'])
self._model = ModelConfig(config['model'])
self._detectors = { name: DetectorConfig(d) for name, d in config['detectors'].items() }
self._mqtt = MqttConfig(config['mqtt'])
@@ -811,6 +918,7 @@ def _load_file(self, config_file):
def to_dict(self):
return {
+ 'database': self.database.to_dict(),
'model': self.model.to_dict(),
'detectors': {k: d.to_dict() for k, d in self.detectors.items()},
'mqtt': self.mqtt.to_dict(),
@@ -819,6 +927,10 @@ def to_dict(self):
'logger': self.logger.to_dict()
}
+ @property
+ def database(self):
+ return self._database
+
@property
def model(self):
return self._model
diff --git a/frigate/edgetpu.py b/frigate/edgetpu.py
--- a/frigate/edgetpu.py
+++ b/frigate/edgetpu.py
@@ -43,7 +43,7 @@ def detect(self, tensor_input, threshold = .4):
pass
class LocalObjectDetector(ObjectDetector):
- def __init__(self, tf_device=None, labels=None):
+ def __init__(self, tf_device=None, num_threads=3, labels=None):
self.fps = EventsPerSecond()
if labels is None:
self.labels = {}
@@ -66,7 +66,7 @@ def __init__(self, tf_device=None, labels=None):
if edge_tpu_delegate is None:
self.interpreter = tflite.Interpreter(
- model_path='/cpu_model.tflite')
+ model_path='/cpu_model.tflite', num_threads=num_threads)
else:
self.interpreter = tflite.Interpreter(
model_path='/edgetpu_model.tflite',
@@ -106,7 +106,7 @@ def detect_raw(self, tensor_input):
return detections
-def run_detector(name: str, detection_queue: mp.Queue, out_events: Dict[str, mp.Event], avg_speed, start, model_shape, tf_device):
+def run_detector(name: str, detection_queue: mp.Queue, out_events: Dict[str, mp.Event], avg_speed, start, model_shape, tf_device, num_threads):
threading.current_thread().name = f"detector:{name}"
logger = logging.getLogger(f"detector.{name}")
logger.info(f"Starting detection process: {os.getpid()}")
@@ -120,7 +120,7 @@ def receiveSignal(signalNumber, frame):
signal.signal(signal.SIGINT, receiveSignal)
frame_manager = SharedMemoryFrameManager()
- object_detector = LocalObjectDetector(tf_device=tf_device)
+ object_detector = LocalObjectDetector(tf_device=tf_device, num_threads=num_threads)
outputs = {}
for name in out_events.keys():
@@ -155,7 +155,7 @@ def receiveSignal(signalNumber, frame):
avg_speed.value = (avg_speed.value*9 + duration)/10
class EdgeTPUProcess():
- def __init__(self, name, detection_queue, out_events, model_shape, tf_device=None):
+ def __init__(self, name, detection_queue, out_events, model_shape, tf_device=None, num_threads=3):
self.name = name
self.out_events = out_events
self.detection_queue = detection_queue
@@ -164,6 +164,7 @@ def __init__(self, name, detection_queue, out_events, model_shape, tf_device=Non
self.detect_process = None
self.model_shape = model_shape
self.tf_device = tf_device
+ self.num_threads = num_threads
self.start_or_restart()
def stop(self):
@@ -179,7 +180,7 @@ def start_or_restart(self):
self.detection_start.value = 0.0
if (not self.detect_process is None) and self.detect_process.is_alive():
self.stop()
- self.detect_process = mp.Process(target=run_detector, name=f"detector:{self.name}", args=(self.name, self.detection_queue, self.out_events, self.avg_inference_speed, self.detection_start, self.model_shape, self.tf_device))
+ self.detect_process = mp.Process(target=run_detector, name=f"detector:{self.name}", args=(self.name, self.detection_queue, self.out_events, self.avg_inference_speed, self.detection_start, self.model_shape, self.tf_device, self.num_threads))
self.detect_process.daemon = True
self.detect_process.start()
diff --git a/frigate/events.py b/frigate/events.py
--- a/frigate/events.py
+++ b/frigate/events.py
@@ -97,18 +97,18 @@ def refresh_cache(self):
del self.cached_clips[f]
os.remove(os.path.join(CACHE_DIR,f))
- def create_clip(self, camera, event_data, pre_capture):
+ def create_clip(self, camera, event_data, pre_capture, post_capture):
# get all clips from the camera with the event sorted
sorted_clips = sorted([c for c in self.cached_clips.values() if c['camera'] == camera], key = lambda i: i['start_time'])
- while sorted_clips[-1]['start_time'] + sorted_clips[-1]['duration'] < event_data['end_time']:
+ while len(sorted_clips) == 0 or sorted_clips[-1]['start_time'] + sorted_clips[-1]['duration'] < event_data['end_time']+post_capture:
time.sleep(5)
self.refresh_cache()
# get all clips from the camera with the event sorted
sorted_clips = sorted([c for c in self.cached_clips.values() if c['camera'] == camera], key = lambda i: i['start_time'])
playlist_start = event_data['start_time']-pre_capture
- playlist_end = event_data['end_time']+5
+ playlist_end = event_data['end_time']+post_capture
playlist_lines = []
for clip in sorted_clips:
# clip ends before playlist start time, skip
@@ -139,6 +139,8 @@ def create_clip(self, camera, event_data, pre_capture):
'-',
'-c',
'copy',
+ '-movflags',
+ '+faststart',
f"{os.path.join(CLIPS_DIR, clip_name)}.mp4"
]
@@ -181,7 +183,7 @@ def run(self):
if event_type == 'end':
if len(self.cached_clips) > 0 and not event_data['false_positive']:
- self.create_clip(camera, event_data, save_clips_config.pre_capture)
+ self.create_clip(camera, event_data, save_clips_config.pre_capture, save_clips_config.post_capture)
Event.create(
id=event_data['id'],
label=event_data['label'],
diff --git a/frigate/http.py b/frigate/http.py
--- a/frigate/http.py
+++ b/frigate/http.py
@@ -13,6 +13,8 @@
from playhouse.shortcuts import model_to_dict
from frigate.models import Event
+from frigate.util import calculate_region
+from frigate.version import VERSION
logger = logging.getLogger(__name__)
@@ -144,6 +146,10 @@ def events():
def config():
return jsonify(current_app.frigate_config.to_dict())
[email protected]('/version')
+def version():
+ return VERSION
+
@bp.route('/stats')
def stats():
camera_metrics = current_app.camera_metrics
@@ -185,7 +191,8 @@ def best(camera_name, label):
crop = bool(request.args.get('crop', 0, type=int))
if crop:
- region = best_object.get('region', [0,0,300,300])
+ box = best_object.get('box', (0,0,300,300))
+ region = calculate_region(best_frame.shape, box[0], box[1], box[2], box[3], 1.1)
best_frame = best_frame[region[1]:region[3], region[0]:region[2]]
height = int(request.args.get('h', str(best_frame.shape[0])))
@@ -203,18 +210,34 @@ def best(camera_name, label):
def mjpeg_feed(camera_name):
fps = int(request.args.get('fps', '3'))
height = int(request.args.get('h', '360'))
+ draw_options = {
+ 'bounding_boxes': request.args.get('bbox', type=int),
+ 'timestamp': request.args.get('timestamp', type=int),
+ 'zones': request.args.get('zones', type=int),
+ 'mask': request.args.get('mask', type=int),
+ 'motion_boxes': request.args.get('motion', type=int),
+ 'regions': request.args.get('regions', type=int),
+ }
if camera_name in current_app.frigate_config.cameras:
# return a multipart response
- return Response(imagestream(current_app.detected_frames_processor, camera_name, fps, height),
+ return Response(imagestream(current_app.detected_frames_processor, camera_name, fps, height, draw_options),
mimetype='multipart/x-mixed-replace; boundary=frame')
else:
return "Camera named {} not found".format(camera_name), 404
@bp.route('/<camera_name>/latest.jpg')
def latest_frame(camera_name):
+ draw_options = {
+ 'bounding_boxes': request.args.get('bbox', type=int),
+ 'timestamp': request.args.get('timestamp', type=int),
+ 'zones': request.args.get('zones', type=int),
+ 'mask': request.args.get('mask', type=int),
+ 'motion_boxes': request.args.get('motion', type=int),
+ 'regions': request.args.get('regions', type=int),
+ }
if camera_name in current_app.frigate_config.cameras:
# max out at specified FPS
- frame = current_app.detected_frames_processor.get_current_frame(camera_name)
+ frame = current_app.detected_frames_processor.get_current_frame(camera_name, draw_options)
if frame is None:
frame = np.zeros((720,1280,3), np.uint8)
@@ -230,11 +253,11 @@ def latest_frame(camera_name):
else:
return "Camera named {} not found".format(camera_name), 404
-def imagestream(detected_frames_processor, camera_name, fps, height):
+def imagestream(detected_frames_processor, camera_name, fps, height, draw_options):
while True:
# max out at specified FPS
time.sleep(1/fps)
- frame = detected_frames_processor.get_current_frame(camera_name, draw=True)
+ frame = detected_frames_processor.get_current_frame(camera_name, draw_options)
if frame is None:
frame = np.zeros((height,int(height*16/9),3), np.uint8)
diff --git a/frigate/motion.py b/frigate/motion.py
--- a/frigate/motion.py
+++ b/frigate/motion.py
@@ -1,13 +1,15 @@
import cv2
import imutils
import numpy as np
+from frigate.config import MotionConfig
class MotionDetector():
- def __init__(self, frame_shape, mask, resize_factor=4):
+ def __init__(self, frame_shape, mask, config: MotionConfig):
+ self.config = config
self.frame_shape = frame_shape
- self.resize_factor = resize_factor
- self.motion_frame_size = (int(frame_shape[0]/resize_factor), int(frame_shape[1]/resize_factor))
+ self.resize_factor = frame_shape[0]/config.frame_height
+ self.motion_frame_size = (config.frame_height, config.frame_height*frame_shape[1]//frame_shape[0])
self.avg_frame = np.zeros(self.motion_frame_size, np.float)
self.avg_delta = np.zeros(self.motion_frame_size, np.float)
self.motion_frame_count = 0
@@ -23,6 +25,8 @@ def detect(self, frame):
# resize frame
resized_frame = cv2.resize(gray, dsize=(self.motion_frame_size[1], self.motion_frame_size[0]), interpolation=cv2.INTER_LINEAR)
+ # TODO: can I improve the contrast of the grayscale image here?
+
# convert to grayscale
# resized_frame = cv2.cvtColor(resized_frame, cv2.COLOR_BGR2GRAY)
@@ -38,14 +42,13 @@ def detect(self, frame):
frameDelta = cv2.absdiff(resized_frame, cv2.convertScaleAbs(self.avg_frame))
# compute the average delta over the past few frames
- # the alpha value can be modified to configure how sensitive the motion detection is.
# higher values mean the current frame impacts the delta a lot, and a single raindrop may
# register as motion, too low and a fast moving person wont be detected as motion
- # this also assumes that a person is in the same location across more than a single frame
- cv2.accumulateWeighted(frameDelta, self.avg_delta, 0.2)
+ cv2.accumulateWeighted(frameDelta, self.avg_delta, self.config.delta_alpha)
# compute the threshold image for the current frame
- current_thresh = cv2.threshold(frameDelta, 25, 255, cv2.THRESH_BINARY)[1]
+ # TODO: threshold
+ current_thresh = cv2.threshold(frameDelta, self.config.threshold, 255, cv2.THRESH_BINARY)[1]
# black out everything in the avg_delta where there isnt motion in the current frame
avg_delta_image = cv2.convertScaleAbs(self.avg_delta)
@@ -53,7 +56,7 @@ def detect(self, frame):
# then look for deltas above the threshold, but only in areas where there is a delta
# in the current frame. this prevents deltas from previous frames from being included
- thresh = cv2.threshold(avg_delta_image, 25, 255, cv2.THRESH_BINARY)[1]
+ thresh = cv2.threshold(avg_delta_image, self.config.threshold, 255, cv2.THRESH_BINARY)[1]
# dilate the thresholded image to fill in holes, then find contours
# on thresholded image
@@ -65,19 +68,18 @@ def detect(self, frame):
for c in cnts:
# if the contour is big enough, count it as motion
contour_area = cv2.contourArea(c)
- if contour_area > 100:
+ if contour_area > self.config.contour_area:
x, y, w, h = cv2.boundingRect(c)
- motion_boxes.append((x*self.resize_factor, y*self.resize_factor, (x+w)*self.resize_factor, (y+h)*self.resize_factor))
+ motion_boxes.append((int(x*self.resize_factor), int(y*self.resize_factor), int((x+w)*self.resize_factor), int((y+h)*self.resize_factor)))
if len(motion_boxes) > 0:
self.motion_frame_count += 1
- # TODO: this really depends on FPS
if self.motion_frame_count >= 10:
- # only average in the current frame if the difference persists for at least 3 frames
- cv2.accumulateWeighted(resized_frame, self.avg_frame, 0.2)
+ # only average in the current frame if the difference persists for a bit
+ cv2.accumulateWeighted(resized_frame, self.avg_frame, self.config.frame_alpha)
else:
# when no motion, just keep averaging the frames together
- cv2.accumulateWeighted(resized_frame, self.avg_frame, 0.2)
+ cv2.accumulateWeighted(resized_frame, self.avg_frame, self.config.frame_alpha)
self.motion_frame_count = 0
return motion_boxes
diff --git a/frigate/object_processing.py b/frigate/object_processing.py
--- a/frigate/object_processing.py
+++ b/frigate/object_processing.py
@@ -20,7 +20,7 @@
from frigate.config import FrigateConfig, CameraConfig
from frigate.const import RECORD_DIR, CLIPS_DIR, CACHE_DIR
from frigate.edgetpu import load_labels
-from frigate.util import SharedMemoryFrameManager, draw_box_with_label
+from frigate.util import SharedMemoryFrameManager, draw_box_with_label, calculate_region
logger = logging.getLogger(__name__)
@@ -73,7 +73,7 @@ def __init__(self, camera, camera_config: CameraConfig, frame_cache, obj_data):
self.top_score = self.computed_score = 0.0
self.thumbnail_data = None
self.frame = None
- self.previous = None
+ self.previous = self.to_dict()
self._snapshot_jpg_time = 0
ret, jpg = cv2.imencode('.jpg', np.zeros((300,300,3), np.uint8))
self._snapshot_jpg = jpg.tobytes()
@@ -99,7 +99,7 @@ def compute_score(self):
return median(scores)
def update(self, current_frame_time, obj_data):
- previous = self.to_dict()
+ significant_update = False
self.obj_data.update(obj_data)
# if the object is not in the current frame, add a 0.0 to the score history
if self.obj_data['frame_time'] != current_frame_time:
@@ -129,7 +129,7 @@ def update(self, current_frame_time, obj_data):
'region': self.obj_data['region'],
'score': self.obj_data['score']
}
- self.previous = previous
+ significant_update = True
# check zones
current_zones = []
@@ -143,8 +143,13 @@ def update(self, current_frame_time, obj_data):
if name in self.current_zones or not zone_filtered(self, zone.filters):
current_zones.append(name)
self.entered_zones.add(name)
-
+
+ # if the zones changed, signal an update
+ if not self.false_positive and set(self.current_zones) != set(current_zones):
+ significant_update = True
+
self.current_zones = current_zones
+ return significant_update
def to_dict(self, include_thumbnail: bool = False):
return {
@@ -187,7 +192,8 @@ def get_jpg_bytes(self):
f"{int(self.thumbnail_data['score']*100)}% {int(self.thumbnail_data['area'])}", thickness=thickness, color=color)
if snapshot_config.crop_to_region:
- region = self.thumbnail_data['region']
+ box = self.thumbnail_data['box']
+ region = calculate_region(best_frame.shape, box[0], box[1], box[2], box[3], 1.1)
best_frame = best_frame[region[1]:region[3], region[0]:region[2]]
if snapshot_config.height:
@@ -250,15 +256,17 @@ def __init__(self, name, config, frame_manager):
self.previous_frame_id = None
self.callbacks = defaultdict(lambda: [])
- def get_current_frame(self, draw=False):
+ def get_current_frame(self, draw_options={}):
with self.current_frame_lock:
frame_copy = np.copy(self._current_frame)
frame_time = self.current_frame_time
tracked_objects = {k: v.to_dict() for k,v in self.tracked_objects.items()}
+ motion_boxes = self.motion_boxes.copy()
+ regions = self.regions.copy()
frame_copy = cv2.cvtColor(frame_copy, cv2.COLOR_YUV2BGR_I420)
# draw on the frame
- if draw:
+ if draw_options.get('bounding_boxes'):
# draw the bounding boxes on the frame
for obj in tracked_objects.values():
thickness = 2
@@ -271,19 +279,28 @@ def get_current_frame(self, draw=False):
# draw the bounding boxes on the frame
box = obj['box']
draw_box_with_label(frame_copy, box[0], box[1], box[2], box[3], obj['label'], f"{int(obj['score']*100)}% {int(obj['area'])}", thickness=thickness, color=color)
- # draw the regions on the frame
- region = obj['region']
- cv2.rectangle(frame_copy, (region[0], region[1]), (region[2], region[3]), (0,255,0), 1)
-
- if self.camera_config.snapshots.show_timestamp:
- time_to_show = datetime.datetime.fromtimestamp(frame_time).strftime("%m/%d/%Y %H:%M:%S")
- cv2.putText(frame_copy, time_to_show, (10, 30), cv2.FONT_HERSHEY_SIMPLEX, fontScale=.8, color=(255, 255, 255), thickness=2)
-
- if self.camera_config.snapshots.draw_zones:
- for name, zone in self.camera_config.zones.items():
- thickness = 8 if any([name in obj['current_zones'] for obj in tracked_objects.values()]) else 2
- cv2.drawContours(frame_copy, [zone.contour], -1, zone.color, thickness)
+ if draw_options.get('regions'):
+ for region in regions:
+ cv2.rectangle(frame_copy, (region[0], region[1]), (region[2], region[3]), (0,255,0), 2)
+
+ if draw_options.get('timestamp'):
+ time_to_show = datetime.datetime.fromtimestamp(frame_time).strftime("%m/%d/%Y %H:%M:%S")
+ cv2.putText(frame_copy, time_to_show, (10, 30), cv2.FONT_HERSHEY_SIMPLEX, fontScale=.8, color=(255, 255, 255), thickness=2)
+
+ if draw_options.get('zones'):
+ for name, zone in self.camera_config.zones.items():
+ thickness = 8 if any([name in obj['current_zones'] for obj in tracked_objects.values()]) else 2
+ cv2.drawContours(frame_copy, [zone.contour], -1, zone.color, thickness)
+
+ if draw_options.get('mask'):
+ mask_overlay = np.where(self.camera_config.mask==[0])
+ frame_copy[mask_overlay] = [0,0,0]
+
+ if draw_options.get('motion_boxes'):
+ for m_box in motion_boxes:
+ cv2.rectangle(frame_copy, (m_box[0], m_box[1]), (m_box[2], m_box[3]), (0,0,255), 2)
+
return frame_copy
def finished(self, obj_id):
@@ -292,8 +309,10 @@ def finished(self, obj_id):
def on(self, event_type: str, callback: Callable[[Dict], None]):
self.callbacks[event_type].append(callback)
- def update(self, frame_time, current_detections):
+ def update(self, frame_time, current_detections, motion_boxes, regions):
self.current_frame_time = frame_time
+ self.motion_boxes = motion_boxes
+ self.regions = regions
# get the new frame
frame_id = f"{self.name}{frame_time}"
current_frame = self.frame_manager.get(frame_id, self.camera_config.frame_shape_yuv)
@@ -313,16 +332,16 @@ def update(self, frame_time, current_detections):
for id in updated_ids:
updated_obj = self.tracked_objects[id]
- updated_obj.update(frame_time, current_detections[id])
+ significant_update = updated_obj.update(frame_time, current_detections[id])
- if (not updated_obj.false_positive
- and updated_obj.thumbnail_data['frame_time'] == frame_time
- and frame_time not in self.frame_cache):
- self.frame_cache[frame_time] = np.copy(current_frame)
+ if significant_update:
+ # ensure this frame is stored in the cache
+ if updated_obj.thumbnail_data['frame_time'] == frame_time and frame_time not in self.frame_cache:
+ self.frame_cache[frame_time] = np.copy(current_frame)
- # call event handlers
- for c in self.callbacks['update']:
- c(self.name, updated_obj, frame_time)
+ # call event handlers
+ for c in self.callbacks['update']:
+ c(self.name, updated_obj, frame_time)
for id in removed_ids:
# publish events to mqtt
@@ -407,9 +426,10 @@ def start(camera, obj: TrackedObject, current_frame_time):
self.event_queue.put(('start', camera, obj.to_dict()))
def update(camera, obj: TrackedObject, current_frame_time):
- if not obj.thumbnail_data is None and obj.thumbnail_data['frame_time'] == current_frame_time:
- message = { 'before': obj.previous, 'after': obj.to_dict() }
- self.client.publish(f"{self.topic_prefix}/events", json.dumps(message), retain=False)
+ after = obj.to_dict()
+ message = { 'before': obj.previous, 'after': after }
+ self.client.publish(f"{self.topic_prefix}/events", json.dumps(message), retain=False)
+ obj.previous = after
def end(camera, obj: TrackedObject, current_frame_time):
if not obj.false_positive:
@@ -447,14 +467,14 @@ def get_best(self, camera, label):
camera_state = self.camera_states[camera]
if label in camera_state.best_objects:
best_obj = camera_state.best_objects[label]
- best = best_obj.to_dict()
- best['frame'] = camera_state.frame_cache[best_obj.thumbnail_data['frame_time']]
+ best = best_obj.thumbnail_data.copy()
+ best['frame'] = camera_state.frame_cache.get(best_obj.thumbnail_data['frame_time'])
return best
else:
return {}
- def get_current_frame(self, camera, draw=False):
- return self.camera_states[camera].get_current_frame(draw)
+ def get_current_frame(self, camera, draw_options={}):
+ return self.camera_states[camera].get_current_frame(draw_options)
def run(self):
while True:
@@ -463,13 +483,13 @@ def run(self):
break
try:
- camera, frame_time, current_tracked_objects = self.tracked_objects_queue.get(True, 10)
+ camera, frame_time, current_tracked_objects, motion_boxes, regions = self.tracked_objects_queue.get(True, 10)
except queue.Empty:
continue
camera_state = self.camera_states[camera]
- camera_state.update(frame_time, current_tracked_objects)
+ camera_state.update(frame_time, current_tracked_objects, motion_boxes, regions)
# update zone counts for each label
# for each zone in the current camera
diff --git a/frigate/objects.py b/frigate/objects.py
--- a/frigate/objects.py
+++ b/frigate/objects.py
@@ -12,14 +12,15 @@
import numpy as np
from scipy.spatial import distance as dist
-from frigate.util import calculate_region, draw_box_with_label
+from frigate.config import DetectConfig
+from frigate.util import draw_box_with_label
class ObjectTracker():
- def __init__(self, max_disappeared):
+ def __init__(self, config: DetectConfig):
self.tracked_objects = {}
self.disappeared = {}
- self.max_disappeared = max_disappeared
+ self.max_disappeared = config.max_disappeared
def register(self, index, obj):
rand_id = ''.join(random.choices(string.ascii_lowercase + string.digits, k=6))
diff --git a/frigate/process_clip.py b/frigate/process_clip.py
new file mode 100644
--- /dev/null
+++ b/frigate/process_clip.py
@@ -0,0 +1,208 @@
+import datetime
+import json
+import logging
+import multiprocessing as mp
+import os
+import subprocess as sp
+import sys
+from unittest import TestCase, main
+
+import click
+import cv2
+import numpy as np
+
+from frigate.config import FRIGATE_CONFIG_SCHEMA, FrigateConfig
+from frigate.edgetpu import LocalObjectDetector
+from frigate.motion import MotionDetector
+from frigate.object_processing import COLOR_MAP, CameraState
+from frigate.objects import ObjectTracker
+from frigate.util import (DictFrameManager, EventsPerSecond,
+ SharedMemoryFrameManager, draw_box_with_label)
+from frigate.video import (capture_frames, process_frames,
+ start_or_restart_ffmpeg)
+
+logging.basicConfig()
+logging.root.setLevel(logging.DEBUG)
+
+logger = logging.getLogger(__name__)
+
+def get_frame_shape(source):
+ ffprobe_cmd = " ".join([
+ 'ffprobe',
+ '-v',
+ 'panic',
+ '-show_error',
+ '-show_streams',
+ '-of',
+ 'json',
+ '"'+source+'"'
+ ])
+ p = sp.Popen(ffprobe_cmd, stdout=sp.PIPE, shell=True)
+ (output, err) = p.communicate()
+ p_status = p.wait()
+ info = json.loads(output)
+
+ video_info = [s for s in info['streams'] if s['codec_type'] == 'video'][0]
+
+ if video_info['height'] != 0 and video_info['width'] != 0:
+ return (video_info['height'], video_info['width'], 3)
+
+ # fallback to using opencv if ffprobe didnt succeed
+ video = cv2.VideoCapture(source)
+ ret, frame = video.read()
+ frame_shape = frame.shape
+ video.release()
+ return frame_shape
+
+class ProcessClip():
+ def __init__(self, clip_path, frame_shape, config: FrigateConfig):
+ self.clip_path = clip_path
+ self.camera_name = 'camera'
+ self.config = config
+ self.camera_config = self.config.cameras['camera']
+ self.frame_shape = self.camera_config.frame_shape
+ self.ffmpeg_cmd = [c['cmd'] for c in self.camera_config.ffmpeg_cmds if 'detect' in c['roles']][0]
+ self.frame_manager = SharedMemoryFrameManager()
+ self.frame_queue = mp.Queue()
+ self.detected_objects_queue = mp.Queue()
+ self.camera_state = CameraState(self.camera_name, config, self.frame_manager)
+
+ def load_frames(self):
+ fps = EventsPerSecond()
+ skipped_fps = EventsPerSecond()
+ current_frame = mp.Value('d', 0.0)
+ frame_size = self.camera_config.frame_shape_yuv[0] * self.camera_config.frame_shape_yuv[1]
+ ffmpeg_process = start_or_restart_ffmpeg(self.ffmpeg_cmd, logger, sp.DEVNULL, frame_size)
+ capture_frames(ffmpeg_process, self.camera_name, self.camera_config.frame_shape_yuv, self.frame_manager,
+ self.frame_queue, fps, skipped_fps, current_frame)
+ ffmpeg_process.wait()
+ ffmpeg_process.communicate()
+
+ def process_frames(self, objects_to_track=['person'], object_filters={}):
+ mask = np.zeros((self.frame_shape[0], self.frame_shape[1], 1), np.uint8)
+ mask[:] = 255
+ motion_detector = MotionDetector(self.frame_shape, mask, self.camera_config.motion)
+
+ object_detector = LocalObjectDetector(labels='/labelmap.txt')
+ object_tracker = ObjectTracker(self.camera_config.detect)
+ process_info = {
+ 'process_fps': mp.Value('d', 0.0),
+ 'detection_fps': mp.Value('d', 0.0),
+ 'detection_frame': mp.Value('d', 0.0)
+ }
+ stop_event = mp.Event()
+ model_shape = (self.config.model.height, self.config.model.width)
+
+ process_frames(self.camera_name, self.frame_queue, self.frame_shape, model_shape,
+ self.frame_manager, motion_detector, object_detector, object_tracker,
+ self.detected_objects_queue, process_info,
+ objects_to_track, object_filters, mask, stop_event, exit_on_empty=True)
+
+ def top_object(self, debug_path=None):
+ obj_detected = False
+ top_computed_score = 0.0
+ def handle_event(name, obj, frame_time):
+ nonlocal obj_detected
+ nonlocal top_computed_score
+ if obj.computed_score > top_computed_score:
+ top_computed_score = obj.computed_score
+ if not obj.false_positive:
+ obj_detected = True
+ self.camera_state.on('new', handle_event)
+ self.camera_state.on('update', handle_event)
+
+ while(not self.detected_objects_queue.empty()):
+ camera_name, frame_time, current_tracked_objects, motion_boxes, regions = self.detected_objects_queue.get()
+ if not debug_path is None:
+ self.save_debug_frame(debug_path, frame_time, current_tracked_objects.values())
+
+ self.camera_state.update(frame_time, current_tracked_objects, motion_boxes, regions)
+
+ self.frame_manager.delete(self.camera_state.previous_frame_id)
+
+ return {
+ 'object_detected': obj_detected,
+ 'top_score': top_computed_score
+ }
+
+ def save_debug_frame(self, debug_path, frame_time, tracked_objects):
+ current_frame = cv2.cvtColor(self.frame_manager.get(f"{self.camera_name}{frame_time}", self.camera_config.frame_shape_yuv), cv2.COLOR_YUV2BGR_I420)
+ # draw the bounding boxes on the frame
+ for obj in tracked_objects:
+ thickness = 2
+ color = (0,0,175)
+
+ if obj['frame_time'] != frame_time:
+ thickness = 1
+ color = (255,0,0)
+ else:
+ color = (255,255,0)
+
+ # draw the bounding boxes on the frame
+ box = obj['box']
+ draw_box_with_label(current_frame, box[0], box[1], box[2], box[3], obj['id'], f"{int(obj['score']*100)}% {int(obj['area'])}", thickness=thickness, color=color)
+ # draw the regions on the frame
+ region = obj['region']
+ draw_box_with_label(current_frame, region[0], region[1], region[2], region[3], 'region', "", thickness=1, color=(0,255,0))
+
+ cv2.imwrite(f"{os.path.join(debug_path, os.path.basename(self.clip_path))}.{int(frame_time*1000000)}.jpg", current_frame)
+
[email protected]()
[email protected]("-p", "--path", required=True, help="Path to clip or directory to test.")
[email protected]("-l", "--label", default='person', help="Label name to detect.")
[email protected]("-t", "--threshold", default=0.85, help="Threshold value for objects.")
[email protected]("-s", "--scores", default=None, help="File to save csv of top scores")
[email protected]("--debug-path", default=None, help="Path to output frames for debugging.")
+def process(path, label, threshold, scores, debug_path):
+ clips = []
+ if os.path.isdir(path):
+ files = os.listdir(path)
+ files.sort()
+ clips = [os.path.join(path, file) for file in files]
+ elif os.path.isfile(path):
+ clips.append(path)
+
+ json_config = {
+ 'mqtt': {
+ 'host': 'mqtt'
+ },
+ 'cameras': {
+ 'camera': {
+ 'ffmpeg': {
+ 'inputs': [
+ { 'path': 'path.mp4', 'global_args': '', 'input_args': '', 'roles': ['detect'] }
+ ]
+ },
+ 'height': 1920,
+ 'width': 1080
+ }
+ }
+ }
+
+ results = []
+ for c in clips:
+ logger.info(c)
+ frame_shape = get_frame_shape(c)
+
+ json_config['cameras']['camera']['height'] = frame_shape[0]
+ json_config['cameras']['camera']['width'] = frame_shape[1]
+ json_config['cameras']['camera']['ffmpeg']['inputs'][0]['path'] = c
+
+ config = FrigateConfig(config=FRIGATE_CONFIG_SCHEMA(json_config))
+
+ process_clip = ProcessClip(c, frame_shape, config)
+ process_clip.load_frames()
+ process_clip.process_frames(objects_to_track=[label])
+
+ results.append((c, process_clip.top_object(debug_path)))
+
+ if not scores is None:
+ with open(scores, 'w') as writer:
+ for result in results:
+ writer.write(f"{result[0]},{result[1]['top_score']}\n")
+
+ positive_count = sum(1 for result in results if result[1]['object_detected'])
+ print(f"Objects were detected in {positive_count}/{len(results)}({positive_count/len(results)*100:.2f}%) clip(s).")
+
+if __name__ == '__main__':
+ process()
diff --git a/frigate/util.py b/frigate/util.py
--- a/frigate/util.py
+++ b/frigate/util.py
@@ -47,14 +47,11 @@ def draw_box_with_label(frame, x_min, y_min, x_max, y_max, label, info, thicknes
cv2.putText(frame, display_text, (text_offset_x, text_offset_y + line_height - 3), font, fontScale=font_scale, color=(0, 0, 0), thickness=2)
def calculate_region(frame_shape, xmin, ymin, xmax, ymax, multiplier=2):
- # size is larger than longest edge
- size = int(max(xmax-xmin, ymax-ymin)*multiplier)
+ # size is the longest edge and divisible by 4
+ size = int(max(xmax-xmin, ymax-ymin)//4*4*multiplier)
# dont go any smaller than 300
if size < 300:
size = 300
- # if the size is too big to fit in the frame
- if size > min(frame_shape[0], frame_shape[1]):
- size = min(frame_shape[0], frame_shape[1])
# x_offset is midpoint of bounding box minus half the size
x_offset = int((xmax-xmin)/2.0+xmin-size/2.0)
@@ -62,48 +59,156 @@ def calculate_region(frame_shape, xmin, ymin, xmax, ymax, multiplier=2):
if x_offset < 0:
x_offset = 0
elif x_offset > (frame_shape[1]-size):
- x_offset = (frame_shape[1]-size)
+ x_offset = max(0, (frame_shape[1]-size))
# y_offset is midpoint of bounding box minus half the size
y_offset = int((ymax-ymin)/2.0+ymin-size/2.0)
- # if outside the image
+ # # if outside the image
if y_offset < 0:
y_offset = 0
elif y_offset > (frame_shape[0]-size):
- y_offset = (frame_shape[0]-size)
+ y_offset = max(0, (frame_shape[0]-size))
return (x_offset, y_offset, x_offset+size, y_offset+size)
+def get_yuv_crop(frame_shape, crop):
+ # crop should be (x1,y1,x2,y2)
+ frame_height = frame_shape[0]//3*2
+ frame_width = frame_shape[1]
+
+ # compute the width/height of the uv channels
+ uv_width = frame_width//2 # width of the uv channels
+ uv_height = frame_height//4 # height of the uv channels
+
+ # compute the offset for upper left corner of the uv channels
+ uv_x_offset = crop[0]//2 # x offset of the uv channels
+ uv_y_offset = crop[1]//4 # y offset of the uv channels
+
+ # compute the width/height of the uv crops
+ uv_crop_width = (crop[2] - crop[0])//2 # width of the cropped uv channels
+ uv_crop_height = (crop[3] - crop[1])//4 # height of the cropped uv channels
+
+ # ensure crop dimensions are multiples of 2 and 4
+ y = (
+ crop[0],
+ crop[1],
+ crop[0] + uv_crop_width*2,
+ crop[1] + uv_crop_height*4
+ )
+
+ u1 = (
+ 0 + uv_x_offset,
+ frame_height + uv_y_offset,
+ 0 + uv_x_offset + uv_crop_width,
+ frame_height + uv_y_offset + uv_crop_height
+ )
+
+ u2 = (
+ uv_width + uv_x_offset,
+ frame_height + uv_y_offset,
+ uv_width + uv_x_offset + uv_crop_width,
+ frame_height + uv_y_offset + uv_crop_height
+ )
+
+ v1 = (
+ 0 + uv_x_offset,
+ frame_height + uv_height + uv_y_offset,
+ 0 + uv_x_offset + uv_crop_width,
+ frame_height + uv_height + uv_y_offset + uv_crop_height
+ )
+
+ v2 = (
+ uv_width + uv_x_offset,
+ frame_height + uv_height + uv_y_offset,
+ uv_width + uv_x_offset + uv_crop_width,
+ frame_height + uv_height + uv_y_offset + uv_crop_height
+ )
+
+ return y, u1, u2, v1, v2
+
def yuv_region_2_rgb(frame, region):
- height = frame.shape[0]//3*2
- width = frame.shape[1]
- # make sure the size is a multiple of 4
- size = (region[3] - region[1])//4*4
-
- x1 = region[0]
- y1 = region[1]
-
- uv_x1 = x1//2
- uv_y1 = y1//4
-
- uv_width = size//2
- uv_height = size//4
-
- u_y_start = height
- v_y_start = height + height//4
- two_x_offset = width//2
-
- yuv_cropped_frame = np.zeros((size+size//2, size), np.uint8)
- # y channel
- yuv_cropped_frame[0:size, 0:size] = frame[y1:y1+size, x1:x1+size]
- # u channel
- yuv_cropped_frame[size:size+uv_height, 0:uv_width] = frame[uv_y1+u_y_start:uv_y1+u_y_start+uv_height, uv_x1:uv_x1+uv_width]
- yuv_cropped_frame[size:size+uv_height, uv_width:size] = frame[uv_y1+u_y_start:uv_y1+u_y_start+uv_height, uv_x1+two_x_offset:uv_x1+two_x_offset+uv_width]
- # v channel
- yuv_cropped_frame[size+uv_height:size+uv_height*2, 0:uv_width] = frame[uv_y1+v_y_start:uv_y1+v_y_start+uv_height, uv_x1:uv_x1+uv_width]
- yuv_cropped_frame[size+uv_height:size+uv_height*2, uv_width:size] = frame[uv_y1+v_y_start:uv_y1+v_y_start+uv_height, uv_x1+two_x_offset:uv_x1+two_x_offset+uv_width]
-
- return cv2.cvtColor(yuv_cropped_frame, cv2.COLOR_YUV2RGB_I420)
+ try:
+ height = frame.shape[0]//3*2
+ width = frame.shape[1]
+
+ # get the crop box if the region extends beyond the frame
+ crop_x1 = max(0, region[0])
+ crop_y1 = max(0, region[1])
+ # ensure these are a multiple of 4
+ crop_x2 = min(width, region[2])
+ crop_y2 = min(height, region[3])
+ crop_box = (crop_x1, crop_y1, crop_x2, crop_y2)
+
+ y, u1, u2, v1, v2 = get_yuv_crop(frame.shape, crop_box)
+
+ # if the region starts outside the frame, indent the start point in the cropped frame
+ y_channel_x_offset = abs(min(0, region[0]))
+ y_channel_y_offset = abs(min(0, region[1]))
+
+ uv_channel_x_offset = y_channel_x_offset//2
+ uv_channel_y_offset = y_channel_y_offset//4
+
+ # create the yuv region frame
+ # make sure the size is a multiple of 4
+ size = (region[3] - region[1])//4*4
+ yuv_cropped_frame = np.zeros((size+size//2, size), np.uint8)
+ # fill in black
+ yuv_cropped_frame[:] = 128
+ yuv_cropped_frame[0:size,0:size] = 16
+
+ # copy the y channel
+ yuv_cropped_frame[
+ y_channel_y_offset:y_channel_y_offset + y[3] - y[1],
+ y_channel_x_offset:y_channel_x_offset + y[2] - y[0]
+ ] = frame[
+ y[1]:y[3],
+ y[0]:y[2]
+ ]
+
+ uv_crop_width = u1[2] - u1[0]
+ uv_crop_height = u1[3] - u1[1]
+
+ # copy u1
+ yuv_cropped_frame[
+ size + uv_channel_y_offset:size + uv_channel_y_offset + uv_crop_height,
+ 0 + uv_channel_x_offset:0 + uv_channel_x_offset + uv_crop_width
+ ] = frame[
+ u1[1]:u1[3],
+ u1[0]:u1[2]
+ ]
+
+ # copy u2
+ yuv_cropped_frame[
+ size + uv_channel_y_offset:size + uv_channel_y_offset + uv_crop_height,
+ size//2 + uv_channel_x_offset:size//2 + uv_channel_x_offset + uv_crop_width
+ ] = frame[
+ u2[1]:u2[3],
+ u2[0]:u2[2]
+ ]
+
+ # copy v1
+ yuv_cropped_frame[
+ size+size//4 + uv_channel_y_offset:size+size//4 + uv_channel_y_offset + uv_crop_height,
+ 0 + uv_channel_x_offset:0 + uv_channel_x_offset + uv_crop_width
+ ] = frame[
+ v1[1]:v1[3],
+ v1[0]:v1[2]
+ ]
+
+ # copy v2
+ yuv_cropped_frame[
+ size+size//4 + uv_channel_y_offset:size+size//4 + uv_channel_y_offset + uv_crop_height,
+ size//2 + uv_channel_x_offset:size//2 + uv_channel_x_offset + uv_crop_width
+ ] = frame[
+ v2[1]:v2[3],
+ v2[0]:v2[2]
+ ]
+
+ return cv2.cvtColor(yuv_cropped_frame, cv2.COLOR_YUV2RGB_I420)
+ except:
+ print(f"frame.shape: {frame.shape}")
+ print(f"region: {region}")
+ raise
def intersection(box_a, box_b):
return (
diff --git a/frigate/video.py b/frigate/video.py
--- a/frigate/video.py
+++ b/frigate/video.py
@@ -112,16 +112,15 @@ def capture_frames(ffmpeg_process, camera_name, frame_shape, frame_manager: Fram
frame_name = f"{camera_name}{current_frame.value}"
frame_buffer = frame_manager.create(frame_name, frame_size)
try:
- frame_buffer[:] = ffmpeg_process.stdout.read(frame_size)
- except:
- logger.info(f"{camera_name}: ffmpeg sent a broken frame. something is wrong.")
-
- if ffmpeg_process.poll() != None:
- logger.info(f"{camera_name}: ffmpeg process is not running. exiting capture thread...")
- frame_manager.delete(frame_name)
- break
-
- continue
+ frame_buffer[:] = ffmpeg_process.stdout.read(frame_size)
+ except Exception as e:
+ logger.info(f"{camera_name}: ffmpeg sent a broken frame. {e}")
+
+ if ffmpeg_process.poll() != None:
+ logger.info(f"{camera_name}: ffmpeg process is not running. exiting capture thread...")
+ frame_manager.delete(frame_name)
+ break
+ continue
frame_rate.update()
@@ -259,10 +258,10 @@ def receiveSignal(signalNumber, frame):
object_filters = config.objects.filters
mask = config.mask
- motion_detector = MotionDetector(frame_shape, mask, resize_factor=6)
+ motion_detector = MotionDetector(frame_shape, mask, config.motion)
object_detector = RemoteObjectDetector(name, '/labelmap.txt', detection_queue, result_connection, model_shape)
- object_tracker = ObjectTracker(10)
+ object_tracker = ObjectTracker(config.detect)
frame_manager = SharedMemoryFrameManager()
@@ -389,6 +388,8 @@ def process_frames(camera_name: str, frame_queue: mp.Queue, frame_shape, model_s
region = calculate_region(frame_shape,
box[0], box[1],
box[2], box[3])
+
+ regions.append(region)
selected_objects.extend(detect(object_detector, frame, model_shape, region, objects_to_track, object_filters, mask))
@@ -412,6 +413,6 @@ def process_frames(camera_name: str, frame_queue: mp.Queue, frame_shape, model_s
else:
fps_tracker.update()
fps.value = fps_tracker.eps()
- detected_objects_queue.put((camera_name, frame_time, object_tracker.tracked_objects))
+ detected_objects_queue.put((camera_name, frame_time, object_tracker.tracked_objects, motion_boxes, regions))
detection_fps.value = object_detector.fps.eps()
frame_manager.close(f"{camera_name}{frame_time}")
diff --git a/process_clip.py b/process_clip.py
deleted file mode 100644
--- a/process_clip.py
+++ /dev/null
@@ -1,152 +0,0 @@
-import sys
-import click
-import os
-import datetime
-from unittest import TestCase, main
-from frigate.video import process_frames, start_or_restart_ffmpeg, capture_frames, get_frame_shape
-from frigate.util import DictFrameManager, SharedMemoryFrameManager, EventsPerSecond, draw_box_with_label
-from frigate.motion import MotionDetector
-from frigate.edgetpu import LocalObjectDetector
-from frigate.objects import ObjectTracker
-import multiprocessing as mp
-import numpy as np
-import cv2
-from frigate.object_processing import COLOR_MAP, CameraState
-
-class ProcessClip():
- def __init__(self, clip_path, frame_shape, config):
- self.clip_path = clip_path
- self.frame_shape = frame_shape
- self.camera_name = 'camera'
- self.frame_manager = DictFrameManager()
- # self.frame_manager = SharedMemoryFrameManager()
- self.frame_queue = mp.Queue()
- self.detected_objects_queue = mp.Queue()
- self.camera_state = CameraState(self.camera_name, config, self.frame_manager)
-
- def load_frames(self):
- fps = EventsPerSecond()
- skipped_fps = EventsPerSecond()
- stop_event = mp.Event()
- detection_frame = mp.Value('d', datetime.datetime.now().timestamp()+100000)
- current_frame = mp.Value('d', 0.0)
- ffmpeg_cmd = f"ffmpeg -hide_banner -loglevel panic -i {self.clip_path} -f rawvideo -pix_fmt rgb24 pipe:".split(" ")
- ffmpeg_process = start_or_restart_ffmpeg(ffmpeg_cmd, self.frame_shape[0]*self.frame_shape[1]*self.frame_shape[2])
- capture_frames(ffmpeg_process, self.camera_name, self.frame_shape, self.frame_manager, self.frame_queue, 1, fps, skipped_fps, stop_event, detection_frame, current_frame)
- ffmpeg_process.wait()
- ffmpeg_process.communicate()
-
- def process_frames(self, objects_to_track=['person'], object_filters={}):
- mask = np.zeros((self.frame_shape[0], self.frame_shape[1], 1), np.uint8)
- mask[:] = 255
- motion_detector = MotionDetector(self.frame_shape, mask)
-
- object_detector = LocalObjectDetector(labels='/labelmap.txt')
- object_tracker = ObjectTracker(10)
- process_fps = mp.Value('d', 0.0)
- detection_fps = mp.Value('d', 0.0)
- current_frame = mp.Value('d', 0.0)
- stop_event = mp.Event()
-
- process_frames(self.camera_name, self.frame_queue, self.frame_shape, self.frame_manager, motion_detector, object_detector, object_tracker, self.detected_objects_queue,
- process_fps, detection_fps, current_frame, objects_to_track, object_filters, mask, stop_event, exit_on_empty=True)
-
- def objects_found(self, debug_path=None):
- obj_detected = False
- top_computed_score = 0.0
- def handle_event(name, obj):
- nonlocal obj_detected
- nonlocal top_computed_score
- if obj['computed_score'] > top_computed_score:
- top_computed_score = obj['computed_score']
- if not obj['false_positive']:
- obj_detected = True
- self.camera_state.on('new', handle_event)
- self.camera_state.on('update', handle_event)
-
- while(not self.detected_objects_queue.empty()):
- camera_name, frame_time, current_tracked_objects = self.detected_objects_queue.get()
- if not debug_path is None:
- self.save_debug_frame(debug_path, frame_time, current_tracked_objects.values())
-
- self.camera_state.update(frame_time, current_tracked_objects)
- for obj in self.camera_state.tracked_objects.values():
- print(f"{frame_time}: {obj['id']} - {obj['computed_score']} - {obj['score_history']}")
-
- self.frame_manager.delete(self.camera_state.previous_frame_id)
-
- return {
- 'object_detected': obj_detected,
- 'top_score': top_computed_score
- }
-
- def save_debug_frame(self, debug_path, frame_time, tracked_objects):
- current_frame = self.frame_manager.get(f"{self.camera_name}{frame_time}", self.frame_shape)
- # draw the bounding boxes on the frame
- for obj in tracked_objects:
- thickness = 2
- color = (0,0,175)
-
- if obj['frame_time'] != frame_time:
- thickness = 1
- color = (255,0,0)
- else:
- color = (255,255,0)
-
- # draw the bounding boxes on the frame
- box = obj['box']
- draw_box_with_label(current_frame, box[0], box[1], box[2], box[3], obj['label'], f"{int(obj['score']*100)}% {int(obj['area'])}", thickness=thickness, color=color)
- # draw the regions on the frame
- region = obj['region']
- draw_box_with_label(current_frame, region[0], region[1], region[2], region[3], 'region', "", thickness=1, color=(0,255,0))
-
- cv2.imwrite(f"{os.path.join(debug_path, os.path.basename(self.clip_path))}.{int(frame_time*1000000)}.jpg", cv2.cvtColor(current_frame, cv2.COLOR_RGB2BGR))
-
[email protected]()
[email protected]("-p", "--path", required=True, help="Path to clip or directory to test.")
[email protected]("-l", "--label", default='person', help="Label name to detect.")
[email protected]("-t", "--threshold", default=0.85, help="Threshold value for objects.")
[email protected]("--debug-path", default=None, help="Path to output frames for debugging.")
-def process(path, label, threshold, debug_path):
- clips = []
- if os.path.isdir(path):
- files = os.listdir(path)
- files.sort()
- clips = [os.path.join(path, file) for file in files]
- elif os.path.isfile(path):
- clips.append(path)
-
- config = {
- 'snapshots': {
- 'show_timestamp': False,
- 'draw_zones': False
- },
- 'zones': {},
- 'objects': {
- 'track': [label],
- 'filters': {
- 'person': {
- 'threshold': threshold
- }
- }
- }
- }
-
- results = []
- for c in clips:
- frame_shape = get_frame_shape(c)
- config['frame_shape'] = frame_shape
- process_clip = ProcessClip(c, frame_shape, config)
- process_clip.load_frames()
- process_clip.process_frames(objects_to_track=config['objects']['track'])
-
- results.append((c, process_clip.objects_found(debug_path)))
-
- for result in results:
- print(f"{result[0]}: {result[1]}")
-
- positive_count = sum(1 for result in results if result[1]['object_detected'])
- print(f"Objects were detected in {positive_count}/{len(results)}({positive_count/len(results)*100:.2f}%) clip(s).")
-
-if __name__ == '__main__':
- process()
\ No newline at end of file
| diff --git a/frigate/test/test_yuv_region_2_rgb.py b/frigate/test/test_yuv_region_2_rgb.py
new file mode 100644
--- /dev/null
+++ b/frigate/test/test_yuv_region_2_rgb.py
@@ -0,0 +1,39 @@
+import cv2
+import numpy as np
+from unittest import TestCase, main
+from frigate.util import yuv_region_2_rgb
+
+class TestYuvRegion2RGB(TestCase):
+ def setUp(self):
+ self.bgr_frame = np.zeros((100, 200, 3), np.uint8)
+ self.bgr_frame[:] = (0, 0, 255)
+ self.bgr_frame[5:55, 5:55] = (255,0,0)
+ # cv2.imwrite(f"bgr_frame.jpg", self.bgr_frame)
+ self.yuv_frame = cv2.cvtColor(self.bgr_frame, cv2.COLOR_BGR2YUV_I420)
+
+ def test_crop_yuv(self):
+ cropped = yuv_region_2_rgb(self.yuv_frame, (10,10,50,50))
+ # ensure the upper left pixel is blue
+ assert(np.all(cropped[0, 0] == [0, 0, 255]))
+
+ def test_crop_yuv_out_of_bounds(self):
+ cropped = yuv_region_2_rgb(self.yuv_frame, (0,0,200,200))
+ # cv2.imwrite(f"cropped.jpg", cv2.cvtColor(cropped, cv2.COLOR_RGB2BGR))
+ # ensure the upper left pixel is red
+ # the yuv conversion has some noise
+ assert(np.all(cropped[0, 0] == [255, 1, 0]))
+ # ensure the bottom right is black
+ assert(np.all(cropped[199, 199] == [0, 0, 0]))
+
+ def test_crop_yuv_portrait(self):
+ bgr_frame = np.zeros((1920, 1080, 3), np.uint8)
+ bgr_frame[:] = (0, 0, 255)
+ bgr_frame[5:55, 5:55] = (255,0,0)
+ # cv2.imwrite(f"bgr_frame.jpg", self.bgr_frame)
+ yuv_frame = cv2.cvtColor(bgr_frame, cv2.COLOR_BGR2YUV_I420)
+
+ cropped = yuv_region_2_rgb(yuv_frame, (0, 852, 648, 1500))
+ # cv2.imwrite(f"cropped.jpg", cv2.cvtColor(cropped, cv2.COLOR_RGB2BGR))
+
+if __name__ == '__main__':
+ main(verbosity=2)
\ No newline at end of file
| [FR] Enable num_threads parameter when creating tflite.Interpreter
The Interpreter constructor supports a num_threads parameter. This might not be beneficial for the TPU, but on a bare Raspberry Pi 4 it helps a lot (e.g. using 3 threads and leaving one free, I can bring inference time down from ~180ms to ~60ms).
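A minimal sketch of the kind of change being requested, assuming a tflite_runtime release recent enough to accept num_threads in the Interpreter constructor (the helper name and the default of 3 threads below are illustrative, not an actual Frigate API):

import tflite_runtime.interpreter as tflite

def make_cpu_interpreter(model_path='/cpu_model.tflite', num_threads=3):
    # num_threads only affects CPU inference; an EdgeTPU delegate runs
    # the model on the accelerator and ignores this setting.
    interpreter = tflite.Interpreter(model_path=model_path, num_threads=num_threads)
    interpreter.allocate_tensors()
    return interpreter

Exposing the thread count through the detector configuration would let CPU-only deployments (Raspberry Pi, desktop CPUs) tune it per host without affecting EdgeTPU users.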
Looks like a nice FR.
I run Frigate in Docker on my server with a Ryzen 3700X; it would be a nice feature to improve performance with threads and limit the Docker container as needed.
Should be simple to add.
blakeblackshear/frigate | 431 | blakeblackshear__frigate-431 | [
"438",
"523"
] | 905c361d5271ce0cbaabd1fabb2ea8d757c43be0 | diff --git a/frigate/app.py b/frigate/app.py
--- a/frigate/app.py
+++ b/frigate/app.py
@@ -8,6 +8,7 @@
import signal
import yaml
+from peewee_migrate import Router
from playhouse.sqlite_ext import SqliteExtDatabase
from frigate.config import FrigateConfig
@@ -20,6 +21,7 @@
from frigate.mqtt import create_mqtt_client
from frigate.object_processing import TrackedObjectProcessor
from frigate.record import RecordingMaintainer
+from frigate.stats import StatsEmitter, stats_init
from frigate.video import capture_camera, track_camera
from frigate.watchdog import FrigateWatchdog
from frigate.zeroconf import broadcast_zeroconf
@@ -37,6 +39,10 @@ def __init__(self):
self.log_queue = mp.Queue()
self.camera_metrics = {}
+ def set_environment_vars(self):
+ for key, value in self.config.environment_vars.items():
+ os.environ[key] = value
+
def ensure_dirs(self):
for d in [RECORD_DIR, CLIPS_DIR, CACHE_DIR]:
if not os.path.exists(d) and not os.path.islink(d):
@@ -45,7 +51,7 @@ def ensure_dirs(self):
else:
logger.debug(f"Skipping directory: {d}")
- tmpfs_size = self.config.save_clips.tmpfs_cache_size
+ tmpfs_size = self.config.clips.tmpfs_cache_size
if tmpfs_size:
logger.info(f"Creating tmpfs of size {tmpfs_size}")
rc = os.system(f"mount -t tmpfs -o size={tmpfs_size} tmpfs {CACHE_DIR}")
@@ -68,20 +74,21 @@ def init_config(self):
'camera_fps': mp.Value('d', 0.0),
'skipped_fps': mp.Value('d', 0.0),
'process_fps': mp.Value('d', 0.0),
+ 'detection_enabled': mp.Value('i', 1),
'detection_fps': mp.Value('d', 0.0),
'detection_frame': mp.Value('d', 0.0),
'read_start': mp.Value('d', 0.0),
'ffmpeg_pid': mp.Value('i', 0),
- 'frame_queue': mp.Queue(maxsize=2)
+ 'frame_queue': mp.Queue(maxsize=2),
}
def check_config(self):
for name, camera in self.config.cameras.items():
assigned_roles = list(set([r for i in camera.ffmpeg.inputs for r in i.roles]))
- if not camera.save_clips.enabled and 'clips' in assigned_roles:
- logger.warning(f"Camera {name} has clips assigned to an input, but save_clips is not enabled.")
- elif camera.save_clips.enabled and not 'clips' in assigned_roles:
- logger.warning(f"Camera {name} has save_clips enabled, but clips is not assigned to an input.")
+ if not camera.clips.enabled and 'clips' in assigned_roles:
+ logger.warning(f"Camera {name} has clips assigned to an input, but clips is not enabled.")
+ elif camera.clips.enabled and not 'clips' in assigned_roles:
+ logger.warning(f"Camera {name} has clips enabled, but clips is not assigned to an input.")
if not camera.record.enabled and 'record' in assigned_roles:
logger.warning(f"Camera {name} has record assigned to an input, but record is not enabled.")
@@ -111,15 +118,23 @@ def init_queues(self):
def init_database(self):
self.db = SqliteExtDatabase(self.config.database.path)
+
+ # Run migrations
+ del(logging.getLogger('peewee_migrate').handlers[:])
+ router = Router(self.db)
+ router.run()
+
models = [Event]
self.db.bind(models)
- self.db.create_tables(models, safe=True)
+
+ def init_stats(self):
+ self.stats_tracking = stats_init(self.camera_metrics, self.detectors)
def init_web_server(self):
- self.flask_app = create_app(self.config, self.db, self.camera_metrics, self.detectors, self.detected_frames_processor)
+ self.flask_app = create_app(self.config, self.db, self.stats_tracking, self.detected_frames_processor)
def init_mqtt(self):
- self.mqtt_client = create_mqtt_client(self.config.mqtt)
+ self.mqtt_client = create_mqtt_client(self.config, self.camera_metrics)
def start_detectors(self):
model_shape = (self.config.model.height, self.config.model.width)
@@ -173,6 +188,10 @@ def start_recording_maintainer(self):
self.recording_maintainer = RecordingMaintainer(self.config, self.stop_event)
self.recording_maintainer.start()
+ def start_stats_emitter(self):
+ self.stats_emitter = StatsEmitter(self.config, self.stats_tracking, self.mqtt_client, self.config.mqtt.topic_prefix, self.stop_event)
+ self.stats_emitter.start()
+
def start_watchdog(self):
self.frigate_watchdog = FrigateWatchdog(self.detectors, self.stop_event)
self.frigate_watchdog.start()
@@ -186,6 +205,7 @@ def start(self):
logger.error(f"Error parsing config: {e}")
self.log_process.terminate()
sys.exit(1)
+ self.set_environment_vars()
self.ensure_dirs()
self.check_config()
self.set_log_levels()
@@ -200,10 +220,12 @@ def start(self):
self.start_detected_frames_processor()
self.start_camera_processors()
self.start_camera_capture_processes()
+ self.init_stats()
self.init_web_server()
self.start_event_processor()
self.start_event_cleanup()
self.start_recording_maintainer()
+ self.start_stats_emitter()
self.start_watchdog()
# self.zeroconf = broadcast_zeroconf(self.config.mqtt.client_id)
@@ -224,6 +246,7 @@ def stop(self):
self.event_processor.join()
self.event_cleanup.join()
self.recording_maintainer.join()
+ self.stats_emitter.join()
self.frigate_watchdog.join()
for detector in self.detectors.values():
diff --git a/frigate/config.py b/frigate/config.py
--- a/frigate/config.py
+++ b/frigate/config.py
@@ -11,13 +11,16 @@
import yaml
from frigate.const import RECORD_DIR, CLIPS_DIR, CACHE_DIR
+from frigate.util import create_mask
logger = logging.getLogger(__name__)
+DEFAULT_TRACKED_OBJECTS = ['person']
+
DETECTORS_SCHEMA = vol.Schema(
{
vol.Required(str): {
- vol.Required('type', default='edgetpu'): vol.In(['cpu', 'edgetpu']),
+ vol.Required('type', default='edgetpu'): vol.In(['cpu', 'edgetpu']),
vol.Optional('device', default='usb'): str,
vol.Optional('num_threads', default=3): int
}
@@ -37,12 +40,13 @@
vol.Optional('port', default=1883): int,
vol.Optional('topic_prefix', default='frigate'): str,
vol.Optional('client_id', default='frigate'): str,
+ vol.Optional('stats_interval', default=60): int,
'user': str,
'password': str
}
)
-SAVE_CLIPS_RETAIN_SCHEMA = vol.Schema(
+RETAIN_SCHEMA = vol.Schema(
{
vol.Required('default',default=10): int,
'objects': {
@@ -51,11 +55,11 @@
}
)
-SAVE_CLIPS_SCHEMA = vol.Schema(
+CLIPS_SCHEMA = vol.Schema(
{
vol.Optional('max_seconds', default=300): int,
'tmpfs_cache_size': str,
- vol.Optional('retain', default={}): SAVE_CLIPS_RETAIN_SCHEMA
+ vol.Optional('retain', default={}): RETAIN_SCHEMA
}
)
@@ -76,7 +80,7 @@
"1", "-c", "copy", "-an"]
GLOBAL_FFMPEG_SCHEMA = vol.Schema(
- {
+ {
vol.Optional('global_args', default=FFMPEG_GLOBAL_ARGS_DEFAULT): vol.Any(str, [str]),
vol.Optional('hwaccel_args', default=[]): vol.Any(str, [str]),
vol.Optional('input_args', default=FFMPEG_INPUT_ARGS_DEFAULT): vol.Any(str, [str]),
@@ -91,6 +95,7 @@
MOTION_SCHEMA = vol.Schema(
{
+ 'mask': vol.Any(str, [str]),
'threshold': vol.Range(min=1, max=255),
'contour_area': int,
'delta_alpha': float,
@@ -106,17 +111,17 @@
)
FILTER_SCHEMA = vol.Schema(
- {
+ {
str: {
- vol.Optional('min_area', default=0): int,
- vol.Optional('max_area', default=24000000): int,
- vol.Optional('threshold', default=0.7): float
+ 'min_area': int,
+ 'max_area': int,
+ 'threshold': float,
}
}
)
def filters_for_all_tracked_objects(object_config):
- for tracked_object in object_config.get('track', ['person']):
+ for tracked_object in object_config.get('track', DEFAULT_TRACKED_OBJECTS):
if not 'filters' in object_config:
object_config['filters'] = {}
if not tracked_object in object_config['filters']:
@@ -125,8 +130,14 @@ def filters_for_all_tracked_objects(object_config):
OBJECTS_SCHEMA = vol.Schema(vol.All(filters_for_all_tracked_objects,
{
- vol.Optional('track', default=['person']): [str],
- vol.Optional('filters', default = {}): FILTER_SCHEMA.extend({ str: {vol.Optional('min_score', default=0.5): float}})
+ 'track': [str],
+ vol.Optional('filters', default = {}): FILTER_SCHEMA.extend(
+ {
+ str: {
+ 'min_score': float,
+ 'mask': vol.Any(str, [str]),
+ }
+ })
}
))
@@ -137,15 +148,22 @@ def each_role_used_once(inputs):
raise ValueError
return inputs
+def detect_is_required(inputs):
+ roles = [role for i in inputs for role in i['roles']]
+ if not 'detect' in roles:
+ raise ValueError
+ return inputs
+
CAMERA_FFMPEG_SCHEMA = vol.Schema(
- {
+ {
vol.Required('inputs'): vol.All([{
vol.Required('path'): str,
vol.Required('roles'): ['detect', 'clips', 'record', 'rtmp'],
'global_args': vol.Any(str, [str]),
'hwaccel_args': vol.Any(str, [str]),
'input_args': vol.Any(str, [str]),
- }], vol.Msg(each_role_used_once, msg="Each input role may only be used once")),
+ }], vol.Msg(each_role_used_once, msg="Each input role may only be used once"),
+ vol.Msg(detect_is_required, msg="The detect role is required")),
'output_args': {
vol.Optional('detect', default=DETECT_FFMPEG_OUTPUT_ARGS_DEFAULT): vol.Any(str, [str]),
vol.Optional('record', default=RECORD_FFMPEG_OUTPUT_ARGS_DEFAULT): vol.Any(str, [str]),
@@ -169,7 +187,6 @@ def ensure_zones_and_cameras_have_different_names(cameras):
vol.Required('height'): int,
vol.Required('width'): int,
'fps': int,
- 'mask': vol.Any(str, [str]),
vol.Optional('best_image_timeout', default=60): int,
vol.Optional('zones', default={}): {
str: {
@@ -177,12 +194,12 @@ def ensure_zones_and_cameras_have_different_names(cameras):
vol.Optional('filters', default={}): FILTER_SCHEMA
}
},
- vol.Optional('save_clips', default={}): {
+ vol.Optional('clips', default={}): {
vol.Optional('enabled', default=False): bool,
vol.Optional('pre_capture', default=5): int,
vol.Optional('post_capture', default=5): int,
'objects': [str],
- vol.Optional('retain', default={}): SAVE_CLIPS_RETAIN_SCHEMA,
+ vol.Optional('retain', default={}): RETAIN_SCHEMA,
},
vol.Optional('record', default={}): {
'enabled': bool,
@@ -192,15 +209,25 @@ def ensure_zones_and_cameras_have_different_names(cameras):
vol.Required('enabled', default=True): bool,
},
vol.Optional('snapshots', default={}): {
- vol.Optional('show_timestamp', default=True): bool,
- vol.Optional('draw_zones', default=False): bool,
- vol.Optional('draw_bounding_boxes', default=True): bool,
- vol.Optional('crop_to_region', default=True): bool,
- vol.Optional('height', default=175): int
+ vol.Optional('enabled', default=False): bool,
+ vol.Optional('timestamp', default=False): bool,
+ vol.Optional('bounding_box', default=False): bool,
+ vol.Optional('crop', default=False): bool,
+ 'height': int,
+ vol.Optional('retain', default={}): RETAIN_SCHEMA,
+ },
+ vol.Optional('mqtt', default={}): {
+ vol.Optional('enabled', default=True): bool,
+ vol.Optional('timestamp', default=True): bool,
+ vol.Optional('bounding_box', default=True): bool,
+ vol.Optional('crop', default=True): bool,
+ vol.Optional('height', default=270): int
},
- 'objects': OBJECTS_SCHEMA,
+ vol.Optional('objects', default={}): OBJECTS_SCHEMA,
vol.Optional('motion', default={}): MOTION_SCHEMA,
- vol.Optional('detect', default={}): DETECT_SCHEMA
+ vol.Optional('detect', default={}): DETECT_SCHEMA.extend({
+ vol.Optional('enabled', default=True): bool
+ })
}
}, vol.Msg(ensure_zones_and_cameras_have_different_names, msg='Zones cannot share names with cameras'))
)
@@ -220,7 +247,10 @@ def ensure_zones_and_cameras_have_different_names(cameras):
vol.Optional('default', default='info'): vol.In(['info', 'debug', 'warning', 'error', 'critical']),
vol.Optional('logs', default={}): {str: vol.In(['info', 'debug', 'warning', 'error', 'critical']) }
},
- vol.Optional('save_clips', default={}): SAVE_CLIPS_SCHEMA,
+ vol.Optional('snapshots', default={}): {
+ vol.Optional('retain', default={}): RETAIN_SCHEMA
+ },
+ vol.Optional('clips', default={}): CLIPS_SCHEMA,
vol.Optional('record', default={}): {
vol.Optional('enabled', default=False): bool,
vol.Optional('retain_days', default=30): int,
@@ -229,7 +259,8 @@ def ensure_zones_and_cameras_have_different_names(cameras):
vol.Optional('objects', default={}): OBJECTS_SCHEMA,
vol.Optional('motion', default={}): MOTION_SCHEMA,
vol.Optional('detect', default={}): DETECT_SCHEMA,
- vol.Required('cameras', default={}): CAMERAS_SCHEMA
+ vol.Required('cameras', default={}): CAMERAS_SCHEMA,
+ vol.Optional('environment_vars', default={}): { str: str }
}
)
@@ -240,7 +271,7 @@ def __init__(self, config):
@property
def path(self):
return self._path
-
+
def to_dict(self):
return {
'path': self.path
@@ -250,15 +281,15 @@ class ModelConfig():
def __init__(self, config):
self._width = config['width']
self._height = config['height']
-
+
@property
def width(self):
return self._width
-
+
@property
def height(self):
return self._height
-
+
def to_dict(self):
return {
'width': self.width,
@@ -270,19 +301,19 @@ def __init__(self, config):
self._type = config['type']
self._device = config['device']
self._num_threads = config['num_threads']
-
+
@property
def type(self):
return self._type
-
+
@property
def device(self):
return self._device
-
+
@property
def num_threads(self):
return self._num_threads
-
+
def to_dict(self):
return {
'type': self.type,
@@ -294,15 +325,15 @@ class LoggerConfig():
def __init__(self, config):
self._default = config['default'].upper()
self._logs = {k: v.upper() for k, v in config['logs'].items()}
-
+
@property
def default(self):
return self._default
-
+
@property
def logs(self):
return self._logs
-
+
def to_dict(self):
return {
'default': self.default,
@@ -317,38 +348,44 @@ def __init__(self, config):
self._client_id = config['client_id']
self._user = config.get('user')
self._password = config.get('password')
+ self._stats_interval = config.get('stats_interval')
@property
def host(self):
return self._host
-
+
@property
def port(self):
return self._port
-
+
@property
def topic_prefix(self):
return self._topic_prefix
-
+
@property
def client_id(self):
return self._client_id
-
+
@property
def user(self):
return self._user
-
+
@property
def password(self):
return self._password
+ @property
+ def stats_interval(self):
+ return self._stats_interval
+
def to_dict(self):
return {
'host': self.host,
'port': self.port,
'topic_prefix': self.topic_prefix,
'client_id': self.client_id,
- 'user': self.user
+ 'user': self.user,
+ 'stats_interval': self.stats_interval
}
class CameraInput():
@@ -358,19 +395,19 @@ def __init__(self, global_config, ffmpeg_input):
self._global_args = ffmpeg_input.get('global_args', global_config['global_args'])
self._hwaccel_args = ffmpeg_input.get('hwaccel_args', global_config['hwaccel_args'])
self._input_args = ffmpeg_input.get('input_args', global_config['input_args'])
-
+
@property
def path(self):
return self._path
-
+
@property
def roles(self):
return self._roles
-
+
@property
def global_args(self):
return self._global_args if isinstance(self._global_args, list) else self._global_args.split(' ')
-
+
@property
def hwaccel_args(self):
return self._hwaccel_args if isinstance(self._hwaccel_args, list) else self._hwaccel_args.split(' ')
@@ -383,44 +420,44 @@ class CameraFfmpegConfig():
def __init__(self, global_config, config):
self._inputs = [CameraInput(global_config, i) for i in config['inputs']]
self._output_args = config.get('output_args', global_config['output_args'])
-
+
@property
def inputs(self):
return self._inputs
-
+
@property
def output_args(self):
return {k: v if isinstance(v, list) else v.split(' ') for k, v in self._output_args.items()}
-class SaveClipsRetainConfig():
+class RetainConfig():
def __init__(self, global_config, config):
self._default = config.get('default', global_config.get('default'))
self._objects = config.get('objects', global_config.get('objects', {}))
-
+
@property
def default(self):
return self._default
-
+
@property
def objects(self):
return self._objects
-
+
def to_dict(self):
return {
'default': self.default,
'objects': self.objects
}
-class SaveClipsConfig():
+class ClipsConfig():
def __init__(self, config):
self._max_seconds = config['max_seconds']
self._tmpfs_cache_size = config.get('tmpfs_cache_size', '').strip()
- self._retain = SaveClipsRetainConfig(config['retain'], config['retain'])
+ self._retain = RetainConfig(config['retain'], config['retain'])
@property
def max_seconds(self):
return self._max_seconds
-
+
@property
def tmpfs_cache_size(self):
return self._tmpfs_cache_size
@@ -428,7 +465,7 @@ def tmpfs_cache_size(self):
@property
def retain(self):
return self._retain
-
+
def to_dict(self):
return {
'max_seconds': self.max_seconds,
@@ -436,6 +473,19 @@ def to_dict(self):
'retain': self.retain.to_dict()
}
+class SnapshotsConfig():
+ def __init__(self, config):
+ self._retain = RetainConfig(config['retain'], config['retain'])
+
+ @property
+ def retain(self):
+ return self._retain
+
+ def to_dict(self):
+ return {
+ 'retain': self.retain.to_dict()
+ }
+
class RecordConfig():
def __init__(self, global_config, config):
self._enabled = config.get('enabled', global_config['enabled'])
@@ -444,11 +494,11 @@ def __init__(self, global_config, config):
@property
def enabled(self):
return self._enabled
-
+
@property
def retain_days(self):
return self._retain_days
-
+
def to_dict(self):
return {
'enabled': self.enabled,
@@ -456,12 +506,14 @@ def to_dict(self):
}
class FilterConfig():
- def __init__(self, config):
- self._min_area = config['min_area']
- self._max_area = config['max_area']
- self._threshold = config['threshold']
- self._min_score = config.get('min_score')
-
+ def __init__(self, global_config, config, frame_shape=None):
+ self._min_area = config.get('min_area', global_config.get('min_area', 0))
+ self._max_area = config.get('max_area', global_config.get('max_area', 24000000))
+ self._threshold = config.get('threshold', global_config.get('threshold', 0.7))
+ self._min_score = config.get('min_score', global_config.get('min_score', 0.5))
+ self._raw_mask = config.get('mask')
+ self._mask = create_mask(frame_shape, self._raw_mask) if self._raw_mask else None
+
@property
def min_area(self):
return self._min_area
@@ -473,31 +525,33 @@ def max_area(self):
@property
def threshold(self):
return self._threshold
-
+
@property
def min_score(self):
return self._min_score
-
+
+ @property
+ def mask(self):
+ return self._mask
+
def to_dict(self):
return {
'min_area': self.min_area,
'max_area': self.max_area,
'threshold': self.threshold,
- 'min_score': self.min_score
+ 'min_score': self.min_score,
+ 'mask': self._raw_mask
}
class ObjectConfig():
- def __init__(self, global_config, config):
- self._track = config.get('track', global_config['track'])
- if 'filters' in config:
- self._filters = { name: FilterConfig(c) for name, c in config['filters'].items() }
- else:
- self._filters = { name: FilterConfig(c) for name, c in global_config['filters'].items() }
-
+ def __init__(self, global_config, config, frame_shape):
+ self._track = config.get('track', global_config.get('track', DEFAULT_TRACKED_OBJECTS))
+ self._filters = { name: FilterConfig(global_config.get('filters').get(name, {}), config.get('filters').get(name, {}), frame_shape) for name in self._track }
+
@property
def track(self):
return self._track
-
+
@property
def filters(self) -> Dict[str, FilterConfig]:
return self._filters
@@ -509,58 +563,101 @@ def to_dict(self):
}
class CameraSnapshotsConfig():
- def __init__(self, config):
- self._show_timestamp = config['show_timestamp']
- self._draw_zones = config['draw_zones']
- self._draw_bounding_boxes = config['draw_bounding_boxes']
- self._crop_to_region = config['crop_to_region']
+ def __init__(self, global_config, config):
+ self._enabled = config['enabled']
+ self._timestamp = config['timestamp']
+ self._bounding_box = config['bounding_box']
+ self._crop = config['crop']
self._height = config.get('height')
+ self._retain = RetainConfig(global_config['snapshots']['retain'], config['retain'])
@property
- def show_timestamp(self):
- return self._show_timestamp
-
+ def enabled(self):
+ return self._enabled
+
@property
- def draw_zones(self):
- return self._draw_zones
+ def timestamp(self):
+ return self._timestamp
@property
- def draw_bounding_boxes(self):
- return self._draw_bounding_boxes
+ def bounding_box(self):
+ return self._bounding_box
@property
- def crop_to_region(self):
- return self._crop_to_region
+ def crop(self):
+ return self._crop
@property
def height(self):
return self._height
+ @property
+ def retain(self):
+ return self._retain
+
+ def to_dict(self):
+ return {
+ 'enabled': self.enabled,
+ 'timestamp': self.timestamp,
+ 'bounding_box': self.bounding_box,
+ 'crop': self.crop,
+ 'height': self.height,
+ 'retain': self.retain.to_dict()
+ }
+
+class CameraMqttConfig():
+ def __init__(self, config):
+ self._enabled = config['enabled']
+ self._timestamp = config['timestamp']
+ self._bounding_box = config['bounding_box']
+ self._crop = config['crop']
+ self._height = config.get('height')
+
+ @property
+ def enabled(self):
+ return self._enabled
+
+ @property
+ def timestamp(self):
+ return self._timestamp
+
+ @property
+ def bounding_box(self):
+ return self._bounding_box
+
+ @property
+ def crop(self):
+ return self._crop
+
+ @property
+ def height(self):
+ return self._height
+
def to_dict(self):
return {
- 'show_timestamp': self.show_timestamp,
- 'draw_zones': self.draw_zones,
- 'draw_bounding_boxes': self.draw_bounding_boxes,
- 'crop_to_region': self.crop_to_region,
+ 'enabled': self.enabled,
+ 'timestamp': self.timestamp,
+ 'bounding_box': self.bounding_box,
+ 'crop': self.crop,
'height': self.height
}
-class CameraSaveClipsConfig():
+class CameraClipsConfig():
def __init__(self, global_config, config):
self._enabled = config['enabled']
self._pre_capture = config['pre_capture']
self._post_capture = config['post_capture']
- self._objects = config.get('objects', global_config['objects']['track'])
- self._retain = SaveClipsRetainConfig(global_config['save_clips']['retain'], config['retain'])
+ self._objects = config.get('objects')
+ self._retain = RetainConfig(global_config['clips']['retain'], config['retain'])
@property
def enabled(self):
return self._enabled
-
+
@property
def pre_capture(self):
return self._pre_capture
-
+
@property
def post_capture(self):
return self._post_capture
@@ -568,11 +665,11 @@ def post_capture(self):
@property
def objects(self):
return self._objects
-
+
@property
def retain(self):
return self._retain
-
+
def to_dict(self):
return {
'enabled': self.enabled,
@@ -585,24 +682,35 @@ def to_dict(self):
class CameraRtmpConfig():
def __init__(self, global_config, config):
self._enabled = config['enabled']
-
+
@property
def enabled(self):
return self._enabled
-
+
def to_dict(self):
return {
'enabled': self.enabled,
}
class MotionConfig():
- def __init__(self, global_config, config, camera_height: int):
+ def __init__(self, global_config, config, frame_shape):
+ self._raw_mask = config.get('mask')
+ if self._raw_mask:
+ self._mask = create_mask(frame_shape, self._raw_mask)
+ else:
+ default_mask = np.zeros(frame_shape, np.uint8)
+ default_mask[:] = 255
+ self._mask = default_mask
self._threshold = config.get('threshold', global_config.get('threshold', 25))
self._contour_area = config.get('contour_area', global_config.get('contour_area', 100))
self._delta_alpha = config.get('delta_alpha', global_config.get('delta_alpha', 0.2))
self._frame_alpha = config.get('frame_alpha', global_config.get('frame_alpha', 0.2))
- self._frame_height = config.get('frame_height', global_config.get('frame_height', camera_height//6))
-
+ self._frame_height = config.get('frame_height', global_config.get('frame_height', frame_shape[0]//6))
+
+ @property
+ def mask(self):
+ return self._mask
+
@property
def threshold(self):
return self._threshold
@@ -622,9 +730,10 @@ def frame_alpha(self):
@property
def frame_height(self):
return self._frame_height
-
+
def to_dict(self):
return {
+ 'mask': self._raw_mask,
'threshold': self.threshold,
'contour_area': self.contour_area,
'delta_alpha': self.delta_alpha,
@@ -636,14 +745,20 @@ def to_dict(self):
class DetectConfig():
def __init__(self, global_config, config, camera_fps):
+ self._enabled = config['enabled']
self._max_disappeared = config.get('max_disappeared', global_config.get('max_disappeared', camera_fps*2))
-
+
+ @property
+ def enabled(self):
+ return self._enabled
+
@property
def max_disappeared(self):
return self._max_disappeared
-
+
def to_dict(self):
return {
+ 'enabled': self.enabled,
'max_disappeared': self._max_disappeared,
}
@@ -660,36 +775,37 @@ def __init__(self, name, config):
else:
print(f"Unable to parse zone coordinates for {name}")
self._contour = np.array([])
-
+
self._color = (0,0,0)
-
+
@property
def coordinates(self):
return self._coordinates
-
+
@property
def contour(self):
return self._contour
-
+
@contour.setter
def contour(self, val):
self._contour = val
-
+
@property
def color(self):
return self._color
-
+
@color.setter
def color(self, val):
self._color = val
-
+
@property
def filters(self):
return self._filters
-
+
def to_dict(self):
return {
- 'filters': {k: f.to_dict() for k, f in self.filters.items()}
+ 'filters': {k: f.to_dict() for k, f in self.filters.items()},
+ 'coordinates': self._coordinates
}
class CameraConfig():
@@ -701,15 +817,15 @@ def __init__(self, name, config, global_config):
self._frame_shape = (self._height, self._width)
self._frame_shape_yuv = (self._frame_shape[0]*3//2, self._frame_shape[1])
self._fps = config.get('fps')
- self._mask = self._create_mask(config.get('mask'))
self._best_image_timeout = config['best_image_timeout']
self._zones = { name: ZoneConfig(name, z) for name, z in config['zones'].items() }
- self._save_clips = CameraSaveClipsConfig(global_config, config['save_clips'])
+ self._clips = CameraClipsConfig(global_config, config['clips'])
self._record = RecordConfig(global_config['record'], config['record'])
self._rtmp = CameraRtmpConfig(global_config, config['rtmp'])
- self._snapshots = CameraSnapshotsConfig(config['snapshots'])
- self._objects = ObjectConfig(global_config['objects'], config.get('objects', {}))
- self._motion = MotionConfig(global_config['motion'], config['motion'], self._height)
+ self._snapshots = CameraSnapshotsConfig(global_config, config['snapshots'])
+ self._mqtt = CameraMqttConfig(config['mqtt'])
+ self._objects = ObjectConfig(global_config['objects'], config.get('objects', {}), self._frame_shape)
+ self._motion = MotionConfig(global_config['motion'], config['motion'], self._frame_shape)
self._detect = DetectConfig(global_config['detect'], config['detect'], config.get('fps', 5))
self._ffmpeg_cmds = []
@@ -726,31 +842,6 @@ def __init__(self, name, config, global_config):
self._set_zone_colors(self._zones)
- def _create_mask(self, mask):
- mask_img = np.zeros(self.frame_shape, np.uint8)
- mask_img[:] = 255
-
- if isinstance(mask, list):
- for m in mask:
- self._add_mask(m, mask_img)
-
- elif isinstance(mask, str):
- self._add_mask(mask, mask_img)
-
- return mask_img
-
- def _add_mask(self, mask, mask_img):
- if mask.startswith('poly,'):
- points = mask.split(',')[1:]
- contour = np.array([[int(points[i]), int(points[i+1])] for i in range(0, len(points), 2)])
- cv2.fillPoly(mask_img, pts=[contour], color=(0))
- else:
- mask_file = cv2.imread(f"/config/{mask}", cv2.IMREAD_GRAYSCALE)
- if mask_file is None or mask_file.size == 0:
- logger.warning(f"Could not read mask file {mask}")
- else:
- mask_img[np.where(mask_file==[0])] = [0]
-
def _get_ffmpeg_cmd(self, ffmpeg_input):
ffmpeg_output_args = []
if 'detect' in ffmpeg_input.roles:
@@ -761,7 +852,7 @@ def _get_ffmpeg_cmd(self, ffmpeg_input):
ffmpeg_output_args = self.ffmpeg.output_args['rtmp'] + [
f"rtmp://127.0.0.1/live/{self.name}"
] + ffmpeg_output_args
- if 'clips' in ffmpeg_input.roles and self.save_clips.enabled:
+ if 'clips' in ffmpeg_input.roles:
ffmpeg_output_args = self.ffmpeg.output_args['clips'] + [
f"{os.path.join(CACHE_DIR, self.name)}-%Y%m%d%H%M%S.mp4"
] + ffmpeg_output_args
@@ -769,7 +860,7 @@ def _get_ffmpeg_cmd(self, ffmpeg_input):
ffmpeg_output_args = self.ffmpeg.output_args['record'] + [
f"{os.path.join(RECORD_DIR, self.name)}-%Y%m%d%H%M%S.mp4"
] + ffmpeg_output_args
-
+
# if there arent any outputs enabled for this input
if len(ffmpeg_output_args) == 0:
return None
@@ -780,9 +871,9 @@ def _get_ffmpeg_cmd(self, ffmpeg_input):
ffmpeg_input.input_args +
['-i', ffmpeg_input.path] +
ffmpeg_output_args)
-
+
return [part for part in cmd if part != '']
-
+
def _set_zone_colors(self, zones: Dict[str, ZoneConfig]):
# set colors for zones
all_zone_names = zones.keys()
@@ -790,10 +881,10 @@ def _set_zone_colors(self, zones: Dict[str, ZoneConfig]):
colors = plt.cm.get_cmap('tab10', len(all_zone_names))
for i, zone in enumerate(all_zone_names):
zone_colors[zone] = tuple(int(round(255 * c)) for c in colors(i)[:3])
-
+
for name, zone in zones.items():
zone.color = zone_colors[name]
-
+
@property
def name(self):
return self._name
@@ -801,55 +892,55 @@ def name(self):
@property
def ffmpeg(self):
return self._ffmpeg
-
+
@property
def height(self):
return self._height
-
+
@property
def width(self):
return self._width
-
+
@property
def fps(self):
return self._fps
-
- @property
- def mask(self):
- return self._mask
-
+
@property
def best_image_timeout(self):
return self._best_image_timeout
-
+
@property
def zones(self)-> Dict[str, ZoneConfig]:
return self._zones
-
+
@property
- def save_clips(self):
- return self._save_clips
-
+ def clips(self):
+ return self._clips
+
@property
def record(self):
return self._record
-
+
@property
def rtmp(self):
return self._rtmp
-
+
@property
def snapshots(self):
return self._snapshots
-
+
+ @property
+ def mqtt(self):
+ return self._mqtt
+
@property
def objects(self):
return self._objects
-
+
@property
def motion(self):
return self._motion
-
+
@property
def detect(self):
return self._detect
@@ -874,10 +965,11 @@ def to_dict(self):
'fps': self.fps,
'best_image_timeout': self.best_image_timeout,
'zones': {k: z.to_dict() for k, z in self.zones.items()},
- 'save_clips': self.save_clips.to_dict(),
+ 'clips': self.clips.to_dict(),
'record': self.record.to_dict(),
'rtmp': self.rtmp.to_dict(),
'snapshots': self.snapshots.to_dict(),
+ 'mqtt': self.mqtt.to_dict(),
'objects': self.objects.to_dict(),
'motion': self.motion.to_dict(),
'detect': self.detect.to_dict(),
@@ -892,7 +984,7 @@ def __init__(self, config_file=None, config=None):
raise ValueError('config or config_file must be defined')
elif not config_file is None:
config = self._load_file(config_file)
-
+
config = FRIGATE_CONFIG_SCHEMA(config)
config = self._sub_env_vars(config)
@@ -901,56 +993,60 @@ def __init__(self, config_file=None, config=None):
self._model = ModelConfig(config['model'])
self._detectors = { name: DetectorConfig(d) for name, d in config['detectors'].items() }
self._mqtt = MqttConfig(config['mqtt'])
- self._save_clips = SaveClipsConfig(config['save_clips'])
+ self._clips = ClipsConfig(config['clips'])
+ self._snapshots = SnapshotsConfig(config['snapshots'])
self._cameras = { name: CameraConfig(name, c, config) for name, c in config['cameras'].items() }
self._logger = LoggerConfig(config['logger'])
+ self._environment_vars = config['environment_vars']
def _sub_env_vars(self, config):
frigate_env_vars = {k: v for k, v in os.environ.items() if k.startswith('FRIGATE_')}
if 'password' in config['mqtt']:
- config['mqtt']['password'] = config['mqtt']['password'].format(**frigate_env_vars)
-
+ config['mqtt']['password'] = config['mqtt']['password'].format(**frigate_env_vars)
+
for camera in config['cameras'].values():
for i in camera['ffmpeg']['inputs']:
i['path'] = i['path'].format(**frigate_env_vars)
-
+
return config
def _load_file(self, config_file):
with open(config_file) as f:
raw_config = f.read()
-
- if config_file.endswith(".yml"):
+
+ if config_file.endswith(".yml"):
config = yaml.safe_load(raw_config)
elif config_file.endswith(".json"):
config = json.loads(raw_config)
-
+
return config
-
+
def to_dict(self):
return {
'database': self.database.to_dict(),
'model': self.model.to_dict(),
'detectors': {k: d.to_dict() for k, d in self.detectors.items()},
'mqtt': self.mqtt.to_dict(),
- 'save_clips': self.save_clips.to_dict(),
+ 'clips': self.clips.to_dict(),
+ 'snapshots': self.snapshots.to_dict(),
'cameras': {k: c.to_dict() for k, c in self.cameras.items()},
- 'logger': self.logger.to_dict()
+ 'logger': self.logger.to_dict(),
+ 'environment_vars': self._environment_vars
}
-
+
@property
def database(self):
return self._database
-
+
@property
def model(self):
return self._model
-
+
@property
def detectors(self) -> Dict[str, DetectorConfig]:
return self._detectors
-
+
@property
def logger(self):
return self._logger
@@ -958,11 +1054,19 @@ def logger(self):
@property
def mqtt(self):
return self._mqtt
-
+
@property
- def save_clips(self):
- return self._save_clips
+ def clips(self):
+ return self._clips
+
+ @property
+ def snapshots(self):
+ return self._snapshots
@property
def cameras(self) -> Dict[str, CameraConfig]:
return self._cameras
+
+ @property
+ def environment_vars(self):
+ return self._environment_vars
diff --git a/frigate/edgetpu.py b/frigate/edgetpu.py
--- a/frigate/edgetpu.py
+++ b/frigate/edgetpu.py
@@ -8,6 +8,7 @@
import signal
from abc import ABC, abstractmethod
from multiprocessing.connection import Connection
+from setproctitle import setproctitle
from typing import Dict
import numpy as np
@@ -61,16 +62,15 @@ def __init__(self, tf_device=None, num_threads=3, labels=None):
logger.info(f"Attempting to load TPU as {device_config['device']}")
edge_tpu_delegate = load_delegate('libedgetpu.so.1.0', device_config)
logger.info("TPU found")
+ self.interpreter = tflite.Interpreter(
+ model_path='/edgetpu_model.tflite',
+ experimental_delegates=[edge_tpu_delegate])
except ValueError:
- logger.info("No EdgeTPU detected. Falling back to CPU.")
-
- if edge_tpu_delegate is None:
- self.interpreter = tflite.Interpreter(
- model_path='/cpu_model.tflite', num_threads=num_threads)
+ logger.info("No EdgeTPU detected.")
+ raise
else:
self.interpreter = tflite.Interpreter(
- model_path='/edgetpu_model.tflite',
- experimental_delegates=[edge_tpu_delegate])
+ model_path='/cpu_model.tflite', num_threads=num_threads)
self.interpreter.allocate_tensors()
@@ -110,6 +110,7 @@ def run_detector(name: str, detection_queue: mp.Queue, out_events: Dict[str, mp.
threading.current_thread().name = f"detector:{name}"
logger = logging.getLogger(f"detector.{name}")
logger.info(f"Starting detection process: {os.getpid()}")
+ setproctitle(f"frigate.detector.{name}")
listen()
stop_event = mp.Event()
diff --git a/frigate/events.py b/frigate/events.py
--- a/frigate/events.py
+++ b/frigate/events.py
@@ -88,7 +88,7 @@ def refresh_cache(self):
earliest_event = datetime.datetime.now().timestamp()
# if the earliest event exceeds the max seconds, cap it
- max_seconds = self.config.save_clips.max_seconds
+ max_seconds = self.config.clips.max_seconds
if datetime.datetime.now().timestamp()-earliest_event > max_seconds:
earliest_event = datetime.datetime.now().timestamp()-max_seconds
@@ -102,6 +102,7 @@ def create_clip(self, camera, event_data, pre_capture, post_capture):
sorted_clips = sorted([c for c in self.cached_clips.values() if c['camera'] == camera], key = lambda i: i['start_time'])
while len(sorted_clips) == 0 or sorted_clips[-1]['start_time'] + sorted_clips[-1]['duration'] < event_data['end_time']+post_capture:
+ logger.debug(f"No cache clips for {camera}. Waiting...")
time.sleep(5)
self.refresh_cache()
# get all clips from the camera with the event sorted
@@ -147,7 +148,8 @@ def create_clip(self, camera, event_data, pre_capture, post_capture):
p = sp.run(ffmpeg_cmd, input="\n".join(playlist_lines), encoding='ascii', capture_output=True)
if p.returncode != 0:
logger.error(p.stderr)
- return
+ return False
+ return True
def run(self):
while True:
@@ -162,28 +164,20 @@ def run(self):
self.refresh_cache()
continue
+ logger.debug(f"Event received: {event_type} {camera} {event_data['id']}")
self.refresh_cache()
- save_clips_config = self.config.cameras[camera].save_clips
-
- # if save clips is not enabled for this camera, just continue
- if not save_clips_config.enabled:
- if event_type == 'end':
- self.event_processed_queue.put((event_data['id'], camera))
- continue
-
- # if specific objects are listed for this camera, only save clips for them
- if not event_data['label'] in save_clips_config.objects:
- if event_type == 'end':
- self.event_processed_queue.put((event_data['id'], camera))
- continue
-
if event_type == 'start':
self.events_in_process[event_data['id']] = event_data
if event_type == 'end':
- if len(self.cached_clips) > 0 and not event_data['false_positive']:
- self.create_clip(camera, event_data, save_clips_config.pre_capture, save_clips_config.post_capture)
+ clips_config = self.config.cameras[camera].clips
+
+ if not event_data['false_positive']:
+ clip_created = False
+ if clips_config.enabled and (clips_config.objects is None or event_data['label'] in clips_config.objects):
+ clip_created = self.create_clip(camera, event_data, clips_config.pre_capture, clips_config.post_capture)
+
Event.create(
id=event_data['id'],
label=event_data['label'],
@@ -193,7 +187,9 @@ def run(self):
top_score=event_data['top_score'],
false_positive=event_data['false_positive'],
zones=list(event_data['entered_zones']),
- thumbnail=event_data['thumbnail']
+ thumbnail=event_data['thumbnail'],
+ has_clip=clip_created,
+ has_snapshot=event_data['has_snapshot'],
)
del self.events_in_process[event_data['id']]
self.event_processed_queue.put((event_data['id'], camera))
@@ -204,30 +200,60 @@ def __init__(self, config: FrigateConfig, stop_event):
self.name = 'event_cleanup'
self.config = config
self.stop_event = stop_event
+ self.camera_keys = list(self.config.cameras.keys())
- def run(self):
- counter = 0
- while(True):
- if self.stop_event.is_set():
- logger.info(f"Exiting event cleanup...")
- break
-
- # only expire events every 10 minutes, but check for stop events every 10 seconds
- time.sleep(10)
- counter = counter + 1
- if counter < 60:
- continue
- counter = 0
-
- camera_keys = list(self.config.cameras.keys())
+ def expire(self, media):
+ ## Expire events from unlisted cameras based on the global config
+ if media == 'clips':
+ retain_config = self.config.clips.retain
+ file_extension = 'mp4'
+ update_params = {'has_clip': False}
+ else:
+ retain_config = self.config.snapshots.retain
+ file_extension = 'jpg'
+ update_params = {'has_snapshot': False}
+
+ distinct_labels = (Event.select(Event.label)
+ .where(Event.camera.not_in(self.camera_keys))
+ .distinct())
+
+ # loop over object types in db
+ for l in distinct_labels:
+ # get expiration time for this label
+ expire_days = retain_config.objects.get(l.label, retain_config.default)
+ expire_after = (datetime.datetime.now() - datetime.timedelta(days=expire_days)).timestamp()
+ # grab all events after specific time
+ expired_events = (
+ Event.select()
+ .where(Event.camera.not_in(self.camera_keys),
+ Event.start_time < expire_after,
+ Event.label == l.label)
+ )
+ # delete the media from disk
+ for event in expired_events:
+ media_name = f"{event.camera}-{event.id}"
+ media = Path(f"{os.path.join(CLIPS_DIR, media_name)}.{file_extension}")
+ media.unlink(missing_ok=True)
+ # update the clips attribute for the db entry
+ update_query = (
+ Event.update(update_params)
+ .where(Event.camera.not_in(self.camera_keys),
+ Event.start_time < expire_after,
+ Event.label == l.label)
+ )
+ update_query.execute()
- # Expire events from unlisted cameras based on the global config
- retain_config = self.config.save_clips.retain
-
+ ## Expire events from cameras based on the camera config
+ for name, camera in self.config.cameras.items():
+ if media == 'clips':
+ retain_config = camera.clips.retain
+ else:
+ retain_config = camera.snapshots.retain
+ # get distinct objects in database for this camera
distinct_labels = (Event.select(Event.label)
- .where(Event.camera.not_in(camera_keys))
- .distinct())
-
+ .where(Event.camera == name)
+ .distinct())
+
# loop over object types in db
for l in distinct_labels:
# get expiration time for this label
@@ -236,54 +262,45 @@ def run(self):
# grab all events after specific time
expired_events = (
Event.select()
- .where(Event.camera.not_in(camera_keys),
+ .where(Event.camera == name,
Event.start_time < expire_after,
Event.label == l.label)
)
# delete the grabbed clips from disk
for event in expired_events:
- clip_name = f"{event.camera}-{event.id}"
- clip = Path(f"{os.path.join(CLIPS_DIR, clip_name)}.mp4")
- clip.unlink(missing_ok=True)
- # delete the event for this type from the db
- delete_query = (
- Event.delete()
- .where(Event.camera.not_in(camera_keys),
+ media_name = f"{event.camera}-{event.id}"
+ media = Path(f"{os.path.join(CLIPS_DIR, media_name)}.{file_extension}")
+ media.unlink(missing_ok=True)
+ # update the clips attribute for the db entry
+ update_query = (
+ Event.update(update_params)
+ .where( Event.camera == name,
Event.start_time < expire_after,
Event.label == l.label)
)
- delete_query.execute()
+ update_query.execute()
+
+ def run(self):
+ counter = 0
+ while(True):
+ if self.stop_event.is_set():
+ logger.info(f"Exiting event cleanup...")
+ break
+
+ # only expire events every 10 minutes, but check for stop events every 10 seconds
+ time.sleep(10)
+ counter = counter + 1
+ if counter < 60:
+ continue
+ counter = 0
- # Expire events from cameras based on the camera config
- for name, camera in self.config.cameras.items():
- retain_config = camera.save_clips.retain
- # get distinct objects in database for this camera
- distinct_labels = (Event.select(Event.label)
- .where(Event.camera == name)
- .distinct())
+ self.expire('clips')
+ self.expire('snapshots')
- # loop over object types in db
- for l in distinct_labels:
- # get expiration time for this label
- expire_days = retain_config.objects.get(l.label, retain_config.default)
- expire_after = (datetime.datetime.now() - datetime.timedelta(days=expire_days)).timestamp()
- # grab all events after specific time
- expired_events = (
- Event.select()
- .where(Event.camera == name,
- Event.start_time < expire_after,
- Event.label == l.label)
- )
- # delete the grabbed clips from disk
- for event in expired_events:
- clip_name = f"{event.camera}-{event.id}"
- clip = Path(f"{os.path.join(CLIPS_DIR, clip_name)}.mp4")
- clip.unlink(missing_ok=True)
- # delete the event for this type from the db
- delete_query = (
- Event.delete()
- .where( Event.camera == name,
- Event.start_time < expire_after,
- Event.label == l.label)
- )
- delete_query.execute()
+ # drop events from db where has_clip and has_snapshot are false
+ delete_query = (
+ Event.delete()
+ .where( Event.has_clip == False,
+ Event.has_snapshot == False)
+ )
+ delete_query.execute()
diff --git a/frigate/http.py b/frigate/http.py
--- a/frigate/http.py
+++ b/frigate/http.py
@@ -13,6 +13,7 @@
from playhouse.shortcuts import model_to_dict
from frigate.models import Event
+from frigate.stats import stats_snapshot
from frigate.util import calculate_region
from frigate.version import VERSION
@@ -20,7 +21,7 @@
bp = Blueprint('frigate', __name__)
-def create_app(frigate_config, database: SqliteDatabase, camera_metrics, detectors, detected_frames_processor):
+def create_app(frigate_config, database: SqliteDatabase, stats_tracking, detected_frames_processor):
app = Flask(__name__)
@app.before_request
@@ -33,10 +34,9 @@ def _db_close(exc):
database.close()
app.frigate_config = frigate_config
- app.camera_metrics = camera_metrics
- app.detectors = detectors
+ app.stats_tracking = stats_tracking
app.detected_frames_processor = detected_frames_processor
-
+
app.register_blueprint(bp)
return app
@@ -47,18 +47,33 @@ def is_healthy():
@bp.route('/events/summary')
def events_summary():
+ has_clip = request.args.get('has_clip', type=int)
+ has_snapshot = request.args.get('has_snapshot', type=int)
+
+ clauses = []
+
+ if not has_clip is None:
+ clauses.append((Event.has_clip == has_clip))
+
+ if not has_snapshot is None:
+ clauses.append((Event.has_snapshot == has_snapshot))
+
+ if len(clauses) == 0:
+ clauses.append((1 == 1))
+
groups = (
Event
.select(
- Event.camera,
- Event.label,
- fn.strftime('%Y-%m-%d', fn.datetime(Event.start_time, 'unixepoch', 'localtime')).alias('day'),
+ Event.camera,
+ Event.label,
+ fn.strftime('%Y-%m-%d', fn.datetime(Event.start_time, 'unixepoch', 'localtime')).alias('day'),
Event.zones,
fn.COUNT(Event.id).alias('count')
)
+ .where(reduce(operator.and_, clauses))
.group_by(
- Event.camera,
- Event.label,
+ Event.camera,
+ Event.label,
fn.strftime('%Y-%m-%d', fn.datetime(Event.start_time, 'unixepoch', 'localtime')),
Event.zones
)
@@ -73,7 +88,7 @@ def event(id):
except DoesNotExist:
return "Event not found", 404
[email protected]('/events/<id>/snapshot.jpg')
[email protected]('/events/<id>/thumbnail.jpg')
def event_snapshot(id):
format = request.args.get('format', 'ios')
thumbnail_bytes = None
@@ -90,18 +105,18 @@ def event_snapshot(id):
thumbnail_bytes = tracked_obj.get_jpg_bytes()
except:
return "Event not found", 404
-
+
if thumbnail_bytes is None:
return "Event not found", 404
-
+
# android notifications prefer a 2:1 ratio
if format == 'android':
jpg_as_np = np.frombuffer(thumbnail_bytes, dtype=np.uint8)
img = cv2.imdecode(jpg_as_np, flags=1)
thumbnail = cv2.copyMakeBorder(img, 0, 0, int(img.shape[1]*0.5), int(img.shape[1]*0.5), cv2.BORDER_CONSTANT, (0,0,0))
- ret, jpg = cv2.imencode('.jpg', thumbnail)
+ ret, jpg = cv2.imencode('.jpg', thumbnail)
thumbnail_bytes = jpg.tobytes()
-
+
response = make_response(thumbnail_bytes)
response.headers['Content-Type'] = 'image/jpg'
return response
@@ -114,24 +129,32 @@ def events():
zone = request.args.get('zone')
after = request.args.get('after', type=int)
before = request.args.get('before', type=int)
+ has_clip = request.args.get('has_clip', type=int)
+ has_snapshot = request.args.get('has_snapshot', type=int)
clauses = []
-
+
if camera:
clauses.append((Event.camera == camera))
-
+
if label:
clauses.append((Event.label == label))
-
+
if zone:
clauses.append((Event.zones.cast('text') % f"*\"{zone}\"*"))
-
+
if after:
clauses.append((Event.start_time >= after))
-
+
if before:
clauses.append((Event.start_time <= before))
+ if not has_clip is None:
+ clauses.append((Event.has_clip == has_clip))
+
+ if not has_snapshot is None:
+ clauses.append((Event.has_snapshot == has_snapshot))
+
if len(clauses) == 0:
clauses.append((1 == 1))
@@ -152,31 +175,7 @@ def version():
@bp.route('/stats')
def stats():
- camera_metrics = current_app.camera_metrics
- stats = {}
-
- total_detection_fps = 0
-
- for name, camera_stats in camera_metrics.items():
- total_detection_fps += camera_stats['detection_fps'].value
- stats[name] = {
- 'camera_fps': round(camera_stats['camera_fps'].value, 2),
- 'process_fps': round(camera_stats['process_fps'].value, 2),
- 'skipped_fps': round(camera_stats['skipped_fps'].value, 2),
- 'detection_fps': round(camera_stats['detection_fps'].value, 2),
- 'pid': camera_stats['process'].pid,
- 'capture_pid': camera_stats['capture_process'].pid
- }
-
- stats['detectors'] = {}
- for name, detector in current_app.detectors.items():
- stats['detectors'][name] = {
- 'inference_speed': round(detector.avg_inference_speed.value*1000, 2),
- 'detection_start': detector.detection_start.value,
- 'pid': detector.detect_process.pid
- }
- stats['detection_fps'] = round(total_detection_fps, 2)
-
+ stats = stats_snapshot(current_app.stats_tracking)
return jsonify(stats)
@bp.route('/<camera_name>/<label>/best.jpg')
@@ -188,13 +187,13 @@ def best(camera_name, label):
best_frame = np.zeros((720,1280,3), np.uint8)
else:
best_frame = cv2.cvtColor(best_frame, cv2.COLOR_YUV2BGR_I420)
-
+
crop = bool(request.args.get('crop', 0, type=int))
if crop:
box = best_object.get('box', (0,0,300,300))
region = calculate_region(best_frame.shape, box[0], box[1], box[2], box[3], 1.1)
best_frame = best_frame[region[1]:region[3], region[0]:region[2]]
-
+
height = int(request.args.get('h', str(best_frame.shape[0])))
width = int(height*best_frame.shape[1]/best_frame.shape[0])
@@ -252,7 +251,7 @@ def latest_frame(camera_name):
return response
else:
return "Camera named {} not found".format(camera_name), 404
-
+
def imagestream(detected_frames_processor, camera_name, fps, height, draw_options):
while True:
# max out at specified FPS
diff --git a/frigate/log.py b/frigate/log.py
--- a/frigate/log.py
+++ b/frigate/log.py
@@ -6,6 +6,7 @@
import queue
import multiprocessing as mp
from logging import handlers
+from setproctitle import setproctitle
def listener_configurer():
@@ -31,6 +32,7 @@ def receiveSignal(signalNumber, frame):
signal.signal(signal.SIGINT, receiveSignal)
threading.current_thread().name = f"logger"
+ setproctitle("frigate.logger")
listener_configurer()
while True:
if stop_event.is_set() and log_queue.empty():
@@ -72,4 +74,4 @@ def run(self):
def close(self):
"""Close the write end of the pipe.
"""
- os.close(self.fdWrite)
\ No newline at end of file
+ os.close(self.fdWrite)
diff --git a/frigate/models.py b/frigate/models.py
--- a/frigate/models.py
+++ b/frigate/models.py
@@ -12,3 +12,5 @@ class Event(Model):
false_positive = BooleanField()
zones = JSONField()
thumbnail = TextField()
+ has_clip = BooleanField(default=True)
+ has_snapshot = BooleanField(default=True)
diff --git a/frigate/motion.py b/frigate/motion.py
--- a/frigate/motion.py
+++ b/frigate/motion.py
@@ -5,7 +5,7 @@
class MotionDetector():
- def __init__(self, frame_shape, mask, config: MotionConfig):
+ def __init__(self, frame_shape, config: MotionConfig):
self.config = config
self.frame_shape = frame_shape
self.resize_factor = frame_shape[0]/config.frame_height
@@ -14,7 +14,7 @@ def __init__(self, frame_shape, mask, config: MotionConfig):
self.avg_delta = np.zeros(self.motion_frame_size, np.float)
self.motion_frame_count = 0
self.frame_counter = 0
- resized_mask = cv2.resize(mask, dsize=(self.motion_frame_size[1], self.motion_frame_size[0]), interpolation=cv2.INTER_LINEAR)
+ resized_mask = cv2.resize(config.mask, dsize=(self.motion_frame_size[1], self.motion_frame_size[0]), interpolation=cv2.INTER_LINEAR)
self.mask = np.where(resized_mask==[0])
def detect(self, frame):
diff --git a/frigate/mqtt.py b/frigate/mqtt.py
--- a/frigate/mqtt.py
+++ b/frigate/mqtt.py
@@ -3,12 +3,87 @@
import paho.mqtt.client as mqtt
-from frigate.config import MqttConfig
+from frigate.config import FrigateConfig
logger = logging.getLogger(__name__)
-def create_mqtt_client(config: MqttConfig):
- client = mqtt.Client(client_id=config.client_id)
+def create_mqtt_client(config: FrigateConfig, camera_metrics):
+ mqtt_config = config.mqtt
+
+ def on_clips_command(client, userdata, message):
+ payload = message.payload.decode()
+ logger.debug(f"on_clips_toggle: {message.topic} {payload}")
+
+ camera_name = message.topic.split('/')[-3]
+ command = message.topic.split('/')[-1]
+
+ clips_settings = config.cameras[camera_name].clips
+
+ if payload == 'ON':
+ if not clips_settings.enabled:
+ logger.info(f"Turning on clips for {camera_name} via mqtt")
+ clips_settings._enabled = True
+ elif payload == 'OFF':
+ if clips_settings.enabled:
+ logger.info(f"Turning off clips for {camera_name} via mqtt")
+ clips_settings._enabled = False
+ else:
+ logger.warning(f"Received unsupported value at {message.topic}: {payload}")
+
+ if command == "set":
+ state_topic = f"{message.topic[:-4]}/state"
+ client.publish(state_topic, payload, retain=True)
+
+ def on_snapshots_command(client, userdata, message):
+ payload = message.payload.decode()
+ logger.debug(f"on_snapshots_toggle: {message.topic} {payload}")
+
+ camera_name = message.topic.split('/')[-3]
+ command = message.topic.split('/')[-1]
+
+ snapshots_settings = config.cameras[camera_name].snapshots
+
+ if payload == 'ON':
+ if not snapshots_settings.enabled:
+ logger.info(f"Turning on snapshots for {camera_name} via mqtt")
+ snapshots_settings._enabled = True
+ elif payload == 'OFF':
+ if snapshots_settings.enabled:
+ logger.info(f"Turning off snapshots for {camera_name} via mqtt")
+ snapshots_settings._enabled = False
+ else:
+ logger.warning(f"Received unsupported value at {message.topic}: {payload}")
+
+ if command == "set":
+ state_topic = f"{message.topic[:-4]}/state"
+ client.publish(state_topic, payload, retain=True)
+
+ def on_detect_command(client, userdata, message):
+ payload = message.payload.decode()
+ logger.debug(f"on_detect_toggle: {message.topic} {payload}")
+
+ camera_name = message.topic.split('/')[-3]
+ command = message.topic.split('/')[-1]
+
+ detect_settings = config.cameras[camera_name].detect
+
+ if payload == 'ON':
+ if not camera_metrics[camera_name]["detection_enabled"].value:
+ logger.info(f"Turning on detection for {camera_name} via mqtt")
+ camera_metrics[camera_name]["detection_enabled"].value = True
+ detect_settings._enabled = True
+ elif payload == 'OFF':
+ if camera_metrics[camera_name]["detection_enabled"].value:
+ logger.info(f"Turning off detection for {camera_name} via mqtt")
+ camera_metrics[camera_name]["detection_enabled"].value = False
+ detect_settings._enabled = False
+ else:
+ logger.warning(f"Received unsupported value at {message.topic}: {payload}")
+
+ if command == "set":
+ state_topic = f"{message.topic[:-4]}/state"
+ client.publish(state_topic, payload, retain=True)
+
def on_connect(client, userdata, flags, rc):
threading.current_thread().name = "mqtt"
if rc != 0:
@@ -22,15 +97,35 @@ def on_connect(client, userdata, flags, rc):
logger.error("Unable to connect to MQTT: Connection refused. Error code: " + str(rc))
logger.info("MQTT connected")
- client.publish(config.topic_prefix+'/available', 'online', retain=True)
+ client.publish(mqtt_config.topic_prefix+'/available', 'online', retain=True)
+
+ client = mqtt.Client(client_id=mqtt_config.client_id)
client.on_connect = on_connect
- client.will_set(config.topic_prefix+'/available', payload='offline', qos=1, retain=True)
- if not config.user is None:
- client.username_pw_set(config.user, password=config.password)
+ client.will_set(mqtt_config.topic_prefix+'/available', payload='offline', qos=1, retain=True)
+
+ # register callbacks
+ for name in config.cameras.keys():
+ client.message_callback_add(f"{mqtt_config.topic_prefix}/{name}/clips/#", on_clips_command)
+ client.message_callback_add(f"{mqtt_config.topic_prefix}/{name}/snapshots/#", on_snapshots_command)
+ client.message_callback_add(f"{mqtt_config.topic_prefix}/{name}/detect/#", on_detect_command)
+
+ if not mqtt_config.user is None:
+ client.username_pw_set(mqtt_config.user, password=mqtt_config.password)
try:
- client.connect(config.host, config.port, 60)
+ client.connect(mqtt_config.host, mqtt_config.port, 60)
except Exception as e:
logger.error(f"Unable to connect to MQTT server: {e}")
raise
+
client.loop_start()
+
+ for name in config.cameras.keys():
+ client.publish(f"{mqtt_config.topic_prefix}/{name}/clips/state", 'ON' if config.cameras[name].clips.enabled else 'OFF', retain=True)
+ client.publish(f"{mqtt_config.topic_prefix}/{name}/snapshots/state", 'ON' if config.cameras[name].clips.enabled else 'OFF', retain=True)
+ client.publish(f"{mqtt_config.topic_prefix}/{name}/detect/state", 'ON' if config.cameras[name].clips.enabled else 'OFF', retain=True)
+
+ client.subscribe(f"{mqtt_config.topic_prefix}/+/clips/#")
+ client.subscribe(f"{mqtt_config.topic_prefix}/+/snapshots/#")
+ client.subscribe(f"{mqtt_config.topic_prefix}/+/detect/#")
+
return client
diff --git a/frigate/object_processing.py b/frigate/object_processing.py
--- a/frigate/object_processing.py
+++ b/frigate/object_processing.py
@@ -74,9 +74,6 @@ def __init__(self, camera, camera_config: CameraConfig, frame_cache, obj_data):
self.thumbnail_data = None
self.frame = None
self.previous = self.to_dict()
- self._snapshot_jpg_time = 0
- ret, jpg = cv2.imencode('.jpg', np.zeros((300,300,3), np.uint8))
- self._snapshot_jpg = jpg.tobytes()
# start the score history
self.score_history = [self.obj_data['score']]
@@ -167,41 +164,45 @@ def to_dict(self, include_thumbnail: bool = False):
'region': self.obj_data['region'],
'current_zones': self.current_zones.copy(),
'entered_zones': list(self.entered_zones).copy(),
- 'thumbnail': base64.b64encode(self.get_jpg_bytes()).decode('utf-8') if include_thumbnail else None
+ 'thumbnail': base64.b64encode(self.get_thumbnail()).decode('utf-8') if include_thumbnail else None
}
- def get_jpg_bytes(self):
- if self.thumbnail_data is None or self._snapshot_jpg_time == self.thumbnail_data['frame_time']:
- return self._snapshot_jpg
+ def get_thumbnail(self):
+ if self.thumbnail_data is None or not self.thumbnail_data['frame_time'] in self.frame_cache:
+ ret, jpg = cv2.imencode('.jpg', np.zeros((175,175,3), np.uint8))
- if not self.thumbnail_data['frame_time'] in self.frame_cache:
- logger.error(f"Unable to create thumbnail for {self.obj_data['id']}")
- logger.error(f"Looking for frame_time of {self.thumbnail_data['frame_time']}")
- logger.error(f"Thumbnail frames: {','.join([str(k) for k in self.frame_cache.keys()])}")
- return self._snapshot_jpg
+ jpg_bytes = self.get_jpg_bytes(timestamp=False, bounding_box=False, crop=True, height=175)
- # TODO: crop first to avoid converting the entire frame?
- snapshot_config = self.camera_config.snapshots
+ if jpg_bytes:
+ return jpg_bytes
+ else:
+ ret, jpg = cv2.imencode('.jpg', np.zeros((175,175,3), np.uint8))
+ return jpg.tobytes()
+
+ def get_jpg_bytes(self, timestamp=False, bounding_box=False, crop=False, height=None):
+ if self.thumbnail_data is None:
+ return None
+
best_frame = cv2.cvtColor(self.frame_cache[self.thumbnail_data['frame_time']], cv2.COLOR_YUV2BGR_I420)
-
- if snapshot_config.draw_bounding_boxes:
+
+ if bounding_box:
thickness = 2
color = COLOR_MAP[self.obj_data['label']]
+
+ # draw the bounding boxes on the frame
box = self.thumbnail_data['box']
- draw_box_with_label(best_frame, box[0], box[1], box[2], box[3], self.obj_data['label'],
- f"{int(self.thumbnail_data['score']*100)}% {int(self.thumbnail_data['area'])}", thickness=thickness, color=color)
+ draw_box_with_label(best_frame, box[0], box[1], box[2], box[3], self.obj_data['label'], f"{int(self.thumbnail_data['score']*100)}% {int(self.thumbnail_data['area'])}", thickness=thickness, color=color)
- if snapshot_config.crop_to_region:
+ if crop:
box = self.thumbnail_data['box']
region = calculate_region(best_frame.shape, box[0], box[1], box[2], box[3], 1.1)
best_frame = best_frame[region[1]:region[3], region[0]:region[2]]
- if snapshot_config.height:
- height = snapshot_config.height
+ if height:
width = int(height*best_frame.shape[1]/best_frame.shape[0])
best_frame = cv2.resize(best_frame, dsize=(width, height), interpolation=cv2.INTER_AREA)
- if snapshot_config.show_timestamp:
+ if timestamp:
time_to_show = datetime.datetime.fromtimestamp(self.thumbnail_data['frame_time']).strftime("%m/%d/%Y %H:%M:%S")
size = cv2.getTextSize(time_to_show, cv2.FONT_HERSHEY_SIMPLEX, fontScale=1, thickness=2)
text_width = size[0][0]
@@ -212,9 +213,9 @@ def get_jpg_bytes(self):
ret, jpg = cv2.imencode('.jpg', best_frame)
if ret:
- self._snapshot_jpg = jpg.tobytes()
-
- return self._snapshot_jpg
+ return jpg.tobytes()
+ else:
+ return None
def zone_filtered(obj: TrackedObject, object_config):
object_name = obj.obj_data['label']
@@ -253,6 +254,8 @@ def __init__(self, name, config, frame_manager):
self._current_frame = np.zeros(self.camera_config.frame_shape_yuv, np.uint8)
self.current_frame_lock = threading.Lock()
self.current_frame_time = 0.0
+ self.motion_boxes = []
+ self.regions = []
self.previous_frame_id = None
self.callbacks = defaultdict(lambda: [])
@@ -290,7 +293,7 @@ def get_current_frame(self, draw_options={}):
cv2.drawContours(frame_copy, [zone.contour], -1, zone.color, thickness)
if draw_options.get('mask'):
- mask_overlay = np.where(self.camera_config.mask==[0])
+ mask_overlay = np.where(self.camera_config.motion.mask==[0])
frame_copy[mask_overlay] = [0,0,0]
if draw_options.get('motion_boxes'):
@@ -427,19 +430,41 @@ def start(camera, obj: TrackedObject, current_frame_time):
def update(camera, obj: TrackedObject, current_frame_time):
after = obj.to_dict()
- message = { 'before': obj.previous, 'after': after }
+ message = { 'before': obj.previous, 'after': after, 'type': 'new' if obj.previous['false_positive'] else 'update' }
self.client.publish(f"{self.topic_prefix}/events", json.dumps(message), retain=False)
obj.previous = after
def end(camera, obj: TrackedObject, current_frame_time):
+ snapshot_config = self.config.cameras[camera].snapshots
+ event_data = obj.to_dict(include_thumbnail=True)
+ event_data['has_snapshot'] = False
if not obj.false_positive:
- message = { 'before': obj.previous, 'after': obj.to_dict() }
+ message = { 'before': obj.previous, 'after': obj.to_dict(), 'type': 'end' }
self.client.publish(f"{self.topic_prefix}/events", json.dumps(message), retain=False)
- self.event_queue.put(('end', camera, obj.to_dict(include_thumbnail=True)))
-
+ # write snapshot to disk if enabled
+ if snapshot_config.enabled:
+ jpg_bytes = obj.get_jpg_bytes(
+ timestamp=snapshot_config.timestamp,
+ bounding_box=snapshot_config.bounding_box,
+ crop=snapshot_config.crop,
+ height=snapshot_config.height
+ )
+ with open(os.path.join(CLIPS_DIR, f"{camera}-{obj.obj_data['id']}.jpg"), 'wb') as j:
+ j.write(jpg_bytes)
+ event_data['has_snapshot'] = True
+ self.event_queue.put(('end', camera, event_data))
+
def snapshot(camera, obj: TrackedObject, current_frame_time):
- self.client.publish(f"{self.topic_prefix}/{camera}/{obj.obj_data['label']}/snapshot", obj.get_jpg_bytes(), retain=True)
-
+ mqtt_config = self.config.cameras[camera].mqtt
+ if mqtt_config.enabled:
+ jpg_bytes = obj.get_jpg_bytes(
+ timestamp=mqtt_config.timestamp,
+ bounding_box=mqtt_config.bounding_box,
+ crop=mqtt_config.crop,
+ height=mqtt_config.height
+ )
+ self.client.publish(f"{self.topic_prefix}/{camera}/{obj.obj_data['label']}/snapshot", jpg_bytes, retain=True)
+
def object_status(camera, object_name, status):
self.client.publish(f"{self.topic_prefix}/{camera}/{object_name}", status, retain=False)
diff --git a/frigate/record.py b/frigate/record.py
--- a/frigate/record.py
+++ b/frigate/record.py
@@ -45,9 +45,9 @@ def move_files(self):
files_in_use = []
for process in psutil.process_iter():
- if process.name() != 'ffmpeg':
- continue
try:
+ if process.name() != 'ffmpeg':
+ continue
flist = process.open_files()
if flist:
for nt in flist:
diff --git a/frigate/stats.py b/frigate/stats.py
new file mode 100644
--- /dev/null
+++ b/frigate/stats.py
@@ -0,0 +1,70 @@
+import json
+import logging
+import threading
+import time
+
+from frigate.config import FrigateConfig
+from frigate.version import VERSION
+
+logger = logging.getLogger(__name__)
+
+def stats_init(camera_metrics, detectors):
+ stats_tracking = {
+ 'camera_metrics': camera_metrics,
+ 'detectors': detectors,
+ 'started': int(time.time())
+ }
+ return stats_tracking
+
+def stats_snapshot(stats_tracking):
+ camera_metrics = stats_tracking['camera_metrics']
+ stats = {}
+
+ total_detection_fps = 0
+
+ for name, camera_stats in camera_metrics.items():
+ total_detection_fps += camera_stats['detection_fps'].value
+ stats[name] = {
+ 'camera_fps': round(camera_stats['camera_fps'].value, 2),
+ 'process_fps': round(camera_stats['process_fps'].value, 2),
+ 'skipped_fps': round(camera_stats['skipped_fps'].value, 2),
+ 'detection_fps': round(camera_stats['detection_fps'].value, 2),
+ 'pid': camera_stats['process'].pid,
+ 'capture_pid': camera_stats['capture_process'].pid
+ }
+
+ stats['detectors'] = {}
+ for name, detector in stats_tracking["detectors"].items():
+ stats['detectors'][name] = {
+ 'inference_speed': round(detector.avg_inference_speed.value * 1000, 2),
+ 'detection_start': detector.detection_start.value,
+ 'pid': detector.detect_process.pid
+ }
+ stats['detection_fps'] = round(total_detection_fps, 2)
+
+ stats['service'] = {
+ 'uptime': (int(time.time()) - stats_tracking['started']),
+ 'version': VERSION
+ }
+
+ return stats
+
+class StatsEmitter(threading.Thread):
+ def __init__(self, config: FrigateConfig, stats_tracking, mqtt_client, topic_prefix, stop_event):
+ threading.Thread.__init__(self)
+ self.name = 'frigate_stats_emitter'
+ self.config = config
+ self.stats_tracking = stats_tracking
+ self.mqtt_client = mqtt_client
+ self.topic_prefix = topic_prefix
+ self.stop_event = stop_event
+
+ def run(self):
+ time.sleep(10)
+ while True:
+ if self.stop_event.is_set():
+ logger.info(f"Exiting watchdog...")
+ break
+ stats = stats_snapshot(self.stats_tracking)
+ self.mqtt_client.publish(f"{self.topic_prefix}/stats", json.dumps(stats), retain=False)
+ time.sleep(self.config.mqtt.stats_interval)
diff --git a/frigate/util.py b/frigate/util.py
--- a/frigate/util.py
+++ b/frigate/util.py
@@ -2,6 +2,7 @@
import datetime
import hashlib
import json
+import logging
import signal
import subprocess as sp
import threading
@@ -15,6 +16,8 @@
import matplotlib.pyplot as plt
import numpy as np
+logger = logging.getLogger(__name__)
+
def draw_box_with_label(frame, x_min, y_min, x_max, y_max, label, info, thickness=2, color=None, position='ul'):
if color is None:
@@ -288,6 +291,24 @@ def print_stack(sig, frame):
def listen():
signal.signal(signal.SIGUSR1, print_stack)
+def create_mask(frame_shape, mask):
+ mask_img = np.zeros(frame_shape, np.uint8)
+ mask_img[:] = 255
+
+ if isinstance(mask, list):
+ for m in mask:
+ add_mask(m, mask_img)
+
+ elif isinstance(mask, str):
+ add_mask(mask, mask_img)
+
+ return mask_img
+
+def add_mask(mask, mask_img):
+ points = mask.split(',')
+ contour = np.array([[int(points[i]), int(points[i+1])] for i in range(0, len(points), 2)])
+ cv2.fillPoly(mask_img, pts=[contour], color=(0))
+
class FrameManager(ABC):
@abstractmethod
def create(self, name, size) -> AnyStr:
diff --git a/frigate/video.py b/frigate/video.py
--- a/frigate/video.py
+++ b/frigate/video.py
@@ -13,6 +13,7 @@
import threading
import time
from collections import defaultdict
+from setproctitle import setproctitle
from typing import Dict, List
import cv2
@@ -30,7 +31,7 @@
logger = logging.getLogger(__name__)
-def filtered(obj, objects_to_track, object_filters, mask=None):
+def filtered(obj, objects_to_track, object_filters):
object_name = obj[0]
if not object_name in objects_to_track:
@@ -53,14 +54,15 @@ def filtered(obj, objects_to_track, object_filters, mask=None):
if obj_settings.min_score > obj[1]:
return True
- # compute the coordinates of the object and make sure
- # the location isnt outside the bounds of the image (can happen from rounding)
- y_location = min(int(obj[2][3]), len(mask)-1)
- x_location = min(int((obj[2][2]-obj[2][0])/2.0)+obj[2][0], len(mask[0])-1)
-
- # if the object is in a masked location, don't add it to detected objects
- if (not mask is None) and (mask[y_location][x_location] == 0):
- return True
+ if not obj_settings.mask is None:
+ # compute the coordinates of the object and make sure
+ # the location isnt outside the bounds of the image (can happen from rounding)
+ y_location = min(int(obj[2][3]), len(obj_settings.mask)-1)
+ x_location = min(int((obj[2][2]-obj[2][0])/2.0)+obj[2][0], len(obj_settings.mask[0])-1)
+
+ # if the object is in a masked location, don't add it to detected objects
+ if obj_settings.mask[y_location][x_location] == 0:
+ return True
return False
@@ -249,16 +251,17 @@ def receiveSignal(signalNumber, frame):
signal.signal(signal.SIGINT, receiveSignal)
threading.current_thread().name = f"process:{name}"
+ setproctitle(f"frigate.process:{name}")
listen()
frame_queue = process_info['frame_queue']
+ detection_enabled = process_info['detection_enabled']
frame_shape = config.frame_shape
objects_to_track = config.objects.track
object_filters = config.objects.filters
- mask = config.mask
- motion_detector = MotionDetector(frame_shape, mask, config.motion)
+ motion_detector = MotionDetector(frame_shape, config.motion)
object_detector = RemoteObjectDetector(name, '/labelmap.txt', detection_queue, result_connection, model_shape)
object_tracker = ObjectTracker(config.detect)
@@ -266,7 +269,7 @@ def receiveSignal(signalNumber, frame):
frame_manager = SharedMemoryFrameManager()
process_frames(name, frame_queue, frame_shape, model_shape, frame_manager, motion_detector, object_detector,
- object_tracker, detected_objects_queue, process_info, objects_to_track, object_filters, mask, stop_event)
+ object_tracker, detected_objects_queue, process_info, objects_to_track, object_filters, detection_enabled, stop_event)
logger.info(f"{name}: exiting subprocess")
@@ -276,7 +279,7 @@ def reduce_boxes(boxes):
reduced_boxes = cv2.groupRectangles([list(b) for b in itertools.chain(boxes, boxes)], 1, 0.2)[0]
return [tuple(b) for b in reduced_boxes]
-def detect(object_detector, frame, model_shape, region, objects_to_track, object_filters, mask):
+def detect(object_detector, frame, model_shape, region, objects_to_track, object_filters):
tensor_input = create_tensor_input(frame, model_shape, region)
detections = []
@@ -294,7 +297,7 @@ def detect(object_detector, frame, model_shape, region, objects_to_track, object
(x_max-x_min)*(y_max-y_min),
region)
# apply object filters
- if filtered(det, objects_to_track, object_filters, mask):
+ if filtered(det, objects_to_track, object_filters):
continue
detections.append(det)
return detections
@@ -303,7 +306,7 @@ def process_frames(camera_name: str, frame_queue: mp.Queue, frame_shape, model_s
frame_manager: FrameManager, motion_detector: MotionDetector,
object_detector: RemoteObjectDetector, object_tracker: ObjectTracker,
detected_objects_queue: mp.Queue, process_info: Dict,
- objects_to_track: List[str], object_filters, mask, stop_event,
+ objects_to_track: List[str], object_filters, detection_enabled: mp.Value, stop_event,
exit_on_empty: bool = False):
fps = process_info['process_fps']
@@ -334,6 +337,14 @@ def process_frames(camera_name: str, frame_queue: mp.Queue, frame_shape, model_s
logger.info(f"{camera_name}: frame {frame_time} is not in memory store.")
continue
+ if not detection_enabled.value:
+ fps.value = fps_tracker.eps()
+ object_tracker.match_and_update(frame_time, [])
+ detected_objects_queue.put((camera_name, frame_time, object_tracker.tracked_objects, [], []))
+ detection_fps.value = object_detector.fps.eps()
+ frame_manager.close(f"{camera_name}{frame_time}")
+ continue
+
# look for motion
motion_boxes = motion_detector.detect(frame)
@@ -356,7 +367,7 @@ def process_frames(camera_name: str, frame_queue: mp.Queue, frame_shape, model_s
# resize regions and detect
detections = []
for region in regions:
- detections.extend(detect(object_detector, frame, model_shape, region, objects_to_track, object_filters, mask))
+ detections.extend(detect(object_detector, frame, model_shape, region, objects_to_track, object_filters))
#########
# merge objects, check for clipped objects and look again up to 4 times
@@ -391,7 +402,7 @@ def process_frames(camera_name: str, frame_queue: mp.Queue, frame_shape, model_s
regions.append(region)
- selected_objects.extend(detect(object_detector, frame, model_shape, region, objects_to_track, object_filters, mask))
+ selected_objects.extend(detect(object_detector, frame, model_shape, region, objects_to_track, object_filters))
refining = True
else:
@@ -408,11 +419,11 @@ def process_frames(camera_name: str, frame_queue: mp.Queue, frame_shape, model_s
# add to the queue if not full
if(detected_objects_queue.full()):
- frame_manager.delete(f"{camera_name}{frame_time}")
- continue
+ frame_manager.delete(f"{camera_name}{frame_time}")
+ continue
else:
- fps_tracker.update()
- fps.value = fps_tracker.eps()
- detected_objects_queue.put((camera_name, frame_time, object_tracker.tracked_objects, motion_boxes, regions))
- detection_fps.value = object_detector.fps.eps()
- frame_manager.close(f"{camera_name}{frame_time}")
+ fps_tracker.update()
+ fps.value = fps_tracker.eps()
+ detected_objects_queue.put((camera_name, frame_time, object_tracker.tracked_objects, motion_boxes, regions))
+ detection_fps.value = object_detector.fps.eps()
+ frame_manager.close(f"{camera_name}{frame_time}")
diff --git a/migrations/001_create_events_table.py b/migrations/001_create_events_table.py
new file mode 100644
--- /dev/null
+++ b/migrations/001_create_events_table.py
@@ -0,0 +1,41 @@
+"""Peewee migrations -- 001_create_events_table.py.
+
+Some examples (model - class or model name)::
+
+ > Model = migrator.orm['model_name'] # Return model in current state by name
+
+ > migrator.sql(sql) # Run custom SQL
+ > migrator.python(func, *args, **kwargs) # Run python code
+ > migrator.create_model(Model) # Create a model (could be used as decorator)
+ > migrator.remove_model(model, cascade=True) # Remove a model
+ > migrator.add_fields(model, **fields) # Add fields to a model
+ > migrator.change_fields(model, **fields) # Change fields
+ > migrator.remove_fields(model, *field_names, cascade=True)
+ > migrator.rename_field(model, old_field_name, new_field_name)
+ > migrator.rename_table(model, new_table_name)
+ > migrator.add_index(model, *col_names, unique=False)
+ > migrator.drop_index(model, *col_names)
+ > migrator.add_not_null(model, *field_names)
+ > migrator.drop_not_null(model, *field_names)
+ > migrator.add_default(model, field_name, default)
+
+"""
+
+import datetime as dt
+import peewee as pw
+from decimal import ROUND_HALF_EVEN
+
+try:
+ import playhouse.postgres_ext as pw_pext
+except ImportError:
+ pass
+
+SQL = pw.SQL
+
+def migrate(migrator, database, fake=False, **kwargs):
+ migrator.sql('CREATE TABLE IF NOT EXISTS "event" ("id" VARCHAR(30) NOT NULL PRIMARY KEY, "label" VARCHAR(20) NOT NULL, "camera" VARCHAR(20) NOT NULL, "start_time" DATETIME NOT NULL, "end_time" DATETIME NOT NULL, "top_score" REAL NOT NULL, "false_positive" INTEGER NOT NULL, "zones" JSON NOT NULL, "thumbnail" TEXT NOT NULL)')
+ migrator.sql('CREATE INDEX IF NOT EXISTS "event_label" ON "event" ("label")')
+ migrator.sql('CREATE INDEX IF NOT EXISTS "event_camera" ON "event" ("camera")')
+
+def rollback(migrator, database, fake=False, **kwargs):
+ pass
diff --git a/migrations/002_add_clip_snapshot.py b/migrations/002_add_clip_snapshot.py
new file mode 100644
--- /dev/null
+++ b/migrations/002_add_clip_snapshot.py
@@ -0,0 +1,41 @@
+"""Peewee migrations -- 002_add_clip_snapshot.py.
+
+Some examples (model - class or model name)::
+
+ > Model = migrator.orm['model_name'] # Return model in current state by name
+
+ > migrator.sql(sql) # Run custom SQL
+ > migrator.python(func, *args, **kwargs) # Run python code
+ > migrator.create_model(Model) # Create a model (could be used as decorator)
+ > migrator.remove_model(model, cascade=True) # Remove a model
+ > migrator.add_fields(model, **fields) # Add fields to a model
+ > migrator.change_fields(model, **fields) # Change fields
+ > migrator.remove_fields(model, *field_names, cascade=True)
+ > migrator.rename_field(model, old_field_name, new_field_name)
+ > migrator.rename_table(model, new_table_name)
+ > migrator.add_index(model, *col_names, unique=False)
+ > migrator.drop_index(model, *col_names)
+ > migrator.add_not_null(model, *field_names)
+ > migrator.drop_not_null(model, *field_names)
+ > migrator.add_default(model, field_name, default)
+
+"""
+
+import datetime as dt
+import peewee as pw
+from decimal import ROUND_HALF_EVEN
+from frigate.models import Event
+
+try:
+ import playhouse.postgres_ext as pw_pext
+except ImportError:
+ pass
+
+SQL = pw.SQL
+
+
+def migrate(migrator, database, fake=False, **kwargs):
+ migrator.add_fields(Event, has_clip=pw.BooleanField(default=True), has_snapshot=pw.BooleanField(default=True))
+
+def rollback(migrator, database, fake=False, **kwargs):
+ migrator.remove_fields(Event, ['has_clip', 'has_snapshot'])
| diff --git a/frigate/test/test_config.py b/frigate/test/test_config.py
--- a/frigate/test/test_config.py
+++ b/frigate/test/test_config.py
@@ -191,12 +191,12 @@ def test_ffmpeg_params(self):
frigate_config = FrigateConfig(config=config)
assert('-re' in frigate_config.cameras['back'].ffmpeg_cmds[0]['cmd'])
- def test_inherit_save_clips_retention(self):
+ def test_inherit_clips_retention(self):
config = {
'mqtt': {
'host': 'mqtt'
},
- 'save_clips': {
+ 'clips': {
'retain': {
'default': 20,
'objects': {
@@ -217,14 +217,14 @@ def test_inherit_save_clips_retention(self):
}
}
frigate_config = FrigateConfig(config=config)
- assert(frigate_config.cameras['back'].save_clips.retain.objects['person'] == 30)
+ assert(frigate_config.cameras['back'].clips.retain.objects['person'] == 30)
def test_roles_listed_twice_throws_error(self):
config = {
'mqtt': {
'host': 'mqtt'
},
- 'save_clips': {
+ 'clips': {
'retain': {
'default': 20,
'objects': {
@@ -252,7 +252,7 @@ def test_zone_matching_camera_name_throws_error(self):
'mqtt': {
'host': 'mqtt'
},
- 'save_clips': {
+ 'clips': {
'retain': {
'default': 20,
'objects': {
@@ -279,12 +279,12 @@ def test_zone_matching_camera_name_throws_error(self):
}
self.assertRaises(vol.MultipleInvalid, lambda: FrigateConfig(config=config))
- def test_save_clips_should_default_to_global_objects(self):
+ def test_clips_should_default_to_global_objects(self):
config = {
'mqtt': {
'host': 'mqtt'
},
- 'save_clips': {
+ 'clips': {
'retain': {
'default': 20,
'objects': {
@@ -304,16 +304,14 @@ def test_save_clips_should_default_to_global_objects(self):
},
'height': 1080,
'width': 1920,
- 'save_clips': {
+ 'clips': {
'enabled': True
}
}
}
}
config = FrigateConfig(config=config)
- assert(len(config.cameras['back'].save_clips.objects) == 2)
- assert('dog' in config.cameras['back'].save_clips.objects)
- assert('person' in config.cameras['back'].save_clips.objects)
+ assert(config.cameras['back'].clips.objects is None)
def test_role_assigned_but_not_enabled(self):
json_config = {
@@ -325,7 +323,7 @@ def test_role_assigned_but_not_enabled(self):
'ffmpeg': {
'inputs': [
{ 'path': 'rtsp://10.0.0.1:554/video', 'roles': ['detect', 'rtmp'] },
- { 'path': 'rtsp://10.0.0.1:554/clips', 'roles': ['clips'] }
+ { 'path': 'rtsp://10.0.0.1:554/record', 'roles': ['record'] }
]
},
'height': 1080,
| Timestamp in mjpeg feed gets hidden if mask is set where timestamp should be
**Describe the bug**
Minor issue.
I have a mask set at the top of the video feed.
When looking at the debug video feed using the URL below, the timestamp gets hidden, whereas it should appear on top of the mask.
http://ip:5000/camname?mask=1&timestamp=1
**Version of frigate**
0.8.0 beta3
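
For reproduction, a minimal sketch of grabbing a single frame from the debug MJPEG feed so the masked region can be inspected for the missing timestamp. The host, port and camera name are just the placeholders from the URL above, and the use of `requests` plus OpenCV is an assumption for illustration, not something the report relies on:

```python
# Hypothetical repro helper: pull one JPEG out of the multipart MJPEG debug feed
# and save it so the masked area can be checked for the timestamp overlay.
# Host/port/camera name are the placeholders from the URL above.
import cv2
import numpy as np
import requests

resp = requests.get("http://ip:5000/camname?mask=1&timestamp=1", stream=True, timeout=10)
buf = b""
for chunk in resp.iter_content(chunk_size=4096):
    buf += chunk
    start = buf.find(b"\xff\xd8")  # JPEG start-of-image marker
    end = buf.find(b"\xff\xd9")    # JPEG end-of-image marker
    if start != -1 and end != -1 and end > start:
        frame = cv2.imdecode(np.frombuffer(buf[start:end + 2], dtype=np.uint8), cv2.IMREAD_COLOR)
        cv2.imwrite("debug_frame.jpg", frame)  # timestamp should be visible on top of the mask here
        break
```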
Mask config changes
The motion mask has been moved under the `motion` config entry in recent changes. Also, there are now object-level masks available as filters.
```yaml
cameras:
  front_door:
    ...
    motion:
      mask: poly,0,900,1080,900,1080,1920,0,1920
    ...
    objects:
      filters:
        person:
          min_area: 5000
          threshold: 0.6
          mask: poly,574,1058,772,1072,768,632,439,636,34,600
```
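
For anyone wondering what these `poly,...` strings become internally: the patch adds `create_mask`/`add_mask` helpers in `frigate/util.py` that fill the listed polygon with zeros on an otherwise-white mask image. A simplified sketch of that conversion (the explicit `poly,` prefix stripping and the 1080p frame shape are assumptions for illustration, not taken verbatim from the patch):

```python
# Simplified illustration of how a "poly,x,y,..." mask string becomes a binary
# mask image, mirroring the create_mask/add_mask helpers added in frigate/util.py.
import cv2
import numpy as np

def mask_from_poly(frame_shape, mask_str):
    mask_img = np.full(frame_shape, 255, dtype=np.uint8)   # 255 = not masked
    if mask_str.startswith("poly,"):                        # prefix handling assumed for illustration
        mask_str = mask_str[len("poly,"):]
    points = mask_str.split(",")
    contour = np.array(
        [[int(points[i]), int(points[i + 1])] for i in range(0, len(points), 2)],
        dtype=np.int32,
    )
    cv2.fillPoly(mask_img, pts=[contour], color=0)          # masked polygon becomes 0
    return mask_img

# e.g. the front_door motion mask above on a 1920x1080 frame:
motion_mask = mask_from_poly((1080, 1920), "poly,0,900,1080,900,1080,1920,0,1920")
print(motion_mask.shape, int(motion_mask.min()), int(motion_mask.max()))
```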
Also note that I updated to a Docker-based build process for the frontend @paularmstrong
| 2020-12-22T21:25:53Z | [] | [] |
|
blakeblackshear/frigate | 686 | blakeblackshear__frigate-686 | [
"800"
] | d376f6b1d230858fe61006248a9b6530d9b390ff | diff --git a/frigate/app.py b/frigate/app.py
--- a/frigate/app.py
+++ b/frigate/app.py
@@ -8,6 +8,8 @@
import signal
import yaml
+from gevent import pywsgi
+from geventwebsocket.handler import WebSocketHandler
from peewee_migrate import Router
from playhouse.sqlite_ext import SqliteExtDatabase
from playhouse.sqliteq import SqliteQueueDatabase
@@ -106,8 +108,8 @@ def set_log_levels(self):
for log, level in self.config.logger.logs.items():
logging.getLogger(log).setLevel(level)
- if not 'werkzeug' in self.config.logger.logs:
- logging.getLogger('werkzeug').setLevel('ERROR')
+ if not 'geventwebsocket.handler' in self.config.logger.logs:
+ logging.getLogger('geventwebsocket.handler').setLevel('ERROR')
def init_queues(self):
# Queues for clip processing
@@ -135,7 +137,7 @@ def init_stats(self):
self.stats_tracking = stats_init(self.camera_metrics, self.detectors)
def init_web_server(self):
- self.flask_app = create_app(self.config, self.db, self.stats_tracking, self.detected_frames_processor)
+ self.flask_app = create_app(self.config, self.db, self.stats_tracking, self.detected_frames_processor, self.mqtt_client)
def init_mqtt(self):
self.mqtt_client = create_mqtt_client(self.config, self.camera_metrics)
@@ -239,7 +241,9 @@ def receiveSignal(signalNumber, frame):
signal.signal(signal.SIGTERM, receiveSignal)
- self.flask_app.run(host='127.0.0.1', port=5001, debug=False)
+ server = pywsgi.WSGIServer(('127.0.0.1', 5001), self.flask_app, handler_class=WebSocketHandler)
+ server.serve_forever()
+
self.stop()
def stop(self):
@@ -252,6 +256,7 @@ def stop(self):
self.recording_maintainer.join()
self.stats_emitter.join()
self.frigate_watchdog.join()
+ self.db.stop()
for detector in self.detectors.values():
detector.stop()
diff --git a/frigate/config.py b/frigate/config.py
--- a/frigate/config.py
+++ b/frigate/config.py
@@ -131,6 +131,7 @@ def filters_for_all_tracked_objects(object_config):
OBJECTS_SCHEMA = vol.Schema(vol.All(filters_for_all_tracked_objects,
{
'track': [str],
+ 'mask': vol.Any(str, [str]),
vol.Optional('filters', default = {}): FILTER_SCHEMA.extend(
{
str: {
@@ -164,6 +165,9 @@ def detect_is_required(inputs):
'input_args': vol.Any(str, [str]),
}], vol.Msg(each_role_used_once, msg="Each input role may only be used once"),
vol.Msg(detect_is_required, msg="The detect role is required")),
+ 'global_args': vol.Any(str, [str]),
+ 'hwaccel_args': vol.Any(str, [str]),
+ 'input_args': vol.Any(str, [str]),
'output_args': {
vol.Optional('detect', default=DETECT_FFMPEG_OUTPUT_ARGS_DEFAULT): vol.Any(str, [str]),
vol.Optional('record', default=RECORD_FFMPEG_OUTPUT_ARGS_DEFAULT): vol.Any(str, [str]),
@@ -198,6 +202,7 @@ def ensure_zones_and_cameras_have_different_names(cameras):
vol.Optional('enabled', default=False): bool,
vol.Optional('pre_capture', default=5): int,
vol.Optional('post_capture', default=5): int,
+ vol.Optional('required_zones', default=[]): [str],
'objects': [str],
vol.Optional('retain', default={}): RETAIN_SCHEMA,
},
@@ -213,6 +218,7 @@ def ensure_zones_and_cameras_have_different_names(cameras):
vol.Optional('timestamp', default=False): bool,
vol.Optional('bounding_box', default=False): bool,
vol.Optional('crop', default=False): bool,
+ vol.Optional('required_zones', default=[]): [str],
'height': int,
vol.Optional('retain', default={}): RETAIN_SCHEMA,
},
@@ -221,7 +227,8 @@ def ensure_zones_and_cameras_have_different_names(cameras):
vol.Optional('timestamp', default=True): bool,
vol.Optional('bounding_box', default=True): bool,
vol.Optional('crop', default=True): bool,
- vol.Optional('height', default=270): int
+ vol.Optional('height', default=270): int,
+ vol.Optional('required_zones', default=[]): [str],
},
vol.Optional('objects', default={}): OBJECTS_SCHEMA,
vol.Optional('motion', default={}): MOTION_SCHEMA,
@@ -389,12 +396,12 @@ def to_dict(self):
}
class CameraInput():
- def __init__(self, global_config, ffmpeg_input):
+ def __init__(self, camera_config, global_config, ffmpeg_input):
self._path = ffmpeg_input['path']
self._roles = ffmpeg_input['roles']
- self._global_args = ffmpeg_input.get('global_args', global_config['global_args'])
- self._hwaccel_args = ffmpeg_input.get('hwaccel_args', global_config['hwaccel_args'])
- self._input_args = ffmpeg_input.get('input_args', global_config['input_args'])
+ self._global_args = ffmpeg_input.get('global_args', camera_config.get('global_args', global_config['global_args']))
+ self._hwaccel_args = ffmpeg_input.get('hwaccel_args', camera_config.get('hwaccel_args', global_config['hwaccel_args']))
+ self._input_args = ffmpeg_input.get('input_args', camera_config.get('input_args', global_config['input_args']))
@property
def path(self):
@@ -418,7 +425,7 @@ def input_args(self):
class CameraFfmpegConfig():
def __init__(self, global_config, config):
- self._inputs = [CameraInput(global_config, i) for i in config['inputs']]
+ self._inputs = [CameraInput(config, global_config, i) for i in config['inputs']]
self._output_args = config.get('output_args', global_config['output_args'])
@property
@@ -506,12 +513,25 @@ def to_dict(self):
}
class FilterConfig():
- def __init__(self, global_config, config, frame_shape=None):
+ def __init__(self, global_config, config, global_mask=None, frame_shape=None):
self._min_area = config.get('min_area', global_config.get('min_area', 0))
self._max_area = config.get('max_area', global_config.get('max_area', 24000000))
self._threshold = config.get('threshold', global_config.get('threshold', 0.7))
self._min_score = config.get('min_score', global_config.get('min_score', 0.5))
- self._raw_mask = config.get('mask')
+
+ self._raw_mask = []
+ if global_mask:
+ if isinstance(global_mask, list):
+ self._raw_mask += global_mask
+ elif isinstance(global_mask, str):
+ self._raw_mask += [global_mask]
+
+ mask = config.get('mask')
+ if mask:
+ if isinstance(mask, list):
+ self._raw_mask += mask
+ elif isinstance(mask, str):
+ self._raw_mask += [mask]
self._mask = create_mask(frame_shape, self._raw_mask) if self._raw_mask else None
@property
@@ -546,7 +566,8 @@ def to_dict(self):
class ObjectConfig():
def __init__(self, global_config, config, frame_shape):
self._track = config.get('track', global_config.get('track', DEFAULT_TRACKED_OBJECTS))
- self._filters = { name: FilterConfig(global_config.get('filters').get(name, {}), config.get('filters').get(name, {}), frame_shape) for name in self._track }
+ self._raw_mask = config.get('mask')
+ self._filters = { name: FilterConfig(global_config['filters'].get(name, {}), config['filters'].get(name, {}), self._raw_mask, frame_shape) for name in self._track }
@property
def track(self):
@@ -559,6 +580,7 @@ def filters(self) -> Dict[str, FilterConfig]:
def to_dict(self):
return {
'track': self.track,
+ 'mask': self._raw_mask,
'filters': { k: f.to_dict() for k, f in self.filters.items() }
}
@@ -570,6 +592,7 @@ def __init__(self, global_config, config):
self._crop = config['crop']
self._height = config.get('height')
self._retain = RetainConfig(global_config['snapshots']['retain'], config['retain'])
+ self._required_zones = config['required_zones']
@property
def enabled(self):
@@ -594,6 +617,10 @@ def height(self):
@property
def retain(self):
return self._retain
+
+ @property
+ def required_zones(self):
+ return self._required_zones
def to_dict(self):
return {
@@ -602,7 +629,8 @@ def to_dict(self):
'bounding_box': self.bounding_box,
'crop': self.crop,
'height': self.height,
- 'retain': self.retain.to_dict()
+ 'retain': self.retain.to_dict(),
+ 'required_zones': self.required_zones
}
class CameraMqttConfig():
@@ -612,6 +640,7 @@ def __init__(self, config):
self._bounding_box = config['bounding_box']
self._crop = config['crop']
self._height = config.get('height')
+ self._required_zones = config['required_zones']
@property
def enabled(self):
@@ -633,13 +662,18 @@ def crop(self):
def height(self):
return self._height
+ @property
+ def required_zones(self):
+ return self._required_zones
+
def to_dict(self):
return {
'enabled': self.enabled,
'timestamp': self.timestamp,
'bounding_box': self.bounding_box,
'crop': self.crop,
- 'height': self.height
+ 'height': self.height,
+ 'required_zones': self.required_zones
}
class CameraClipsConfig():
@@ -649,6 +683,7 @@ def __init__(self, global_config, config):
self._post_capture = config['post_capture']
self._objects = config.get('objects')
self._retain = RetainConfig(global_config['clips']['retain'], config['retain'])
+ self._required_zones = config['required_zones']
@property
def enabled(self):
@@ -670,13 +705,18 @@ def objects(self):
def retain(self):
return self._retain
+ @property
+ def required_zones(self):
+ return self._required_zones
+
def to_dict(self):
return {
'enabled': self.enabled,
'pre_capture': self.pre_capture,
'post_capture': self.post_capture,
'objects': self.objects,
- 'retain': self.retain.to_dict()
+ 'retain': self.retain.to_dict(),
+ 'required_zones': self.required_zones
}
class CameraRtmpConfig():
@@ -746,7 +786,7 @@ def to_dict(self):
class DetectConfig():
def __init__(self, global_config, config, camera_fps):
self._enabled = config['enabled']
- self._max_disappeared = config.get('max_disappeared', global_config.get('max_disappeared', camera_fps*2))
+ self._max_disappeared = config.get('max_disappeared', global_config.get('max_disappeared', camera_fps*5))
@property
def enabled(self):
diff --git a/frigate/events.py b/frigate/events.py
--- a/frigate/events.py
+++ b/frigate/events.py
@@ -10,6 +10,7 @@
from pathlib import Path
import psutil
+import shutil
from frigate.config import FrigateConfig
from frigate.const import RECORD_DIR, CLIPS_DIR, CACHE_DIR
@@ -30,6 +31,18 @@ def __init__(self, config, camera_processes, event_queue, event_processed_queue,
self.event_processed_queue = event_processed_queue
self.events_in_process = {}
self.stop_event = stop_event
+
+ def should_create_clip(self, camera, event_data):
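+        # a clip is only created for real detections that, when required_zones is configured, entered at least one of those zones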
+ if event_data['false_positive']:
+ return False
+
+ # if there are required zones and there is no overlap
+ required_zones = self.config.cameras[camera].clips.required_zones
+ if len(required_zones) > 0 and not set(event_data['entered_zones']) & set(required_zones):
+ logger.debug(f"Not creating clip for {event_data['id']} because it did not enter required zones")
+ return False
+
+ return True
def refresh_cache(self):
cached_files = os.listdir(CACHE_DIR)
@@ -97,6 +110,18 @@ def refresh_cache(self):
del self.cached_clips[f]
logger.debug(f"Cleaning up cached file {f}")
os.remove(os.path.join(CACHE_DIR,f))
+
+ # if we are still using more than 90% of the cache, proactively cleanup
+ cache_usage = shutil.disk_usage("/tmp/cache")
+ if cache_usage.used/cache_usage.total > .9 and cache_usage.free < 200000000 and len(self.cached_clips) > 0:
+ logger.warning("More than 90% of the cache is used.")
+ logger.warning("Consider increasing space available at /tmp/cache or reducing max_seconds in your clips config.")
+ logger.warning("Proactively cleaning up the cache...")
+ while cache_usage.used/cache_usage.total > .9:
+ oldest_clip = min(self.cached_clips.values(), key=lambda x:x['start_time'])
+ del self.cached_clips[oldest_clip['path']]
+ os.remove(os.path.join(CACHE_DIR,oldest_clip['path']))
+ cache_usage = shutil.disk_usage("/tmp/cache")
def create_clip(self, camera, event_data, pre_capture, post_capture):
# get all clips from the camera with the event sorted
@@ -180,11 +205,12 @@ def run(self):
if event_type == 'end':
clips_config = self.config.cameras[camera].clips
- if not event_data['false_positive']:
- clip_created = False
+ clip_created = False
+ if self.should_create_clip(camera, event_data):
if clips_config.enabled and (clips_config.objects is None or event_data['label'] in clips_config.objects):
clip_created = self.create_clip(camera, event_data, clips_config.pre_capture, clips_config.post_capture)
-
+
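+                # only persist events that resulted in media (a clip or a snapshot)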
+ if clip_created or event_data['has_snapshot']:
Event.create(
id=event_data['id'],
label=event_data['label'],
@@ -286,6 +312,38 @@ def expire(self, media):
Event.label == l.label)
)
update_query.execute()
+
+ def purge_duplicates(self):
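+        # events on the same camera with the same label whose start times round to the same 5 second bucket are duplicates; keep the longest one and delete the rest along with their media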
+ duplicate_query = """with grouped_events as (
+ select id,
+ label,
+ camera,
+ has_snapshot,
+ has_clip,
+ row_number() over (
+ partition by label, camera, round(start_time/5,0)*5
+ order by end_time-start_time desc
+ ) as copy_number
+ from event
+ )
+
+ select distinct id, camera, has_snapshot, has_clip from grouped_events
+ where copy_number > 1;"""
+
+ duplicate_events = Event.raw(duplicate_query)
+ for event in duplicate_events:
+ logger.debug(f"Removing duplicate: {event.id}")
+ media_name = f"{event.camera}-{event.id}"
+ if event.has_snapshot:
+ media = Path(f"{os.path.join(CLIPS_DIR, media_name)}.jpg")
+ media.unlink(missing_ok=True)
+ if event.has_clip:
+ media = Path(f"{os.path.join(CLIPS_DIR, media_name)}.mp4")
+ media.unlink(missing_ok=True)
+
+ (Event.delete()
+ .where( Event.id << [event.id for event in duplicate_events] )
+ .execute())
def run(self):
counter = 0
@@ -294,15 +352,16 @@ def run(self):
logger.info(f"Exiting event cleanup...")
break
- # only expire events every 10 minutes, but check for stop events every 10 seconds
+ # only expire events every 5 minutes, but check for stop events every 10 seconds
time.sleep(10)
counter = counter + 1
- if counter < 60:
+ if counter < 30:
continue
counter = 0
self.expire('clips')
self.expire('snapshots')
+ self.purge_duplicates()
# drop events from db where has_clip and has_snapshot are false
delete_query = (
diff --git a/frigate/http.py b/frigate/http.py
--- a/frigate/http.py
+++ b/frigate/http.py
@@ -1,14 +1,17 @@
import base64
import datetime
+import json
import logging
import os
import time
from functools import reduce
import cv2
+import gevent
import numpy as np
from flask import (Blueprint, Flask, Response, current_app, jsonify,
make_response, request)
+from flask_sockets import Sockets
from peewee import SqliteDatabase, operator, fn, DoesNotExist
from playhouse.shortcuts import model_to_dict
@@ -21,9 +24,65 @@
logger = logging.getLogger(__name__)
bp = Blueprint('frigate', __name__)
+ws = Blueprint('ws', __name__)
-def create_app(frigate_config, database: SqliteDatabase, stats_tracking, detected_frames_processor):
+class MqttBackend():
+ """Interface for registering and updating WebSocket clients."""
+
+ def __init__(self, mqtt_client, topic_prefix):
+ self.clients = list()
+ self.mqtt_client = mqtt_client
+ self.topic_prefix = topic_prefix
+
+ def register(self, client):
+ """Register a WebSocket connection for Mqtt updates."""
+ self.clients.append(client)
+
+ def publish(self, message):
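+        # expects a JSON string like {"topic": "front/detect/set", "payload": "ON", "retain": false}; the topic is given relative to the configured topic prefix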
+ try:
+ json_message = json.loads(message)
+ json_message = {
+ 'topic': f"{self.topic_prefix}/{json_message['topic']}",
+ 'payload': json_message['payload'],
+ 'retain': json_message.get('retain', False)
+ }
+ except:
+ logger.warning("Unable to parse websocket message as valid json.")
+ return
+
+ logger.debug(f"Publishing mqtt message from websockets at {json_message['topic']}.")
+ self.mqtt_client.publish(json_message['topic'], json_message['payload'], retain=json_message['retain'])
+
+ def run(self):
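+        # relay every MQTT message published under the topic prefix to all registered websocket clients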
+ def send(client, userdata, message):
+ """Sends mqtt messages to clients."""
+ try:
+ logger.debug(f"Received mqtt message on {message.topic}.")
+ ws_message = json.dumps({
+ 'topic': message.topic.replace(f"{self.topic_prefix}/",""),
+ 'payload': message.payload.decode()
+ })
+ except:
+ # if the payload can't be decoded don't relay to clients
+ logger.debug(f"MQTT payload for {message.topic} wasn't text. Skipping...")
+ return
+
+ for client in self.clients:
+ try:
+ client.send(ws_message)
+ except:
+ logger.debug("Removing websocket client due to a closed connection.")
+ self.clients.remove(client)
+
+ self.mqtt_client.message_callback_add(f"{self.topic_prefix}/#", send)
+
+ def start(self):
+ """Maintains mqtt subscription in the background."""
+ gevent.spawn(self.run)
+
+def create_app(frigate_config, database: SqliteDatabase, stats_tracking, detected_frames_processor, mqtt_client):
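+    # mqtt_client is passed in so the app can bridge MQTT traffic to and from websocket clients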
app = Flask(__name__)
+ sockets = Sockets(app)
@app.before_request
def _db_connect():
@@ -39,6 +98,10 @@ def _db_close(exc):
app.detected_frames_processor = detected_frames_processor
app.register_blueprint(bp)
+ sockets.register_blueprint(ws)
+
+ app.mqtt_backend = MqttBackend(mqtt_client, frigate_config.mqtt.topic_prefix)
+ app.mqtt_backend.start()
return app
@@ -115,7 +178,7 @@ def event_thumbnail(id):
jpg_as_np = np.frombuffer(thumbnail_bytes, dtype=np.uint8)
img = cv2.imdecode(jpg_as_np, flags=1)
thumbnail = cv2.copyMakeBorder(img, 0, 0, int(img.shape[1]*0.5), int(img.shape[1]*0.5), cv2.BORDER_CONSTANT, (0,0,0))
- ret, jpg = cv2.imencode('.jpg', thumbnail)
+ ret, jpg = cv2.imencode('.jpg', thumbnail, [int(cv2.IMWRITE_JPEG_QUALITY), 70])
thumbnail_bytes = jpg.tobytes()
response = make_response(thumbnail_bytes)
@@ -236,7 +299,7 @@ def best(camera_name, label):
width = int(height*best_frame.shape[1]/best_frame.shape[0])
best_frame = cv2.resize(best_frame, dsize=(width, height), interpolation=cv2.INTER_AREA)
- ret, jpg = cv2.imencode('.jpg', best_frame)
+ ret, jpg = cv2.imencode('.jpg', best_frame, [int(cv2.IMWRITE_JPEG_QUALITY), 70])
response = make_response(jpg.tobytes())
response.headers['Content-Type'] = 'image/jpg'
return response
@@ -283,7 +346,7 @@ def latest_frame(camera_name):
frame = cv2.resize(frame, dsize=(width, height), interpolation=cv2.INTER_AREA)
- ret, jpg = cv2.imencode('.jpg', frame)
+ ret, jpg = cv2.imencode('.jpg', frame, [int(cv2.IMWRITE_JPEG_QUALITY), 70])
response = make_response(jpg.tobytes())
response.headers['Content-Type'] = 'image/jpg'
return response
@@ -301,6 +364,18 @@ def imagestream(detected_frames_processor, camera_name, fps, height, draw_option
width = int(height*frame.shape[1]/frame.shape[0])
frame = cv2.resize(frame, dsize=(width, height), interpolation=cv2.INTER_LINEAR)
- ret, jpg = cv2.imencode('.jpg', frame)
+ ret, jpg = cv2.imencode('.jpg', frame, [int(cv2.IMWRITE_JPEG_QUALITY), 70])
yield (b'--frame\r\n'
b'Content-Type: image/jpeg\r\n\r\n' + jpg.tobytes() + b'\r\n\r\n')
+
[email protected]('/ws')
+def echo_socket(socket):
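+    # register this client for MQTT relays, then forward anything the browser sends back out over MQTT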
+ current_app.mqtt_backend.register(socket)
+
+ while not socket.closed:
+ # Sleep to prevent *constant* context-switches.
+ gevent.sleep(0.1)
+
+ message = socket.receive()
+ if message:
+ current_app.mqtt_backend.publish(message)
diff --git a/frigate/mqtt.py b/frigate/mqtt.py
--- a/frigate/mqtt.py
+++ b/frigate/mqtt.py
@@ -91,6 +91,7 @@ def on_connect(client, userdata, flags, rc):
logger.error("Unable to connect to MQTT: Connection refused. Error code: " + str(rc))
logger.info("MQTT connected")
+ client.subscribe(f"{mqtt_config.topic_prefix}/#")
client.publish(mqtt_config.topic_prefix+'/available', 'online', retain=True)
client = mqtt.Client(client_id=mqtt_config.client_id)
@@ -118,8 +119,6 @@ def on_connect(client, userdata, flags, rc):
client.publish(f"{mqtt_config.topic_prefix}/{name}/snapshots/state", 'ON' if config.cameras[name].snapshots.enabled else 'OFF', retain=True)
client.publish(f"{mqtt_config.topic_prefix}/{name}/detect/state", 'ON' if config.cameras[name].detect.enabled else 'OFF', retain=True)
- client.subscribe(f"{mqtt_config.topic_prefix}/+/clips/set")
- client.subscribe(f"{mqtt_config.topic_prefix}/+/snapshots/set")
- client.subscribe(f"{mqtt_config.topic_prefix}/+/detect/set")
+ client.subscribe(f"{mqtt_config.topic_prefix}/#")
return client
diff --git a/frigate/object_processing.py b/frigate/object_processing.py
--- a/frigate/object_processing.py
+++ b/frigate/object_processing.py
@@ -217,7 +217,7 @@ def get_jpg_bytes(self, timestamp=False, bounding_box=False, crop=False, height=
cv2.putText(best_frame, time_to_show, (5, best_frame.shape[0]-7), cv2.FONT_HERSHEY_SIMPLEX,
fontScale=font_scale, color=(255, 255, 255), thickness=2)
- ret, jpg = cv2.imencode('.jpg', best_frame)
+ ret, jpg = cv2.imencode('.jpg', best_frame, [int(cv2.IMWRITE_JPEG_QUALITY), 70])
if ret:
return jpg.tobytes()
else:
@@ -454,28 +454,35 @@ def end(camera, obj: TrackedObject, current_frame_time):
message = { 'before': obj.previous, 'after': obj.to_dict(), 'type': 'end' }
self.client.publish(f"{self.topic_prefix}/events", json.dumps(message), retain=False)
# write snapshot to disk if enabled
- if snapshot_config.enabled:
+ if snapshot_config.enabled and self.should_save_snapshot(camera, obj):
jpg_bytes = obj.get_jpg_bytes(
timestamp=snapshot_config.timestamp,
bounding_box=snapshot_config.bounding_box,
crop=snapshot_config.crop,
height=snapshot_config.height
)
- with open(os.path.join(CLIPS_DIR, f"{camera}-{obj.obj_data['id']}.jpg"), 'wb') as j:
- j.write(jpg_bytes)
- event_data['has_snapshot'] = True
+ if jpg_bytes is None:
+ logger.warning(f"Unable to save snapshot for {obj.obj_data['id']}.")
+ else:
+ with open(os.path.join(CLIPS_DIR, f"{camera}-{obj.obj_data['id']}.jpg"), 'wb') as j:
+ j.write(jpg_bytes)
+ event_data['has_snapshot'] = True
self.event_queue.put(('end', camera, event_data))
def snapshot(camera, obj: TrackedObject, current_frame_time):
mqtt_config = self.config.cameras[camera].mqtt
- if mqtt_config.enabled:
+ if mqtt_config.enabled and self.should_mqtt_snapshot(camera, obj):
jpg_bytes = obj.get_jpg_bytes(
timestamp=mqtt_config.timestamp,
bounding_box=mqtt_config.bounding_box,
crop=mqtt_config.crop,
height=mqtt_config.height
)
- self.client.publish(f"{self.topic_prefix}/{camera}/{obj.obj_data['label']}/snapshot", jpg_bytes, retain=True)
+
+ if jpg_bytes is None:
+ logger.warning(f"Unable to send mqtt snapshot for {obj.obj_data['id']}.")
+ else:
+ self.client.publish(f"{self.topic_prefix}/{camera}/{obj.obj_data['label']}/snapshot", jpg_bytes, retain=True)
def object_status(camera, object_name, status):
self.client.publish(f"{self.topic_prefix}/{camera}/{object_name}", status, retain=False)
@@ -499,6 +506,24 @@ def object_status(camera, object_name, status):
# }
self.zone_data = defaultdict(lambda: defaultdict(lambda: {}))
+ def should_save_snapshot(self, camera, obj: TrackedObject):
+ # if there are required zones and there is no overlap
+ required_zones = self.config.cameras[camera].snapshots.required_zones
+ if len(required_zones) > 0 and not obj.entered_zones & set(required_zones):
+ logger.debug(f"Not creating snapshot for {obj.obj_data['id']} because it did not enter required zones")
+ return False
+
+ return True
+
+ def should_mqtt_snapshot(self, camera, obj: TrackedObject):
+ # if there are required zones and there is no overlap
+ required_zones = self.config.cameras[camera].mqtt.required_zones
+ if len(required_zones) > 0 and not obj.entered_zones & set(required_zones):
+ logger.debug(f"Not sending mqtt for {obj.obj_data['id']} because it did not enter required zones")
+ return False
+
+ return True
+
def get_best(self, camera, label):
# TODO: need a lock here
camera_state = self.camera_states[camera]
diff --git a/frigate/stats.py b/frigate/stats.py
--- a/frigate/stats.py
+++ b/frigate/stats.py
@@ -2,8 +2,11 @@
import logging
import threading
import time
+import psutil
+import shutil
from frigate.config import FrigateConfig
+from frigate.const import RECORD_DIR, CLIPS_DIR, CACHE_DIR
from frigate.version import VERSION
logger = logging.getLogger(__name__)
@@ -16,6 +19,15 @@ def stats_init(camera_metrics, detectors):
}
return stats_tracking
+def get_fs_type(path):
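+    # return the filesystem type of the most specific mount point containing path (longest matching prefix wins)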
+ bestMatch = ""
+ fsType = ""
+ for part in psutil.disk_partitions(all=True):
+ if path.startswith(part.mountpoint) and len(bestMatch) < len(part.mountpoint):
+ fsType = part.fstype
+ bestMatch = part.mountpoint
+ return fsType
+
def stats_snapshot(stats_tracking):
camera_metrics = stats_tracking['camera_metrics']
stats = {}
@@ -44,9 +56,19 @@ def stats_snapshot(stats_tracking):
stats['service'] = {
'uptime': (int(time.time()) - stats_tracking['started']),
- 'version': VERSION
+ 'version': VERSION,
+ 'storage': {}
}
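+    # report disk usage in MB and the filesystem type for each storage path frigate writes to, plus /dev/shm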
+ for path in [RECORD_DIR, CLIPS_DIR, CACHE_DIR, "/dev/shm"]:
+ storage_stats = shutil.disk_usage(path)
+ stats['service']['storage'][path] = {
+ 'total': round(storage_stats.total/1000000, 1),
+ 'used': round(storage_stats.used/1000000, 1),
+ 'free': round(storage_stats.free/1000000, 1),
+ 'mount_type': get_fs_type(path)
+ }
+
return stats
class StatsEmitter(threading.Thread):
diff --git a/frigate/video.py b/frigate/video.py
--- a/frigate/video.py
+++ b/frigate/video.py
@@ -281,6 +281,13 @@ def reduce_boxes(boxes):
reduced_boxes = cv2.groupRectangles([list(b) for b in itertools.chain(boxes, boxes)], 1, 0.2)[0]
return [tuple(b) for b in reduced_boxes]
+# modified from https://stackoverflow.com/a/40795835
+def intersects_any(box_a, boxes):
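+    # boxes are (x_min, y_min, x_max, y_max); returns True as soon as box_a overlaps any of the given boxes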
+ for box in boxes:
+ if box_a[2] < box[0] or box_a[0] > box[2] or box_a[1] > box[3] or box_a[3] < box[1]:
+ continue
+ return True
+
def detect(object_detector, frame, model_shape, region, objects_to_track, object_filters):
tensor_input = create_tensor_input(frame, model_shape, region)
@@ -350,7 +357,8 @@ def process_frames(camera_name: str, frame_queue: mp.Queue, frame_shape, model_s
# look for motion
motion_boxes = motion_detector.detect(frame)
- tracked_object_boxes = [obj['box'] for obj in object_tracker.tracked_objects.values()]
+ # only get the tracked object boxes that intersect with motion
+ tracked_object_boxes = [obj['box'] for obj in object_tracker.tracked_objects.values() if intersects_any(obj['box'], motion_boxes)]
# combine motion boxes with known locations of existing objects
combined_boxes = reduce_boxes(motion_boxes + tracked_object_boxes)
@@ -416,8 +424,12 @@ def process_frames(camera_name: str, frame_queue: mp.Queue, frame_shape, model_s
if refining:
refine_count += 1
+ # Limit to the detections overlapping with motion areas
+ # to avoid picking up stationary background objects
+ detections_with_motion = [d for d in detections if intersects_any(d[2], motion_boxes)]
+
# now that we have refined our detections, we need to track objects
- object_tracker.match_and_update(frame_time, detections)
+ object_tracker.match_and_update(frame_time, detections_with_motion)
# add to the queue if not full
if(detected_objects_queue.full()):
diff --git a/frigate/watchdog.py b/frigate/watchdog.py
--- a/frigate/watchdog.py
+++ b/frigate/watchdog.py
@@ -31,8 +31,8 @@ def run(self):
detection_start = detector.detection_start.value
if (detection_start > 0.0 and
now - detection_start > 10):
- logger.info("Detection appears to be stuck. Restarting detection process")
+ logger.info("Detection appears to be stuck. Restarting detection process...")
detector.start_or_restart()
elif not detector.detect_process.is_alive():
- logger.info("Detection appears to have stopped. Restarting frigate")
+ logger.info("Detection appears to have stopped. Exiting frigate...")
os.kill(os.getpid(), signal.SIGTERM)
| diff --git a/frigate/test/test_config.py b/frigate/test/test_config.py
--- a/frigate/test/test_config.py
+++ b/frigate/test/test_config.py
@@ -160,7 +160,40 @@ def test_override_object_filters(self):
assert('dog' in frigate_config.cameras['back'].objects.filters)
assert(frigate_config.cameras['back'].objects.filters['dog'].threshold == 0.7)
- def test_ffmpeg_params(self):
+ def test_global_object_mask(self):
+ config = {
+ 'mqtt': {
+ 'host': 'mqtt'
+ },
+ 'objects': {
+ 'track': ['person', 'dog']
+ },
+ 'cameras': {
+ 'back': {
+ 'ffmpeg': {
+ 'inputs': [
+ { 'path': 'rtsp://10.0.0.1:554/video', 'roles': ['detect'] }
+ ]
+ },
+ 'height': 1080,
+ 'width': 1920,
+ 'objects': {
+ 'mask': '0,0,1,1,0,1',
+ 'filters': {
+ 'dog': {
+ 'mask': '1,1,1,1,1,1'
+ }
+ }
+ }
+ }
+ }
+ }
+ frigate_config = FrigateConfig(config=config)
+ assert('dog' in frigate_config.cameras['back'].objects.filters)
+ assert(len(frigate_config.cameras['back'].objects.filters['dog']._raw_mask) == 2)
+ assert(len(frigate_config.cameras['back'].objects.filters['person']._raw_mask) == 1)
+
+ def test_ffmpeg_params_global(self):
config = {
'ffmpeg': {
'input_args': ['-re']
@@ -190,6 +223,64 @@ def test_ffmpeg_params(self):
}
frigate_config = FrigateConfig(config=config)
assert('-re' in frigate_config.cameras['back'].ffmpeg_cmds[0]['cmd'])
+
+ def test_ffmpeg_params_camera(self):
+ config = {
+ 'mqtt': {
+ 'host': 'mqtt'
+ },
+ 'cameras': {
+ 'back': {
+ 'ffmpeg': {
+ 'inputs': [
+ { 'path': 'rtsp://10.0.0.1:554/video', 'roles': ['detect'] }
+ ],
+ 'input_args': ['-re']
+ },
+ 'height': 1080,
+ 'width': 1920,
+ 'objects': {
+ 'track': ['person', 'dog'],
+ 'filters': {
+ 'dog': {
+ 'threshold': 0.7
+ }
+ }
+ }
+ }
+ }
+ }
+ frigate_config = FrigateConfig(config=config)
+ assert('-re' in frigate_config.cameras['back'].ffmpeg_cmds[0]['cmd'])
+
+ def test_ffmpeg_params_input(self):
+ config = {
+ 'mqtt': {
+ 'host': 'mqtt'
+ },
+ 'cameras': {
+ 'back': {
+ 'ffmpeg': {
+ 'inputs': [
+ { 'path': 'rtsp://10.0.0.1:554/video', 'roles': ['detect'], 'input_args': ['-re'] }
+ ]
+ },
+ 'height': 1080,
+ 'width': 1920,
+ 'objects': {
+ 'track': ['person', 'dog'],
+ 'filters': {
+ 'dog': {
+ 'threshold': 0.7
+ }
+ }
+ }
+ }
+ }
+ }
+ frigate_config = FrigateConfig(config=config)
+ assert('-re' in frigate_config.cameras['back'].ffmpeg_cmds[0]['cmd'])
+
def test_inherit_clips_retention(self):
config = {
diff --git a/web/src/__tests__/App.test.jsx b/web/src/__tests__/App.test.jsx
new file mode 100644
--- /dev/null
+++ b/web/src/__tests__/App.test.jsx
@@ -0,0 +1,27 @@
+import { h } from 'preact';
+import * as Api from '../api';
+import * as IDB from 'idb-keyval';
+import * as PreactRouter from 'preact-router';
+import App from '../App';
+import { render, screen } from '@testing-library/preact';
+
+describe('App', () => {
+ let mockUseConfig;
+
+ beforeEach(() => {
+ jest.spyOn(IDB, 'get').mockImplementation(() => Promise.resolve(undefined));
+ jest.spyOn(IDB, 'set').mockImplementation(() => Promise.resolve(true));
+ mockUseConfig = jest.spyOn(Api, 'useConfig').mockImplementation(() => ({
+ data: { cameras: { front: { name: 'front', objects: { track: ['taco', 'cat', 'dog'] } } } },
+ }));
+ jest.spyOn(Api, 'useApiHost').mockImplementation(() => 'http://base-url.local:5000');
+ jest.spyOn(PreactRouter, 'Router').mockImplementation(() => <div data-testid="router" />);
+ });
+
+ test('shows a loading indicator while loading', async () => {
+ mockUseConfig.mockReturnValue({ status: 'loading' });
+ render(<App />);
+ await screen.findByTestId('app');
+ expect(screen.queryByLabelText('Loading…')).toBeInTheDocument();
+ });
+});
diff --git a/web/src/__tests__/AppBar.test.jsx b/web/src/__tests__/AppBar.test.jsx
new file mode 100644
--- /dev/null
+++ b/web/src/__tests__/AppBar.test.jsx
@@ -0,0 +1,53 @@
+import { h } from 'preact';
+import * as Context from '../context';
+import AppBar from '../AppBar';
+import { fireEvent, render, screen } from '@testing-library/preact';
+
+describe('AppBar', () => {
+ beforeEach(() => {
+ jest.spyOn(Context, 'useDarkMode').mockImplementation(() => ({
+ setDarkMode: jest.fn(),
+ }));
+ jest.spyOn(Context, 'DarkModeProvider').mockImplementation(({ children }) => {
+ return <div>{children}</div>;
+ });
+ });
+
+ test('shows a menu on overflow click', async () => {
+ render(
+ <Context.DarkModeProvider>
+ <Context.DrawerProvider>
+ <AppBar />
+ </Context.DrawerProvider>
+ </Context.DarkModeProvider>
+ );
+
+ const overflowButton = await screen.findByLabelText('More options');
+ fireEvent.click(overflowButton);
+
+ const menu = await screen.findByRole('listbox');
+ expect(menu).toBeInTheDocument();
+ });
+
+ test('sets dark mode on MenuItem select', async () => {
+ const setDarkModeSpy = jest.fn();
+ jest.spyOn(Context, 'useDarkMode').mockImplementation(() => ({
+ setDarkMode: setDarkModeSpy,
+ }));
+ render(
+ <Context.DarkModeProvider>
+ <Context.DrawerProvider>
+ <AppBar />
+ </Context.DrawerProvider>
+ </Context.DarkModeProvider>
+ );
+
+ const overflowButton = await screen.findByLabelText('More options');
+ fireEvent.click(overflowButton);
+
+ await screen.findByRole('listbox');
+
+ fireEvent.click(screen.getByText('Light'));
+ expect(setDarkModeSpy).toHaveBeenCalledWith('light');
+ });
+});
diff --git a/web/src/__tests__/Sidebar.test.jsx b/web/src/__tests__/Sidebar.test.jsx
new file mode 100644
--- /dev/null
+++ b/web/src/__tests__/Sidebar.test.jsx
@@ -0,0 +1,33 @@
+import { h } from 'preact';
+import * as Api from '../api';
+import * as Context from '../context';
+import Sidebar from '../Sidebar';
+import { render, screen } from '@testing-library/preact';
+
+describe('Sidebar', () => {
+ beforeEach(() => {
+ jest.spyOn(Api, 'useConfig').mockImplementation(() => ({
+ data: {
+ cameras: {
+ front: { name: 'front', objects: { track: ['taco', 'cat', 'dog'] } },
+ side: { name: 'side', objects: { track: ['taco', 'cat', 'dog'] } },
+ },
+ },
+ }));
+
+ jest.spyOn(Context, 'useDrawer').mockImplementation(() => ({ showDrawer: true, setShowDrawer: () => {} }));
+ });
+
+ test('does not render cameras by default', async () => {
+ render(<Sidebar />);
+ expect(screen.queryByRole('link', { name: 'front' })).not.toBeInTheDocument();
+ expect(screen.queryByRole('link', { name: 'side' })).not.toBeInTheDocument();
+ });
+
+  test('renders cameras if in camera route', async () => {
+ window.history.replaceState({}, 'Cameras', '/cameras/front');
+ render(<Sidebar />);
+ expect(screen.queryByRole('link', { name: 'front' })).toBeInTheDocument();
+ expect(screen.queryByRole('link', { name: 'side' })).toBeInTheDocument();
+ });
+});
diff --git a/web/src/api/__tests__/index.test.jsx b/web/src/api/__tests__/index.test.jsx
new file mode 100644
--- /dev/null
+++ b/web/src/api/__tests__/index.test.jsx
@@ -0,0 +1,121 @@
+import { h } from 'preact';
+import * as Mqtt from '../mqtt';
+import { ApiProvider, useFetch, useApiHost } from '..';
+import { render, screen } from '@testing-library/preact';
+
+describe('useApiHost', () => {
+ beforeEach(() => {
+ jest.spyOn(Mqtt, 'MqttProvider').mockImplementation(({ children }) => children);
+ });
+
+ test('is set from the baseUrl', async () => {
+ function Test() {
+ const apiHost = useApiHost();
+ return <div>{apiHost}</div>;
+ }
+ render(
+ <ApiProvider>
+ <Test />
+ </ApiProvider>
+ );
+ expect(screen.queryByText('http://base-url.local:5000')).toBeInTheDocument();
+ });
+});
+
+function Test() {
+ const { data, status } = useFetch('/api/tacos');
+ return (
+ <div>
+ <span>{data ? data.returnData : ''}</span>
+ <span>{status}</span>
+ </div>
+ );
+}
+
+describe('useFetch', () => {
+ let fetchSpy;
+
+ beforeEach(() => {
+ jest.spyOn(Mqtt, 'MqttProvider').mockImplementation(({ children }) => children);
+ fetchSpy = jest.spyOn(window, 'fetch').mockImplementation(async (url, options) => {
+ if (url.endsWith('/api/config')) {
+ return Promise.resolve({ ok: true, json: () => Promise.resolve({}) });
+ }
+ return new Promise((resolve) => {
+ setTimeout(() => {
+ resolve({ ok: true, json: () => Promise.resolve({ returnData: 'yep' }) });
+ }, 1);
+ });
+ });
+ });
+
+ test('loads data', async () => {
+ render(
+ <ApiProvider>
+ <Test />
+ </ApiProvider>
+ );
+
+ expect(screen.queryByText('loading')).toBeInTheDocument();
+ expect(screen.queryByText('yep')).not.toBeInTheDocument();
+
+ jest.runAllTimers();
+ await screen.findByText('loaded');
+ expect(fetchSpy).toHaveBeenCalledWith('http://base-url.local:5000/api/tacos');
+
+ expect(screen.queryByText('loaded')).toBeInTheDocument();
+ expect(screen.queryByText('yep')).toBeInTheDocument();
+ });
+
+ test('sets error if response is not okay', async () => {
+ jest.spyOn(window, 'fetch').mockImplementation((url) => {
+ if (url.includes('/config')) {
+ return Promise.resolve({ ok: true, json: () => Promise.resolve({}) });
+ }
+ return new Promise((resolve) => {
+ setTimeout(() => {
+ resolve({ ok: false });
+ }, 1);
+ });
+ });
+
+ render(
+ <ApiProvider>
+ <Test />
+ </ApiProvider>
+ );
+
+ expect(screen.queryByText('loading')).toBeInTheDocument();
+ jest.runAllTimers();
+ await screen.findByText('error');
+ });
+
+ test('does not re-fetch if the query has already been made', async () => {
+ const { rerender } = render(
+ <ApiProvider>
+ <Test key={0} />
+ </ApiProvider>
+ );
+
+ expect(screen.queryByText('loading')).toBeInTheDocument();
+ expect(screen.queryByText('yep')).not.toBeInTheDocument();
+
+ jest.runAllTimers();
+ await screen.findByText('loaded');
+ expect(fetchSpy).toHaveBeenCalledWith('http://base-url.local:5000/api/tacos');
+
+ rerender(
+ <ApiProvider>
+ <Test key={1} />
+ </ApiProvider>
+ );
+
+ expect(screen.queryByText('loaded')).toBeInTheDocument();
+ expect(screen.queryByText('yep')).toBeInTheDocument();
+
+ jest.runAllTimers();
+
+ // once for /api/config, once for /api/tacos
+ expect(fetchSpy).toHaveBeenCalledTimes(2);
+ });
+});
diff --git a/web/src/api/__tests__/mqtt.test.jsx b/web/src/api/__tests__/mqtt.test.jsx
new file mode 100644
--- /dev/null
+++ b/web/src/api/__tests__/mqtt.test.jsx
@@ -0,0 +1,135 @@
+import { h } from 'preact';
+import { Mqtt, MqttProvider, useMqtt } from '../mqtt';
+import { useCallback, useContext } from 'preact/hooks';
+import { fireEvent, render, screen } from '@testing-library/preact';
+
+function Test() {
+ const { state } = useContext(Mqtt);
+ return state.__connected ? (
+ <div data-testid="data">
+ {Object.keys(state).map((key) => (
+ <div data-testid={key}>{JSON.stringify(state[key])}</div>
+ ))}
+ </div>
+ ) : null;
+}
+
+const TEST_URL = 'ws://test-foo:1234/ws';
+
+describe('MqttProvider', () => {
+ let createWebsocket, wsClient;
+ beforeEach(() => {
+ wsClient = {
+ close: jest.fn(),
+ send: jest.fn(),
+ };
+ createWebsocket = jest.fn((url) => {
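+      // fake WebSocket: the Proxy records handlers the provider assigns (onopen, onmessage, …) and fires onopen as soon as it is set, so the provider reports a connection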
+ wsClient.args = [url];
+ return new Proxy(
+ {},
+ {
+ get(target, prop, receiver) {
+ return wsClient[prop];
+ },
+ set(target, prop, value) {
+ wsClient[prop] = typeof value === 'function' ? jest.fn(value) : value;
+ if (prop === 'onopen') {
+ wsClient[prop]();
+ }
+ return true;
+ },
+ }
+ );
+ });
+ });
+
+ test('connects to the mqtt server', async () => {
+ render(
+ <MqttProvider config={mockConfig} createWebsocket={createWebsocket} mqttUrl={TEST_URL}>
+ <Test />
+ </MqttProvider>
+ );
+ await screen.findByTestId('data');
+ expect(wsClient.args).toEqual([TEST_URL]);
+ expect(screen.getByTestId('__connected')).toHaveTextContent('true');
+ });
+
+ test('receives data through useMqtt', async () => {
+ function Test() {
+ const {
+ value: { payload, retain },
+ connected,
+ } = useMqtt('tacos');
+ return connected ? (
+ <div>
+ <div data-testid="payload">{JSON.stringify(payload)}</div>
+ <div data-testid="retain">{JSON.stringify(retain)}</div>
+ </div>
+ ) : null;
+ }
+
+ const { rerender } = render(
+ <MqttProvider config={mockConfig} createWebsocket={createWebsocket} mqttUrl={TEST_URL}>
+ <Test />
+ </MqttProvider>
+ );
+ await screen.findByTestId('payload');
+ wsClient.onmessage({
+ data: JSON.stringify({ topic: 'tacos', payload: JSON.stringify({ yes: true }), retain: false }),
+ });
+ rerender(
+ <MqttProvider config={mockConfig} createWebsocket={createWebsocket} mqttUrl={TEST_URL}>
+ <Test />
+ </MqttProvider>
+ );
+ expect(screen.getByTestId('payload')).toHaveTextContent('{"yes":true}');
+ expect(screen.getByTestId('retain')).toHaveTextContent('false');
+ });
+
+ test('can send values through useMqtt', async () => {
+ function Test() {
+ const { send, connected } = useMqtt('tacos');
+ const handleClick = useCallback(() => {
+ send({ yes: true });
+ }, [send]);
+ return connected ? <button onClick={handleClick}>click me</button> : null;
+ }
+
+ render(
+ <MqttProvider config={mockConfig} createWebsocket={createWebsocket} mqttUrl={TEST_URL}>
+ <Test />
+ </MqttProvider>
+ );
+ await screen.findByRole('button');
+ fireEvent.click(screen.getByRole('button'));
+ await expect(wsClient.send).toHaveBeenCalledWith(
+ JSON.stringify({ topic: 'tacos', payload: JSON.stringify({ yes: true }) })
+ );
+ });
+
+ test('prefills the clips/detect/snapshots state from config', async () => {
+ jest.spyOn(Date, 'now').mockReturnValue(123456);
+ const config = {
+ cameras: {
+ front: { name: 'front', detect: { enabled: true }, clips: { enabled: false }, snapshots: { enabled: true } },
+ side: { name: 'side', detect: { enabled: false }, clips: { enabled: false }, snapshots: { enabled: false } },
+ },
+ };
+ render(
+ <MqttProvider config={config} createWebsocket={createWebsocket} mqttUrl={TEST_URL}>
+ <Test />
+ </MqttProvider>
+ );
+ await screen.findByTestId('data');
+ expect(screen.getByTestId('front/detect/state')).toHaveTextContent('{"lastUpdate":123456,"payload":"ON"}');
+ expect(screen.getByTestId('front/clips/state')).toHaveTextContent('{"lastUpdate":123456,"payload":"OFF"}');
+ expect(screen.getByTestId('front/snapshots/state')).toHaveTextContent('{"lastUpdate":123456,"payload":"ON"}');
+ expect(screen.getByTestId('side/detect/state')).toHaveTextContent('{"lastUpdate":123456,"payload":"OFF"}');
+ expect(screen.getByTestId('side/clips/state')).toHaveTextContent('{"lastUpdate":123456,"payload":"OFF"}');
+ expect(screen.getByTestId('side/snapshots/state')).toHaveTextContent('{"lastUpdate":123456,"payload":"OFF"}');
+ });
+});
+
+const mockConfig = {
+ cameras: {},
+};
diff --git a/web/src/components/__tests__/ActivityIndicator.test.jsx b/web/src/components/__tests__/ActivityIndicator.test.jsx
new file mode 100644
--- /dev/null
+++ b/web/src/components/__tests__/ActivityIndicator.test.jsx
@@ -0,0 +1,47 @@
+import { h } from 'preact';
+import ActivityIndicator from '../ActivityIndicator';
+import { render, screen } from '@testing-library/preact';
+
+describe('ActivityIndicator', () => {
+ test('renders an ActivityIndicator with default size md', async () => {
+ render(<ActivityIndicator />);
+ expect(screen.getByLabelText('Loading…')).toMatchInlineSnapshot(`
+ <div
+ aria-label="Loading…"
+ class="w-full flex items-center justify-center"
+ >
+ <div
+ class="activityindicator ease-in rounded-full border-gray-200 text-blue-500 h-8 w-8 border-4 border-t-4"
+ />
+ </div>
+ `);
+ });
+
+ test('renders an ActivityIndicator with size sm', async () => {
+ render(<ActivityIndicator size="sm" />);
+ expect(screen.getByLabelText('Loading…')).toMatchInlineSnapshot(`
+ <div
+ aria-label="Loading…"
+ class="w-full flex items-center justify-center"
+ >
+ <div
+ class="activityindicator ease-in rounded-full border-gray-200 text-blue-500 h-4 w-4 border-2 border-t-2"
+ />
+ </div>
+ `);
+ });
+
+ test('renders an ActivityIndicator with size lg', async () => {
+ render(<ActivityIndicator size="lg" />);
+ expect(screen.getByLabelText('Loading…')).toMatchInlineSnapshot(`
+ <div
+ aria-label="Loading…"
+ class="w-full flex items-center justify-center"
+ >
+ <div
+ class="activityindicator ease-in rounded-full border-gray-200 text-blue-500 h-16 w-16 border-8 border-t-8"
+ />
+ </div>
+ `);
+ });
+});
diff --git a/web/src/components/__tests__/AppBar.test.jsx b/web/src/components/__tests__/AppBar.test.jsx
new file mode 100644
--- /dev/null
+++ b/web/src/components/__tests__/AppBar.test.jsx
@@ -0,0 +1,132 @@
+import { h } from 'preact';
+import { DrawerProvider } from '../../context';
+import AppBar from '../AppBar';
+import { fireEvent, render, screen } from '@testing-library/preact';
+import { useRef } from 'preact/hooks';
+
+function Title() {
+ return <div>I am the title</div>;
+}
+
+describe('AppBar', () => {
+ test('renders the title', async () => {
+ render(
+ <DrawerProvider>
+ <AppBar title={Title} />
+ </DrawerProvider>
+ );
+ expect(screen.getByText('I am the title')).toBeInTheDocument();
+ });
+
+ describe('overflow menu', () => {
+ test('is not rendered if a ref is not provided', async () => {
+ const handleOverflow = jest.fn();
+ render(
+ <DrawerProvider>
+ <AppBar title={Title} onOverflowClick={handleOverflow} />
+ </DrawerProvider>
+ );
+ expect(screen.queryByLabelText('More options')).not.toBeInTheDocument();
+ });
+
+ test('is not rendered if a click handler is not provided', async () => {
+ function Wrapper() {
+ const ref = useRef(null);
+ return <AppBar title={Title} overflowRef={ref} />;
+ }
+
+ render(
+ <DrawerProvider>
+ <Wrapper />
+ </DrawerProvider>
+ );
+ expect(screen.queryByLabelText('More options')).not.toBeInTheDocument();
+ });
+
+ test('is rendered with click handler and ref', async () => {
+ const handleOverflow = jest.fn();
+
+ function Wrapper() {
+ const ref = useRef(null);
+ return <AppBar title={Title} overflowRef={ref} onOverflowClick={handleOverflow} />;
+ }
+
+ render(
+ <DrawerProvider>
+ <Wrapper />
+ </DrawerProvider>
+ );
+ expect(screen.queryByLabelText('More options')).toBeInTheDocument();
+ });
+
+ test('calls the handler when clicked', async () => {
+ const handleOverflow = jest.fn();
+
+ function Wrapper() {
+ const ref = useRef(null);
+ return <AppBar title={Title} overflowRef={ref} onOverflowClick={handleOverflow} />;
+ }
+
+ render(
+ <DrawerProvider>
+ <Wrapper />
+ </DrawerProvider>
+ );
+
+ fireEvent.click(screen.queryByLabelText('More options'));
+
+ expect(handleOverflow).toHaveBeenCalled();
+ });
+ });
+
+ describe('scrolling', () => {
+ test('is visible initially', async () => {
+ render(
+ <DrawerProvider>
+ <AppBar title={Title} />
+ </DrawerProvider>
+ );
+
+ const classes = screen.getByTestId('appbar').classList;
+
+ expect(classes.contains('translate-y-0')).toBe(true);
+ expect(classes.contains('-translate-y-full')).toBe(false);
+ });
+
+ test('hides when scrolled downward', async () => {
+ jest.spyOn(window, 'requestAnimationFrame').mockImplementation((cb) => cb());
+ render(
+ <DrawerProvider>
+ <AppBar title={Title} />
+ </DrawerProvider>
+ );
+
+ window.scrollY = 300;
+ await fireEvent.scroll(document, { target: { scrollY: 300 } });
+
+ const classes = screen.getByTestId('appbar').classList;
+
+ expect(classes.contains('translate-y-0')).toBe(false);
+ expect(classes.contains('-translate-y-full')).toBe(true);
+ });
+
+ test('reappears when scrolled upward', async () => {
+ jest.spyOn(window, 'requestAnimationFrame').mockImplementation((cb) => cb());
+ render(
+ <DrawerProvider>
+ <AppBar title={Title} />
+ </DrawerProvider>
+ );
+
+ window.scrollY = 300;
+ await fireEvent.scroll(document, { target: { scrollY: 300 } });
+ window.scrollY = 280;
+ await fireEvent.scroll(document, { target: { scrollY: 280 } });
+
+ const classes = screen.getByTestId('appbar').classList;
+
+ expect(classes.contains('translate-y-0')).toBe(true);
+ expect(classes.contains('-translate-y-full')).toBe(false);
+ });
+ });
+});
diff --git a/web/src/components/__tests__/AutoUpdatingCameraImage.test.jsx b/web/src/components/__tests__/AutoUpdatingCameraImage.test.jsx
new file mode 100644
--- /dev/null
+++ b/web/src/components/__tests__/AutoUpdatingCameraImage.test.jsx
@@ -0,0 +1,42 @@
+import { h } from 'preact';
+import AutoUpdatingCameraImage from '../AutoUpdatingCameraImage';
+import { screen, render } from '@testing-library/preact';
+
+let mockOnload;
+jest.mock('../CameraImage', () => {
+ function CameraImage({ onload, searchParams }) {
+ mockOnload = () => {
+ onload();
+ };
+ return <div data-testid="camera-image">{searchParams}</div>;
+ }
+ return {
+ __esModule: true,
+ default: CameraImage,
+ };
+});
+
+describe('AutoUpdatingCameraImage', () => {
+ let dateNowSpy;
+ beforeEach(() => {
+ dateNowSpy = jest.spyOn(Date, 'now').mockReturnValue(0);
+ });
+
+ test('shows FPS by default', async () => {
+ render(<AutoUpdatingCameraImage camera="tacos" />);
+ expect(screen.queryByText('Displaying at 0fps')).toBeInTheDocument();
+ });
+
+ test('does not show FPS if turned off', async () => {
+ render(<AutoUpdatingCameraImage camera="tacos" showFps={false} />);
+ expect(screen.queryByText('Displaying at 0fps')).not.toBeInTheDocument();
+ });
+
+ test('on load, sets a new cache key to search params', async () => {
+ dateNowSpy.mockReturnValueOnce(100).mockReturnValueOnce(200).mockReturnValueOnce(300);
+ render(<AutoUpdatingCameraImage camera="tacos" searchParams="foo" />);
+ mockOnload();
+ jest.runAllTimers();
+ expect(screen.queryByText('cache=100&foo')).toBeInTheDocument();
+ });
+});
diff --git a/web/src/components/__tests__/Button.test.jsx b/web/src/components/__tests__/Button.test.jsx
new file mode 100644
--- /dev/null
+++ b/web/src/components/__tests__/Button.test.jsx
@@ -0,0 +1,36 @@
+import { h } from 'preact';
+import Button from '../Button';
+import { render, screen } from '@testing-library/preact';
+
+describe('Button', () => {
+ test('renders children', async () => {
+ render(
+ <Button>
+ <div>hello</div>
+ <div>hi</div>
+ </Button>
+ );
+ expect(screen.queryByText('hello')).toBeInTheDocument();
+ expect(screen.queryByText('hi')).toBeInTheDocument();
+ });
+
+ test('includes focus, active, and hover classes when enabled', async () => {
+ render(<Button>click me</Button>);
+
+ const classList = screen.queryByRole('button').classList;
+ expect(classList.contains('focus:outline-none')).toBe(true);
+ expect(classList.contains('focus:ring-2')).toBe(true);
+ expect(classList.contains('hover:shadow-md')).toBe(true);
+ expect(classList.contains('active:bg-blue-600')).toBe(true);
+ });
+
+  test('does not include focus, active, and hover classes when disabled', async () => {
+ render(<Button disabled>click me</Button>);
+
+ const classList = screen.queryByRole('button').classList;
+ expect(classList.contains('focus:outline-none')).toBe(false);
+ expect(classList.contains('focus:ring-2')).toBe(false);
+ expect(classList.contains('hover:shadow-md')).toBe(false);
+ expect(classList.contains('active:bg-blue-600')).toBe(false);
+ });
+});
diff --git a/web/src/components/__tests__/CameraImage.test.jsx b/web/src/components/__tests__/CameraImage.test.jsx
new file mode 100644
--- /dev/null
+++ b/web/src/components/__tests__/CameraImage.test.jsx
@@ -0,0 +1,40 @@
+import { h } from 'preact';
+import * as Api from '../../api';
+import * as Hooks from '../../hooks';
+import CameraImage from '../CameraImage';
+import { render, screen } from '@testing-library/preact';
+
+describe('CameraImage', () => {
+ beforeEach(() => {
+ jest.spyOn(Api, 'useConfig').mockImplementation(() => {
+ return { data: { cameras: { front: { name: 'front', width: 1280, height: 720 } } } };
+ });
+ jest.spyOn(Api, 'useApiHost').mockReturnValue('http://base-url.local:5000');
+ jest.spyOn(Hooks, 'useResizeObserver').mockImplementation(() => [{ width: 0 }]);
+ });
+
+ test('renders an activity indicator while loading', async () => {
+ render(<CameraImage camera="front" />);
+ expect(screen.queryByLabelText('Loading…')).toBeInTheDocument();
+ });
+
+ test('creates a scaled canvas using the available width & height, preserving camera aspect ratio', async () => {
+ jest.spyOn(Hooks, 'useResizeObserver').mockReturnValueOnce([{ width: 720 }]);
+
+ render(<CameraImage camera="front" />);
+ expect(screen.queryByLabelText('Loading…')).toBeInTheDocument();
+ const canvas = screen.queryByTestId('cameraimage-canvas');
+ expect(canvas).toHaveAttribute('height', '405');
+ expect(canvas).toHaveAttribute('width', '720');
+ });
+
+ test('allows camera image to stretch to available space', async () => {
+ jest.spyOn(Hooks, 'useResizeObserver').mockReturnValueOnce([{ width: 1400 }]);
+
+ render(<CameraImage camera="front" stretch />);
+ expect(screen.queryByLabelText('Loading…')).toBeInTheDocument();
+ const canvas = screen.queryByTestId('cameraimage-canvas');
+ expect(canvas).toHaveAttribute('height', '787');
+ expect(canvas).toHaveAttribute('width', '1400');
+ });
+});
diff --git a/web/src/components/__tests__/Card.test.jsx b/web/src/components/__tests__/Card.test.jsx
new file mode 100644
--- /dev/null
+++ b/web/src/components/__tests__/Card.test.jsx
@@ -0,0 +1,46 @@
+import { h } from 'preact';
+import Card from '../Card';
+import { render, screen } from '@testing-library/preact';
+
+describe('Card', () => {
+ test('renders a Card with media', async () => {
+ render(<Card media={<img src="tacos.jpg" alt="tacos" />} />);
+ expect(screen.queryByAltText('tacos')).toBeInTheDocument();
+ });
+
+ test('renders a Card with a link around media', async () => {
+ render(<Card href="/tacos" media={<img src="tacos.jpg" alt="tacos" />} />);
+ expect(screen.queryByAltText('tacos')).toBeInTheDocument();
+ expect(screen.getByAltText('tacos').closest('a')).toHaveAttribute('href', '/tacos');
+ });
+
+ test('renders a Card with a header', async () => {
+ render(<Card header="Tacos!" />);
+ expect(screen.queryByText('Tacos!')).toBeInTheDocument();
+ });
+
+ test('renders a Card with a linked header', async () => {
+ render(<Card href="/tacos" header="Tacos!" />);
+ expect(screen.queryByText('Tacos!')).toBeInTheDocument();
+ expect(screen.queryByText('Tacos!').closest('a')).toHaveAttribute('href', '/tacos');
+ });
+
+ test('renders content', async () => {
+ const content = <div data-testid="content">hello</div>;
+ render(<Card content={content} />);
+ expect(screen.queryByTestId('content')).toBeInTheDocument();
+ });
+
+ test('renders buttons', async () => {
+ const buttons = [
+ { name: 'Tacos', href: '/tacos' },
+ { name: 'Burritos', href: '/burritos' },
+ ];
+ render(<Card buttons={buttons} />);
+ expect(screen.queryByText('Tacos')).toHaveAttribute('role', 'button');
+ expect(screen.queryByText('Tacos')).toHaveAttribute('href', '/tacos');
+
+ expect(screen.queryByText('Burritos')).toHaveAttribute('role', 'button');
+ expect(screen.queryByText('Burritos')).toHaveAttribute('href', '/burritos');
+ });
+});
diff --git a/web/src/components/__tests__/Heading.test.jsx b/web/src/components/__tests__/Heading.test.jsx
new file mode 100644
--- /dev/null
+++ b/web/src/components/__tests__/Heading.test.jsx
@@ -0,0 +1,25 @@
+import { h } from 'preact';
+import Heading from '../Heading';
+import { render, screen } from '@testing-library/preact';
+
+describe('Heading', () => {
+ test('renders content with default size', async () => {
+ render(<Heading>Hello</Heading>);
+ expect(screen.queryByText('Hello')).toBeInTheDocument();
+ expect(screen.queryByText('Hello').classList.contains('text-2xl')).toBe(true);
+ });
+
+ test('renders with custom size', async () => {
+ render(<Heading size="lg">Hello</Heading>);
+ expect(screen.queryByText('Hello')).toBeInTheDocument();
+ expect(screen.queryByText('Hello').classList.contains('text-2xl')).toBe(false);
+ expect(screen.queryByText('Hello').classList.contains('text-lg')).toBe(true);
+ });
+
+ test('renders with custom className', async () => {
+ render(<Heading className="tacos">Hello</Heading>);
+ expect(screen.queryByText('Hello')).toBeInTheDocument();
+ expect(screen.queryByText('Hello').classList.contains('text-2xl')).toBe(true);
+ expect(screen.queryByText('Hello').classList.contains('tacos')).toBe(true);
+ });
+});
diff --git a/web/src/components/__tests__/Link.test.jsx b/web/src/components/__tests__/Link.test.jsx
new file mode 100644
--- /dev/null
+++ b/web/src/components/__tests__/Link.test.jsx
@@ -0,0 +1,17 @@
+import { h } from 'preact';
+import Link from '../Link';
+import { render, screen } from '@testing-library/preact';
+
+describe('Link', () => {
+ test('renders a link', async () => {
+ render(<Link href="/tacos">Hello</Link>);
+ expect(screen.queryByText('Hello')).toMatchInlineSnapshot(`
+ <a
+ class="text-blue-500 hover:underline"
+ href="/tacos"
+ >
+ Hello
+ </a>
+ `);
+ });
+});
diff --git a/web/src/components/__tests__/Menu.test.jsx b/web/src/components/__tests__/Menu.test.jsx
new file mode 100644
--- /dev/null
+++ b/web/src/components/__tests__/Menu.test.jsx
@@ -0,0 +1,52 @@
+import { h } from 'preact';
+import Menu, { MenuItem } from '../Menu';
+import { fireEvent, render, screen } from '@testing-library/preact';
+import { useRef } from 'preact/hooks';
+
+describe('Menu', () => {
+ test('renders a dialog', async () => {
+ function Test() {
+ const relativeRef = useRef();
+ return (
+ <div>
+ <div ref={relativeRef} />
+ <Menu relativeTo={relativeRef} />
+ </div>
+ );
+ }
+
+ render(<Test />);
+ expect(screen.queryByRole('listbox')).toBeInTheDocument();
+ });
+});
+
+describe('MenuItem', () => {
+ test('renders a menu item', async () => {
+ render(<MenuItem label="Tacos" />);
+ expect(screen.queryByRole('option')).toHaveTextContent('Tacos');
+ });
+
+ test('calls onSelect when clicked', async () => {
+ const handleSelect = jest.fn();
+ render(<MenuItem label="Tacos" onSelect={handleSelect} value="tacos-value" />);
+ fireEvent.click(screen.queryByRole('option'));
+ expect(handleSelect).toHaveBeenCalledWith('tacos-value', 'Tacos');
+ });
+
+  test('renders an icon when passed', async () => {
+ function Icon() {
+ return <div data-testid="icon" />;
+ }
+ render(<MenuItem icon={Icon} label="Tacos" />);
+ expect(screen.queryByTestId('icon')).toBeInTheDocument();
+ });
+
+ test('applies different styles when focused', async () => {
+ const { rerender } = render(<MenuItem label="Tacos" />);
+ const classes = Array.from(screen.queryByRole('option').classList);
+ rerender(<MenuItem label="Tacos" focus />);
+ const focusClasses = Array.from(screen.queryByRole('option').classList);
+
+ expect(focusClasses.length).toBeGreaterThan(classes.length);
+ });
+});
diff --git a/web/src/components/__tests__/NavigationDrawer.test.jsx b/web/src/components/__tests__/NavigationDrawer.test.jsx
new file mode 100644
--- /dev/null
+++ b/web/src/components/__tests__/NavigationDrawer.test.jsx
@@ -0,0 +1,61 @@
+import { h } from 'preact';
+import * as Context from '../../context';
+import NavigationDrawer, { Destination } from '../NavigationDrawer';
+import { fireEvent, render, screen } from '@testing-library/preact';
+
+describe('NavigationDrawer', () => {
+ let useDrawer, setShowDrawer;
+
+ beforeEach(() => {
+ setShowDrawer = jest.fn();
+ useDrawer = jest.spyOn(Context, 'useDrawer').mockImplementation(() => ({ showDrawer: true, setShowDrawer }));
+ });
+
+ test('renders a navigation drawer', async () => {
+ render(
+ <NavigationDrawer>
+ <div data-testid="children">Hello</div>
+ </NavigationDrawer>
+ );
+ expect(screen.queryByTestId('children')).toHaveTextContent('Hello');
+ expect(screen.queryByTestId('drawer').classList.contains('translate-x-full')).toBe(false);
+ expect(screen.queryByTestId('drawer').classList.contains('translate-x-0')).toBe(true);
+ });
+
+ test('is dismissed when the scrim is clicked', async () => {
+ useDrawer
+ .mockReturnValueOnce({ showDrawer: true, setShowDrawer })
+ .mockReturnValueOnce({ showDrawer: false, setShowDrawer });
+ render(<NavigationDrawer />);
+ fireEvent.click(screen.queryByTestId('scrim'));
+ expect(setShowDrawer).toHaveBeenCalledWith(false);
+ });
+
+ test('is not visible when not set to show', async () => {
+ useDrawer.mockReturnValue({ showDrawer: false, setShowDrawer });
+ render(<NavigationDrawer />);
+ expect(screen.queryByTestId('scrim')).not.toBeInTheDocument();
+ expect(screen.queryByTestId('drawer').classList.contains('-translate-x-full')).toBe(true);
+ expect(screen.queryByTestId('drawer').classList.contains('translate-x-0')).toBe(false);
+ });
+});
+
+describe('Destination', () => {
+ let setShowDrawer;
+
+ beforeEach(() => {
+ setShowDrawer = jest.fn();
+ jest.spyOn(Context, 'useDrawer').mockImplementation(() => ({ showDrawer: true, setShowDrawer }));
+ });
+
+ test('dismisses the drawer moments after being clicked', async () => {
+ render(
+ <NavigationDrawer>
+ <Destination href="/tacos" text="Tacos" />
+ </NavigationDrawer>
+ );
+ fireEvent.click(screen.queryByText('Tacos'));
+ jest.runAllTimers();
+ expect(setShowDrawer).toHaveBeenCalledWith(false);
+ });
+});
diff --git a/web/src/components/__tests__/RelativeModal.test.jsx b/web/src/components/__tests__/RelativeModal.test.jsx
new file mode 100644
--- /dev/null
+++ b/web/src/components/__tests__/RelativeModal.test.jsx
@@ -0,0 +1,63 @@
+import { h, createRef } from 'preact';
+import RelativeModal from '../RelativeModal';
+import userEvent from '@testing-library/user-event';
+import { fireEvent, render, screen } from '@testing-library/preact';
+
+describe('RelativeModal', () => {
+ test('keeps tab focus', async () => {
+ const ref = createRef();
+ render(
+ <div>
+ <label for="outside-input">outside</label>
+ <input id="outside-input" tabindex="0" />
+ <div ref={ref} />
+ <RelativeModal relativeTo={ref}>
+ <input data-testid="modal-input-0" tabindex="0" />
+ <input data-testid="modal-input-1" tabindex="0" />
+ </RelativeModal>
+ </div>
+ );
+
+ const inputs = screen.queryAllByTestId(/modal-input/);
+ expect(document.activeElement).toBe(inputs[0]);
+ userEvent.tab();
+ expect(document.activeElement).toBe(inputs[1]);
+ userEvent.tab();
+ expect(document.activeElement).toBe(inputs[0]);
+ });
+
+ test('pressing ESC dismisses', async () => {
+ const handleDismiss = jest.fn();
+ const ref = createRef();
+ render(
+ <div>
+ <div ref={ref} />
+ <RelativeModal onDismiss={handleDismiss} relativeTo={ref}>
+ <input data-testid="modal-input-0" tabindex="0" />
+ </RelativeModal>
+ </div>
+ );
+
+ const dialog = screen.queryByRole('dialog');
+ expect(dialog).toBeInTheDocument();
+
+ fireEvent.keyDown(document.activeElement, { key: 'Escape', code: 'Escape' });
+ expect(handleDismiss).toHaveBeenCalled();
+ });
+
+ test('clicking a scrim dismisses', async () => {
+ const handleDismiss = jest.fn();
+ const ref = createRef();
+ render(
+ <div>
+ <div ref={ref} />
+ <RelativeModal onDismiss={handleDismiss} relativeTo={ref}>
+ <input data-testid="modal-input-0" tabindex="0" />
+ </RelativeModal>
+ </div>
+ );
+
+ fireEvent.click(screen.queryByTestId('scrim'));
+ expect(handleDismiss).toHaveBeenCalled();
+ });
+});
diff --git a/web/src/components/__tests__/Select.test.jsx b/web/src/components/__tests__/Select.test.jsx
new file mode 100644
--- /dev/null
+++ b/web/src/components/__tests__/Select.test.jsx
@@ -0,0 +1,34 @@
+import { h } from 'preact';
+import Select from '../Select';
+import { fireEvent, render, screen } from '@testing-library/preact';
+
+describe('Select', () => {
+ test('on focus, shows a menu', async () => {
+ const handleChange = jest.fn();
+ render(<Select label="Tacos" onChange={handleChange} options={['tacos', 'burritos']} />);
+
+ expect(screen.queryByRole('listbox')).not.toBeInTheDocument();
+ fireEvent.click(screen.getByRole('textbox'));
+ expect(screen.queryByRole('listbox')).toBeInTheDocument();
+ expect(screen.queryByRole('option', { name: 'tacos' })).toBeInTheDocument();
+ expect(screen.queryByRole('option', { name: 'burritos' })).toBeInTheDocument();
+
+ fireEvent.click(screen.queryByRole('option', { name: 'burritos' }));
+ expect(handleChange).toHaveBeenCalledWith('burritos', 'burritos');
+ });
+
+ test('allows keyboard navigation', async () => {
+ const handleChange = jest.fn();
+ render(<Select label="Tacos" onChange={handleChange} options={['tacos', 'burritos']} />);
+
+ expect(screen.queryByRole('listbox')).not.toBeInTheDocument();
+ const input = screen.getByRole('textbox');
+ fireEvent.focus(input);
+ fireEvent.keyDown(input, { key: 'Enter', code: 'Enter' });
+ expect(screen.queryByRole('listbox')).toBeInTheDocument();
+
+ fireEvent.keyDown(input, { key: 'ArrowDown', code: 'ArrowDown' });
+ fireEvent.keyDown(input, { key: 'Enter', code: 'Enter' });
+ expect(handleChange).toHaveBeenCalledWith('burritos', 'burritos');
+ });
+});
diff --git a/web/src/components/__tests__/Switch.test.jsx b/web/src/components/__tests__/Switch.test.jsx
new file mode 100644
--- /dev/null
+++ b/web/src/components/__tests__/Switch.test.jsx
@@ -0,0 +1,47 @@
+import { h } from 'preact';
+import Switch from '../Switch';
+import { fireEvent, render, screen } from '@testing-library/preact';
+
+describe('Switch', () => {
+ test('renders a hidden checkbox', async () => {
+ render(
+ <div>
+ <Switch id="unchecked-switch" />
+ <Switch id="checked-switch" checked={true} />
+ </div>
+ );
+
+ const unchecked = screen.queryByTestId('unchecked-switch-input');
+ expect(unchecked).toHaveAttribute('type', 'checkbox');
+ expect(unchecked).not.toBeChecked();
+
+ const checked = screen.queryByTestId('checked-switch-input');
+ expect(checked).toHaveAttribute('type', 'checkbox');
+ expect(checked).toBeChecked();
+ });
+
+ test('calls onChange callback when checked/unchecked', async () => {
+ const handleChange = jest.fn();
+ const { rerender } = render(<Switch id="check" onChange={handleChange} />);
+ fireEvent.change(screen.queryByTestId('check-input'), { checked: true });
+ expect(handleChange).toHaveBeenCalledWith('check', true);
+
+ rerender(<Switch id="check" onChange={handleChange} checked />);
+ fireEvent.change(screen.queryByTestId('check-input'), { checked: false });
+ expect(handleChange).toHaveBeenCalledWith('check', false);
+ });
+
+ test('renders a label before', async () => {
+ render(<Switch id="check" label="This is the label" />);
+ const items = screen.queryAllByTestId(/check-.+/);
+ expect(items[0]).toHaveTextContent('This is the label');
+ expect(items[1]).toHaveAttribute('data-testid', 'check-input');
+ });
+
+ test('renders a label after', async () => {
+ render(<Switch id="check" label="This is the label" labelPosition="after" />);
+ const items = screen.queryAllByTestId(/check-.+/);
+ expect(items[0]).toHaveAttribute('data-testid', 'check-input');
+ expect(items[1]).toHaveTextContent('This is the label');
+ });
+});
diff --git a/web/src/components/__tests__/TextField.test.jsx b/web/src/components/__tests__/TextField.test.jsx
new file mode 100644
--- /dev/null
+++ b/web/src/components/__tests__/TextField.test.jsx
@@ -0,0 +1,73 @@
+import { h } from 'preact';
+import TextField from '../TextField';
+import { fireEvent, render, screen } from '@testing-library/preact';
+
+describe('TextField', () => {
+ test('can render a leading icon', async () => {
+ render(<TextField label="Tacos" leadingIcon={FakeLeadingIcon} />);
+ expect(screen.getByTestId('icon-leading')).toBeInTheDocument();
+ });
+
+ test('can render a trailing icon', async () => {
+ render(<TextField label="Tacos" trailingIcon={FakeTrailingIcon} />);
+ expect(screen.getByTestId('icon-trailing')).toBeInTheDocument();
+ });
+
+  test('renders icons in correct positions', async () => {
+ render(<TextField label="Tacos" leadingIcon={FakeLeadingIcon} trailingIcon={FakeTrailingIcon} />);
+ const icons = screen.queryAllByTestId(/icon-.+/);
+ expect(icons[0]).toHaveAttribute('data-testid', 'icon-leading');
+ expect(icons[1]).toHaveAttribute('data-testid', 'icon-trailing');
+ });
+
+ test('focuses and blurs', async () => {
+ const handleFocus = jest.fn();
+ const handleBlur = jest.fn();
+ render(<TextField label="Tacos" onFocus={handleFocus} onBlur={handleBlur} />);
+
+ fireEvent.focus(screen.getByRole('textbox'));
+ expect(handleFocus).toHaveBeenCalled();
+ expect(screen.getByText('Tacos').classList.contains('-translate-y-2')).toBe(true);
+
+ fireEvent.blur(screen.getByRole('textbox'));
+ expect(handleBlur).toHaveBeenCalled();
+ expect(screen.getByText('Tacos').classList.contains('-translate-y-2')).toBe(false);
+ });
+
+ test('onChange updates the value', async () => {
+ const handleChangeText = jest.fn();
+ render(<TextField label="Tacos" onChangeText={handleChangeText} />);
+
+ const input = screen.getByRole('textbox');
+ fireEvent.input(input, { target: { value: 'i like tacos' } });
+ expect(handleChangeText).toHaveBeenCalledWith('i like tacos');
+ expect(input.value).toEqual('i like tacos');
+ });
+
+ test('still updates the value if an original value was given', async () => {
+ render(<TextField label="Tacos" value="no, burritos" />);
+
+ const input = screen.getByRole('textbox');
+ fireEvent.input(input, { target: { value: 'i like tacos' } });
+ expect(input.value).toEqual('i like tacos');
+ });
+
+ test('changes the value if the prop value changes', async () => {
+ const { rerender } = render(<TextField key="test" label="Tacos" value="no, burritos" />);
+
+ const input = screen.getByRole('textbox');
+ fireEvent.input(input, { target: { value: 'i like tacos' } });
+ expect(input.value).toEqual('i like tacos');
+
+ rerender(<TextField key="test" label="Tacos" value="no, really, burritos" />);
+ expect(input.value).toEqual('no, really, burritos');
+ });
+});
+
+function FakeLeadingIcon() {
+ return <div data-testid="icon-leading" />;
+}
+
+function FakeTrailingIcon() {
+ return <div data-testid="icon-trailing" />;
+}
diff --git a/web/src/components/__tests__/Toolltip.test.jsx b/web/src/components/__tests__/Toolltip.test.jsx
new file mode 100644
--- /dev/null
+++ b/web/src/components/__tests__/Toolltip.test.jsx
@@ -0,0 +1,115 @@
+import { h, createRef } from 'preact';
+import Tooltip from '../Tooltip';
+import { render, screen } from '@testing-library/preact';
+
+describe('Tooltip', () => {
+ test('renders in a relative position', async () => {
+ jest
+ .spyOn(window.HTMLElement.prototype, 'getBoundingClientRect')
+ // relativeTo
+ .mockReturnValueOnce({
+ x: 100,
+ y: 100,
+ width: 50,
+ height: 10,
+ })
+ // tooltip
+ .mockReturnValueOnce({ width: 40, height: 15 });
+
+ const ref = createRef();
+ render(
+ <div>
+ <div ref={ref} />
+ <Tooltip relativeTo={ref} text="hello" />
+ </div>
+ );
+
+ const tooltip = await screen.findByRole('tooltip');
+ const style = window.getComputedStyle(tooltip);
+ expect(style.left).toEqual('105px');
+ expect(style.top).toEqual('70px');
+ });
+
+ test('if too far right, renders to the left', async () => {
+ window.innerWidth = 1024;
+ jest
+ .spyOn(window.HTMLElement.prototype, 'getBoundingClientRect')
+ // relativeTo
+ .mockReturnValueOnce({
+ x: 1000,
+ y: 100,
+ width: 24,
+ height: 10,
+ })
+ // tooltip
+ .mockReturnValueOnce({ width: 50, height: 15 });
+
+ const ref = createRef();
+ render(
+ <div>
+ <div ref={ref} />
+ <Tooltip relativeTo={ref} text="hello" />
+ </div>
+ );
+
+ const tooltip = await screen.findByRole('tooltip');
+ const style = window.getComputedStyle(tooltip);
+ expect(style.left).toEqual('942px');
+ expect(style.top).toEqual('97px');
+ });
+
+ test('if too far left, renders to the right', async () => {
+ jest
+ .spyOn(window.HTMLElement.prototype, 'getBoundingClientRect')
+ // relativeTo
+ .mockReturnValueOnce({
+ x: 0,
+ y: 100,
+ width: 24,
+ height: 10,
+ })
+ // tooltip
+ .mockReturnValueOnce({ width: 50, height: 15 });
+
+ const ref = createRef();
+ render(
+ <div>
+ <div ref={ref} />
+ <Tooltip relativeTo={ref} text="hello" />
+ </div>
+ );
+
+ const tooltip = await screen.findByRole('tooltip');
+ const style = window.getComputedStyle(tooltip);
+ expect(style.left).toEqual('32px');
+ expect(style.top).toEqual('97px');
+ });
+
+ test('if too close to top, renders to the bottom', async () => {
+ window.scrollY = 90;
+ jest
+ .spyOn(window.HTMLElement.prototype, 'getBoundingClientRect')
+ // relativeTo
+ .mockReturnValueOnce({
+ x: 100,
+ y: 100,
+ width: 24,
+ height: 10,
+ })
+ // tooltip
+ .mockReturnValueOnce({ width: 50, height: 15 });
+
+ const ref = createRef();
+ render(
+ <div>
+ <div ref={ref} />
+ <Tooltip relativeTo={ref} text="hello" />
+ </div>
+ );
+
+ const tooltip = await screen.findByRole('tooltip');
+ const style = window.getComputedStyle(tooltip);
+ expect(style.left).toEqual('87px');
+ expect(style.top).toEqual('160px');
+ });
+});
diff --git a/web/src/context/__tests__/index.test.jsx b/web/src/context/__tests__/index.test.jsx
new file mode 100644
--- /dev/null
+++ b/web/src/context/__tests__/index.test.jsx
@@ -0,0 +1,217 @@
+import { h } from 'preact';
+import * as IDB from 'idb-keyval';
+import { DarkModeProvider, useDarkMode, usePersistence } from '..';
+import { fireEvent, render, screen } from '@testing-library/preact';
+import { useCallback } from 'preact/hooks';
+
+function DarkModeChecker() {
+ const { currentMode } = useDarkMode();
+ return <div data-testid={currentMode}>{currentMode}</div>;
+}
+
+describe('DarkMode', () => {
+ let MockIDB;
+ beforeEach(() => {
+ MockIDB = {
+ get: jest.spyOn(IDB, 'get').mockImplementation(() => Promise.resolve(undefined)),
+ set: jest.spyOn(IDB, 'set').mockImplementation(() => Promise.resolve(true)),
+ };
+ });
+
+ test('uses media by default', async () => {
+ render(
+ <DarkModeProvider>
+ <DarkModeChecker />
+ </DarkModeProvider>
+ );
+ const el = await screen.findByTestId('media');
+ expect(el).toBeInTheDocument();
+ });
+
+ test('uses the mode stored in idb - dark', async () => {
+ MockIDB.get.mockResolvedValue('dark');
+ render(
+ <DarkModeProvider>
+ <DarkModeChecker />
+ </DarkModeProvider>
+ );
+ const el = await screen.findByTestId('dark');
+ expect(el).toBeInTheDocument();
+ expect(document.body.classList.contains('dark')).toBe(true);
+ });
+
+ test('uses the mode stored in idb - light', async () => {
+ MockIDB.get.mockResolvedValue('light');
+ render(
+ <DarkModeProvider>
+ <DarkModeChecker />
+ </DarkModeProvider>
+ );
+ const el = await screen.findByTestId('light');
+ expect(el).toBeInTheDocument();
+ expect(document.body.classList.contains('dark')).toBe(false);
+ });
+
+ test('allows updating the mode', async () => {
+ MockIDB.get.mockResolvedValue('dark');
+
+ function Updater() {
+ const { setDarkMode } = useDarkMode();
+ const handleClick = useCallback(() => {
+ setDarkMode('light');
+ }, [setDarkMode]);
+ return <div onClick={handleClick}>click me</div>;
+ }
+
+ render(
+ <DarkModeProvider>
+ <DarkModeChecker />
+ <Updater />
+ </DarkModeProvider>
+ );
+
+ const dark = await screen.findByTestId('dark');
+ expect(dark).toBeInTheDocument();
+ expect(document.body.classList.contains('dark')).toBe(true);
+
+ const button = await screen.findByText('click me');
+ fireEvent.click(button);
+
+ const light = await screen.findByTestId('light');
+ expect(light).toBeInTheDocument();
+ expect(document.body.classList.contains('dark')).toBe(false);
+ });
+
+ test('when using media, matches on preference', async () => {
+ MockIDB.get.mockResolvedValue('media');
+ jest.spyOn(window, 'matchMedia').mockImplementation((query) => {
+ if (query === '(prefers-color-scheme: dark)') {
+ return { matches: true, addEventListener: jest.fn(), removeEventListener: jest.fn() };
+ }
+
+ throw new Error(`Unexpected query to matchMedia: ${query}`);
+ });
+ render(
+ <DarkModeProvider>
+ <DarkModeChecker />
+ </DarkModeProvider>
+ );
+
+ const el = await screen.findByTestId('dark');
+ expect(el).toBeInTheDocument();
+ expect(document.body.classList.contains('dark')).toBe(true);
+ });
+});
+
+describe('usePersistence', () => {
+ let MockIDB;
+ beforeEach(() => {
+ MockIDB = {
+ get: jest.spyOn(IDB, 'get').mockImplementation(() => Promise.resolve(undefined)),
+ set: jest.spyOn(IDB, 'set').mockImplementation(() => Promise.resolve(true)),
+ };
+ });
+
+ test('returns a defaultValue initially', async () => {
+ MockIDB.get.mockImplementationOnce(
+ () =>
+ new Promise((resolve) => {
+ setTimeout(() => {
+ resolve('foo');
+ }, 1);
+ })
+ );
+
+ function Component() {
+ const [value, , loaded] = usePersistence('tacos', 'my-default');
+ return (
+ <div>
+ <div data-testid="loaded">{loaded ? 'loaded' : 'not loaded'}</div>
+ <div data-testid="value">{value}</div>
+ </div>
+ );
+ }
+
+ render(<Component />);
+
+ expect(screen.getByTestId('loaded')).toMatchInlineSnapshot(`
+ <div
+ data-testid="loaded"
+ >
+ not loaded
+ </div>
+ `);
+ expect(screen.getByTestId('value')).toMatchInlineSnapshot(`
+ <div
+ data-testid="value"
+ >
+ my-default
+ </div>
+ `);
+
+ jest.runAllTimers();
+ });
+
+ test('updates with the previously-persisted value', async () => {
+ MockIDB.get.mockResolvedValue('are delicious');
+
+ function Component() {
+ const [value, , loaded] = usePersistence('tacos', 'my-default');
+ return (
+ <div>
+ <div data-testid="loaded">{loaded ? 'loaded' : 'not loaded'}</div>
+ <div data-testid="value">{value}</div>
+ </div>
+ );
+ }
+
+ render(<Component />);
+
+ await screen.findByText('loaded');
+
+ expect(screen.getByTestId('loaded')).toMatchInlineSnapshot(`
+ <div
+ data-testid="loaded"
+ >
+ loaded
+ </div>
+ `);
+ expect(screen.getByTestId('value')).toMatchInlineSnapshot(`
+ <div
+ data-testid="value"
+ >
+ are delicious
+ </div>
+ `);
+ });
+
+ test('can be updated manually', async () => {
+ MockIDB.get.mockResolvedValue('are delicious');
+
+ function Component() {
+ const [value, setValue] = usePersistence('tacos', 'my-default');
+ const handleClick = useCallback(() => {
+ setValue('super delicious');
+ }, [setValue]);
+ return (
+ <div>
+ <div onClick={handleClick}>click me</div>
+ <div data-testid="value">{value}</div>
+ </div>
+ );
+ }
+
+ render(<Component />);
+
+ const button = await screen.findByText('click me');
+ fireEvent.click(button);
+
+ expect(screen.getByTestId('value')).toMatchInlineSnapshot(`
+ <div
+ data-testid="value"
+ >
+ super delicious
+ </div>
+ `);
+ });
+});
diff --git a/web/src/routes/__tests__/Camera.test.jsx b/web/src/routes/__tests__/Camera.test.jsx
new file mode 100644
--- /dev/null
+++ b/web/src/routes/__tests__/Camera.test.jsx
@@ -0,0 +1,58 @@
+import { h } from 'preact';
+import * as AutoUpdatingCameraImage from '../../components/AutoUpdatingCameraImage';
+import * as Api from '../../api';
+import * as Context from '../../context';
+import Camera from '../Camera';
+import { fireEvent, render, screen } from '@testing-library/preact';
+
+describe('Camera Route', () => {
+ let mockUsePersistence, mockSetOptions;
+
+ beforeEach(() => {
+ mockSetOptions = jest.fn();
+ mockUsePersistence = jest.spyOn(Context, 'usePersistence').mockImplementation(() => [{}, mockSetOptions]);
+ jest.spyOn(Api, 'useConfig').mockImplementation(() => ({
+ data: { cameras: { front: { name: 'front', objects: { track: ['taco', 'cat', 'dog'] } } } },
+ }));
+ jest.spyOn(Api, 'useApiHost').mockImplementation(() => 'http://base-url.local:5000');
+ jest.spyOn(AutoUpdatingCameraImage, 'default').mockImplementation(({ searchParams }) => {
+ return <div data-testid="mock-image">{searchParams.toString()}</div>;
+ });
+ });
+
+ test('reads camera feed options from persistence', async () => {
+ mockUsePersistence.mockReturnValue([
+ {
+ bbox: true,
+ timestamp: false,
+ zones: true,
+ mask: false,
+ motion: true,
+ regions: false,
+ },
+ mockSetOptions,
+ ]);
+ render(<Camera camera="front" />);
+ fireEvent.click(screen.queryByText('Show Options'));
+ expect(screen.queryByTestId('mock-image')).toHaveTextContent(
+      'bbox=1&timestamp=0&zones=1&mask=0&motion=1&regions=0'
+ );
+ });
+
+ test('updates camera feed options to persistence', async () => {
+ mockUsePersistence
+ .mockReturnValueOnce([{}, mockSetOptions])
+ .mockReturnValueOnce([{ bbox: true }, mockSetOptions])
+ .mockReturnValueOnce([{ bbox: true, timestamp: true }, mockSetOptions]);
+
+ render(<Camera camera="front" />);
+
+ fireEvent.click(screen.queryByText('Show Options'));
+ fireEvent.change(screen.queryByTestId('bbox-input'), { target: { checked: true } });
+ fireEvent.change(screen.queryByTestId('timestamp-input'), { target: { checked: true } });
+ fireEvent.click(screen.queryByText('Hide Options'));
+
+ expect(mockSetOptions).toHaveBeenCalledWith({ bbox: true, timestamp: true });
+    expect(screen.queryByTestId('mock-image')).toHaveTextContent('bbox=1&timestamp=1');
+ });
+});
diff --git a/web/src/routes/__tests__/Cameras.test.jsx b/web/src/routes/__tests__/Cameras.test.jsx
new file mode 100644
--- /dev/null
+++ b/web/src/routes/__tests__/Cameras.test.jsx
@@ -0,0 +1,74 @@
+import { h } from 'preact';
+import * as Api from '../../api';
+import * as CameraImage from '../../components/CameraImage';
+import * as Mqtt from '../../api/mqtt';
+import Cameras from '../Cameras';
+import { fireEvent, render, screen } from '@testing-library/preact';
+
+describe('Cameras Route', () => {
+ let useConfigMock;
+
+ beforeEach(() => {
+ useConfigMock = jest.spyOn(Api, 'useConfig').mockImplementation(() => ({
+ data: {
+ cameras: {
+ front: { name: 'front', objects: { track: ['taco', 'cat', 'dog'] } },
+ side: { name: 'side', objects: { track: ['taco', 'cat', 'dog'] } },
+ },
+ },
+ status: 'loaded',
+ }));
+ jest.spyOn(Api, 'useApiHost').mockImplementation(() => 'http://base-url.local:5000');
+ jest.spyOn(CameraImage, 'default').mockImplementation(() => <div data-testid="camera-image" />);
+ jest.spyOn(Mqtt, 'useMqtt').mockImplementation(() => ({ value: { payload: 'OFF' }, send: jest.fn() }));
+ });
+
+ test('shows an ActivityIndicator if not yet loaded', async () => {
+ useConfigMock.mockReturnValueOnce(() => ({ status: 'loading' }));
+ render(<Cameras />);
+ expect(screen.queryByLabelText('Loading…')).toBeInTheDocument();
+ });
+
+ test('shows cameras', async () => {
+ render(<Cameras />);
+
+ expect(screen.queryByLabelText('Loading…')).not.toBeInTheDocument();
+
+ expect(screen.queryByText('front')).toBeInTheDocument();
+ expect(screen.queryByText('front').closest('a')).toHaveAttribute('href', '/cameras/front');
+
+ expect(screen.queryByText('side')).toBeInTheDocument();
+ expect(screen.queryByText('side').closest('a')).toHaveAttribute('href', '/cameras/side');
+ });
+
+ test('buttons toggle detect, clips, and snapshots', async () => {
+ const sendDetect = jest.fn();
+ const sendClips = jest.fn();
+ const sendSnapshots = jest.fn();
+ jest.spyOn(Mqtt, 'useDetectState').mockImplementation(() => {
+ return { payload: 'ON', send: sendDetect };
+ });
+ jest.spyOn(Mqtt, 'useClipsState').mockImplementation(() => {
+ return { payload: 'OFF', send: sendClips };
+ });
+ jest.spyOn(Mqtt, 'useSnapshotsState').mockImplementation(() => {
+ return { payload: 'ON', send: sendSnapshots };
+ });
+
+ render(<Cameras />);
+
+ fireEvent.click(screen.getAllByLabelText('Toggle detect off')[0]);
+ expect(sendDetect).toHaveBeenCalledWith('OFF');
+ expect(sendDetect).toHaveBeenCalledTimes(1);
+
+ fireEvent.click(screen.getAllByLabelText('Toggle snapshots off')[0]);
+ expect(sendSnapshots).toHaveBeenCalledWith('OFF');
+
+ fireEvent.click(screen.getAllByLabelText('Toggle clips on')[0]);
+ expect(sendClips).toHaveBeenCalledWith('ON');
+
+ expect(sendDetect).toHaveBeenCalledTimes(1);
+ expect(sendSnapshots).toHaveBeenCalledTimes(1);
+ expect(sendClips).toHaveBeenCalledTimes(1);
+ });
+});
diff --git a/web/src/routes/__tests__/Debug.test.jsx b/web/src/routes/__tests__/Debug.test.jsx
new file mode 100644
--- /dev/null
+++ b/web/src/routes/__tests__/Debug.test.jsx
@@ -0,0 +1,78 @@
+import { h } from 'preact';
+import * as Api from '../../api';
+import * as Mqtt from '../../api/mqtt';
+import Debug from '../Debug';
+import { render, screen } from '@testing-library/preact';
+
+describe('Debug Route', () => {
+ let useStatsMock, useMqttMock;
+
+ beforeEach(() => {
+ jest.spyOn(Api, 'useConfig').mockImplementation(() => ({
+ data: {
+ service: {
+ version: '0.8.3',
+ },
+ cameras: {
+ front: { name: 'front', objects: { track: ['taco', 'cat', 'dog'] } },
+ side: { name: 'side', objects: { track: ['taco', 'cat', 'dog'] } },
+ },
+ mqtt: {
+        stats_interval: 60,
+ },
+ },
+ status: 'loaded',
+ }));
+ useStatsMock = jest.spyOn(Api, 'useStats').mockImplementation(() => ({ data: statsMock }));
+ useMqttMock = jest.spyOn(Mqtt, 'useMqtt').mockImplementation(() => ({ value: { payload: null } }));
+ });
+
+ test('shows an ActivityIndicator if stats are null', async () => {
+ useStatsMock.mockReturnValue({ data: null });
+ render(<Debug />);
+ expect(screen.queryByLabelText('Loading…')).toBeInTheDocument();
+ });
+
+ test('shows stats and config', async () => {
+ render(<Debug />);
+ expect(screen.queryByLabelText('Loading…')).not.toBeInTheDocument();
+
+ expect(screen.queryByTestId('detectors')).toBeInTheDocument();
+ expect(screen.queryByText('coral')).toBeInTheDocument();
+
+ expect(screen.queryByTestId('cameras')).toBeInTheDocument();
+ expect(screen.queryByText('front')).toBeInTheDocument();
+ expect(screen.queryByText('side')).toBeInTheDocument();
+
+ expect(screen.queryByText('Config')).toBeInTheDocument();
+ expect(screen.queryByRole('button', { name: 'Copy to Clipboard' })).toBeInTheDocument();
+ });
+
+ test('updates the stats from mqtt', async () => {
+ const { rerender } = render(<Debug />);
+ expect(useMqttMock).toHaveBeenCalledWith('stats');
+ useMqttMock.mockReturnValue({
+ value: {
+ payload: { ...statsMock, detectors: { coral: { ...statsMock.detectors.coral, inference_speed: 42.4242 } } },
+ },
+ });
+ rerender(<Debug />);
+
+ expect(screen.queryByText('42.4242')).toBeInTheDocument();
+ });
+});
+
+const statsMock = {
+ detection_fps: 0.0,
+ detectors: { coral: { detection_start: 0.0, inference_speed: 8.94, pid: 52 } },
+ front: { camera_fps: 5.0, capture_pid: 64, detection_fps: 0.0, pid: 54, process_fps: 0.0, skipped_fps: 0.0 },
+ side: {
+ camera_fps: 6.9,
+ capture_pid: 71,
+ detection_fps: 0.0,
+ pid: 60,
+ process_fps: 0.0,
+ skipped_fps: 0.0,
+ },
+ service: { uptime: 34812, version: '0.8.1-d376f6b' },
+};
diff --git a/web/src/routes/__tests__/Event.test.jsx b/web/src/routes/__tests__/Event.test.jsx
new file mode 100644
--- /dev/null
+++ b/web/src/routes/__tests__/Event.test.jsx
@@ -0,0 +1,74 @@
+import { h } from 'preact';
+import * as Api from '../../api';
+import Event from '../Event';
+import { render, screen } from '@testing-library/preact';
+
+describe('Event Route', () => {
+ let useEventMock;
+
+ beforeEach(() => {
+ useEventMock = jest.spyOn(Api, 'useEvent').mockImplementation(() => ({
+ data: mockEvent,
+ status: 'loaded',
+ }));
+ jest.spyOn(Api, 'useApiHost').mockImplementation(() => 'http://localhost:5000');
+ });
+
+ test('shows an ActivityIndicator if not yet loaded', async () => {
+ useEventMock.mockReturnValueOnce(() => ({ status: 'loading' }));
+ render(<Event eventId={mockEvent.id} />);
+ expect(screen.queryByLabelText('Loading…')).toBeInTheDocument();
+ });
+
+ test('shows cameras', async () => {
+ render(<Event eventId={mockEvent.id} />);
+
+ expect(screen.queryByLabelText('Loading…')).not.toBeInTheDocument();
+
+ expect(screen.queryByText('Clip')).toBeInTheDocument();
+ expect(screen.queryByLabelText('Clip for event 1613257326.237365-83cgl2')).toHaveAttribute(
+ 'src',
+ 'http://localhost:5000/clips/front-1613257326.237365-83cgl2.mp4'
+ );
+ expect(screen.queryByText('Best image')).toBeInTheDocument();
+ expect(screen.queryByText('Thumbnail')).not.toBeInTheDocument();
+ expect(screen.queryByAltText('person at 82.0% confidence')).toHaveAttribute(
+ 'src',
+ 'http://localhost:5000/clips/front-1613257326.237365-83cgl2.jpg'
+ );
+ });
+
+ test('shows the thumbnail if no snapshot available', async () => {
+ useEventMock.mockReturnValue({ data: { ...mockEvent, has_snapshot: false }, status: 'loaded' });
+ render(<Event eventId={mockEvent.id} />);
+
+ expect(screen.queryByText('Best image')).not.toBeInTheDocument();
+ expect(screen.queryByText('Thumbnail')).toBeInTheDocument();
+ expect(screen.queryByAltText('person at 82.0% confidence')).toHaveAttribute(
+ 'src',
+ 'data:image/jpeg;base64,/9j/4aa...'
+ );
+ });
+
+ test('does not render a video if there is no clip', async () => {
+ useEventMock.mockReturnValue({ data: { ...mockEvent, has_clip: false }, status: 'loaded' });
+ render(<Event eventId={mockEvent.id} />);
+
+ expect(screen.queryByText('Clip')).not.toBeInTheDocument();
+ expect(screen.queryByLabelText('Clip for event 1613257326.237365-83cgl2')).not.toBeInTheDocument();
+ });
+});
+
+const mockEvent = {
+ camera: 'front',
+ end_time: 1613257337.841237,
+ false_positive: false,
+ has_clip: true,
+ has_snapshot: true,
+ id: '1613257326.237365-83cgl2',
+ label: 'person',
+ start_time: 1613257326.237365,
+ top_score: 0.8203125,
+ zones: ['front_patio'],
+ thumbnail: '/9j/4aa...',
+};
diff --git a/web/src/routes/__tests__/Events.test.jsx b/web/src/routes/__tests__/Events.test.jsx
new file mode 100644
--- /dev/null
+++ b/web/src/routes/__tests__/Events.test.jsx
@@ -0,0 +1,83 @@
+import { h } from 'preact';
+import * as Api from '../../api';
+import * as Hooks from '../../hooks';
+import Events from '../Events';
+import { render, screen } from '@testing-library/preact';
+
+describe('Events Route', () => {
+ let useEventsMock, useIntersectionMock;
+
+ beforeEach(() => {
+ useEventsMock = jest.spyOn(Api, 'useEvents').mockImplementation(() => ({
+ data: null,
+ status: 'loading',
+ }));
+ jest.spyOn(Api, 'useConfig').mockImplementation(() => ({
+ data: {
+ cameras: {
+ front: { name: 'front', objects: { track: ['taco', 'cat', 'dog'] }, zones: [] },
+ side: { name: 'side', objects: { track: ['taco', 'cat', 'dog'] }, zones: [] },
+ },
+ },
+ }));
+ jest.spyOn(Api, 'useApiHost').mockImplementation(() => 'http://localhost:5000');
+ useIntersectionMock = jest.spyOn(Hooks, 'useIntersectionObserver').mockImplementation(() => [null, jest.fn()]);
+ });
+
+ test('shows an ActivityIndicator if not yet loaded', async () => {
+ render(<Events limit={5} path="/events" />);
+ expect(screen.queryByLabelText('Loading…')).toBeInTheDocument();
+ });
+
+ test('does not show ActivityIndicator after loaded', async () => {
+ useEventsMock.mockReturnValue({ data: mockEvents, status: 'loaded' });
+ render(<Events limit={5} path="/events" />);
+ expect(screen.queryByLabelText('Loading…')).not.toBeInTheDocument();
+ });
+
+ test('loads more when the intersectionObserver fires', async () => {
+ const setIntersectionNode = jest.fn();
+ useIntersectionMock.mockReturnValue([null, setIntersectionNode]);
+ useEventsMock.mockImplementation((searchString) => {
+ if (searchString.includes('before=')) {
+ const params = new URLSearchParams(searchString);
+ const before = parseFloat(params.get('before'));
+ const index = mockEvents.findIndex((el) => el.start_time === before + 0.0001);
+ return { data: mockEvents.slice(index, index + 5), status: 'loaded' };
+ }
+
+ return { data: mockEvents.slice(0, 5), status: 'loaded' };
+ });
+
+ const { rerender } = render(<Events limit={5} path="/events" />);
+ expect(setIntersectionNode).toHaveBeenCalled();
+ expect(useEventsMock).toHaveBeenCalledWith('include_thumbnails=0&limit=5&');
+ expect(screen.queryAllByTestId(/event-\d+/)).toHaveLength(5);
+
+ useIntersectionMock.mockReturnValue([
+ {
+ isIntersecting: true,
+ target: { dataset: { startTime: mockEvents[4].start_time } },
+ },
+ setIntersectionNode,
+ ]);
+ rerender(<Events limit={5} path="/events" />);
+ expect(useEventsMock).toHaveBeenCalledWith(
+ `include_thumbnails=0&limit=5&before=${mockEvents[4].start_time - 0.0001}`
+ );
+ expect(screen.queryAllByTestId(/event-\d+/)).toHaveLength(10);
+ });
+});
+
+const mockEvents = new Array(12).fill(null).map((v, i) => ({
+ end_time: 1613257337 + i,
+ false_positive: false,
+ has_clip: true,
+ has_snapshot: true,
+ id: i,
+ label: 'person',
+ start_time: 1613257326 + i,
+ top_score: Math.random(),
+ zones: ['front_patio'],
+ thumbnail: '/9j/4aa...',
+}));
| The default ffmpeg args link is dead on the documentation page
**Describe the bug**
The "default ffmpeg args" link at https://blakeblackshear.github.io/frigate/troubleshooting/#how-can-i-get-sound-or-audio-in-my-clips-and-recordings is dead.
It points to this dead URL:
https://blakeblackshear.github.io/frigate/troubleshooting/configuration/index#ffmpeg
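Until the link is repaired, here is a minimal, untested sketch of the kind of per-camera override that section described, based on the `output_args` defaults in `frigate/config.py` at the time; the camera name `back` and the exact argument strings are illustrative assumptions, not the project's documented answer:

```yaml
cameras:
  back:
    ffmpeg:
      output_args:
        # The shipped defaults end in "-an" (strip audio); replacing it with an
        # audio codec is what keeps sound in recordings and clips.
        record: -f segment -segment_time 60 -segment_format mp4 -reset_timestamps 1 -strftime 1 -c:v copy -c:a aac
        clips: -f segment -segment_time 10 -segment_format mp4 -reset_timestamps 1 -strftime 1 -c:v copy -c:a aac
```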
| 2021-02-01T14:03:54Z | [] | [] |
|
blakeblackshear/frigate | 825 | blakeblackshear__frigate-825 | [
"1176",
"1779",
"1656",
"1143"
] | d35b09b18fb2fa5c9945f965fde9668e16e78c84 | diff --git a/frigate/__main__.py b/frigate/__main__.py
--- a/frigate/__main__.py
+++ b/frigate/__main__.py
@@ -1,4 +1,6 @@
-import faulthandler; faulthandler.enable()
+import faulthandler
+
+faulthandler.enable()
import sys
import threading
@@ -6,10 +8,10 @@
from frigate.app import FrigateApp
-cli = sys.modules['flask.cli']
+cli = sys.modules["flask.cli"]
cli.show_server_banner = lambda *x: None
-if __name__ == '__main__':
+if __name__ == "__main__":
frigate_app = FrigateApp()
frigate_app.start()
diff --git a/frigate/app.py b/frigate/app.py
--- a/frigate/app.py
+++ b/frigate/app.py
@@ -2,38 +2,40 @@
import logging
import multiprocessing as mp
import os
+import signal
+import sys
+import threading
from logging.handlers import QueueHandler
from typing import Dict, List
-import sys
-import signal
import yaml
-from gevent import pywsgi
-from geventwebsocket.handler import WebSocketHandler
from peewee_migrate import Router
from playhouse.sqlite_ext import SqliteExtDatabase
from playhouse.sqliteq import SqliteQueueDatabase
-from frigate.config import FrigateConfig
-from frigate.const import RECORD_DIR, CLIPS_DIR, CACHE_DIR
+from frigate.config import DetectorTypeEnum, FrigateConfig
+from frigate.const import CACHE_DIR, CLIPS_DIR, RECORD_DIR
from frigate.edgetpu import EdgeTPUProcess
-from frigate.events import EventProcessor, EventCleanup
+from frigate.events import EventCleanup, EventProcessor
from frigate.http import create_app
from frigate.log import log_process, root_configurer
-from frigate.models import Event
-from frigate.mqtt import create_mqtt_client
+from frigate.models import Event, Recordings
+from frigate.mqtt import MqttSocketRelay, create_mqtt_client
from frigate.object_processing import TrackedObjectProcessor
-from frigate.record import RecordingMaintainer
+from frigate.output import output_frames
+from frigate.record import RecordingCleanup, RecordingMaintainer
from frigate.stats import StatsEmitter, stats_init
+from frigate.version import VERSION
from frigate.video import capture_camera, track_camera
from frigate.watchdog import FrigateWatchdog
-from frigate.zeroconf import broadcast_zeroconf
logger = logging.getLogger(__name__)
-class FrigateApp():
+
+class FrigateApp:
def __init__(self):
self.stop_event = mp.Event()
+ self.base_config: FrigateConfig = None
self.config: FrigateConfig = None
self.detection_queue = mp.Queue()
self.detectors: Dict[str, EdgeTPUProcess] = {}
@@ -54,148 +56,257 @@ def ensure_dirs(self):
else:
logger.debug(f"Skipping directory: {d}")
- tmpfs_size = self.config.clips.tmpfs_cache_size
- if tmpfs_size:
- logger.info(f"Creating tmpfs of size {tmpfs_size}")
- rc = os.system(f"mount -t tmpfs -o size={tmpfs_size} tmpfs {CACHE_DIR}")
- if rc != 0:
- logger.error(f"Failed to create tmpfs, error code: {rc}")
-
def init_logger(self):
- self.log_process = mp.Process(target=log_process, args=(self.log_queue,), name='log_process')
+ self.log_process = mp.Process(
+ target=log_process, args=(self.log_queue,), name="log_process"
+ )
self.log_process.daemon = True
self.log_process.start()
root_configurer(self.log_queue)
-
+
def init_config(self):
- config_file = os.environ.get('CONFIG_FILE', '/config/config.yml')
- self.config = FrigateConfig(config_file=config_file)
+ config_file = os.environ.get("CONFIG_FILE", "/config/config.yml")
+ user_config = FrigateConfig.parse_file(config_file)
+ self.config = user_config.runtime_config
for camera_name in self.config.cameras.keys():
# create camera_metrics
self.camera_metrics[camera_name] = {
- 'camera_fps': mp.Value('d', 0.0),
- 'skipped_fps': mp.Value('d', 0.0),
- 'process_fps': mp.Value('d', 0.0),
- 'detection_enabled': mp.Value('i', self.config.cameras[camera_name].detect.enabled),
- 'detection_fps': mp.Value('d', 0.0),
- 'detection_frame': mp.Value('d', 0.0),
- 'read_start': mp.Value('d', 0.0),
- 'ffmpeg_pid': mp.Value('i', 0),
- 'frame_queue': mp.Queue(maxsize=2),
+ "camera_fps": mp.Value("d", 0.0),
+ "skipped_fps": mp.Value("d", 0.0),
+ "process_fps": mp.Value("d", 0.0),
+ "detection_enabled": mp.Value(
+ "i", self.config.cameras[camera_name].detect.enabled
+ ),
+ "detection_fps": mp.Value("d", 0.0),
+ "detection_frame": mp.Value("d", 0.0),
+ "read_start": mp.Value("d", 0.0),
+ "ffmpeg_pid": mp.Value("i", 0),
+ "frame_queue": mp.Queue(maxsize=2),
}
-
+
def check_config(self):
for name, camera in self.config.cameras.items():
- assigned_roles = list(set([r for i in camera.ffmpeg.inputs for r in i.roles]))
- if not camera.clips.enabled and 'clips' in assigned_roles:
- logger.warning(f"Camera {name} has clips assigned to an input, but clips is not enabled.")
- elif camera.clips.enabled and not 'clips' in assigned_roles:
- logger.warning(f"Camera {name} has clips enabled, but clips is not assigned to an input.")
-
- if not camera.record.enabled and 'record' in assigned_roles:
- logger.warning(f"Camera {name} has record assigned to an input, but record is not enabled.")
- elif camera.record.enabled and not 'record' in assigned_roles:
- logger.warning(f"Camera {name} has record enabled, but record is not assigned to an input.")
-
- if not camera.rtmp.enabled and 'rtmp' in assigned_roles:
- logger.warning(f"Camera {name} has rtmp assigned to an input, but rtmp is not enabled.")
- elif camera.rtmp.enabled and not 'rtmp' in assigned_roles:
- logger.warning(f"Camera {name} has rtmp enabled, but rtmp is not assigned to an input.")
-
+ assigned_roles = list(
+ set([r for i in camera.ffmpeg.inputs for r in i.roles])
+ )
+ if not camera.record.enabled and "record" in assigned_roles:
+ logger.warning(
+ f"Camera {name} has record assigned to an input, but record is not enabled."
+ )
+ elif camera.record.enabled and not "record" in assigned_roles:
+ logger.warning(
+ f"Camera {name} has record enabled, but record is not assigned to an input."
+ )
+
+ if not camera.rtmp.enabled and "rtmp" in assigned_roles:
+ logger.warning(
+ f"Camera {name} has rtmp assigned to an input, but rtmp is not enabled."
+ )
+ elif camera.rtmp.enabled and not "rtmp" in assigned_roles:
+ logger.warning(
+ f"Camera {name} has rtmp enabled, but rtmp is not assigned to an input."
+ )
+
def set_log_levels(self):
- logging.getLogger().setLevel(self.config.logger.default)
+ logging.getLogger().setLevel(self.config.logger.default.value.upper())
for log, level in self.config.logger.logs.items():
- logging.getLogger(log).setLevel(level)
-
- if not 'geventwebsocket.handler' in self.config.logger.logs:
- logging.getLogger('geventwebsocket.handler').setLevel('ERROR')
+ logging.getLogger(log).setLevel(level.value.upper())
+
+ if not "werkzeug" in self.config.logger.logs:
+ logging.getLogger("werkzeug").setLevel("ERROR")
def init_queues(self):
# Queues for clip processing
self.event_queue = mp.Queue()
self.event_processed_queue = mp.Queue()
+ self.video_output_queue = mp.Queue(maxsize=len(self.config.cameras.keys()) * 2)
# Queue for cameras to push tracked objects to
- self.detected_frames_queue = mp.Queue(maxsize=len(self.config.cameras.keys())*2)
+ self.detected_frames_queue = mp.Queue(
+ maxsize=len(self.config.cameras.keys()) * 2
+ )
def init_database(self):
+ # Migrate DB location
+ old_db_path = os.path.join(CLIPS_DIR, "frigate.db")
+ if not os.path.isfile(self.config.database.path) and os.path.isfile(
+ old_db_path
+ ):
+ os.rename(old_db_path, self.config.database.path)
+
+ # Migrate DB schema
migrate_db = SqliteExtDatabase(self.config.database.path)
# Run migrations
- del(logging.getLogger('peewee_migrate').handlers[:])
+ del logging.getLogger("peewee_migrate").handlers[:]
router = Router(migrate_db)
router.run()
migrate_db.close()
self.db = SqliteQueueDatabase(self.config.database.path)
- models = [Event]
+ models = [Event, Recordings]
self.db.bind(models)
def init_stats(self):
self.stats_tracking = stats_init(self.camera_metrics, self.detectors)
def init_web_server(self):
- self.flask_app = create_app(self.config, self.db, self.stats_tracking, self.detected_frames_processor, self.mqtt_client)
+ self.flask_app = create_app(
+ self.config,
+ self.db,
+ self.stats_tracking,
+ self.detected_frames_processor,
+ )
def init_mqtt(self):
self.mqtt_client = create_mqtt_client(self.config, self.camera_metrics)
+ def start_mqtt_relay(self):
+ self.mqtt_relay = MqttSocketRelay(
+ self.mqtt_client, self.config.mqtt.topic_prefix
+ )
+ self.mqtt_relay.start()
+
def start_detectors(self):
+ model_path = self.config.model.path
model_shape = (self.config.model.height, self.config.model.width)
for name in self.config.cameras.keys():
self.detection_out_events[name] = mp.Event()
- shm_in = mp.shared_memory.SharedMemory(name=name, create=True, size=self.config.model.height*self.config.model.width*3)
- shm_out = mp.shared_memory.SharedMemory(name=f"out-{name}", create=True, size=20*6*4)
+
+ try:
+ shm_in = mp.shared_memory.SharedMemory(
+ name=name,
+ create=True,
+ size=self.config.model.height * self.config.model.width * 3,
+ )
+ except FileExistsError:
+ shm_in = mp.shared_memory.SharedMemory(name=name)
+
+ try:
+ shm_out = mp.shared_memory.SharedMemory(
+ name=f"out-{name}", create=True, size=20 * 6 * 4
+ )
+ except FileExistsError:
+ shm_out = mp.shared_memory.SharedMemory(name=f"out-{name}")
+
self.detection_shms.append(shm_in)
self.detection_shms.append(shm_out)
for name, detector in self.config.detectors.items():
- if detector.type == 'cpu':
- self.detectors[name] = EdgeTPUProcess(name, self.detection_queue, self.detection_out_events, model_shape, 'cpu', detector.num_threads)
- if detector.type == 'edgetpu':
- self.detectors[name] = EdgeTPUProcess(name, self.detection_queue, self.detection_out_events, model_shape, detector.device, detector.num_threads)
+ if detector.type == DetectorTypeEnum.cpu:
+ self.detectors[name] = EdgeTPUProcess(
+ name,
+ self.detection_queue,
+ self.detection_out_events,
+ model_path,
+ model_shape,
+ "cpu",
+ detector.num_threads,
+ )
+ if detector.type == DetectorTypeEnum.edgetpu:
+ self.detectors[name] = EdgeTPUProcess(
+ name,
+ self.detection_queue,
+ self.detection_out_events,
+ model_path,
+ model_shape,
+ detector.device,
+ detector.num_threads,
+ )
def start_detected_frames_processor(self):
- self.detected_frames_processor = TrackedObjectProcessor(self.config, self.mqtt_client, self.config.mqtt.topic_prefix,
- self.detected_frames_queue, self.event_queue, self.event_processed_queue, self.stop_event)
+ self.detected_frames_processor = TrackedObjectProcessor(
+ self.config,
+ self.mqtt_client,
+ self.config.mqtt.topic_prefix,
+ self.detected_frames_queue,
+ self.event_queue,
+ self.event_processed_queue,
+ self.video_output_queue,
+ self.stop_event,
+ )
self.detected_frames_processor.start()
+ def start_video_output_processor(self):
+ output_processor = mp.Process(
+ target=output_frames,
+ name=f"output_processor",
+ args=(
+ self.config,
+ self.video_output_queue,
+ ),
+ )
+ output_processor.daemon = True
+ self.output_processor = output_processor
+ output_processor.start()
+ logger.info(f"Output process started: {output_processor.pid}")
+
def start_camera_processors(self):
model_shape = (self.config.model.height, self.config.model.width)
for name, config in self.config.cameras.items():
- camera_process = mp.Process(target=track_camera, name=f"camera_processor:{name}", args=(name, config, model_shape,
- self.detection_queue, self.detection_out_events[name], self.detected_frames_queue,
- self.camera_metrics[name]))
+ camera_process = mp.Process(
+ target=track_camera,
+ name=f"camera_processor:{name}",
+ args=(
+ name,
+ config,
+ model_shape,
+ self.config.model.merged_labelmap,
+ self.detection_queue,
+ self.detection_out_events[name],
+ self.detected_frames_queue,
+ self.camera_metrics[name],
+ ),
+ )
camera_process.daemon = True
- self.camera_metrics[name]['process'] = camera_process
+ self.camera_metrics[name]["process"] = camera_process
camera_process.start()
logger.info(f"Camera processor started for {name}: {camera_process.pid}")
def start_camera_capture_processes(self):
for name, config in self.config.cameras.items():
- capture_process = mp.Process(target=capture_camera, name=f"camera_capture:{name}", args=(name, config,
- self.camera_metrics[name]))
+ capture_process = mp.Process(
+ target=capture_camera,
+ name=f"camera_capture:{name}",
+ args=(name, config, self.camera_metrics[name]),
+ )
capture_process.daemon = True
- self.camera_metrics[name]['capture_process'] = capture_process
+ self.camera_metrics[name]["capture_process"] = capture_process
capture_process.start()
logger.info(f"Capture process started for {name}: {capture_process.pid}")
-
+
def start_event_processor(self):
- self.event_processor = EventProcessor(self.config, self.camera_metrics, self.event_queue, self.event_processed_queue, self.stop_event)
+ self.event_processor = EventProcessor(
+ self.config,
+ self.camera_metrics,
+ self.event_queue,
+ self.event_processed_queue,
+ self.stop_event,
+ )
self.event_processor.start()
-
+
def start_event_cleanup(self):
self.event_cleanup = EventCleanup(self.config, self.stop_event)
self.event_cleanup.start()
-
+
def start_recording_maintainer(self):
self.recording_maintainer = RecordingMaintainer(self.config, self.stop_event)
self.recording_maintainer.start()
+ def start_recording_cleanup(self):
+ self.recording_cleanup = RecordingCleanup(self.config, self.stop_event)
+ self.recording_cleanup.start()
+
def start_stats_emitter(self):
- self.stats_emitter = StatsEmitter(self.config, self.stats_tracking, self.mqtt_client, self.config.mqtt.topic_prefix, self.stop_event)
+ self.stats_emitter = StatsEmitter(
+ self.config,
+ self.stats_tracking,
+ self.mqtt_client,
+ self.config.mqtt.topic_prefix,
+ self.stop_event,
+ )
self.stats_emitter.start()
def start_watchdog(self):
@@ -204,6 +315,7 @@ def start_watchdog(self):
def start(self):
self.init_logger()
+ logger.info(f"Starting Frigate ({VERSION})")
try:
try:
self.init_config()
@@ -223,14 +335,17 @@ def start(self):
self.log_process.terminate()
sys.exit(1)
self.start_detectors()
+ self.start_video_output_processor()
self.start_detected_frames_processor()
self.start_camera_processors()
self.start_camera_capture_processes()
self.init_stats()
self.init_web_server()
+ self.start_mqtt_relay()
self.start_event_processor()
self.start_event_cleanup()
self.start_recording_maintainer()
+ self.start_recording_cleanup()
self.start_stats_emitter()
self.start_watchdog()
# self.zeroconf = broadcast_zeroconf(self.config.mqtt.client_id)
@@ -238,22 +353,26 @@ def start(self):
def receiveSignal(signalNumber, frame):
self.stop()
sys.exit()
-
+
signal.signal(signal.SIGTERM, receiveSignal)
- server = pywsgi.WSGIServer(('127.0.0.1', 5001), self.flask_app, handler_class=WebSocketHandler)
- server.serve_forever()
+ try:
+ self.flask_app.run(host="127.0.0.1", port=5001, debug=False)
+ except KeyboardInterrupt:
+ pass
self.stop()
-
+
def stop(self):
logger.info(f"Stopping...")
self.stop_event.set()
+ self.mqtt_relay.stop()
self.detected_frames_processor.join()
self.event_processor.join()
self.event_cleanup.join()
self.recording_maintainer.join()
+ self.recording_cleanup.join()
self.stats_emitter.join()
self.frigate_watchdog.join()
self.db.stop()
diff --git a/frigate/config.py b/frigate/config.py
--- a/frigate/config.py
+++ b/frigate/config.py
@@ -1,1057 +1,806 @@
-import base64
+from __future__ import annotations
+
import json
import logging
import os
-from typing import Dict
+from enum import Enum
+from typing import Dict, List, Optional, Tuple, Union
-import cv2
import matplotlib.pyplot as plt
import numpy as np
-import voluptuous as vol
import yaml
+from pydantic import BaseModel, Extra, Field, validator
+from pydantic.fields import PrivateAttr
-from frigate.const import RECORD_DIR, CLIPS_DIR, CACHE_DIR
-from frigate.util import create_mask
+from frigate.const import BASE_DIR, CACHE_DIR, RECORD_DIR
+from frigate.edgetpu import load_labels
+from frigate.util import create_mask, deep_merge
logger = logging.getLogger(__name__)
-DEFAULT_TRACKED_OBJECTS = ['person']
-
-DETECTORS_SCHEMA = vol.Schema(
- {
- vol.Required(str): {
- vol.Required('type', default='edgetpu'): vol.In(['cpu', 'edgetpu']),
- vol.Optional('device', default='usb'): str,
- vol.Optional('num_threads', default=3): int
- }
- }
-)
-
-DEFAULT_DETECTORS = {
- 'coral': {
- 'type': 'edgetpu',
- 'device': 'usb'
- }
-}
-
-MQTT_SCHEMA = vol.Schema(
- {
- vol.Required('host'): str,
- vol.Optional('port', default=1883): int,
- vol.Optional('topic_prefix', default='frigate'): str,
- vol.Optional('client_id', default='frigate'): str,
- vol.Optional('stats_interval', default=60): int,
- 'user': str,
- 'password': str
- }
-)
-
-RETAIN_SCHEMA = vol.Schema(
- {
- vol.Required('default',default=10): int,
- 'objects': {
- str: int
- }
- }
-)
-
-CLIPS_SCHEMA = vol.Schema(
- {
- vol.Optional('max_seconds', default=300): int,
- 'tmpfs_cache_size': str,
- vol.Optional('retain', default={}): RETAIN_SCHEMA
- }
-)
-
-FFMPEG_GLOBAL_ARGS_DEFAULT = ['-hide_banner','-loglevel','warning']
-FFMPEG_INPUT_ARGS_DEFAULT = ['-avoid_negative_ts', 'make_zero',
- '-fflags', '+genpts+discardcorrupt',
- '-rtsp_transport', 'tcp',
- '-stimeout', '5000000',
- '-use_wallclock_as_timestamps', '1']
-DETECT_FFMPEG_OUTPUT_ARGS_DEFAULT = ['-f', 'rawvideo',
- '-pix_fmt', 'yuv420p']
-RTMP_FFMPEG_OUTPUT_ARGS_DEFAULT = ["-c", "copy", "-f", "flv"]
-SAVE_CLIPS_FFMPEG_OUTPUT_ARGS_DEFAULT = ["-f", "segment", "-segment_time",
- "10", "-segment_format", "mp4", "-reset_timestamps", "1", "-strftime",
- "1", "-c", "copy", "-an"]
-RECORD_FFMPEG_OUTPUT_ARGS_DEFAULT = ["-f", "segment", "-segment_time",
- "60", "-segment_format", "mp4", "-reset_timestamps", "1", "-strftime",
- "1", "-c", "copy", "-an"]
-
-GLOBAL_FFMPEG_SCHEMA = vol.Schema(
- {
- vol.Optional('global_args', default=FFMPEG_GLOBAL_ARGS_DEFAULT): vol.Any(str, [str]),
- vol.Optional('hwaccel_args', default=[]): vol.Any(str, [str]),
- vol.Optional('input_args', default=FFMPEG_INPUT_ARGS_DEFAULT): vol.Any(str, [str]),
- vol.Optional('output_args', default={}): {
- vol.Optional('detect', default=DETECT_FFMPEG_OUTPUT_ARGS_DEFAULT): vol.Any(str, [str]),
- vol.Optional('record', default=RECORD_FFMPEG_OUTPUT_ARGS_DEFAULT): vol.Any(str, [str]),
- vol.Optional('clips', default=SAVE_CLIPS_FFMPEG_OUTPUT_ARGS_DEFAULT): vol.Any(str, [str]),
- vol.Optional('rtmp', default=RTMP_FFMPEG_OUTPUT_ARGS_DEFAULT): vol.Any(str, [str]),
- }
- }
-)
-
-MOTION_SCHEMA = vol.Schema(
- {
- 'mask': vol.Any(str, [str]),
- 'threshold': vol.Range(min=1, max=255),
- 'contour_area': int,
- 'delta_alpha': float,
- 'frame_alpha': float,
- 'frame_height': int
- }
-)
-
-DETECT_SCHEMA = vol.Schema(
- {
- 'max_disappeared': int
- }
-)
-
-FILTER_SCHEMA = vol.Schema(
- {
- str: {
- 'min_area': int,
- 'max_area': int,
- 'threshold': float,
- }
- }
-)
-
-def filters_for_all_tracked_objects(object_config):
- for tracked_object in object_config.get('track', DEFAULT_TRACKED_OBJECTS):
- if not 'filters' in object_config:
- object_config['filters'] = {}
- if not tracked_object in object_config['filters']:
- object_config['filters'][tracked_object] = {}
- return object_config
-
-OBJECTS_SCHEMA = vol.Schema(vol.All(filters_for_all_tracked_objects,
- {
- 'track': [str],
- 'mask': vol.Any(str, [str]),
- vol.Optional('filters', default = {}): FILTER_SCHEMA.extend(
- {
- str: {
- 'min_score': float,
- 'mask': vol.Any(str, [str]),
- }
- })
- }
-))
-
-def each_role_used_once(inputs):
- roles = [role for i in inputs for role in i['roles']]
- roles_set = set(roles)
- if len(roles) > len(roles_set):
- raise ValueError
- return inputs
-
-def detect_is_required(inputs):
- roles = [role for i in inputs for role in i['roles']]
- if not 'detect' in roles:
- raise ValueError
- return inputs
-
-CAMERA_FFMPEG_SCHEMA = vol.Schema(
- {
- vol.Required('inputs'): vol.All([{
- vol.Required('path'): str,
- vol.Required('roles'): ['detect', 'clips', 'record', 'rtmp'],
- 'global_args': vol.Any(str, [str]),
- 'hwaccel_args': vol.Any(str, [str]),
- 'input_args': vol.Any(str, [str]),
- }], vol.Msg(each_role_used_once, msg="Each input role may only be used once"),
- vol.Msg(detect_is_required, msg="The detect role is required")),
- 'global_args': vol.Any(str, [str]),
- 'hwaccel_args': vol.Any(str, [str]),
- 'input_args': vol.Any(str, [str]),
- 'output_args': {
- vol.Optional('detect', default=DETECT_FFMPEG_OUTPUT_ARGS_DEFAULT): vol.Any(str, [str]),
- vol.Optional('record', default=RECORD_FFMPEG_OUTPUT_ARGS_DEFAULT): vol.Any(str, [str]),
- vol.Optional('clips', default=SAVE_CLIPS_FFMPEG_OUTPUT_ARGS_DEFAULT): vol.Any(str, [str]),
- vol.Optional('rtmp', default=RTMP_FFMPEG_OUTPUT_ARGS_DEFAULT): vol.Any(str, [str]),
- }
- }
-)
-
-def ensure_zones_and_cameras_have_different_names(cameras):
- zones = [zone for camera in cameras.values() for zone in camera['zones'].keys()]
- for zone in zones:
- if zone in cameras.keys():
- raise ValueError
- return cameras
-
-CAMERAS_SCHEMA = vol.Schema(vol.All(
- {
- str: {
- vol.Required('ffmpeg'): CAMERA_FFMPEG_SCHEMA,
- vol.Required('height'): int,
- vol.Required('width'): int,
- 'fps': int,
- vol.Optional('best_image_timeout', default=60): int,
- vol.Optional('zones', default={}): {
- str: {
- vol.Required('coordinates'): vol.Any(str, [str]),
- vol.Optional('filters', default={}): FILTER_SCHEMA
- }
- },
- vol.Optional('clips', default={}): {
- vol.Optional('enabled', default=False): bool,
- vol.Optional('pre_capture', default=5): int,
- vol.Optional('post_capture', default=5): int,
- vol.Optional('required_zones', default=[]): [str],
- 'objects': [str],
- vol.Optional('retain', default={}): RETAIN_SCHEMA,
- },
- vol.Optional('record', default={}): {
- 'enabled': bool,
- 'retain_days': int,
- },
- vol.Optional('rtmp', default={}): {
- vol.Required('enabled', default=True): bool,
- },
- vol.Optional('snapshots', default={}): {
- vol.Optional('enabled', default=False): bool,
- vol.Optional('timestamp', default=False): bool,
- vol.Optional('bounding_box', default=False): bool,
- vol.Optional('crop', default=False): bool,
- vol.Optional('required_zones', default=[]): [str],
- 'height': int,
- vol.Optional('retain', default={}): RETAIN_SCHEMA,
- },
- vol.Optional('mqtt', default={}): {
- vol.Optional('enabled', default=True): bool,
- vol.Optional('timestamp', default=True): bool,
- vol.Optional('bounding_box', default=True): bool,
- vol.Optional('crop', default=True): bool,
- vol.Optional('height', default=270): int,
- vol.Optional('required_zones', default=[]): [str],
- },
- vol.Optional('objects', default={}): OBJECTS_SCHEMA,
- vol.Optional('motion', default={}): MOTION_SCHEMA,
- vol.Optional('detect', default={}): DETECT_SCHEMA.extend({
- vol.Optional('enabled', default=True): bool
- })
- }
- }, vol.Msg(ensure_zones_and_cameras_have_different_names, msg='Zones cannot share names with cameras'))
-)
-
-FRIGATE_CONFIG_SCHEMA = vol.Schema(
- {
- vol.Optional('database', default={}): {
- vol.Optional('path', default=os.path.join(CLIPS_DIR, 'frigate.db')): str
- },
- vol.Optional('model', default={'width': 320, 'height': 320}): {
- vol.Required('width'): int,
- vol.Required('height'): int
- },
- vol.Optional('detectors', default=DEFAULT_DETECTORS): DETECTORS_SCHEMA,
- 'mqtt': MQTT_SCHEMA,
- vol.Optional('logger', default={'default': 'info', 'logs': {}}): {
- vol.Optional('default', default='info'): vol.In(['info', 'debug', 'warning', 'error', 'critical']),
- vol.Optional('logs', default={}): {str: vol.In(['info', 'debug', 'warning', 'error', 'critical']) }
- },
- vol.Optional('snapshots', default={}): {
- vol.Optional('retain', default={}): RETAIN_SCHEMA
- },
- vol.Optional('clips', default={}): CLIPS_SCHEMA,
- vol.Optional('record', default={}): {
- vol.Optional('enabled', default=False): bool,
- vol.Optional('retain_days', default=30): int,
- },
- vol.Optional('ffmpeg', default={}): GLOBAL_FFMPEG_SCHEMA,
- vol.Optional('objects', default={}): OBJECTS_SCHEMA,
- vol.Optional('motion', default={}): MOTION_SCHEMA,
- vol.Optional('detect', default={}): DETECT_SCHEMA,
- vol.Required('cameras', default={}): CAMERAS_SCHEMA,
- vol.Optional('environment_vars', default={}): { str: str }
- }
-)
-
-class DatabaseConfig():
- def __init__(self, config):
- self._path = config['path']
-
- @property
- def path(self):
- return self._path
-
- def to_dict(self):
- return {
- 'path': self.path
- }
-
-class ModelConfig():
- def __init__(self, config):
- self._width = config['width']
- self._height = config['height']
-
- @property
- def width(self):
- return self._width
-
- @property
- def height(self):
- return self._height
-
- def to_dict(self):
- return {
- 'width': self.width,
- 'height': self.height
- }
-
-class DetectorConfig():
- def __init__(self, config):
- self._type = config['type']
- self._device = config['device']
- self._num_threads = config['num_threads']
-
- @property
- def type(self):
- return self._type
-
- @property
- def device(self):
- return self._device
-
- @property
- def num_threads(self):
- return self._num_threads
-
- def to_dict(self):
- return {
- 'type': self.type,
- 'device': self.device,
- 'num_threads': self.num_threads
- }
-
-class LoggerConfig():
- def __init__(self, config):
- self._default = config['default'].upper()
- self._logs = {k: v.upper() for k, v in config['logs'].items()}
-
- @property
- def default(self):
- return self._default
-
- @property
- def logs(self):
- return self._logs
-
- def to_dict(self):
- return {
- 'default': self.default,
- 'logs': self.logs
- }
-
-class MqttConfig():
- def __init__(self, config):
- self._host = config['host']
- self._port = config['port']
- self._topic_prefix = config['topic_prefix']
- self._client_id = config['client_id']
- self._user = config.get('user')
- self._password = config.get('password')
- self._stats_interval = config.get('stats_interval')
-
- @property
- def host(self):
- return self._host
-
- @property
- def port(self):
- return self._port
-
- @property
- def topic_prefix(self):
- return self._topic_prefix
-
- @property
- def client_id(self):
- return self._client_id
-
- @property
- def user(self):
- return self._user
-
- @property
- def password(self):
- return self._password
-
- @property
- def stats_interval(self):
- return self._stats_interval
-
- def to_dict(self):
- return {
- 'host': self.host,
- 'port': self.port,
- 'topic_prefix': self.topic_prefix,
- 'client_id': self.client_id,
- 'user': self.user,
- 'stats_interval': self.stats_interval
- }
+# TODO: Identify what the default format to display timestamps is
+DEFAULT_TIME_FORMAT = "%m/%d/%Y %H:%M:%S"
+# German Style:
+# DEFAULT_TIME_FORMAT = "%d.%m.%Y %H:%M:%S"
+
+FRIGATE_ENV_VARS = {k: v for k, v in os.environ.items() if k.startswith("FRIGATE_")}
+
+DEFAULT_TRACKED_OBJECTS = ["person"]
+DEFAULT_DETECTORS = {"cpu": {"type": "cpu"}}
+
+
+class FrigateBaseModel(BaseModel):
+ class Config:
+ extra = Extra.forbid
+
+
+class DetectorTypeEnum(str, Enum):
+ edgetpu = "edgetpu"
+ cpu = "cpu"
+
+
+class DetectorConfig(FrigateBaseModel):
+ type: DetectorTypeEnum = Field(default=DetectorTypeEnum.cpu, title="Detector Type")
+ device: str = Field(default="usb", title="Device Type")
+ num_threads: int = Field(default=3, title="Number of detection threads")
+
+
+class MqttConfig(FrigateBaseModel):
+ host: str = Field(title="MQTT Host")
+ port: int = Field(default=1883, title="MQTT Port")
+ topic_prefix: str = Field(default="frigate", title="MQTT Topic Prefix")
+ client_id: str = Field(default="frigate", title="MQTT Client ID")
+ stats_interval: int = Field(default=60, title="MQTT Camera Stats Interval")
+ user: Optional[str] = Field(title="MQTT Username")
+ password: Optional[str] = Field(title="MQTT Password")
+ tls_ca_certs: Optional[str] = Field(title="MQTT TLS CA Certificates")
+ tls_client_cert: Optional[str] = Field(title="MQTT TLS Client Certificate")
+ tls_client_key: Optional[str] = Field(title="MQTT TLS Client Key")
+ tls_insecure: Optional[bool] = Field(title="MQTT TLS Insecure")
+
+ @validator("password", pre=True, always=True)
+ def validate_password(cls, v, values):
+ if (v is None) != (values["user"] is None):
+ raise ValueError("Password must be provided with username.")
+ return v
+
+
+class RetainConfig(FrigateBaseModel):
+ default: float = Field(default=10, title="Default retention period.")
+ objects: Dict[str, float] = Field(
+ default_factory=dict, title="Object retention period."
+ )
+
+
+class EventsConfig(FrigateBaseModel):
+ max_seconds: int = Field(default=300, title="Maximum event duration.")
+ pre_capture: int = Field(default=5, title="Seconds to retain before event starts.")
+ post_capture: int = Field(default=5, title="Seconds to retain after event ends.")
+ required_zones: List[str] = Field(
+ default_factory=list,
+ title="List of required zones to be entered in order to save the event.",
+ )
+ objects: Optional[List[str]] = Field(
+ title="List of objects to be detected in order to save the event.",
+ )
+ retain: RetainConfig = Field(
+ default_factory=RetainConfig, title="Event retention settings."
+ )
+
+
+class RecordConfig(FrigateBaseModel):
+ enabled: bool = Field(default=False, title="Enable record on all cameras.")
+ retain_days: float = Field(default=0, title="Recording retention period in days.")
+ events: EventsConfig = Field(
+ default_factory=EventsConfig, title="Event specific settings."
+ )
+
+
+class MotionConfig(FrigateBaseModel):
+ threshold: int = Field(
+ default=25,
+ title="Motion detection threshold (1-255).",
+ ge=1,
+ le=255,
+ )
+ contour_area: Optional[int] = Field(title="Contour Area")
+ delta_alpha: float = Field(default=0.2, title="Delta Alpha")
+ frame_alpha: float = Field(default=0.2, title="Frame Alpha")
+ frame_height: Optional[int] = Field(title="Frame Height")
+ mask: Union[str, List[str]] = Field(
+ default="", title="Coordinates polygon for the motion mask."
+ )
+
+
+class RuntimeMotionConfig(MotionConfig):
+ raw_mask: Union[str, List[str]] = ""
+ mask: np.ndarray = None
+
+ def __init__(self, **config):
+ frame_shape = config.get("frame_shape", (1, 1))
+
+ if "frame_height" not in config:
+ config["frame_height"] = max(frame_shape[0] // 6, 180)
+
+ if "contour_area" not in config:
+ frame_width = frame_shape[1] * config["frame_height"] / frame_shape[0]
+ config["contour_area"] = (
+ config["frame_height"] * frame_width * 0.00173611111
+ )
+
+ mask = config.get("mask", "")
+ config["raw_mask"] = mask
-class CameraInput():
- def __init__(self, camera_config, global_config, ffmpeg_input):
- self._path = ffmpeg_input['path']
- self._roles = ffmpeg_input['roles']
- self._global_args = ffmpeg_input.get('global_args', camera_config.get('global_args', global_config['global_args']))
- self._hwaccel_args = ffmpeg_input.get('hwaccel_args', camera_config.get('hwaccel_args', global_config['hwaccel_args']))
- self._input_args = ffmpeg_input.get('input_args', camera_config.get('input_args', global_config['input_args']))
-
- @property
- def path(self):
- return self._path
-
- @property
- def roles(self):
- return self._roles
-
- @property
- def global_args(self):
- return self._global_args if isinstance(self._global_args, list) else self._global_args.split(' ')
-
- @property
- def hwaccel_args(self):
- return self._hwaccel_args if isinstance(self._hwaccel_args, list) else self._hwaccel_args.split(' ')
-
- @property
- def input_args(self):
- return self._input_args if isinstance(self._input_args, list) else self._input_args.split(' ')
-
-class CameraFfmpegConfig():
- def __init__(self, global_config, config):
- self._inputs = [CameraInput(config, global_config, i) for i in config['inputs']]
- self._output_args = config.get('output_args', global_config['output_args'])
-
- @property
- def inputs(self):
- return self._inputs
-
- @property
- def output_args(self):
- return {k: v if isinstance(v, list) else v.split(' ') for k, v in self._output_args.items()}
-
-class RetainConfig():
- def __init__(self, global_config, config):
- self._default = config.get('default', global_config.get('default'))
- self._objects = config.get('objects', global_config.get('objects', {}))
-
- @property
- def default(self):
- return self._default
-
- @property
- def objects(self):
- return self._objects
-
- def to_dict(self):
- return {
- 'default': self.default,
- 'objects': self.objects
- }
-
-class ClipsConfig():
- def __init__(self, config):
- self._max_seconds = config['max_seconds']
- self._tmpfs_cache_size = config.get('tmpfs_cache_size', '').strip()
- self._retain = RetainConfig(config['retain'], config['retain'])
-
- @property
- def max_seconds(self):
- return self._max_seconds
-
- @property
- def tmpfs_cache_size(self):
- return self._tmpfs_cache_size
-
- @property
- def retain(self):
- return self._retain
-
- def to_dict(self):
- return {
- 'max_seconds': self.max_seconds,
- 'tmpfs_cache_size': self.tmpfs_cache_size,
- 'retain': self.retain.to_dict()
- }
-
-class SnapshotsConfig():
- def __init__(self, config):
- self._retain = RetainConfig(config['retain'], config['retain'])
-
- @property
- def retain(self):
- return self._retain
-
- def to_dict(self):
- return {
- 'retain': self.retain.to_dict()
- }
-
-class RecordConfig():
- def __init__(self, global_config, config):
- self._enabled = config.get('enabled', global_config['enabled'])
- self._retain_days = config.get('retain_days', global_config['retain_days'])
-
- @property
- def enabled(self):
- return self._enabled
-
- @property
- def retain_days(self):
- return self._retain_days
-
- def to_dict(self):
- return {
- 'enabled': self.enabled,
- 'retain_days': self.retain_days,
- }
-
-class FilterConfig():
- def __init__(self, global_config, config, global_mask=None, frame_shape=None):
- self._min_area = config.get('min_area', global_config.get('min_area', 0))
- self._max_area = config.get('max_area', global_config.get('max_area', 24000000))
- self._threshold = config.get('threshold', global_config.get('threshold', 0.7))
- self._min_score = config.get('min_score', global_config.get('min_score', 0.5))
-
- self._raw_mask = []
- if global_mask:
- if isinstance(global_mask, list):
- self._raw_mask += global_mask
- elif isinstance(global_mask, str):
- self._raw_mask += [global_mask]
-
- mask = config.get('mask')
if mask:
- if isinstance(mask, list):
- self._raw_mask += mask
- elif isinstance(mask, str):
- self._raw_mask += [mask]
- self._mask = create_mask(frame_shape, self._raw_mask) if self._raw_mask else None
-
- @property
- def min_area(self):
- return self._min_area
-
- @property
- def max_area(self):
- return self._max_area
-
- @property
- def threshold(self):
- return self._threshold
-
- @property
- def min_score(self):
- return self._min_score
-
- @property
- def mask(self):
- return self._mask
-
- def to_dict(self):
- return {
- 'min_area': self.min_area,
- 'max_area': self.max_area,
- 'threshold': self.threshold,
- 'min_score': self.min_score,
- 'mask': self._raw_mask
- }
-
-class ObjectConfig():
- def __init__(self, global_config, config, frame_shape):
- self._track = config.get('track', global_config.get('track', DEFAULT_TRACKED_OBJECTS))
- self._raw_mask = config.get('mask')
- self._filters = { name: FilterConfig(global_config['filters'].get(name, {}), config['filters'].get(name, {}), self._raw_mask, frame_shape) for name in self._track }
-
- @property
- def track(self):
- return self._track
-
- @property
- def filters(self) -> Dict[str, FilterConfig]:
- return self._filters
-
- def to_dict(self):
- return {
- 'track': self.track,
- 'mask': self._raw_mask,
- 'filters': { k: f.to_dict() for k, f in self.filters.items() }
- }
-
-class CameraSnapshotsConfig():
- def __init__(self, global_config, config):
- self._enabled = config['enabled']
- self._timestamp = config['timestamp']
- self._bounding_box = config['bounding_box']
- self._crop = config['crop']
- self._height = config.get('height')
- self._retain = RetainConfig(global_config['snapshots']['retain'], config['retain'])
- self._required_zones = config['required_zones']
-
- @property
- def enabled(self):
- return self._enabled
-
- @property
- def timestamp(self):
- return self._timestamp
-
- @property
- def bounding_box(self):
- return self._bounding_box
-
- @property
- def crop(self):
- return self._crop
-
- @property
- def height(self):
- return self._height
-
- @property
- def retain(self):
- return self._retain
-
- @property
- def required_zones(self):
- return self._required_zones
-
- def to_dict(self):
- return {
- 'enabled': self.enabled,
- 'timestamp': self.timestamp,
- 'bounding_box': self.bounding_box,
- 'crop': self.crop,
- 'height': self.height,
- 'retain': self.retain.to_dict(),
- 'required_zones': self.required_zones
- }
-
-class CameraMqttConfig():
- def __init__(self, config):
- self._enabled = config['enabled']
- self._timestamp = config['timestamp']
- self._bounding_box = config['bounding_box']
- self._crop = config['crop']
- self._height = config.get('height')
- self._required_zones = config['required_zones']
-
- @property
- def enabled(self):
- return self._enabled
-
- @property
- def timestamp(self):
- return self._timestamp
-
- @property
- def bounding_box(self):
- return self._bounding_box
-
- @property
- def crop(self):
- return self._crop
-
- @property
- def height(self):
- return self._height
-
- @property
- def required_zones(self):
- return self._required_zones
-
- def to_dict(self):
- return {
- 'enabled': self.enabled,
- 'timestamp': self.timestamp,
- 'bounding_box': self.bounding_box,
- 'crop': self.crop,
- 'height': self.height,
- 'required_zones': self.required_zones
- }
-
-class CameraClipsConfig():
- def __init__(self, global_config, config):
- self._enabled = config['enabled']
- self._pre_capture = config['pre_capture']
- self._post_capture = config['post_capture']
- self._objects = config.get('objects')
- self._retain = RetainConfig(global_config['clips']['retain'], config['retain'])
- self._required_zones = config['required_zones']
-
- @property
- def enabled(self):
- return self._enabled
-
- @property
- def pre_capture(self):
- return self._pre_capture
-
- @property
- def post_capture(self):
- return self._post_capture
-
- @property
- def objects(self):
- return self._objects
-
- @property
- def retain(self):
- return self._retain
-
- @property
- def required_zones(self):
- return self._required_zones
-
- def to_dict(self):
- return {
- 'enabled': self.enabled,
- 'pre_capture': self.pre_capture,
- 'post_capture': self.post_capture,
- 'objects': self.objects,
- 'retain': self.retain.to_dict(),
- 'required_zones': self.required_zones
- }
-
-class CameraRtmpConfig():
- def __init__(self, global_config, config):
- self._enabled = config['enabled']
-
- @property
- def enabled(self):
- return self._enabled
-
- def to_dict(self):
- return {
- 'enabled': self.enabled,
- }
-
-class MotionConfig():
- def __init__(self, global_config, config, frame_shape):
- self._raw_mask = config.get('mask')
- if self._raw_mask:
- self._mask = create_mask(frame_shape, self._raw_mask)
+ config["mask"] = create_mask(frame_shape, mask)
else:
- default_mask = np.zeros(frame_shape, np.uint8)
- default_mask[:] = 255
- self._mask = default_mask
- self._threshold = config.get('threshold', global_config.get('threshold', 25))
- self._contour_area = config.get('contour_area', global_config.get('contour_area', 100))
- self._delta_alpha = config.get('delta_alpha', global_config.get('delta_alpha', 0.2))
- self._frame_alpha = config.get('frame_alpha', global_config.get('frame_alpha', 0.2))
- self._frame_height = config.get('frame_height', global_config.get('frame_height', frame_shape[0]//6))
-
- @property
- def mask(self):
- return self._mask
-
- @property
- def threshold(self):
- return self._threshold
-
- @property
- def contour_area(self):
- return self._contour_area
-
- @property
- def delta_alpha(self):
- return self._delta_alpha
-
- @property
- def frame_alpha(self):
- return self._frame_alpha
-
- @property
- def frame_height(self):
- return self._frame_height
-
- def to_dict(self):
- return {
- 'mask': self._raw_mask,
- 'threshold': self.threshold,
- 'contour_area': self.contour_area,
- 'delta_alpha': self.delta_alpha,
- 'frame_alpha': self.frame_alpha,
- 'frame_height': self.frame_height,
- }
-
-
-
-class DetectConfig():
- def __init__(self, global_config, config, camera_fps):
- self._enabled = config['enabled']
- self._max_disappeared = config.get('max_disappeared', global_config.get('max_disappeared', camera_fps*5))
-
- @property
- def enabled(self):
- return self._enabled
+ empty_mask = np.zeros(frame_shape, np.uint8)
+ empty_mask[:] = 255
+ config["mask"] = empty_mask
+
+ super().__init__(**config)
+
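+    # when serializing, report the original mask string(s) rather than the numpy array built at runtime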
+ def dict(self, **kwargs):
+ ret = super().dict(**kwargs)
+ if "mask" in ret:
+ ret["mask"] = ret["raw_mask"]
+ ret.pop("raw_mask")
+ return ret
+
+ class Config:
+ arbitrary_types_allowed = True
+ extra = Extra.ignore
+
+
+class DetectConfig(FrigateBaseModel):
+ height: int = Field(default=720, title="Height of the stream for the detect role.")
+ width: int = Field(default=1280, title="Width of the stream for the detect role.")
+ fps: int = Field(
+ default=5, title="Number of frames per second to process through detection."
+ )
+ enabled: bool = Field(default=True, title="Detection Enabled.")
+ max_disappeared: Optional[int] = Field(
+ title="Maximum number of frames the object can dissapear before detection ends."
+ )
+
+
+class FilterConfig(FrigateBaseModel):
+ min_area: int = Field(
+ default=0, title="Minimum area of bounding box for object to be counted."
+ )
+ max_area: int = Field(
+ default=24000000, title="Maximum area of bounding box for object to be counted."
+ )
+ threshold: float = Field(
+ default=0.7,
+ title="Average detection confidence threshold for object to be counted.",
+ )
+ min_score: float = Field(
+ default=0.5, title="Minimum detection confidence for object to be counted."
+ )
+ mask: Optional[Union[str, List[str]]] = Field(
+ title="Detection area polygon mask for this filter configuration.",
+ )
+
+
+class RuntimeFilterConfig(FilterConfig):
+ mask: Optional[np.ndarray]
+ raw_mask: Optional[Union[str, List[str]]]
+
+ def __init__(self, **config):
+ mask = config.get("mask")
+ config["raw_mask"] = mask
+
+ if mask is not None:
+ config["mask"] = create_mask(config.get("frame_shape", (1, 1)), mask)
+
+ super().__init__(**config)
+
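+    # as with RuntimeMotionConfig, serialize the raw mask rather than the numpy array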
+ def dict(self, **kwargs):
+ ret = super().dict(**kwargs)
+ if "mask" in ret:
+ ret["mask"] = ret["raw_mask"]
+ ret.pop("raw_mask")
+ return ret
+
+ class Config:
+ arbitrary_types_allowed = True
+ extra = Extra.ignore
+
+
+# this uses the base model because the color is an extra attribute
+class ZoneConfig(BaseModel):
+ filters: Dict[str, FilterConfig] = Field(
+ default_factory=dict, title="Zone filters."
+ )
+ coordinates: Union[str, List[str]] = Field(
+ title="Coordinates polygon for the defined zone."
+ )
+ objects: List[str] = Field(
+ default_factory=list,
+ title="List of objects that can trigger the zone.",
+ )
+ _color: Optional[Tuple[int, int, int]] = PrivateAttr()
+ _contour: np.ndarray = PrivateAttr()
+
+ @property
+ def color(self) -> Tuple[int, int, int]:
+ return self._color
@property
- def max_disappeared(self):
- return self._max_disappeared
-
- def to_dict(self):
- return {
- 'enabled': self.enabled,
- 'max_disappeared': self._max_disappeared,
- }
-
-class ZoneConfig():
- def __init__(self, name, config):
- self._coordinates = config['coordinates']
- self._filters = { name: FilterConfig(c, c) for name, c in config['filters'].items() }
+ def contour(self) -> np.ndarray:
+ return self._contour
- if isinstance(self._coordinates, list):
- self._contour = np.array([[int(p.split(',')[0]), int(p.split(',')[1])] for p in self._coordinates])
- elif isinstance(self._coordinates, str):
- points = self._coordinates.split(',')
- self._contour = np.array([[int(points[i]), int(points[i+1])] for i in range(0, len(points), 2)])
+ def __init__(self, **config):
+ super().__init__(**config)
+
+ self._color = config.get("color", (0, 0, 0))
+ coordinates = config["coordinates"]
+
+ if isinstance(coordinates, list):
+ self._contour = np.array(
+ [[int(p.split(",")[0]), int(p.split(",")[1])] for p in coordinates]
+ )
+ elif isinstance(coordinates, str):
+ points = coordinates.split(",")
+ self._contour = np.array(
+ [[int(points[i]), int(points[i + 1])] for i in range(0, len(points), 2)]
+ )
else:
- print(f"Unable to parse zone coordinates for {name}")
self._contour = np.array([])
- self._color = (0,0,0)
-
- @property
- def coordinates(self):
- return self._coordinates
- @property
- def contour(self):
- return self._contour
+class ObjectConfig(FrigateBaseModel):
+ track: List[str] = Field(default=DEFAULT_TRACKED_OBJECTS, title="Objects to track.")
+ filters: Optional[Dict[str, FilterConfig]] = Field(title="Object filters.")
+ mask: Union[str, List[str]] = Field(default="", title="Object mask.")
+
+
+class BirdseyeModeEnum(str, Enum):
+ objects = "objects"
+ motion = "motion"
+ continuous = "continuous"
+
+
+class BirdseyeConfig(FrigateBaseModel):
+ enabled: bool = Field(default=True, title="Enable birdseye view.")
+ width: int = Field(default=1280, title="Birdseye width.")
+ height: int = Field(default=720, title="Birdseye height.")
+ quality: int = Field(
+ default=8,
+ title="Encoding quality.",
+ ge=1,
+ le=31,
+ )
+ mode: BirdseyeModeEnum = Field(
+ default=BirdseyeModeEnum.objects, title="Tracking mode."
+ )
+
+
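+# default ffmpeg arguments used when the configuration does not override them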
+FFMPEG_GLOBAL_ARGS_DEFAULT = ["-hide_banner", "-loglevel", "warning"]
+FFMPEG_INPUT_ARGS_DEFAULT = [
+ "-avoid_negative_ts",
+ "make_zero",
+ "-fflags",
+ "+genpts+discardcorrupt",
+ "-rtsp_transport",
+ "tcp",
+ "-stimeout",
+ "5000000",
+ "-use_wallclock_as_timestamps",
+ "1",
+]
+DETECT_FFMPEG_OUTPUT_ARGS_DEFAULT = ["-f", "rawvideo", "-pix_fmt", "yuv420p"]
+RTMP_FFMPEG_OUTPUT_ARGS_DEFAULT = ["-c", "copy", "-f", "flv"]
+RECORD_FFMPEG_OUTPUT_ARGS_DEFAULT = [
+ "-f",
+ "segment",
+ "-segment_time",
+ "10",
+ "-segment_format",
+ "mp4",
+ "-reset_timestamps",
+ "1",
+ "-strftime",
+ "1",
+ "-c",
+ "copy",
+ "-an",
+]
+
+
+class FfmpegOutputArgsConfig(FrigateBaseModel):
+ detect: Union[str, List[str]] = Field(
+ default=DETECT_FFMPEG_OUTPUT_ARGS_DEFAULT,
+ title="Detect role FFmpeg output arguments.",
+ )
+ record: Union[str, List[str]] = Field(
+ default=RECORD_FFMPEG_OUTPUT_ARGS_DEFAULT,
+ title="Record role FFmpeg output arguments.",
+ )
+ rtmp: Union[str, List[str]] = Field(
+ default=RTMP_FFMPEG_OUTPUT_ARGS_DEFAULT,
+ title="RTMP role FFmpeg output arguments.",
+ )
+
+
+class FfmpegConfig(FrigateBaseModel):
+ global_args: Union[str, List[str]] = Field(
+ default=FFMPEG_GLOBAL_ARGS_DEFAULT, title="Global FFmpeg arguments."
+ )
+ hwaccel_args: Union[str, List[str]] = Field(
+ default_factory=list, title="FFmpeg hardware acceleration arguments."
+ )
+ input_args: Union[str, List[str]] = Field(
+ default=FFMPEG_INPUT_ARGS_DEFAULT, title="FFmpeg input arguments."
+ )
+ output_args: FfmpegOutputArgsConfig = Field(
+ default_factory=FfmpegOutputArgsConfig,
+ title="FFmpeg output arguments per role.",
+ )
+
+
+class CameraRoleEnum(str, Enum):
+ record = "record"
+ rtmp = "rtmp"
+ detect = "detect"
+
+
+class CameraInput(FrigateBaseModel):
+ path: str = Field(title="Camera input path.")
+ roles: List[CameraRoleEnum] = Field(title="Roles assigned to this input.")
+ global_args: Union[str, List[str]] = Field(
+ default_factory=list, title="FFmpeg global arguments."
+ )
+ hwaccel_args: Union[str, List[str]] = Field(
+ default_factory=list, title="FFmpeg hardware acceleration arguments."
+ )
+ input_args: Union[str, List[str]] = Field(
+ default_factory=list, title="FFmpeg input arguments."
+ )
+
+
+class CameraFfmpegConfig(FfmpegConfig):
+ inputs: List[CameraInput] = Field(title="Camera inputs.")
+
+ @validator("inputs")
+ def validate_roles(cls, v):
+ roles = [role for i in v for role in i.roles]
+ roles_set = set(roles)
+
+ if len(roles) > len(roles_set):
+ raise ValueError("Each input role may only be used once.")
+
+ if not "detect" in roles:
+ raise ValueError("The detect role is required.")
+
+ return v
+
+
+class SnapshotsConfig(FrigateBaseModel):
+ enabled: bool = Field(default=False, title="Snapshots enabled.")
+ clean_copy: bool = Field(
+ default=True, title="Create a clean copy of the snapshot image."
+ )
+ timestamp: bool = Field(
+ default=False, title="Add a timestamp overlay on the snapshot."
+ )
+ bounding_box: bool = Field(
+ default=True, title="Add a bounding box overlay on the snapshot."
+ )
+ crop: bool = Field(default=False, title="Crop the snapshot to the detected object.")
+ required_zones: List[str] = Field(
+ default_factory=list,
+ title="List of required zones to be entered in order to save a snapshot.",
+ )
+ height: Optional[int] = Field(title="Snapshot image height.")
+ retain: RetainConfig = Field(
+ default_factory=RetainConfig, title="Snapshot retention."
+ )
+ quality: int = Field(
+ default=70,
+ title="Quality of the encoded jpeg (0-100).",
+ ge=0,
+ le=100,
+ )
+
+
+class ColorConfig(FrigateBaseModel):
+ red: int = Field(default=255, ge=0, le=255, title="Red")
+ green: int = Field(default=255, ge=0, le=255, title="Green")
+ blue: int = Field(default=255, ge=0, le=255, title="Blue")
+
+
+class TimestampPositionEnum(str, Enum):
+ tl = "tl"
+ tr = "tr"
+ bl = "bl"
+ br = "br"
+
+
+class TimestampEffectEnum(str, Enum):
+ solid = "solid"
+ shadow = "shadow"
+
+
+class TimestampStyleConfig(FrigateBaseModel):
+ position: TimestampPositionEnum = Field(
+ default=TimestampPositionEnum.tl, title="Timestamp position."
+ )
+ format: str = Field(default=DEFAULT_TIME_FORMAT, title="Timestamp format.")
+ color: ColorConfig = Field(default_factory=ColorConfig, title="Timestamp color.")
+ thickness: int = Field(default=2, title="Timestamp thickness.")
+ effect: Optional[TimestampEffectEnum] = Field(title="Timestamp effect.")
+
+
+class CameraMqttConfig(FrigateBaseModel):
+ enabled: bool = Field(default=True, title="Send image over MQTT.")
+ timestamp: bool = Field(default=True, title="Add timestamp to MQTT image.")
+ bounding_box: bool = Field(default=True, title="Add bounding box to MQTT image.")
+ crop: bool = Field(default=True, title="Crop MQTT image to detected object.")
+ height: int = Field(default=270, title="MQTT image height.")
+ required_zones: List[str] = Field(
+ default_factory=list,
+ title="List of required zones to be entered in order to send the image.",
+ )
+ quality: int = Field(
+ default=70,
+ title="Quality of the encoded jpeg (0-100).",
+ ge=0,
+ le=100,
+ )
+
+
+class RtmpConfig(FrigateBaseModel):
+ enabled: bool = Field(default=True, title="RTMP restreaming enabled.")
+
+
+class CameraLiveConfig(FrigateBaseModel):
+ height: int = Field(default=720, title="Live camera view height")
+ quality: int = Field(default=8, ge=1, le=31, title="Live camera view quality")
+
+
+class CameraConfig(FrigateBaseModel):
+ name: Optional[str] = Field(title="Camera name.")
+ ffmpeg: CameraFfmpegConfig = Field(title="FFmpeg configuration for the camera.")
+ best_image_timeout: int = Field(
+ default=60,
+ title="How long to wait for the image with the highest confidence score.",
+ )
+ zones: Dict[str, ZoneConfig] = Field(
+ default_factory=dict, title="Zone configuration."
+ )
+ record: RecordConfig = Field(
+ default_factory=RecordConfig, title="Record configuration."
+ )
+ rtmp: RtmpConfig = Field(
+ default_factory=RtmpConfig, title="RTMP restreaming configuration."
+ )
+ live: CameraLiveConfig = Field(
+ default_factory=CameraLiveConfig, title="Live playback settings."
+ )
+ snapshots: SnapshotsConfig = Field(
+ default_factory=SnapshotsConfig, title="Snapshot configuration."
+ )
+ mqtt: CameraMqttConfig = Field(
+ default_factory=CameraMqttConfig, title="MQTT configuration."
+ )
+ objects: ObjectConfig = Field(
+ default_factory=ObjectConfig, title="Object configuration."
+ )
+ motion: Optional[MotionConfig] = Field(title="Motion detection configuration.")
+ detect: DetectConfig = Field(
+ default_factory=DetectConfig, title="Object detection configuration."
+ )
+ timestamp_style: TimestampStyleConfig = Field(
+ default_factory=TimestampStyleConfig, title="Timestamp style configuration."
+ )
+
+ def __init__(self, **config):
+ # Set zone colors
+ if "zones" in config:
+ colors = plt.cm.get_cmap("tab10", len(config["zones"]))
+ config["zones"] = {
+ name: {**z, "color": tuple(round(255 * c) for c in colors(idx)[:3])}
+ for idx, (name, z) in enumerate(config["zones"].items())
+ }
- @contour.setter
- def contour(self, val):
- self._contour = val
+ super().__init__(**config)
@property
- def color(self):
- return self._color
-
- @color.setter
- def color(self, val):
- self._color = val
+ def frame_shape(self) -> Tuple[int, int]:
+ return self.detect.height, self.detect.width
@property
- def filters(self):
- return self._filters
-
- def to_dict(self):
- return {
- 'filters': {k: f.to_dict() for k, f in self.filters.items()},
- 'coordinates': self._coordinates
- }
+ def frame_shape_yuv(self) -> Tuple[int, int]:
+ return self.detect.height * 3 // 2, self.detect.width
-class CameraConfig():
- def __init__(self, name, config, global_config):
- self._name = name
- self._ffmpeg = CameraFfmpegConfig(global_config['ffmpeg'], config['ffmpeg'])
- self._height = config.get('height')
- self._width = config.get('width')
- self._frame_shape = (self._height, self._width)
- self._frame_shape_yuv = (self._frame_shape[0]*3//2, self._frame_shape[1])
- self._fps = config.get('fps')
- self._best_image_timeout = config['best_image_timeout']
- self._zones = { name: ZoneConfig(name, z) for name, z in config['zones'].items() }
- self._clips = CameraClipsConfig(global_config, config['clips'])
- self._record = RecordConfig(global_config['record'], config['record'])
- self._rtmp = CameraRtmpConfig(global_config, config['rtmp'])
- self._snapshots = CameraSnapshotsConfig(global_config, config['snapshots'])
- self._mqtt = CameraMqttConfig(config['mqtt'])
- self._objects = ObjectConfig(global_config['objects'], config.get('objects', {}), self._frame_shape)
- self._motion = MotionConfig(global_config['motion'], config['motion'], self._frame_shape)
- self._detect = DetectConfig(global_config['detect'], config['detect'], config.get('fps', 5))
-
- self._ffmpeg_cmds = []
- for ffmpeg_input in self._ffmpeg.inputs:
+ @property
+ def ffmpeg_cmds(self) -> List[Dict[str, List[str]]]:
+ ffmpeg_cmds = []
+ for ffmpeg_input in self.ffmpeg.inputs:
ffmpeg_cmd = self._get_ffmpeg_cmd(ffmpeg_input)
if ffmpeg_cmd is None:
continue
- self._ffmpeg_cmds.append({
- 'roles': ffmpeg_input.roles,
- 'cmd': ffmpeg_cmd
- })
-
+ ffmpeg_cmds.append({"roles": ffmpeg_input.roles, "cmd": ffmpeg_cmd})
+ return ffmpeg_cmds
- self._set_zone_colors(self._zones)
-
- def _get_ffmpeg_cmd(self, ffmpeg_input):
+ def _get_ffmpeg_cmd(self, ffmpeg_input: CameraInput):
ffmpeg_output_args = []
- if 'detect' in ffmpeg_input.roles:
- ffmpeg_output_args = self.ffmpeg.output_args['detect'] + ffmpeg_output_args + ['pipe:']
- if self.fps:
- ffmpeg_output_args = ["-r", str(self.fps)] + ffmpeg_output_args
- if 'rtmp' in ffmpeg_input.roles and self.rtmp.enabled:
- ffmpeg_output_args = self.ffmpeg.output_args['rtmp'] + [
- f"rtmp://127.0.0.1/live/{self.name}"
- ] + ffmpeg_output_args
- if 'clips' in ffmpeg_input.roles:
- ffmpeg_output_args = self.ffmpeg.output_args['clips'] + [
- f"{os.path.join(CACHE_DIR, self.name)}-%Y%m%d%H%M%S.mp4"
- ] + ffmpeg_output_args
- if 'record' in ffmpeg_input.roles and self.record.enabled:
- ffmpeg_output_args = self.ffmpeg.output_args['record'] + [
- f"{os.path.join(RECORD_DIR, self.name)}-%Y%m%d%H%M%S.mp4"
- ] + ffmpeg_output_args
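+        # build role-specific output args; string args are split on spaces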
+ if "detect" in ffmpeg_input.roles:
+ detect_args = (
+ self.ffmpeg.output_args.detect
+ if isinstance(self.ffmpeg.output_args.detect, list)
+ else self.ffmpeg.output_args.detect.split(" ")
+ )
+ ffmpeg_output_args = (
+ [
+ "-r",
+ str(self.detect.fps),
+ "-s",
+ f"{self.detect.width}x{self.detect.height}",
+ ]
+ + detect_args
+ + ffmpeg_output_args
+ + ["pipe:"]
+ )
+ if "rtmp" in ffmpeg_input.roles and self.rtmp.enabled:
+ rtmp_args = (
+ self.ffmpeg.output_args.rtmp
+ if isinstance(self.ffmpeg.output_args.rtmp, list)
+ else self.ffmpeg.output_args.rtmp.split(" ")
+ )
+ ffmpeg_output_args = (
+ rtmp_args + [f"rtmp://127.0.0.1/live/{self.name}"] + ffmpeg_output_args
+ )
+ if "record" in ffmpeg_input.roles and self.record.enabled:
+ record_args = (
+ self.ffmpeg.output_args.record
+ if isinstance(self.ffmpeg.output_args.record, list)
+ else self.ffmpeg.output_args.record.split(" ")
+ )
+ ffmpeg_output_args = (
+ record_args
+ + [f"{os.path.join(CACHE_DIR, self.name)}-%Y%m%d%H%M%S.mp4"]
+ + ffmpeg_output_args
+ )
         # if there aren't any outputs enabled for this input
if len(ffmpeg_output_args) == 0:
return None
- cmd = (['ffmpeg'] +
- ffmpeg_input.global_args +
- ffmpeg_input.hwaccel_args +
- ffmpeg_input.input_args +
- ['-i', ffmpeg_input.path] +
- ffmpeg_output_args)
-
- return [part for part in cmd if part != '']
-
- def _set_zone_colors(self, zones: Dict[str, ZoneConfig]):
- # set colors for zones
- all_zone_names = zones.keys()
- zone_colors = {}
- colors = plt.cm.get_cmap('tab10', len(all_zone_names))
- for i, zone in enumerate(all_zone_names):
- zone_colors[zone] = tuple(int(round(255 * c)) for c in colors(i)[:3])
-
- for name, zone in zones.items():
- zone.color = zone_colors[name]
-
- @property
- def name(self):
- return self._name
-
- @property
- def ffmpeg(self):
- return self._ffmpeg
-
- @property
- def height(self):
- return self._height
-
- @property
- def width(self):
- return self._width
-
- @property
- def fps(self):
- return self._fps
-
- @property
- def best_image_timeout(self):
- return self._best_image_timeout
-
- @property
- def zones(self)-> Dict[str, ZoneConfig]:
- return self._zones
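+        # per-input overrides fall back to the camera-level ffmpeg settings when not set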
+ global_args = ffmpeg_input.global_args or self.ffmpeg.global_args
+ hwaccel_args = ffmpeg_input.hwaccel_args or self.ffmpeg.hwaccel_args
+ input_args = ffmpeg_input.input_args or self.ffmpeg.input_args
- @property
- def clips(self):
- return self._clips
+ global_args = (
+ global_args if isinstance(global_args, list) else global_args.split(" ")
+ )
+ hwaccel_args = (
+ hwaccel_args if isinstance(hwaccel_args, list) else hwaccel_args.split(" ")
+ )
+ input_args = (
+ input_args if isinstance(input_args, list) else input_args.split(" ")
+ )
- @property
- def record(self):
- return self._record
+ cmd = (
+ ["ffmpeg"]
+ + global_args
+ + hwaccel_args
+ + input_args
+ + ["-i", ffmpeg_input.path]
+ + ffmpeg_output_args
+ )
- @property
- def rtmp(self):
- return self._rtmp
+ return [part for part in cmd if part != ""]
- @property
- def snapshots(self):
- return self._snapshots
- @property
- def mqtt(self):
- return self._mqtt
+class DatabaseConfig(FrigateBaseModel):
+ path: str = Field(
+ default=os.path.join(BASE_DIR, "frigate.db"), title="Database path."
+ )
- @property
- def objects(self):
- return self._objects
- @property
- def motion(self):
- return self._motion
+class ModelConfig(FrigateBaseModel):
+ path: Optional[str] = Field(title="Custom Object detection model path.")
+ labelmap_path: Optional[str] = Field(title="Label map for custom object detector.")
+ width: int = Field(default=320, title="Object detection model input width.")
+ height: int = Field(default=320, title="Object detection model input height.")
+ labelmap: Dict[int, str] = Field(
+ default_factory=dict, title="Labelmap customization."
+ )
+ _merged_labelmap: Optional[Dict[int, str]] = PrivateAttr()
+ _colormap: Dict[int, Tuple[int, int, int]] = PrivateAttr()
@property
- def detect(self):
- return self._detect
+ def merged_labelmap(self) -> Dict[int, str]:
+ return self._merged_labelmap
@property
- def frame_shape(self):
- return self._frame_shape
+    def colormap(self) -> Dict[int, Tuple[int, int, int]]:
+ return self._colormap
- @property
- def frame_shape_yuv(self):
- return self._frame_shape_yuv
+ def __init__(self, **config):
+ super().__init__(**config)
- @property
- def ffmpeg_cmds(self):
- return self._ffmpeg_cmds
-
- def to_dict(self):
- return {
- 'name': self.name,
- 'height': self.height,
- 'width': self.width,
- 'fps': self.fps,
- 'best_image_timeout': self.best_image_timeout,
- 'zones': {k: z.to_dict() for k, z in self.zones.items()},
- 'clips': self.clips.to_dict(),
- 'record': self.record.to_dict(),
- 'rtmp': self.rtmp.to_dict(),
- 'snapshots': self.snapshots.to_dict(),
- 'mqtt': self.mqtt.to_dict(),
- 'objects': self.objects.to_dict(),
- 'motion': self.motion.to_dict(),
- 'detect': self.detect.to_dict(),
- 'frame_shape': self.frame_shape,
- 'ffmpeg_cmds': [{'roles': c['roles'], 'cmd': ' '.join(c['cmd'])} for c in self.ffmpeg_cmds],
+ self._merged_labelmap = {
+ **load_labels(config.get("labelmap_path", "/labelmap.txt")),
+ **config.get("labelmap", {}),
}
-
-class FrigateConfig():
- def __init__(self, config_file=None, config=None):
- if config is None and config_file is None:
- raise ValueError('config or config_file must be defined')
- elif not config_file is None:
- config = self._load_file(config_file)
-
- config = FRIGATE_CONFIG_SCHEMA(config)
-
- config = self._sub_env_vars(config)
-
- self._database = DatabaseConfig(config['database'])
- self._model = ModelConfig(config['model'])
- self._detectors = { name: DetectorConfig(d) for name, d in config['detectors'].items() }
- self._mqtt = MqttConfig(config['mqtt'])
- self._clips = ClipsConfig(config['clips'])
- self._snapshots = SnapshotsConfig(config['snapshots'])
- self._cameras = { name: CameraConfig(name, c, config) for name, c in config['cameras'].items() }
- self._logger = LoggerConfig(config['logger'])
- self._environment_vars = config['environment_vars']
-
- def _sub_env_vars(self, config):
- frigate_env_vars = {k: v for k, v in os.environ.items() if k.startswith('FRIGATE_')}
-
- if 'password' in config['mqtt']:
- config['mqtt']['password'] = config['mqtt']['password'].format(**frigate_env_vars)
-
- for camera in config['cameras'].values():
- for i in camera['ffmpeg']['inputs']:
- i['path'] = i['path'].format(**frigate_env_vars)
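+        # assign a stable color to each label using matplotlib's tab10 colormap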
+ cmap = plt.cm.get_cmap("tab10", len(self._merged_labelmap.keys()))
+
+ self._colormap = {}
+ for key, val in self._merged_labelmap.items():
+ self._colormap[val] = tuple(int(round(255 * c)) for c in cmap(key)[:3])
+
+
+class LogLevelEnum(str, Enum):
+ debug = "debug"
+ info = "info"
+ warning = "warning"
+ error = "error"
+ critical = "critical"
+
+
+class LoggerConfig(FrigateBaseModel):
+ default: LogLevelEnum = Field(
+ default=LogLevelEnum.info, title="Default logging level."
+ )
+ logs: Dict[str, LogLevelEnum] = Field(
+ default_factory=dict, title="Log level for specified processes."
+ )
+
+
+class FrigateConfig(FrigateBaseModel):
+ mqtt: MqttConfig = Field(title="MQTT Configuration.")
+ database: DatabaseConfig = Field(
+ default_factory=DatabaseConfig, title="Database configuration."
+ )
+ environment_vars: Dict[str, str] = Field(
+ default_factory=dict, title="Frigate environment variables."
+ )
+ model: ModelConfig = Field(
+ default_factory=ModelConfig, title="Detection model configuration."
+ )
+ detectors: Dict[str, DetectorConfig] = Field(
+ default={name: DetectorConfig(**d) for name, d in DEFAULT_DETECTORS.items()},
+ title="Detector hardware configuration.",
+ )
+ logger: LoggerConfig = Field(
+ default_factory=LoggerConfig, title="Logging configuration."
+ )
+ record: RecordConfig = Field(
+ default_factory=RecordConfig, title="Global record configuration."
+ )
+ snapshots: SnapshotsConfig = Field(
+ default_factory=SnapshotsConfig, title="Global snapshots configuration."
+ )
+ live: CameraLiveConfig = Field(
+ default_factory=CameraLiveConfig, title="Global live configuration."
+ )
+ rtmp: RtmpConfig = Field(
+ default_factory=RtmpConfig, title="Global RTMP restreaming configuration."
+ )
+ birdseye: BirdseyeConfig = Field(
+ default_factory=BirdseyeConfig, title="Birdseye configuration."
+ )
+ ffmpeg: FfmpegConfig = Field(
+ default_factory=FfmpegConfig, title="Global FFmpeg configuration."
+ )
+ objects: ObjectConfig = Field(
+ default_factory=ObjectConfig, title="Global object configuration."
+ )
+ motion: Optional[MotionConfig] = Field(
+ title="Global motion detection configuration."
+ )
+ detect: DetectConfig = Field(
+ default_factory=DetectConfig, title="Global object tracking configuration."
+ )
+ cameras: Dict[str, CameraConfig] = Field(title="Camera configuration.")
+ timestamp_style: TimestampStyleConfig = Field(
+ default_factory=TimestampStyleConfig,
+ title="Global timestamp style configuration.",
+ )
+
+ @property
+ def runtime_config(self) -> FrigateConfig:
+ """Merge camera config with globals."""
+ config = self.copy(deep=True)
+
+ # MQTT password substitution
+ if config.mqtt.password:
+ config.mqtt.password = config.mqtt.password.format(**FRIGATE_ENV_VARS)
+
+        # Global config to propagate down to camera level
+ global_config = config.dict(
+ include={
+ "record": ...,
+ "snapshots": ...,
+ "live": ...,
+ "rtmp": ...,
+ "objects": ...,
+ "motion": ...,
+ "detect": ...,
+ "ffmpeg": ...,
+ "timestamp_style": ...,
+ },
+ exclude_unset=True,
+ )
+
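+        # merge each camera's explicitly-set options with the globals, then re-parse as a CameraConfig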
+ for name, camera in config.cameras.items():
+ merged_config = deep_merge(camera.dict(exclude_unset=True), global_config)
+ camera_config: CameraConfig = CameraConfig.parse_obj(
+ {"name": name, **merged_config}
+ )
+
+ # Default max_disappeared configuration
+ max_disappeared = camera_config.detect.fps * 5
+ if camera_config.detect.max_disappeared is None:
+ camera_config.detect.max_disappeared = max_disappeared
+
+ # FFMPEG input substitution
+ for input in camera_config.ffmpeg.inputs:
+ input.path = input.path.format(**FRIGATE_ENV_VARS)
+
+ # Add default filters
+ object_keys = camera_config.objects.track
+ if camera_config.objects.filters is None:
+ camera_config.objects.filters = {}
+ object_keys = object_keys - camera_config.objects.filters.keys()
+ for key in object_keys:
+ camera_config.objects.filters[key] = FilterConfig()
+
+ # Apply global object masks and convert masks to numpy array
+ for object, filter in camera_config.objects.filters.items():
+ if camera_config.objects.mask:
+ filter_mask = []
+ if filter.mask is not None:
+ filter_mask = (
+ filter.mask
+ if isinstance(filter.mask, list)
+ else [filter.mask]
+ )
+ object_mask = (
+ camera_config.objects.mask
+ if isinstance(camera_config.objects.mask, list)
+ else [camera_config.objects.mask]
+ )
+ filter.mask = filter_mask + object_mask
+
+ # Set runtime filter to create masks
+ camera_config.objects.filters[object] = RuntimeFilterConfig(
+ frame_shape=camera_config.frame_shape,
+ **filter.dict(exclude_unset=True),
+ )
+
+ # Convert motion configuration
+ if camera_config.motion is None:
+ camera_config.motion = RuntimeMotionConfig(
+ frame_shape=camera_config.frame_shape
+ )
+ else:
+ camera_config.motion = RuntimeMotionConfig(
+ frame_shape=camera_config.frame_shape,
+ raw_mask=camera_config.motion.mask,
+ **camera_config.motion.dict(exclude_unset=True),
+ )
+
+ config.cameras[name] = camera_config
return config
- def _load_file(self, config_file):
+ @validator("cameras")
+ def ensure_zones_and_cameras_have_different_names(cls, v: Dict[str, CameraConfig]):
+ zones = [zone for camera in v.values() for zone in camera.zones.keys()]
+ for zone in zones:
+ if zone in v.keys():
+ raise ValueError("Zones cannot share names with cameras")
+ return v
+
+ @classmethod
+ def parse_file(cls, config_file):
with open(config_file) as f:
raw_config = f.read()
@@ -1060,53 +809,4 @@ def _load_file(self, config_file):
elif config_file.endswith(".json"):
config = json.loads(raw_config)
- return config
-
- def to_dict(self):
- return {
- 'database': self.database.to_dict(),
- 'model': self.model.to_dict(),
- 'detectors': {k: d.to_dict() for k, d in self.detectors.items()},
- 'mqtt': self.mqtt.to_dict(),
- 'clips': self.clips.to_dict(),
- 'snapshots': self.snapshots.to_dict(),
- 'cameras': {k: c.to_dict() for k, c in self.cameras.items()},
- 'logger': self.logger.to_dict(),
- 'environment_vars': self._environment_vars
- }
-
- @property
- def database(self):
- return self._database
-
- @property
- def model(self):
- return self._model
-
- @property
- def detectors(self) -> Dict[str, DetectorConfig]:
- return self._detectors
-
- @property
- def logger(self):
- return self._logger
-
- @property
- def mqtt(self):
- return self._mqtt
-
- @property
- def clips(self):
- return self._clips
-
- @property
- def snapshots(self):
- return self._snapshots
-
- @property
- def cameras(self) -> Dict[str, CameraConfig]:
- return self._cameras
-
- @property
- def environment_vars(self):
- return self._environment_vars
+ return cls.parse_obj(config)
diff --git a/frigate/const.py b/frigate/const.py
--- a/frigate/const.py
+++ b/frigate/const.py
@@ -1,3 +1,4 @@
-CLIPS_DIR = '/media/frigate/clips'
-RECORD_DIR = '/media/frigate/recordings'
-CACHE_DIR = '/tmp/cache'
\ No newline at end of file
+BASE_DIR = "/media/frigate"
+CLIPS_DIR = f"{BASE_DIR}/clips"
+RECORD_DIR = f"{BASE_DIR}/recordings"
+CACHE_DIR = "/tmp/cache"
diff --git a/frigate/edgetpu.py b/frigate/edgetpu.py
--- a/frigate/edgetpu.py
+++ b/frigate/edgetpu.py
@@ -1,50 +1,51 @@
import datetime
-import hashlib
import logging
import multiprocessing as mp
import os
import queue
-import threading
import signal
+import threading
from abc import ABC, abstractmethod
-from multiprocessing.connection import Connection
-from setproctitle import setproctitle
from typing import Dict
import numpy as np
import tflite_runtime.interpreter as tflite
+from setproctitle import setproctitle
from tflite_runtime.interpreter import load_delegate
from frigate.util import EventsPerSecond, SharedMemoryFrameManager, listen
logger = logging.getLogger(__name__)
-def load_labels(path, encoding='utf-8'):
- """Loads labels from file (with or without index numbers).
- Args:
- path: path to label file.
- encoding: label file encoding.
- Returns:
- Dictionary mapping indices to labels.
- """
- with open(path, 'r', encoding=encoding) as f:
- lines = f.readlines()
- if not lines:
- return {}
-
- if lines[0].split(' ', maxsplit=1)[0].isdigit():
- pairs = [line.split(' ', maxsplit=1) for line in lines]
- return {int(index): label.strip() for index, label in pairs}
- else:
- return {index: line.strip() for index, line in enumerate(lines)}
+
+def load_labels(path, encoding="utf-8"):
+ """Loads labels from file (with or without index numbers).
+ Args:
+ path: path to label file.
+ encoding: label file encoding.
+ Returns:
+ Dictionary mapping indices to labels.
+ """
+ with open(path, "r", encoding=encoding) as f:
+ lines = f.readlines()
+ if not lines:
+ return {}
+
+ if lines[0].split(" ", maxsplit=1)[0].isdigit():
+ pairs = [line.split(" ", maxsplit=1) for line in lines]
+ return {int(index): label.strip() for index, label in pairs}
+ else:
+ return {index: line.strip() for index, line in enumerate(lines)}
+
class ObjectDetector(ABC):
@abstractmethod
- def detect(self, tensor_input, threshold = .4):
+ def detect(self, tensor_input, threshold=0.4):
pass
+
class LocalObjectDetector(ObjectDetector):
- def __init__(self, tf_device=None, num_threads=3, labels=None):
+ def __init__(self, tf_device=None, model_path=None, num_threads=3, labels=None):
self.fps = EventsPerSecond()
if labels is None:
self.labels = {}
@@ -57,27 +58,34 @@ def __init__(self, tf_device=None, num_threads=3, labels=None):
edge_tpu_delegate = None
- if tf_device != 'cpu':
+ if tf_device != "cpu":
try:
logger.info(f"Attempting to load TPU as {device_config['device']}")
- edge_tpu_delegate = load_delegate('libedgetpu.so.1.0', device_config)
+ edge_tpu_delegate = load_delegate("libedgetpu.so.1.0", device_config)
logger.info("TPU found")
self.interpreter = tflite.Interpreter(
- model_path='/edgetpu_model.tflite',
- experimental_delegates=[edge_tpu_delegate])
+ model_path=model_path or "/edgetpu_model.tflite",
+ experimental_delegates=[edge_tpu_delegate],
+ )
except ValueError:
- logger.info("No EdgeTPU detected.")
+ logger.error(
+ "No EdgeTPU was detected. If you do not have a Coral device yet, you must configure CPU detectors."
+ )
raise
else:
+ logger.warning(
+ "CPU detectors are not recommended and should only be used for testing or for trial purposes."
+ )
self.interpreter = tflite.Interpreter(
- model_path='/cpu_model.tflite', num_threads=num_threads)
-
+ model_path=model_path or "/cpu_model.tflite", num_threads=num_threads
+ )
+
self.interpreter.allocate_tensors()
self.tensor_input_details = self.interpreter.get_input_details()
self.tensor_output_details = self.interpreter.get_output_details()
-
- def detect(self, tensor_input, threshold=.4):
+
+ def detect(self, tensor_input, threshold=0.4):
detections = []
raw_detections = self.detect_raw(tensor_input)
@@ -85,28 +93,51 @@ def detect(self, tensor_input, threshold=.4):
for d in raw_detections:
if d[1] < threshold:
break
- detections.append((
- self.labels[int(d[0])],
- float(d[1]),
- (d[2], d[3], d[4], d[5])
- ))
+ detections.append(
+ (self.labels[int(d[0])], float(d[1]), (d[2], d[3], d[4], d[5]))
+ )
self.fps.update()
return detections
def detect_raw(self, tensor_input):
- self.interpreter.set_tensor(self.tensor_input_details[0]['index'], tensor_input)
+ self.interpreter.set_tensor(self.tensor_input_details[0]["index"], tensor_input)
self.interpreter.invoke()
- boxes = np.squeeze(self.interpreter.get_tensor(self.tensor_output_details[0]['index']))
- label_codes = np.squeeze(self.interpreter.get_tensor(self.tensor_output_details[1]['index']))
- scores = np.squeeze(self.interpreter.get_tensor(self.tensor_output_details[2]['index']))
-
- detections = np.zeros((20,6), np.float32)
- for i, score in enumerate(scores):
- detections[i] = [label_codes[i], score, boxes[i][0], boxes[i][1], boxes[i][2], boxes[i][3]]
-
+
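+        # read the four postprocess output tensors: boxes, class ids, scores and detection count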
+ boxes = self.interpreter.tensor(self.tensor_output_details[0]["index"])()[0]
+ class_ids = self.interpreter.tensor(self.tensor_output_details[1]["index"])()[0]
+ scores = self.interpreter.tensor(self.tensor_output_details[2]["index"])()[0]
+ count = int(
+ self.interpreter.tensor(self.tensor_output_details[3]["index"])()[0]
+ )
+
+ detections = np.zeros((20, 6), np.float32)
+
+ for i in range(count):
+ if scores[i] < 0.4 or i == 20:
+ break
+ detections[i] = [
+ class_ids[i],
+ float(scores[i]),
+ boxes[i][0],
+ boxes[i][1],
+ boxes[i][2],
+ boxes[i][3],
+ ]
+
return detections
-def run_detector(name: str, detection_queue: mp.Queue, out_events: Dict[str, mp.Event], avg_speed, start, model_shape, tf_device, num_threads):
+
+def run_detector(
+ name: str,
+ detection_queue: mp.Queue,
+ out_events: Dict[str, mp.Event],
+ avg_speed,
+ start,
+ model_path,
+ model_shape,
+ tf_device,
+ num_threads,
+):
threading.current_thread().name = f"detector:{name}"
logger = logging.getLogger(f"detector.{name}")
logger.info(f"Starting detection process: {os.getpid()}")
@@ -114,33 +145,32 @@ def run_detector(name: str, detection_queue: mp.Queue, out_events: Dict[str, mp.
listen()
stop_event = mp.Event()
+
def receiveSignal(signalNumber, frame):
stop_event.set()
-
+
signal.signal(signal.SIGTERM, receiveSignal)
signal.signal(signal.SIGINT, receiveSignal)
frame_manager = SharedMemoryFrameManager()
- object_detector = LocalObjectDetector(tf_device=tf_device, num_threads=num_threads)
+ object_detector = LocalObjectDetector(
+ tf_device=tf_device, model_path=model_path, num_threads=num_threads
+ )
outputs = {}
for name in out_events.keys():
out_shm = mp.shared_memory.SharedMemory(name=f"out-{name}", create=False)
- out_np = np.ndarray((20,6), dtype=np.float32, buffer=out_shm.buf)
- outputs[name] = {
- 'shm': out_shm,
- 'np': out_np
- }
-
- while True:
- if stop_event.is_set():
- break
+ out_np = np.ndarray((20, 6), dtype=np.float32, buffer=out_shm.buf)
+ outputs[name] = {"shm": out_shm, "np": out_np}
+ while not stop_event.is_set():
try:
connection_id = detection_queue.get(timeout=5)
except queue.Empty:
continue
- input_frame = frame_manager.get(connection_id, (1,model_shape[0],model_shape[1],3))
+ input_frame = frame_manager.get(
+ connection_id, (1, model_shape[0], model_shape[1], 3)
+ )
if input_frame is None:
continue
@@ -148,26 +178,37 @@ def receiveSignal(signalNumber, frame):
# detect and send the output
start.value = datetime.datetime.now().timestamp()
detections = object_detector.detect_raw(input_frame)
- duration = datetime.datetime.now().timestamp()-start.value
- outputs[connection_id]['np'][:] = detections[:]
+ duration = datetime.datetime.now().timestamp() - start.value
+ outputs[connection_id]["np"][:] = detections[:]
out_events[connection_id].set()
start.value = 0.0
- avg_speed.value = (avg_speed.value*9 + duration)/10
-
-class EdgeTPUProcess():
- def __init__(self, name, detection_queue, out_events, model_shape, tf_device=None, num_threads=3):
+ avg_speed.value = (avg_speed.value * 9 + duration) / 10
+
+
+class EdgeTPUProcess:
+ def __init__(
+ self,
+ name,
+ detection_queue,
+ out_events,
+ model_path,
+ model_shape,
+ tf_device=None,
+ num_threads=3,
+ ):
self.name = name
self.out_events = out_events
self.detection_queue = detection_queue
- self.avg_inference_speed = mp.Value('d', 0.01)
- self.detection_start = mp.Value('d', 0.0)
+ self.avg_inference_speed = mp.Value("d", 0.01)
+ self.detection_start = mp.Value("d", 0.0)
self.detect_process = None
+ self.model_path = model_path
self.model_shape = model_shape
self.tf_device = tf_device
self.num_threads = num_threads
self.start_or_restart()
-
+
def stop(self):
self.detect_process.terminate()
logging.info("Waiting for detection process to exit gracefully...")
@@ -181,23 +222,42 @@ def start_or_restart(self):
self.detection_start.value = 0.0
if (not self.detect_process is None) and self.detect_process.is_alive():
self.stop()
- self.detect_process = mp.Process(target=run_detector, name=f"detector:{self.name}", args=(self.name, self.detection_queue, self.out_events, self.avg_inference_speed, self.detection_start, self.model_shape, self.tf_device, self.num_threads))
+ self.detect_process = mp.Process(
+ target=run_detector,
+ name=f"detector:{self.name}",
+ args=(
+ self.name,
+ self.detection_queue,
+ self.out_events,
+ self.avg_inference_speed,
+ self.detection_start,
+ self.model_path,
+ self.model_shape,
+ self.tf_device,
+ self.num_threads,
+ ),
+ )
self.detect_process.daemon = True
self.detect_process.start()
-class RemoteObjectDetector():
+
+class RemoteObjectDetector:
def __init__(self, name, labels, detection_queue, event, model_shape):
- self.labels = load_labels(labels)
+ self.labels = labels
self.name = name
self.fps = EventsPerSecond()
self.detection_queue = detection_queue
self.event = event
self.shm = mp.shared_memory.SharedMemory(name=self.name, create=False)
- self.np_shm = np.ndarray((1,model_shape[0],model_shape[1],3), dtype=np.uint8, buffer=self.shm.buf)
- self.out_shm = mp.shared_memory.SharedMemory(name=f"out-{self.name}", create=False)
- self.out_np_shm = np.ndarray((20,6), dtype=np.float32, buffer=self.out_shm.buf)
-
- def detect(self, tensor_input, threshold=.4):
+ self.np_shm = np.ndarray(
+ (1, model_shape[0], model_shape[1], 3), dtype=np.uint8, buffer=self.shm.buf
+ )
+ self.out_shm = mp.shared_memory.SharedMemory(
+ name=f"out-{self.name}", create=False
+ )
+ self.out_np_shm = np.ndarray((20, 6), dtype=np.float32, buffer=self.out_shm.buf)
+
+ def detect(self, tensor_input, threshold=0.4):
detections = []
# copy input to shared memory
@@ -213,14 +273,12 @@ def detect(self, tensor_input, threshold=.4):
for d in self.out_np_shm:
if d[1] < threshold:
break
- detections.append((
- self.labels[int(d[0])],
- float(d[1]),
- (d[2], d[3], d[4], d[5])
- ))
+ detections.append(
+ (self.labels[int(d[0])], float(d[1]), (d[2], d[3], d[4], d[5]))
+ )
self.fps.update()
return detections
-
+
def cleanup(self):
self.shm.unlink()
self.out_shm.unlink()
diff --git a/frigate/events.py b/frigate/events.py
--- a/frigate/events.py
+++ b/frigate/events.py
@@ -1,29 +1,26 @@
import datetime
-import json
import logging
import os
import queue
-import subprocess as sp
import threading
import time
-from collections import defaultdict
from pathlib import Path
-import psutil
-import shutil
+from peewee import fn
-from frigate.config import FrigateConfig
-from frigate.const import RECORD_DIR, CLIPS_DIR, CACHE_DIR
+from frigate.config import EventsConfig, FrigateConfig, RecordConfig
+from frigate.const import CLIPS_DIR
from frigate.models import Event
-from peewee import fn
-
logger = logging.getLogger(__name__)
+
class EventProcessor(threading.Thread):
- def __init__(self, config, camera_processes, event_queue, event_processed_queue, stop_event):
+ def __init__(
+ self, config, camera_processes, event_queue, event_processed_queue, stop_event
+ ):
threading.Thread.__init__(self)
- self.name = 'event_processor'
+ self.name = "event_processor"
self.config = config
self.camera_processes = camera_processes
self.cached_clips = {}
@@ -32,292 +29,153 @@ def __init__(self, config, camera_processes, event_queue, event_processed_queue,
self.events_in_process = {}
self.stop_event = stop_event
- def should_create_clip(self, camera, event_data):
- if event_data['false_positive']:
- return False
-
- # if there are required zones and there is no overlap
- required_zones = self.config.cameras[camera].clips.required_zones
- if len(required_zones) > 0 and not set(event_data['entered_zones']) & set(required_zones):
- logger.debug(f"Not creating clip for {event_data['id']} because it did not enter required zones")
- return False
-
- return True
-
- def refresh_cache(self):
- cached_files = os.listdir(CACHE_DIR)
-
- files_in_use = []
- for process in psutil.process_iter():
- try:
- if process.name() != 'ffmpeg':
- continue
-
- flist = process.open_files()
- if flist:
- for nt in flist:
- if nt.path.startswith(CACHE_DIR):
- files_in_use.append(nt.path.split('/')[-1])
- except:
- continue
-
- for f in cached_files:
- if f in files_in_use or f in self.cached_clips:
- continue
-
- camera = '-'.join(f.split('-')[:-1])
- start_time = datetime.datetime.strptime(f.split('-')[-1].split('.')[0], '%Y%m%d%H%M%S')
-
- ffprobe_cmd = " ".join([
- 'ffprobe',
- '-v',
- 'error',
- '-show_entries',
- 'format=duration',
- '-of',
- 'default=noprint_wrappers=1:nokey=1',
- f"{os.path.join(CACHE_DIR,f)}"
- ])
- p = sp.Popen(ffprobe_cmd, stdout=sp.PIPE, shell=True)
- (output, err) = p.communicate()
- p_status = p.wait()
- if p_status == 0:
- duration = float(output.decode('utf-8').strip())
- else:
- logger.info(f"bad file: {f}")
- os.remove(os.path.join(CACHE_DIR,f))
- continue
-
- self.cached_clips[f] = {
- 'path': f,
- 'camera': camera,
- 'start_time': start_time.timestamp(),
- 'duration': duration
- }
-
- if len(self.events_in_process) > 0:
- earliest_event = min(self.events_in_process.values(), key=lambda x:x['start_time'])['start_time']
- else:
- earliest_event = datetime.datetime.now().timestamp()
-
- # if the earliest event exceeds the max seconds, cap it
- max_seconds = self.config.clips.max_seconds
- if datetime.datetime.now().timestamp()-earliest_event > max_seconds:
- earliest_event = datetime.datetime.now().timestamp()-max_seconds
-
- for f, data in list(self.cached_clips.items()):
- if earliest_event-90 > data['start_time']+data['duration']:
- del self.cached_clips[f]
- logger.debug(f"Cleaning up cached file {f}")
- os.remove(os.path.join(CACHE_DIR,f))
-
- # if we are still using more than 90% of the cache, proactively cleanup
- cache_usage = shutil.disk_usage("/tmp/cache")
- if cache_usage.used/cache_usage.total > .9 and cache_usage.free < 200000000 and len(self.cached_clips) > 0:
- logger.warning("More than 90% of the cache is used.")
- logger.warning("Consider increasing space available at /tmp/cache or reducing max_seconds in your clips config.")
- logger.warning("Proactively cleaning up the cache...")
- while cache_usage.used/cache_usage.total > .9:
- oldest_clip = min(self.cached_clips.values(), key=lambda x:x['start_time'])
- del self.cached_clips[oldest_clip['path']]
- os.remove(os.path.join(CACHE_DIR,oldest_clip['path']))
- cache_usage = shutil.disk_usage("/tmp/cache")
-
- def create_clip(self, camera, event_data, pre_capture, post_capture):
- # get all clips from the camera with the event sorted
- sorted_clips = sorted([c for c in self.cached_clips.values() if c['camera'] == camera], key = lambda i: i['start_time'])
-
- # if there are no clips in the cache or we are still waiting on a needed file check every 5 seconds
- wait_count = 0
- while len(sorted_clips) == 0 or sorted_clips[-1]['start_time'] + sorted_clips[-1]['duration'] < event_data['end_time']+post_capture:
- if wait_count > 4:
- logger.warning(f"Unable to create clip for {camera} and event {event_data['id']}. There were no cache files for this event.")
- return False
- logger.debug(f"No cache clips for {camera}. Waiting...")
- time.sleep(5)
- self.refresh_cache()
- # get all clips from the camera with the event sorted
- sorted_clips = sorted([c for c in self.cached_clips.values() if c['camera'] == camera], key = lambda i: i['start_time'])
- wait_count += 1
-
- playlist_start = event_data['start_time']-pre_capture
- playlist_end = event_data['end_time']+post_capture
- playlist_lines = []
- for clip in sorted_clips:
- # clip ends before playlist start time, skip
- if clip['start_time']+clip['duration'] < playlist_start:
- continue
- # clip starts after playlist ends, finish
- if clip['start_time'] > playlist_end:
- break
- playlist_lines.append(f"file '{os.path.join(CACHE_DIR,clip['path'])}'")
- # if this is the starting clip, add an inpoint
- if clip['start_time'] < playlist_start:
- playlist_lines.append(f"inpoint {int(playlist_start-clip['start_time'])}")
- # if this is the ending clip, add an outpoint
- if clip['start_time']+clip['duration'] > playlist_end:
- playlist_lines.append(f"outpoint {int(playlist_end-clip['start_time'])}")
-
- clip_name = f"{camera}-{event_data['id']}"
- ffmpeg_cmd = [
- 'ffmpeg',
- '-y',
- '-protocol_whitelist',
- 'pipe,file',
- '-f',
- 'concat',
- '-safe',
- '0',
- '-i',
- '-',
- '-c',
- 'copy',
- '-movflags',
- '+faststart',
- f"{os.path.join(CLIPS_DIR, clip_name)}.mp4"
- ]
-
- p = sp.run(ffmpeg_cmd, input="\n".join(playlist_lines), encoding='ascii', capture_output=True)
- if p.returncode != 0:
- logger.error(p.stderr)
- return False
- return True
-
def run(self):
- while True:
- if self.stop_event.is_set():
- logger.info(f"Exiting event processor...")
- break
-
+ while not self.stop_event.is_set():
try:
event_type, camera, event_data = self.event_queue.get(timeout=10)
except queue.Empty:
- if not self.stop_event.is_set():
- self.refresh_cache()
continue
logger.debug(f"Event received: {event_type} {camera} {event_data['id']}")
- self.refresh_cache()
- if event_type == 'start':
- self.events_in_process[event_data['id']] = event_data
+ if event_type == "start":
+ self.events_in_process[event_data["id"]] = event_data
- if event_type == 'end':
- clips_config = self.config.cameras[camera].clips
+ if event_type == "end":
+ event_config: EventsConfig = self.config.cameras[camera].record.events
- clip_created = False
- if self.should_create_clip(camera, event_data):
- if clips_config.enabled and (clips_config.objects is None or event_data['label'] in clips_config.objects):
- clip_created = self.create_clip(camera, event_data, clips_config.pre_capture, clips_config.post_capture)
-
- if clip_created or event_data['has_snapshot']:
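+                # clips are no longer assembled by the event processor; persist events that already have a clip or snapshot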
+ if event_data["has_clip"] or event_data["has_snapshot"]:
Event.create(
- id=event_data['id'],
- label=event_data['label'],
+ id=event_data["id"],
+ label=event_data["label"],
camera=camera,
- start_time=event_data['start_time'],
- end_time=event_data['end_time'],
- top_score=event_data['top_score'],
- false_positive=event_data['false_positive'],
- zones=list(event_data['entered_zones']),
- thumbnail=event_data['thumbnail'],
- has_clip=clip_created,
- has_snapshot=event_data['has_snapshot'],
+ start_time=event_data["start_time"] - event_config.pre_capture,
+ end_time=event_data["end_time"] + event_config.post_capture,
+ top_score=event_data["top_score"],
+ false_positive=event_data["false_positive"],
+ zones=list(event_data["entered_zones"]),
+ thumbnail=event_data["thumbnail"],
+ region=event_data["region"],
+ box=event_data["box"],
+ area=event_data["area"],
+ has_clip=event_data["has_clip"],
+ has_snapshot=event_data["has_snapshot"],
)
- del self.events_in_process[event_data['id']]
- self.event_processed_queue.put((event_data['id'], camera))
+
+ del self.events_in_process[event_data["id"]]
+ self.event_processed_queue.put((event_data["id"], camera))
+
+ logger.info(f"Exiting event processor...")
+
class EventCleanup(threading.Thread):
def __init__(self, config: FrigateConfig, stop_event):
threading.Thread.__init__(self)
- self.name = 'event_cleanup'
+ self.name = "event_cleanup"
self.config = config
self.stop_event = stop_event
self.camera_keys = list(self.config.cameras.keys())
- def expire(self, media):
+ def expire(self, media_type):
## Expire events from unlisted cameras based on the global config
- if media == 'clips':
- retain_config = self.config.clips.retain
- file_extension = 'mp4'
- update_params = {'has_clip': False}
+ if media_type == "clips":
+ retain_config = self.config.record.events.retain
+ file_extension = "mp4"
+ update_params = {"has_clip": False}
else:
retain_config = self.config.snapshots.retain
- file_extension = 'jpg'
- update_params = {'has_snapshot': False}
-
- distinct_labels = (Event.select(Event.label)
- .where(Event.camera.not_in(self.camera_keys))
- .distinct())
-
+ file_extension = "jpg"
+ update_params = {"has_snapshot": False}
+
+ distinct_labels = (
+ Event.select(Event.label)
+ .where(Event.camera.not_in(self.camera_keys))
+ .distinct()
+ )
+
# loop over object types in db
for l in distinct_labels:
# get expiration time for this label
expire_days = retain_config.objects.get(l.label, retain_config.default)
- expire_after = (datetime.datetime.now() - datetime.timedelta(days=expire_days)).timestamp()
+ expire_after = (
+ datetime.datetime.now() - datetime.timedelta(days=expire_days)
+ ).timestamp()
# grab all events after specific time
- expired_events = (
- Event.select()
- .where(Event.camera.not_in(self.camera_keys),
- Event.start_time < expire_after,
- Event.label == l.label)
+ expired_events = Event.select().where(
+ Event.camera.not_in(self.camera_keys),
+ Event.start_time < expire_after,
+ Event.label == l.label,
)
# delete the media from disk
for event in expired_events:
media_name = f"{event.camera}-{event.id}"
- media = Path(f"{os.path.join(CLIPS_DIR, media_name)}.{file_extension}")
- media.unlink(missing_ok=True)
+ media_path = Path(
+ f"{os.path.join(CLIPS_DIR, media_name)}.{file_extension}"
+ )
+ media_path.unlink(missing_ok=True)
+ if file_extension == "jpg":
+ media_path = Path(
+ f"{os.path.join(CLIPS_DIR, media_name)}-clean.png"
+ )
+ media_path.unlink(missing_ok=True)
+
# update the clips attribute for the db entry
- update_query = (
- Event.update(update_params)
- .where(Event.camera.not_in(self.camera_keys),
- Event.start_time < expire_after,
- Event.label == l.label)
+ update_query = Event.update(update_params).where(
+ Event.camera.not_in(self.camera_keys),
+ Event.start_time < expire_after,
+ Event.label == l.label,
)
update_query.execute()
## Expire events from cameras based on the camera config
for name, camera in self.config.cameras.items():
- if media == 'clips':
- retain_config = camera.clips.retain
+ if media_type == "clips":
+ retain_config = camera.record.events.retain
else:
retain_config = camera.snapshots.retain
# get distinct objects in database for this camera
- distinct_labels = (Event.select(Event.label)
- .where(Event.camera == name)
- .distinct())
+ distinct_labels = (
+ Event.select(Event.label).where(Event.camera == name).distinct()
+ )
# loop over object types in db
for l in distinct_labels:
# get expiration time for this label
expire_days = retain_config.objects.get(l.label, retain_config.default)
- expire_after = (datetime.datetime.now() - datetime.timedelta(days=expire_days)).timestamp()
+ expire_after = (
+ datetime.datetime.now() - datetime.timedelta(days=expire_days)
+ ).timestamp()
# grab all events after specific time
- expired_events = (
- Event.select()
- .where(Event.camera == name,
- Event.start_time < expire_after,
- Event.label == l.label)
+ expired_events = Event.select().where(
+ Event.camera == name,
+ Event.start_time < expire_after,
+ Event.label == l.label,
)
# delete the grabbed clips from disk
for event in expired_events:
media_name = f"{event.camera}-{event.id}"
- media = Path(f"{os.path.join(CLIPS_DIR, media_name)}.{file_extension}")
- media.unlink(missing_ok=True)
+ media_path = Path(
+ f"{os.path.join(CLIPS_DIR, media_name)}.{file_extension}"
+ )
+ media_path.unlink(missing_ok=True)
+ if file_extension == "jpg":
+ media_path = Path(
+ f"{os.path.join(CLIPS_DIR, media_name)}-clean.png"
+ )
+ media_path.unlink(missing_ok=True)
# update the clips attribute for the db entry
- update_query = (
- Event.update(update_params)
- .where( Event.camera == name,
- Event.start_time < expire_after,
- Event.label == l.label)
+ update_query = Event.update(update_params).where(
+ Event.camera == name,
+ Event.start_time < expire_after,
+ Event.label == l.label,
)
update_query.execute()
def purge_duplicates(self):
duplicate_query = """with grouped_events as (
select id,
- label,
- camera,
+ label,
+ camera,
has_snapshot,
has_clip,
row_number() over (
@@ -327,46 +185,37 @@ def purge_duplicates(self):
from event
)
- select distinct id, camera, has_snapshot, has_clip from grouped_events
+ select distinct id, camera, has_snapshot, has_clip from grouped_events
where copy_number > 1;"""
duplicate_events = Event.raw(duplicate_query)
for event in duplicate_events:
logger.debug(f"Removing duplicate: {event.id}")
media_name = f"{event.camera}-{event.id}"
- if event.has_snapshot:
- media = Path(f"{os.path.join(CLIPS_DIR, media_name)}.jpg")
- media.unlink(missing_ok=True)
- if event.has_clip:
- media = Path(f"{os.path.join(CLIPS_DIR, media_name)}.mp4")
- media.unlink(missing_ok=True)
+ media_path = Path(f"{os.path.join(CLIPS_DIR, media_name)}.jpg")
+ media_path.unlink(missing_ok=True)
+ media_path = Path(f"{os.path.join(CLIPS_DIR, media_name)}-clean.png")
+ media_path.unlink(missing_ok=True)
+ media_path = Path(f"{os.path.join(CLIPS_DIR, media_name)}.mp4")
+ media_path.unlink(missing_ok=True)
+
+ (
+ Event.delete()
+ .where(Event.id << [event.id for event in duplicate_events])
+ .execute()
+ )
- (Event.delete()
- .where( Event.id << [event.id for event in duplicate_events] )
- .execute())
-
def run(self):
- counter = 0
- while(True):
- if self.stop_event.is_set():
- logger.info(f"Exiting event cleanup...")
- break
-
- # only expire events every 5 minutes, but check for stop events every 10 seconds
- time.sleep(10)
- counter = counter + 1
- if counter < 30:
- continue
- counter = 0
-
- self.expire('clips')
- self.expire('snapshots')
+ # only expire events every 5 minutes
+ while not self.stop_event.wait(300):
+ self.expire("clips")
+ self.expire("snapshots")
self.purge_duplicates()
# drop events from db where has_clip and has_snapshot are false
- delete_query = (
- Event.delete()
- .where( Event.has_clip == False,
- Event.has_snapshot == False)
+ delete_query = Event.delete().where(
+ Event.has_clip == False, Event.has_snapshot == False
)
delete_query.execute()
+
+ logger.info(f"Exiting event cleanup...")
diff --git a/frigate/http.py b/frigate/http.py
--- a/frigate/http.py
+++ b/frigate/http.py
@@ -1,92 +1,56 @@
import base64
-import datetime
+from collections import OrderedDict
+from datetime import datetime, timedelta
import json
+import glob
import logging
import os
+import re
+import subprocess as sp
import time
from functools import reduce
+from pathlib import Path
import cv2
-import gevent
+from flask.helpers import send_file
+
import numpy as np
-from flask import (Blueprint, Flask, Response, current_app, jsonify,
- make_response, request)
-from flask_sockets import Sockets
-from peewee import SqliteDatabase, operator, fn, DoesNotExist
+from flask import (
+ Blueprint,
+ Flask,
+ Response,
+ current_app,
+ jsonify,
+ make_response,
+ request,
+)
+
+from peewee import SqliteDatabase, operator, fn, DoesNotExist, Value
from playhouse.shortcuts import model_to_dict
-from frigate.const import CLIPS_DIR
-from frigate.models import Event
+from frigate.const import CLIPS_DIR, RECORD_DIR
+from frigate.models import Event, Recordings
from frigate.stats import stats_snapshot
from frigate.util import calculate_region
from frigate.version import VERSION
logger = logging.getLogger(__name__)
-bp = Blueprint('frigate', __name__)
-ws = Blueprint('ws', __name__)
-
-class MqttBackend():
- """Interface for registering and updating WebSocket clients."""
-
- def __init__(self, mqtt_client, topic_prefix):
- self.clients = list()
- self.mqtt_client = mqtt_client
- self.topic_prefix = topic_prefix
+bp = Blueprint("frigate", __name__)
- def register(self, client):
- """Register a WebSocket connection for Mqtt updates."""
- self.clients.append(client)
- def publish(self, message):
- try:
- json_message = json.loads(message)
- json_message = {
- 'topic': f"{self.topic_prefix}/{json_message['topic']}",
- 'payload': json_message['payload'],
- 'retain': json_message.get('retain', False)
- }
- except:
- logger.warning("Unable to parse websocket message as valid json.")
- return
-
- logger.debug(f"Publishing mqtt message from websockets at {json_message['topic']}.")
- self.mqtt_client.publish(json_message['topic'], json_message['payload'], retain=json_message['retain'])
-
- def run(self):
- def send(client, userdata, message):
- """Sends mqtt messages to clients."""
- try:
- logger.debug(f"Received mqtt message on {message.topic}.")
- ws_message = json.dumps({
- 'topic': message.topic.replace(f"{self.topic_prefix}/",""),
- 'payload': message.payload.decode()
- })
- except:
- # if the payload can't be decoded don't relay to clients
- logger.debug(f"MQTT payload for {message.topic} wasn't text. Skipping...")
- return
-
- for client in self.clients:
- try:
- client.send(ws_message)
- except:
- logger.debug("Removing websocket client due to a closed connection.")
- self.clients.remove(client)
-
- self.mqtt_client.message_callback_add(f"{self.topic_prefix}/#", send)
-
- def start(self):
- """Maintains mqtt subscription in the background."""
- gevent.spawn(self.run)
-
-def create_app(frigate_config, database: SqliteDatabase, stats_tracking, detected_frames_processor, mqtt_client):
+def create_app(
+ frigate_config,
+ database: SqliteDatabase,
+ stats_tracking,
+ detected_frames_processor,
+):
app = Flask(__name__)
- sockets = Sockets(app)
@app.before_request
def _db_connect():
- database.connect()
+ if database.is_closed():
+ database.connect()
@app.teardown_request
def _db_close(exc):
@@ -98,21 +62,19 @@ def _db_close(exc):
app.detected_frames_processor = detected_frames_processor
app.register_blueprint(bp)
- sockets.register_blueprint(ws)
-
- app.mqtt_backend = MqttBackend(mqtt_client, frigate_config.mqtt.topic_prefix)
- app.mqtt_backend.start()
return app
[email protected]('/')
+
[email protected]("/")
def is_healthy():
return "Frigate is running. Alive and healthy!"
[email protected]('/events/summary')
+
[email protected]("/events/summary")
def events_summary():
- has_clip = request.args.get('has_clip', type=int)
- has_snapshot = request.args.get('has_snapshot', type=int)
+ has_clip = request.args.get("has_clip", type=int)
+ has_snapshot = request.args.get("has_snapshot", type=int)
clauses = []
@@ -123,38 +85,66 @@ def events_summary():
clauses.append((Event.has_snapshot == has_snapshot))
if len(clauses) == 0:
- clauses.append((1 == 1))
+ clauses.append((True))
groups = (
- Event
- .select(
- Event.camera,
- Event.label,
- fn.strftime('%Y-%m-%d', fn.datetime(Event.start_time, 'unixepoch', 'localtime')).alias('day'),
- Event.zones,
- fn.COUNT(Event.id).alias('count')
- )
- .where(reduce(operator.and_, clauses))
- .group_by(
- Event.camera,
- Event.label,
- fn.strftime('%Y-%m-%d', fn.datetime(Event.start_time, 'unixepoch', 'localtime')),
- Event.zones
- )
+ Event.select(
+ Event.camera,
+ Event.label,
+ fn.strftime(
+ "%Y-%m-%d", fn.datetime(Event.start_time, "unixepoch", "localtime")
+ ).alias("day"),
+ Event.zones,
+ fn.COUNT(Event.id).alias("count"),
+ )
+ .where(reduce(operator.and_, clauses))
+ .group_by(
+ Event.camera,
+ Event.label,
+ fn.strftime(
+ "%Y-%m-%d", fn.datetime(Event.start_time, "unixepoch", "localtime")
+ ),
+ Event.zones,
)
+ )
return jsonify([e for e in groups.dicts()])
[email protected]('/events/<id>')
+
[email protected]("/events/<id>", methods=("GET",))
def event(id):
try:
return model_to_dict(Event.get(Event.id == id))
except DoesNotExist:
return "Event not found", 404
[email protected]('/events/<id>/thumbnail.jpg')
+
[email protected]("/events/<id>", methods=("DELETE",))
+def delete_event(id):
+ try:
+ event = Event.get(Event.id == id)
+ except DoesNotExist:
+ return make_response(
+ jsonify({"success": False, "message": "Event" + id + " not found"}), 404
+ )
+
+ media_name = f"{event.camera}-{event.id}"
+ if event.has_snapshot:
+ media = Path(f"{os.path.join(CLIPS_DIR, media_name)}.jpg")
+ media.unlink(missing_ok=True)
+ if event.has_clip:
+ media = Path(f"{os.path.join(CLIPS_DIR, media_name)}.mp4")
+ media.unlink(missing_ok=True)
+
+ event.delete_instance()
+ return make_response(
+ jsonify({"success": True, "message": "Event" + id + " deleted"}), 200
+ )
+
+
[email protected]("/events/<id>/thumbnail.jpg")
def event_thumbnail(id):
- format = request.args.get('format', 'ios')
+ format = request.args.get("format", "ios")
thumbnail_bytes = None
try:
event = Event.get(Event.id == id)
@@ -162,7 +152,8 @@ def event_thumbnail(id):
except DoesNotExist:
# see if the object is currently being tracked
try:
- for camera_state in current_app.detected_frames_processor.camera_states.values():
+ camera_states = current_app.detected_frames_processor.camera_states.values()
+ for camera_state in camera_states:
if id in camera_state.tracked_objects:
tracked_obj = camera_state.tracked_objects.get(id)
if not tracked_obj is None:
@@ -174,60 +165,114 @@ def event_thumbnail(id):
return "Event not found", 404
# android notifications prefer a 2:1 ratio
- if format == 'android':
+ if format == "android":
jpg_as_np = np.frombuffer(thumbnail_bytes, dtype=np.uint8)
img = cv2.imdecode(jpg_as_np, flags=1)
- thumbnail = cv2.copyMakeBorder(img, 0, 0, int(img.shape[1]*0.5), int(img.shape[1]*0.5), cv2.BORDER_CONSTANT, (0,0,0))
- ret, jpg = cv2.imencode('.jpg', thumbnail, [int(cv2.IMWRITE_JPEG_QUALITY), 70])
+ thumbnail = cv2.copyMakeBorder(
+ img,
+ 0,
+ 0,
+ int(img.shape[1] * 0.5),
+ int(img.shape[1] * 0.5),
+ cv2.BORDER_CONSTANT,
+ (0, 0, 0),
+ )
+ ret, jpg = cv2.imencode(".jpg", thumbnail, [int(cv2.IMWRITE_JPEG_QUALITY), 70])
thumbnail_bytes = jpg.tobytes()
response = make_response(thumbnail_bytes)
- response.headers['Content-Type'] = 'image/jpg'
+ response.headers["Content-Type"] = "image/jpg"
return response
[email protected]('/events/<id>/snapshot.jpg')
+
[email protected]("/events/<id>/snapshot.jpg")
def event_snapshot(id):
+ download = request.args.get("download", type=bool)
jpg_bytes = None
try:
event = Event.get(Event.id == id)
if not event.has_snapshot:
return "Snapshot not available", 404
# read snapshot from disk
- with open(os.path.join(CLIPS_DIR, f"{event.camera}-{id}.jpg"), 'rb') as image_file:
+ with open(
+ os.path.join(CLIPS_DIR, f"{event.camera}-{id}.jpg"), "rb"
+ ) as image_file:
jpg_bytes = image_file.read()
except DoesNotExist:
# see if the object is currently being tracked
try:
- for camera_state in current_app.detected_frames_processor.camera_states.values():
+ camera_states = current_app.detected_frames_processor.camera_states.values()
+ for camera_state in camera_states:
if id in camera_state.tracked_objects:
tracked_obj = camera_state.tracked_objects.get(id)
if not tracked_obj is None:
jpg_bytes = tracked_obj.get_jpg_bytes(
- timestamp=request.args.get('timestamp', type=int),
- bounding_box=request.args.get('bbox', type=int),
- crop=request.args.get('crop', type=int),
- height=request.args.get('h', type=int)
+ timestamp=request.args.get("timestamp", type=int),
+ bounding_box=request.args.get("bbox", type=int),
+ crop=request.args.get("crop", type=int),
+ height=request.args.get("h", type=int),
+ quality=request.args.get("quality", default=70, type=int),
)
except:
return "Event not found", 404
except:
return "Event not found", 404
+ if jpg_bytes is None:
+ return "Event not found", 404
+
response = make_response(jpg_bytes)
- response.headers['Content-Type'] = 'image/jpg'
+ response.headers["Content-Type"] = "image/jpg"
+ if download:
+ response.headers[
+ "Content-Disposition"
+ ] = f"attachment; filename=snapshot-{id}.jpg"
+ return response
+
+
[email protected]("/events/<id>/clip.mp4")
+def event_clip(id):
+ download = request.args.get("download", type=bool)
+
+ try:
+ event: Event = Event.get(Event.id == id)
+ except DoesNotExist:
+ return "Event not found.", 404
+
+ if not event.has_clip:
+ return "Clip not available", 404
+
+ file_name = f"{event.camera}-{id}.mp4"
+ clip_path = os.path.join(CLIPS_DIR, file_name)
+
+ if not os.path.isfile(clip_path):
+ return recording_clip(event.camera, event.start_time, event.end_time)
+
+ response = make_response()
+ response.headers["Content-Description"] = "File Transfer"
+ response.headers["Cache-Control"] = "no-cache"
+ response.headers["Content-Type"] = "video/mp4"
+ if download:
+ response.headers["Content-Disposition"] = "attachment; filename=%s" % file_name
+ response.headers["Content-Length"] = os.path.getsize(clip_path)
+ response.headers[
+ "X-Accel-Redirect"
+ ] = f"/clips/{file_name}" # nginx: http://wiki.nginx.org/NginxXSendfile
+
return response
[email protected]('/events')
+
[email protected]("/events")
def events():
- limit = request.args.get('limit', 100)
- camera = request.args.get('camera')
- label = request.args.get('label')
- zone = request.args.get('zone')
- after = request.args.get('after', type=float)
- before = request.args.get('before', type=float)
- has_clip = request.args.get('has_clip', type=int)
- has_snapshot = request.args.get('has_snapshot', type=int)
- include_thumbnails = request.args.get('include_thumbnails', default=1, type=int)
+ limit = request.args.get("limit", 100)
+ camera = request.args.get("camera")
+ label = request.args.get("label")
+ zone = request.args.get("zone")
+ after = request.args.get("after", type=float)
+ before = request.args.get("before", type=float)
+ has_clip = request.args.get("has_clip", type=int)
+ has_snapshot = request.args.get("has_snapshot", type=int)
+ include_thumbnails = request.args.get("include_thumbnails", default=1, type=int)
clauses = []
excluded_fields = []
@@ -239,7 +284,7 @@ def events():
clauses.append((Event.label == label))
if zone:
- clauses.append((Event.zones.cast('text') % f"*\"{zone}\"*"))
+ clauses.append((Event.zones.cast("text") % f'*"{zone}"*'))
if after:
clauses.append((Event.start_time >= after))
@@ -257,125 +302,427 @@ def events():
excluded_fields.append(Event.thumbnail)
if len(clauses) == 0:
- clauses.append((1 == 1))
+ clauses.append((True))
- events = (Event.select()
- .where(reduce(operator.and_, clauses))
- .order_by(Event.start_time.desc())
- .limit(limit))
+ events = (
+ Event.select()
+ .where(reduce(operator.and_, clauses))
+ .order_by(Event.start_time.desc())
+ .limit(limit)
+ )
return jsonify([model_to_dict(e, exclude=excluded_fields) for e in events])
[email protected]('/config')
+
[email protected]("/config")
def config():
- return jsonify(current_app.frigate_config.to_dict())
+ config = current_app.frigate_config.dict()
+
+ # add in the ffmpeg_cmds
+ for camera_name, camera in current_app.frigate_config.cameras.items():
+ camera_dict = config["cameras"][camera_name]
+ camera_dict["ffmpeg_cmds"] = camera.ffmpeg_cmds
+ for cmd in camera_dict["ffmpeg_cmds"]:
+ cmd["cmd"] = " ".join(cmd["cmd"])
[email protected]('/version')
+ return jsonify(config)
+
+
[email protected]("/config/schema")
+def config_schema():
+ return current_app.response_class(
+ current_app.frigate_config.schema_json(), mimetype="application/json"
+ )
+
+
[email protected]("/version")
def version():
return VERSION
[email protected]('/stats')
+
[email protected]("/stats")
def stats():
stats = stats_snapshot(current_app.stats_tracking)
return jsonify(stats)
[email protected]('/<camera_name>/<label>/best.jpg')
+
[email protected]("/<camera_name>/<label>/best.jpg")
def best(camera_name, label):
if camera_name in current_app.frigate_config.cameras:
best_object = current_app.detected_frames_processor.get_best(camera_name, label)
- best_frame = best_object.get('frame')
+ best_frame = best_object.get("frame")
if best_frame is None:
- best_frame = np.zeros((720,1280,3), np.uint8)
+ best_frame = np.zeros((720, 1280, 3), np.uint8)
else:
best_frame = cv2.cvtColor(best_frame, cv2.COLOR_YUV2BGR_I420)
- crop = bool(request.args.get('crop', 0, type=int))
+ crop = bool(request.args.get("crop", 0, type=int))
if crop:
- box = best_object.get('box', (0,0,300,300))
- region = calculate_region(best_frame.shape, box[0], box[1], box[2], box[3], 1.1)
- best_frame = best_frame[region[1]:region[3], region[0]:region[2]]
+ box = best_object.get("box", (0, 0, 300, 300))
+ region = calculate_region(
+ best_frame.shape, box[0], box[1], box[2], box[3], 1.1
+ )
+ best_frame = best_frame[region[1] : region[3], region[0] : region[2]]
- height = int(request.args.get('h', str(best_frame.shape[0])))
- width = int(height*best_frame.shape[1]/best_frame.shape[0])
+ height = int(request.args.get("h", str(best_frame.shape[0])))
+ width = int(height * best_frame.shape[1] / best_frame.shape[0])
+ resize_quality = request.args.get("quality", default=70, type=int)
- best_frame = cv2.resize(best_frame, dsize=(width, height), interpolation=cv2.INTER_AREA)
- ret, jpg = cv2.imencode('.jpg', best_frame, [int(cv2.IMWRITE_JPEG_QUALITY), 70])
+ best_frame = cv2.resize(
+ best_frame, dsize=(width, height), interpolation=cv2.INTER_AREA
+ )
+ ret, jpg = cv2.imencode(
+ ".jpg", best_frame, [int(cv2.IMWRITE_JPEG_QUALITY), resize_quality]
+ )
response = make_response(jpg.tobytes())
- response.headers['Content-Type'] = 'image/jpg'
+ response.headers["Content-Type"] = "image/jpg"
return response
else:
return "Camera named {} not found".format(camera_name), 404
[email protected]('/<camera_name>')
+
[email protected]("/<camera_name>")
def mjpeg_feed(camera_name):
- fps = int(request.args.get('fps', '3'))
- height = int(request.args.get('h', '360'))
+ fps = int(request.args.get("fps", "3"))
+ height = int(request.args.get("h", "360"))
draw_options = {
- 'bounding_boxes': request.args.get('bbox', type=int),
- 'timestamp': request.args.get('timestamp', type=int),
- 'zones': request.args.get('zones', type=int),
- 'mask': request.args.get('mask', type=int),
- 'motion_boxes': request.args.get('motion', type=int),
- 'regions': request.args.get('regions', type=int),
+ "bounding_boxes": request.args.get("bbox", type=int),
+ "timestamp": request.args.get("timestamp", type=int),
+ "zones": request.args.get("zones", type=int),
+ "mask": request.args.get("mask", type=int),
+ "motion_boxes": request.args.get("motion", type=int),
+ "regions": request.args.get("regions", type=int),
}
if camera_name in current_app.frigate_config.cameras:
# return a multipart response
- return Response(imagestream(current_app.detected_frames_processor, camera_name, fps, height, draw_options),
- mimetype='multipart/x-mixed-replace; boundary=frame')
+ return Response(
+ imagestream(
+ current_app.detected_frames_processor,
+ camera_name,
+ fps,
+ height,
+ draw_options,
+ ),
+ mimetype="multipart/x-mixed-replace; boundary=frame",
+ )
else:
return "Camera named {} not found".format(camera_name), 404
[email protected]('/<camera_name>/latest.jpg')
+
[email protected]("/<camera_name>/latest.jpg")
def latest_frame(camera_name):
draw_options = {
- 'bounding_boxes': request.args.get('bbox', type=int),
- 'timestamp': request.args.get('timestamp', type=int),
- 'zones': request.args.get('zones', type=int),
- 'mask': request.args.get('mask', type=int),
- 'motion_boxes': request.args.get('motion', type=int),
- 'regions': request.args.get('regions', type=int),
+ "bounding_boxes": request.args.get("bbox", type=int),
+ "timestamp": request.args.get("timestamp", type=int),
+ "zones": request.args.get("zones", type=int),
+ "mask": request.args.get("mask", type=int),
+ "motion_boxes": request.args.get("motion", type=int),
+ "regions": request.args.get("regions", type=int),
}
+ resize_quality = request.args.get("quality", default=70, type=int)
+
if camera_name in current_app.frigate_config.cameras:
- # max out at specified FPS
- frame = current_app.detected_frames_processor.get_current_frame(camera_name, draw_options)
+ frame = current_app.detected_frames_processor.get_current_frame(
+ camera_name, draw_options
+ )
if frame is None:
- frame = np.zeros((720,1280,3), np.uint8)
+ frame = np.zeros((720, 1280, 3), np.uint8)
- height = int(request.args.get('h', str(frame.shape[0])))
- width = int(height*frame.shape[1]/frame.shape[0])
+ height = int(request.args.get("h", str(frame.shape[0])))
+ width = int(height * frame.shape[1] / frame.shape[0])
frame = cv2.resize(frame, dsize=(width, height), interpolation=cv2.INTER_AREA)
- ret, jpg = cv2.imencode('.jpg', frame, [int(cv2.IMWRITE_JPEG_QUALITY), 70])
+ ret, jpg = cv2.imencode(
+ ".jpg", frame, [int(cv2.IMWRITE_JPEG_QUALITY), resize_quality]
+ )
response = make_response(jpg.tobytes())
- response.headers['Content-Type'] = 'image/jpg'
+ response.headers["Content-Type"] = "image/jpg"
return response
else:
return "Camera named {} not found".format(camera_name), 404
+
[email protected]("/<camera_name>/recordings")
+def recordings(camera_name):
+ dates = OrderedDict()
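+    # dates maps "YYYY-MM-DD" -> "HH" -> {"delay": {...}, "events": [...]}, where "delay"
+    # records gaps in recording coverage as {seconds_into_hour: gap_length_in_seconds}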
+
+ # Retrieve all recordings for this camera
+ recordings = (
+ Recordings.select()
+ .where(Recordings.camera == camera_name)
+ .order_by(Recordings.start_time.asc())
+ )
+
+ last_end = 0
+ recording: Recordings
+ for recording in recordings:
+ date = datetime.fromtimestamp(recording.start_time)
+ key = date.strftime("%Y-%m-%d")
+ hour = date.strftime("%H")
+
+ # Create Day Record
+ if key not in dates:
+ dates[key] = OrderedDict()
+
+ # Create Hour Record
+ if hour not in dates[key]:
+ dates[key][hour] = {"delay": {}, "events": []}
+
+ # Check for delay
+ the_hour = datetime.strptime(f"{key} {hour}", "%Y-%m-%d %H").timestamp()
+ # diff current recording start time and the greater of the previous end time or top of the hour
+ diff = recording.start_time - max(last_end, the_hour)
+ # Determine seconds into recording
+ seconds = 0
+ if datetime.fromtimestamp(last_end).strftime("%H") == hour:
+ seconds = int(last_end - the_hour)
+ # Determine the delay
+ delay = min(int(diff), 3600 - seconds)
+ if delay > 1:
+ # Add an offset for any delay greater than a second
+ dates[key][hour]["delay"][seconds] = delay
+
+ last_end = recording.end_time
+
+ # Packing intervals to return all events with same label and overlapping times as one row.
+ # See: https://blogs.solidq.com/en/sqlserver/packing-intervals/
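+    # In short: C1 emits a +1 marker at each event's start and a -1 marker 15s after its end,
+    # C2 keeps a per-label running count of open intervals, rows where that count reaches 0
+    # mark the first start / last end of a packed interval, and C3 pairs those boundary rows
+    # so the final SELECT collapses each pair into a single start/end row.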
+ events = Event.raw(
+ """WITH C1 AS
+ (
+ SELECT id, label, camera, top_score, start_time AS ts, +1 AS type, 1 AS sub
+ FROM event
+ WHERE camera = ?
+ UNION ALL
+ SELECT id, label, camera, top_score, end_time + 15 AS ts, -1 AS type, 0 AS sub
+ FROM event
+ WHERE camera = ?
+ ),
+ C2 AS
+ (
+ SELECT C1.*,
+ SUM(type) OVER(PARTITION BY label ORDER BY ts, type DESC
+ ROWS BETWEEN UNBOUNDED PRECEDING
+ AND CURRENT ROW) - sub AS cnt
+ FROM C1
+ ),
+ C3 AS
+ (
+ SELECT id, label, camera, top_score, ts,
+ (ROW_NUMBER() OVER(PARTITION BY label ORDER BY ts) - 1) / 2 + 1
+ AS grpnum
+ FROM C2
+ WHERE cnt = 0
+ )
+ SELECT MIN(id) as id, label, camera, MAX(top_score) as top_score, MIN(ts) AS start_time, max(ts) AS end_time
+ FROM C3
+ GROUP BY label, grpnum
+ ORDER BY start_time;""",
+ camera_name,
+ camera_name,
+ )
+
+ event: Event
+ for event in events:
+ date = datetime.fromtimestamp(event.start_time)
+ key = date.strftime("%Y-%m-%d")
+ hour = date.strftime("%H")
+ if key in dates and hour in dates[key]:
+ dates[key][hour]["events"].append(
+ model_to_dict(
+ event,
+ exclude=[
+ Event.false_positive,
+ Event.zones,
+ Event.thumbnail,
+ Event.has_clip,
+ Event.has_snapshot,
+ ],
+ )
+ )
+
+ return jsonify(
+ [
+ {
+ "date": date,
+ "events": sum([len(value["events"]) for value in hours.values()]),
+ "recordings": [
+ {"hour": hour, "delay": value["delay"], "events": value["events"]}
+ for hour, value in hours.items()
+ ],
+ }
+ for date, hours in dates.items()
+ ]
+ )
+
+
[email protected]("/<camera>/start/<int:start_ts>/end/<int:end_ts>/clip.mp4")
[email protected]("/<camera>/start/<float:start_ts>/end/<float:end_ts>/clip.mp4")
+def recording_clip(camera, start_ts, end_ts):
+ download = request.args.get("download", type=bool)
+
+ recordings = (
+ Recordings.select()
+ .where(
+ (Recordings.start_time.between(start_ts, end_ts))
+ | (Recordings.end_time.between(start_ts, end_ts))
+ | ((start_ts > Recordings.start_time) & (end_ts < Recordings.end_time))
+ )
+ .where(Recordings.camera == camera)
+ .order_by(Recordings.start_time.asc())
+ )
+
+ playlist_lines = []
+ clip: Recordings
+ for clip in recordings:
+ playlist_lines.append(f"file '{clip.path}'")
+ # if this is the starting clip, add an inpoint
+ if clip.start_time < start_ts:
+ playlist_lines.append(f"inpoint {int(start_ts - clip.start_time)}")
+ # if this is the ending clip, add an outpoint
+ if clip.end_time > end_ts:
+ playlist_lines.append(f"outpoint {int(end_ts - clip.start_time)}")
+
+ file_name = f"clip_{camera}_{start_ts}-{end_ts}.mp4"
+ path = f"/tmp/cache/{file_name}"
+
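+    # the playlist is piped to ffmpeg's concat demuxer on stdin and the segments are
+    # stream-copied (no re-encode) into a single mp4 in the cache directory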
+ ffmpeg_cmd = [
+ "ffmpeg",
+ "-y",
+ "-protocol_whitelist",
+ "pipe,file",
+ "-f",
+ "concat",
+ "-safe",
+ "0",
+ "-i",
+ "-",
+ "-c",
+ "copy",
+ "-movflags",
+ "+faststart",
+ path,
+ ]
+
+ p = sp.run(
+ ffmpeg_cmd,
+ input="\n".join(playlist_lines),
+ encoding="ascii",
+ capture_output=True,
+ )
+ if p.returncode != 0:
+ logger.error(p.stderr)
+ return f"Could not create clip from recordings for {camera}.", 500
+
+ response = make_response()
+ response.headers["Content-Description"] = "File Transfer"
+ response.headers["Cache-Control"] = "no-cache"
+ response.headers["Content-Type"] = "video/mp4"
+ if download:
+ response.headers["Content-Disposition"] = "attachment; filename=%s" % file_name
+ response.headers["Content-Length"] = os.path.getsize(path)
+ response.headers[
+ "X-Accel-Redirect"
+ ] = f"/cache/{file_name}" # nginx: http://wiki.nginx.org/NginxXSendfile
+
+ return response
+
+
[email protected]("/vod/<camera>/start/<int:start_ts>/end/<int:end_ts>")
[email protected]("/vod/<camera>/start/<float:start_ts>/end/<float:end_ts>")
+def vod_ts(camera, start_ts, end_ts):
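+    """Return a playlist of recording segments covering start_ts..end_ts, with durations
+    in milliseconds and the first/last segments trimmed to the requested range."""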
+ recordings = (
+ Recordings.select()
+ .where(
+ Recordings.start_time.between(start_ts, end_ts)
+ | Recordings.end_time.between(start_ts, end_ts)
+ | ((start_ts > Recordings.start_time) & (end_ts < Recordings.end_time))
+ )
+ .where(Recordings.camera == camera)
+ .order_by(Recordings.start_time.asc())
+ )
+
+ clips = []
+ durations = []
+
+ recording: Recordings
+ for recording in recordings:
+ clip = {"type": "source", "path": recording.path}
+ duration = int(recording.duration * 1000)
+ # Determine if offset is needed for first clip
+ if recording.start_time < start_ts:
+ offset = int((start_ts - recording.start_time) * 1000)
+ clip["clipFrom"] = offset
+ duration -= offset
+ # Determine if we need to end the last clip early
+ if recording.end_time > end_ts:
+ duration -= int((recording.end_time - end_ts) * 1000)
+ clips.append(clip)
+ durations.append(duration)
+
+ if not clips:
+ return "No recordings found.", 404
+
+ hour_ago = datetime.now() - timedelta(hours=1)
+ return jsonify(
+ {
+ "cache": hour_ago.timestamp() > start_ts,
+ "discontinuity": False,
+ "durations": durations,
+ "sequences": [{"clips": clips}],
+ }
+ )
+
+
[email protected]("/vod/<year_month>/<day>/<hour>/<camera>")
+def vod_hour(year_month, day, hour, camera):
+ start_date = datetime.strptime(f"{year_month}-{day} {hour}", "%Y-%m-%d %H")
+ end_date = start_date + timedelta(hours=1) - timedelta(milliseconds=1)
+ start_ts = start_date.timestamp()
+ end_ts = end_date.timestamp()
+
+ return vod_ts(camera, start_ts, end_ts)
+
+
[email protected]("/vod/event/<id>")
+def vod_event(id):
+ try:
+ event: Event = Event.get(Event.id == id)
+ except DoesNotExist:
+ return "Event not found.", 404
+
+ if not event.has_clip:
+ return "Clip not available", 404
+
+ clip_path = os.path.join(CLIPS_DIR, f"{event.camera}-{id}.mp4")
+
+ if not os.path.isfile(clip_path):
+ return vod_ts(event.camera, event.start_time, event.end_time)
+
+ duration = int((event.end_time - event.start_time) * 1000)
+ return jsonify(
+ {
+ "cache": True,
+ "discontinuity": False,
+ "durations": [duration],
+ "sequences": [{"clips": [{"type": "source", "path": clip_path}]}],
+ }
+ )
+
+
def imagestream(detected_frames_processor, camera_name, fps, height, draw_options):
while True:
# max out at specified FPS
- gevent.sleep(1/fps)
+ time.sleep(1 / fps)
frame = detected_frames_processor.get_current_frame(camera_name, draw_options)
if frame is None:
- frame = np.zeros((height,int(height*16/9),3), np.uint8)
+ frame = np.zeros((height, int(height * 16 / 9), 3), np.uint8)
- width = int(height*frame.shape[1]/frame.shape[0])
+ width = int(height * frame.shape[1] / frame.shape[0])
frame = cv2.resize(frame, dsize=(width, height), interpolation=cv2.INTER_LINEAR)
- ret, jpg = cv2.imencode('.jpg', frame, [int(cv2.IMWRITE_JPEG_QUALITY), 70])
- yield (b'--frame\r\n'
- b'Content-Type: image/jpeg\r\n\r\n' + jpg.tobytes() + b'\r\n\r\n')
-
[email protected]('/ws')
-def echo_socket(socket):
- current_app.mqtt_backend.register(socket)
-
- while not socket.closed:
- # Sleep to prevent *constant* context-switches.
- gevent.sleep(0.1)
-
- message = socket.receive()
- if message:
- current_app.mqtt_backend.publish(message)
+ ret, jpg = cv2.imencode(".jpg", frame, [int(cv2.IMWRITE_JPEG_QUALITY), 70])
+ yield (
+ b"--frame\r\n"
+ b"Content-Type: image/jpeg\r\n\r\n" + jpg.tobytes() + b"\r\n\r\n"
+ )
diff --git a/frigate/log.py b/frigate/log.py
--- a/frigate/log.py
+++ b/frigate/log.py
@@ -13,38 +13,34 @@
def listener_configurer():
root = logging.getLogger()
console_handler = logging.StreamHandler()
- formatter = logging.Formatter('%(name)-30s %(levelname)-8s: %(message)s')
+ formatter = logging.Formatter(
+ "[%(asctime)s] %(name)-30s %(levelname)-8s: %(message)s", "%Y-%m-%d %H:%M:%S"
+ )
console_handler.setFormatter(formatter)
root.addHandler(console_handler)
root.setLevel(logging.INFO)
+
def root_configurer(queue):
h = handlers.QueueHandler(queue)
root = logging.getLogger()
root.addHandler(h)
root.setLevel(logging.INFO)
-def log_process(log_queue):
- stop_event = mp.Event()
- def receiveSignal(signalNumber, frame):
- stop_event.set()
-
- signal.signal(signal.SIGTERM, receiveSignal)
- signal.signal(signal.SIGINT, receiveSignal)
+def log_process(log_queue):
threading.current_thread().name = f"logger"
setproctitle("frigate.logger")
listener_configurer()
while True:
- if stop_event.is_set() and log_queue.empty():
- break
try:
record = log_queue.get(timeout=5)
- except queue.Empty:
+ except (queue.Empty, KeyboardInterrupt):
continue
logger = logging.getLogger(record.name)
logger.handle(record)
+
# based on https://codereview.stackexchange.com/a/17959
class LogPipe(threading.Thread):
def __init__(self, log_name, level):
@@ -61,23 +57,20 @@ def __init__(self, log_name, level):
self.start()
def fileno(self):
- """Return the write file descriptor of the pipe
- """
+ """Return the write file descriptor of the pipe"""
return self.fdWrite
def run(self):
- """Run the thread, logging everything.
- """
- for line in iter(self.pipeReader.readline, ''):
- self.deque.append(line.strip('\n'))
+ """Run the thread, logging everything."""
+ for line in iter(self.pipeReader.readline, ""):
+ self.deque.append(line.strip("\n"))
self.pipeReader.close()
-
+
def dump(self):
while len(self.deque) > 0:
self.logger.log(self.level, self.deque.popleft())
def close(self):
- """Close the write end of the pipe.
- """
+ """Close the write end of the pipe."""
os.close(self.fdWrite)
diff --git a/frigate/models.py b/frigate/models.py
--- a/frigate/models.py
+++ b/frigate/models.py
@@ -1,3 +1,4 @@
+from numpy import unique
from peewee import *
from playhouse.sqlite_ext import *
@@ -14,3 +15,15 @@ class Event(Model):
thumbnail = TextField()
has_clip = BooleanField(default=True)
has_snapshot = BooleanField(default=True)
+ region = JSONField()
+ box = JSONField()
+ area = IntegerField()
+
+
+class Recordings(Model):
+ id = CharField(null=False, primary_key=True, max_length=30)
+ camera = CharField(index=True, max_length=20)
+ path = CharField(unique=True)
+ start_time = DateTimeField()
+ end_time = DateTimeField()
+ duration = FloatField()
diff --git a/frigate/motion.py b/frigate/motion.py
--- a/frigate/motion.py
+++ b/frigate/motion.py
@@ -4,26 +4,37 @@
from frigate.config import MotionConfig
-class MotionDetector():
+class MotionDetector:
def __init__(self, frame_shape, config: MotionConfig):
self.config = config
self.frame_shape = frame_shape
- self.resize_factor = frame_shape[0]/config.frame_height
- self.motion_frame_size = (config.frame_height, config.frame_height*frame_shape[1]//frame_shape[0])
+ self.resize_factor = frame_shape[0] / config.frame_height
+ self.motion_frame_size = (
+ config.frame_height,
+ config.frame_height * frame_shape[1] // frame_shape[0],
+ )
self.avg_frame = np.zeros(self.motion_frame_size, np.float)
self.avg_delta = np.zeros(self.motion_frame_size, np.float)
self.motion_frame_count = 0
self.frame_counter = 0
- resized_mask = cv2.resize(config.mask, dsize=(self.motion_frame_size[1], self.motion_frame_size[0]), interpolation=cv2.INTER_LINEAR)
- self.mask = np.where(resized_mask==[0])
+ resized_mask = cv2.resize(
+ config.mask,
+ dsize=(self.motion_frame_size[1], self.motion_frame_size[0]),
+ interpolation=cv2.INTER_LINEAR,
+ )
+ self.mask = np.where(resized_mask == [0])
def detect(self, frame):
motion_boxes = []
- gray = frame[0:self.frame_shape[0], 0:self.frame_shape[1]]
+ gray = frame[0 : self.frame_shape[0], 0 : self.frame_shape[1]]
# resize frame
- resized_frame = cv2.resize(gray, dsize=(self.motion_frame_size[1], self.motion_frame_size[0]), interpolation=cv2.INTER_LINEAR)
+ resized_frame = cv2.resize(
+ gray,
+ dsize=(self.motion_frame_size[1], self.motion_frame_size[0]),
+ interpolation=cv2.INTER_LINEAR,
+ )
# TODO: can I improve the contrast of the grayscale image here?
@@ -48,7 +59,9 @@ def detect(self, frame):
# compute the threshold image for the current frame
# TODO: threshold
- current_thresh = cv2.threshold(frameDelta, self.config.threshold, 255, cv2.THRESH_BINARY)[1]
+ current_thresh = cv2.threshold(
+ frameDelta, self.config.threshold, 255, cv2.THRESH_BINARY
+ )[1]
# black out everything in the avg_delta where there isnt motion in the current frame
avg_delta_image = cv2.convertScaleAbs(self.avg_delta)
@@ -56,7 +69,9 @@ def detect(self, frame):
# then look for deltas above the threshold, but only in areas where there is a delta
# in the current frame. this prevents deltas from previous frames from being included
- thresh = cv2.threshold(avg_delta_image, self.config.threshold, 255, cv2.THRESH_BINARY)[1]
+ thresh = cv2.threshold(
+ avg_delta_image, self.config.threshold, 255, cv2.THRESH_BINARY
+ )[1]
# dilate the thresholded image to fill in holes, then find contours
# on thresholded image
@@ -70,16 +85,27 @@ def detect(self, frame):
contour_area = cv2.contourArea(c)
if contour_area > self.config.contour_area:
x, y, w, h = cv2.boundingRect(c)
- motion_boxes.append((int(x*self.resize_factor), int(y*self.resize_factor), int((x+w)*self.resize_factor), int((y+h)*self.resize_factor)))
-
+ motion_boxes.append(
+ (
+ int(x * self.resize_factor),
+ int(y * self.resize_factor),
+ int((x + w) * self.resize_factor),
+ int((y + h) * self.resize_factor),
+ )
+ )
+
if len(motion_boxes) > 0:
self.motion_frame_count += 1
if self.motion_frame_count >= 10:
# only average in the current frame if the difference persists for a bit
- cv2.accumulateWeighted(resized_frame, self.avg_frame, self.config.frame_alpha)
+ cv2.accumulateWeighted(
+ resized_frame, self.avg_frame, self.config.frame_alpha
+ )
else:
# when no motion, just keep averaging the frames together
- cv2.accumulateWeighted(resized_frame, self.avg_frame, self.config.frame_alpha)
+ cv2.accumulateWeighted(
+ resized_frame, self.avg_frame, self.config.frame_alpha
+ )
self.motion_frame_count = 0
return motion_boxes
diff --git a/frigate/mqtt.py b/frigate/mqtt.py
--- a/frigate/mqtt.py
+++ b/frigate/mqtt.py
@@ -1,31 +1,42 @@
+import json
import logging
import threading
+from wsgiref.simple_server import make_server
import paho.mqtt.client as mqtt
+from ws4py.server.wsgirefserver import (
+ WebSocketWSGIHandler,
+ WebSocketWSGIRequestHandler,
+ WSGIServer,
+)
+from ws4py.server.wsgiutils import WebSocketWSGIApplication
+from ws4py.websocket import WebSocket
from frigate.config import FrigateConfig
+from frigate.util import restart_frigate
logger = logging.getLogger(__name__)
+
def create_mqtt_client(config: FrigateConfig, camera_metrics):
mqtt_config = config.mqtt
- def on_clips_command(client, userdata, message):
+ def on_recordings_command(client, userdata, message):
payload = message.payload.decode()
- logger.debug(f"on_clips_toggle: {message.topic} {payload}")
+ logger.debug(f"on_recordings_toggle: {message.topic} {payload}")
- camera_name = message.topic.split('/')[-3]
+ camera_name = message.topic.split("/")[-3]
- clips_settings = config.cameras[camera_name].clips
+ record_settings = config.cameras[camera_name].record
- if payload == 'ON':
- if not clips_settings.enabled:
- logger.info(f"Turning on clips for {camera_name} via mqtt")
- clips_settings._enabled = True
- elif payload == 'OFF':
- if clips_settings.enabled:
- logger.info(f"Turning off clips for {camera_name} via mqtt")
- clips_settings._enabled = False
+ if payload == "ON":
+ if not record_settings.enabled:
+ logger.info(f"Turning on recordings for {camera_name} via mqtt")
+ record_settings.enabled = True
+ elif payload == "OFF":
+ if record_settings.enabled:
+ logger.info(f"Turning off recordings for {camera_name} via mqtt")
+ record_settings.enabled = False
else:
logger.warning(f"Received unsupported value at {message.topic}: {payload}")
@@ -36,74 +47,106 @@ def on_snapshots_command(client, userdata, message):
payload = message.payload.decode()
logger.debug(f"on_snapshots_toggle: {message.topic} {payload}")
- camera_name = message.topic.split('/')[-3]
+ camera_name = message.topic.split("/")[-3]
snapshots_settings = config.cameras[camera_name].snapshots
- if payload == 'ON':
+ if payload == "ON":
if not snapshots_settings.enabled:
logger.info(f"Turning on snapshots for {camera_name} via mqtt")
- snapshots_settings._enabled = True
- elif payload == 'OFF':
+ snapshots_settings.enabled = True
+ elif payload == "OFF":
if snapshots_settings.enabled:
logger.info(f"Turning off snapshots for {camera_name} via mqtt")
- snapshots_settings._enabled = False
+ snapshots_settings.enabled = False
else:
logger.warning(f"Received unsupported value at {message.topic}: {payload}")
state_topic = f"{message.topic[:-4]}/state"
client.publish(state_topic, payload, retain=True)
-
+
def on_detect_command(client, userdata, message):
payload = message.payload.decode()
logger.debug(f"on_detect_toggle: {message.topic} {payload}")
- camera_name = message.topic.split('/')[-3]
+ camera_name = message.topic.split("/")[-3]
detect_settings = config.cameras[camera_name].detect
- if payload == 'ON':
+ if payload == "ON":
if not camera_metrics[camera_name]["detection_enabled"].value:
logger.info(f"Turning on detection for {camera_name} via mqtt")
camera_metrics[camera_name]["detection_enabled"].value = True
- detect_settings._enabled = True
- elif payload == 'OFF':
+ detect_settings.enabled = True
+ elif payload == "OFF":
if camera_metrics[camera_name]["detection_enabled"].value:
logger.info(f"Turning off detection for {camera_name} via mqtt")
camera_metrics[camera_name]["detection_enabled"].value = False
- detect_settings._enabled = False
+ detect_settings.enabled = False
else:
logger.warning(f"Received unsupported value at {message.topic}: {payload}")
state_topic = f"{message.topic[:-4]}/state"
client.publish(state_topic, payload, retain=True)
+ def on_restart_command(client, userdata, message):
+ restart_frigate()
+
def on_connect(client, userdata, flags, rc):
threading.current_thread().name = "mqtt"
if rc != 0:
if rc == 3:
- logger.error("MQTT Server unavailable")
+ logger.error("Unable to connect to MQTT server: MQTT Server unavailable")
elif rc == 4:
- logger.error("MQTT Bad username or password")
+ logger.error("Unable to connect to MQTT server: MQTT Bad username or password")
elif rc == 5:
- logger.error("MQTT Not authorized")
+ logger.error("Unable to connect to MQTT server: MQTT Not authorized")
else:
- logger.error("Unable to connect to MQTT: Connection refused. Error code: " + str(rc))
-
+ logger.error(
+ "Unable to connect to MQTT server: Connection refused. Error code: "
+ + str(rc)
+ )
+
logger.info("MQTT connected")
client.subscribe(f"{mqtt_config.topic_prefix}/#")
- client.publish(mqtt_config.topic_prefix+'/available', 'online', retain=True)
+ client.publish(mqtt_config.topic_prefix + "/available", "online", retain=True)
- client = mqtt.Client(client_id=mqtt_config.client_id)
+ client = mqtt.Client(client_id=mqtt_config.client_id)
client.on_connect = on_connect
- client.will_set(mqtt_config.topic_prefix+'/available', payload='offline', qos=1, retain=True)
-
+ client.will_set(
+ mqtt_config.topic_prefix + "/available", payload="offline", qos=1, retain=True
+ )
+
# register callbacks
for name in config.cameras.keys():
- client.message_callback_add(f"{mqtt_config.topic_prefix}/{name}/clips/set", on_clips_command)
- client.message_callback_add(f"{mqtt_config.topic_prefix}/{name}/snapshots/set", on_snapshots_command)
- client.message_callback_add(f"{mqtt_config.topic_prefix}/{name}/detect/set", on_detect_command)
-
+ client.message_callback_add(
+ f"{mqtt_config.topic_prefix}/{name}/recordings/set", on_recordings_command
+ )
+ client.message_callback_add(
+ f"{mqtt_config.topic_prefix}/{name}/snapshots/set", on_snapshots_command
+ )
+ client.message_callback_add(
+ f"{mqtt_config.topic_prefix}/{name}/detect/set", on_detect_command
+ )
+
+ client.message_callback_add(
+ f"{mqtt_config.topic_prefix}/restart", on_restart_command
+ )
+
+ if not mqtt_config.tls_ca_certs is None:
+ if (
+ not mqtt_config.tls_client_cert is None
+ and not mqtt_config.tls_client_key is None
+ ):
+ client.tls_set(
+ mqtt_config.tls_ca_certs,
+ mqtt_config.tls_client_cert,
+ mqtt_config.tls_client_key,
+ )
+ else:
+ client.tls_set(mqtt_config.tls_ca_certs)
+ if not mqtt_config.tls_insecure is None:
+ client.tls_insecure_set(mqtt_config.tls_insecure)
if not mqtt_config.user is None:
client.username_pw_set(mqtt_config.user, password=mqtt_config.password)
try:
@@ -115,10 +158,96 @@ def on_connect(client, userdata, flags, rc):
client.loop_start()
for name in config.cameras.keys():
- client.publish(f"{mqtt_config.topic_prefix}/{name}/clips/state", 'ON' if config.cameras[name].clips.enabled else 'OFF', retain=True)
- client.publish(f"{mqtt_config.topic_prefix}/{name}/snapshots/state", 'ON' if config.cameras[name].snapshots.enabled else 'OFF', retain=True)
- client.publish(f"{mqtt_config.topic_prefix}/{name}/detect/state", 'ON' if config.cameras[name].detect.enabled else 'OFF', retain=True)
-
- client.subscribe(f"{mqtt_config.topic_prefix}/#")
+ client.publish(
+ f"{mqtt_config.topic_prefix}/{name}/recordings/state",
+ "ON" if config.cameras[name].record.enabled else "OFF",
+ retain=True,
+ )
+ client.publish(
+ f"{mqtt_config.topic_prefix}/{name}/snapshots/state",
+ "ON" if config.cameras[name].snapshots.enabled else "OFF",
+ retain=True,
+ )
+ client.publish(
+ f"{mqtt_config.topic_prefix}/{name}/detect/state",
+ "ON" if config.cameras[name].detect.enabled else "OFF",
+ retain=True,
+ )
return client
+
+
+class MqttSocketRelay:
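+    """Relay MQTT messages to websocket clients (served on 127.0.0.1:5002) and forward
+    messages received over the websocket back to MQTT."""
+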
+ def __init__(self, mqtt_client, topic_prefix):
+ self.mqtt_client = mqtt_client
+ self.topic_prefix = topic_prefix
+
+ def start(self):
+ class MqttWebSocket(WebSocket):
+ topic_prefix = self.topic_prefix
+ mqtt_client = self.mqtt_client
+
+ def received_message(self, message):
+ try:
+ json_message = json.loads(message.data.decode("utf-8"))
+ json_message = {
+ "topic": f"{self.topic_prefix}/{json_message['topic']}",
+ "payload": json_message.get("payload"),
+ "retain": json_message.get("retain", False),
+ }
+ except Exception as e:
+ logger.warning("Unable to parse websocket message as valid json.")
+ return
+
+ logger.debug(
+ f"Publishing mqtt message from websockets at {json_message['topic']}."
+ )
+ self.mqtt_client.publish(
+ json_message["topic"],
+ json_message["payload"],
+ retain=json_message["retain"],
+ )
+
+ # start a websocket server on 5002
+ WebSocketWSGIHandler.http_version = "1.1"
+ self.websocket_server = make_server(
+ "127.0.0.1",
+ 5002,
+ server_class=WSGIServer,
+ handler_class=WebSocketWSGIRequestHandler,
+ app=WebSocketWSGIApplication(handler_cls=MqttWebSocket),
+ )
+ self.websocket_server.initialize_websockets_manager()
+ self.websocket_thread = threading.Thread(
+ target=self.websocket_server.serve_forever
+ )
+
+ def send(client, userdata, message):
+ """Sends mqtt messages to clients."""
+ try:
+ logger.debug(f"Received mqtt message on {message.topic}.")
+ ws_message = json.dumps(
+ {
+ "topic": message.topic.replace(f"{self.topic_prefix}/", ""),
+ "payload": message.payload.decode(),
+ }
+ )
+ except Exception as e:
+ # if the payload can't be decoded don't relay to clients
+ logger.debug(
+ f"MQTT payload for {message.topic} wasn't text. Skipping..."
+ )
+ return
+
+ self.websocket_server.manager.broadcast(ws_message)
+
+ self.mqtt_client.message_callback_add(f"{self.topic_prefix}/#", send)
+
+ self.websocket_thread.start()
+
+ def stop(self):
+ self.websocket_server.manager.close_all()
+ self.websocket_server.manager.stop()
+ self.websocket_server.manager.join()
+ self.websocket_server.shutdown()
+ self.websocket_thread.join()
diff --git a/frigate/object_processing.py b/frigate/object_processing.py
--- a/frigate/object_processing.py
+++ b/frigate/object_processing.py
@@ -1,5 +1,5 @@
-import copy
import base64
+import copy
import datetime
import hashlib
import itertools
@@ -14,62 +14,67 @@
from typing import Callable, Dict
import cv2
-import matplotlib.pyplot as plt
import numpy as np
-from frigate.config import FrigateConfig, CameraConfig
-from frigate.const import RECORD_DIR, CLIPS_DIR, CACHE_DIR
+from frigate.config import CameraConfig, SnapshotsConfig, RecordConfig, FrigateConfig
+from frigate.const import CACHE_DIR, CLIPS_DIR, RECORD_DIR
from frigate.edgetpu import load_labels
-from frigate.util import SharedMemoryFrameManager, draw_box_with_label, calculate_region
+from frigate.util import (
+ SharedMemoryFrameManager,
+ calculate_region,
+ draw_box_with_label,
+ draw_timestamp,
+)
logger = logging.getLogger(__name__)
-PATH_TO_LABELS = '/labelmap.txt'
-
-LABELS = load_labels(PATH_TO_LABELS)
-cmap = plt.cm.get_cmap('tab10', len(LABELS.keys()))
-
-COLOR_MAP = {}
-for key, val in LABELS.items():
- COLOR_MAP[val] = tuple(int(round(255 * c)) for c in cmap(key)[:3])
def on_edge(box, frame_shape):
if (
- box[0] == 0 or
- box[1] == 0 or
- box[2] == frame_shape[1]-1 or
- box[3] == frame_shape[0]-1
+ box[0] == 0
+ or box[1] == 0
+ or box[2] == frame_shape[1] - 1
+ or box[3] == frame_shape[0] - 1
):
return True
+
def is_better_thumbnail(current_thumb, new_obj, frame_shape) -> bool:
# larger is better
# cutoff images are less ideal, but they should also be smaller?
# better scores are obviously better too
# if the new_thumb is on an edge, and the current thumb is not
- if on_edge(new_obj['box'], frame_shape) and not on_edge(current_thumb['box'], frame_shape):
+ if on_edge(new_obj["box"], frame_shape) and not on_edge(
+ current_thumb["box"], frame_shape
+ ):
return False
# if the score is better by more than 5%
- if new_obj['score'] > current_thumb['score']+.05:
+ if new_obj["score"] > current_thumb["score"] + 0.05:
return True
# if the area is 10% larger
- if new_obj['area'] > current_thumb['area']*1.1:
+ if new_obj["area"] > current_thumb["area"] * 1.1:
return True
return False
-class TrackedObject():
- def __init__(self, camera, camera_config: CameraConfig, frame_cache, obj_data):
+
+class TrackedObject:
+ def __init__(
+ self, camera, colormap, camera_config: CameraConfig, frame_cache, obj_data
+ ):
self.obj_data = obj_data
self.camera = camera
+ self.colormap = colormap
self.camera_config = camera_config
self.frame_cache = frame_cache
self.current_zones = []
self.entered_zones = set()
self.false_positive = True
+ self.has_clip = False
+ self.has_snapshot = False
self.top_score = self.computed_score = 0.0
self.thumbnail_data = None
self.last_updated = 0
@@ -78,33 +83,32 @@ def __init__(self, camera, camera_config: CameraConfig, frame_cache, obj_data):
self.previous = self.to_dict()
# start the score history
- self.score_history = [self.obj_data['score']]
+ self.score_history = [self.obj_data["score"]]
def _is_false_positive(self):
# once a true positive, always a true positive
if not self.false_positive:
return False
- threshold = self.camera_config.objects.filters[self.obj_data['label']].threshold
- if self.computed_score < threshold:
- return True
- return False
+ threshold = self.camera_config.objects.filters[self.obj_data["label"]].threshold
+ return self.computed_score < threshold
def compute_score(self):
scores = self.score_history[:]
# pad with zeros if you dont have at least 3 scores
if len(scores) < 3:
- scores += [0.0]*(3 - len(scores))
+ scores += [0.0] * (3 - len(scores))
return median(scores)
def update(self, current_frame_time, obj_data):
significant_update = False
+ zone_change = False
self.obj_data.update(obj_data)
# if the object is not in the current frame, add a 0.0 to the score history
- if self.obj_data['frame_time'] != current_frame_time:
+ if self.obj_data["frame_time"] != current_frame_time:
self.score_history.append(0.0)
else:
- self.score_history.append(self.obj_data['score'])
+ self.score_history.append(self.obj_data["score"])
# only keep the last 10 scores
if len(self.score_history) > 10:
self.score_history = self.score_history[-10:]
@@ -117,27 +121,29 @@ def update(self, current_frame_time, obj_data):
if not self.false_positive:
# determine if this frame is a better thumbnail
- if (
- self.thumbnail_data is None
- or is_better_thumbnail(self.thumbnail_data, self.obj_data, self.camera_config.frame_shape)
+ if self.thumbnail_data is None or is_better_thumbnail(
+ self.thumbnail_data, self.obj_data, self.camera_config.frame_shape
):
self.thumbnail_data = {
- 'frame_time': self.obj_data['frame_time'],
- 'box': self.obj_data['box'],
- 'area': self.obj_data['area'],
- 'region': self.obj_data['region'],
- 'score': self.obj_data['score']
+ "frame_time": self.obj_data["frame_time"],
+ "box": self.obj_data["box"],
+ "area": self.obj_data["area"],
+ "region": self.obj_data["region"],
+ "score": self.obj_data["score"],
}
significant_update = True
# check zones
current_zones = []
- bottom_center = (self.obj_data['centroid'][0], self.obj_data['box'][3])
+ bottom_center = (self.obj_data["centroid"][0], self.obj_data["box"][3])
# check each zone
for name, zone in self.camera_config.zones.items():
+ # if the zone is not for this object type, skip
+ if len(zone.objects) > 0 and not self.obj_data["label"] in zone.objects:
+ continue
contour = zone.contour
# check if the object is in the zone
- if (cv2.pointPolygonTest(contour, bottom_center, False) >= 0):
+ if cv2.pointPolygonTest(contour, bottom_center, False) >= 0:
# if the object passed the filters once, dont apply again
if name in self.current_zones or not zone_filtered(self, zone.filters):
current_zones.append(name)
@@ -145,98 +151,162 @@ def update(self, current_frame_time, obj_data):
# if the zones changed, signal an update
if not self.false_positive and set(self.current_zones) != set(current_zones):
- significant_update = True
+ zone_change = True
self.current_zones = current_zones
- return significant_update
+ return (significant_update, zone_change)
def to_dict(self, include_thumbnail: bool = False):
- return {
- 'id': self.obj_data['id'],
- 'camera': self.camera,
- 'frame_time': self.obj_data['frame_time'],
- 'label': self.obj_data['label'],
- 'top_score': self.top_score,
- 'false_positive': self.false_positive,
- 'start_time': self.obj_data['start_time'],
- 'end_time': self.obj_data.get('end_time', None),
- 'score': self.obj_data['score'],
- 'box': self.obj_data['box'],
- 'area': self.obj_data['area'],
- 'region': self.obj_data['region'],
- 'current_zones': self.current_zones.copy(),
- 'entered_zones': list(self.entered_zones).copy(),
- 'thumbnail': base64.b64encode(self.get_thumbnail()).decode('utf-8') if include_thumbnail else None
+ snapshot_time = (
+ self.thumbnail_data["frame_time"]
+ if not self.thumbnail_data is None
+ else 0.0
+ )
+ event = {
+ "id": self.obj_data["id"],
+ "camera": self.camera,
+ "frame_time": self.obj_data["frame_time"],
+ "snapshot_time": snapshot_time,
+ "label": self.obj_data["label"],
+ "top_score": self.top_score,
+ "false_positive": self.false_positive,
+ "start_time": self.obj_data["start_time"],
+ "end_time": self.obj_data.get("end_time", None),
+ "score": self.obj_data["score"],
+ "box": self.obj_data["box"],
+ "area": self.obj_data["area"],
+ "region": self.obj_data["region"],
+ "current_zones": self.current_zones.copy(),
+ "entered_zones": list(self.entered_zones).copy(),
+ "has_clip": self.has_clip,
+ "has_snapshot": self.has_snapshot,
}
+ if include_thumbnail:
+ event["thumbnail"] = base64.b64encode(self.get_thumbnail()).decode("utf-8")
+
+ return event
+
def get_thumbnail(self):
- if self.thumbnail_data is None or not self.thumbnail_data['frame_time'] in self.frame_cache:
- ret, jpg = cv2.imencode('.jpg', np.zeros((175,175,3), np.uint8))
+ if (
+ self.thumbnail_data is None
+ or self.thumbnail_data["frame_time"] not in self.frame_cache
+ ):
+ ret, jpg = cv2.imencode(".jpg", np.zeros((175, 175, 3), np.uint8))
- jpg_bytes = self.get_jpg_bytes(timestamp=False, bounding_box=False, crop=True, height=175)
+ jpg_bytes = self.get_jpg_bytes(
+ timestamp=False, bounding_box=False, crop=True, height=175
+ )
if jpg_bytes:
return jpg_bytes
else:
- ret, jpg = cv2.imencode('.jpg', np.zeros((175,175,3), np.uint8))
+ ret, jpg = cv2.imencode(".jpg", np.zeros((175, 175, 3), np.uint8))
return jpg.tobytes()
-
- def get_jpg_bytes(self, timestamp=False, bounding_box=False, crop=False, height=None):
+
+ def get_clean_png(self):
if self.thumbnail_data is None:
return None
-
+
try:
- best_frame = cv2.cvtColor(self.frame_cache[self.thumbnail_data['frame_time']], cv2.COLOR_YUV2BGR_I420)
+ best_frame = cv2.cvtColor(
+ self.frame_cache[self.thumbnail_data["frame_time"]],
+ cv2.COLOR_YUV2BGR_I420,
+ )
except KeyError:
- logger.warning(f"Unable to create jpg because frame {self.thumbnail_data['frame_time']} is not in the cache")
+ logger.warning(
+ f"Unable to create clean png because frame {self.thumbnail_data['frame_time']} is not in the cache"
+ )
return None
-
+
+ ret, png = cv2.imencode(".png", best_frame)
+ if ret:
+ return png.tobytes()
+ else:
+ return None
+
+ def get_jpg_bytes(
+ self, timestamp=False, bounding_box=False, crop=False, height=None, quality=70
+ ):
+ if self.thumbnail_data is None:
+ return None
+
+ try:
+ best_frame = cv2.cvtColor(
+ self.frame_cache[self.thumbnail_data["frame_time"]],
+ cv2.COLOR_YUV2BGR_I420,
+ )
+ except KeyError:
+ logger.warning(
+ f"Unable to create jpg because frame {self.thumbnail_data['frame_time']} is not in the cache"
+ )
+ return None
+
if bounding_box:
thickness = 2
- color = COLOR_MAP[self.obj_data['label']]
+ color = self.colormap[self.obj_data["label"]]
# draw the bounding boxes on the frame
- box = self.thumbnail_data['box']
- draw_box_with_label(best_frame, box[0], box[1], box[2], box[3], self.obj_data['label'], f"{int(self.thumbnail_data['score']*100)}% {int(self.thumbnail_data['area'])}", thickness=thickness, color=color)
+ box = self.thumbnail_data["box"]
+ draw_box_with_label(
+ best_frame,
+ box[0],
+ box[1],
+ box[2],
+ box[3],
+ self.obj_data["label"],
+ f"{int(self.thumbnail_data['score']*100)}% {int(self.thumbnail_data['area'])}",
+ thickness=thickness,
+ color=color,
+ )
if crop:
- box = self.thumbnail_data['box']
- region = calculate_region(best_frame.shape, box[0], box[1], box[2], box[3], 1.1)
- best_frame = best_frame[region[1]:region[3], region[0]:region[2]]
+ box = self.thumbnail_data["box"]
+ region = calculate_region(
+ best_frame.shape, box[0], box[1], box[2], box[3], 1.1
+ )
+ best_frame = best_frame[region[1] : region[3], region[0] : region[2]]
if height:
- width = int(height*best_frame.shape[1]/best_frame.shape[0])
- best_frame = cv2.resize(best_frame, dsize=(width, height), interpolation=cv2.INTER_AREA)
-
+ width = int(height * best_frame.shape[1] / best_frame.shape[0])
+ best_frame = cv2.resize(
+ best_frame, dsize=(width, height), interpolation=cv2.INTER_AREA
+ )
if timestamp:
- time_to_show = datetime.datetime.fromtimestamp(self.thumbnail_data['frame_time']).strftime("%m/%d/%Y %H:%M:%S")
- size = cv2.getTextSize(time_to_show, cv2.FONT_HERSHEY_SIMPLEX, fontScale=1, thickness=2)
- text_width = size[0][0]
- desired_size = max(150, 0.33*best_frame.shape[1])
- font_scale = desired_size/text_width
- cv2.putText(best_frame, time_to_show, (5, best_frame.shape[0]-7), cv2.FONT_HERSHEY_SIMPLEX,
- fontScale=font_scale, color=(255, 255, 255), thickness=2)
-
- ret, jpg = cv2.imencode('.jpg', best_frame, [int(cv2.IMWRITE_JPEG_QUALITY), 70])
+ color = self.camera_config.timestamp_style.color
+ draw_timestamp(
+ best_frame,
+ self.thumbnail_data["frame_time"],
+ self.camera_config.timestamp_style.format,
+ font_effect=self.camera_config.timestamp_style.effect,
+ font_thickness=self.camera_config.timestamp_style.thickness,
+ font_color=(color.blue, color.green, color.red),
+ position=self.camera_config.timestamp_style.position,
+ )
+
+ ret, jpg = cv2.imencode(
+ ".jpg", best_frame, [int(cv2.IMWRITE_JPEG_QUALITY), quality]
+ )
if ret:
return jpg.tobytes()
else:
return None
+
def zone_filtered(obj: TrackedObject, object_config):
- object_name = obj.obj_data['label']
+ object_name = obj.obj_data["label"]
if object_name in object_config:
obj_settings = object_config[object_name]
# if the min area is larger than the
# detected object, don't add it to detected objects
- if obj_settings.min_area > obj.obj_data['area']:
+ if obj_settings.min_area > obj.obj_data["area"]:
return True
# if the detected object is larger than the
# max area, don't add it to detected objects
- if obj_settings.max_area < obj.obj_data['area']:
+ if obj_settings.max_area < obj.obj_data["area"]:
return True
# if the score is lower than the threshold, skip
@@ -245,70 +315,109 @@ def zone_filtered(obj: TrackedObject, object_config):
return False
+
# Maintains the state of a camera
-class CameraState():
- def __init__(self, name, config, frame_manager):
+class CameraState:
+ def __init__(
+ self, name, config: FrigateConfig, frame_manager: SharedMemoryFrameManager
+ ):
self.name = name
self.config = config
self.camera_config = config.cameras[name]
self.frame_manager = frame_manager
self.best_objects: Dict[str, TrackedObject] = {}
- self.object_counts = defaultdict(lambda: 0)
+ self.object_counts = defaultdict(int)
self.tracked_objects: Dict[str, TrackedObject] = {}
self.frame_cache = {}
- self.zone_objects = defaultdict(lambda: [])
+ self.zone_objects = defaultdict(list)
self._current_frame = np.zeros(self.camera_config.frame_shape_yuv, np.uint8)
self.current_frame_lock = threading.Lock()
self.current_frame_time = 0.0
self.motion_boxes = []
self.regions = []
self.previous_frame_id = None
- self.callbacks = defaultdict(lambda: [])
+ self.callbacks = defaultdict(list)
def get_current_frame(self, draw_options={}):
with self.current_frame_lock:
frame_copy = np.copy(self._current_frame)
frame_time = self.current_frame_time
- tracked_objects = {k: v.to_dict() for k,v in self.tracked_objects.items()}
+ tracked_objects = {k: v.to_dict() for k, v in self.tracked_objects.items()}
motion_boxes = self.motion_boxes.copy()
regions = self.regions.copy()
frame_copy = cv2.cvtColor(frame_copy, cv2.COLOR_YUV2BGR_I420)
# draw on the frame
- if draw_options.get('bounding_boxes'):
+ if draw_options.get("bounding_boxes"):
# draw the bounding boxes on the frame
for obj in tracked_objects.values():
- thickness = 2
- color = COLOR_MAP[obj['label']]
-
- if obj['frame_time'] != frame_time:
+ if obj["frame_time"] == frame_time:
+ thickness = 2
+ color = self.config.model.colormap[obj["label"]]
+ else:
thickness = 1
- color = (255,0,0)
+ color = (255, 0, 0)
# draw the bounding boxes on the frame
- box = obj['box']
- draw_box_with_label(frame_copy, box[0], box[1], box[2], box[3], obj['label'], f"{int(obj['score']*100)}% {int(obj['area'])}", thickness=thickness, color=color)
+ box = obj["box"]
+ draw_box_with_label(
+ frame_copy,
+ box[0],
+ box[1],
+ box[2],
+ box[3],
+ obj["label"],
+ f"{obj['score']:.0%} {int(obj['area'])}",
+ thickness=thickness,
+ color=color,
+ )
- if draw_options.get('regions'):
+ if draw_options.get("regions"):
for region in regions:
- cv2.rectangle(frame_copy, (region[0], region[1]), (region[2], region[3]), (0,255,0), 2)
+ cv2.rectangle(
+ frame_copy,
+ (region[0], region[1]),
+ (region[2], region[3]),
+ (0, 255, 0),
+ 2,
+ )
- if draw_options.get('zones'):
+ if draw_options.get("zones"):
for name, zone in self.camera_config.zones.items():
- thickness = 8 if any([name in obj['current_zones'] for obj in tracked_objects.values()]) else 2
+ thickness = (
+ 8
+ if any(
+ name in obj["current_zones"] for obj in tracked_objects.values()
+ )
+ else 2
+ )
cv2.drawContours(frame_copy, [zone.contour], -1, zone.color, thickness)
- if draw_options.get('mask'):
- mask_overlay = np.where(self.camera_config.motion.mask==[0])
- frame_copy[mask_overlay] = [0,0,0]
+ if draw_options.get("mask"):
+ mask_overlay = np.where(self.camera_config.motion.mask == [0])
+ frame_copy[mask_overlay] = [0, 0, 0]
- if draw_options.get('motion_boxes'):
+ if draw_options.get("motion_boxes"):
for m_box in motion_boxes:
- cv2.rectangle(frame_copy, (m_box[0], m_box[1]), (m_box[2], m_box[3]), (0,0,255), 2)
+ cv2.rectangle(
+ frame_copy,
+ (m_box[0], m_box[1]),
+ (m_box[2], m_box[3]),
+ (0, 0, 255),
+ 2,
+ )
- if draw_options.get('timestamp'):
- time_to_show = datetime.datetime.fromtimestamp(frame_time).strftime("%m/%d/%Y %H:%M:%S")
- cv2.putText(frame_copy, time_to_show, (10, 30), cv2.FONT_HERSHEY_SIMPLEX, fontScale=.8, color=(255, 255, 255), thickness=2)
+ if draw_options.get("timestamp"):
+ color = self.camera_config.timestamp_style.color
+ draw_timestamp(
+ frame_copy,
+ frame_time,
+ self.camera_config.timestamp_style.format,
+ font_effect=self.camera_config.timestamp_style.effect,
+ font_thickness=self.camera_config.timestamp_style.thickness,
+ font_color=(color.blue, color.green, color.red),
+ position=self.camera_config.timestamp_style.position,
+ )
return frame_copy
@@ -319,112 +428,164 @@ def on(self, event_type: str, callback: Callable[[Dict], None]):
self.callbacks[event_type].append(callback)
def update(self, frame_time, current_detections, motion_boxes, regions):
- self.current_frame_time = frame_time
- self.motion_boxes = motion_boxes
- self.regions = regions
# get the new frame
frame_id = f"{self.name}{frame_time}"
- current_frame = self.frame_manager.get(frame_id, self.camera_config.frame_shape_yuv)
+ current_frame = self.frame_manager.get(
+ frame_id, self.camera_config.frame_shape_yuv
+ )
- current_ids = current_detections.keys()
- previous_ids = self.tracked_objects.keys()
- removed_ids = list(set(previous_ids).difference(current_ids))
- new_ids = list(set(current_ids).difference(previous_ids))
- updated_ids = list(set(current_ids).intersection(previous_ids))
+ tracked_objects = self.tracked_objects.copy()
+ current_ids = set(current_detections.keys())
+ previous_ids = set(tracked_objects.keys())
+ removed_ids = previous_ids.difference(current_ids)
+ new_ids = current_ids.difference(previous_ids)
+ updated_ids = current_ids.intersection(previous_ids)
for id in new_ids:
- new_obj = self.tracked_objects[id] = TrackedObject(self.name, self.camera_config, self.frame_cache, current_detections[id])
+ new_obj = tracked_objects[id] = TrackedObject(
+ self.name,
+ self.config.model.colormap,
+ self.camera_config,
+ self.frame_cache,
+ current_detections[id],
+ )
# call event handlers
- for c in self.callbacks['start']:
+ for c in self.callbacks["start"]:
c(self.name, new_obj, frame_time)
for id in updated_ids:
- updated_obj = self.tracked_objects[id]
- significant_update = updated_obj.update(frame_time, current_detections[id])
+ updated_obj = tracked_objects[id]
+ significant_update, zone_change = updated_obj.update(
+ frame_time, current_detections[id]
+ )
if significant_update:
# ensure this frame is stored in the cache
- if updated_obj.thumbnail_data['frame_time'] == frame_time and frame_time not in self.frame_cache:
+ if (
+ updated_obj.thumbnail_data["frame_time"] == frame_time
+ and frame_time not in self.frame_cache
+ ):
self.frame_cache[frame_time] = np.copy(current_frame)
-
+
updated_obj.last_updated = frame_time
-
+
# if it has been more than 5 seconds since the last publish
- # and the last update is greater than the last publish
- if frame_time - updated_obj.last_published > 5 and updated_obj.last_updated > updated_obj.last_published:
+ # and the last update is greater than the last publish or
+ # the object has changed zones
+ if (
+ frame_time - updated_obj.last_published > 5
+ and updated_obj.last_updated > updated_obj.last_published
+ ) or zone_change:
# call event handlers
- for c in self.callbacks['update']:
+ for c in self.callbacks["update"]:
c(self.name, updated_obj, frame_time)
updated_obj.last_published = frame_time
for id in removed_ids:
# publish events to mqtt
- removed_obj = self.tracked_objects[id]
- if not 'end_time' in removed_obj.obj_data:
- removed_obj.obj_data['end_time'] = frame_time
- for c in self.callbacks['end']:
+ removed_obj = tracked_objects[id]
+ if not "end_time" in removed_obj.obj_data:
+ removed_obj.obj_data["end_time"] = frame_time
+ for c in self.callbacks["end"]:
c(self.name, removed_obj, frame_time)
# TODO: can i switch to looking this up and only changing when an event ends?
# maintain best objects
- for obj in self.tracked_objects.values():
- object_type = obj.obj_data['label']
+ for obj in tracked_objects.values():
+ object_type = obj.obj_data["label"]
# if the object's thumbnail is not from the current frame
- if obj.false_positive or obj.thumbnail_data['frame_time'] != self.current_frame_time:
+ if obj.false_positive or obj.thumbnail_data["frame_time"] != frame_time:
continue
if object_type in self.best_objects:
current_best = self.best_objects[object_type]
now = datetime.datetime.now().timestamp()
# if the object is a higher score than the current best score
# or the current object is older than desired, use the new object
- if (is_better_thumbnail(current_best.thumbnail_data, obj.thumbnail_data, self.camera_config.frame_shape)
- or (now - current_best.thumbnail_data['frame_time']) > self.camera_config.best_image_timeout):
+ if (
+ is_better_thumbnail(
+ current_best.thumbnail_data,
+ obj.thumbnail_data,
+ self.camera_config.frame_shape,
+ )
+ or (now - current_best.thumbnail_data["frame_time"])
+ > self.camera_config.best_image_timeout
+ ):
self.best_objects[object_type] = obj
- for c in self.callbacks['snapshot']:
+ for c in self.callbacks["snapshot"]:
c(self.name, self.best_objects[object_type], frame_time)
else:
self.best_objects[object_type] = obj
- for c in self.callbacks['snapshot']:
+ for c in self.callbacks["snapshot"]:
c(self.name, self.best_objects[object_type], frame_time)
# update overall camera state for each object type
- obj_counter = Counter()
- for obj in self.tracked_objects.values():
- if not obj.false_positive:
- obj_counter[obj.obj_data['label']] += 1
+ obj_counter = Counter(
+ obj.obj_data["label"]
+ for obj in tracked_objects.values()
+ if not obj.false_positive
+ )
# report on detected objects
for obj_name, count in obj_counter.items():
if count != self.object_counts[obj_name]:
self.object_counts[obj_name] = count
- for c in self.callbacks['object_status']:
+ for c in self.callbacks["object_status"]:
c(self.name, obj_name, count)
# expire any objects that are >0 and no longer detected
- expired_objects = [obj_name for obj_name, count in self.object_counts.items() if count > 0 and not obj_name in obj_counter]
+ expired_objects = [
+ obj_name
+ for obj_name, count in self.object_counts.items()
+ if count > 0 and obj_name not in obj_counter
+ ]
for obj_name in expired_objects:
self.object_counts[obj_name] = 0
- for c in self.callbacks['object_status']:
+ for c in self.callbacks["object_status"]:
c(self.name, obj_name, 0)
- for c in self.callbacks['snapshot']:
+ for c in self.callbacks["snapshot"]:
c(self.name, self.best_objects[obj_name], frame_time)
# cleanup thumbnail frame cache
- current_thumb_frames = set([obj.thumbnail_data['frame_time'] for obj in self.tracked_objects.values() if not obj.false_positive])
- current_best_frames = set([obj.thumbnail_data['frame_time'] for obj in self.best_objects.values()])
- thumb_frames_to_delete = [t for t in self.frame_cache.keys() if not t in current_thumb_frames and not t in current_best_frames]
+ current_thumb_frames = {
+ obj.thumbnail_data["frame_time"]
+ for obj in tracked_objects.values()
+ if not obj.false_positive
+ }
+ current_best_frames = {
+ obj.thumbnail_data["frame_time"] for obj in self.best_objects.values()
+ }
+ thumb_frames_to_delete = [
+ t
+ for t in self.frame_cache.keys()
+ if t not in current_thumb_frames and t not in current_best_frames
+ ]
for t in thumb_frames_to_delete:
del self.frame_cache[t]
with self.current_frame_lock:
+ self.tracked_objects = tracked_objects
+ self.current_frame_time = frame_time
+ self.motion_boxes = motion_boxes
+ self.regions = regions
self._current_frame = current_frame
- if not self.previous_frame_id is None:
- self.frame_manager.delete(self.previous_frame_id)
+ if self.previous_frame_id is not None:
+ self.frame_manager.close(self.previous_frame_id)
self.previous_frame_id = frame_id
+
class TrackedObjectProcessor(threading.Thread):
- def __init__(self, config: FrigateConfig, client, topic_prefix, tracked_objects_queue, event_queue, event_processed_queue, stop_event):
+ def __init__(
+ self,
+ config: FrigateConfig,
+ client,
+ topic_prefix,
+ tracked_objects_queue,
+ event_queue,
+ event_processed_queue,
+ video_output_queue,
+ stop_event,
+ ):
threading.Thread.__init__(self)
self.name = "detected_frames_processor"
self.config = config
@@ -433,42 +594,79 @@ def __init__(self, config: FrigateConfig, client, topic_prefix, tracked_objects_
self.tracked_objects_queue = tracked_objects_queue
self.event_queue = event_queue
self.event_processed_queue = event_processed_queue
+ self.video_output_queue = video_output_queue
self.stop_event = stop_event
self.camera_states: Dict[str, CameraState] = {}
self.frame_manager = SharedMemoryFrameManager()
def start(camera, obj: TrackedObject, current_frame_time):
- self.event_queue.put(('start', camera, obj.to_dict()))
+ self.event_queue.put(("start", camera, obj.to_dict()))
def update(camera, obj: TrackedObject, current_frame_time):
after = obj.to_dict()
- message = { 'before': obj.previous, 'after': after, 'type': 'new' if obj.previous['false_positive'] else 'update' }
- self.client.publish(f"{self.topic_prefix}/events", json.dumps(message), retain=False)
+ message = {
+ "before": obj.previous,
+ "after": after,
+ "type": "new" if obj.previous["false_positive"] else "update",
+ }
+ self.client.publish(
+ f"{self.topic_prefix}/events", json.dumps(message), retain=False
+ )
obj.previous = after
def end(camera, obj: TrackedObject, current_frame_time):
- snapshot_config = self.config.cameras[camera].snapshots
- event_data = obj.to_dict(include_thumbnail=True)
- event_data['has_snapshot'] = False
- if not obj.false_positive:
- message = { 'before': obj.previous, 'after': obj.to_dict(), 'type': 'end' }
- self.client.publish(f"{self.topic_prefix}/events", json.dumps(message), retain=False)
- # write snapshot to disk if enabled
- if snapshot_config.enabled and self.should_save_snapshot(camera, obj):
- jpg_bytes = obj.get_jpg_bytes(
- timestamp=snapshot_config.timestamp,
- bounding_box=snapshot_config.bounding_box,
- crop=snapshot_config.crop,
- height=snapshot_config.height
- )
- if jpg_bytes is None:
- logger.warning(f"Unable to save snapshot for {obj.obj_data['id']}.")
+ # populate has_snapshot
+ obj.has_snapshot = self.should_save_snapshot(camera, obj)
+ obj.has_clip = self.should_retain_recording(camera, obj)
+
+ # write the snapshot to disk
+ if obj.has_snapshot:
+ snapshot_config: SnapshotsConfig = self.config.cameras[camera].snapshots
+ jpg_bytes = obj.get_jpg_bytes(
+ timestamp=snapshot_config.timestamp,
+ bounding_box=snapshot_config.bounding_box,
+ crop=snapshot_config.crop,
+ height=snapshot_config.height,
+ quality=snapshot_config.quality,
+ )
+ if jpg_bytes is None:
+ logger.warning(f"Unable to save snapshot for {obj.obj_data['id']}.")
+ else:
+ with open(
+ os.path.join(CLIPS_DIR, f"{camera}-{obj.obj_data['id']}.jpg"),
+ "wb",
+ ) as j:
+ j.write(jpg_bytes)
+
+ # write clean snapshot if enabled
+ if snapshot_config.clean_copy:
+ png_bytes = obj.get_clean_png()
+ if png_bytes is None:
+ logger.warning(
+ f"Unable to save clean snapshot for {obj.obj_data['id']}."
+ )
else:
- with open(os.path.join(CLIPS_DIR, f"{camera}-{obj.obj_data['id']}.jpg"), 'wb') as j:
- j.write(jpg_bytes)
- event_data['has_snapshot'] = True
- self.event_queue.put(('end', camera, event_data))
-
+ with open(
+ os.path.join(
+ CLIPS_DIR,
+ f"{camera}-{obj.obj_data['id']}-clean.png",
+ ),
+ "wb",
+ ) as p:
+ p.write(png_bytes)
+
+ if not obj.false_positive:
+ message = {
+ "before": obj.previous,
+ "after": obj.to_dict(),
+ "type": "end",
+ }
+ self.client.publish(
+ f"{self.topic_prefix}/events", json.dumps(message), retain=False
+ )
+
+ self.event_queue.put(("end", camera, obj.to_dict(include_thumbnail=True)))
+
def snapshot(camera, obj: TrackedObject, current_frame_time):
mqtt_config = self.config.cameras[camera].mqtt
if mqtt_config.enabled and self.should_mqtt_snapshot(camera, obj):
@@ -476,24 +674,33 @@ def snapshot(camera, obj: TrackedObject, current_frame_time):
timestamp=mqtt_config.timestamp,
bounding_box=mqtt_config.bounding_box,
crop=mqtt_config.crop,
- height=mqtt_config.height
+ height=mqtt_config.height,
+ quality=mqtt_config.quality,
)
if jpg_bytes is None:
- logger.warning(f"Unable to send mqtt snapshot for {obj.obj_data['id']}.")
+ logger.warning(
+ f"Unable to send mqtt snapshot for {obj.obj_data['id']}."
+ )
else:
- self.client.publish(f"{self.topic_prefix}/{camera}/{obj.obj_data['label']}/snapshot", jpg_bytes, retain=True)
-
+ self.client.publish(
+ f"{self.topic_prefix}/{camera}/{obj.obj_data['label']}/snapshot",
+ jpg_bytes,
+ retain=True,
+ )
+
def object_status(camera, object_name, status):
- self.client.publish(f"{self.topic_prefix}/{camera}/{object_name}", status, retain=False)
+ self.client.publish(
+ f"{self.topic_prefix}/{camera}/{object_name}", status, retain=False
+ )
for camera in self.config.cameras.keys():
camera_state = CameraState(camera, self.config, self.frame_manager)
- camera_state.on('start', start)
- camera_state.on('update', update)
- camera_state.on('end', end)
- camera_state.on('snapshot', snapshot)
- camera_state.on('object_status', object_status)
+ camera_state.on("start", start)
+ camera_state.on("update", update)
+ camera_state.on("end", end)
+ camera_state.on("snapshot", snapshot)
+ camera_state.on("object_status", object_status)
self.camera_states[camera] = camera_state
# {
@@ -504,13 +711,53 @@ def object_status(camera, object_name, status):
# }
# }
# }
- self.zone_data = defaultdict(lambda: defaultdict(lambda: {}))
+ self.zone_data = defaultdict(lambda: defaultdict(dict))
def should_save_snapshot(self, camera, obj: TrackedObject):
+ if obj.false_positive:
+ return False
+
+ snapshot_config: SnapshotsConfig = self.config.cameras[camera].snapshots
+
+ if not snapshot_config.enabled:
+ return False
+
# if there are required zones and there is no overlap
- required_zones = self.config.cameras[camera].snapshots.required_zones
+ required_zones = snapshot_config.required_zones
if len(required_zones) > 0 and not obj.entered_zones & set(required_zones):
- logger.debug(f"Not creating snapshot for {obj.obj_data['id']} because it did not enter required zones")
+ logger.debug(
+ f"Not creating snapshot for {obj.obj_data['id']} because it did not enter required zones"
+ )
+ return False
+
+ return True
+
+ def should_retain_recording(self, camera, obj: TrackedObject):
+ if obj.false_positive:
+ return False
+
+ record_config: RecordConfig = self.config.cameras[camera].record
+
+ # Recording is disabled
+ if not record_config.enabled:
+ return False
+
+ # If there are required zones and there is no overlap
+ required_zones = record_config.events.required_zones
+ if len(required_zones) > 0 and not set(obj.entered_zones) & set(required_zones):
+ logger.debug(
+ f"Not creating clip for {obj.obj_data['id']} because it did not enter required zones"
+ )
+ return False
+
+ # If the required objects are not present
+ if (
+ record_config.events.objects is not None
+ and obj.obj_data["label"] not in record_config.events.objects
+ ):
+ logger.debug(
+ f"Not creating clip for {obj.obj_data['id']} because it did not contain required objects"
+ )
return False
return True
@@ -519,7 +766,9 @@ def should_mqtt_snapshot(self, camera, obj: TrackedObject):
# if there are required zones and there is no overlap
required_zones = self.config.cameras[camera].mqtt.required_zones
if len(required_zones) > 0 and not obj.entered_zones & set(required_zones):
- logger.debug(f"Not sending mqtt for {obj.obj_data['id']} because it did not enter required zones")
+ logger.debug(
+ f"Not sending mqtt for {obj.obj_data['id']} because it did not enter required zones"
+ )
return False
return True
@@ -530,7 +779,9 @@ def get_best(self, camera, label):
if label in camera_state.best_objects:
best_obj = camera_state.best_objects[label]
best = best_obj.thumbnail_data.copy()
- best['frame'] = camera_state.frame_cache.get(best_obj.thumbnail_data['frame_time'])
+ best["frame"] = camera_state.frame_cache.get(
+ best_obj.thumbnail_data["frame_time"]
+ )
return best
else:
return {}
@@ -539,46 +790,73 @@ def get_current_frame(self, camera, draw_options={}):
return self.camera_states[camera].get_current_frame(draw_options)
def run(self):
- while True:
- if self.stop_event.is_set():
- logger.info(f"Exiting object processor...")
- break
-
+ while not self.stop_event.is_set():
try:
- camera, frame_time, current_tracked_objects, motion_boxes, regions = self.tracked_objects_queue.get(True, 10)
+ (
+ camera,
+ frame_time,
+ current_tracked_objects,
+ motion_boxes,
+ regions,
+ ) = self.tracked_objects_queue.get(True, 10)
except queue.Empty:
continue
camera_state = self.camera_states[camera]
- camera_state.update(frame_time, current_tracked_objects, motion_boxes, regions)
+ camera_state.update(
+ frame_time, current_tracked_objects, motion_boxes, regions
+ )
+
+ self.video_output_queue.put(
+ (
+ camera,
+ frame_time,
+ current_tracked_objects,
+ motion_boxes,
+ regions,
+ )
+ )
# update zone counts for each label
# for each zone in the current camera
for zone in self.config.cameras[camera].zones.keys():
# count labels for the camera in the zone
- obj_counter = Counter()
- for obj in camera_state.tracked_objects.values():
- if zone in obj.current_zones and not obj.false_positive:
- obj_counter[obj.obj_data['label']] += 1
+ obj_counter = Counter(
+ obj.obj_data["label"]
+ for obj in camera_state.tracked_objects.values()
+ if zone in obj.current_zones and not obj.false_positive
+ )
# update counts and publish status
- for label in set(list(self.zone_data[zone].keys()) + list(obj_counter.keys())):
+ for label in set(self.zone_data[zone].keys()) | set(obj_counter.keys()):
# if we have previously published a count for this zone/label
zone_label = self.zone_data[zone][label]
if camera in zone_label:
current_count = sum(zone_label.values())
- zone_label[camera] = obj_counter[label] if label in obj_counter else 0
+ zone_label[camera] = (
+ obj_counter[label] if label in obj_counter else 0
+ )
new_count = sum(zone_label.values())
if new_count != current_count:
- self.client.publish(f"{self.topic_prefix}/{zone}/{label}", new_count, retain=False)
+ self.client.publish(
+ f"{self.topic_prefix}/{zone}/{label}",
+ new_count,
+ retain=False,
+ )
# if this is a new zone/label combo for this camera
else:
if label in obj_counter:
zone_label[camera] = obj_counter[label]
- self.client.publish(f"{self.topic_prefix}/{zone}/{label}", obj_counter[label], retain=False)
+ self.client.publish(
+ f"{self.topic_prefix}/{zone}/{label}",
+ obj_counter[label],
+ retain=False,
+ )
# cleanup event finished queue
while not self.event_processed_queue.empty():
event_id, camera = self.event_processed_queue.get()
self.camera_states[camera].finished(event_id)
+
+ logger.info(f"Exiting object processor...")
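
For reference, a minimal sketch (not part of the patch) of the publish-throttling rule CameraState.update applies above: an MQTT update goes out when the object has a newer state and more than five seconds have passed since the last publish, or immediately when the object changes zones, which is why update() now returns a (significant_update, zone_change) tuple. The standalone function name here is illustrative only.

def should_publish(frame_time, last_published, last_updated, zone_change, interval=5):
    # publish if the object was updated since the last publish and the interval elapsed
    stale = frame_time - last_published > interval and last_updated > last_published
    return stale or zone_change

# a zone change publishes immediately, even inside the 5 second window
assert should_publish(101.0, last_published=100.0, last_updated=100.5, zone_change=True)
assert not should_publish(101.0, last_published=100.0, last_updated=100.5, zone_change=False)
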
diff --git a/frigate/objects.py b/frigate/objects.py
--- a/frigate/objects.py
+++ b/frigate/objects.py
@@ -16,24 +16,24 @@
from frigate.util import draw_box_with_label
-class ObjectTracker():
+class ObjectTracker:
def __init__(self, config: DetectConfig):
self.tracked_objects = {}
self.disappeared = {}
self.max_disappeared = config.max_disappeared
def register(self, index, obj):
- rand_id = ''.join(random.choices(string.ascii_lowercase + string.digits, k=6))
+ rand_id = "".join(random.choices(string.ascii_lowercase + string.digits, k=6))
id = f"{obj['frame_time']}-{rand_id}"
- obj['id'] = id
- obj['start_time'] = obj['frame_time']
+ obj["id"] = id
+ obj["start_time"] = obj["frame_time"]
self.tracked_objects[id] = obj
self.disappeared[id] = 0
def deregister(self, id):
del self.tracked_objects[id]
del self.disappeared[id]
-
+
def update(self, id, new_obj):
self.disappeared[id] = 0
self.tracked_objects[id].update(new_obj)
@@ -42,97 +42,90 @@ def match_and_update(self, frame_time, new_objects):
# group by name
new_object_groups = defaultdict(lambda: [])
for obj in new_objects:
- new_object_groups[obj[0]].append({
- 'label': obj[0],
- 'score': obj[1],
- 'box': obj[2],
- 'area': obj[3],
- 'region': obj[4],
- 'frame_time': frame_time
- })
-
+ new_object_groups[obj[0]].append(
+ {
+ "label": obj[0],
+ "score": obj[1],
+ "box": obj[2],
+ "area": obj[3],
+ "region": obj[4],
+ "frame_time": frame_time,
+ }
+ )
+
# update any tracked objects with labels that are not
# seen in the current objects and deregister if needed
for obj in list(self.tracked_objects.values()):
- if not obj['label'] in new_object_groups:
- if self.disappeared[obj['id']] >= self.max_disappeared:
- self.deregister(obj['id'])
+ if not obj["label"] in new_object_groups:
+ if self.disappeared[obj["id"]] >= self.max_disappeared:
+ self.deregister(obj["id"])
else:
- self.disappeared[obj['id']] += 1
-
+ self.disappeared[obj["id"]] += 1
+
if len(new_objects) == 0:
return
-
+
# track objects for each label type
for label, group in new_object_groups.items():
- current_objects = [o for o in self.tracked_objects.values() if o['label'] == label]
- current_ids = [o['id'] for o in current_objects]
- current_centroids = np.array([o['centroid'] for o in current_objects])
+ current_objects = [
+ o for o in self.tracked_objects.values() if o["label"] == label
+ ]
+ current_ids = [o["id"] for o in current_objects]
+ current_centroids = np.array([o["centroid"] for o in current_objects])
# compute centroids of new objects
for obj in group:
- centroid_x = int((obj['box'][0]+obj['box'][2]) / 2.0)
- centroid_y = int((obj['box'][1]+obj['box'][3]) / 2.0)
- obj['centroid'] = (centroid_x, centroid_y)
+ centroid_x = int((obj["box"][0] + obj["box"][2]) / 2.0)
+ centroid_y = int((obj["box"][1] + obj["box"][3]) / 2.0)
+ obj["centroid"] = (centroid_x, centroid_y)
if len(current_objects) == 0:
for index, obj in enumerate(group):
self.register(index, obj)
- return
-
- new_centroids = np.array([o['centroid'] for o in group])
+ continue
+
+ new_centroids = np.array([o["centroid"] for o in group])
# compute the distance between each pair of tracked
# centroids and new centroids, respectively -- our
- # goal will be to match each new centroid to an existing
+ # goal will be to match each current centroid to a new
# object centroid
D = dist.cdist(current_centroids, new_centroids)
- # in order to perform this matching we must (1) find the
- # smallest value in each row and then (2) sort the row
- # indexes based on their minimum values so that the row
- # with the smallest value is at the *front* of the index
- # list
+ # in order to perform this matching we must (1) find the smallest
+ # value in each row (i.e. the distance from each current object to
+ # the closest new object) and then (2) sort the row indexes based
+ # on their minimum values so that the row with the smallest
+ # distance (the best match) is at the *front* of the index list
rows = D.min(axis=1).argsort()
- # next, we perform a similar process on the columns by
- # finding the smallest value in each column and then
- # sorting using the previously computed row index list
+ # next, we determine which new object each existing object matched
+ # against, and apply the same sorting as was applied previously
cols = D.argmin(axis=1)[rows]
- # in order to determine if we need to update, register,
- # or deregister an object we need to keep track of which
- # of the rows and column indexes we have already examined
- usedRows = set()
- usedCols = set()
-
- # loop over the combination of the (row, column) index
- # tuples
- for (row, col) in zip(rows, cols):
- # if we have already examined either the row or
- # column value before, ignore it
- if row in usedRows or col in usedCols:
- continue
-
- # otherwise, grab the object ID for the current row,
- # set its new centroid, and reset the disappeared
- # counter
+ # many current objects may register with each new object, so only
+ # match the closest ones. unique returns the indices of the first
+ # occurrences of each value, and because the rows are sorted by
+            # distance, this will be the index of the closest match
+ _, index = np.unique(cols, return_index=True)
+ rows = rows[index]
+ cols = cols[index]
+
+ # loop over the combination of the (row, column) index tuples
+ for row, col in zip(rows, cols):
+ # grab the object ID for the current row, set its new centroid,
+ # and reset the disappeared counter
objectID = current_ids[row]
self.update(objectID, group[col])
- # indicate that we have examined each of the row and
- # column indexes, respectively
- usedRows.add(row)
- usedCols.add(col)
-
- # compute the column index we have NOT yet examined
- unusedRows = set(range(0, D.shape[0])).difference(usedRows)
- unusedCols = set(range(0, D.shape[1])).difference(usedCols)
+ # compute the row and column indices we have NOT yet examined
+ unusedRows = set(range(D.shape[0])).difference(rows)
+ unusedCols = set(range(D.shape[1])).difference(cols)
# in the event that the number of object centroids is
- # equal or greater than the number of input centroids
- # we need to check and see if some of these objects have
- # potentially disappeared
+ # equal or greater than the number of input centroids
+ # we need to check and see if some of these objects have
+ # potentially disappeared
if D.shape[0] >= D.shape[1]:
for row in unusedRows:
id = current_ids[row]
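
The matching above is greedy nearest-neighbour matching on centroids: each tracked object is paired with its closest new detection, and np.unique keeps only the closest pairing when several tracked objects pick the same detection. A small worked example (a sketch only, not part of the patch; the centroid values are made up):

import numpy as np
from scipy.spatial import distance as dist

current_centroids = np.array([(10, 10), (50, 50)])          # two tracked objects
new_centroids = np.array([(12, 11), (48, 52), (200, 200)])  # three new detections

D = dist.cdist(current_centroids, new_centroids)
rows = D.min(axis=1).argsort()      # tracked objects, best match first
cols = D.argmin(axis=1)[rows]       # nearest detection for each of those objects
_, index = np.unique(cols, return_index=True)
rows, cols = rows[index], cols[index]

# object 0 pairs with detection 0, object 1 with detection 1; detection 2 is
# left unmatched and is registered as a new object by the caller
print([(int(r), int(c)) for r, c in zip(rows, cols)])  # [(0, 0), (1, 1)]
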
diff --git a/frigate/output.py b/frigate/output.py
new file mode 100644
--- /dev/null
+++ b/frigate/output.py
@@ -0,0 +1,455 @@
+import datetime
+import glob
+import logging
+import math
+import multiprocessing as mp
+import queue
+import signal
+import subprocess as sp
+import threading
+from multiprocessing import shared_memory
+from wsgiref.simple_server import make_server
+
+import cv2
+import numpy as np
+from setproctitle import setproctitle
+from ws4py.server.wsgirefserver import (
+ WebSocketWSGIHandler,
+ WebSocketWSGIRequestHandler,
+ WSGIServer,
+)
+from ws4py.server.wsgiutils import WebSocketWSGIApplication
+from ws4py.websocket import WebSocket
+
+from frigate.config import BirdseyeModeEnum, FrigateConfig
+from frigate.util import SharedMemoryFrameManager, copy_yuv_to_position, get_yuv_crop
+
+logger = logging.getLogger(__name__)
+
+
+class FFMpegConverter:
+ def __init__(self, in_width, in_height, out_width, out_height, quality):
+ ffmpeg_cmd = f"ffmpeg -f rawvideo -pix_fmt yuv420p -video_size {in_width}x{in_height} -i pipe: -f mpegts -s {out_width}x{out_height} -codec:v mpeg1video -q {quality} -bf 0 pipe:".split(
+ " "
+ )
+ self.process = sp.Popen(
+ ffmpeg_cmd,
+ stdout=sp.PIPE,
+ stderr=sp.DEVNULL,
+ stdin=sp.PIPE,
+ start_new_session=True,
+ )
+
+ def write(self, b):
+ self.process.stdin.write(b)
+
+ def read(self, length):
+ try:
+ return self.process.stdout.read1(length)
+ except ValueError:
+ return False
+
+ def exit(self):
+ self.process.terminate()
+ try:
+ self.process.communicate(timeout=30)
+ except sp.TimeoutExpired:
+ self.process.kill()
+ self.process.communicate()
+
+
+class BroadcastThread(threading.Thread):
+ def __init__(self, camera, converter, websocket_server):
+ super(BroadcastThread, self).__init__()
+ self.camera = camera
+ self.converter = converter
+ self.websocket_server = websocket_server
+
+ def run(self):
+ while True:
+ buf = self.converter.read(65536)
+ if buf:
+ manager = self.websocket_server.manager
+ with manager.lock:
+ websockets = manager.websockets.copy()
+ ws_iter = iter(websockets.values())
+
+ for ws in ws_iter:
+ if (
+ not ws.terminated
+ and ws.environ["PATH_INFO"] == f"/{self.camera}"
+ ):
+ try:
+ ws.send(buf, binary=True)
+ except:
+ pass
+ elif self.converter.process.poll() is not None:
+ break
+
+
+class BirdsEyeFrameManager:
+ def __init__(self, config, frame_manager: SharedMemoryFrameManager):
+ self.config = config
+ self.mode = config.birdseye.mode
+ self.frame_manager = frame_manager
+ width = config.birdseye.width
+ height = config.birdseye.height
+ self.frame_shape = (height, width)
+ self.yuv_shape = (height * 3 // 2, width)
+ self.frame = np.ndarray(self.yuv_shape, dtype=np.uint8)
+
+ # initialize the frame as black and with the frigate logo
+ self.blank_frame = np.zeros(self.yuv_shape, np.uint8)
+ self.blank_frame[:] = 128
+ self.blank_frame[0 : self.frame_shape[0], 0 : self.frame_shape[1]] = 16
+
+ # find and copy the logo on the blank frame
+ logo_files = glob.glob("/opt/frigate/web/apple-touch-icon.*.png")
+ frigate_logo = None
+ if len(logo_files) > 0:
+ frigate_logo = cv2.imread(logo_files[0], cv2.IMREAD_UNCHANGED)
+ if not frigate_logo is None:
+ transparent_layer = frigate_logo[:, :, 3]
+ y_offset = height // 2 - transparent_layer.shape[0] // 2
+ x_offset = width // 2 - transparent_layer.shape[1] // 2
+ self.blank_frame[
+ y_offset : y_offset + transparent_layer.shape[1],
+ x_offset : x_offset + transparent_layer.shape[0],
+ ] = transparent_layer
+ else:
+ logger.warning("Unable to read frigate logo")
+
+ self.frame[:] = self.blank_frame
+
+ self.cameras = {}
+ for camera, settings in self.config.cameras.items():
+ # precalculate the coordinates for all the channels
+ y, u1, u2, v1, v2 = get_yuv_crop(
+ settings.frame_shape_yuv,
+ (
+ 0,
+ 0,
+ settings.frame_shape[1],
+ settings.frame_shape[0],
+ ),
+ )
+ self.cameras[camera] = {
+ "last_active_frame": 0.0,
+ "current_frame": 0.0,
+ "layout_frame": 0.0,
+ "channel_dims": {
+ "y": y,
+ "u1": u1,
+ "u2": u2,
+ "v1": v1,
+ "v2": v2,
+ },
+ }
+
+ self.camera_layout = []
+ self.active_cameras = set()
+ self.layout_dim = 0
+ self.last_output_time = 0.0
+
+ def clear_frame(self):
+ logger.debug(f"Clearing the birdseye frame")
+ self.frame[:] = self.blank_frame
+
+ def copy_to_position(self, position, camera=None, frame_time=None):
+ if camera is None:
+ frame = None
+ channel_dims = None
+ else:
+ try:
+ frame = self.frame_manager.get(
+ f"{camera}{frame_time}", self.config.cameras[camera].frame_shape_yuv
+ )
+ except FileNotFoundError:
+ # TODO: better frame management would prevent this edge case
+ logger.warning(
+ f"Unable to copy frame {camera}{frame_time} to birdseye."
+ )
+ return
+ channel_dims = self.cameras[camera]["channel_dims"]
+
+ copy_yuv_to_position(
+ self.frame,
+ self.layout_offsets[position],
+ self.layout_frame_shape,
+ frame,
+ channel_dims,
+ )
+
+ def camera_active(self, object_box_count, motion_box_count):
+ if self.mode == BirdseyeModeEnum.continuous:
+ return True
+
+ if (
+ self.mode == BirdseyeModeEnum.motion
+ and object_box_count + motion_box_count > 0
+ ):
+ return True
+
+ if self.mode == BirdseyeModeEnum.objects and object_box_count > 0:
+ return True
+
+ def update_frame(self):
+ # determine how many cameras are tracking objects within the last 30 seconds
+ active_cameras = set(
+ [
+ cam
+ for cam, cam_data in self.cameras.items()
+ if cam_data["last_active_frame"] > 0
+ and cam_data["current_frame"] - cam_data["last_active_frame"] < 30
+ ]
+ )
+
+ # if there are no active cameras
+ if len(active_cameras) == 0:
+ # if the layout is already cleared
+ if len(self.camera_layout) == 0:
+ return False
+ # if the layout needs to be cleared
+ else:
+ self.camera_layout = []
+ self.layout_dim = 0
+ self.clear_frame()
+ return True
+
+ # calculate layout dimensions
+ layout_dim = math.ceil(math.sqrt(len(active_cameras)))
+
+ # reset the layout if it needs to be different
+ if layout_dim != self.layout_dim:
+ logger.debug(f"Changing layout size from {self.layout_dim} to {layout_dim}")
+ self.layout_dim = layout_dim
+
+ self.camera_layout = [None] * layout_dim * layout_dim
+
+ # calculate resolution of each position in the layout
+ self.layout_frame_shape = (
+ self.frame_shape[0] // layout_dim, # height
+ self.frame_shape[1] // layout_dim, # width
+ )
+
+ self.clear_frame()
+
+ for cam_data in self.cameras.values():
+ cam_data["layout_frame"] = 0.0
+
+ self.active_cameras = set()
+
+ self.layout_offsets = []
+
+ # calculate the x and y offset for each position in the layout
+ for position in range(0, len(self.camera_layout)):
+ y_offset = self.layout_frame_shape[0] * math.floor(
+ position / self.layout_dim
+ )
+ x_offset = self.layout_frame_shape[1] * (position % self.layout_dim)
+ self.layout_offsets.append((y_offset, x_offset))
+
+ removed_cameras = self.active_cameras.difference(active_cameras)
+ added_cameras = active_cameras.difference(self.active_cameras)
+
+ self.active_cameras = active_cameras
+
+ # update each position in the layout
+ for position, camera in enumerate(self.camera_layout, start=0):
+
+ # if this camera was removed, replace it or clear it
+ if camera in removed_cameras:
+ # if replacing this camera with a newly added one
+ if len(added_cameras) > 0:
+ added_camera = added_cameras.pop()
+ self.camera_layout[position] = added_camera
+ self.copy_to_position(
+ position,
+ added_camera,
+ self.cameras[added_camera]["current_frame"],
+ )
+ self.cameras[added_camera]["layout_frame"] = self.cameras[
+ added_camera
+ ]["current_frame"]
+ # if removing this camera with no replacement
+ else:
+ self.camera_layout[position] = None
+ self.copy_to_position(position)
+ removed_cameras.remove(camera)
+ # if an empty spot and there are cameras to add
+ elif camera is None and len(added_cameras) > 0:
+ added_camera = added_cameras.pop()
+ self.camera_layout[position] = added_camera
+ self.copy_to_position(
+ position,
+ added_camera,
+ self.cameras[added_camera]["current_frame"],
+ )
+ self.cameras[added_camera]["layout_frame"] = self.cameras[added_camera][
+ "current_frame"
+ ]
+ # if not an empty spot and the camera has a newer frame, copy it
+ elif (
+ not camera is None
+ and self.cameras[camera]["current_frame"]
+ != self.cameras[camera]["layout_frame"]
+ ):
+ self.copy_to_position(
+ position, camera, self.cameras[camera]["current_frame"]
+ )
+ self.cameras[camera]["layout_frame"] = self.cameras[camera][
+ "current_frame"
+ ]
+
+ return True
+
+ def update(self, camera, object_count, motion_count, frame_time, frame) -> bool:
+
+ # update the last active frame for the camera
+ self.cameras[camera]["current_frame"] = frame_time
+ if self.camera_active(object_count, motion_count):
+ self.cameras[camera]["last_active_frame"] = frame_time
+
+ now = datetime.datetime.now().timestamp()
+
+ # limit output to 10 fps
+ if (now - self.last_output_time) < 1 / 10:
+ return False
+
+ # if the frame was updated or the fps is too low, send frame
+ if self.update_frame() or (now - self.last_output_time) > 1:
+ self.last_output_time = now
+ return True
+ return False
+
+
+def output_frames(config: FrigateConfig, video_output_queue):
+ threading.current_thread().name = f"output"
+ setproctitle(f"frigate.output")
+
+ stop_event = mp.Event()
+
+ def receiveSignal(signalNumber, frame):
+ stop_event.set()
+
+ signal.signal(signal.SIGTERM, receiveSignal)
+ signal.signal(signal.SIGINT, receiveSignal)
+
+ frame_manager = SharedMemoryFrameManager()
+ previous_frames = {}
+
+ # start a websocket server on 8082
+ WebSocketWSGIHandler.http_version = "1.1"
+ websocket_server = make_server(
+ "127.0.0.1",
+ 8082,
+ server_class=WSGIServer,
+ handler_class=WebSocketWSGIRequestHandler,
+ app=WebSocketWSGIApplication(handler_cls=WebSocket),
+ )
+ websocket_server.initialize_websockets_manager()
+ websocket_thread = threading.Thread(target=websocket_server.serve_forever)
+
+ converters = {}
+ broadcasters = {}
+
+ for camera, cam_config in config.cameras.items():
+ width = int(
+ cam_config.live.height
+ * (cam_config.frame_shape[1] / cam_config.frame_shape[0])
+ )
+ converters[camera] = FFMpegConverter(
+ cam_config.frame_shape[1],
+ cam_config.frame_shape[0],
+ width,
+ cam_config.live.height,
+ cam_config.live.quality,
+ )
+ broadcasters[camera] = BroadcastThread(
+ camera, converters[camera], websocket_server
+ )
+
+ if config.birdseye.enabled:
+ converters["birdseye"] = FFMpegConverter(
+ config.birdseye.width,
+ config.birdseye.height,
+ config.birdseye.width,
+ config.birdseye.height,
+ config.birdseye.quality,
+ )
+ broadcasters["birdseye"] = BroadcastThread(
+ "birdseye", converters["birdseye"], websocket_server
+ )
+
+ websocket_thread.start()
+
+ for t in broadcasters.values():
+ t.start()
+
+ birdseye_manager = BirdsEyeFrameManager(config, frame_manager)
+
+ while not stop_event.is_set():
+ try:
+ (
+ camera,
+ frame_time,
+ current_tracked_objects,
+ motion_boxes,
+ regions,
+ ) = video_output_queue.get(True, 10)
+ except queue.Empty:
+ continue
+
+ frame_id = f"{camera}{frame_time}"
+
+ frame = frame_manager.get(frame_id, config.cameras[camera].frame_shape_yuv)
+
+ # send camera frame to ffmpeg process if websockets are connected
+ if any(
+ ws.environ["PATH_INFO"].endswith(camera) for ws in websocket_server.manager
+ ):
+ # write to the converter for the camera if clients are listening to the specific camera
+ converters[camera].write(frame.tobytes())
+
+ # update birdseye if websockets are connected
+ if config.birdseye.enabled and any(
+ ws.environ["PATH_INFO"].endswith("birdseye")
+ for ws in websocket_server.manager
+ ):
+ if birdseye_manager.update(
+ camera,
+ len(current_tracked_objects),
+ len(motion_boxes),
+ frame_time,
+ frame,
+ ):
+ converters["birdseye"].write(birdseye_manager.frame.tobytes())
+
+ if camera in previous_frames:
+ frame_manager.delete(f"{camera}{previous_frames[camera]}")
+
+ previous_frames[camera] = frame_time
+
+ while not video_output_queue.empty():
+ (
+ camera,
+ frame_time,
+ current_tracked_objects,
+ motion_boxes,
+ regions,
+ ) = video_output_queue.get(True, 10)
+
+ frame_id = f"{camera}{frame_time}"
+ frame = frame_manager.get(frame_id, config.cameras[camera].frame_shape_yuv)
+ frame_manager.delete(frame_id)
+
+ for c in converters.values():
+ c.exit()
+ for b in broadcasters.values():
+ b.join()
+ websocket_server.manager.close_all()
+ websocket_server.manager.stop()
+ websocket_server.manager.join()
+ websocket_server.shutdown()
+ websocket_thread.join()
+ logger.info("exiting output process...")
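
The birdseye layout above is plain grid arithmetic: BirdsEyeFrameManager.update_frame sizes the grid as ceil(sqrt(active cameras)) and gives each position an equal share of the output frame. A small worked example (a sketch only, not part of the patch; the 1280x720 output size is illustrative, the real values come from config.birdseye.width and height):

import math

height, width = 720, 1280   # birdseye frame_shape is (height, width)
active = 3                  # cameras active within the last 30 seconds

layout_dim = math.ceil(math.sqrt(active))                   # -> 2, i.e. a 2x2 grid
cell_h, cell_w = height // layout_dim, width // layout_dim  # -> 360 x 640 per cell

# (y_offset, x_offset) of each grid position, as computed in update_frame
offsets = [
    (cell_h * (pos // layout_dim), cell_w * (pos % layout_dim))
    for pos in range(layout_dim * layout_dim)
]
print(offsets)  # [(0, 0), (0, 640), (360, 0), (360, 640)]
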
diff --git a/frigate/process_clip.py b/frigate/process_clip.py
--- a/frigate/process_clip.py
+++ b/frigate/process_clip.py
@@ -14,39 +14,41 @@
from frigate.config import FRIGATE_CONFIG_SCHEMA, FrigateConfig
from frigate.edgetpu import LocalObjectDetector
from frigate.motion import MotionDetector
-from frigate.object_processing import COLOR_MAP, CameraState
+from frigate.object_processing import CameraState
from frigate.objects import ObjectTracker
-from frigate.util import (DictFrameManager, EventsPerSecond,
- SharedMemoryFrameManager, draw_box_with_label)
-from frigate.video import (capture_frames, process_frames,
- start_or_restart_ffmpeg)
+from frigate.util import (
+ DictFrameManager,
+ EventsPerSecond,
+ SharedMemoryFrameManager,
+ draw_box_with_label,
+)
+from frigate.video import capture_frames, process_frames, start_or_restart_ffmpeg
logging.basicConfig()
logging.root.setLevel(logging.DEBUG)
logger = logging.getLogger(__name__)
+
def get_frame_shape(source):
- ffprobe_cmd = " ".join([
- 'ffprobe',
- '-v',
- 'panic',
- '-show_error',
- '-show_streams',
- '-of',
- 'json',
- '"'+source+'"'
- ])
- p = sp.Popen(ffprobe_cmd, stdout=sp.PIPE, shell=True)
- (output, err) = p.communicate()
- p_status = p.wait()
- info = json.loads(output)
-
- video_info = [s for s in info['streams'] if s['codec_type'] == 'video'][0]
-
- if video_info['height'] != 0 and video_info['width'] != 0:
- return (video_info['height'], video_info['width'], 3)
-
+ ffprobe_cmd = [
+ "ffprobe",
+ "-v",
+ "panic",
+ "-show_error",
+ "-show_streams",
+ "-of",
+ "json",
+ source,
+ ]
+ p = sp.run(ffprobe_cmd, capture_output=True)
+ info = json.loads(p.stdout)
+
+ video_info = [s for s in info["streams"] if s["codec_type"] == "video"][0]
+
+ if video_info["height"] != 0 and video_info["width"] != 0:
+ return (video_info["height"], video_info["width"], 3)
+
# fallback to using opencv if ffprobe didnt succeed
video = cv2.VideoCapture(source)
ret, frame = video.read()
@@ -54,14 +56,17 @@ def get_frame_shape(source):
video.release()
return frame_shape
-class ProcessClip():
+
+class ProcessClip:
def __init__(self, clip_path, frame_shape, config: FrigateConfig):
self.clip_path = clip_path
- self.camera_name = 'camera'
+ self.camera_name = "camera"
self.config = config
- self.camera_config = self.config.cameras['camera']
+ self.camera_config = self.config.cameras["camera"]
self.frame_shape = self.camera_config.frame_shape
- self.ffmpeg_cmd = [c['cmd'] for c in self.camera_config.ffmpeg_cmds if 'detect' in c['roles']][0]
+ self.ffmpeg_cmd = [
+ c["cmd"] for c in self.camera_config.ffmpeg_cmds if "detect" in c["roles"]
+ ][0]
self.frame_manager = SharedMemoryFrameManager()
self.frame_queue = mp.Queue()
self.detected_objects_queue = mp.Queue()
@@ -70,37 +75,66 @@ def __init__(self, clip_path, frame_shape, config: FrigateConfig):
def load_frames(self):
fps = EventsPerSecond()
skipped_fps = EventsPerSecond()
- current_frame = mp.Value('d', 0.0)
- frame_size = self.camera_config.frame_shape_yuv[0] * self.camera_config.frame_shape_yuv[1]
- ffmpeg_process = start_or_restart_ffmpeg(self.ffmpeg_cmd, logger, sp.DEVNULL, frame_size)
- capture_frames(ffmpeg_process, self.camera_name, self.camera_config.frame_shape_yuv, self.frame_manager,
- self.frame_queue, fps, skipped_fps, current_frame)
+ current_frame = mp.Value("d", 0.0)
+ frame_size = (
+ self.camera_config.frame_shape_yuv[0]
+ * self.camera_config.frame_shape_yuv[1]
+ )
+ ffmpeg_process = start_or_restart_ffmpeg(
+ self.ffmpeg_cmd, logger, sp.DEVNULL, frame_size
+ )
+ capture_frames(
+ ffmpeg_process,
+ self.camera_name,
+ self.camera_config.frame_shape_yuv,
+ self.frame_manager,
+ self.frame_queue,
+ fps,
+ skipped_fps,
+ current_frame,
+ )
ffmpeg_process.wait()
ffmpeg_process.communicate()
-
- def process_frames(self, objects_to_track=['person'], object_filters={}):
+
+ def process_frames(self, objects_to_track=["person"], object_filters={}):
mask = np.zeros((self.frame_shape[0], self.frame_shape[1], 1), np.uint8)
mask[:] = 255
- motion_detector = MotionDetector(self.frame_shape, mask, self.camera_config.motion)
+ motion_detector = MotionDetector(
+ self.frame_shape, mask, self.camera_config.motion
+ )
- object_detector = LocalObjectDetector(labels='/labelmap.txt')
+ object_detector = LocalObjectDetector(labels="/labelmap.txt")
object_tracker = ObjectTracker(self.camera_config.detect)
process_info = {
- 'process_fps': mp.Value('d', 0.0),
- 'detection_fps': mp.Value('d', 0.0),
- 'detection_frame': mp.Value('d', 0.0)
+ "process_fps": mp.Value("d", 0.0),
+ "detection_fps": mp.Value("d", 0.0),
+ "detection_frame": mp.Value("d", 0.0),
}
stop_event = mp.Event()
model_shape = (self.config.model.height, self.config.model.width)
- process_frames(self.camera_name, self.frame_queue, self.frame_shape, model_shape,
- self.frame_manager, motion_detector, object_detector, object_tracker,
- self.detected_objects_queue, process_info,
- objects_to_track, object_filters, mask, stop_event, exit_on_empty=True)
-
+ process_frames(
+ self.camera_name,
+ self.frame_queue,
+ self.frame_shape,
+ model_shape,
+ self.frame_manager,
+ motion_detector,
+ object_detector,
+ object_tracker,
+ self.detected_objects_queue,
+ process_info,
+ objects_to_track,
+ object_filters,
+ mask,
+ stop_event,
+ exit_on_empty=True,
+ )
+
def top_object(self, debug_path=None):
obj_detected = False
top_computed_score = 0.0
+
def handle_event(name, obj, frame_time):
nonlocal obj_detected
nonlocal top_computed_score
@@ -108,48 +142,85 @@ def handle_event(name, obj, frame_time):
top_computed_score = obj.computed_score
if not obj.false_positive:
obj_detected = True
- self.camera_state.on('new', handle_event)
- self.camera_state.on('update', handle_event)
- while(not self.detected_objects_queue.empty()):
- camera_name, frame_time, current_tracked_objects, motion_boxes, regions = self.detected_objects_queue.get()
+ self.camera_state.on("new", handle_event)
+ self.camera_state.on("update", handle_event)
+
+ while not self.detected_objects_queue.empty():
+ (
+ camera_name,
+ frame_time,
+ current_tracked_objects,
+ motion_boxes,
+ regions,
+ ) = self.detected_objects_queue.get()
if not debug_path is None:
- self.save_debug_frame(debug_path, frame_time, current_tracked_objects.values())
+ self.save_debug_frame(
+ debug_path, frame_time, current_tracked_objects.values()
+ )
+
+ self.camera_state.update(
+ frame_time, current_tracked_objects, motion_boxes, regions
+ )
- self.camera_state.update(frame_time, current_tracked_objects, motion_boxes, regions)
-
self.frame_manager.delete(self.camera_state.previous_frame_id)
-
- return {
- 'object_detected': obj_detected,
- 'top_score': top_computed_score
- }
-
+
+ return {"object_detected": obj_detected, "top_score": top_computed_score}
+
def save_debug_frame(self, debug_path, frame_time, tracked_objects):
- current_frame = cv2.cvtColor(self.frame_manager.get(f"{self.camera_name}{frame_time}", self.camera_config.frame_shape_yuv), cv2.COLOR_YUV2BGR_I420)
+ current_frame = cv2.cvtColor(
+ self.frame_manager.get(
+ f"{self.camera_name}{frame_time}", self.camera_config.frame_shape_yuv
+ ),
+ cv2.COLOR_YUV2BGR_I420,
+ )
# draw the bounding boxes on the frame
for obj in tracked_objects:
thickness = 2
- color = (0,0,175)
+ color = (0, 0, 175)
- if obj['frame_time'] != frame_time:
+ if obj["frame_time"] != frame_time:
thickness = 1
- color = (255,0,0)
+ color = (255, 0, 0)
else:
- color = (255,255,0)
+ color = (255, 255, 0)
# draw the bounding boxes on the frame
- box = obj['box']
- draw_box_with_label(current_frame, box[0], box[1], box[2], box[3], obj['id'], f"{int(obj['score']*100)}% {int(obj['area'])}", thickness=thickness, color=color)
+ box = obj["box"]
+ draw_box_with_label(
+ current_frame,
+ box[0],
+ box[1],
+ box[2],
+ box[3],
+ obj["id"],
+ f"{int(obj['score']*100)}% {int(obj['area'])}",
+ thickness=thickness,
+ color=color,
+ )
# draw the regions on the frame
- region = obj['region']
- draw_box_with_label(current_frame, region[0], region[1], region[2], region[3], 'region', "", thickness=1, color=(0,255,0))
-
- cv2.imwrite(f"{os.path.join(debug_path, os.path.basename(self.clip_path))}.{int(frame_time*1000000)}.jpg", current_frame)
+ region = obj["region"]
+ draw_box_with_label(
+ current_frame,
+ region[0],
+ region[1],
+ region[2],
+ region[3],
+ "region",
+ "",
+ thickness=1,
+ color=(0, 255, 0),
+ )
+
+ cv2.imwrite(
+ f"{os.path.join(debug_path, os.path.basename(self.clip_path))}.{int(frame_time*1000000)}.jpg",
+ current_frame,
+ )
+
@click.command()
@click.option("-p", "--path", required=True, help="Path to clip or directory to test.")
[email protected]("-l", "--label", default='person', help="Label name to detect.")
[email protected]("-l", "--label", default="person", help="Label name to detect.")
@click.option("-t", "--threshold", default=0.85, help="Threshold value for objects.")
@click.option("-s", "--scores", default=None, help="File to save csv of top scores")
@click.option("--debug-path", default=None, help="Path to output frames for debugging.")
@@ -159,34 +230,37 @@ def process(path, label, threshold, scores, debug_path):
files = os.listdir(path)
files.sort()
clips = [os.path.join(path, file) for file in files]
- elif os.path.isfile(path):
+ elif os.path.isfile(path):
clips.append(path)
json_config = {
- 'mqtt': {
- 'host': 'mqtt'
- },
- 'cameras': {
- 'camera': {
- 'ffmpeg': {
- 'inputs': [
- { 'path': 'path.mp4', 'global_args': '', 'input_args': '', 'roles': ['detect'] }
+ "mqtt": {"host": "mqtt"},
+ "cameras": {
+ "camera": {
+ "ffmpeg": {
+ "inputs": [
+ {
+ "path": "path.mp4",
+ "global_args": "",
+ "input_args": "",
+ "roles": ["detect"],
+ }
]
},
- 'height': 1920,
- 'width': 1080
+ "height": 1920,
+ "width": 1080,
}
- }
+ },
}
results = []
for c in clips:
logger.info(c)
frame_shape = get_frame_shape(c)
-
- json_config['cameras']['camera']['height'] = frame_shape[0]
- json_config['cameras']['camera']['width'] = frame_shape[1]
- json_config['cameras']['camera']['ffmpeg']['inputs'][0]['path'] = c
+
+ json_config["cameras"]["camera"]["height"] = frame_shape[0]
+ json_config["cameras"]["camera"]["width"] = frame_shape[1]
+ json_config["cameras"]["camera"]["ffmpeg"]["inputs"][0]["path"] = c
config = FrigateConfig(config=FRIGATE_CONFIG_SCHEMA(json_config))
@@ -197,12 +271,15 @@ def process(path, label, threshold, scores, debug_path):
results.append((c, process_clip.top_object(debug_path)))
if not scores is None:
- with open(scores, 'w') as writer:
+ with open(scores, "w") as writer:
for result in results:
writer.write(f"{result[0]},{result[1]['top_score']}\n")
-
- positive_count = sum(1 for result in results if result[1]['object_detected'])
- print(f"Objects were detected in {positive_count}/{len(results)}({positive_count/len(results)*100:.2f}%) clip(s).")
-if __name__ == '__main__':
+ positive_count = sum(1 for result in results if result[1]["object_detected"])
+ print(
+ f"Objects were detected in {positive_count}/{len(results)}({positive_count/len(results)*100:.2f}%) clip(s)."
+ )
+
+
+if __name__ == "__main__":
process()
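
Note: the debug path in process_clip above annotates each saved frame with draw_box_with_label before writing it out. As a rough standalone sketch of that OpenCV overlay technique (the helper name, colors and output path below are illustrative, not taken from this patch):

import cv2
import numpy as np

def draw_labeled_box(frame, box, label, color=(0, 0, 175), thickness=2):
    # draw the bounding box, then a filled strip above it that holds the label text
    x_min, y_min, x_max, y_max = box
    cv2.rectangle(frame, (x_min, y_min), (x_max, y_max), color, thickness)
    (text_w, text_h), baseline = cv2.getTextSize(
        label, cv2.FONT_HERSHEY_SIMPLEX, fontScale=0.5, thickness=2
    )
    cv2.rectangle(
        frame,
        (x_min, y_min - text_h - baseline),
        (x_min + text_w + 2, y_min),
        color,
        cv2.FILLED,
    )
    cv2.putText(
        frame,
        label,
        (x_min, y_min - baseline),
        cv2.FONT_HERSHEY_SIMPLEX,
        fontScale=0.5,
        color=(0, 0, 0),
        thickness=2,
    )

frame = np.zeros((480, 640, 3), dtype=np.uint8)
draw_labeled_box(frame, (100, 120, 300, 360), "person 87% 48000")
cv2.imwrite("/tmp/annotated_frame.jpg", frame)
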
diff --git a/frigate/record.py b/frigate/record.py
--- a/frigate/record.py
+++ b/frigate/record.py
@@ -1,125 +1,305 @@
import datetime
-import json
+import itertools
import logging
import os
-import queue
+import random
+import shutil
+import string
import subprocess as sp
import threading
-import time
-from collections import defaultdict
from pathlib import Path
import psutil
+from peewee import JOIN, DoesNotExist
from frigate.config import FrigateConfig
-from frigate.const import RECORD_DIR, CLIPS_DIR, CACHE_DIR
+from frigate.const import CACHE_DIR, RECORD_DIR
+from frigate.models import Event, Recordings
logger = logging.getLogger(__name__)
SECONDS_IN_DAY = 60 * 60 * 24
+
def remove_empty_directories(directory):
- # list all directories recursively and sort them by path,
- # longest first
- paths = sorted(
- [x[0] for x in os.walk(RECORD_DIR)],
- key=lambda p: len(str(p)),
- reverse=True,
- )
- for path in paths:
- # don't delete the parent
- if path == RECORD_DIR:
- continue
- if len(os.listdir(path)) == 0:
- os.rmdir(path)
+ # list all directories recursively and sort them by path,
+ # longest first
+ paths = sorted(
+ [x[0] for x in os.walk(RECORD_DIR)],
+ key=lambda p: len(str(p)),
+ reverse=True,
+ )
+ for path in paths:
+ # don't delete the parent
+ if path == RECORD_DIR:
+ continue
+ if len(os.listdir(path)) == 0:
+ os.rmdir(path)
+
class RecordingMaintainer(threading.Thread):
def __init__(self, config: FrigateConfig, stop_event):
threading.Thread.__init__(self)
- self.name = 'recording_maint'
+ self.name = "recording_maint"
self.config = config
self.stop_event = stop_event
def move_files(self):
- recordings = [d for d in os.listdir(RECORD_DIR) if os.path.isfile(os.path.join(RECORD_DIR, d)) and d.endswith(".mp4")]
+ recordings = [
+ d
+ for d in os.listdir(CACHE_DIR)
+ if os.path.isfile(os.path.join(CACHE_DIR, d))
+ and d.endswith(".mp4")
+ and not d.startswith("clip_")
+ ]
files_in_use = []
for process in psutil.process_iter():
try:
- if process.name() != 'ffmpeg':
+ if process.name() != "ffmpeg":
continue
flist = process.open_files()
if flist:
for nt in flist:
- if nt.path.startswith(RECORD_DIR):
- files_in_use.append(nt.path.split('/')[-1])
+ if nt.path.startswith(CACHE_DIR):
+ files_in_use.append(nt.path.split("/")[-1])
except:
continue
for f in recordings:
+ # Skip files currently in use
if f in files_in_use:
continue
- camera = '-'.join(f.split('-')[:-1])
- start_time = datetime.datetime.strptime(f.split('-')[-1].split('.')[0], '%Y%m%d%H%M%S')
-
- ffprobe_cmd = " ".join([
- 'ffprobe',
- '-v',
- 'error',
- '-show_entries',
- 'format=duration',
- '-of',
- 'default=noprint_wrappers=1:nokey=1',
- f"{os.path.join(RECORD_DIR,f)}"
- ])
- p = sp.Popen(ffprobe_cmd, stdout=sp.PIPE, shell=True)
- (output, err) = p.communicate()
- p_status = p.wait()
- if p_status == 0:
- duration = float(output.decode('utf-8').strip())
+ cache_path = os.path.join(CACHE_DIR, f)
+ basename = os.path.splitext(f)[0]
+ camera, date = basename.rsplit("-", maxsplit=1)
+ start_time = datetime.datetime.strptime(date, "%Y%m%d%H%M%S")
+
+ # Just delete files if recordings are turned off
+ if (
+ not camera in self.config.cameras
+ or not self.config.cameras[camera].record.enabled
+ ):
+ Path(cache_path).unlink(missing_ok=True)
+ continue
+
+ ffprobe_cmd = [
+ "ffprobe",
+ "-v",
+ "error",
+ "-show_entries",
+ "format=duration",
+ "-of",
+ "default=noprint_wrappers=1:nokey=1",
+ f"{cache_path}",
+ ]
+ p = sp.run(ffprobe_cmd, capture_output=True)
+ if p.returncode == 0:
+ duration = float(p.stdout.decode().strip())
+ end_time = start_time + datetime.timedelta(seconds=duration)
else:
- logger.info(f"bad file: {f}")
- os.remove(os.path.join(RECORD_DIR,f))
+ logger.warning(f"Discarding a corrupt recording segment: {f}")
+ Path(cache_path).unlink(missing_ok=True)
continue
- directory = os.path.join(RECORD_DIR, start_time.strftime('%Y-%m/%d/%H'), camera)
+ directory = os.path.join(
+ RECORD_DIR, start_time.strftime("%Y-%m/%d/%H"), camera
+ )
if not os.path.exists(directory):
os.makedirs(directory)
file_name = f"{start_time.strftime('%M.%S.mp4')}"
+ file_path = os.path.join(directory, file_name)
+
+ # copy then delete is required when recordings are stored on some network drives
+ shutil.copyfile(cache_path, file_path)
+ os.remove(cache_path)
+
+ rand_id = "".join(
+ random.choices(string.ascii_lowercase + string.digits, k=6)
+ )
+ Recordings.create(
+ id=f"{start_time.timestamp()}-{rand_id}",
+ camera=camera,
+ path=file_path,
+ start_time=start_time.timestamp(),
+ end_time=end_time.timestamp(),
+ duration=duration,
+ )
+
+ def run(self):
+ # Check for new files every 5 seconds
+ while not self.stop_event.wait(5):
+ self.move_files()
+
+ logger.info(f"Exiting recording maintenance...")
+
+
+class RecordingCleanup(threading.Thread):
+ def __init__(self, config: FrigateConfig, stop_event):
+ threading.Thread.__init__(self)
+ self.name = "recording_cleanup"
+ self.config = config
+ self.stop_event = stop_event
+
+ def clean_tmp_clips(self):
+ # delete any clips more than 5 minutes old
+ for p in Path("/tmp/cache").rglob("clip_*.mp4"):
+ logger.debug(f"Checking tmp clip {p}.")
+ if p.stat().st_mtime < (datetime.datetime.now().timestamp() - 60 * 1):
+ logger.debug("Deleting tmp clip.")
+ p.unlink(missing_ok=True)
+
+ def expire_recordings(self):
+ logger.debug("Start expire recordings (new).")
+
+ logger.debug("Start deleted cameras.")
+ # Handle deleted cameras
+ expire_days = self.config.record.retain_days
+ expire_before = (
+ datetime.datetime.now() - datetime.timedelta(days=expire_days)
+ ).timestamp()
+ no_camera_recordings: Recordings = Recordings.select().where(
+ Recordings.camera.not_in(list(self.config.cameras.keys())),
+ Recordings.end_time < expire_before,
+ )
+
+ deleted_recordings = set()
+ for recording in no_camera_recordings:
+ Path(recording.path).unlink(missing_ok=True)
+ deleted_recordings.add(recording.id)
+
+ logger.debug(f"Expiring {len(deleted_recordings)} recordings")
+ Recordings.delete().where(Recordings.id << deleted_recordings).execute()
+ logger.debug("End deleted cameras.")
- os.rename(os.path.join(RECORD_DIR,f), os.path.join(directory,file_name))
+ logger.debug("Start all cameras.")
+ for camera, config in self.config.cameras.items():
+ logger.debug(f"Start camera: {camera}.")
+ # When deleting recordings without events, we have to keep at LEAST the configured max clip duration
+ min_end = (
+ datetime.datetime.now()
+ - datetime.timedelta(seconds=config.record.events.max_seconds)
+ ).timestamp()
+ expire_days = config.record.retain_days
+ expire_before = (
+ datetime.datetime.now() - datetime.timedelta(days=expire_days)
+ ).timestamp()
+ expire_date = min(min_end, expire_before)
+
+ # Get recordings to check for expiration
+ recordings: Recordings = (
+ Recordings.select()
+ .where(
+ Recordings.camera == camera,
+ Recordings.end_time < expire_date,
+ )
+ .order_by(Recordings.start_time)
+ )
+
+ # Get all the events to check against
+ events: Event = (
+ Event.select()
+ .where(
+ Event.camera == camera, Event.end_time < expire_date, Event.has_clip
+ )
+ .order_by(Event.start_time)
+ .objects()
+ )
+
+ # loop over recordings and see if they overlap with any non-expired events
+ event_start = 0
+ deleted_recordings = set()
+ for recording in recordings.objects().iterator():
+ keep = False
+ # Now look for a reason to keep this recording segment
+ for idx in range(event_start, len(events)):
+ event = events[idx]
+
+ # if the event starts in the future, stop checking events
+ # and let this recording segment expire
+ if event.start_time > recording.end_time:
+ keep = False
+ break
+
+ # if the event ends after the recording starts, keep it
+ # and stop looking at events
+ if event.end_time >= recording.start_time:
+ keep = True
+ break
+
+ # if the event ends before this recording segment starts, skip
+ # this event and check the next event for an overlap.
+ # since the events and recordings are sorted, we can skip events
+ # that end before the previous recording segment started on future segments
+ if event.end_time < recording.start_time:
+ event_start = idx
+
+ # Delete recordings outside of the retention window
+ if not keep:
+ Path(recording.path).unlink(missing_ok=True)
+ deleted_recordings.add(recording.id)
+
+ logger.debug(f"Expiring {len(deleted_recordings)} recordings")
+ Recordings.delete().where(Recordings.id << deleted_recordings).execute()
+
+ logger.debug(f"End camera: {camera}.")
+
+ logger.debug("End all cameras.")
+ logger.debug("End expire recordings (new).")
def expire_files(self):
+ logger.debug("Start expire files (legacy).")
+
+ default_expire = (
+ datetime.datetime.now().timestamp()
+ - SECONDS_IN_DAY * self.config.record.retain_days
+ )
delete_before = {}
+
for name, camera in self.config.cameras.items():
- delete_before[name] = datetime.datetime.now().timestamp() - SECONDS_IN_DAY*camera.record.retain_days
+ delete_before[name] = (
+ datetime.datetime.now().timestamp()
+ - SECONDS_IN_DAY * camera.record.retain_days
+ )
- for p in Path('/media/frigate/recordings').rglob("*.mp4"):
- if not p.parent.name in delete_before:
- continue
- if p.stat().st_mtime < delete_before[p.parent.name]:
+ # find all the recordings older than the oldest recording in the db
+ try:
+ oldest_recording = (
+ Recordings.select().order_by(Recordings.start_time.desc()).get()
+ )
+
+ oldest_timestamp = oldest_recording.start_time
+ except DoesNotExist:
+ oldest_timestamp = datetime.datetime.now().timestamp()
+
+ logger.debug(f"Oldest recording in the db: {oldest_timestamp}")
+ process = sp.run(
+ ["find", RECORD_DIR, "-type", "f", "-newermt", f"@{oldest_timestamp}"],
+ capture_output=True,
+ text=True,
+ )
+ files_to_check = process.stdout.splitlines()
+
+ for f in files_to_check:
+ p = Path(f)
+ if p.stat().st_mtime < delete_before.get(p.parent.name, default_expire):
p.unlink(missing_ok=True)
+ logger.debug("End expire files (legacy).")
+
def run(self):
- counter = 0
- self.expire_files()
- while(True):
- if self.stop_event.is_set():
- logger.info(f"Exiting recording maintenance...")
+ # Expire recordings every minute, clean directories every hour.
+ for counter in itertools.cycle(range(60)):
+ if self.stop_event.wait(60):
+ logger.info(f"Exiting recording cleanup...")
break
- # only expire events every 10 minutes, but check for new files every 10 seconds
- time.sleep(10)
- counter = counter + 1
- if counter > 60:
+ self.expire_recordings()
+ self.clean_tmp_clips()
+
+ if counter == 0:
self.expire_files()
remove_empty_directories(RECORD_DIR)
- counter = 0
-
- self.move_files()
-
-
-
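
Note: the expire_recordings logic added above walks time-sorted recording segments and events in lockstep, advancing a shared cursor so the event list is only scanned once, and keeps any segment that overlaps a retained event. A minimal sketch of that interval-overlap check using plain (start, end) tuples instead of peewee rows (all timestamps below are made up for illustration):

# recordings and events are sorted by start time, mirroring the ORDER BY clauses above
recordings = [(0, 10), (10, 20), (20, 30), (30, 40)]
events = [(12, 18), (33, 50)]

kept, deleted = [], []
event_start = 0
for rec_start, rec_end in recordings:
    keep = False
    for idx in range(event_start, len(events)):
        ev_start, ev_end = events[idx]
        # event starts after this segment ends: no later event can overlap it either
        if ev_start > rec_end:
            break
        # event ends after this segment starts: they overlap, keep the segment
        if ev_end >= rec_start:
            keep = True
            break
        # event ended before this segment started: future segments can skip it too
        event_start = idx
    (kept if keep else deleted).append((rec_start, rec_end))

print("keep:", kept)      # [(10, 20), (30, 40)]
print("delete:", deleted) # [(0, 10), (20, 30)]
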
diff --git a/frigate/stats.py b/frigate/stats.py
--- a/frigate/stats.py
+++ b/frigate/stats.py
@@ -11,14 +11,16 @@
logger = logging.getLogger(__name__)
+
def stats_init(camera_metrics, detectors):
stats_tracking = {
- 'camera_metrics': camera_metrics,
- 'detectors': detectors,
- 'started': int(time.time())
+ "camera_metrics": camera_metrics,
+ "detectors": detectors,
+ "started": int(time.time()),
}
return stats_tracking
+
def get_fs_type(path):
bestMatch = ""
fsType = ""
@@ -28,53 +30,62 @@ def get_fs_type(path):
bestMatch = part.mountpoint
return fsType
+
def stats_snapshot(stats_tracking):
- camera_metrics = stats_tracking['camera_metrics']
+ camera_metrics = stats_tracking["camera_metrics"]
stats = {}
total_detection_fps = 0
for name, camera_stats in camera_metrics.items():
- total_detection_fps += camera_stats['detection_fps'].value
+ total_detection_fps += camera_stats["detection_fps"].value
stats[name] = {
- 'camera_fps': round(camera_stats['camera_fps'].value, 2),
- 'process_fps': round(camera_stats['process_fps'].value, 2),
- 'skipped_fps': round(camera_stats['skipped_fps'].value, 2),
- 'detection_fps': round(camera_stats['detection_fps'].value, 2),
- 'pid': camera_stats['process'].pid,
- 'capture_pid': camera_stats['capture_process'].pid
+ "camera_fps": round(camera_stats["camera_fps"].value, 2),
+ "process_fps": round(camera_stats["process_fps"].value, 2),
+ "skipped_fps": round(camera_stats["skipped_fps"].value, 2),
+ "detection_fps": round(camera_stats["detection_fps"].value, 2),
+ "pid": camera_stats["process"].pid,
+ "capture_pid": camera_stats["capture_process"].pid,
}
- stats['detectors'] = {}
+ stats["detectors"] = {}
for name, detector in stats_tracking["detectors"].items():
- stats['detectors'][name] = {
- 'inference_speed': round(detector.avg_inference_speed.value * 1000, 2),
- 'detection_start': detector.detection_start.value,
- 'pid': detector.detect_process.pid
+ stats["detectors"][name] = {
+ "inference_speed": round(detector.avg_inference_speed.value * 1000, 2),
+ "detection_start": detector.detection_start.value,
+ "pid": detector.detect_process.pid,
}
- stats['detection_fps'] = round(total_detection_fps, 2)
+ stats["detection_fps"] = round(total_detection_fps, 2)
- stats['service'] = {
- 'uptime': (int(time.time()) - stats_tracking['started']),
- 'version': VERSION,
- 'storage': {}
+ stats["service"] = {
+ "uptime": (int(time.time()) - stats_tracking["started"]),
+ "version": VERSION,
+ "storage": {},
}
for path in [RECORD_DIR, CLIPS_DIR, CACHE_DIR, "/dev/shm"]:
storage_stats = shutil.disk_usage(path)
- stats['service']['storage'][path] = {
- 'total': round(storage_stats.total/1000000, 1),
- 'used': round(storage_stats.used/1000000, 1),
- 'free': round(storage_stats.free/1000000, 1),
- 'mount_type': get_fs_type(path)
+ stats["service"]["storage"][path] = {
+ "total": round(storage_stats.total / 1000000, 1),
+ "used": round(storage_stats.used / 1000000, 1),
+ "free": round(storage_stats.free / 1000000, 1),
+ "mount_type": get_fs_type(path),
}
return stats
+
class StatsEmitter(threading.Thread):
- def __init__(self, config: FrigateConfig, stats_tracking, mqtt_client, topic_prefix, stop_event):
+ def __init__(
+ self,
+ config: FrigateConfig,
+ stats_tracking,
+ mqtt_client,
+ topic_prefix,
+ stop_event,
+ ):
threading.Thread.__init__(self)
- self.name = 'frigate_stats_emitter'
+ self.name = "frigate_stats_emitter"
self.config = config
self.stats_tracking = stats_tracking
self.mqtt_client = mqtt_client
@@ -83,10 +94,9 @@ def __init__(self, config: FrigateConfig, stats_tracking, mqtt_client, topic_pre
def run(self):
time.sleep(10)
- while True:
- if self.stop_event.is_set():
- logger.info(f"Exiting watchdog...")
- break
+ while not self.stop_event.wait(self.config.mqtt.stats_interval):
stats = stats_snapshot(self.stats_tracking)
- self.mqtt_client.publish(f"{self.topic_prefix}/stats", json.dumps(stats), retain=False)
- time.sleep(self.config.mqtt.stats_interval)
+ self.mqtt_client.publish(
+ f"{self.topic_prefix}/stats", json.dumps(stats), retain=False
+ )
+        logger.info(f"Exiting watchdog...")
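
Note: several loops in this patch (the stats emitter above, the watchdogs, the recording maintainer) replace a time.sleep plus stop_event.is_set() pair with stop_event.wait(interval), which paces the loop but returns immediately once shutdown is requested. A small self-contained sketch of that pattern (the worker and interval are invented for the example):

import threading
import time

stop_event = threading.Event()

def periodic_worker(interval=1.0):
    # wait() returns False when the timeout elapses and True as soon as the event
    # is set, so the loop both paces itself and exits promptly on shutdown
    while not stop_event.wait(interval):
        print("doing periodic work at", time.monotonic())
    print("worker exiting")

t = threading.Thread(target=periodic_worker)
t.start()
time.sleep(3.5)
stop_event.set()  # wakes the worker immediately instead of after the full interval
t.join()
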
diff --git a/frigate/util.py b/frigate/util.py
--- a/frigate/util.py
+++ b/frigate/util.py
@@ -1,8 +1,10 @@
import collections
+import copy
import datetime
import hashlib
import json
import logging
+import math
import signal
import subprocess as sp
import threading
@@ -15,13 +17,139 @@
import cv2
import matplotlib.pyplot as plt
import numpy as np
+import os
+import psutil
logger = logging.getLogger(__name__)
-def draw_box_with_label(frame, x_min, y_min, x_max, y_max, label, info, thickness=2, color=None, position='ul'):
+def deep_merge(dct1: dict, dct2: dict, override=False, merge_lists=False) -> dict:
+ """
+ :param dct1: First dict to merge
+ :param dct2: Second dict to merge
+    :param override: if the same key exists in both dictionaries, should it be overridden? otherwise ignore. (default=False)
+    :return: The merged dictionary
+ """
+ merged = copy.deepcopy(dct1)
+ for k, v2 in dct2.items():
+ if k in merged:
+ v1 = merged[k]
+ if isinstance(v1, dict) and isinstance(v2, collections.Mapping):
+ merged[k] = deep_merge(v1, v2, override)
+ elif isinstance(v1, list) and isinstance(v2, list):
+ if merge_lists:
+ merged[k] = v1 + v2
+ else:
+ if override:
+ merged[k] = copy.deepcopy(v2)
+        else:
+            merged[k] = copy.deepcopy(v2)
+ return merged
+
+
+def draw_timestamp(
+ frame,
+ timestamp,
+ timestamp_format,
+ font_effect=None,
+ font_thickness=2,
+ font_color=(255, 255, 255),
+ position="tl",
+):
+ time_to_show = datetime.datetime.fromtimestamp(timestamp).strftime(timestamp_format)
+
+ # calculate a dynamic font size
+ size = cv2.getTextSize(
+ time_to_show,
+ cv2.FONT_HERSHEY_SIMPLEX,
+ fontScale=1.0,
+ thickness=font_thickness,
+ )
+
+ text_width = size[0][0]
+ desired_size = max(150, 0.33 * frame.shape[1])
+ font_scale = desired_size / text_width
+
+ # calculate the actual size with the dynamic scale
+ size = cv2.getTextSize(
+ time_to_show,
+ cv2.FONT_HERSHEY_SIMPLEX,
+ fontScale=font_scale,
+ thickness=font_thickness,
+ )
+
+ image_width = frame.shape[1]
+ image_height = frame.shape[0]
+ text_width = size[0][0]
+ text_height = size[0][1]
+ line_height = text_height + size[1]
+
+ if position == "tl":
+ text_offset_x = 0
+ text_offset_y = 0 if 0 < line_height else 0 - (line_height + 8)
+ elif position == "tr":
+ text_offset_x = image_width - text_width
+ text_offset_y = 0 if 0 < line_height else 0 - (line_height + 8)
+ elif position == "bl":
+ text_offset_x = 0
+ text_offset_y = image_height - (line_height + 8)
+ elif position == "br":
+ text_offset_x = image_width - text_width
+ text_offset_y = image_height - (line_height + 8)
+
+ if font_effect == "solid":
+ # make the coords of the box with a small padding of two pixels
+ timestamp_box_coords = np.array(
+ [
+ [text_offset_x, text_offset_y],
+ [text_offset_x + text_width, text_offset_y],
+ [text_offset_x + text_width, text_offset_y + line_height + 8],
+ [text_offset_x, text_offset_y + line_height + 8],
+ ]
+ )
+
+ cv2.fillPoly(
+ frame,
+ [timestamp_box_coords],
+ # inverse color of text for background for max. contrast
+ (255 - font_color[0], 255 - font_color[1], 255 - font_color[2]),
+ )
+ elif font_effect == "shadow":
+ cv2.putText(
+ frame,
+ time_to_show,
+ (text_offset_x + 3, text_offset_y + line_height),
+ cv2.FONT_HERSHEY_SIMPLEX,
+ fontScale=font_scale,
+ color=(255 - font_color[0], 255 - font_color[1], 255 - font_color[2]),
+ thickness=font_thickness,
+ )
+
+ cv2.putText(
+ frame,
+ time_to_show,
+ (text_offset_x, text_offset_y + line_height - 3),
+ cv2.FONT_HERSHEY_SIMPLEX,
+ fontScale=font_scale,
+ color=font_color,
+ thickness=font_thickness,
+ )
+
+
+def draw_box_with_label(
+ frame,
+ x_min,
+ y_min,
+ x_max,
+ y_max,
+ label,
+ info,
+ thickness=2,
+ color=None,
+ position="ul",
+):
if color is None:
- color = (0,0,255)
+ color = (0, 0, 255)
display_text = "{}: {}".format(label, info)
cv2.rectangle(frame, (x_min, y_min), (x_max, y_max), color, thickness)
font_scale = 0.5
@@ -32,208 +160,350 @@ def draw_box_with_label(frame, x_min, y_min, x_max, y_max, label, info, thicknes
text_height = size[0][1]
line_height = text_height + size[1]
# set the text start position
- if position == 'ul':
+ if position == "ul":
text_offset_x = x_min
- text_offset_y = 0 if y_min < line_height else y_min - (line_height+8)
- elif position == 'ur':
- text_offset_x = x_max - (text_width+8)
- text_offset_y = 0 if y_min < line_height else y_min - (line_height+8)
- elif position == 'bl':
+ text_offset_y = 0 if y_min < line_height else y_min - (line_height + 8)
+ elif position == "ur":
+ text_offset_x = x_max - (text_width + 8)
+ text_offset_y = 0 if y_min < line_height else y_min - (line_height + 8)
+ elif position == "bl":
text_offset_x = x_min
text_offset_y = y_max
- elif position == 'br':
- text_offset_x = x_max - (text_width+8)
+ elif position == "br":
+ text_offset_x = x_max - (text_width + 8)
text_offset_y = y_max
# make the coords of the box with a small padding of two pixels
- textbox_coords = ((text_offset_x, text_offset_y), (text_offset_x + text_width + 2, text_offset_y + line_height))
+ textbox_coords = (
+ (text_offset_x, text_offset_y),
+ (text_offset_x + text_width + 2, text_offset_y + line_height),
+ )
cv2.rectangle(frame, textbox_coords[0], textbox_coords[1], color, cv2.FILLED)
- cv2.putText(frame, display_text, (text_offset_x, text_offset_y + line_height - 3), font, fontScale=font_scale, color=(0, 0, 0), thickness=2)
+ cv2.putText(
+ frame,
+ display_text,
+ (text_offset_x, text_offset_y + line_height - 3),
+ font,
+ fontScale=font_scale,
+ color=(0, 0, 0),
+ thickness=2,
+ )
+
-def calculate_region(frame_shape, xmin, ymin, xmax, ymax, multiplier=2):
+def calculate_region(frame_shape, xmin, ymin, xmax, ymax, multiplier=2):
# size is the longest edge and divisible by 4
- size = int(max(xmax-xmin, ymax-ymin)//4*4*multiplier)
+ size = int(max(xmax - xmin, ymax - ymin) // 4 * 4 * multiplier)
# dont go any smaller than 300
if size < 300:
size = 300
# x_offset is midpoint of bounding box minus half the size
- x_offset = int((xmax-xmin)/2.0+xmin-size/2.0)
+ x_offset = int((xmax - xmin) / 2.0 + xmin - size / 2.0)
# if outside the image
if x_offset < 0:
x_offset = 0
- elif x_offset > (frame_shape[1]-size):
- x_offset = max(0, (frame_shape[1]-size))
+ elif x_offset > (frame_shape[1] - size):
+ x_offset = max(0, (frame_shape[1] - size))
# y_offset is midpoint of bounding box minus half the size
- y_offset = int((ymax-ymin)/2.0+ymin-size/2.0)
+ y_offset = int((ymax - ymin) / 2.0 + ymin - size / 2.0)
# # if outside the image
if y_offset < 0:
y_offset = 0
- elif y_offset > (frame_shape[0]-size):
- y_offset = max(0, (frame_shape[0]-size))
+ elif y_offset > (frame_shape[0] - size):
+ y_offset = max(0, (frame_shape[0] - size))
+
+ return (x_offset, y_offset, x_offset + size, y_offset + size)
- return (x_offset, y_offset, x_offset+size, y_offset+size)
def get_yuv_crop(frame_shape, crop):
# crop should be (x1,y1,x2,y2)
- frame_height = frame_shape[0]//3*2
+ frame_height = frame_shape[0] // 3 * 2
frame_width = frame_shape[1]
# compute the width/height of the uv channels
- uv_width = frame_width//2 # width of the uv channels
- uv_height = frame_height//4 # height of the uv channels
+ uv_width = frame_width // 2 # width of the uv channels
+ uv_height = frame_height // 4 # height of the uv channels
# compute the offset for upper left corner of the uv channels
- uv_x_offset = crop[0]//2 # x offset of the uv channels
- uv_y_offset = crop[1]//4 # y offset of the uv channels
+ uv_x_offset = crop[0] // 2 # x offset of the uv channels
+ uv_y_offset = crop[1] // 4 # y offset of the uv channels
# compute the width/height of the uv crops
- uv_crop_width = (crop[2] - crop[0])//2 # width of the cropped uv channels
- uv_crop_height = (crop[3] - crop[1])//4 # height of the cropped uv channels
+ uv_crop_width = (crop[2] - crop[0]) // 2 # width of the cropped uv channels
+ uv_crop_height = (crop[3] - crop[1]) // 4 # height of the cropped uv channels
# ensure crop dimensions are multiples of 2 and 4
- y = (
- crop[0],
- crop[1],
- crop[0] + uv_crop_width*2,
- crop[1] + uv_crop_height*4
- )
+ y = (crop[0], crop[1], crop[0] + uv_crop_width * 2, crop[1] + uv_crop_height * 4)
u1 = (
- 0 + uv_x_offset,
+ 0 + uv_x_offset,
frame_height + uv_y_offset,
- 0 + uv_x_offset + uv_crop_width,
- frame_height + uv_y_offset + uv_crop_height
+ 0 + uv_x_offset + uv_crop_width,
+ frame_height + uv_y_offset + uv_crop_height,
)
u2 = (
- uv_width + uv_x_offset,
+ uv_width + uv_x_offset,
frame_height + uv_y_offset,
- uv_width + uv_x_offset + uv_crop_width,
- frame_height + uv_y_offset + uv_crop_height
+ uv_width + uv_x_offset + uv_crop_width,
+ frame_height + uv_y_offset + uv_crop_height,
)
v1 = (
- 0 + uv_x_offset,
- frame_height + uv_height + uv_y_offset,
- 0 + uv_x_offset + uv_crop_width,
- frame_height + uv_height + uv_y_offset + uv_crop_height
+ 0 + uv_x_offset,
+ frame_height + uv_height + uv_y_offset,
+ 0 + uv_x_offset + uv_crop_width,
+ frame_height + uv_height + uv_y_offset + uv_crop_height,
)
v2 = (
- uv_width + uv_x_offset,
- frame_height + uv_height + uv_y_offset,
- uv_width + uv_x_offset + uv_crop_width,
- frame_height + uv_height + uv_y_offset + uv_crop_height
+ uv_width + uv_x_offset,
+ frame_height + uv_height + uv_y_offset,
+ uv_width + uv_x_offset + uv_crop_width,
+ frame_height + uv_height + uv_y_offset + uv_crop_height,
)
return y, u1, u2, v1, v2
-def yuv_region_2_rgb(frame, region):
- try:
- height = frame.shape[0]//3*2
- width = frame.shape[1]
-
- # get the crop box if the region extends beyond the frame
- crop_x1 = max(0, region[0])
- crop_y1 = max(0, region[1])
- # ensure these are a multiple of 4
- crop_x2 = min(width, region[2])
- crop_y2 = min(height, region[3])
- crop_box = (crop_x1, crop_y1, crop_x2, crop_y2)
-
- y, u1, u2, v1, v2 = get_yuv_crop(frame.shape, crop_box)
-
- # if the region starts outside the frame, indent the start point in the cropped frame
- y_channel_x_offset = abs(min(0, region[0]))
- y_channel_y_offset = abs(min(0, region[1]))
-
- uv_channel_x_offset = y_channel_x_offset//2
- uv_channel_y_offset = y_channel_y_offset//4
-
- # create the yuv region frame
- # make sure the size is a multiple of 4
- size = (region[3] - region[1])//4*4
- yuv_cropped_frame = np.zeros((size+size//2, size), np.uint8)
- # fill in black
- yuv_cropped_frame[:] = 128
- yuv_cropped_frame[0:size,0:size] = 16
-
- # copy the y channel
- yuv_cropped_frame[
- y_channel_y_offset:y_channel_y_offset + y[3] - y[1],
- y_channel_x_offset:y_channel_x_offset + y[2] - y[0]
- ] = frame[
- y[1]:y[3],
- y[0]:y[2]
- ]
-
- uv_crop_width = u1[2] - u1[0]
- uv_crop_height = u1[3] - u1[1]
- # copy u1
- yuv_cropped_frame[
- size + uv_channel_y_offset:size + uv_channel_y_offset + uv_crop_height,
- 0 + uv_channel_x_offset:0 + uv_channel_x_offset + uv_crop_width
- ] = frame[
- u1[1]:u1[3],
- u1[0]:u1[2]
- ]
+def yuv_crop_and_resize(frame, region, height=None):
+ # Crops and resizes a YUV frame while maintaining aspect ratio
+ # https://stackoverflow.com/a/57022634
+ height = frame.shape[0] // 3 * 2
+ width = frame.shape[1]
+
+ # get the crop box if the region extends beyond the frame
+ crop_x1 = max(0, region[0])
+ crop_y1 = max(0, region[1])
+ # ensure these are a multiple of 4
+ crop_x2 = min(width, region[2])
+ crop_y2 = min(height, region[3])
+ crop_box = (crop_x1, crop_y1, crop_x2, crop_y2)
+
+ y, u1, u2, v1, v2 = get_yuv_crop(frame.shape, crop_box)
+
+ # if the region starts outside the frame, indent the start point in the cropped frame
+ y_channel_x_offset = abs(min(0, region[0]))
+ y_channel_y_offset = abs(min(0, region[1]))
+
+ uv_channel_x_offset = y_channel_x_offset // 2
+ uv_channel_y_offset = y_channel_y_offset // 4
+
+ # create the yuv region frame
+ # make sure the size is a multiple of 4
+ # TODO: this should be based on the size after resize now
+ size = (region[3] - region[1]) // 4 * 4
+ yuv_cropped_frame = np.zeros((size + size // 2, size), np.uint8)
+ # fill in black
+ yuv_cropped_frame[:] = 128
+ yuv_cropped_frame[0:size, 0:size] = 16
+
+ # copy the y channel
+ yuv_cropped_frame[
+ y_channel_y_offset : y_channel_y_offset + y[3] - y[1],
+ y_channel_x_offset : y_channel_x_offset + y[2] - y[0],
+ ] = frame[y[1] : y[3], y[0] : y[2]]
+
+ uv_crop_width = u1[2] - u1[0]
+ uv_crop_height = u1[3] - u1[1]
+
+ # copy u1
+ yuv_cropped_frame[
+ size + uv_channel_y_offset : size + uv_channel_y_offset + uv_crop_height,
+ 0 + uv_channel_x_offset : 0 + uv_channel_x_offset + uv_crop_width,
+ ] = frame[u1[1] : u1[3], u1[0] : u1[2]]
+
+ # copy u2
+ yuv_cropped_frame[
+ size + uv_channel_y_offset : size + uv_channel_y_offset + uv_crop_height,
+ size // 2
+ + uv_channel_x_offset : size // 2
+ + uv_channel_x_offset
+ + uv_crop_width,
+ ] = frame[u2[1] : u2[3], u2[0] : u2[2]]
+
+ # copy v1
+ yuv_cropped_frame[
+ size
+ + size // 4
+ + uv_channel_y_offset : size
+ + size // 4
+ + uv_channel_y_offset
+ + uv_crop_height,
+ 0 + uv_channel_x_offset : 0 + uv_channel_x_offset + uv_crop_width,
+ ] = frame[v1[1] : v1[3], v1[0] : v1[2]]
+
+ # copy v2
+ yuv_cropped_frame[
+ size
+ + size // 4
+ + uv_channel_y_offset : size
+ + size // 4
+ + uv_channel_y_offset
+ + uv_crop_height,
+ size // 2
+ + uv_channel_x_offset : size // 2
+ + uv_channel_x_offset
+ + uv_crop_width,
+ ] = frame[v2[1] : v2[3], v2[0] : v2[2]]
+
+ return yuv_cropped_frame
+
+
+def copy_yuv_to_position(
+ destination_frame,
+ destination_offset,
+ destination_shape,
+ source_frame=None,
+ source_channel_dim=None,
+):
+ # get the coordinates of the channels for this position in the layout
+ y, u1, u2, v1, v2 = get_yuv_crop(
+ destination_frame.shape,
+ (
+ destination_offset[1],
+ destination_offset[0],
+ destination_offset[1] + destination_shape[1],
+ destination_offset[0] + destination_shape[0],
+ ),
+ )
- # copy u2
- yuv_cropped_frame[
- size + uv_channel_y_offset:size + uv_channel_y_offset + uv_crop_height,
- size//2 + uv_channel_x_offset:size//2 + uv_channel_x_offset + uv_crop_width
- ] = frame[
- u2[1]:u2[3],
- u2[0]:u2[2]
- ]
+ # clear y
+ destination_frame[
+ y[1] : y[3],
+ y[0] : y[2],
+ ] = 16
+
+ # clear u1
+ destination_frame[u1[1] : u1[3], u1[0] : u1[2]] = 128
+ # clear u2
+ destination_frame[u2[1] : u2[3], u2[0] : u2[2]] = 128
+ # clear v1
+ destination_frame[v1[1] : v1[3], v1[0] : v1[2]] = 128
+ # clear v2
+ destination_frame[v2[1] : v2[3], v2[0] : v2[2]] = 128
+
+ if not source_frame is None:
+ # calculate the resized frame, maintaining the aspect ratio
+ source_aspect_ratio = source_frame.shape[1] / (source_frame.shape[0] // 3 * 2)
+ dest_aspect_ratio = destination_shape[1] / destination_shape[0]
+
+ if source_aspect_ratio <= dest_aspect_ratio:
+ y_resize_height = int(destination_shape[0] // 4 * 4)
+ y_resize_width = int((y_resize_height * source_aspect_ratio) // 4 * 4)
+ else:
+ y_resize_width = int(destination_shape[1] // 4 * 4)
+ y_resize_height = int((y_resize_width / source_aspect_ratio) // 4 * 4)
+
+ uv_resize_width = int(y_resize_width // 2)
+ uv_resize_height = int(y_resize_height // 4)
+
+ y_y_offset = int((destination_shape[0] - y_resize_height) / 4 // 4 * 4)
+ y_x_offset = int((destination_shape[1] - y_resize_width) / 2 // 4 * 4)
+
+ uv_y_offset = y_y_offset // 4
+ uv_x_offset = y_x_offset // 2
+
+ interpolation = cv2.INTER_LINEAR
+ # resize/copy y channel
+ destination_frame[
+ y[1] + y_y_offset : y[1] + y_y_offset + y_resize_height,
+ y[0] + y_x_offset : y[0] + y_x_offset + y_resize_width,
+ ] = cv2.resize(
+ source_frame[
+ source_channel_dim["y"][1] : source_channel_dim["y"][3],
+ source_channel_dim["y"][0] : source_channel_dim["y"][2],
+ ],
+ dsize=(y_resize_width, y_resize_height),
+ interpolation=interpolation,
+ )
+
+ # resize/copy u1
+ destination_frame[
+ u1[1] + uv_y_offset : u1[1] + uv_y_offset + uv_resize_height,
+ u1[0] + uv_x_offset : u1[0] + uv_x_offset + uv_resize_width,
+ ] = cv2.resize(
+ source_frame[
+ source_channel_dim["u1"][1] : source_channel_dim["u1"][3],
+ source_channel_dim["u1"][0] : source_channel_dim["u1"][2],
+ ],
+ dsize=(uv_resize_width, uv_resize_height),
+ interpolation=interpolation,
+ )
+ # resize/copy u2
+ destination_frame[
+ u2[1] + uv_y_offset : u2[1] + uv_y_offset + uv_resize_height,
+ u2[0] + uv_x_offset : u2[0] + uv_x_offset + uv_resize_width,
+ ] = cv2.resize(
+ source_frame[
+ source_channel_dim["u2"][1] : source_channel_dim["u2"][3],
+ source_channel_dim["u2"][0] : source_channel_dim["u2"][2],
+ ],
+ dsize=(uv_resize_width, uv_resize_height),
+ interpolation=interpolation,
+ )
+ # resize/copy v1
+ destination_frame[
+ v1[1] + uv_y_offset : v1[1] + uv_y_offset + uv_resize_height,
+ v1[0] + uv_x_offset : v1[0] + uv_x_offset + uv_resize_width,
+ ] = cv2.resize(
+ source_frame[
+ source_channel_dim["v1"][1] : source_channel_dim["v1"][3],
+ source_channel_dim["v1"][0] : source_channel_dim["v1"][2],
+ ],
+ dsize=(uv_resize_width, uv_resize_height),
+ interpolation=interpolation,
+ )
+ # resize/copy v2
+ destination_frame[
+ v2[1] + uv_y_offset : v2[1] + uv_y_offset + uv_resize_height,
+ v2[0] + uv_x_offset : v2[0] + uv_x_offset + uv_resize_width,
+ ] = cv2.resize(
+ source_frame[
+ source_channel_dim["v2"][1] : source_channel_dim["v2"][3],
+ source_channel_dim["v2"][0] : source_channel_dim["v2"][2],
+ ],
+ dsize=(uv_resize_width, uv_resize_height),
+ interpolation=interpolation,
+ )
- # copy v1
- yuv_cropped_frame[
- size+size//4 + uv_channel_y_offset:size+size//4 + uv_channel_y_offset + uv_crop_height,
- 0 + uv_channel_x_offset:0 + uv_channel_x_offset + uv_crop_width
- ] = frame[
- v1[1]:v1[3],
- v1[0]:v1[2]
- ]
-
- # copy v2
- yuv_cropped_frame[
- size+size//4 + uv_channel_y_offset:size+size//4 + uv_channel_y_offset + uv_crop_height,
- size//2 + uv_channel_x_offset:size//2 + uv_channel_x_offset + uv_crop_width
- ] = frame[
- v2[1]:v2[3],
- v2[0]:v2[2]
- ]
+def yuv_region_2_rgb(frame, region):
+ try:
+ # TODO: does this copy the numpy array?
+ yuv_cropped_frame = yuv_crop_and_resize(frame, region)
return cv2.cvtColor(yuv_cropped_frame, cv2.COLOR_YUV2RGB_I420)
except:
print(f"frame.shape: {frame.shape}")
print(f"region: {region}")
raise
+
def intersection(box_a, box_b):
return (
max(box_a[0], box_b[0]),
max(box_a[1], box_b[1]),
min(box_a[2], box_b[2]),
- min(box_a[3], box_b[3])
+ min(box_a[3], box_b[3]),
)
+
def area(box):
- return (box[2]-box[0] + 1)*(box[3]-box[1] + 1)
-
+ return (box[2] - box[0] + 1) * (box[3] - box[1] + 1)
+
+
def intersection_over_union(box_a, box_b):
# determine the (x, y)-coordinates of the intersection rectangle
intersect = intersection(box_a, box_b)
# compute the area of intersection rectangle
- inter_area = max(0, intersect[2] - intersect[0] + 1) * max(0, intersect[3] - intersect[1] + 1)
+ inter_area = max(0, intersect[2] - intersect[0] + 1) * max(
+ 0, intersect[3] - intersect[1] + 1
+ )
if inter_area == 0:
return 0.0
-
+
# compute the area of both the prediction and ground-truth
# rectangles
box_a_area = (box_a[2] - box_a[0] + 1) * (box_a[3] - box_a[1] + 1)
@@ -247,25 +517,39 @@ def intersection_over_union(box_a, box_b):
# return the intersection over union value
return iou
+
def clipped(obj, frame_shape):
# if the object is within 5 pixels of the region border, and the region is not on the edge
# consider the object to be clipped
box = obj[2]
region = obj[4]
- if ((region[0] > 5 and box[0]-region[0] <= 5) or
- (region[1] > 5 and box[1]-region[1] <= 5) or
- (frame_shape[1]-region[2] > 5 and region[2]-box[2] <= 5) or
- (frame_shape[0]-region[3] > 5 and region[3]-box[3] <= 5)):
+ if (
+ (region[0] > 5 and box[0] - region[0] <= 5)
+ or (region[1] > 5 and box[1] - region[1] <= 5)
+ or (frame_shape[1] - region[2] > 5 and region[2] - box[2] <= 5)
+ or (frame_shape[0] - region[3] > 5 and region[3] - box[3] <= 5)
+ ):
return True
else:
return False
+
+def restart_frigate():
+ proc = psutil.Process(1)
+ # if this is running via s6, sigterm pid 1
+ if proc.name() == "s6-svscan":
+ proc.terminate()
+ # otherwise, just try and exit frigate
+ else:
+ os.kill(os.getpid(), signal.SIGTERM)
+
+
class EventsPerSecond:
def __init__(self, max_events=1000):
self._start = None
self._max_events = max_events
self._timestamps = []
-
+
def start(self):
self._start = datetime.datetime.now().timestamp()
@@ -274,23 +558,28 @@ def update(self):
self.start()
self._timestamps.append(datetime.datetime.now().timestamp())
# truncate the list when it goes 100 over the max_size
- if len(self._timestamps) > self._max_events+100:
- self._timestamps = self._timestamps[(1-self._max_events):]
+ if len(self._timestamps) > self._max_events + 100:
+ self._timestamps = self._timestamps[(1 - self._max_events) :]
def eps(self, last_n_seconds=10):
if self._start is None:
self.start()
- # compute the (approximate) events in the last n seconds
+ # compute the (approximate) events in the last n seconds
now = datetime.datetime.now().timestamp()
- seconds = min(now-self._start, last_n_seconds)
- return len([t for t in self._timestamps if t > (now-last_n_seconds)]) / seconds
+ seconds = min(now - self._start, last_n_seconds)
+ return (
+ len([t for t in self._timestamps if t > (now - last_n_seconds)]) / seconds
+ )
+
def print_stack(sig, frame):
traceback.print_stack(frame)
+
def listen():
signal.signal(signal.SIGUSR1, print_stack)
+
def create_mask(frame_shape, mask):
mask_img = np.zeros(frame_shape, np.uint8)
mask_img[:] = 255
@@ -304,11 +593,15 @@ def create_mask(frame_shape, mask):
return mask_img
+
def add_mask(mask, mask_img):
- points = mask.split(',')
- contour = np.array([[int(points[i]), int(points[i+1])] for i in range(0, len(points), 2)])
+ points = mask.split(",")
+ contour = np.array(
+ [[int(points[i]), int(points[i + 1])] for i in range(0, len(points), 2)]
+ )
cv2.fillPoly(mask_img, pts=[contour], color=(0))
+
class FrameManager(ABC):
@abstractmethod
def create(self, name, size) -> AnyStr:
@@ -326,29 +619,31 @@ def close(self, name):
def delete(self, name):
pass
+
class DictFrameManager(FrameManager):
def __init__(self):
self.frames = {}
-
+
def create(self, name, size) -> AnyStr:
mem = bytearray(size)
self.frames[name] = mem
return mem
-
+
def get(self, name, shape):
mem = self.frames[name]
return np.ndarray(shape, dtype=np.uint8, buffer=mem)
-
+
def close(self, name):
pass
-
+
def delete(self, name):
del self.frames[name]
+
class SharedMemoryFrameManager(FrameManager):
def __init__(self):
self.shm_store = {}
-
+
def create(self, name, size) -> AnyStr:
shm = shared_memory.SharedMemory(name=name, create=True, size=size)
self.shm_store[name] = shm
diff --git a/frigate/video.py b/frigate/video.py
--- a/frigate/video.py
+++ b/frigate/video.py
@@ -1,12 +1,7 @@
-import base64
-import copy
-import ctypes
import datetime
import itertools
-import json
import logging
import multiprocessing as mp
-import os
import queue
import subprocess as sp
import signal
@@ -16,7 +11,7 @@
from setproctitle import setproctitle
from typing import Dict, List
-import cv2
+from cv2 import cv2
import numpy as np
from frigate.config import CameraConfig
@@ -24,19 +19,25 @@
from frigate.log import LogPipe
from frigate.motion import MotionDetector
from frigate.objects import ObjectTracker
-from frigate.util import (EventsPerSecond, FrameManager,
- SharedMemoryFrameManager, area, calculate_region,
- clipped, draw_box_with_label, intersection,
- intersection_over_union, listen, yuv_region_2_rgb)
+from frigate.util import (
+ EventsPerSecond,
+ FrameManager,
+ SharedMemoryFrameManager,
+ calculate_region,
+ clipped,
+ listen,
+ yuv_region_2_rgb,
+)
logger = logging.getLogger(__name__)
+
def filtered(obj, objects_to_track, object_filters):
object_name = obj[0]
if not object_name in objects_to_track:
return True
-
+
if object_name in object_filters:
obj_settings = object_filters[object_name]
@@ -44,7 +45,7 @@ def filtered(obj, objects_to_track, object_filters):
# detected object, don't add it to detected objects
if obj_settings.min_area > obj[3]:
return True
-
+
# if the detected object is larger than the
# max area, don't add it to detected objects
if obj_settings.max_area < obj[3]:
@@ -53,29 +54,36 @@ def filtered(obj, objects_to_track, object_filters):
# if the score is lower than the min_score, skip
if obj_settings.min_score > obj[1]:
return True
-
+
if not obj_settings.mask is None:
# compute the coordinates of the object and make sure
# the location isnt outside the bounds of the image (can happen from rounding)
- y_location = min(int(obj[2][3]), len(obj_settings.mask)-1)
- x_location = min(int((obj[2][2]-obj[2][0])/2.0)+obj[2][0], len(obj_settings.mask[0])-1)
+ y_location = min(int(obj[2][3]), len(obj_settings.mask) - 1)
+ x_location = min(
+ int((obj[2][2] - obj[2][0]) / 2.0) + obj[2][0],
+ len(obj_settings.mask[0]) - 1,
+ )
# if the object is in a masked location, don't add it to detected objects
if obj_settings.mask[y_location][x_location] == 0:
return True
-
+
return False
+
def create_tensor_input(frame, model_shape, region):
cropped_frame = yuv_region_2_rgb(frame, region)
# Resize to 300x300 if needed
if cropped_frame.shape != (model_shape[0], model_shape[1], 3):
- cropped_frame = cv2.resize(cropped_frame, dsize=model_shape, interpolation=cv2.INTER_LINEAR)
-
+ cropped_frame = cv2.resize(
+ cropped_frame, dsize=model_shape, interpolation=cv2.INTER_LINEAR
+ )
+
# Expand dimensions since the model expects images to have shape: [1, height, width, 3]
return np.expand_dims(cropped_frame, axis=0)
+
def stop_ffmpeg(ffmpeg_process, logger):
logger.info("Terminating the existing ffmpeg process...")
ffmpeg_process.terminate()
@@ -88,18 +96,43 @@ def stop_ffmpeg(ffmpeg_process, logger):
ffmpeg_process.communicate()
ffmpeg_process = None
-def start_or_restart_ffmpeg(ffmpeg_cmd, logger, logpipe: LogPipe, frame_size=None, ffmpeg_process=None):
- if not ffmpeg_process is None:
+
+def start_or_restart_ffmpeg(
+ ffmpeg_cmd, logger, logpipe: LogPipe, frame_size=None, ffmpeg_process=None
+):
+ if ffmpeg_process is not None:
stop_ffmpeg(ffmpeg_process, logger)
if frame_size is None:
- process = sp.Popen(ffmpeg_cmd, stdout = sp.DEVNULL, stderr=logpipe, stdin = sp.DEVNULL, start_new_session=True)
+ process = sp.Popen(
+ ffmpeg_cmd,
+ stdout=sp.DEVNULL,
+ stderr=logpipe,
+ stdin=sp.DEVNULL,
+ start_new_session=True,
+ )
else:
- process = sp.Popen(ffmpeg_cmd, stdout = sp.PIPE, stderr=logpipe, stdin = sp.DEVNULL, bufsize=frame_size*10, start_new_session=True)
+ process = sp.Popen(
+ ffmpeg_cmd,
+ stdout=sp.PIPE,
+ stderr=logpipe,
+ stdin=sp.DEVNULL,
+ bufsize=frame_size * 10,
+ start_new_session=True,
+ )
return process
-def capture_frames(ffmpeg_process, camera_name, frame_shape, frame_manager: FrameManager,
- frame_queue, fps:mp.Value, skipped_fps: mp.Value, current_frame: mp.Value):
+
+def capture_frames(
+ ffmpeg_process,
+ camera_name,
+ frame_shape,
+ frame_manager: FrameManager,
+ frame_queue,
+ fps: mp.Value,
+ skipped_fps: mp.Value,
+ current_frame: mp.Value,
+):
frame_size = frame_shape[0] * frame_shape[1]
frame_rate = EventsPerSecond()
@@ -119,7 +152,9 @@ def capture_frames(ffmpeg_process, camera_name, frame_shape, frame_manager: Fram
logger.info(f"{camera_name}: ffmpeg sent a broken frame. {e}")
if ffmpeg_process.poll() != None:
- logger.info(f"{camera_name}: ffmpeg process is not running. exiting capture thread...")
+ logger.info(
+ f"{camera_name}: ffmpeg process is not running. exiting capture thread..."
+ )
frame_manager.delete(frame_name)
break
continue
@@ -138,8 +173,11 @@ def capture_frames(ffmpeg_process, camera_name, frame_shape, frame_manager: Fram
# add to the queue
frame_queue.put(current_frame.value)
+
class CameraWatchdog(threading.Thread):
- def __init__(self, camera_name, config, frame_queue, camera_fps, ffmpeg_pid, stop_event):
+ def __init__(
+ self, camera_name, config, frame_queue, camera_fps, ffmpeg_pid, stop_event
+ ):
threading.Thread.__init__(self)
self.logger = logging.getLogger(f"watchdog.{camera_name}")
self.camera_name = camera_name
@@ -159,32 +197,38 @@ def run(self):
self.start_ffmpeg_detect()
for c in self.config.ffmpeg_cmds:
- if 'detect' in c['roles']:
+ if "detect" in c["roles"]:
continue
- logpipe = LogPipe(f"ffmpeg.{self.camera_name}.{'_'.join(sorted(c['roles']))}", logging.ERROR)
- self.ffmpeg_other_processes.append({
- 'cmd': c['cmd'],
- 'logpipe': logpipe,
- 'process': start_or_restart_ffmpeg(c['cmd'], self.logger, logpipe)
- })
-
- time.sleep(10)
- while True:
- if self.stop_event.is_set():
- stop_ffmpeg(self.ffmpeg_detect_process, self.logger)
- for p in self.ffmpeg_other_processes:
- stop_ffmpeg(p['process'], self.logger)
- p['logpipe'].close()
- self.logpipe.close()
- break
+ logpipe = LogPipe(
+ f"ffmpeg.{self.camera_name}.{'_'.join(sorted(c['roles']))}",
+ logging.ERROR,
+ )
+ self.ffmpeg_other_processes.append(
+ {
+ "cmd": c["cmd"],
+ "logpipe": logpipe,
+ "process": start_or_restart_ffmpeg(c["cmd"], self.logger, logpipe),
+ }
+ )
+ time.sleep(10)
+ while not self.stop_event.wait(10):
now = datetime.datetime.now().timestamp()
if not self.capture_thread.is_alive():
+ self.logger.error(
+ f"FFMPEG process crashed unexpectedly for {self.camera_name}."
+ )
+ self.logger.error(
+ "The following ffmpeg logs include the last 100 lines prior to exit."
+ )
+ self.logger.error("You may have invalid args defined for this camera.")
self.logpipe.dump()
self.start_ffmpeg_detect()
elif now - self.capture_thread.current_frame.value > 20:
- self.logger.info(f"No frames received from {self.camera_name} in 20 seconds. Exiting ffmpeg...")
+ self.logger.info(
+ f"No frames received from {self.camera_name} in 20 seconds. Exiting ffmpeg..."
+ )
self.ffmpeg_detect_process.terminate()
try:
self.logger.info("Waiting for ffmpeg to exit gracefully...")
@@ -193,25 +237,40 @@ def run(self):
self.logger.info("FFmpeg didnt exit. Force killing...")
self.ffmpeg_detect_process.kill()
self.ffmpeg_detect_process.communicate()
-
+
for p in self.ffmpeg_other_processes:
- poll = p['process'].poll()
- if poll == None:
+ poll = p["process"].poll()
+ if poll is None:
continue
- p['logpipe'].dump()
- p['process'] = start_or_restart_ffmpeg(p['cmd'], self.logger, p['logpipe'], ffmpeg_process=p['process'])
-
- # wait a bit before checking again
- time.sleep(10)
-
+ p["logpipe"].dump()
+ p["process"] = start_or_restart_ffmpeg(
+ p["cmd"], self.logger, p["logpipe"], ffmpeg_process=p["process"]
+ )
+
+ stop_ffmpeg(self.ffmpeg_detect_process, self.logger)
+ for p in self.ffmpeg_other_processes:
+ stop_ffmpeg(p["process"], self.logger)
+ p["logpipe"].close()
+ self.logpipe.close()
+
def start_ffmpeg_detect(self):
- ffmpeg_cmd = [c['cmd'] for c in self.config.ffmpeg_cmds if 'detect' in c['roles']][0]
- self.ffmpeg_detect_process = start_or_restart_ffmpeg(ffmpeg_cmd, self.logger, self.logpipe, self.frame_size)
+ ffmpeg_cmd = [
+ c["cmd"] for c in self.config.ffmpeg_cmds if "detect" in c["roles"]
+ ][0]
+ self.ffmpeg_detect_process = start_or_restart_ffmpeg(
+ ffmpeg_cmd, self.logger, self.logpipe, self.frame_size
+ )
self.ffmpeg_pid.value = self.ffmpeg_detect_process.pid
- self.capture_thread = CameraCapture(self.camera_name, self.ffmpeg_detect_process, self.frame_shape, self.frame_queue,
- self.camera_fps)
+ self.capture_thread = CameraCapture(
+ self.camera_name,
+ self.ffmpeg_detect_process,
+ self.frame_shape,
+ self.frame_queue,
+ self.camera_fps,
+ )
self.capture_thread.start()
+
class CameraCapture(threading.Thread):
def __init__(self, camera_name, ffmpeg_process, frame_shape, frame_queue, fps):
threading.Thread.__init__(self)
@@ -223,32 +282,60 @@ def __init__(self, camera_name, ffmpeg_process, frame_shape, frame_queue, fps):
self.skipped_fps = EventsPerSecond()
self.frame_manager = SharedMemoryFrameManager()
self.ffmpeg_process = ffmpeg_process
- self.current_frame = mp.Value('d', 0.0)
+ self.current_frame = mp.Value("d", 0.0)
self.last_frame = 0
def run(self):
self.skipped_fps.start()
- capture_frames(self.ffmpeg_process, self.camera_name, self.frame_shape, self.frame_manager, self.frame_queue,
- self.fps, self.skipped_fps, self.current_frame)
+ capture_frames(
+ self.ffmpeg_process,
+ self.camera_name,
+ self.frame_shape,
+ self.frame_manager,
+ self.frame_queue,
+ self.fps,
+ self.skipped_fps,
+ self.current_frame,
+ )
+
def capture_camera(name, config: CameraConfig, process_info):
stop_event = mp.Event()
+
def receiveSignal(signalNumber, frame):
stop_event.set()
-
+
signal.signal(signal.SIGTERM, receiveSignal)
signal.signal(signal.SIGINT, receiveSignal)
- frame_queue = process_info['frame_queue']
- camera_watchdog = CameraWatchdog(name, config, frame_queue, process_info['camera_fps'], process_info['ffmpeg_pid'], stop_event)
+ frame_queue = process_info["frame_queue"]
+ camera_watchdog = CameraWatchdog(
+ name,
+ config,
+ frame_queue,
+ process_info["camera_fps"],
+ process_info["ffmpeg_pid"],
+ stop_event,
+ )
camera_watchdog.start()
camera_watchdog.join()
-def track_camera(name, config: CameraConfig, model_shape, detection_queue, result_connection, detected_objects_queue, process_info):
+
+def track_camera(
+ name,
+ config: CameraConfig,
+ model_shape,
+ labelmap,
+ detection_queue,
+ result_connection,
+ detected_objects_queue,
+ process_info,
+):
stop_event = mp.Event()
+
def receiveSignal(signalNumber, frame):
stop_event.set()
-
+
signal.signal(signal.SIGTERM, receiveSignal)
signal.signal(signal.SIGINT, receiveSignal)
@@ -256,79 +343,118 @@ def receiveSignal(signalNumber, frame):
setproctitle(f"frigate.process:{name}")
listen()
- frame_queue = process_info['frame_queue']
- detection_enabled = process_info['detection_enabled']
+ frame_queue = process_info["frame_queue"]
+ detection_enabled = process_info["detection_enabled"]
frame_shape = config.frame_shape
objects_to_track = config.objects.track
object_filters = config.objects.filters
motion_detector = MotionDetector(frame_shape, config.motion)
- object_detector = RemoteObjectDetector(name, '/labelmap.txt', detection_queue, result_connection, model_shape)
+ object_detector = RemoteObjectDetector(
+ name, labelmap, detection_queue, result_connection, model_shape
+ )
object_tracker = ObjectTracker(config.detect)
frame_manager = SharedMemoryFrameManager()
- process_frames(name, frame_queue, frame_shape, model_shape, frame_manager, motion_detector, object_detector,
- object_tracker, detected_objects_queue, process_info, objects_to_track, object_filters, detection_enabled, stop_event)
+ process_frames(
+ name,
+ frame_queue,
+ frame_shape,
+ model_shape,
+ frame_manager,
+ motion_detector,
+ object_detector,
+ object_tracker,
+ detected_objects_queue,
+ process_info,
+ objects_to_track,
+ object_filters,
+ detection_enabled,
+ stop_event,
+ )
logger.info(f"{name}: exiting subprocess")
+
def reduce_boxes(boxes):
if len(boxes) == 0:
return []
- reduced_boxes = cv2.groupRectangles([list(b) for b in itertools.chain(boxes, boxes)], 1, 0.2)[0]
+ reduced_boxes = cv2.groupRectangles(
+ [list(b) for b in itertools.chain(boxes, boxes)], 1, 0.2
+ )[0]
return [tuple(b) for b in reduced_boxes]
+
# modified from https://stackoverflow.com/a/40795835
def intersects_any(box_a, boxes):
for box in boxes:
- if box_a[2] < box[0] or box_a[0] > box[2] or box_a[1] > box[3] or box_a[3] < box[1]:
+ if (
+ box_a[2] < box[0]
+ or box_a[0] > box[2]
+ or box_a[1] > box[3]
+ or box_a[3] < box[1]
+ ):
continue
return True
-def detect(object_detector, frame, model_shape, region, objects_to_track, object_filters):
+
+def detect(
+ object_detector, frame, model_shape, region, objects_to_track, object_filters
+):
tensor_input = create_tensor_input(frame, model_shape, region)
detections = []
region_detections = object_detector.detect(tensor_input)
for d in region_detections:
box = d[2]
- size = region[2]-region[0]
+ size = region[2] - region[0]
x_min = int((box[1] * size) + region[0])
y_min = int((box[0] * size) + region[1])
x_max = int((box[3] * size) + region[0])
y_max = int((box[2] * size) + region[1])
- det = (d[0],
+ det = (
+ d[0],
d[1],
(x_min, y_min, x_max, y_max),
- (x_max-x_min)*(y_max-y_min),
- region)
+ (x_max - x_min) * (y_max - y_min),
+ region,
+ )
# apply object filters
if filtered(det, objects_to_track, object_filters):
continue
detections.append(det)
return detections
-def process_frames(camera_name: str, frame_queue: mp.Queue, frame_shape, model_shape,
- frame_manager: FrameManager, motion_detector: MotionDetector,
- object_detector: RemoteObjectDetector, object_tracker: ObjectTracker,
- detected_objects_queue: mp.Queue, process_info: Dict,
- objects_to_track: List[str], object_filters, detection_enabled: mp.Value, stop_event,
- exit_on_empty: bool = False):
-
- fps = process_info['process_fps']
- detection_fps = process_info['detection_fps']
- current_frame_time = process_info['detection_frame']
+
+def process_frames(
+ camera_name: str,
+ frame_queue: mp.Queue,
+ frame_shape,
+ model_shape,
+ frame_manager: FrameManager,
+ motion_detector: MotionDetector,
+ object_detector: RemoteObjectDetector,
+ object_tracker: ObjectTracker,
+ detected_objects_queue: mp.Queue,
+ process_info: Dict,
+ objects_to_track: List[str],
+ object_filters,
+ detection_enabled: mp.Value,
+ stop_event,
+ exit_on_empty: bool = False,
+):
+
+ fps = process_info["process_fps"]
+ detection_fps = process_info["detection_fps"]
+ current_frame_time = process_info["detection_frame"]
fps_tracker = EventsPerSecond()
fps_tracker.start()
- while True:
- if stop_event.is_set():
- break
-
+ while not stop_event.is_set():
if exit_on_empty and frame_queue.empty():
logger.info(f"Exiting track_objects...")
break
@@ -340,7 +466,9 @@ def process_frames(camera_name: str, frame_queue: mp.Queue, frame_shape, model_s
current_frame_time.value = frame_time
- frame = frame_manager.get(f"{camera_name}{frame_time}", (frame_shape[0]*3//2, frame_shape[1]))
+ frame = frame_manager.get(
+ f"{camera_name}{frame_time}", (frame_shape[0] * 3 // 2, frame_shape[1])
+ )
if frame is None:
logger.info(f"{camera_name}: frame {frame_time} is not in memory store.")
@@ -349,7 +477,9 @@ def process_frames(camera_name: str, frame_queue: mp.Queue, frame_shape, model_s
if not detection_enabled.value:
fps.value = fps_tracker.eps()
object_tracker.match_and_update(frame_time, [])
- detected_objects_queue.put((camera_name, frame_time, object_tracker.tracked_objects, [], []))
+ detected_objects_queue.put(
+ (camera_name, frame_time, object_tracker.tracked_objects, [], [])
+ )
detection_fps.value = object_detector.fps.eps()
frame_manager.close(f"{camera_name}{frame_time}")
continue
@@ -358,27 +488,44 @@ def process_frames(camera_name: str, frame_queue: mp.Queue, frame_shape, model_s
motion_boxes = motion_detector.detect(frame)
# only get the tracked object boxes that intersect with motion
- tracked_object_boxes = [obj['box'] for obj in object_tracker.tracked_objects.values() if intersects_any(obj['box'], motion_boxes)]
+ tracked_object_boxes = [
+ obj["box"]
+ for obj in object_tracker.tracked_objects.values()
+ if intersects_any(obj["box"], motion_boxes)
+ ]
# combine motion boxes with known locations of existing objects
combined_boxes = reduce_boxes(motion_boxes + tracked_object_boxes)
# compute regions
- regions = [calculate_region(frame_shape, a[0], a[1], a[2], a[3], 1.2)
- for a in combined_boxes]
+ regions = [
+ calculate_region(frame_shape, a[0], a[1], a[2], a[3], 1.2)
+ for a in combined_boxes
+ ]
# combine overlapping regions
combined_regions = reduce_boxes(regions)
# re-compute regions
- regions = [calculate_region(frame_shape, a[0], a[1], a[2], a[3], 1.0)
- for a in combined_regions]
+ regions = [
+ calculate_region(frame_shape, a[0], a[1], a[2], a[3], 1.0)
+ for a in combined_regions
+ ]
# resize regions and detect
detections = []
for region in regions:
- detections.extend(detect(object_detector, frame, model_shape, region, objects_to_track, object_filters))
-
+ detections.extend(
+ detect(
+ object_detector,
+ frame,
+ model_shape,
+ region,
+ objects_to_track,
+ object_filters,
+ )
+ )
+
#########
# merge objects, check for clipped objects and look again up to 4 times
#########
@@ -396,8 +543,10 @@ def process_frames(camera_name: str, frame_queue: mp.Queue, frame_shape, model_s
for group in detected_object_groups.values():
# apply non-maxima suppression to suppress weak, overlapping bounding boxes
- boxes = [(o[2][0], o[2][1], o[2][2]-o[2][0], o[2][3]-o[2][1])
- for o in group]
+ boxes = [
+ (o[2][0], o[2][1], o[2][2] - o[2][0], o[2][3] - o[2][1])
+ for o in group
+ ]
confidences = [o[1] for o in group]
idxs = cv2.dnn.NMSBoxes(boxes, confidences, 0.5, 0.4)
@@ -406,17 +555,26 @@ def process_frames(camera_name: str, frame_queue: mp.Queue, frame_shape, model_s
if clipped(obj, frame_shape):
box = obj[2]
# calculate a new region that will hopefully get the entire object
- region = calculate_region(frame_shape,
- box[0], box[1],
- box[2], box[3])
+ region = calculate_region(
+ frame_shape, box[0], box[1], box[2], box[3]
+ )
regions.append(region)
-
- selected_objects.extend(detect(object_detector, frame, model_shape, region, objects_to_track, object_filters))
+
+ selected_objects.extend(
+ detect(
+ object_detector,
+ frame,
+ model_shape,
+ region,
+ objects_to_track,
+ object_filters,
+ )
+ )
refining = True
else:
- selected_objects.append(obj)
+ selected_objects.append(obj)
# set the detections list to only include top, complete objects
# and new detections
detections = selected_objects
@@ -426,18 +584,28 @@ def process_frames(camera_name: str, frame_queue: mp.Queue, frame_shape, model_s
# Limit to the detections overlapping with motion areas
# to avoid picking up stationary background objects
- detections_with_motion = [d for d in detections if intersects_any(d[2], motion_boxes)]
+ detections_with_motion = [
+ d for d in detections if intersects_any(d[2], motion_boxes)
+ ]
# now that we have refined our detections, we need to track objects
object_tracker.match_and_update(frame_time, detections_with_motion)
# add to the queue if not full
- if(detected_objects_queue.full()):
+ if detected_objects_queue.full():
frame_manager.delete(f"{camera_name}{frame_time}")
continue
else:
fps_tracker.update()
fps.value = fps_tracker.eps()
- detected_objects_queue.put((camera_name, frame_time, object_tracker.tracked_objects, motion_boxes, regions))
+ detected_objects_queue.put(
+ (
+ camera_name,
+ frame_time,
+ object_tracker.tracked_objects,
+ motion_boxes,
+ regions,
+ )
+ )
detection_fps.value = object_detector.fps.eps()
frame_manager.close(f"{camera_name}{frame_time}")
diff --git a/frigate/watchdog.py b/frigate/watchdog.py
--- a/frigate/watchdog.py
+++ b/frigate/watchdog.py
@@ -5,34 +5,35 @@
import os
import signal
+from frigate.util import (
+ restart_frigate,
+)
+
logger = logging.getLogger(__name__)
+
class FrigateWatchdog(threading.Thread):
def __init__(self, detectors, stop_event):
threading.Thread.__init__(self)
- self.name = 'frigate_watchdog'
+ self.name = "frigate_watchdog"
self.detectors = detectors
self.stop_event = stop_event
def run(self):
time.sleep(10)
- while True:
- # wait a bit before checking
- time.sleep(10)
-
- if self.stop_event.is_set():
- logger.info(f"Exiting watchdog...")
- break
-
+ while not self.stop_event.wait(10):
now = datetime.datetime.now().timestamp()
# check the detection processes
for detector in self.detectors.values():
detection_start = detector.detection_start.value
- if (detection_start > 0.0 and
- now - detection_start > 10):
- logger.info("Detection appears to be stuck. Restarting detection process...")
+ if detection_start > 0.0 and now - detection_start > 10:
+ logger.info(
+ "Detection appears to be stuck. Restarting detection process..."
+ )
detector.start_or_restart()
elif not detector.detect_process.is_alive():
logger.info("Detection appears to have stopped. Exiting frigate...")
- os.kill(os.getpid(), signal.SIGTERM)
+ restart_frigate()
+
+ logger.info(f"Exiting watchdog...")
diff --git a/frigate/zeroconf.py b/frigate/zeroconf.py
--- a/frigate/zeroconf.py
+++ b/frigate/zeroconf.py
@@ -31,6 +31,7 @@ def get_local_ip() -> str:
finally:
sock.close()
+
def broadcast_zeroconf(frigate_id):
zeroconf = Zeroconf(interfaces=InterfaceChoice.Default, ip_version=IPVersion.V4Only)
diff --git a/migrations/001_create_events_table.py b/migrations/001_create_events_table.py
--- a/migrations/001_create_events_table.py
+++ b/migrations/001_create_events_table.py
@@ -32,10 +32,14 @@
SQL = pw.SQL
+
def migrate(migrator, database, fake=False, **kwargs):
- migrator.sql('CREATE TABLE IF NOT EXISTS "event" ("id" VARCHAR(30) NOT NULL PRIMARY KEY, "label" VARCHAR(20) NOT NULL, "camera" VARCHAR(20) NOT NULL, "start_time" DATETIME NOT NULL, "end_time" DATETIME NOT NULL, "top_score" REAL NOT NULL, "false_positive" INTEGER NOT NULL, "zones" JSON NOT NULL, "thumbnail" TEXT NOT NULL)')
+ migrator.sql(
+ 'CREATE TABLE IF NOT EXISTS "event" ("id" VARCHAR(30) NOT NULL PRIMARY KEY, "label" VARCHAR(20) NOT NULL, "camera" VARCHAR(20) NOT NULL, "start_time" DATETIME NOT NULL, "end_time" DATETIME NOT NULL, "top_score" REAL NOT NULL, "false_positive" INTEGER NOT NULL, "zones" JSON NOT NULL, "thumbnail" TEXT NOT NULL)'
+ )
migrator.sql('CREATE INDEX IF NOT EXISTS "event_label" ON "event" ("label")')
migrator.sql('CREATE INDEX IF NOT EXISTS "event_camera" ON "event" ("camera")')
+
def rollback(migrator, database, fake=False, **kwargs):
pass
diff --git a/migrations/002_add_clip_snapshot.py b/migrations/002_add_clip_snapshot.py
--- a/migrations/002_add_clip_snapshot.py
+++ b/migrations/002_add_clip_snapshot.py
@@ -35,7 +35,12 @@
def migrate(migrator, database, fake=False, **kwargs):
- migrator.add_fields(Event, has_clip=pw.BooleanField(default=True), has_snapshot=pw.BooleanField(default=True))
+ migrator.add_fields(
+ Event,
+ has_clip=pw.BooleanField(default=True),
+ has_snapshot=pw.BooleanField(default=True),
+ )
+
def rollback(migrator, database, fake=False, **kwargs):
- migrator.remove_fields(Event, ['has_clip', 'has_snapshot'])
+ migrator.remove_fields(Event, ["has_clip", "has_snapshot"])
diff --git a/migrations/003_create_recordings_table.py b/migrations/003_create_recordings_table.py
new file mode 100644
--- /dev/null
+++ b/migrations/003_create_recordings_table.py
@@ -0,0 +1,44 @@
+"""Peewee migrations -- 003_create_recordings_table.py.
+
+Some examples (model - class or model name)::
+
+ > Model = migrator.orm['model_name'] # Return model in current state by name
+
+ > migrator.sql(sql) # Run custom SQL
+ > migrator.python(func, *args, **kwargs) # Run python code
+ > migrator.create_model(Model) # Create a model (could be used as decorator)
+ > migrator.remove_model(model, cascade=True) # Remove a model
+ > migrator.add_fields(model, **fields) # Add fields to a model
+ > migrator.change_fields(model, **fields) # Change fields
+ > migrator.remove_fields(model, *field_names, cascade=True)
+ > migrator.rename_field(model, old_field_name, new_field_name)
+ > migrator.rename_table(model, new_table_name)
+ > migrator.add_index(model, *col_names, unique=False)
+ > migrator.drop_index(model, *col_names)
+ > migrator.add_not_null(model, *field_names)
+ > migrator.drop_not_null(model, *field_names)
+ > migrator.add_default(model, field_name, default)
+
+"""
+import peewee as pw
+
+from frigate.models import Recordings
+
+SQL = pw.SQL
+
+
+def migrate(migrator, database, fake=False, **kwargs):
+ migrator.create_model(Recordings)
+
+ def add_index():
+ # First add the index here, because there is a bug in peewee_migrate
+ # when trying to create an multi-column index in the same migration
+ # as the table: https://github.com/klen/peewee_migrate/issues/19
+ Recordings.add_index("start_time", "end_time")
+ Recordings.create_table()
+
+ migrator.python(add_index)
+
+
+def rollback(migrator, database, fake=False, **kwargs):
+ migrator.remove_model(Recordings)
diff --git a/migrations/004_add_bbox_region_area.py b/migrations/004_add_bbox_region_area.py
new file mode 100644
--- /dev/null
+++ b/migrations/004_add_bbox_region_area.py
@@ -0,0 +1,48 @@
+"""Peewee migrations -- 004_add_bbox_region_area.py.
+
+Some examples (model - class or model name)::
+
+ > Model = migrator.orm['model_name'] # Return model in current state by name
+
+ > migrator.sql(sql) # Run custom SQL
+ > migrator.python(func, *args, **kwargs) # Run python code
+ > migrator.create_model(Model) # Create a model (could be used as decorator)
+ > migrator.remove_model(model, cascade=True) # Remove a model
+ > migrator.add_fields(model, **fields) # Add fields to a model
+ > migrator.change_fields(model, **fields) # Change fields
+ > migrator.remove_fields(model, *field_names, cascade=True)
+ > migrator.rename_field(model, old_field_name, new_field_name)
+ > migrator.rename_table(model, new_table_name)
+ > migrator.add_index(model, *col_names, unique=False)
+ > migrator.drop_index(model, *col_names)
+ > migrator.add_not_null(model, *field_names)
+ > migrator.drop_not_null(model, *field_names)
+ > migrator.add_default(model, field_name, default)
+
+"""
+
+import datetime as dt
+import peewee as pw
+from playhouse.sqlite_ext import *
+from decimal import ROUND_HALF_EVEN
+from frigate.models import Event
+
+try:
+ import playhouse.postgres_ext as pw_pext
+except ImportError:
+ pass
+
+SQL = pw.SQL
+
+
+def migrate(migrator, database, fake=False, **kwargs):
+ migrator.add_fields(
+ Event,
+ region=JSONField(default=[]),
+ box=JSONField(default=[]),
+ area=pw.IntegerField(default=0),
+ )
+
+
+def rollback(migrator, database, fake=False, **kwargs):
+ migrator.remove_fields(Event, ["region", "box", "area"])
| diff --git a/frigate/test/test_config.py b/frigate/test/test_config.py
--- a/frigate/test/test_config.py
+++ b/frigate/test/test_config.py
@@ -1,433 +1,1136 @@
-import json
-from unittest import TestCase, main
-import voluptuous as vol
-from frigate.config import FRIGATE_CONFIG_SCHEMA, FrigateConfig
+import unittest
+import numpy as np
+from pydantic import ValidationError
+from frigate.config import (
+ FrigateConfig,
+ DetectorTypeEnum,
+)
-class TestConfig(TestCase):
+
+class TestConfig(unittest.TestCase):
def setUp(self):
self.minimal = {
- 'mqtt': {
- 'host': 'mqtt'
- },
- 'cameras': {
- 'back': {
- 'ffmpeg': {
- 'inputs': [
- { 'path': 'rtsp://10.0.0.1:554/video', 'roles': ['detect'] }
+ "mqtt": {"host": "mqtt"},
+ "cameras": {
+ "back": {
+ "ffmpeg": {
+ "inputs": [
+ {"path": "rtsp://10.0.0.1:554/video", "roles": ["detect"]}
]
},
- 'height': 1080,
- 'width': 1920
+ "detect": {
+ "height": 1080,
+ "width": 1920,
+ "fps": 5,
+ },
}
- }
+ },
}
- def test_empty(self):
- FRIGATE_CONFIG_SCHEMA({})
- def test_minimal(self):
- FRIGATE_CONFIG_SCHEMA(self.minimal)
-
def test_config_class(self):
- FrigateConfig(config=self.minimal)
-
+ frigate_config = FrigateConfig(**self.minimal)
+ assert self.minimal == frigate_config.dict(exclude_unset=True)
+
+ runtime_config = frigate_config.runtime_config
+ assert "cpu" in runtime_config.detectors.keys()
+ assert runtime_config.detectors["cpu"].type == DetectorTypeEnum.cpu
+
+ def test_invalid_mqtt_config(self):
+ config = {
+ "mqtt": {"host": "mqtt", "user": "test"},
+ "cameras": {
+ "back": {
+ "ffmpeg": {
+ "inputs": [
+ {"path": "rtsp://10.0.0.1:554/video", "roles": ["detect"]}
+ ]
+ },
+ "detect": {
+ "height": 1080,
+ "width": 1920,
+ "fps": 5,
+ },
+ }
+ },
+ }
+ self.assertRaises(ValidationError, lambda: FrigateConfig(**config))
+
def test_inherit_tracked_objects(self):
config = {
- 'mqtt': {
- 'host': 'mqtt'
+ "mqtt": {"host": "mqtt"},
+ "objects": {"track": ["person", "dog"]},
+ "cameras": {
+ "back": {
+ "ffmpeg": {
+ "inputs": [
+ {"path": "rtsp://10.0.0.1:554/video", "roles": ["detect"]}
+ ]
+ },
+ "detect": {
+ "height": 1080,
+ "width": 1920,
+ "fps": 5,
+ },
+ }
},
- 'objects': {
- 'track': ['person', 'dog']
+ }
+ frigate_config = FrigateConfig(**config)
+ assert config == frigate_config.dict(exclude_unset=True)
+
+ runtime_config = frigate_config.runtime_config
+ assert "dog" in runtime_config.cameras["back"].objects.track
+
+ def test_override_tracked_objects(self):
+ config = {
+ "mqtt": {"host": "mqtt"},
+ "objects": {"track": ["person", "dog"]},
+ "cameras": {
+ "back": {
+ "ffmpeg": {
+ "inputs": [
+ {"path": "rtsp://10.0.0.1:554/video", "roles": ["detect"]}
+ ]
+ },
+ "detect": {
+ "height": 1080,
+ "width": 1920,
+ "fps": 5,
+ },
+ "objects": {"track": ["cat"]},
+ }
},
- 'cameras': {
- 'back': {
- 'ffmpeg': {
- 'inputs': [
- { 'path': 'rtsp://10.0.0.1:554/video', 'roles': ['detect'] }
+ }
+ frigate_config = FrigateConfig(**config)
+ assert config == frigate_config.dict(exclude_unset=True)
+
+ runtime_config = frigate_config.runtime_config
+ assert "cat" in runtime_config.cameras["back"].objects.track
+
+ def test_default_object_filters(self):
+ config = {
+ "mqtt": {"host": "mqtt"},
+ "objects": {"track": ["person", "dog"]},
+ "cameras": {
+ "back": {
+ "ffmpeg": {
+ "inputs": [
+ {"path": "rtsp://10.0.0.1:554/video", "roles": ["detect"]}
]
},
- 'height': 1080,
- 'width': 1920
+ "detect": {
+ "height": 1080,
+ "width": 1920,
+ "fps": 5,
+ },
}
- }
+ },
}
- frigate_config = FrigateConfig(config=config)
- assert('dog' in frigate_config.cameras['back'].objects.track)
-
- def test_override_tracked_objects(self):
+ frigate_config = FrigateConfig(**config)
+ assert config == frigate_config.dict(exclude_unset=True)
+
+ runtime_config = frigate_config.runtime_config
+ assert "dog" in runtime_config.cameras["back"].objects.filters
+
+ def test_inherit_object_filters(self):
config = {
- 'mqtt': {
- 'host': 'mqtt'
+ "mqtt": {"host": "mqtt"},
+ "objects": {
+ "track": ["person", "dog"],
+ "filters": {"dog": {"threshold": 0.7}},
},
- 'objects': {
- 'track': ['person', 'dog']
+ "cameras": {
+ "back": {
+ "ffmpeg": {
+ "inputs": [
+ {"path": "rtsp://10.0.0.1:554/video", "roles": ["detect"]}
+ ]
+ },
+ "detect": {
+ "height": 1080,
+ "width": 1920,
+ "fps": 5,
+ },
+ }
},
- 'cameras': {
- 'back': {
- 'ffmpeg': {
- 'inputs': [
- { 'path': 'rtsp://10.0.0.1:554/video', 'roles': ['detect'] }
+ }
+ frigate_config = FrigateConfig(**config)
+ assert config == frigate_config.dict(exclude_unset=True)
+
+ runtime_config = frigate_config.runtime_config
+ assert "dog" in runtime_config.cameras["back"].objects.filters
+ assert runtime_config.cameras["back"].objects.filters["dog"].threshold == 0.7
+
+ def test_override_object_filters(self):
+ config = {
+ "mqtt": {"host": "mqtt"},
+ "cameras": {
+ "back": {
+ "ffmpeg": {
+ "inputs": [
+ {"path": "rtsp://10.0.0.1:554/video", "roles": ["detect"]}
]
},
- 'height': 1080,
- 'width': 1920,
- 'objects': {
- 'track': ['cat']
- }
+ "detect": {
+ "height": 1080,
+ "width": 1920,
+ "fps": 5,
+ },
+ "objects": {
+ "track": ["person", "dog"],
+ "filters": {"dog": {"threshold": 0.7}},
+ },
}
- }
+ },
}
- frigate_config = FrigateConfig(config=config)
- assert('cat' in frigate_config.cameras['back'].objects.track)
-
- def test_default_object_filters(self):
+ frigate_config = FrigateConfig(**config)
+ assert config == frigate_config.dict(exclude_unset=True)
+
+ runtime_config = frigate_config.runtime_config
+ assert "dog" in runtime_config.cameras["back"].objects.filters
+ assert runtime_config.cameras["back"].objects.filters["dog"].threshold == 0.7
+
+ def test_global_object_mask(self):
config = {
- 'mqtt': {
- 'host': 'mqtt'
+ "mqtt": {"host": "mqtt"},
+ "objects": {"track": ["person", "dog"]},
+ "cameras": {
+ "back": {
+ "ffmpeg": {
+ "inputs": [
+ {"path": "rtsp://10.0.0.1:554/video", "roles": ["detect"]}
+ ]
+ },
+ "detect": {
+ "height": 1080,
+ "width": 1920,
+ "fps": 5,
+ },
+ "objects": {
+ "mask": "0,0,1,1,0,1",
+ "filters": {"dog": {"mask": "1,1,1,1,1,1"}},
+ },
+ }
},
- 'objects': {
- 'track': ['person', 'dog']
+ }
+ frigate_config = FrigateConfig(**config)
+ assert config == frigate_config.dict(exclude_unset=True)
+
+ runtime_config = frigate_config.runtime_config
+ back_camera = runtime_config.cameras["back"]
+ assert "dog" in back_camera.objects.filters
+ assert len(back_camera.objects.filters["dog"].raw_mask) == 2
+ assert len(back_camera.objects.filters["person"].raw_mask) == 1
+
+ def test_default_input_args(self):
+ config = {
+ "mqtt": {"host": "mqtt"},
+ "cameras": {
+ "back": {
+ "ffmpeg": {
+ "inputs": [
+ {
+ "path": "rtsp://10.0.0.1:554/video",
+ "roles": ["detect"],
+ },
+ ]
+ },
+ "detect": {
+ "height": 1080,
+ "width": 1920,
+ "fps": 5,
+ },
+ }
},
- 'cameras': {
- 'back': {
- 'ffmpeg': {
- 'inputs': [
- { 'path': 'rtsp://10.0.0.1:554/video', 'roles': ['detect'] }
+ }
+
+ frigate_config = FrigateConfig(**config)
+ assert config == frigate_config.dict(exclude_unset=True)
+
+ runtime_config = frigate_config.runtime_config
+ assert "-rtsp_transport" in runtime_config.cameras["back"].ffmpeg_cmds[0]["cmd"]
+
+ def test_ffmpeg_params_global(self):
+ config = {
+ "ffmpeg": {"input_args": "-re"},
+ "mqtt": {"host": "mqtt"},
+ "cameras": {
+ "back": {
+ "ffmpeg": {
+ "inputs": [
+ {"path": "rtsp://10.0.0.1:554/video", "roles": ["detect"]}
]
},
- 'height': 1080,
- 'width': 1920
+ "detect": {
+ "height": 1080,
+ "width": 1920,
+ "fps": 5,
+ },
+ "objects": {
+ "track": ["person", "dog"],
+ "filters": {"dog": {"threshold": 0.7}},
+ },
}
- }
+ },
}
- frigate_config = FrigateConfig(config=config)
- assert('dog' in frigate_config.cameras['back'].objects.filters)
-
- def test_inherit_object_filters(self):
+ frigate_config = FrigateConfig(**config)
+ assert config == frigate_config.dict(exclude_unset=True)
+
+ runtime_config = frigate_config.runtime_config
+ assert "-re" in runtime_config.cameras["back"].ffmpeg_cmds[0]["cmd"]
+
+ def test_ffmpeg_params_camera(self):
config = {
- 'mqtt': {
- 'host': 'mqtt'
+ "mqtt": {"host": "mqtt"},
+ "ffmpeg": {"input_args": ["test"]},
+ "cameras": {
+ "back": {
+ "ffmpeg": {
+ "inputs": [
+ {"path": "rtsp://10.0.0.1:554/video", "roles": ["detect"]}
+ ],
+ "input_args": ["-re"],
+ },
+ "detect": {
+ "height": 1080,
+ "width": 1920,
+ "fps": 5,
+ },
+ "objects": {
+ "track": ["person", "dog"],
+ "filters": {"dog": {"threshold": 0.7}},
+ },
+ }
},
- 'objects': {
- 'track': ['person', 'dog'],
- 'filters': {
- 'dog': {
- 'threshold': 0.7
- }
+ }
+ frigate_config = FrigateConfig(**config)
+ assert config == frigate_config.dict(exclude_unset=True)
+
+ runtime_config = frigate_config.runtime_config
+ assert "-re" in runtime_config.cameras["back"].ffmpeg_cmds[0]["cmd"]
+ assert "test" not in runtime_config.cameras["back"].ffmpeg_cmds[0]["cmd"]
+
+ def test_ffmpeg_params_input(self):
+ config = {
+ "mqtt": {"host": "mqtt"},
+ "ffmpeg": {"input_args": ["test2"]},
+ "cameras": {
+ "back": {
+ "ffmpeg": {
+ "inputs": [
+ {
+ "path": "rtsp://10.0.0.1:554/video",
+ "roles": ["detect"],
+ "input_args": "-re test",
+ }
+ ],
+ "input_args": "test3",
+ },
+ "detect": {
+ "height": 1080,
+ "width": 1920,
+ "fps": 5,
+ },
+ "objects": {
+ "track": ["person", "dog"],
+ "filters": {"dog": {"threshold": 0.7}},
+ },
}
},
- 'cameras': {
- 'back': {
- 'ffmpeg': {
- 'inputs': [
- { 'path': 'rtsp://10.0.0.1:554/video', 'roles': ['detect'] }
+ }
+ frigate_config = FrigateConfig(**config)
+ assert config == frigate_config.dict(exclude_unset=True)
+
+ runtime_config = frigate_config.runtime_config
+ assert "-re" in runtime_config.cameras["back"].ffmpeg_cmds[0]["cmd"]
+ assert "test" in runtime_config.cameras["back"].ffmpeg_cmds[0]["cmd"]
+ assert "test2" not in runtime_config.cameras["back"].ffmpeg_cmds[0]["cmd"]
+ assert "test3" not in runtime_config.cameras["back"].ffmpeg_cmds[0]["cmd"]
+
+ def test_inherit_clips_retention(self):
+ config = {
+ "mqtt": {"host": "mqtt"},
+ "record": {
+ "events": {"retain": {"default": 20, "objects": {"person": 30}}}
+ },
+ "cameras": {
+ "back": {
+ "ffmpeg": {
+ "inputs": [
+ {"path": "rtsp://10.0.0.1:554/video", "roles": ["detect"]}
]
},
- 'height': 1080,
- 'width': 1920
+ "detect": {
+ "height": 1080,
+ "width": 1920,
+ "fps": 5,
+ },
}
- }
+ },
}
- frigate_config = FrigateConfig(config=config)
- assert('dog' in frigate_config.cameras['back'].objects.filters)
- assert(frigate_config.cameras['back'].objects.filters['dog'].threshold == 0.7)
-
- def test_override_object_filters(self):
+ frigate_config = FrigateConfig(**config)
+ assert config == frigate_config.dict(exclude_unset=True)
+
+ runtime_config = frigate_config.runtime_config
+ assert (
+ runtime_config.cameras["back"].record.events.retain.objects["person"] == 30
+ )
+
+ def test_roles_listed_twice_throws_error(self):
config = {
- 'mqtt': {
- 'host': 'mqtt'
+ "mqtt": {"host": "mqtt"},
+ "record": {
+ "events": {"retain": {"default": 20, "objects": {"person": 30}}}
},
- 'cameras': {
- 'back': {
- 'ffmpeg': {
- 'inputs': [
- { 'path': 'rtsp://10.0.0.1:554/video', 'roles': ['detect'] }
+ "cameras": {
+ "back": {
+ "ffmpeg": {
+ "inputs": [
+ {"path": "rtsp://10.0.0.1:554/video", "roles": ["detect"]},
+ {"path": "rtsp://10.0.0.1:554/video2", "roles": ["detect"]},
]
},
- 'height': 1080,
- 'width': 1920,
- 'objects': {
- 'track': ['person', 'dog'],
- 'filters': {
- 'dog': {
- 'threshold': 0.7
- }
- }
- }
+ "detect": {
+ "height": 1080,
+ "width": 1920,
+ "fps": 5,
+ },
}
- }
+ },
}
- frigate_config = FrigateConfig(config=config)
- assert('dog' in frigate_config.cameras['back'].objects.filters)
- assert(frigate_config.cameras['back'].objects.filters['dog'].threshold == 0.7)
-
- def test_global_object_mask(self):
+ self.assertRaises(ValidationError, lambda: FrigateConfig(**config))
+
+ def test_zone_matching_camera_name_throws_error(self):
config = {
- 'mqtt': {
- 'host': 'mqtt'
+ "mqtt": {"host": "mqtt"},
+ "record": {
+ "events": {"retain": {"default": 20, "objects": {"person": 30}}}
+ },
+ "cameras": {
+ "back": {
+ "ffmpeg": {
+ "inputs": [
+ {"path": "rtsp://10.0.0.1:554/video", "roles": ["detect"]}
+ ]
+ },
+ "detect": {
+ "height": 1080,
+ "width": 1920,
+ "fps": 5,
+ },
+ "zones": {"back": {"coordinates": "1,1,1,1,1,1"}},
+ }
},
- 'objects': {
- 'track': ['person', 'dog']
+ }
+ self.assertRaises(ValidationError, lambda: FrigateConfig(**config))
+
+ def test_zone_assigns_color_and_contour(self):
+ config = {
+ "mqtt": {"host": "mqtt"},
+ "record": {
+ "events": {"retain": {"default": 20, "objects": {"person": 30}}}
},
- 'cameras': {
- 'back': {
- 'ffmpeg': {
- 'inputs': [
- { 'path': 'rtsp://10.0.0.1:554/video', 'roles': ['detect'] }
+ "cameras": {
+ "back": {
+ "ffmpeg": {
+ "inputs": [
+ {"path": "rtsp://10.0.0.1:554/video", "roles": ["detect"]}
]
},
- 'height': 1080,
- 'width': 1920,
- 'objects': {
- 'mask': '0,0,1,1,0,1',
- 'filters': {
- 'dog': {
- 'mask': '1,1,1,1,1,1'
- }
- }
- }
+ "detect": {
+ "height": 1080,
+ "width": 1920,
+ "fps": 5,
+ },
+ "zones": {"test": {"coordinates": "1,1,1,1,1,1"}},
}
- }
+ },
}
- frigate_config = FrigateConfig(config=config)
- assert('dog' in frigate_config.cameras['back'].objects.filters)
- assert(len(frigate_config.cameras['back'].objects.filters['dog']._raw_mask) == 2)
- assert(len(frigate_config.cameras['back'].objects.filters['person']._raw_mask) == 1)
-
- def test_ffmpeg_params_global(self):
+ frigate_config = FrigateConfig(**config)
+ assert config == frigate_config.dict(exclude_unset=True)
+
+ runtime_config = frigate_config.runtime_config
+ assert isinstance(
+ runtime_config.cameras["back"].zones["test"].contour, np.ndarray
+ )
+ assert runtime_config.cameras["back"].zones["test"].color != (0, 0, 0)
+
+ def test_clips_should_default_to_global_objects(self):
config = {
- 'ffmpeg': {
- 'input_args': ['-re']
+ "mqtt": {"host": "mqtt"},
+ "record": {
+ "events": {"retain": {"default": 20, "objects": {"person": 30}}}
},
- 'mqtt': {
- 'host': 'mqtt'
+ "objects": {"track": ["person", "dog"]},
+ "cameras": {
+ "back": {
+ "ffmpeg": {
+ "inputs": [
+ {"path": "rtsp://10.0.0.1:554/video", "roles": ["detect"]}
+ ]
+ },
+ "detect": {
+ "height": 1080,
+ "width": 1920,
+ "fps": 5,
+ },
+ "record": {"events": {}},
+ }
},
- 'cameras': {
- 'back': {
- 'ffmpeg': {
- 'inputs': [
- { 'path': 'rtsp://10.0.0.1:554/video', 'roles': ['detect'] }
+ }
+ frigate_config = FrigateConfig(**config)
+ assert config == frigate_config.dict(exclude_unset=True)
+
+ runtime_config = frigate_config.runtime_config
+ back_camera = runtime_config.cameras["back"]
+ assert back_camera.record.events.objects is None
+ assert back_camera.record.events.retain.objects["person"] == 30
+
+ def test_role_assigned_but_not_enabled(self):
+ config = {
+ "mqtt": {"host": "mqtt"},
+ "cameras": {
+ "back": {
+ "ffmpeg": {
+ "inputs": [
+ {
+ "path": "rtsp://10.0.0.1:554/video",
+ "roles": ["detect", "rtmp"],
+ },
+ {"path": "rtsp://10.0.0.1:554/record", "roles": ["record"]},
]
},
- 'height': 1080,
- 'width': 1920,
- 'objects': {
- 'track': ['person', 'dog'],
- 'filters': {
- 'dog': {
- 'threshold': 0.7
- }
- }
- }
+ "detect": {
+ "height": 1080,
+ "width": 1920,
+ "fps": 5,
+ },
}
- }
+ },
}
- frigate_config = FrigateConfig(config=config)
- assert('-re' in frigate_config.cameras['back'].ffmpeg_cmds[0]['cmd'])
- def test_ffmpeg_params_camera(self):
+ frigate_config = FrigateConfig(**config)
+ assert config == frigate_config.dict(exclude_unset=True)
+
+ runtime_config = frigate_config.runtime_config
+ ffmpeg_cmds = runtime_config.cameras["back"].ffmpeg_cmds
+ assert len(ffmpeg_cmds) == 1
+ assert not "clips" in ffmpeg_cmds[0]["roles"]
+
+ def test_max_disappeared_default(self):
config = {
- 'mqtt': {
- 'host': 'mqtt'
+ "mqtt": {"host": "mqtt"},
+ "cameras": {
+ "back": {
+ "ffmpeg": {
+ "inputs": [
+ {
+ "path": "rtsp://10.0.0.1:554/video",
+ "roles": ["detect"],
+ },
+ ]
+ },
+ "detect": {
+ "enabled": True,
+ "height": 1080,
+ "width": 1920,
+ "fps": 5,
+ },
+ }
},
- 'cameras': {
- 'back': {
- 'ffmpeg': {
- 'inputs': [
- { 'path': 'rtsp://10.0.0.1:554/video', 'roles': ['detect'] }
- ],
- 'input_args': ['-re']
- },
- 'height': 1080,
- 'width': 1920,
- 'objects': {
- 'track': ['person', 'dog'],
- 'filters': {
- 'dog': {
- 'threshold': 0.7
- }
- }
- }
+ }
+
+ frigate_config = FrigateConfig(**config)
+ assert config == frigate_config.dict(exclude_unset=True)
+
+ runtime_config = frigate_config.runtime_config
+ assert runtime_config.cameras["back"].detect.max_disappeared == 5 * 5
+
+ def test_motion_frame_height_wont_go_below_120(self):
+
+ config = {
+ "mqtt": {"host": "mqtt"},
+ "cameras": {
+ "back": {
+ "ffmpeg": {
+ "inputs": [
+ {
+ "path": "rtsp://10.0.0.1:554/video",
+ "roles": ["detect"],
+ },
+ ]
+ },
+ "detect": {
+ "height": 1080,
+ "width": 1920,
+ "fps": 5,
+ },
}
- }
+ },
}
- frigate_config = FrigateConfig(config=config)
- assert('-re' in frigate_config.cameras['back'].ffmpeg_cmds[0]['cmd'])
- def test_ffmpeg_params_input(self):
+ frigate_config = FrigateConfig(**config)
+ assert config == frigate_config.dict(exclude_unset=True)
+
+ runtime_config = frigate_config.runtime_config
+ assert runtime_config.cameras["back"].motion.frame_height >= 120
+
+ def test_motion_contour_area_dynamic(self):
+
config = {
- 'mqtt': {
- 'host': 'mqtt'
+ "mqtt": {"host": "mqtt"},
+ "cameras": {
+ "back": {
+ "ffmpeg": {
+ "inputs": [
+ {
+ "path": "rtsp://10.0.0.1:554/video",
+ "roles": ["detect"],
+ },
+ ]
+ },
+ "detect": {
+ "height": 1080,
+ "width": 1920,
+ "fps": 5,
+ },
+ }
},
- 'cameras': {
- 'back': {
- 'ffmpeg': {
- 'inputs': [
- { 'path': 'rtsp://10.0.0.1:554/video', 'roles': ['detect'], 'input_args': ['-re'] }
+ }
+
+ frigate_config = FrigateConfig(**config)
+ assert config == frigate_config.dict(exclude_unset=True)
+
+ runtime_config = frigate_config.runtime_config
+ assert round(runtime_config.cameras["back"].motion.contour_area) == 99
+
+ def test_merge_labelmap(self):
+
+ config = {
+ "mqtt": {"host": "mqtt"},
+ "model": {"labelmap": {7: "truck"}},
+ "cameras": {
+ "back": {
+ "ffmpeg": {
+ "inputs": [
+ {
+ "path": "rtsp://10.0.0.1:554/video",
+ "roles": ["detect"],
+ },
]
},
- 'height': 1080,
- 'width': 1920,
- 'objects': {
- 'track': ['person', 'dog'],
- 'filters': {
- 'dog': {
- 'threshold': 0.7
- }
- }
- }
+ "detect": {
+ "height": 1080,
+ "width": 1920,
+ "fps": 5,
+ },
}
- }
+ },
}
- frigate_config = FrigateConfig(config=config)
- assert('-re' in frigate_config.cameras['back'].ffmpeg_cmds[0]['cmd'])
-
-
- def test_inherit_clips_retention(self):
+
+ frigate_config = FrigateConfig(**config)
+ assert config == frigate_config.dict(exclude_unset=True)
+
+ runtime_config = frigate_config.runtime_config
+ assert runtime_config.model.merged_labelmap[7] == "truck"
+
+ def test_default_labelmap_empty(self):
+
config = {
- 'mqtt': {
- 'host': 'mqtt'
+ "mqtt": {"host": "mqtt"},
+ "cameras": {
+ "back": {
+ "ffmpeg": {
+ "inputs": [
+ {
+ "path": "rtsp://10.0.0.1:554/video",
+ "roles": ["detect"],
+ },
+ ]
+ },
+ "detect": {
+ "height": 1080,
+ "width": 1920,
+ "fps": 5,
+ },
+ }
},
- 'clips': {
- 'retain': {
- 'default': 20,
- 'objects': {
- 'person': 30
- }
+ }
+
+ frigate_config = FrigateConfig(**config)
+ assert config == frigate_config.dict(exclude_unset=True)
+
+ runtime_config = frigate_config.runtime_config
+ assert runtime_config.model.merged_labelmap[0] == "person"
+
+ def test_default_labelmap(self):
+
+ config = {
+ "mqtt": {"host": "mqtt"},
+ "model": {"width": 320, "height": 320},
+ "cameras": {
+ "back": {
+ "ffmpeg": {
+ "inputs": [
+ {
+ "path": "rtsp://10.0.0.1:554/video",
+ "roles": ["detect"],
+ },
+ ]
+ },
+ "detect": {
+ "height": 1080,
+ "width": 1920,
+ "fps": 5,
+ },
}
},
- 'cameras': {
- 'back': {
- 'ffmpeg': {
- 'inputs': [
- { 'path': 'rtsp://10.0.0.1:554/video', 'roles': ['detect'] }
+ }
+
+ frigate_config = FrigateConfig(**config)
+ assert config == frigate_config.dict(exclude_unset=True)
+
+ runtime_config = frigate_config.runtime_config
+ assert runtime_config.model.merged_labelmap[0] == "person"
+
+ def test_fails_on_invalid_role(self):
+
+ config = {
+ "mqtt": {"host": "mqtt"},
+ "cameras": {
+ "back": {
+ "ffmpeg": {
+ "inputs": [
+ {
+ "path": "rtsp://10.0.0.1:554/video",
+ "roles": ["detect", "clips"],
+ },
]
},
- 'height': 1080,
- 'width': 1920
+ "detect": {
+ "height": 1080,
+ "width": 1920,
+ "fps": 5,
+ },
}
- }
+ },
}
- frigate_config = FrigateConfig(config=config)
- assert(frigate_config.cameras['back'].clips.retain.objects['person'] == 30)
-
- def test_roles_listed_twice_throws_error(self):
+
+ self.assertRaises(ValidationError, lambda: FrigateConfig(**config))
+
+ def test_global_detect(self):
+
config = {
- 'mqtt': {
- 'host': 'mqtt'
+ "mqtt": {"host": "mqtt"},
+ "detect": {"max_disappeared": 1},
+ "cameras": {
+ "back": {
+ "ffmpeg": {
+ "inputs": [
+ {
+ "path": "rtsp://10.0.0.1:554/video",
+ "roles": ["detect"],
+ },
+ ]
+ },
+ "detect": {
+ "height": 1080,
+ "width": 1920,
+ "fps": 5,
+ },
+ }
},
- 'clips': {
- 'retain': {
- 'default': 20,
- 'objects': {
- 'person': 30
+ }
+ frigate_config = FrigateConfig(**config)
+ assert config == frigate_config.dict(exclude_unset=True)
+
+ runtime_config = frigate_config.runtime_config
+ assert runtime_config.cameras["back"].detect.max_disappeared == 1
+ assert runtime_config.cameras["back"].detect.height == 1080
+
+ def test_default_detect(self):
+
+ config = {
+ "mqtt": {"host": "mqtt"},
+ "cameras": {
+ "back": {
+ "ffmpeg": {
+ "inputs": [
+ {
+ "path": "rtsp://10.0.0.1:554/video",
+ "roles": ["detect"],
+ },
+ ]
}
}
},
- 'cameras': {
- 'back': {
- 'ffmpeg': {
- 'inputs': [
- { 'path': 'rtsp://10.0.0.1:554/video', 'roles': ['detect'] },
- { 'path': 'rtsp://10.0.0.1:554/video2', 'roles': ['detect'] }
+ }
+ frigate_config = FrigateConfig(**config)
+ assert config == frigate_config.dict(exclude_unset=True)
+
+ runtime_config = frigate_config.runtime_config
+ assert runtime_config.cameras["back"].detect.max_disappeared == 25
+ assert runtime_config.cameras["back"].detect.height == 720
+
+ def test_global_detect_merge(self):
+
+ config = {
+ "mqtt": {"host": "mqtt"},
+ "detect": {"max_disappeared": 1, "height": 720},
+ "cameras": {
+ "back": {
+ "ffmpeg": {
+ "inputs": [
+ {
+ "path": "rtsp://10.0.0.1:554/video",
+ "roles": ["detect"],
+ },
]
},
- 'height': 1080,
- 'width': 1920
+ "detect": {
+ "height": 1080,
+ "width": 1920,
+ "fps": 5,
+ },
}
- }
+ },
}
- self.assertRaises(vol.MultipleInvalid, lambda: FrigateConfig(config=config))
-
- def test_zone_matching_camera_name_throws_error(self):
+ frigate_config = FrigateConfig(**config)
+ assert config == frigate_config.dict(exclude_unset=True)
+
+ runtime_config = frigate_config.runtime_config
+ assert runtime_config.cameras["back"].detect.max_disappeared == 1
+ assert runtime_config.cameras["back"].detect.height == 1080
+ assert runtime_config.cameras["back"].detect.width == 1920
+
+ def test_global_snapshots(self):
+
config = {
- 'mqtt': {
- 'host': 'mqtt'
+ "mqtt": {"host": "mqtt"},
+ "snapshots": {"enabled": True},
+ "cameras": {
+ "back": {
+ "ffmpeg": {
+ "inputs": [
+ {
+ "path": "rtsp://10.0.0.1:554/video",
+ "roles": ["detect"],
+ },
+ ]
+ },
+ "snapshots": {
+ "height": 100,
+ },
+ }
},
- 'clips': {
- 'retain': {
- 'default': 20,
- 'objects': {
- 'person': 30
+ }
+ frigate_config = FrigateConfig(**config)
+ assert config == frigate_config.dict(exclude_unset=True)
+
+ runtime_config = frigate_config.runtime_config
+ assert runtime_config.cameras["back"].snapshots.enabled
+ assert runtime_config.cameras["back"].snapshots.height == 100
+
+ def test_default_snapshots(self):
+
+ config = {
+ "mqtt": {"host": "mqtt"},
+ "cameras": {
+ "back": {
+ "ffmpeg": {
+ "inputs": [
+ {
+ "path": "rtsp://10.0.0.1:554/video",
+ "roles": ["detect"],
+ },
+ ]
}
}
},
- 'cameras': {
- 'back': {
- 'ffmpeg': {
- 'inputs': [
- { 'path': 'rtsp://10.0.0.1:554/video', 'roles': ['detect'] }
+ }
+ frigate_config = FrigateConfig(**config)
+ assert config == frigate_config.dict(exclude_unset=True)
+
+ runtime_config = frigate_config.runtime_config
+ assert runtime_config.cameras["back"].snapshots.bounding_box
+ assert runtime_config.cameras["back"].snapshots.quality == 70
+
+ def test_global_snapshots_merge(self):
+
+ config = {
+ "mqtt": {"host": "mqtt"},
+ "snapshots": {"bounding_box": False, "height": 300},
+ "cameras": {
+ "back": {
+ "ffmpeg": {
+ "inputs": [
+ {
+ "path": "rtsp://10.0.0.1:554/video",
+ "roles": ["detect"],
+ },
+ ]
+ },
+ "snapshots": {
+ "height": 150,
+ "enabled": True,
+ },
+ }
+ },
+ }
+ frigate_config = FrigateConfig(**config)
+ assert config == frigate_config.dict(exclude_unset=True)
+
+ runtime_config = frigate_config.runtime_config
+ assert runtime_config.cameras["back"].snapshots.bounding_box == False
+ assert runtime_config.cameras["back"].snapshots.height == 150
+ assert runtime_config.cameras["back"].snapshots.enabled
+
+ def test_global_rtmp(self):
+
+ config = {
+ "mqtt": {"host": "mqtt"},
+ "rtmp": {"enabled": True},
+ "cameras": {
+ "back": {
+ "ffmpeg": {
+ "inputs": [
+ {
+ "path": "rtsp://10.0.0.1:554/video",
+ "roles": ["detect"],
+ },
]
},
- 'height': 1080,
- 'width': 1920,
- 'zones': {
- 'back': {
- 'coordinates': '1,1,1,1,1,1'
- }
+ }
+ },
+ }
+ frigate_config = FrigateConfig(**config)
+ assert config == frigate_config.dict(exclude_unset=True)
+
+ runtime_config = frigate_config.runtime_config
+ assert runtime_config.cameras["back"].rtmp.enabled
+
+ def test_default_rtmp(self):
+
+ config = {
+ "mqtt": {"host": "mqtt"},
+ "cameras": {
+ "back": {
+ "ffmpeg": {
+ "inputs": [
+ {
+ "path": "rtsp://10.0.0.1:554/video",
+ "roles": ["detect"],
+ },
+ ]
}
}
- }
+ },
}
- self.assertRaises(vol.MultipleInvalid, lambda: FrigateConfig(config=config))
-
- def test_clips_should_default_to_global_objects(self):
+ frigate_config = FrigateConfig(**config)
+ assert config == frigate_config.dict(exclude_unset=True)
+
+ runtime_config = frigate_config.runtime_config
+ assert runtime_config.cameras["back"].rtmp.enabled
+
+ def test_global_rtmp_merge(self):
+
+ config = {
+ "mqtt": {"host": "mqtt"},
+ "rtmp": {"enabled": False},
+ "cameras": {
+ "back": {
+ "ffmpeg": {
+ "inputs": [
+ {
+ "path": "rtsp://10.0.0.1:554/video",
+ "roles": ["detect"],
+ },
+ ]
+ },
+ "rtmp": {
+ "enabled": True,
+ },
+ }
+ },
+ }
+ frigate_config = FrigateConfig(**config)
+ assert config == frigate_config.dict(exclude_unset=True)
+
+ runtime_config = frigate_config.runtime_config
+ assert runtime_config.cameras["back"].rtmp.enabled
+
+ def test_global_live(self):
+
config = {
- 'mqtt': {
- 'host': 'mqtt'
+ "mqtt": {"host": "mqtt"},
+ "live": {"quality": 4},
+ "cameras": {
+ "back": {
+ "ffmpeg": {
+ "inputs": [
+ {
+ "path": "rtsp://10.0.0.1:554/video",
+ "roles": ["detect"],
+ },
+ ]
+ },
+ }
},
- 'clips': {
- 'retain': {
- 'default': 20,
- 'objects': {
- 'person': 30
+ }
+ frigate_config = FrigateConfig(**config)
+ assert config == frigate_config.dict(exclude_unset=True)
+
+ runtime_config = frigate_config.runtime_config
+ assert runtime_config.cameras["back"].live.quality == 4
+
+ def test_default_live(self):
+
+ config = {
+ "mqtt": {"host": "mqtt"},
+ "cameras": {
+ "back": {
+ "ffmpeg": {
+ "inputs": [
+ {
+ "path": "rtsp://10.0.0.1:554/video",
+ "roles": ["detect"],
+ },
+ ]
}
}
},
- 'objects': {
- 'track': ['person', 'dog']
+ }
+ frigate_config = FrigateConfig(**config)
+ assert config == frigate_config.dict(exclude_unset=True)
+
+ runtime_config = frigate_config.runtime_config
+ assert runtime_config.cameras["back"].live.quality == 8
+
+ def test_global_live_merge(self):
+
+ config = {
+ "mqtt": {"host": "mqtt"},
+ "live": {"quality": 4, "height": 480},
+ "cameras": {
+ "back": {
+ "ffmpeg": {
+ "inputs": [
+ {
+ "path": "rtsp://10.0.0.1:554/video",
+ "roles": ["detect"],
+ },
+ ]
+ },
+ "live": {
+ "quality": 7,
+ },
+ }
},
- 'cameras': {
- 'back': {
- 'ffmpeg': {
- 'inputs': [
- { 'path': 'rtsp://10.0.0.1:554/video', 'roles': ['detect'] }
+ }
+ frigate_config = FrigateConfig(**config)
+ assert config == frigate_config.dict(exclude_unset=True)
+
+ runtime_config = frigate_config.runtime_config
+ assert runtime_config.cameras["back"].live.quality == 7
+ assert runtime_config.cameras["back"].live.height == 480
+
+ def test_global_timestamp_style(self):
+
+ config = {
+ "mqtt": {"host": "mqtt"},
+ "timestamp_style": {"position": "bl"},
+ "cameras": {
+ "back": {
+ "ffmpeg": {
+ "inputs": [
+ {
+ "path": "rtsp://10.0.0.1:554/video",
+ "roles": ["detect"],
+ },
]
},
- 'height': 1080,
- 'width': 1920,
- 'clips': {
- 'enabled': True
+ }
+ },
+ }
+ frigate_config = FrigateConfig(**config)
+ assert config == frigate_config.dict(exclude_unset=True)
+
+ runtime_config = frigate_config.runtime_config
+ assert runtime_config.cameras["back"].timestamp_style.position == "bl"
+
+ def test_default_timestamp_style(self):
+
+ config = {
+ "mqtt": {"host": "mqtt"},
+ "cameras": {
+ "back": {
+ "ffmpeg": {
+ "inputs": [
+ {
+ "path": "rtsp://10.0.0.1:554/video",
+ "roles": ["detect"],
+ },
+ ]
}
}
- }
+ },
}
- config = FrigateConfig(config=config)
- assert(config.cameras['back'].clips.objects is None)
-
- def test_role_assigned_but_not_enabled(self):
- json_config = {
- 'mqtt': {
- 'host': 'mqtt'
+ frigate_config = FrigateConfig(**config)
+ assert config == frigate_config.dict(exclude_unset=True)
+
+ runtime_config = frigate_config.runtime_config
+ assert runtime_config.cameras["back"].timestamp_style.position == "tl"
+
+ def test_global_timestamp_style_merge(self):
+
+ config = {
+ "mqtt": {"host": "mqtt"},
+ "rtmp": {"enabled": False},
+ "timestamp_style": {"position": "br", "thickness": 2},
+ "cameras": {
+ "back": {
+ "ffmpeg": {
+ "inputs": [
+ {
+ "path": "rtsp://10.0.0.1:554/video",
+ "roles": ["detect"],
+ },
+ ]
+ },
+ "timestamp_style": {"position": "bl", "thickness": 4},
+ }
},
- 'cameras': {
- 'back': {
- 'ffmpeg': {
- 'inputs': [
- { 'path': 'rtsp://10.0.0.1:554/video', 'roles': ['detect', 'rtmp'] },
- { 'path': 'rtsp://10.0.0.1:554/record', 'roles': ['record'] }
+ }
+ frigate_config = FrigateConfig(**config)
+ assert config == frigate_config.dict(exclude_unset=True)
+
+ runtime_config = frigate_config.runtime_config
+ assert runtime_config.cameras["back"].timestamp_style.position == "bl"
+ assert runtime_config.cameras["back"].timestamp_style.thickness == 4
+
+ def test_allow_retain_to_be_a_decimal(self):
+
+ config = {
+ "mqtt": {"host": "mqtt"},
+ "snapshots": {"retain": {"default": 1.5}},
+ "cameras": {
+ "back": {
+ "ffmpeg": {
+ "inputs": [
+ {
+ "path": "rtsp://10.0.0.1:554/video",
+ "roles": ["detect"],
+ },
]
},
- 'height': 1080,
- 'width': 1920
}
- }
+ },
}
+ frigate_config = FrigateConfig(**config)
+ assert config == frigate_config.dict(exclude_unset=True)
- config = FrigateConfig(config=json_config)
- ffmpeg_cmds = config.cameras['back'].ffmpeg_cmds
- assert(len(ffmpeg_cmds) == 1)
- assert(not 'clips' in ffmpeg_cmds[0]['roles'])
+ runtime_config = frigate_config.runtime_config
+ assert runtime_config.cameras["back"].snapshots.retain.default == 1.5
-if __name__ == '__main__':
- main(verbosity=2)
+if __name__ == "__main__":
+ unittest.main(verbosity=2)
diff --git a/frigate/test/test_copy_yuv_to_position.py b/frigate/test/test_copy_yuv_to_position.py
new file mode 100644
--- /dev/null
+++ b/frigate/test/test_copy_yuv_to_position.py
@@ -0,0 +1,66 @@
+import cv2
+import numpy as np
+from unittest import TestCase, main
+from frigate.util import get_yuv_crop, copy_yuv_to_position
+
+
+class TestCopyYuvToPosition(TestCase):
+ def setUp(self):
+ self.source_frame_bgr = np.zeros((400, 800, 3), np.uint8)
+ self.source_frame_bgr[:] = (0, 0, 255)
+ self.source_yuv_frame = cv2.cvtColor(
+ self.source_frame_bgr, cv2.COLOR_BGR2YUV_I420
+ )
+ y, u1, u2, v1, v2 = get_yuv_crop(
+ self.source_yuv_frame.shape,
+ (
+ 0,
+ 0,
+ self.source_frame_bgr.shape[1],
+ self.source_frame_bgr.shape[0],
+ ),
+ )
+ self.source_channel_dims = {
+ "y": y,
+ "u1": u1,
+ "u2": u2,
+ "v1": v1,
+ "v2": v2,
+ }
+
+ self.dest_frame_bgr = np.zeros((400, 800, 3), np.uint8)
+ self.dest_frame_bgr[:] = (112, 202, 50)
+ self.dest_frame_bgr[100:300, 200:600] = (255, 0, 0)
+ self.dest_yuv_frame = cv2.cvtColor(self.dest_frame_bgr, cv2.COLOR_BGR2YUV_I420)
+
+ def test_clear_position(self):
+ copy_yuv_to_position(self.dest_yuv_frame, (100, 100), (100, 100))
+ # cv2.imwrite(f"source_frame_yuv.jpg", self.source_yuv_frame)
+ # cv2.imwrite(f"dest_frame_yuv.jpg", self.dest_yuv_frame)
+
+ def test_copy_position(self):
+ copy_yuv_to_position(
+ self.dest_yuv_frame,
+ (100, 100),
+ (100, 200),
+ self.source_yuv_frame,
+ self.source_channel_dims,
+ )
+
+ # cv2.imwrite(f"source_frame_yuv.jpg", self.source_yuv_frame)
+ # cv2.imwrite(f"dest_frame_yuv.jpg", self.dest_yuv_frame)
+
+ def test_copy_position_full_screen(self):
+ copy_yuv_to_position(
+ self.dest_yuv_frame,
+ (0, 0),
+ (400, 800),
+ self.source_yuv_frame,
+ self.source_channel_dims,
+ )
+ # cv2.imwrite(f"source_frame_yuv.jpg", self.source_yuv_frame)
+ # cv2.imwrite(f"dest_frame_yuv.jpg", self.dest_yuv_frame)
+
+
+if __name__ == "__main__":
+ main(verbosity=2)
diff --git a/frigate/test/test_yuv_region_2_rgb.py b/frigate/test/test_yuv_region_2_rgb.py
--- a/frigate/test/test_yuv_region_2_rgb.py
+++ b/frigate/test/test_yuv_region_2_rgb.py
@@ -3,37 +3,39 @@
from unittest import TestCase, main
from frigate.util import yuv_region_2_rgb
+
class TestYuvRegion2RGB(TestCase):
def setUp(self):
self.bgr_frame = np.zeros((100, 200, 3), np.uint8)
self.bgr_frame[:] = (0, 0, 255)
- self.bgr_frame[5:55, 5:55] = (255,0,0)
+ self.bgr_frame[5:55, 5:55] = (255, 0, 0)
# cv2.imwrite(f"bgr_frame.jpg", self.bgr_frame)
self.yuv_frame = cv2.cvtColor(self.bgr_frame, cv2.COLOR_BGR2YUV_I420)
def test_crop_yuv(self):
- cropped = yuv_region_2_rgb(self.yuv_frame, (10,10,50,50))
+ cropped = yuv_region_2_rgb(self.yuv_frame, (10, 10, 50, 50))
# ensure the upper left pixel is blue
- assert(np.all(cropped[0, 0] == [0, 0, 255]))
+ assert np.all(cropped[0, 0] == [0, 0, 255])
def test_crop_yuv_out_of_bounds(self):
- cropped = yuv_region_2_rgb(self.yuv_frame, (0,0,200,200))
+ cropped = yuv_region_2_rgb(self.yuv_frame, (0, 0, 200, 200))
# cv2.imwrite(f"cropped.jpg", cv2.cvtColor(cropped, cv2.COLOR_RGB2BGR))
# ensure the upper left pixel is red
# the yuv conversion has some noise
- assert(np.all(cropped[0, 0] == [255, 1, 0]))
+ assert np.all(cropped[0, 0] == [255, 1, 0])
# ensure the bottom right is black
- assert(np.all(cropped[199, 199] == [0, 0, 0]))
+ assert np.all(cropped[199, 199] == [0, 0, 0])
def test_crop_yuv_portrait(self):
bgr_frame = np.zeros((1920, 1080, 3), np.uint8)
bgr_frame[:] = (0, 0, 255)
- bgr_frame[5:55, 5:55] = (255,0,0)
+ bgr_frame[5:55, 5:55] = (255, 0, 0)
# cv2.imwrite(f"bgr_frame.jpg", self.bgr_frame)
yuv_frame = cv2.cvtColor(bgr_frame, cv2.COLOR_BGR2YUV_I420)
cropped = yuv_region_2_rgb(yuv_frame, (0, 852, 648, 1500))
# cv2.imwrite(f"cropped.jpg", cv2.cvtColor(cropped, cv2.COLOR_RGB2BGR))
-if __name__ == '__main__':
+
+if __name__ == "__main__":
main(verbosity=2)
\ No newline at end of file
diff --git a/web/src/__tests__/Sidebar.test.jsx b/web/src/__tests__/Sidebar.test.jsx
--- a/web/src/__tests__/Sidebar.test.jsx
+++ b/web/src/__tests__/Sidebar.test.jsx
@@ -9,8 +9,8 @@ describe('Sidebar', () => {
jest.spyOn(Api, 'useConfig').mockImplementation(() => ({
data: {
cameras: {
- front: { name: 'front', objects: { track: ['taco', 'cat', 'dog'] } },
- side: { name: 'side', objects: { track: ['taco', 'cat', 'dog'] } },
+ front: { name: 'front', objects: { track: ['taco', 'cat', 'dog'] }, record: { enabled: true } },
+ side: { name: 'side', objects: { track: ['taco', 'cat', 'dog'] }, record: { enabled: false } },
},
},
}));
@@ -30,4 +30,11 @@ describe('Sidebar', () => {
expect(screen.queryByRole('link', { name: 'front' })).toBeInTheDocument();
expect(screen.queryByRole('link', { name: 'side' })).toBeInTheDocument();
});
+
+ test('render cameras if in record route', async () => {
+ window.history.replaceState({}, 'Front Recordings', '/recording/front');
+ render(<Sidebar />);
+ expect(screen.queryByRole('link', { name: 'front' })).toBeInTheDocument();
+ expect(screen.queryByRole('link', { name: 'side' })).not.toBeInTheDocument();
+ });
});
diff --git a/web/src/api/__tests__/mqtt.test.jsx b/web/src/api/__tests__/mqtt.test.jsx
--- a/web/src/api/__tests__/mqtt.test.jsx
+++ b/web/src/api/__tests__/mqtt.test.jsx
@@ -107,12 +107,12 @@ describe('MqttProvider', () => {
);
});
- test('prefills the clips/detect/snapshots state from config', async () => {
+ test('prefills the recordings/detect/snapshots state from config', async () => {
jest.spyOn(Date, 'now').mockReturnValue(123456);
const config = {
cameras: {
- front: { name: 'front', detect: { enabled: true }, clips: { enabled: false }, snapshots: { enabled: true } },
- side: { name: 'side', detect: { enabled: false }, clips: { enabled: false }, snapshots: { enabled: false } },
+ front: { name: 'front', detect: { enabled: true }, record: { enabled: false }, snapshots: { enabled: true } },
+ side: { name: 'side', detect: { enabled: false }, record: { enabled: false }, snapshots: { enabled: false } },
},
};
render(
@@ -122,10 +122,10 @@ describe('MqttProvider', () => {
);
await screen.findByTestId('data');
expect(screen.getByTestId('front/detect/state')).toHaveTextContent('{"lastUpdate":123456,"payload":"ON"}');
- expect(screen.getByTestId('front/clips/state')).toHaveTextContent('{"lastUpdate":123456,"payload":"OFF"}');
+ expect(screen.getByTestId('front/recordings/state')).toHaveTextContent('{"lastUpdate":123456,"payload":"OFF"}');
expect(screen.getByTestId('front/snapshots/state')).toHaveTextContent('{"lastUpdate":123456,"payload":"ON"}');
expect(screen.getByTestId('side/detect/state')).toHaveTextContent('{"lastUpdate":123456,"payload":"OFF"}');
- expect(screen.getByTestId('side/clips/state')).toHaveTextContent('{"lastUpdate":123456,"payload":"OFF"}');
+ expect(screen.getByTestId('side/recordings/state')).toHaveTextContent('{"lastUpdate":123456,"payload":"OFF"}');
expect(screen.getByTestId('side/snapshots/state')).toHaveTextContent('{"lastUpdate":123456,"payload":"OFF"}');
});
});
diff --git a/web/src/components/__tests__/CameraImage.test.jsx b/web/src/components/__tests__/CameraImage.test.jsx
--- a/web/src/components/__tests__/CameraImage.test.jsx
+++ b/web/src/components/__tests__/CameraImage.test.jsx
@@ -7,7 +7,7 @@ import { render, screen } from '@testing-library/preact';
describe('CameraImage', () => {
beforeEach(() => {
jest.spyOn(Api, 'useConfig').mockImplementation(() => {
- return { data: { cameras: { front: { name: 'front', width: 1280, height: 720 } } } };
+ return { data: { cameras: { front: { name: 'front', detect: { width: 1280, height: 720 } } } } };
});
jest.spyOn(Api, 'useApiHost').mockReturnValue('http://base-url.local:5000');
jest.spyOn(Hooks, 'useResizeObserver').mockImplementation(() => [{ width: 0 }]);
diff --git a/web/src/components/__tests__/Dialog.test.jsx b/web/src/components/__tests__/Dialog.test.jsx
new file mode 100644
--- /dev/null
+++ b/web/src/components/__tests__/Dialog.test.jsx
@@ -0,0 +1,38 @@
+import { h } from 'preact';
+import Dialog from '../Dialog';
+import { fireEvent, render, screen } from '@testing-library/preact';
+
+describe('Dialog', () => {
+ let portal;
+
+ beforeAll(() => {
+ portal = document.createElement('div');
+ portal.id = 'dialogs';
+ document.body.appendChild(portal);
+ });
+
+ afterAll(() => {
+ document.body.removeChild(portal);
+ });
+
+ test('renders to a portal', async () => {
+ render(<Dialog title="Tacos" text="This is the dialog" />);
+ expect(screen.getByText('Tacos')).toBeInTheDocument();
+ expect(screen.getByRole('modal').closest('#dialogs')).not.toBeNull();
+ });
+
+ test('renders action buttons', async () => {
+ const handleClick = jest.fn();
+ render(
+ <Dialog
+ actions={[
+ { color: 'red', text: 'Delete' },
+ { text: 'Okay', onClick: handleClick },
+ ]}
+ title="Tacos"
+ />
+ );
+ fireEvent.click(screen.getByRole('button', { name: 'Okay' }));
+ expect(handleClick).toHaveBeenCalled();
+ });
+});
diff --git a/web/src/routes/__tests__/Camera.test.jsx b/web/src/routes/__tests__/Camera.test.jsx
--- a/web/src/routes/__tests__/Camera.test.jsx
+++ b/web/src/routes/__tests__/Camera.test.jsx
@@ -3,6 +3,7 @@ import * as AutoUpdatingCameraImage from '../../components/AutoUpdatingCameraIma
import * as Api from '../../api';
import * as Context from '../../context';
import Camera from '../Camera';
+import * as JSMpegPlayer from '../../components/JSMpegPlayer';
import { fireEvent, render, screen } from '@testing-library/preact';
describe('Camera Route', () => {
@@ -18,6 +19,9 @@ describe('Camera Route', () => {
jest.spyOn(AutoUpdatingCameraImage, 'default').mockImplementation(({ searchParams }) => {
return <div data-testid="mock-image">{searchParams.toString()}</div>;
});
+ jest.spyOn(JSMpegPlayer, 'default').mockImplementation(() => {
+ return <div data-testid="mock-jsmpeg" />;
+ });
});
test('reads camera feed options from persistence', async () => {
@@ -32,7 +36,10 @@ describe('Camera Route', () => {
},
mockSetOptions,
]);
+
render(<Camera camera="front" />);
+
+ fireEvent.click(screen.queryByText('Debug'));
fireEvent.click(screen.queryByText('Show Options'));
expect(screen.queryByTestId('mock-image')).toHaveTextContent(
      'bbox=1&timestamp=0&zones=1&mask=0&motion=1&regions=0'
@@ -41,17 +48,21 @@ describe('Camera Route', () => {
test('updates camera feed options to persistence', async () => {
mockUsePersistence
+ .mockReturnValueOnce([{}, mockSetOptions])
.mockReturnValueOnce([{}, mockSetOptions])
.mockReturnValueOnce([{ bbox: true }, mockSetOptions])
.mockReturnValueOnce([{ bbox: true, timestamp: true }, mockSetOptions]);
render(<Camera camera="front" />);
+ fireEvent.click(screen.queryByText('Debug'));
fireEvent.click(screen.queryByText('Show Options'));
fireEvent.change(screen.queryByTestId('bbox-input'), { target: { checked: true } });
fireEvent.change(screen.queryByTestId('timestamp-input'), { target: { checked: true } });
fireEvent.click(screen.queryByText('Hide Options'));
+ expect(mockUsePersistence).toHaveBeenCalledTimes(4);
+ expect(mockSetOptions).toHaveBeenCalledTimes(2);
expect(mockSetOptions).toHaveBeenCalledWith({ bbox: true, timestamp: true });
    expect(screen.queryByTestId('mock-image')).toHaveTextContent('bbox=1&timestamp=1');
});
diff --git a/web/src/routes/__tests__/Cameras.test.jsx b/web/src/routes/__tests__/Cameras.test.jsx
--- a/web/src/routes/__tests__/Cameras.test.jsx
+++ b/web/src/routes/__tests__/Cameras.test.jsx
@@ -12,8 +12,8 @@ describe('Cameras Route', () => {
useConfigMock = jest.spyOn(Api, 'useConfig').mockImplementation(() => ({
data: {
cameras: {
- front: { name: 'front', objects: { track: ['taco', 'cat', 'dog'] } },
- side: { name: 'side', objects: { track: ['taco', 'cat', 'dog'] } },
+ front: { name: 'front', objects: { track: ['taco', 'cat', 'dog'] }, record: { enabled: true } },
+ side: { name: 'side', objects: { track: ['taco', 'cat', 'dog'] }, record: { enabled: false } },
},
},
status: 'loaded',
@@ -41,15 +41,23 @@ describe('Cameras Route', () => {
expect(screen.queryByText('side').closest('a')).toHaveAttribute('href', '/cameras/side');
});
+ test('shows recordings link', async () => {
+ render(<Cameras />);
+
+ expect(screen.queryByLabelText('Loading…')).not.toBeInTheDocument();
+
+ expect(screen.queryAllByText('Recordings')).toHaveLength(1);
+ });
+
test('buttons toggle detect, clips, and snapshots', async () => {
const sendDetect = jest.fn();
- const sendClips = jest.fn();
+ const sendRecordings = jest.fn();
const sendSnapshots = jest.fn();
jest.spyOn(Mqtt, 'useDetectState').mockImplementation(() => {
return { payload: 'ON', send: sendDetect };
});
- jest.spyOn(Mqtt, 'useClipsState').mockImplementation(() => {
- return { payload: 'OFF', send: sendClips };
+ jest.spyOn(Mqtt, 'useRecordingsState').mockImplementation(() => {
+ return { payload: 'OFF', send: sendRecordings };
});
jest.spyOn(Mqtt, 'useSnapshotsState').mockImplementation(() => {
return { payload: 'ON', send: sendSnapshots };
@@ -64,11 +72,11 @@ describe('Cameras Route', () => {
fireEvent.click(screen.getAllByLabelText('Toggle snapshots off')[0]);
expect(sendSnapshots).toHaveBeenCalledWith('OFF');
- fireEvent.click(screen.getAllByLabelText('Toggle clips on')[0]);
- expect(sendClips).toHaveBeenCalledWith('ON');
+ fireEvent.click(screen.getAllByLabelText('Toggle recordings on')[0]);
+ expect(sendRecordings).toHaveBeenCalledWith('ON');
expect(sendDetect).toHaveBeenCalledTimes(1);
expect(sendSnapshots).toHaveBeenCalledTimes(1);
- expect(sendClips).toHaveBeenCalledTimes(1);
+ expect(sendRecordings).toHaveBeenCalledTimes(1);
});
});
diff --git a/web/src/routes/__tests__/Event.test.jsx b/web/src/routes/__tests__/Event.test.jsx
--- a/web/src/routes/__tests__/Event.test.jsx
+++ b/web/src/routes/__tests__/Event.test.jsx
@@ -26,43 +26,37 @@ describe('Event Route', () => {
expect(screen.queryByLabelText('Loading…')).not.toBeInTheDocument();
expect(screen.queryByText('Clip')).toBeInTheDocument();
- expect(screen.queryByLabelText('Clip for event 1613257326.237365-83cgl2')).toHaveAttribute(
- 'src',
- 'http://localhost:5000/clips/front-1613257326.237365-83cgl2.mp4'
- );
- expect(screen.queryByText('Best image')).toBeInTheDocument();
+ expect(screen.queryByLabelText('Video Player')).toBeInTheDocument();
+ expect(screen.queryByText('Best Image')).not.toBeInTheDocument();
+ expect(screen.queryByText('Thumbnail')).not.toBeInTheDocument();
+ });
+
+ test('does not render a video if there is no clip', async () => {
+ useEventMock.mockReturnValue({ data: { ...mockEvent, has_clip: false }, status: 'loaded' });
+ render(<Event eventId={mockEvent.id} />);
+
+ expect(screen.queryByText('Clip')).not.toBeInTheDocument();
+ expect(screen.queryByLabelText('Video Player')).not.toBeInTheDocument();
+ expect(screen.queryByText('Best Image')).toBeInTheDocument();
expect(screen.queryByText('Thumbnail')).not.toBeInTheDocument();
- expect(screen.queryByAltText('person at 82.0% confidence')).toHaveAttribute(
- 'src',
- 'http://localhost:5000/clips/front-1613257326.237365-83cgl2.jpg'
- );
});
test('shows the thumbnail if no snapshot available', async () => {
- useEventMock.mockReturnValue({ data: { ...mockEvent, has_snapshot: false }, status: 'loaded' });
+ useEventMock.mockReturnValue({ data: { ...mockEvent, has_clip: false, has_snapshot: false }, status: 'loaded' });
render(<Event eventId={mockEvent.id} />);
- expect(screen.queryByText('Best image')).not.toBeInTheDocument();
+ expect(screen.queryByText('Best Image')).not.toBeInTheDocument();
expect(screen.queryByText('Thumbnail')).toBeInTheDocument();
expect(screen.queryByAltText('person at 82.0% confidence')).toHaveAttribute(
'src',
'data:image/jpeg;base64,/9j/4aa...'
);
});
-
- test('does not render a video if there is no clip', async () => {
- useEventMock.mockReturnValue({ data: { ...mockEvent, has_clip: false }, status: 'loaded' });
- render(<Event eventId={mockEvent.id} />);
-
- expect(screen.queryByText('Clip')).not.toBeInTheDocument();
- expect(screen.queryByLabelText('Clip for event 1613257326.237365-83cgl2')).not.toBeInTheDocument();
- });
});
const mockEvent = {
camera: 'front',
end_time: 1613257337.841237,
- false_positive: false,
has_clip: true,
has_snapshot: true,
id: '1613257326.237365-83cgl2',
diff --git a/web/src/routes/__tests__/Events.test.jsx b/web/src/routes/__tests__/Events.test.jsx
--- a/web/src/routes/__tests__/Events.test.jsx
+++ b/web/src/routes/__tests__/Events.test.jsx
@@ -71,7 +71,6 @@ describe('Events Route', () => {
const mockEvents = new Array(12).fill(null).map((v, i) => ({
end_time: 1613257337 + i,
- false_positive: false,
has_clip: true,
has_snapshot: true,
id: i,
| Motion detection defaults don't work well for low resolution substreams
**Describe the bug**
The default motion detection configuration appears tuned for a 1080p stream at 5fps. However, my ReoLink RLC-410 provides a 640x480 substream at 7fps. So with 1080p, you'd get the following default configuration:
```json
"fps": 5,
"frame_shape": [
1080,
1920
],
"height": 1080,
"motion": {
"contour_area": 100,
"delta_alpha": 0.2,
"frame_alpha": 0.2,
"frame_height": 180,
"threshold": 25
},
```
Very reasonable. The contour area covers only a small portion of the resized image, and the resized frame height is still large enough not to lose thin features like legs. However, configure that 640x480 substream instead and you get:
```json
"fps": 7,
"frame_shape": [
480,
640
],
"height": 480,
"motion": {
"contour_area": 100,
"delta_alpha": 0.2,
"frame_alpha": 0.2,
"frame_height": 80,
"threshold": 25
},
```
We now have a tiny, 80-pixel-high image to do motion detection on, but `contour_area` has not scaled with it - roughly 6.8 times as much area is needed to trigger motion compared to the 1080p stream. Similarly, `frame_alpha` and `delta_alpha` have not changed - so to the motion detector, it looks like objects are moving a lot slower.
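For reference, here is a rough sketch of where that 6.8x figure comes from, assuming the same field of view on both streams and the `height/6` downscale used for the motion frame (the helper below is illustrative, not Frigate code):
```python
# Illustrative arithmetic only, not Frigate code: compare how many pixels an
# object covering the same fraction of the scene occupies in each motion frame.

def motion_frame_dims(width: int, height: int) -> tuple:
    """Size of the downscaled motion-detection frame (height/6 default)."""
    frame_height = height // 6
    frame_width = int(width * frame_height / height)
    return frame_width, frame_height

hd = motion_frame_dims(1920, 1080)   # (320, 180)
sub = motion_frame_dims(640, 480)    # (106, 80)

# Same object, same scene fraction -> ~6.8x fewer pixels in the substream's
# motion frame, so a fixed contour_area of 100 is ~6.8x harder to reach.
print(round((hd[0] * hd[1]) / (sub[0] * sub[1]), 1))  # 6.8
```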
My proposal:
1. Change `delta_alpha` and `frame_alpha` from constant defaults to 1/fps
2. Change `contour_area` from a constant default to `scaled width * scaled height * 0.00173611111` (or some more reasonable constant value, possibly taking into account masked area)
3. Change `frame_height` from default `height/6` to `max(height/6, 180)`
There are risks: changing the alpha rates might make motion detection worse if the default values were already working well despite being very different from the frame rate, or if `contour_area` was configured explicitly to match the automatically scaled `frame_height`. The increased frame height might also increase CPU usage for people with small substreams like mine. Overall, though, I think this would result in the configuration that seems to have been originally intended, and it certainly works better on my cameras.
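A minimal sketch of what the proposed defaults could look like (names and structure are hypothetical, not Frigate's actual config code; the 0.00173611111 constant is 100 / (320 * 180), i.e. the current 1080p default expressed as a fraction of the resized frame):
```python
# Hypothetical sketch of the proposed dynamic defaults; not Frigate's config code.

def proposed_motion_defaults(width: int, height: int, fps: int) -> dict:
    frame_height = max(height // 6, 180)                 # proposal 3: floor at 180
    frame_width = int(width * frame_height / height)
    return {
        "frame_height": frame_height,
        # proposal 2: scale contour_area with the resized frame size
        "contour_area": round(frame_width * frame_height * 0.00173611111),
        # proposal 1: tie the averaging rates to the frame rate
        "delta_alpha": 1 / fps,
        "frame_alpha": 1 / fps,
        "threshold": 25,
    }

print(proposed_motion_defaults(1920, 1080, 5))  # reproduces today's 1080p defaults
print(proposed_motion_defaults(640, 480, 7))    # scaled values for the 640x480@7fps substream
```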
**Version of frigate**
0.8.4-5043040
Object detected but clip does not include detected object (0.9.0-7B063A1)
**Describe the bug**
The event list shows a detected object, but when watching the recorded clip the object is not there.
**Version of frigate**
0.9.0-7b063a1
**Config file**
```
cameras:
#############################
# PARKERING #
#############################
frigate_parkering:
ffmpeg:
inputs:
- path: rtmp://192.168.1.216/bcs/channel0_main.bcs?channel=0&stream=1&user=admin&password=xxxxxxxx
roles:
- record
- detect
detect:
width: 2560
height: 1920
fps: 5
enabled: True
max_disappeared: 25
objects:
track:
- person
- djur
- motorfordon
- cykel
filters:
person:
min_score: 0.7
motorfordon:
min_score: 0.6
mqtt:
enabled: True
timestamp: False
bounding_box: False
crop: True
height: 960
quality: 100
motion:
threshold: 20
mask:
- 0,1920,347,1920,744,920,156,947,852,142,2560,129,2560,0,0,0
zones:
street_area:
# HI-RES
coordinates: 1093,386,1195,445,2464,496,2428,89,781,48,586,478,771,403
objects:
- person
- djur
- cykel
- motorfordon
parking_area:
coordinates: 2560,1920,331,1920,748,916,147,932,556,451,579,567,400,898,923,880,1177,487,2560,543
objects:
- motorfordon
- person
- cykel
- djur
filters:
person:
threshold: 0.7
motorfordon:
threshold: 0.7
####################################
# #
# MAIN SETTINGS #
# #
####################################
detectors:
coral1:
type: edgetpu
device: 'usb:0'
coral2:
type: edgetpu
device: 'usb:1'
mqtt:
host: 192.168.1.121
topic_prefix: frigate
user: home-assistant-server
password: xxxxx
stats_interval: 20
ffmpeg:
hwaccel_args:
- -hwaccel
- qsv
- -qsv_device
- /dev/dri/renderD128
output_args:
record:
- -f
- segment
- -segment_time
- 10
- -segment_format
- mp4
- -reset_timestamps
- 1
- -strftime
- 1
- -c:v
- copy
input_args:
- '-avoid_negative_ts'
- make_zero
- '-fflags'
- nobuffer
- '-flags'
- low_delay
- '-strict'
- experimental
- '-fflags'
- +genpts+discardcorrupt
- '-use_wallclock_as_timestamps'
- '1'
detect:
max_disappeared: 20
motion:
threshold: 20
contour_area: 100
delta_alpha: 0.2
frame_alpha: 0.2
frame_height: 180
snapshots:
bounding_box: true
clean_copy: false
crop: true
enabled: true
height: 600
quality: 100
retain:
default: 6
timestamp: false
objects:
track:
- djur
- person
- cykel
- motorfordon
- råtta
filters:
djur:
threshold: 0.7
person:
threshold: 0.7
cykel:
threshold: 0.7
motorfordon:
threshold: 0.7
råtta:
threshold: 0.65
rtmp:
enabled: False
record:
enabled: True
retain_days: 0
events:
max_seconds: 300
pre_capture: 5
post_capture: 5
retain:
default: 6
birdseye:
enabled: true
height: 600
width: 800
mode: motion
quality: 10
logger:
default: info
```
**Frigate container logs**
```
Include relevant log output here
```
**Frigate stats**
```
{
"detection_fps": 0.0,
"detectors": {
"coral1": {
"detection_start": 0.0,
"inference_speed": 11.94,
"pid": 218
},
"coral2": {
"detection_start": 0.0,
"inference_speed": 11.65,
"pid": 220
}
},
"frigate_baksida": {
"camera_fps": 5.1,
"capture_pid": 267,
"detection_fps": 0.0,
"pid": 243,
"process_fps": 5.1,
"skipped_fps": 0.0
},
"frigate_entren": {
"camera_fps": 4.1,
"capture_pid": 249,
"detection_fps": 0.0,
"pid": 234,
"process_fps": 4.1,
"skipped_fps": 0.0
},
"frigate_inne_garage": {
"camera_fps": 4.1,
"capture_pid": 274,
"detection_fps": 0.0,
"pid": 246,
"process_fps": 4.1,
"skipped_fps": 0.0
},
"frigate_lillstugan": {
"camera_fps": 4.1,
"capture_pid": 257,
"detection_fps": 0.0,
"pid": 240,
"process_fps": 4.1,
"skipped_fps": 0.0
},
"frigate_parkering": {
"camera_fps": 5.0,
"capture_pid": 248,
"detection_fps": 0.0,
"pid": 229,
"process_fps": 5.0,
"skipped_fps": 0.0
},
"frigate_pathway": {
"camera_fps": 4.1,
"capture_pid": 260,
"detection_fps": 0.0,
"pid": 241,
"process_fps": 4.1,
"skipped_fps": 0.0
},
"frigate_pryttelverkstad": {
"camera_fps": 4.0,
"capture_pid": 270,
"detection_fps": 0.0,
"pid": 244,
"process_fps": 4.0,
"skipped_fps": 0.0
},
"frigate_tomten": {
"camera_fps": 4.1,
"capture_pid": 254,
"detection_fps": 0.0,
"pid": 238,
"process_fps": 4.1,
"skipped_fps": 0.0
},
"service": {
"storage": {
"/dev/shm": {
"free": 6400.9,
"mount_type": "tmpfs",
"total": 6442.5,
"used": 41.5
},
"/media/frigate/clips": {
"free": 117205.5,
"mount_type": "ext3",
"total": 125488.4,
"used": 1881.1
},
"/media/frigate/recordings": {
"free": 117205.5,
"mount_type": "ext3",
"total": 125488.4,
"used": 1881.1
},
"/tmp/cache": {
"free": 22861.7,
"mount_type": "overlay",
"total": 58275.8,
"used": 32423.3
}
},
"uptime": 6824,
"version": "0.9.0-7b063a1"
}
}
```
**FFprobe from your camera**
Run the following command and paste output below
```
ffprobe <stream_url>
```
**Screenshots**
![image](https://user-images.githubusercontent.com/58695742/133296853-c68a70c9-e970-401e-b41c-1f2d5b988e52.png)
![Missing object_2](https://user-images.githubusercontent.com/58695742/133297908-8e9ab0b1-481d-470b-a4f8-3da44c577886.gif)
**Computer Hardware**
- OS: [e.g. Ubuntu, Windows]
- Install method: [e.g. Addon, Docker Compose, Docker Command]
- Virtualization: [e.g. Proxmox, Virtualbox]
- Coral Version: [e.g. USB, PCIe, None]
- Network Setup: [e.g. Wired, WiFi]
**Camera Info:**
- Manufacturer: Reolink
- Model: 510A
- Resolution: 2560x1920
- FPS: 5
**Additional context**
Add any other context about the problem here.
Add global timestamp configuration
**Describe what you are trying to accomplish and why in non technical terms**
In Frigate, it looks like the new timestamp configuration (pre-release 0.9.0) can only be defined per camera... there is no global option. Thoughts on adding a global setting for the timestamp? This would allow a generic global timestamp setup, which could then be tweaked per camera if needed.
**Describe the solution you'd like**
Add the ability to globally define timestamp.
**Describe alternatives you've considered**
Continue to be only defined at the camera level.
**Additional context**
None.
Deleted events remain in list view until a full refresh
The delete functionality successfully deletes events, but the frontend doesn't remove the deleted event from the list view until I refresh the page. I am testing in development, where I have only 5 events.
cc @mitchross
| 2021-02-22T13:31:05Z | [] | [] |
|
blakeblackshear/frigate | 1,182 | blakeblackshear__frigate-1182 | [
"1180"
] | 0bb998c465604c16c4a99f15b70d46d732130275 | diff --git a/frigate/http.py b/frigate/http.py
--- a/frigate/http.py
+++ b/frigate/http.py
@@ -456,7 +456,7 @@ def recordings(camera_name):
files = glob.glob(f"{RECORD_DIR}/*/*/*/{camera_name}")
if len(files) == 0:
- return "No recordings found.", 404
+ return jsonify([])
files.sort()
| diff --git a/web/src/__tests__/Sidebar.test.jsx b/web/src/__tests__/Sidebar.test.jsx
--- a/web/src/__tests__/Sidebar.test.jsx
+++ b/web/src/__tests__/Sidebar.test.jsx
@@ -9,8 +9,8 @@ describe('Sidebar', () => {
jest.spyOn(Api, 'useConfig').mockImplementation(() => ({
data: {
cameras: {
- front: { name: 'front', objects: { track: ['taco', 'cat', 'dog'] } },
- side: { name: 'side', objects: { track: ['taco', 'cat', 'dog'] } },
+ front: { name: 'front', objects: { track: ['taco', 'cat', 'dog'] }, record: { enabled: true } },
+ side: { name: 'side', objects: { track: ['taco', 'cat', 'dog'] }, record: { enabled: false } },
},
},
}));
@@ -30,4 +30,11 @@ describe('Sidebar', () => {
expect(screen.queryByRole('link', { name: 'front' })).toBeInTheDocument();
expect(screen.queryByRole('link', { name: 'side' })).toBeInTheDocument();
});
+
+ test('render cameras if in record route', async () => {
+ window.history.replaceState({}, 'Front Recordings', '/recording/front');
+ render(<Sidebar />);
+ expect(screen.queryByRole('link', { name: 'front' })).toBeInTheDocument();
+ expect(screen.queryByRole('link', { name: 'side' })).not.toBeInTheDocument();
+ });
});
diff --git a/web/src/routes/__tests__/Cameras.test.jsx b/web/src/routes/__tests__/Cameras.test.jsx
--- a/web/src/routes/__tests__/Cameras.test.jsx
+++ b/web/src/routes/__tests__/Cameras.test.jsx
@@ -12,8 +12,8 @@ describe('Cameras Route', () => {
useConfigMock = jest.spyOn(Api, 'useConfig').mockImplementation(() => ({
data: {
cameras: {
- front: { name: 'front', objects: { track: ['taco', 'cat', 'dog'] } },
- side: { name: 'side', objects: { track: ['taco', 'cat', 'dog'] } },
+ front: { name: 'front', objects: { track: ['taco', 'cat', 'dog'] }, record: { enabled: true } },
+ side: { name: 'side', objects: { track: ['taco', 'cat', 'dog'] }, record: { enabled: false } },
},
},
status: 'loaded',
@@ -41,6 +41,14 @@ describe('Cameras Route', () => {
expect(screen.queryByText('side').closest('a')).toHaveAttribute('href', '/cameras/side');
});
+ test('shows recordings link', async () => {
+ render(<Cameras />);
+
+ expect(screen.queryByLabelText('Loading…')).not.toBeInTheDocument();
+
+ expect(screen.queryAllByText('Recordings')).toHaveLength(1);
+ });
+
test('buttons toggle detect, clips, and snapshots', async () => {
const sendDetect = jest.fn();
const sendClips = jest.fn();
| Recordings UI fails when no recordings exist
The current recordings UI doesn't gracefully handle the case where there are no recordings; it gets stuck loading recordings in an infinite loop.
![image](https://user-images.githubusercontent.com/569905/120902095-06bab400-c604-11eb-8bfb-99b45c50a2f8.png)
@hunterjm
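The one-line change in `frigate/http.py` above is the whole fix: return an empty JSON array instead of a 404 so the frontend gets a valid, terminal answer. A minimal Flask-style sketch of the behaviour (route path and response shape simplified from the real endpoint):
```python
import glob

from flask import Flask, jsonify

app = Flask(__name__)
RECORD_DIR = "/media/frigate/recordings"  # default recordings mount


@app.route("/api/<camera_name>/recordings")  # simplified path for illustration
def recordings(camera_name):
    files = sorted(glob.glob(f"{RECORD_DIR}/*/*/*/{camera_name}"))
    if not files:
        # An empty list is a valid answer the UI can render as "no recordings";
        # the previous 404 left it retrying in an infinite loading loop.
        return jsonify([])
    # the real endpoint builds richer summaries; the file list is enough for the sketch
    return jsonify(files)
```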
| 2021-06-06T00:41:56Z | [] | [] |
|
blakeblackshear/frigate | 1,314 | blakeblackshear__frigate-1314 | [
"1176"
] | 09b0068d1690fabffb8e0cff0913fc976731e345 | diff --git a/frigate/config.py b/frigate/config.py
--- a/frigate/config.py
+++ b/frigate/config.py
@@ -82,7 +82,7 @@ class MotionConfig(BaseModel):
ge=1,
le=255,
)
- contour_area: int = Field(default=100, title="Contour Area")
+ contour_area: Optional[int] = Field(title="Contour Area")
delta_alpha: float = Field(default=0.2, title="Delta Alpha")
frame_alpha: float = Field(default=0.2, title="Frame Alpha")
frame_height: Optional[int] = Field(title="Frame Height")
@@ -99,7 +99,11 @@ def __init__(self, **config):
frame_shape = config.get("frame_shape", (1, 1))
if "frame_height" not in config:
- config["frame_height"] = frame_shape[0] // 6
+ config["frame_height"] = max(frame_shape[0] // 6, 120)
+
+ if "contour_area" not in config:
+ frame_width = frame_shape[1] * config["frame_height"] / frame_shape[0]
+ config["contour_area"] = config["frame_height"] * frame_width * 0.003912363
mask = config.get("mask", "")
config["raw_mask"] = mask
| diff --git a/frigate/test/test_config.py b/frigate/test/test_config.py
--- a/frigate/test/test_config.py
+++ b/frigate/test/test_config.py
@@ -449,9 +449,60 @@ def test_max_disappeared_default(self):
assert config == frigate_config.dict(exclude_unset=True)
runtime_config = frigate_config.runtime_config
- ffmpeg_cmds = runtime_config.cameras["back"].ffmpeg_cmds
assert runtime_config.cameras["back"].detect.max_disappeared == 5 * 5
+ def test_motion_frame_height_wont_go_below_120(self):
+
+ config = {
+ "mqtt": {"host": "mqtt"},
+ "cameras": {
+ "back": {
+ "ffmpeg": {
+ "inputs": [
+ {
+ "path": "rtsp://10.0.0.1:554/video",
+ "roles": ["detect"],
+ },
+ ]
+ },
+ "height": 480,
+ "width": 640,
+ }
+ },
+ }
+
+ frigate_config = FrigateConfig(**config)
+ assert config == frigate_config.dict(exclude_unset=True)
+
+ runtime_config = frigate_config.runtime_config
+ assert runtime_config.cameras["back"].motion.frame_height >= 120
+
+ def test_motion_contour_area_dynamic(self):
+
+ config = {
+ "mqtt": {"host": "mqtt"},
+ "cameras": {
+ "back": {
+ "ffmpeg": {
+ "inputs": [
+ {
+ "path": "rtsp://10.0.0.1:554/video",
+ "roles": ["detect"],
+ },
+ ]
+ },
+ "height": 1080,
+ "width": 1920,
+ }
+ },
+ }
+
+ frigate_config = FrigateConfig(**config)
+ assert config == frigate_config.dict(exclude_unset=True)
+
+ runtime_config = frigate_config.runtime_config
+ assert round(runtime_config.cameras["back"].motion.contour_area) == 225
+
if __name__ == "__main__":
unittest.main(verbosity=2)
| Motion detection defaults don't work well for low resolution substreams
**Describe the bug**
The default motion detection configuration appears tuned for a 1080p stream at 5fps. However, my ReoLink RLC-410 provides a 640x480 substream at 7fps. So with 1080p, you'd get the following default configuration:
```json
"fps": 5,
"frame_shape": [
1080,
1920
],
"height": 1080,
"motion": {
"contour_area": 100,
"delta_alpha": 0.2,
"frame_alpha": 0.2,
"frame_height": 180,
"threshold": 25
},
```
Very reasonable. The contour size tracks a reasonable, small portion of the resized image - the resized frame height is still large enough to not lose thin things like legs. However, configure that substream:
```json
"fps": 7,
"frame_shape": [
480,
640
],
"height": 480,
"motion": {
"contour_area": 100,
"delta_alpha": 0.2,
"frame_alpha": 0.2,
"frame_height": 80,
"threshold": 25
},
```
We now have a tiny, 80 pixels high image to do motion detection on. But the `contour_area` has not scaled - so we need 6.8 times the area to trigger motion compared to the 1080 stream. Similarly, the `frame_alpha` and `delta_alpha` have not changed - so to the motion detector, it looks like the objects are moving a lot slower.
My proposal:
1. Change `delta_alpha` and `frame_alpha` from constant defaults to 1/fps
2. Change `contour_area` from a constant default to `scaled width * scaled height * 0.00173611111` (or some more reasonable constant value, possibly taking into account masked area)
3. Change `frame_height` from default `height/6` to `max(height/6, 180)`
There are risks; changing the alpha rates might make motion detection worse if the default values were working well already despite being very different from the framerate, or if `contour_area` was configured explicitly to match the automatic scaled value of `frame_height`. And the increased frame height might increase CPU for people with small substreams like I have. But overall, I think this would result in the configuration that seems to have been originally intended, and it certainly works better on my cameras.
**Version of frigate**
0.8.4-5043040
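For a quick sanity check of what the patch above actually computes (the constants come straight from the diff; the helper name is mine):
```python
# Reproduce the dynamic defaults from MotionConfig.__init__ in the patch above.
def patched_motion_defaults(frame_height_px: int, frame_width_px: int):
    frame_height = max(frame_height_px // 6, 120)
    frame_width = frame_width_px * frame_height / frame_height_px
    contour_area = frame_height * frame_width * 0.003912363
    return frame_height, round(contour_area)

print(patched_motion_defaults(1080, 1920))  # (180, 225) - matches the new unit test
print(patched_motion_defaults(480, 640))    # (120, 75)  - the 640x480 substream no longer collapses to 80 rows
```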
| I'm not so sure about adjusting the alpha rates, but the contour area and frame height make sense. | 2021-07-01T12:53:29Z | [] | [] |
blakeblackshear/frigate | 1,342 | blakeblackshear__frigate-1342 | [
"507"
] | f26f7b8d3f61edab184e9350ba7bc995fbea4595 | diff --git a/frigate/app.py b/frigate/app.py
--- a/frigate/app.py
+++ b/frigate/app.py
@@ -259,6 +259,7 @@ def start_camera_processors(self):
name,
config,
model_shape,
+ self.config.model.merged_labelmap,
self.detection_queue,
self.detection_out_events[name],
self.detected_frames_queue,
diff --git a/frigate/config.py b/frigate/config.py
--- a/frigate/config.py
+++ b/frigate/config.py
@@ -13,6 +13,7 @@
import yaml
from frigate.const import BASE_DIR, RECORD_DIR, CACHE_DIR
+from frigate.edgetpu import load_labels
from frigate.util import create_mask, deep_merge
logger = logging.getLogger(__name__)
@@ -615,6 +616,22 @@ class DatabaseConfig(BaseModel):
class ModelConfig(BaseModel):
width: int = Field(default=320, title="Object detection model input width.")
height: int = Field(default=320, title="Object detection model input height.")
+ labelmap: Dict[int, str] = Field(
+ default_factory=dict, title="Labelmap customization."
+ )
+ _merged_labelmap: Optional[Dict[int, str]] = PrivateAttr()
+
+ @property
+ def merged_labelmap(self) -> Dict[int, str]:
+ return self._merged_labelmap
+
+ def __init__(self, **config):
+ super().__init__(**config)
+
+ self._merged_labelmap = {
+ **load_labels("/labelmap.txt"),
+ **config.get("labelmap", {}),
+ }
class LogLevelEnum(str, Enum):
diff --git a/frigate/edgetpu.py b/frigate/edgetpu.py
--- a/frigate/edgetpu.py
+++ b/frigate/edgetpu.py
@@ -231,7 +231,7 @@ def start_or_restart(self):
class RemoteObjectDetector:
def __init__(self, name, labels, detection_queue, event, model_shape):
- self.labels = load_labels(labels)
+ self.labels = labels
self.name = name
self.fps = EventsPerSecond()
self.detection_queue = detection_queue
diff --git a/frigate/video.py b/frigate/video.py
--- a/frigate/video.py
+++ b/frigate/video.py
@@ -318,6 +318,7 @@ def track_camera(
name,
config: CameraConfig,
model_shape,
+ labelmap,
detection_queue,
result_connection,
detected_objects_queue,
@@ -344,7 +345,7 @@ def receiveSignal(signalNumber, frame):
motion_detector = MotionDetector(frame_shape, config.motion)
object_detector = RemoteObjectDetector(
- name, "/labelmap.txt", detection_queue, result_connection, model_shape
+ name, labelmap, detection_queue, result_connection, model_shape
)
object_tracker = ObjectTracker(config.detect)
| diff --git a/frigate/test/test_config.py b/frigate/test/test_config.py
--- a/frigate/test/test_config.py
+++ b/frigate/test/test_config.py
@@ -501,7 +501,87 @@ def test_motion_contour_area_dynamic(self):
assert config == frigate_config.dict(exclude_unset=True)
runtime_config = frigate_config.runtime_config
- assert round(runtime_config.cameras["back"].motion.contour_area) == 225
+ assert round(runtime_config.cameras["back"].motion.contour_area) == 99
+
+ def test_merge_labelmap(self):
+
+ config = {
+ "mqtt": {"host": "mqtt"},
+ "model": {"labelmap": {7: "truck"}},
+ "cameras": {
+ "back": {
+ "ffmpeg": {
+ "inputs": [
+ {
+ "path": "rtsp://10.0.0.1:554/video",
+ "roles": ["detect"],
+ },
+ ]
+ },
+ "height": 1080,
+ "width": 1920,
+ }
+ },
+ }
+
+ frigate_config = FrigateConfig(**config)
+ assert config == frigate_config.dict(exclude_unset=True)
+
+ runtime_config = frigate_config.runtime_config
+ assert runtime_config.model.merged_labelmap[7] == "truck"
+
+ def test_default_labelmap_empty(self):
+
+ config = {
+ "mqtt": {"host": "mqtt"},
+ "cameras": {
+ "back": {
+ "ffmpeg": {
+ "inputs": [
+ {
+ "path": "rtsp://10.0.0.1:554/video",
+ "roles": ["detect"],
+ },
+ ]
+ },
+ "height": 1080,
+ "width": 1920,
+ }
+ },
+ }
+
+ frigate_config = FrigateConfig(**config)
+ assert config == frigate_config.dict(exclude_unset=True)
+
+ runtime_config = frigate_config.runtime_config
+ assert runtime_config.model.merged_labelmap[0] == "person"
+
+ def test_default_labelmap(self):
+
+ config = {
+ "mqtt": {"host": "mqtt"},
+ "model": {"width": 320, "height": 320},
+ "cameras": {
+ "back": {
+ "ffmpeg": {
+ "inputs": [
+ {
+ "path": "rtsp://10.0.0.1:554/video",
+ "roles": ["detect"],
+ },
+ ]
+ },
+ "height": 1080,
+ "width": 1920,
+ }
+ },
+ }
+
+ frigate_config = FrigateConfig(**config)
+ assert config == frigate_config.dict(exclude_unset=True)
+
+ runtime_config = frigate_config.runtime_config
+ assert runtime_config.model.merged_labelmap[0] == "person"
if __name__ == "__main__":
| How do you edit coco labels.txt in HassOS addon?
Hey,
I downloaded it as per the docs, but they don't say how to import it into the addon.
Thanks
| This isn't possible with the addon unfortunately. I can look into it for future versions.
> This isn't possible with the addon unfortunately. I can look into it for future versions.
Yes please, my cat is everything but a cat at this point!
This issue has been automatically marked as stale because it has not had recent activity. It will be closed if no further activity occurs. Thank you for your contributions.
don't know what 'stale' means but please don't forget about this!
This issue has been automatically marked as stale because it has not had recent activity. It will be closed if no further activity occurs. Thank you for your contributions.
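Worth noting: the patch in this PR sidesteps the file entirely. Label overrides declared under `model.labelmap` are merged on top of the built-in `/labelmap.txt`. A sketch mirroring the new unit test (the override value is illustrative, and this only runs inside the Frigate container because `ModelConfig` reads `/labelmap.txt`):
```python
from frigate.config import FrigateConfig

config = {
    "mqtt": {"host": "mqtt"},
    # rename COCO class 7; any {index: "new label"} pair works the same way
    "model": {"labelmap": {7: "lorry"}},
    "cameras": {
        "back": {
            "ffmpeg": {
                "inputs": [
                    {"path": "rtsp://10.0.0.1:554/video", "roles": ["detect"]},
                ]
            },
            "height": 1080,
            "width": 1920,
        }
    },
}

runtime = FrigateConfig(**config).runtime_config
print(runtime.model.merged_labelmap[7])  # "lorry" - override wins over labelmap.txt
print(runtime.model.merged_labelmap[0])  # "person" - everything else keeps its default
```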
| 2021-07-08T03:58:09Z | [] | [] |
blakeblackshear/frigate | 1,674 | blakeblackshear__frigate-1674 | [
"1656",
"1656"
] | d74021af4733d7d59c5c8eb0e4fbc50cac9bf109 | diff --git a/frigate/config.py b/frigate/config.py
--- a/frigate/config.py
+++ b/frigate/config.py
@@ -432,7 +432,7 @@ class CameraMqttConfig(BaseModel):
)
-class CameraRtmpConfig(BaseModel):
+class RtmpConfig(BaseModel):
enabled: bool = Field(default=True, title="RTMP restreaming enabled.")
@@ -454,8 +454,8 @@ class CameraConfig(BaseModel):
record: RecordConfig = Field(
default_factory=RecordConfig, title="Record configuration."
)
- rtmp: CameraRtmpConfig = Field(
- default_factory=CameraRtmpConfig, title="RTMP restreaming configuration."
+ rtmp: RtmpConfig = Field(
+ default_factory=RtmpConfig, title="RTMP restreaming configuration."
)
live: CameraLiveConfig = Field(
default_factory=CameraLiveConfig, title="Live playback settings."
@@ -656,6 +656,9 @@ class FrigateConfig(BaseModel):
snapshots: SnapshotsConfig = Field(
default_factory=SnapshotsConfig, title="Global snapshots configuration."
)
+ rtmp: RtmpConfig = Field(
+ default_factory=RtmpConfig, title="Global RTMP restreaming configuration."
+ )
birdseye: BirdseyeConfig = Field(
default_factory=BirdseyeConfig, title="Birdseye configuration."
)
@@ -672,6 +675,10 @@ class FrigateConfig(BaseModel):
default_factory=DetectConfig, title="Global object tracking configuration."
)
cameras: Dict[str, CameraConfig] = Field(title="Camera configuration.")
+ timestamp_style: TimestampStyleConfig = Field(
+ default_factory=TimestampStyleConfig,
+ title="Global timestamp style configuration.",
+ )
@property
def runtime_config(self) -> FrigateConfig:
@@ -687,10 +694,12 @@ def runtime_config(self) -> FrigateConfig:
include={
"record": ...,
"snapshots": ...,
+ "rtmp": ...,
"objects": ...,
"motion": ...,
"detect": ...,
"ffmpeg": ...,
+ "timestamp_style": ...,
},
exclude_unset=True,
)
| diff --git a/frigate/test/test_config.py b/frigate/test/test_config.py
--- a/frigate/test/test_config.py
+++ b/frigate/test/test_config.py
@@ -884,6 +884,156 @@ def test_global_snapshots_merge(self):
assert runtime_config.cameras["back"].snapshots.height == 150
assert runtime_config.cameras["back"].snapshots.enabled
+ def test_global_rtmp(self):
+
+ config = {
+ "mqtt": {"host": "mqtt"},
+ "rtmp": {"enabled": True},
+ "cameras": {
+ "back": {
+ "ffmpeg": {
+ "inputs": [
+ {
+ "path": "rtsp://10.0.0.1:554/video",
+ "roles": ["detect"],
+ },
+ ]
+ },
+ }
+ },
+ }
+ frigate_config = FrigateConfig(**config)
+ assert config == frigate_config.dict(exclude_unset=True)
+
+ runtime_config = frigate_config.runtime_config
+ assert runtime_config.cameras["back"].rtmp.enabled
+
+ def test_default_rtmp(self):
+
+ config = {
+ "mqtt": {"host": "mqtt"},
+ "cameras": {
+ "back": {
+ "ffmpeg": {
+ "inputs": [
+ {
+ "path": "rtsp://10.0.0.1:554/video",
+ "roles": ["detect"],
+ },
+ ]
+ }
+ }
+ },
+ }
+ frigate_config = FrigateConfig(**config)
+ assert config == frigate_config.dict(exclude_unset=True)
+
+ runtime_config = frigate_config.runtime_config
+ assert runtime_config.cameras["back"].rtmp.enabled
+
+ def test_global_rtmp_merge(self):
+
+ config = {
+ "mqtt": {"host": "mqtt"},
+ "rtmp": {"enabled": False},
+ "cameras": {
+ "back": {
+ "ffmpeg": {
+ "inputs": [
+ {
+ "path": "rtsp://10.0.0.1:554/video",
+ "roles": ["detect"],
+ },
+ ]
+ },
+ "rtmp": {
+ "enabled": True,
+ },
+ }
+ },
+ }
+ frigate_config = FrigateConfig(**config)
+ assert config == frigate_config.dict(exclude_unset=True)
+
+ runtime_config = frigate_config.runtime_config
+ assert runtime_config.cameras["back"].rtmp.enabled
+
+ def test_global_timestamp_style(self):
+
+ config = {
+ "mqtt": {"host": "mqtt"},
+ "timestamp_style": {"position": "bl", "scale": 1.5},
+ "cameras": {
+ "back": {
+ "ffmpeg": {
+ "inputs": [
+ {
+ "path": "rtsp://10.0.0.1:554/video",
+ "roles": ["detect"],
+ },
+ ]
+ },
+ }
+ },
+ }
+ frigate_config = FrigateConfig(**config)
+ assert config == frigate_config.dict(exclude_unset=True)
+
+ runtime_config = frigate_config.runtime_config
+ assert runtime_config.cameras["back"].timestamp_style.position == "bl"
+ assert runtime_config.cameras["back"].timestamp_style.scale == 1.5
+
+ def test_default_timestamp_style(self):
+
+ config = {
+ "mqtt": {"host": "mqtt"},
+ "cameras": {
+ "back": {
+ "ffmpeg": {
+ "inputs": [
+ {
+ "path": "rtsp://10.0.0.1:554/video",
+ "roles": ["detect"],
+ },
+ ]
+ }
+ }
+ },
+ }
+ frigate_config = FrigateConfig(**config)
+ assert config == frigate_config.dict(exclude_unset=True)
+
+ runtime_config = frigate_config.runtime_config
+ assert runtime_config.cameras["back"].timestamp_style.position == "tl"
+ assert runtime_config.cameras["back"].timestamp_style.scale == 1.0
+
+ def test_global_timestamp_style_merge(self):
+
+ config = {
+ "mqtt": {"host": "mqtt"},
+ "rtmp": {"enabled": False},
+ "timestamp_style": {"position": "br", "scale": 2.0},
+ "cameras": {
+ "back": {
+ "ffmpeg": {
+ "inputs": [
+ {
+ "path": "rtsp://10.0.0.1:554/video",
+ "roles": ["detect"],
+ },
+ ]
+ },
+ "timestamp_style": {"position": "bl", "scale": 1.5},
+ }
+ },
+ }
+ frigate_config = FrigateConfig(**config)
+ assert config == frigate_config.dict(exclude_unset=True)
+
+ runtime_config = frigate_config.runtime_config
+ assert runtime_config.cameras["back"].timestamp_style.position == "bl"
+ assert runtime_config.cameras["back"].timestamp_style.scale == 1.5
+
if __name__ == "__main__":
unittest.main(verbosity=2)
| Add global timestamp configuration
**Describe what you are trying to accomplish and why in non technical terms**
In Frigate, it looks like the new timestamp configuration (pre-release 0.9.0) can only be defined per camera... there is no global option. Thoughts on adding a global setting for the timestamp? This would allow a generic global timestamp setup, which could then be tweaked per camera if needed.
**Describe the solution you'd like**
Add the ability to globally define timestamp.
**Describe alternatives you've considered**
Continue to be only defined at the camera level.
**Additional context**
None.
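The unit tests added in this PR show the merge behaviour this request asks for; condensed into a single sketch (values are the ones used in `test_global_timestamp_style_merge`):
```python
from frigate.config import FrigateConfig

config = {
    "mqtt": {"host": "mqtt"},
    "rtmp": {"enabled": False},
    # global default applied to every camera
    "timestamp_style": {"position": "br", "scale": 2.0},
    "cameras": {
        "back": {
            "ffmpeg": {
                "inputs": [
                    {"path": "rtsp://10.0.0.1:554/video", "roles": ["detect"]},
                ]
            },
            # a per-camera block still overrides the global one
            "timestamp_style": {"position": "bl", "scale": 1.5},
        }
    },
}

runtime = FrigateConfig(**config).runtime_config
print(runtime.cameras["back"].timestamp_style.position)  # "bl"
print(runtime.cameras["back"].timestamp_style.scale)     # 1.5
```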
| 2021-08-31T19:26:27Z | [] | [] |
|
blakeblackshear/frigate | 2,079 | blakeblackshear__frigate-2079 | [
"2055"
] | 25bb515afcc7e284df9760094fd69098eb9f1f69 | diff --git a/frigate/config.py b/frigate/config.py
--- a/frigate/config.py
+++ b/frigate/config.py
@@ -298,13 +298,14 @@ class BirdseyeConfig(FrigateBaseModel):
"-segment_time",
"10",
"-segment_format",
- "ts",
+ "mp4",
"-reset_timestamps",
"1",
"-strftime",
"1",
"-c",
"copy",
+ "-an",
]
@@ -564,16 +565,9 @@ def _get_ffmpeg_cmd(self, ffmpeg_input: CameraInput):
else self.ffmpeg.output_args.record.split(" ")
)
- # backwards compatibility check for segment_format change from mp4 to ts
- record_args = (
- " ".join(record_args)
- .replace("-segment_format mp4", "-segment_format ts")
- .split(" ")
- )
-
ffmpeg_output_args = (
record_args
- + [f"{os.path.join(CACHE_DIR, self.name)}-%Y%m%d%H%M%S.ts"]
+ + [f"{os.path.join(CACHE_DIR, self.name)}-%Y%m%d%H%M%S.mp4"]
+ ffmpeg_output_args
)
@@ -800,6 +794,21 @@ def runtime_config(self) -> FrigateConfig:
config.cameras[name] = camera_config
+ # check runtime config
+ for name, camera in config.cameras.items():
+ assigned_roles = list(
+ set([r for i in camera.ffmpeg.inputs for r in i.roles])
+ )
+ if camera.record.enabled and not "record" in assigned_roles:
+ raise ValueError(
+ f"Camera {name} has record enabled, but record is not assigned to an input."
+ )
+
+ if camera.rtmp.enabled and not "rtmp" in assigned_roles:
+ raise ValueError(
+ f"Camera {name} has rtmp enabled, but rtmp is not assigned to an input."
+ )
+
return config
@validator("cameras")
@@ -810,23 +819,6 @@ def ensure_zones_and_cameras_have_different_names(cls, v: Dict[str, CameraConfig
raise ValueError("Zones cannot share names with cameras")
return v
- @validator("cameras")
- def ensure_cameras_are_not_missing_roles(cls, v: Dict[str, CameraConfig]):
- for name, camera in v.items():
- assigned_roles = list(
- set([r for i in camera.ffmpeg.inputs for r in i.roles])
- )
- if camera.record.enabled and not "record" in assigned_roles:
- raise ValueError(
- f"Camera {name} has record enabled, but record is not assigned to an input."
- )
-
- if camera.rtmp.enabled and not "rtmp" in assigned_roles:
- raise ValueError(
- f"Camera {name} has rtmp enabled, but rtmp is not assigned to an input."
- )
- return v
-
@classmethod
def parse_file(cls, config_file):
with open(config_file) as f:
diff --git a/frigate/record.py b/frigate/record.py
--- a/frigate/record.py
+++ b/frigate/record.py
@@ -48,7 +48,9 @@ def move_files(self):
recordings = [
d
for d in os.listdir(CACHE_DIR)
- if os.path.isfile(os.path.join(CACHE_DIR, d)) and d.endswith(".ts")
+ if os.path.isfile(os.path.join(CACHE_DIR, d))
+ and d.endswith(".mp4")
+ and not d.startswith("clip_")
]
files_in_use = []
@@ -111,30 +113,9 @@ def move_files(self):
file_name = f"{start_time.strftime('%M.%S.mp4')}"
file_path = os.path.join(directory, file_name)
- ffmpeg_cmd = [
- "ffmpeg",
- "-y",
- "-i",
- cache_path,
- "-c",
- "copy",
- "-movflags",
- "+faststart",
- file_path,
- ]
-
- p = sp.run(
- ffmpeg_cmd,
- encoding="ascii",
- capture_output=True,
- )
-
- Path(cache_path).unlink(missing_ok=True)
-
- if p.returncode != 0:
- logger.error(f"Unable to convert {cache_path} to {file_path}")
- logger.error(p.stderr)
- continue
+ # copy then delete is required when recordings are stored on some network drives
+ shutil.copyfile(cache_path, file_path)
+ os.remove(cache_path)
rand_id = "".join(
random.choices(string.ascii_lowercase + string.digits, k=6)
@@ -150,8 +131,11 @@ def move_files(self):
def run(self):
# Check for new files every 5 seconds
- while not self.stop_event.wait(5):
+ wait_time = 5
+ while not self.stop_event.wait(wait_time):
+ run_start = datetime.datetime.now().timestamp()
self.move_files()
+ wait_time = max(0, 5 - (datetime.datetime.now().timestamp() - run_start))
logger.info(f"Exiting recording maintenance...")
@@ -290,9 +274,7 @@ def expire_files(self):
# find all the recordings older than the oldest recording in the db
try:
- oldest_recording = (
- Recordings.select().order_by(Recordings.start_time.desc()).get()
- )
+ oldest_recording = Recordings.select().order_by(Recordings.start_time).get()
p = Path(oldest_recording.path)
oldest_timestamp = p.stat().st_mtime - 1
@@ -301,7 +283,7 @@ def expire_files(self):
logger.debug(f"Oldest recording in the db: {oldest_timestamp}")
process = sp.run(
- ["find", RECORD_DIR, "-type", "f", "-newermt", f"@{oldest_timestamp}"],
+ ["find", RECORD_DIR, "-type", "f", "!", "-newermt", f"@{oldest_timestamp}"],
capture_output=True,
text=True,
)
| diff --git a/frigate/test/test_config.py b/frigate/test/test_config.py
--- a/frigate/test/test_config.py
+++ b/frigate/test/test_config.py
@@ -702,7 +702,11 @@ def test_fails_on_invalid_role(self):
"inputs": [
{
"path": "rtsp://10.0.0.1:554/video",
- "roles": ["detect", "clips"],
+ "roles": ["detect"],
+ },
+ {
+ "path": "rtsp://10.0.0.1:554/video2",
+ "roles": ["clips"],
},
]
},
@@ -717,6 +721,37 @@ def test_fails_on_invalid_role(self):
self.assertRaises(ValidationError, lambda: FrigateConfig(**config))
+ def test_fails_on_missing_role(self):
+
+ config = {
+ "mqtt": {"host": "mqtt"},
+ "cameras": {
+ "back": {
+ "ffmpeg": {
+ "inputs": [
+ {
+ "path": "rtsp://10.0.0.1:554/video",
+ "roles": ["detect"],
+ },
+ {
+ "path": "rtsp://10.0.0.1:554/video2",
+ "roles": ["record"],
+ },
+ ]
+ },
+ "detect": {
+ "height": 1080,
+ "width": 1920,
+ "fps": 5,
+ },
+ "rtmp": {"enabled": True},
+ }
+ },
+ }
+
+ frigate_config = FrigateConfig(**config)
+ self.assertRaises(ValueError, lambda: frigate_config.runtime_config)
+
def test_global_detect(self):
config = {
@@ -958,6 +993,34 @@ def test_global_rtmp_merge(self):
runtime_config = frigate_config.runtime_config
assert runtime_config.cameras["back"].rtmp.enabled
+ def test_global_rtmp_default(self):
+
+ config = {
+ "mqtt": {"host": "mqtt"},
+ "rtmp": {"enabled": False},
+ "cameras": {
+ "back": {
+ "ffmpeg": {
+ "inputs": [
+ {
+ "path": "rtsp://10.0.0.1:554/video",
+ "roles": ["detect"],
+ },
+ {
+ "path": "rtsp://10.0.0.1:554/video2",
+ "roles": ["record"],
+ },
+ ]
+ },
+ }
+ },
+ }
+ frigate_config = FrigateConfig(**config)
+ assert config == frigate_config.dict(exclude_unset=True)
+
+ runtime_config = frigate_config.runtime_config
+ assert not runtime_config.cameras["back"].rtmp.enabled
+
def test_global_live(self):
config = {
| [Support]: rtmp can no longer be disabled globally
### Describe the problem you are having
rtmp can no longer be disabled globally in 0.9.2 (as documented [here](https://docs.frigate.video/configuration/index)) and now results in a validation error.
Quick fix is to explicitly disable rtmp on all cameras in the cameras section.
### Version
0.9.2-25bb515
### Frigate config file
```yaml
mqtt:
host: mqtt
detectors:
coral:
type: edgetpu
device: usb
ffmpeg:
hwaccel_args: -c:v h264_v4l2m2m # Raspberry Pi 3/4 (64-bit OS)
detect:
width: 640
height: 360
fps: 3
# Optional: RTMP configuration
# NOTE: Can be overridden at the camera level
rtmp:
# Optional: Enable the RTMP stream (default: True)
enabled: False
cameras:
Kitchen:
ffmpeg:
inputs:
- path: rtsp://192.168.1.39/ch0_1.h264
roles:
- detect
- path: rtsp://192.168.1.39/ch0_0.h264
roles:
- record
objects:
track:
- person
```
### Relevant log output
```shell
[2021-10-20 13:29:01] frigate.app INFO : Starting Frigate (0.9.2-25bb515),
*************************************************************,
*** Your config file is not valid! ***,
*** Please check the docs at ***,
*** https://docs.frigate.video/configuration/index ***,
*** Config Validation Errors ***,
1 validation error for FrigateConfig,
Camera Kitchen has rtmp enabled, but rtmp is not assigned to an input. (type=value_error),
cameras,
*** End Config Validation Errors ***,
[cmd] python3 exited 1,
[cont-finish.d] executing container finish scripts...,
[cont-finish.d] done.,
[s6-finish] waiting for services.,
[s6-finish] sending all processes the KILL signal and exiting.,
[s6-finish] sending all processes the TERM signal.
```
### FFprobe output from your camera
```shell
N/A
```
### Frigate stats
_No response_
### Operating system
Debian
### Install method
Docker Compose
### Coral version
USB
### Network connection
Wired
### Camera make and model
N/A
### Any other information that may be helpful
_No response_
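With the role check moved into `runtime_config` by the patch above, the documented global form works again; condensed from the `test_global_rtmp_default` case added in this PR (camera name and stream paths taken from the config above):
```python
from frigate.config import FrigateConfig

config = {
    "mqtt": {"host": "mqtt"},
    "rtmp": {"enabled": False},  # global switch, no per-camera override needed
    "cameras": {
        "Kitchen": {
            "ffmpeg": {
                "inputs": [
                    {"path": "rtsp://192.168.1.39/ch0_1.h264", "roles": ["detect"]},
                    {"path": "rtsp://192.168.1.39/ch0_0.h264", "roles": ["record"]},
                ]
            },
        }
    },
}

runtime = FrigateConfig(**config).runtime_config
print(runtime.cameras["Kitchen"].rtmp.enabled)  # False - and no validation error
```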
| added this to my config but still getting error
```
rtmp:
enabled: False
```
>
>
> added this to my config but still getting error
>
> ```
> rtmp:
> enabled: False
> ```
Did you add it in to all of your cameras like this?
```
cameras:
Kitchen:
rtmp:
enabled: False
ffmpeg:
inputs:
- path: rtsp://192.168.1.39/ch0_1.h264
roles:
- detect
- record
```
ah i see, the global settings isnt working and you have to add it to each camera. reading it again makes sense now. thanks | 2021-10-22T12:48:26Z | [] | [] |
blakeblackshear/frigate | 2,747 | blakeblackshear__frigate-2747 | [
"2734"
] | 273f803c7c6cbf1e076318704600643112d2cd93 | diff --git a/frigate/config.py b/frigate/config.py
--- a/frigate/config.py
+++ b/frigate/config.py
@@ -72,9 +72,7 @@ class RetainModeEnum(str, Enum):
class RetainConfig(FrigateBaseModel):
default: float = Field(default=10, title="Default retention period.")
- mode: RetainModeEnum = Field(
- default=RetainModeEnum.active_objects, title="Retain mode."
- )
+ mode: RetainModeEnum = Field(default=RetainModeEnum.motion, title="Retain mode.")
objects: Dict[str, float] = Field(
default_factory=dict, title="Object retention period."
)
@@ -103,6 +101,10 @@ class RecordRetainConfig(FrigateBaseModel):
class RecordConfig(FrigateBaseModel):
enabled: bool = Field(default=False, title="Enable record on all cameras.")
+ expire_interval: int = Field(
+ default=60,
+ title="Number of minutes to wait between cleanup runs.",
+ )
# deprecated - to be removed in a future version
retain_days: Optional[float] = Field(title="Recording retention period in days.")
retain: RecordRetainConfig = Field(
@@ -171,8 +173,9 @@ class DetectConfig(FrigateBaseModel):
title="Maximum number of frames the object can dissapear before detection ends."
)
stationary_interval: Optional[int] = Field(
+ default=0,
title="Frame interval for checking stationary objects.",
- ge=1,
+ ge=0,
)
@@ -473,7 +476,7 @@ class CameraLiveConfig(FrigateBaseModel):
class CameraConfig(FrigateBaseModel):
- name: Optional[str] = Field(title="Camera name.")
+ name: Optional[str] = Field(title="Camera name.", regex="^[a-zA-Z0-9_]+$")
ffmpeg: CameraFfmpegConfig = Field(title="FFmpeg configuration for the camera.")
best_image_timeout: int = Field(
default=60,
@@ -763,11 +766,6 @@ def runtime_config(self) -> FrigateConfig:
if camera_config.detect.max_disappeared is None:
camera_config.detect.max_disappeared = max_disappeared
- # Default stationary_interval configuration
- stationary_interval = camera_config.detect.fps * 10
- if camera_config.detect.stationary_interval is None:
- camera_config.detect.stationary_interval = stationary_interval
-
# FFMPEG input substitution
for input in camera_config.ffmpeg.inputs:
input.path = input.path.format(**FRIGATE_ENV_VARS)
diff --git a/frigate/http.py b/frigate/http.py
--- a/frigate/http.py
+++ b/frigate/http.py
@@ -133,6 +133,8 @@ def delete_event(id):
if event.has_snapshot:
media = Path(f"{os.path.join(CLIPS_DIR, media_name)}.jpg")
media.unlink(missing_ok=True)
+ media = Path(f"{os.path.join(CLIPS_DIR, media_name)}-clean.png")
+ media.unlink(missing_ok=True)
if event.has_clip:
media = Path(f"{os.path.join(CLIPS_DIR, media_name)}.mp4")
media.unlink(missing_ok=True)
diff --git a/frigate/object_processing.py b/frigate/object_processing.py
--- a/frigate/object_processing.py
+++ b/frigate/object_processing.py
@@ -178,6 +178,7 @@ def to_dict(self, include_thumbnail: bool = False):
"area": self.obj_data["area"],
"region": self.obj_data["region"],
"motionless_count": self.obj_data["motionless_count"],
+ "position_changes": self.obj_data["position_changes"],
"current_zones": self.current_zones.copy(),
"entered_zones": self.entered_zones.copy(),
"has_clip": self.has_clip,
@@ -266,7 +267,13 @@ def get_jpg_bytes(
box = self.thumbnail_data["box"]
box_size = 300
region = calculate_region(
- best_frame.shape, box[0], box[1], box[2], box[3], box_size, multiplier=1.1
+ best_frame.shape,
+ box[0],
+ box[1],
+ box[2],
+ box[3],
+ box_size,
+ multiplier=1.1,
)
best_frame = best_frame[region[1] : region[3], region[0] : region[2]]
@@ -732,6 +739,10 @@ def should_save_snapshot(self, camera, obj: TrackedObject):
if not snapshot_config.enabled:
return False
+ # object never changed position
+ if obj.obj_data["position_changes"] == 0:
+ return False
+
# if there are required zones and there is no overlap
required_zones = snapshot_config.required_zones
if len(required_zones) > 0 and not set(obj.entered_zones) & set(required_zones):
@@ -752,6 +763,10 @@ def should_retain_recording(self, camera, obj: TrackedObject):
if not record_config.enabled:
return False
+ # object never changed position
+ if obj.obj_data["position_changes"] == 0:
+ return False
+
# If there are required zones and there is no overlap
required_zones = record_config.events.required_zones
if len(required_zones) > 0 and not set(obj.entered_zones) & set(required_zones):
@@ -773,6 +788,10 @@ def should_retain_recording(self, camera, obj: TrackedObject):
return True
def should_mqtt_snapshot(self, camera, obj: TrackedObject):
+ # object never changed position
+ if obj.obj_data["position_changes"] == 0:
+ return False
+
# if there are required zones and there is no overlap
required_zones = self.config.cameras[camera].mqtt.required_zones
if len(required_zones) > 0 and not set(obj.entered_zones) & set(required_zones):
diff --git a/frigate/objects.py b/frigate/objects.py
--- a/frigate/objects.py
+++ b/frigate/objects.py
@@ -20,7 +20,9 @@ class ObjectTracker:
def __init__(self, config: DetectConfig):
self.tracked_objects = {}
self.disappeared = {}
+ self.positions = {}
self.max_disappeared = config.max_disappeared
+ self.detect_config = config
def register(self, index, obj):
rand_id = "".join(random.choices(string.ascii_lowercase + string.digits, k=6))
@@ -28,24 +30,83 @@ def register(self, index, obj):
obj["id"] = id
obj["start_time"] = obj["frame_time"]
obj["motionless_count"] = 0
+ obj["position_changes"] = 0
self.tracked_objects[id] = obj
self.disappeared[id] = 0
+ self.positions[id] = {
+ "xmins": [],
+ "ymins": [],
+ "xmaxs": [],
+ "ymaxs": [],
+ "xmin": 0,
+ "ymin": 0,
+ "xmax": self.detect_config.width,
+ "ymax": self.detect_config.height,
+ }
def deregister(self, id):
del self.tracked_objects[id]
del self.disappeared[id]
+ # tracks the current position of the object based on the last 10 bounding boxes
+ # returns False if the object has moved outside its previous position
+ def update_position(self, id, box):
+ position = self.positions[id]
+ position_box = (
+ position["xmin"],
+ position["ymin"],
+ position["xmax"],
+ position["ymax"],
+ )
+
+ xmin, ymin, xmax, ymax = box
+
+ iou = intersection_over_union(position_box, box)
+
+ # if the iou drops below the threshold
+ # assume the object has moved to a new position and reset the computed box
+ if iou < 0.6:
+ self.positions[id] = {
+ "xmins": [xmin],
+ "ymins": [ymin],
+ "xmaxs": [xmax],
+ "ymaxs": [ymax],
+ "xmin": xmin,
+ "ymin": ymin,
+ "xmax": xmax,
+ "ymax": ymax,
+ }
+ return False
+
+ # if there are less than 10 entries for the position, add the bounding box
+ # and recompute the position box
+ if len(position["xmins"]) < 10:
+ position["xmins"].append(xmin)
+ position["ymins"].append(ymin)
+ position["xmaxs"].append(xmax)
+ position["ymaxs"].append(ymax)
+ # by using percentiles here, we hopefully remove outliers
+ position["xmin"] = np.percentile(position["xmins"], 15)
+ position["ymin"] = np.percentile(position["ymins"], 15)
+ position["xmax"] = np.percentile(position["xmaxs"], 85)
+ position["ymax"] = np.percentile(position["ymaxs"], 85)
+
+ return True
+
def update(self, id, new_obj):
self.disappeared[id] = 0
- if (
- intersection_over_union(self.tracked_objects[id]["box"], new_obj["box"])
- > 0.9
- ):
+ # update the motionless count if the object has not moved to a new position
+ if self.update_position(id, new_obj["box"]):
self.tracked_objects[id]["motionless_count"] += 1
else:
self.tracked_objects[id]["motionless_count"] = 0
+ self.tracked_objects[id]["position_changes"] += 1
self.tracked_objects[id].update(new_obj)
+ def update_frame_times(self, frame_time):
+ for id in self.tracked_objects.keys():
+ self.tracked_objects[id]["frame_time"] = frame_time
+
def match_and_update(self, frame_time, new_objects):
# group by name
new_object_groups = defaultdict(lambda: [])
diff --git a/frigate/record.py b/frigate/record.py
--- a/frigate/record.py
+++ b/frigate/record.py
@@ -497,7 +497,8 @@ def expire_files(self):
oldest_timestamp = datetime.datetime.now().timestamp()
except FileNotFoundError:
logger.warning(f"Unable to find file from recordings database: {p}")
- oldest_timestamp = datetime.datetime.now().timestamp()
+ Recordings.delete().where(Recordings.id == oldest_recording.id).execute()
+ return
logger.debug(f"Oldest recording in the db: {oldest_timestamp}")
process = sp.run(
@@ -548,7 +549,7 @@ def run(self):
# self.sync_recordings()
# Expire tmp clips every minute, recordings and clean directories every hour.
- for counter in itertools.cycle(range(60)):
+ for counter in itertools.cycle(range(self.config.record.expire_interval)):
if self.stop_event.wait(60):
logger.info(f"Exiting recording cleanup...")
break
diff --git a/frigate/video.py b/frigate/video.py
--- a/frigate/video.py
+++ b/frigate/video.py
@@ -3,6 +3,7 @@
import logging
import multiprocessing as mp
import queue
+import random
import signal
import subprocess as sp
import threading
@@ -469,6 +470,8 @@ def process_frames(
fps_tracker = EventsPerSecond()
fps_tracker.start()
+ startup_scan_counter = 0
+
while not stop_event.is_set():
if exit_on_empty and frame_queue.empty():
logger.info(f"Exiting track_objects...")
@@ -512,7 +515,10 @@ def process_frames(
# if there hasn't been motion for 10 frames
if obj["motionless_count"] >= 10
# and it isn't due for a periodic check
- and obj["motionless_count"] % detect_config.stationary_interval != 0
+ and (
+ detect_config.stationary_interval == 0
+ or obj["motionless_count"] % detect_config.stationary_interval != 0
+ )
# and it hasn't disappeared
and object_tracker.disappeared[obj["id"]] == 0
# and it doesn't overlap with any current motion boxes
@@ -532,16 +538,39 @@ def process_frames(
region_min_size = max(model_shape[0], model_shape[1])
# compute regions
regions = [
- calculate_region(frame_shape, a[0], a[1], a[2], a[3], region_min_size, multiplier=1.2)
+ calculate_region(
+ frame_shape,
+ a[0],
+ a[1],
+ a[2],
+ a[3],
+ region_min_size,
+ multiplier=random.uniform(1.2, 1.5),
+ )
for a in combined_boxes
]
# consolidate regions with heavy overlap
regions = [
- calculate_region(frame_shape, a[0], a[1], a[2], a[3], region_min_size, multiplier=1.0)
+ calculate_region(
+ frame_shape, a[0], a[1], a[2], a[3], region_min_size, multiplier=1.0
+ )
for a in reduce_boxes(regions, 0.4)
]
+ # if starting up, get the next startup scan region
+ if startup_scan_counter < 9:
+ ymin = int(frame_shape[0] / 3 * startup_scan_counter / 3)
+ ymax = int(frame_shape[0] / 3 + ymin)
+ xmin = int(frame_shape[1] / 3 * startup_scan_counter / 3)
+ xmax = int(frame_shape[1] / 3 + xmin)
+ regions.append(
+ calculate_region(
+ frame_shape, xmin, ymin, xmax, ymax, region_min_size, multiplier=1.2
+ )
+ )
+ startup_scan_counter += 1
+
# resize regions and detect
# seed with stationary objects
detections = [
@@ -555,6 +584,7 @@ def process_frames(
for obj in object_tracker.tracked_objects.values()
if obj["id"] in stationary_object_ids
]
+
for region in regions:
detections.extend(
detect(
@@ -570,7 +600,7 @@ def process_frames(
#########
# merge objects, check for clipped objects and look again up to 4 times
#########
- refining = True
+ refining = len(regions) > 0
refine_count = 0
while refining and refine_count < 4:
refining = False
@@ -625,44 +655,49 @@ def process_frames(
## drop detections that overlap too much
consolidated_detections = []
- # group by name
- detected_object_groups = defaultdict(lambda: [])
- for detection in detections:
- detected_object_groups[detection[0]].append(detection)
-
- # loop over detections grouped by label
- for group in detected_object_groups.values():
- # if the group only has 1 item, skip
- if len(group) == 1:
- consolidated_detections.append(group[0])
- continue
- # sort smallest to largest by area
- sorted_by_area = sorted(group, key=lambda g: g[3])
-
- for current_detection_idx in range(0, len(sorted_by_area)):
- current_detection = sorted_by_area[current_detection_idx][2]
- overlap = 0
- for to_check_idx in range(
- min(current_detection_idx + 1, len(sorted_by_area)),
- len(sorted_by_area),
- ):
- to_check = sorted_by_area[to_check_idx][2]
- # if 90% of smaller detection is inside of another detection, consolidate
- if (
- area(intersection(current_detection, to_check))
- / area(current_detection)
- > 0.9
+ # if detection was run on this frame, consolidate
+ if len(regions) > 0:
+ # group by name
+ detected_object_groups = defaultdict(lambda: [])
+ for detection in detections:
+ detected_object_groups[detection[0]].append(detection)
+
+ # loop over detections grouped by label
+ for group in detected_object_groups.values():
+ # if the group only has 1 item, skip
+ if len(group) == 1:
+ consolidated_detections.append(group[0])
+ continue
+
+ # sort smallest to largest by area
+ sorted_by_area = sorted(group, key=lambda g: g[3])
+
+ for current_detection_idx in range(0, len(sorted_by_area)):
+ current_detection = sorted_by_area[current_detection_idx][2]
+ overlap = 0
+ for to_check_idx in range(
+ min(current_detection_idx + 1, len(sorted_by_area)),
+ len(sorted_by_area),
):
- overlap = 1
- break
- if overlap == 0:
- consolidated_detections.append(
- sorted_by_area[current_detection_idx]
- )
-
- # now that we have refined our detections, we need to track objects
- object_tracker.match_and_update(frame_time, consolidated_detections)
+ to_check = sorted_by_area[to_check_idx][2]
+ # if 90% of smaller detection is inside of another detection, consolidate
+ if (
+ area(intersection(current_detection, to_check))
+ / area(current_detection)
+ > 0.9
+ ):
+ overlap = 1
+ break
+ if overlap == 0:
+ consolidated_detections.append(
+ sorted_by_area[current_detection_idx]
+ )
+ # now that we have refined our detections, we need to track objects
+ object_tracker.match_and_update(frame_time, consolidated_detections)
+ # else, just update the frame times for the stationary objects
+ else:
+ object_tracker.update_frame_times(frame_time)
# add to the queue if not full
if detected_objects_queue.full():
| diff --git a/frigate/test/test_config.py b/frigate/test/test_config.py
--- a/frigate/test/test_config.py
+++ b/frigate/test/test_config.py
@@ -1244,6 +1244,30 @@ def test_allow_retain_to_be_a_decimal(self):
runtime_config = frigate_config.runtime_config
assert runtime_config.cameras["back"].snapshots.retain.default == 1.5
+ def test_fails_on_bad_camera_name(self):
+ config = {
+ "mqtt": {"host": "mqtt"},
+ "snapshots": {"retain": {"default": 1.5}},
+ "cameras": {
+ "back camer#": {
+ "ffmpeg": {
+ "inputs": [
+ {
+ "path": "rtsp://10.0.0.1:554/video",
+ "roles": ["detect"],
+ },
+ ]
+ },
+ }
+ },
+ }
+
+ frigate_config = FrigateConfig(**config)
+
+ self.assertRaises(
+ ValidationError, lambda: frigate_config.runtime_config.cameras
+ )
+
if __name__ == "__main__":
unittest.main(verbosity=2)
| Make wait_time for recording cleanup configurable
When running Frigate on unRAID, it is constantly deleting files (every 5 seconds), which keeps the array disks spun up all the time even though new recordings go to the cache disk. I think it would be useful to be able to set the recording cleanup to run as one larger job once per day, or even once per week.
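The patch above adds exactly this knob: `record.expire_interval`, the number of minutes between cleanup passes (default 60). A sketch of a once-a-day schedule in the Python-dict config style the unit tests use (camera details are placeholders):
```python
from frigate.config import FrigateConfig

config = {
    "mqtt": {"host": "mqtt"},
    "rtmp": {"enabled": False},  # no rtmp input in this example
    "record": {
        "enabled": True,
        # run the recording cleanup once every 24 hours instead of hourly,
        # letting the array disks stay spun down between passes
        "expire_interval": 1440,
    },
    "cameras": {
        "back": {
            "ffmpeg": {
                "inputs": [
                    {"path": "rtsp://10.0.0.1:554/video", "roles": ["detect", "record"]},
                ]
            },
        }
    },
}

print(FrigateConfig(**config).runtime_config.record.expire_interval)  # 1440
```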
| This is still a valid request, but 0.10.0 runs cleanup much less frequently. I think it's once per hour. It also doesn't move things from cache unless there is actually an event ongoing. Disk activity is significantly reduced. | 2022-02-05T15:30:39Z | [] | [] |
blakeblackshear/frigate | 4,055 | blakeblackshear__frigate-4055 | [
"4716"
] | be2408b36617f719b81dc3935a8e598f0791a521 | diff --git a/benchmark.py b/benchmark.py
--- a/benchmark.py
+++ b/benchmark.py
@@ -3,10 +3,16 @@
import multiprocessing as mp
import numpy as np
import datetime
-from frigate.edgetpu import LocalObjectDetector, EdgeTPUProcess, RemoteObjectDetector, load_labels
+from frigate.config import DetectorTypeEnum
+from frigate.object_detection import (
+ LocalObjectDetector,
+ ObjectDetectProcess,
+ RemoteObjectDetector,
+ load_labels,
+)
-my_frame = np.expand_dims(np.full((300,300,3), 1, np.uint8), axis=0)
-labels = load_labels('/labelmap.txt')
+my_frame = np.expand_dims(np.full((300, 300, 3), 1, np.uint8), axis=0)
+labels = load_labels("/labelmap.txt")
######
# Minimal same process runner
@@ -39,20 +45,23 @@
def start(id, num_detections, detection_queue, event):
- object_detector = RemoteObjectDetector(str(id), '/labelmap.txt', detection_queue, event)
- start = datetime.datetime.now().timestamp()
+ object_detector = RemoteObjectDetector(
+ str(id), "/labelmap.txt", detection_queue, event
+ )
+ start = datetime.datetime.now().timestamp()
- frame_times = []
- for x in range(0, num_detections):
- start_frame = datetime.datetime.now().timestamp()
- detections = object_detector.detect(my_frame)
- frame_times.append(datetime.datetime.now().timestamp()-start_frame)
+ frame_times = []
+ for x in range(0, num_detections):
+ start_frame = datetime.datetime.now().timestamp()
+ detections = object_detector.detect(my_frame)
+ frame_times.append(datetime.datetime.now().timestamp() - start_frame)
+
+ duration = datetime.datetime.now().timestamp() - start
+ object_detector.cleanup()
+ print(f"{id} - Processed for {duration:.2f} seconds.")
+ print(f"{id} - FPS: {object_detector.fps.eps():.2f}")
+ print(f"{id} - Average frame processing time: {mean(frame_times)*1000:.2f}ms")
- duration = datetime.datetime.now().timestamp()-start
- object_detector.cleanup()
- print(f"{id} - Processed for {duration:.2f} seconds.")
- print(f"{id} - FPS: {object_detector.fps.eps():.2f}")
- print(f"{id} - Average frame processing time: {mean(frame_times)*1000:.2f}ms")
######
# Separate process runner
@@ -71,23 +80,29 @@ def start(id, num_detections, detection_queue, event):
events = {}
for x in range(0, 10):
- events[str(x)] = mp.Event()
+ events[str(x)] = mp.Event()
detection_queue = mp.Queue()
-edgetpu_process_1 = EdgeTPUProcess(detection_queue, events, 'usb:0')
-edgetpu_process_2 = EdgeTPUProcess(detection_queue, events, 'usb:1')
+edgetpu_process_1 = ObjectDetectProcess(
+ detection_queue, events, DetectorTypeEnum.edgetpu, "usb:0"
+)
+edgetpu_process_2 = ObjectDetectProcess(
+ detection_queue, events, DetectorTypeEnum.edgetpu, "usb:1"
+)
for x in range(0, 10):
- camera_process = mp.Process(target=start, args=(x, 300, detection_queue, events[str(x)]))
- camera_process.daemon = True
- camera_processes.append(camera_process)
+ camera_process = mp.Process(
+ target=start, args=(x, 300, detection_queue, events[str(x)])
+ )
+ camera_process.daemon = True
+ camera_processes.append(camera_process)
start_time = datetime.datetime.now().timestamp()
for p in camera_processes:
- p.start()
+ p.start()
for p in camera_processes:
- p.join()
+ p.join()
-duration = datetime.datetime.now().timestamp()-start_time
-print(f"Total - Processed for {duration:.2f} seconds.")
\ No newline at end of file
+duration = datetime.datetime.now().timestamp() - start_time
+print(f"Total - Processed for {duration:.2f} seconds.")
diff --git a/docker/rootfs/usr/local/go2rtc/create_config.py b/docker/rootfs/usr/local/go2rtc/create_config.py
new file mode 100644
--- /dev/null
+++ b/docker/rootfs/usr/local/go2rtc/create_config.py
@@ -0,0 +1,106 @@
+"""Creates a go2rtc config file."""
+
+import json
+import os
+import sys
+import yaml
+
+sys.path.insert(0, "/opt/frigate")
+from frigate.const import BIRDSEYE_PIPE, BTBN_PATH
+from frigate.ffmpeg_presets import parse_preset_hardware_acceleration_encode
+
+sys.path.remove("/opt/frigate")
+
+
+FRIGATE_ENV_VARS = {k: v for k, v in os.environ.items() if k.startswith("FRIGATE_")}
+config_file = os.environ.get("CONFIG_FILE", "/config/config.yml")
+
+# Check if we can use .yaml instead of .yml
+config_file_yaml = config_file.replace(".yml", ".yaml")
+if os.path.isfile(config_file_yaml):
+ config_file = config_file_yaml
+
+with open(config_file) as f:
+ raw_config = f.read()
+
+if config_file.endswith((".yaml", ".yml")):
+ config: dict[str, any] = yaml.safe_load(raw_config)
+elif config_file.endswith(".json"):
+ config: dict[str, any] = json.loads(raw_config)
+
+go2rtc_config: dict[str, any] = config.get("go2rtc", {})
+
+# Need to enable CORS for go2rtc so the frigate integration / card work automatically
+if go2rtc_config.get("api") is None:
+ go2rtc_config["api"] = {"origin": "*"}
+elif go2rtc_config["api"].get("origin") is None:
+ go2rtc_config["api"]["origin"] = "*"
+
+# we want to ensure that logs are easy to read
+if go2rtc_config.get("log") is None:
+ go2rtc_config["log"] = {"format": "text"}
+elif go2rtc_config["log"].get("format") is None:
+ go2rtc_config["log"]["format"] = "text"
+
+if not go2rtc_config.get("webrtc", {}).get("candidates", []):
+ default_candidates = []
+ # use internal candidate if it was discovered when running through the add-on
+ internal_candidate = os.environ.get(
+ "FRIGATE_GO2RTC_WEBRTC_CANDIDATE_INTERNAL", None
+ )
+ if internal_candidate is not None:
+ default_candidates.append(internal_candidate)
+ # should set default stun server so webrtc can work
+ default_candidates.append("stun:8555")
+
+ go2rtc_config["webrtc"] = {"candidates": default_candidates}
+else:
+ print(
+ "[INFO] Not injecting WebRTC candidates into go2rtc config as it has been set manually",
+ )
+
+# sets default RTSP response to be equivalent to ?video=h264,h265&audio=aac
+# this means user does not need to specify audio codec when using restream
+# as source for frigate and the integration supports HLS playback
+if go2rtc_config.get("rtsp") is None:
+ go2rtc_config["rtsp"] = {"default_query": "mp4"}
+elif go2rtc_config["rtsp"].get("default_query") is None:
+ go2rtc_config["rtsp"]["default_query"] = "mp4"
+
+# need to replace ffmpeg command when using ffmpeg4
+if not os.path.exists(BTBN_PATH):
+ if go2rtc_config.get("ffmpeg") is None:
+ go2rtc_config["ffmpeg"] = {
+ "rtsp": "-fflags nobuffer -flags low_delay -stimeout 5000000 -user_agent go2rtc/ffmpeg -rtsp_transport tcp -i {input}"
+ }
+ elif go2rtc_config["ffmpeg"].get("rtsp") is None:
+ go2rtc_config["ffmpeg"][
+ "rtsp"
+ ] = "-fflags nobuffer -flags low_delay -stimeout 5000000 -user_agent go2rtc/ffmpeg -rtsp_transport tcp -i {input}"
+
+for name in go2rtc_config.get("streams", {}):
+ stream = go2rtc_config["streams"][name]
+
+ if isinstance(stream, str):
+ go2rtc_config["streams"][name] = go2rtc_config["streams"][name].format(
+ **FRIGATE_ENV_VARS
+ )
+ elif isinstance(stream, list):
+ for i, stream in enumerate(stream):
+ go2rtc_config["streams"][name][i] = stream.format(**FRIGATE_ENV_VARS)
+
+# add birdseye restream stream if enabled
+if config.get("birdseye", {}).get("restream", False):
+ birdseye: dict[str, any] = config.get("birdseye")
+
+ input = f"-f rawvideo -pix_fmt yuv420p -video_size {birdseye.get('width', 1280)}x{birdseye.get('height', 720)} -r 10 -i {BIRDSEYE_PIPE}"
+ ffmpeg_cmd = f"exec:{parse_preset_hardware_acceleration_encode(config.get('ffmpeg', {}).get('hwaccel_args'), input, '-rtsp_transport tcp -f rtsp {output}')}"
+
+ if go2rtc_config.get("streams"):
+ go2rtc_config["streams"]["birdseye"] = ffmpeg_cmd
+ else:
+ go2rtc_config["streams"] = {"birdseye": ffmpeg_cmd}
+
+# Write go2rtc_config to /dev/shm/go2rtc.yaml
+with open("/dev/shm/go2rtc.yaml", "w") as f:
+ yaml.dump(go2rtc_config, f)
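
For reference, a minimal sketch of the stream templating step above: every FRIGATE_-prefixed environment variable is substituted into the go2rtc stream strings via str.format, which is how credentials stay out of the config file. The variable name, camera name and URL below are assumptions, not values from the patch.

import os

# hypothetical secret exported into the container environment
os.environ.setdefault("FRIGATE_RTSP_PASSWORD", "example-secret")

FRIGATE_ENV_VARS = {k: v for k, v in os.environ.items() if k.startswith("FRIGATE_")}

streams = {"front_door": "rtsp://admin:{FRIGATE_RTSP_PASSWORD}@192.168.1.10:554/live"}
# the same substitution create_config.py applies to string (and list) stream entries
streams = {name: path.format(**FRIGATE_ENV_VARS) for name, path in streams.items()}

print(streams["front_door"])  # rtsp://admin:[email protected]:554/live
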
diff --git a/frigate/app.py b/frigate/app.py
--- a/frigate/app.py
+++ b/frigate/app.py
@@ -1,37 +1,35 @@
-import json
import logging
import multiprocessing as mp
from multiprocessing.queues import Queue
-from multiprocessing.synchronize import Event
-from multiprocessing.context import Process
+from multiprocessing.synchronize import Event as MpEvent
import os
+import shutil
import signal
import sys
-import threading
-from logging.handlers import QueueHandler
from typing import Optional
from types import FrameType
import traceback
-import yaml
from peewee_migrate import Router
from playhouse.sqlite_ext import SqliteExtDatabase
from playhouse.sqliteq import SqliteQueueDatabase
-from pydantic import ValidationError
-from frigate.config import DetectorTypeEnum, FrigateConfig
+from frigate.comms.dispatcher import Communicator, Dispatcher
+from frigate.comms.mqtt import MqttClient
+from frigate.comms.ws import WebSocketClient
+from frigate.config import FrigateConfig
from frigate.const import CACHE_DIR, CLIPS_DIR, RECORD_DIR
-from frigate.edgetpu import EdgeTPUProcess
+from frigate.object_detection import ObjectDetectProcess
from frigate.events import EventCleanup, EventProcessor
from frigate.http import create_app
from frigate.log import log_process, root_configurer
from frigate.models import Event, Recordings
-from frigate.mqtt import MqttSocketRelay, create_mqtt_client
from frigate.object_processing import TrackedObjectProcessor
from frigate.output import output_frames
from frigate.plus import PlusApi
from frigate.record import RecordingCleanup, RecordingMaintainer
from frigate.stats import StatsEmitter, stats_init
+from frigate.storage import StorageMaintainer
from frigate.version import VERSION
from frigate.video import capture_camera, track_camera
from frigate.watchdog import FrigateWatchdog
@@ -42,10 +40,10 @@
class FrigateApp:
def __init__(self) -> None:
- self.stop_event: Event = mp.Event()
+ self.stop_event: MpEvent = mp.Event()
self.detection_queue: Queue = mp.Queue()
- self.detectors: dict[str, EdgeTPUProcess] = {}
- self.detection_out_events: dict[str, Event] = {}
+ self.detectors: dict[str, ObjectDetectProcess] = {}
+ self.detection_out_events: dict[str, MpEvent] = {}
self.detection_shms: list[mp.shared_memory.SharedMemory] = []
self.log_queue: Queue = mp.Queue()
self.plus_api = PlusApi()
@@ -118,6 +116,9 @@ def set_log_levels(self) -> None:
if not "werkzeug" in self.config.logger.logs:
logging.getLogger("werkzeug").setLevel("ERROR")
+ if not "ws4py" in self.config.logger.logs:
+ logging.getLogger("ws4py").setLevel("ERROR")
+
def init_queues(self) -> None:
# Queues for clip processing
self.event_queue: Queue = mp.Queue()
@@ -157,7 +158,9 @@ def init_database(self) -> None:
self.db.bind(models)
def init_stats(self) -> None:
- self.stats_tracking = stats_init(self.camera_metrics, self.detectors)
+ self.stats_tracking = stats_init(
+ self.config, self.camera_metrics, self.detectors
+ )
def init_web_server(self) -> None:
self.flask_app = create_app(
@@ -165,29 +168,34 @@ def init_web_server(self) -> None:
self.db,
self.stats_tracking,
self.detected_frames_processor,
+ self.storage_maintainer,
self.plus_api,
)
- def init_mqtt(self) -> None:
- self.mqtt_client = create_mqtt_client(self.config, self.camera_metrics)
+ def init_dispatcher(self) -> None:
+ comms: list[Communicator] = []
- def start_mqtt_relay(self) -> None:
- self.mqtt_relay = MqttSocketRelay(
- self.mqtt_client, self.config.mqtt.topic_prefix
- )
- self.mqtt_relay.start()
+ if self.config.mqtt.enabled:
+ comms.append(MqttClient(self.config))
+
+ comms.append(WebSocketClient(self.config))
+ self.dispatcher = Dispatcher(self.config, self.camera_metrics, comms)
def start_detectors(self) -> None:
- model_path = self.config.model.path
- model_shape = (self.config.model.height, self.config.model.width)
for name in self.config.cameras.keys():
self.detection_out_events[name] = mp.Event()
try:
+ largest_frame = max(
+ [
+ det.model.height * det.model.width * 3
+ for (name, det) in self.config.detectors.items()
+ ]
+ )
shm_in = mp.shared_memory.SharedMemory(
name=name,
create=True,
- size=self.config.model.height * self.config.model.width * 3,
+ size=largest_frame,
)
except FileExistsError:
shm_in = mp.shared_memory.SharedMemory(name=name)
@@ -202,33 +210,18 @@ def start_detectors(self) -> None:
self.detection_shms.append(shm_in)
self.detection_shms.append(shm_out)
- for name, detector in self.config.detectors.items():
- if detector.type == DetectorTypeEnum.cpu:
- self.detectors[name] = EdgeTPUProcess(
- name,
- self.detection_queue,
- self.detection_out_events,
- model_path,
- model_shape,
- "cpu",
- detector.num_threads,
- )
- if detector.type == DetectorTypeEnum.edgetpu:
- self.detectors[name] = EdgeTPUProcess(
- name,
- self.detection_queue,
- self.detection_out_events,
- model_path,
- model_shape,
- detector.device,
- detector.num_threads,
- )
+ for name, detector_config in self.config.detectors.items():
+ self.detectors[name] = ObjectDetectProcess(
+ name,
+ self.detection_queue,
+ self.detection_out_events,
+ detector_config,
+ )
def start_detected_frames_processor(self) -> None:
self.detected_frames_processor = TrackedObjectProcessor(
self.config,
- self.mqtt_client,
- self.config.mqtt.topic_prefix,
+ self.dispatcher,
self.detected_frames_queue,
self.event_queue,
self.event_processed_queue,
@@ -253,15 +246,18 @@ def start_video_output_processor(self) -> None:
logger.info(f"Output process started: {output_processor.pid}")
def start_camera_processors(self) -> None:
- model_shape = (self.config.model.height, self.config.model.width)
for name, config in self.config.cameras.items():
+ if not self.config.cameras[name].enabled:
+ logger.info(f"Camera processor not started for disabled camera {name}")
+ continue
+
camera_process = mp.Process(
target=track_camera,
name=f"camera_processor:{name}",
args=(
name,
config,
- model_shape,
+ self.config.model,
self.config.model.merged_labelmap,
self.detection_queue,
self.detection_out_events[name],
@@ -276,6 +272,10 @@ def start_camera_processors(self) -> None:
def start_camera_capture_processes(self) -> None:
for name, config in self.config.cameras.items():
+ if not self.config.cameras[name].enabled:
+ logger.info(f"Capture process not started for disabled camera {name}")
+ continue
+
capture_process = mp.Process(
target=capture_camera,
name=f"camera_capture:{name}",
@@ -310,12 +310,15 @@ def start_recording_cleanup(self) -> None:
self.recording_cleanup = RecordingCleanup(self.config, self.stop_event)
self.recording_cleanup.start()
+ def start_storage_maintainer(self) -> None:
+ self.storage_maintainer = StorageMaintainer(self.config, self.stop_event)
+ self.storage_maintainer.start()
+
def start_stats_emitter(self) -> None:
self.stats_emitter = StatsEmitter(
self.config,
self.stats_tracking,
- self.mqtt_client,
- self.config.mqtt.topic_prefix,
+ self.dispatcher,
self.stop_event,
)
self.stats_emitter.start()
@@ -324,6 +327,22 @@ def start_watchdog(self) -> None:
self.frigate_watchdog = FrigateWatchdog(self.detectors, self.stop_event)
self.frigate_watchdog.start()
+ def check_shm(self) -> None:
+ available_shm = round(shutil.disk_usage("/dev/shm").total / 1000000, 1)
+ min_req_shm = 30
+
+ for _, camera in self.config.cameras.items():
+ min_req_shm += round(
+ (camera.detect.width * camera.detect.height * 1.5 * 9 + 270480)
+ / 1048576,
+ 1,
+ )
+
+ if available_shm < min_req_shm:
+ logger.warning(
+ f"The current SHM size of {available_shm}MB is too small, recommend increasing it to at least {min_req_shm}MB."
+ )
+
def start(self) -> None:
self.init_logger()
logger.info(f"Starting Frigate ({VERSION})")
@@ -352,7 +371,7 @@ def start(self) -> None:
self.set_log_levels()
self.init_queues()
self.init_database()
- self.init_mqtt()
+ self.init_dispatcher()
except Exception as e:
print(e)
self.log_process.terminate()
@@ -362,15 +381,16 @@ def start(self) -> None:
self.start_detected_frames_processor()
self.start_camera_processors()
self.start_camera_capture_processes()
+ self.start_storage_maintainer()
self.init_stats()
self.init_web_server()
- self.start_mqtt_relay()
self.start_event_processor()
self.start_event_cleanup()
self.start_recording_maintainer()
self.start_recording_cleanup()
self.start_stats_emitter()
self.start_watchdog()
+ self.check_shm()
# self.zeroconf = broadcast_zeroconf(self.config.mqtt.client_id)
def receiveSignal(signalNumber: int, frame: Optional[FrameType]) -> None:
@@ -390,7 +410,17 @@ def stop(self) -> None:
logger.info(f"Stopping...")
self.stop_event.set()
- self.mqtt_relay.stop()
+ for detector in self.detectors.values():
+ detector.stop()
+
+ # Empty the detection queue and set the events for all requests
+ while not self.detection_queue.empty():
+ connection_id = self.detection_queue.get(timeout=1)
+ self.detection_out_events[connection_id].set()
+ self.detection_queue.close()
+ self.detection_queue.join_thread()
+
+ self.dispatcher.stop()
self.detected_frames_processor.join()
self.event_processor.join()
self.event_cleanup.join()
@@ -400,10 +430,20 @@ def stop(self) -> None:
self.frigate_watchdog.join()
self.db.stop()
- for detector in self.detectors.values():
- detector.stop()
-
while len(self.detection_shms) > 0:
shm = self.detection_shms.pop()
shm.close()
shm.unlink()
+
+ for queue in [
+ self.event_queue,
+ self.event_processed_queue,
+ self.video_output_queue,
+ self.detected_frames_queue,
+ self.recordings_info_queue,
+ self.log_queue,
+ ]:
+ while not queue.empty():
+ queue.get_nowait()
+ queue.close()
+ queue.join_thread()
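
As a rough illustration of the check_shm() estimate introduced above, the recommended /dev/shm size is a 30 MB base plus a per-camera term derived from the detect resolution; the 1280x720 figure below is only an assumed example.

# assumed example: one camera with a 1280x720 detect stream
width, height = 1280, 720

min_req_shm = 30  # base allowance in MB, as in check_shm()
min_req_shm += round((width * height * 1.5 * 9 + 270480) / 1048576, 1)

print(f"recommend at least {min_req_shm} MB of /dev/shm")
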
diff --git a/frigate/comms/dispatcher.py b/frigate/comms/dispatcher.py
new file mode 100644
--- /dev/null
+++ b/frigate/comms/dispatcher.py
@@ -0,0 +1,206 @@
+"""Handle communication between Frigate and other applications."""
+
+import logging
+
+from typing import Any, Callable
+
+from abc import ABC, abstractmethod
+
+from frigate.config import FrigateConfig
+from frigate.types import CameraMetricsTypes
+from frigate.util import restart_frigate
+
+
+logger = logging.getLogger(__name__)
+
+
+class Communicator(ABC):
+ """pub/sub model via specific protocol."""
+
+ @abstractmethod
+ def publish(self, topic: str, payload: Any, retain: bool = False) -> None:
+ """Send data via specific protocol."""
+ pass
+
+ @abstractmethod
+ def subscribe(self, receiver: Callable) -> None:
+ """Pass receiver so communicators can pass commands."""
+ pass
+
+ @abstractmethod
+ def stop(self) -> None:
+ """Stop the communicator."""
+ pass
+
+
+class Dispatcher:
+ """Handle communication between Frigate and communicators."""
+
+ def __init__(
+ self,
+ config: FrigateConfig,
+ camera_metrics: dict[str, CameraMetricsTypes],
+ communicators: list[Communicator],
+ ) -> None:
+ self.config = config
+ self.camera_metrics = camera_metrics
+ self.comms = communicators
+
+ for comm in self.comms:
+ comm.subscribe(self._receive)
+
+ self._camera_settings_handlers: dict[str, Callable] = {
+ "detect": self._on_detect_command,
+ "improve_contrast": self._on_motion_improve_contrast_command,
+ "motion": self._on_motion_command,
+ "motion_contour_area": self._on_motion_contour_area_command,
+ "motion_threshold": self._on_motion_threshold_command,
+ "recordings": self._on_recordings_command,
+ "snapshots": self._on_snapshots_command,
+ }
+
+ def _receive(self, topic: str, payload: str) -> None:
+ """Handle receiving of payload from communicators."""
+ if topic.endswith("set"):
+ try:
+ camera_name = topic.split("/")[-3]
+ command = topic.split("/")[-2]
+ self._camera_settings_handlers[command](camera_name, payload)
+ except Exception as e:
+ logger.error(f"Received invalid set command: {topic}")
+ return
+ elif topic == "restart":
+ restart_frigate()
+
+ def publish(self, topic: str, payload: Any, retain: bool = False) -> None:
+ """Handle publishing to communicators."""
+ for comm in self.comms:
+ comm.publish(topic, payload, retain)
+
+ def stop(self) -> None:
+ for comm in self.comms:
+ comm.stop()
+
+ def _on_detect_command(self, camera_name: str, payload: str) -> None:
+ """Callback for detect topic."""
+ detect_settings = self.config.cameras[camera_name].detect
+
+ if payload == "ON":
+ if not self.camera_metrics[camera_name]["detection_enabled"].value:
+ logger.info(f"Turning on detection for {camera_name}")
+ self.camera_metrics[camera_name]["detection_enabled"].value = True
+ detect_settings.enabled = True
+
+ if not self.camera_metrics[camera_name]["motion_enabled"].value:
+ logger.info(
+ f"Turning on motion for {camera_name} due to detection being enabled."
+ )
+ self.camera_metrics[camera_name]["motion_enabled"].value = True
+ self.publish(f"{camera_name}/motion/state", payload, retain=True)
+ elif payload == "OFF":
+ if self.camera_metrics[camera_name]["detection_enabled"].value:
+ logger.info(f"Turning off detection for {camera_name}")
+ self.camera_metrics[camera_name]["detection_enabled"].value = False
+ detect_settings.enabled = False
+
+ self.publish(f"{camera_name}/detect/state", payload, retain=True)
+
+ def _on_motion_command(self, camera_name: str, payload: str) -> None:
+ """Callback for motion topic."""
+ if payload == "ON":
+ if not self.camera_metrics[camera_name]["motion_enabled"].value:
+ logger.info(f"Turning on motion for {camera_name}")
+ self.camera_metrics[camera_name]["motion_enabled"].value = True
+ elif payload == "OFF":
+ if self.camera_metrics[camera_name]["detection_enabled"].value:
+ logger.error(
+ f"Turning off motion is not allowed when detection is enabled."
+ )
+ return
+
+ if self.camera_metrics[camera_name]["motion_enabled"].value:
+ logger.info(f"Turning off motion for {camera_name}")
+ self.camera_metrics[camera_name]["motion_enabled"].value = False
+
+ self.publish(f"{camera_name}/motion/state", payload, retain=True)
+
+ def _on_motion_improve_contrast_command(
+ self, camera_name: str, payload: str
+ ) -> None:
+ """Callback for improve_contrast topic."""
+ motion_settings = self.config.cameras[camera_name].motion
+
+ if payload == "ON":
+ if not self.camera_metrics[camera_name]["improve_contrast_enabled"].value:
+ logger.info(f"Turning on improve contrast for {camera_name}")
+ self.camera_metrics[camera_name][
+ "improve_contrast_enabled"
+ ].value = True
+ motion_settings.improve_contrast = True # type: ignore[union-attr]
+ elif payload == "OFF":
+ if self.camera_metrics[camera_name]["improve_contrast_enabled"].value:
+ logger.info(f"Turning off improve contrast for {camera_name}")
+ self.camera_metrics[camera_name][
+ "improve_contrast_enabled"
+ ].value = False
+ motion_settings.improve_contrast = False # type: ignore[union-attr]
+
+ self.publish(f"{camera_name}/improve_contrast/state", payload, retain=True)
+
+ def _on_motion_contour_area_command(self, camera_name: str, payload: int) -> None:
+ """Callback for motion contour topic."""
+ try:
+ payload = int(payload)
+ except ValueError:
+ f"Received unsupported value for motion contour area: {payload}"
+ return
+
+ motion_settings = self.config.cameras[camera_name].motion
+ logger.info(f"Setting motion contour area for {camera_name}: {payload}")
+ self.camera_metrics[camera_name]["motion_contour_area"].value = payload
+ motion_settings.contour_area = payload # type: ignore[union-attr]
+ self.publish(f"{camera_name}/motion_contour_area/state", payload, retain=True)
+
+ def _on_motion_threshold_command(self, camera_name: str, payload: int) -> None:
+ """Callback for motion threshold topic."""
+ try:
+ payload = int(payload)
+ except ValueError:
+ f"Received unsupported value for motion threshold: {payload}"
+ return
+
+ motion_settings = self.config.cameras[camera_name].motion
+ logger.info(f"Setting motion threshold for {camera_name}: {payload}")
+ self.camera_metrics[camera_name]["motion_threshold"].value = payload
+ motion_settings.threshold = payload # type: ignore[union-attr]
+ self.publish(f"{camera_name}/motion_threshold/state", payload, retain=True)
+
+ def _on_recordings_command(self, camera_name: str, payload: str) -> None:
+ """Callback for recordings topic."""
+ record_settings = self.config.cameras[camera_name].record
+
+ if payload == "ON":
+ if not record_settings.enabled:
+ logger.info(f"Turning on recordings for {camera_name}")
+ record_settings.enabled = True
+ elif payload == "OFF":
+ if record_settings.enabled:
+ logger.info(f"Turning off recordings for {camera_name}")
+ record_settings.enabled = False
+
+ self.publish(f"{camera_name}/recordings/state", payload, retain=True)
+
+ def _on_snapshots_command(self, camera_name: str, payload: str) -> None:
+ """Callback for snapshots topic."""
+ snapshots_settings = self.config.cameras[camera_name].snapshots
+
+ if payload == "ON":
+ if not snapshots_settings.enabled:
+ logger.info(f"Turning on snapshots for {camera_name}")
+ snapshots_settings.enabled = True
+ elif payload == "OFF":
+ if snapshots_settings.enabled:
+ logger.info(f"Turning off snapshots for {camera_name}")
+ snapshots_settings.enabled = False
+
+ self.publish(f"{camera_name}/snapshots/state", payload, retain=True)
diff --git a/frigate/comms/mqtt.py b/frigate/comms/mqtt.py
new file mode 100644
--- /dev/null
+++ b/frigate/comms/mqtt.py
@@ -0,0 +1,199 @@
+import logging
+import threading
+
+from typing import Any, Callable
+
+import paho.mqtt.client as mqtt
+
+from frigate.comms.dispatcher import Communicator
+from frigate.config import FrigateConfig
+
+
+logger = logging.getLogger(__name__)
+
+
+class MqttClient(Communicator): # type: ignore[misc]
+ """Frigate wrapper for mqtt client."""
+
+ def __init__(self, config: FrigateConfig) -> None:
+ self.config = config
+ self.mqtt_config = config.mqtt
+ self.connected: bool = False
+
+ def subscribe(self, receiver: Callable) -> None:
+ """Wrapper for allowing dispatcher to subscribe."""
+ self._dispatcher = receiver
+ self._start()
+
+ def publish(self, topic: str, payload: Any, retain: bool = False) -> None:
+ """Wrapper for publishing when client is in valid state."""
+ if not self.connected:
+ logger.error(f"Unable to publish to {topic}: client is not connected")
+ return
+
+ self.client.publish(
+ f"{self.mqtt_config.topic_prefix}/{topic}", payload, retain=retain
+ )
+
+ def stop(self) -> None:
+ self.client.disconnect()
+
+ def _set_initial_topics(self) -> None:
+ """Set initial state topics."""
+ for camera_name, camera in self.config.cameras.items():
+ self.publish(
+ f"{camera_name}/recordings/state",
+ "ON" if camera.record.enabled else "OFF",
+ retain=True,
+ )
+ self.publish(
+ f"{camera_name}/snapshots/state",
+ "ON" if camera.snapshots.enabled else "OFF",
+ retain=True,
+ )
+ self.publish(
+ f"{camera_name}/detect/state",
+ "ON" if camera.detect.enabled else "OFF",
+ retain=True,
+ )
+ self.publish(
+ f"{camera_name}/motion/state",
+ "ON",
+ retain=True,
+ )
+ self.publish(
+ f"{camera_name}/improve_contrast/state",
+ "ON" if camera.motion.improve_contrast else "OFF", # type: ignore[union-attr]
+ retain=True,
+ )
+ self.publish(
+ f"{camera_name}/motion_threshold/state",
+ camera.motion.threshold, # type: ignore[union-attr]
+ retain=True,
+ )
+ self.publish(
+ f"{camera_name}/motion_contour_area/state",
+ camera.motion.contour_area, # type: ignore[union-attr]
+ retain=True,
+ )
+ self.publish(
+ f"{camera_name}/motion",
+ "OFF",
+ retain=False,
+ )
+
+ self.publish("available", "online", retain=True)
+
+ def on_mqtt_command(
+ self, client: mqtt.Client, userdata: Any, message: mqtt.MQTTMessage
+ ) -> None:
+ self._dispatcher(
+ message.topic.replace(f"{self.mqtt_config.topic_prefix}/", "", 1),
+ message.payload.decode(),
+ )
+
+ def _on_connect(
+ self,
+ client: mqtt.Client,
+ userdata: Any,
+ flags: Any,
+ rc: mqtt.ReasonCodes,
+ ) -> None:
+ """Mqtt connection callback."""
+ threading.current_thread().name = "mqtt"
+ if rc != 0:
+ if rc == 3:
+ logger.error(
+ "Unable to connect to MQTT server: MQTT Server unavailable"
+ )
+ elif rc == 4:
+ logger.error(
+ "Unable to connect to MQTT server: MQTT Bad username or password"
+ )
+ elif rc == 5:
+ logger.error("Unable to connect to MQTT server: MQTT Not authorized")
+ else:
+ logger.error(
+ "Unable to connect to MQTT server: Connection refused. Error code: "
+ + str(rc)
+ )
+
+ self.connected = True
+ logger.debug("MQTT connected")
+ client.subscribe(f"{self.mqtt_config.topic_prefix}/#")
+ self._set_initial_topics()
+
+ def _on_disconnect(
+        self, client: mqtt.Client, userdata: Any, flags: Any, rc: int
+ ) -> None:
+ """Mqtt disconnection callback."""
+ self.connected = False
+ logger.error("MQTT disconnected")
+
+ def _start(self) -> None:
+ """Start mqtt client."""
+ self.client = mqtt.Client(client_id=self.mqtt_config.client_id)
+ self.client.on_connect = self._on_connect
+ self.client.will_set(
+ self.mqtt_config.topic_prefix + "/available",
+ payload="offline",
+ qos=1,
+ retain=True,
+ )
+
+ # register callbacks
+ callback_types = [
+ "recordings",
+ "snapshots",
+ "detect",
+ "motion",
+ "improve_contrast",
+ "motion_threshold",
+ "motion_contour_area",
+ ]
+
+ for name in self.config.cameras.keys():
+ for callback in callback_types:
+                # We need to pre-clear existing set topics because in previous
+                # versions the webUI published retained messages on the /set
+                # topic, but this is no longer the case.
+ self.client.publish(
+ f"{self.mqtt_config.topic_prefix}/{name}/{callback}/set",
+ None,
+ retain=True,
+ )
+ self.client.message_callback_add(
+ f"{self.mqtt_config.topic_prefix}/{name}/{callback}/set",
+ self.on_mqtt_command,
+ )
+
+ self.client.message_callback_add(
+ f"{self.mqtt_config.topic_prefix}/restart", self.on_mqtt_command
+ )
+
+ if not self.mqtt_config.tls_ca_certs is None:
+ if (
+ not self.mqtt_config.tls_client_cert is None
+ and not self.mqtt_config.tls_client_key is None
+ ):
+ self.client.tls_set(
+ self.mqtt_config.tls_ca_certs,
+ self.mqtt_config.tls_client_cert,
+ self.mqtt_config.tls_client_key,
+ )
+ else:
+ self.client.tls_set(self.mqtt_config.tls_ca_certs)
+ if not self.mqtt_config.tls_insecure is None:
+ self.client.tls_insecure_set(self.mqtt_config.tls_insecure)
+ if not self.mqtt_config.user is None:
+ self.client.username_pw_set(
+ self.mqtt_config.user, password=self.mqtt_config.password
+ )
+ try:
+ # https://stackoverflow.com/a/55390477
+ # with connect_async, retries are handled automatically
+ self.client.connect_async(self.mqtt_config.host, self.mqtt_config.port, 60)
+ self.client.loop_start()
+ except Exception as e:
+ logger.error(f"Unable to connect to MQTT server: {e}")
+ return
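
The command path registered above listens on <topic_prefix>/<camera>/<command>/set topics. A minimal sketch of toggling detection from the broker side; the broker hostname and camera name are assumptions.

import paho.mqtt.publish as publish

publish.single(
    "frigate/front_door/detect/set",  # topic_prefix/camera/command/set
    payload="OFF",
    hostname="mqtt.example.local",
)
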
diff --git a/frigate/comms/ws.py b/frigate/comms/ws.py
new file mode 100644
--- /dev/null
+++ b/frigate/comms/ws.py
@@ -0,0 +1,98 @@
+"""Websocket communicator."""
+
+import json
+import logging
+import threading
+
+from typing import Callable
+
+from wsgiref.simple_server import make_server
+from ws4py.server.wsgirefserver import (
+ WebSocketWSGIHandler,
+ WebSocketWSGIRequestHandler,
+ WSGIServer,
+)
+from ws4py.server.wsgiutils import WebSocketWSGIApplication
+from ws4py.websocket import WebSocket
+
+from frigate.comms.dispatcher import Communicator
+from frigate.config import FrigateConfig
+
+
+logger = logging.getLogger(__name__)
+
+
+class WebSocketClient(Communicator): # type: ignore[misc]
+ """Frigate wrapper for ws client."""
+
+ def __init__(self, config: FrigateConfig) -> None:
+ self.config = config
+
+ def subscribe(self, receiver: Callable) -> None:
+ self._dispatcher = receiver
+ self.start()
+
+ def start(self) -> None:
+ """Start the websocket client."""
+
+ class _WebSocketHandler(WebSocket): # type: ignore[misc]
+ receiver = self._dispatcher
+
+ def received_message(self, message: WebSocket.received_message) -> None:
+ try:
+ json_message = json.loads(message.data.decode("utf-8"))
+ json_message = {
+ "topic": json_message.get("topic"),
+ "payload": json_message.get("payload"),
+ }
+ except Exception as e:
+ logger.warning(
+ f"Unable to parse websocket message as valid json: {message.data.decode('utf-8')}"
+ )
+ return
+
+ logger.debug(
+ f"Publishing mqtt message from websockets at {json_message['topic']}."
+ )
+ self.receiver(
+ json_message["topic"],
+ json_message["payload"],
+ )
+
+ # start a websocket server on 5002
+ WebSocketWSGIHandler.http_version = "1.1"
+ self.websocket_server = make_server(
+ "127.0.0.1",
+ 5002,
+ server_class=WSGIServer,
+ handler_class=WebSocketWSGIRequestHandler,
+ app=WebSocketWSGIApplication(handler_cls=_WebSocketHandler),
+ )
+ self.websocket_server.initialize_websockets_manager()
+ self.websocket_thread = threading.Thread(
+ target=self.websocket_server.serve_forever
+ )
+ self.websocket_thread.start()
+
+ def publish(self, topic: str, payload: str, _: bool) -> None:
+ try:
+ ws_message = json.dumps(
+ {
+ "topic": topic,
+ "payload": payload,
+ }
+ )
+ except Exception as e:
+            # if the payload can't be serialized to JSON, don't relay it to clients
+            logger.debug(f"payload for {topic} wasn't JSON serializable. Skipping...")
+ return
+
+ self.websocket_server.manager.broadcast(ws_message)
+
+ def stop(self) -> None:
+ self.websocket_server.manager.close_all()
+ self.websocket_server.manager.stop()
+ self.websocket_server.manager.join()
+ self.websocket_server.shutdown()
+ self.websocket_thread.join()
+ logger.info("Exiting websocket client...")
diff --git a/frigate/config.py b/frigate/config.py
--- a/frigate/config.py
+++ b/frigate/config.py
@@ -9,11 +9,38 @@
import matplotlib.pyplot as plt
import numpy as np
import yaml
-from pydantic import BaseModel, Extra, Field, validator
+from pydantic import BaseModel, Extra, Field, validator, parse_obj_as
from pydantic.fields import PrivateAttr
-from frigate.const import BASE_DIR, CACHE_DIR, YAML_EXT
-from frigate.util import create_mask, deep_merge, load_labels
+from frigate.const import (
+ BASE_DIR,
+ CACHE_DIR,
+ REGEX_CAMERA_NAME,
+ YAML_EXT,
+)
+from frigate.util import (
+ create_mask,
+ deep_merge,
+ get_ffmpeg_arg_list,
+ escape_special_characters,
+ load_config_with_no_duplicates,
+ load_labels,
+)
+from frigate.ffmpeg_presets import (
+ parse_preset_hardware_acceleration_decode,
+ parse_preset_hardware_acceleration_scale,
+ parse_preset_input,
+ parse_preset_output_record,
+ parse_preset_output_rtmp,
+)
+from frigate.detectors import (
+ PixelFormatEnum,
+ InputTensorEnum,
+ ModelConfig,
+ DetectorConfig,
+)
+from frigate.version import VERSION
+
logger = logging.getLogger(__name__)
@@ -33,23 +60,52 @@ class Config:
extra = Extra.forbid
-class DetectorTypeEnum(str, Enum):
- edgetpu = "edgetpu"
- cpu = "cpu"
+class LiveModeEnum(str, Enum):
+ jsmpeg = "jsmpeg"
+ mse = "mse"
+ webrtc = "webrtc"
+
+
+class TimeFormatEnum(str, Enum):
+ browser = "browser"
+ hours12 = "12hour"
+ hours24 = "24hour"
-class DetectorConfig(FrigateBaseModel):
- type: DetectorTypeEnum = Field(default=DetectorTypeEnum.cpu, title="Detector Type")
- device: str = Field(default="usb", title="Device Type")
- num_threads: int = Field(default=3, title="Number of detection threads")
+class DateTimeStyleEnum(str, Enum):
+ full = "full"
+ long = "long"
+ medium = "medium"
+ short = "short"
class UIConfig(FrigateBaseModel):
+ live_mode: LiveModeEnum = Field(
+ default=LiveModeEnum.mse, title="Default Live Mode."
+ )
+ timezone: Optional[str] = Field(title="Override UI timezone.")
use_experimental: bool = Field(default=False, title="Experimental UI")
+ time_format: TimeFormatEnum = Field(
+ default=TimeFormatEnum.browser, title="Override UI time format."
+ )
+ date_style: DateTimeStyleEnum = Field(
+ default=DateTimeStyleEnum.short, title="Override UI dateStyle."
+ )
+ time_style: DateTimeStyleEnum = Field(
+ default=DateTimeStyleEnum.medium, title="Override UI timeStyle."
+ )
+ strftime_fmt: Optional[str] = Field(
+ default=None, title="Override date and time format using strftime syntax."
+ )
+
+
+class TelemetryConfig(FrigateBaseModel):
+ version_check: bool = Field(default=True, title="Enable latest version check.")
class MqttConfig(FrigateBaseModel):
- host: str = Field(title="MQTT Host")
+ enabled: bool = Field(title="Enable MQTT Communication.", default=True)
+ host: str = Field(default="", title="MQTT Host")
port: int = Field(default=1883, title="MQTT Port")
topic_prefix: str = Field(default="frigate", title="MQTT Topic Prefix")
client_id: str = Field(default="frigate", title="MQTT Client ID")
@@ -317,6 +373,7 @@ class BirdseyeModeEnum(str, Enum):
class BirdseyeConfig(FrigateBaseModel):
enabled: bool = Field(default=True, title="Enable birdseye view.")
+ restream: bool = Field(default=False, title="Restream birdseye via RTSP.")
width: int = Field(default=1280, title="Birdseye width.")
height: int = Field(default=720, title="Birdseye height.")
quality: int = Field(
@@ -338,36 +395,20 @@ class BirdseyeCameraConfig(BaseModel):
)
-FFMPEG_GLOBAL_ARGS_DEFAULT = ["-hide_banner", "-loglevel", "warning"]
-FFMPEG_INPUT_ARGS_DEFAULT = [
- "-avoid_negative_ts",
- "make_zero",
- "-fflags",
- "+genpts+discardcorrupt",
- "-rtsp_transport",
- "tcp",
- "-timeout",
- "5000000",
- "-use_wallclock_as_timestamps",
- "1",
-]
-DETECT_FFMPEG_OUTPUT_ARGS_DEFAULT = ["-f", "rawvideo", "-pix_fmt", "yuv420p"]
-RTMP_FFMPEG_OUTPUT_ARGS_DEFAULT = ["-c", "copy", "-f", "flv"]
-RECORD_FFMPEG_OUTPUT_ARGS_DEFAULT = [
+# Note: Setting threads to less than 2 caused several issues with recording segments
+# https://github.com/blakeblackshear/frigate/issues/5659
+FFMPEG_GLOBAL_ARGS_DEFAULT = ["-hide_banner", "-loglevel", "warning", "-threads", "2"]
+FFMPEG_INPUT_ARGS_DEFAULT = "preset-rtsp-generic"
+DETECT_FFMPEG_OUTPUT_ARGS_DEFAULT = [
+ "-threads",
+ "2",
"-f",
- "segment",
- "-segment_time",
- "10",
- "-segment_format",
- "mp4",
- "-reset_timestamps",
- "1",
- "-strftime",
- "1",
- "-c",
- "copy",
- "-an",
+ "rawvideo",
+ "-pix_fmt",
+ "yuv420p",
]
+RTMP_FFMPEG_OUTPUT_ARGS_DEFAULT = "preset-rtmp-generic"
+RECORD_FFMPEG_OUTPUT_ARGS_DEFAULT = "preset-record-generic"
class FfmpegOutputArgsConfig(FrigateBaseModel):
@@ -513,14 +554,20 @@ class CameraMqttConfig(FrigateBaseModel):
class RtmpConfig(FrigateBaseModel):
- enabled: bool = Field(default=True, title="RTMP restreaming enabled.")
+ enabled: bool = Field(default=False, title="RTMP restreaming enabled.")
class CameraLiveConfig(FrigateBaseModel):
+ stream_name: str = Field(default="", title="Name of restream to use as live view.")
height: int = Field(default=720, title="Live camera view height")
quality: int = Field(default=8, ge=1, le=31, title="Live camera view quality")
+class RestreamConfig(BaseModel):
+ class Config:
+ extra = Extra.allow
+
+
class CameraUiConfig(FrigateBaseModel):
order: int = Field(default=0, title="Order of camera in UI.")
dashboard: bool = Field(
@@ -529,7 +576,8 @@ class CameraUiConfig(FrigateBaseModel):
class CameraConfig(FrigateBaseModel):
- name: Optional[str] = Field(title="Camera name.", regex="^[a-zA-Z0-9_-]+$")
+ name: Optional[str] = Field(title="Camera name.", regex=REGEX_CAMERA_NAME)
+ enabled: bool = Field(default=True, title="Enable camera.")
ffmpeg: CameraFfmpegConfig = Field(title="FFmpeg configuration for the camera.")
best_image_timeout: int = Field(
default=60,
@@ -582,7 +630,15 @@ def __init__(self, **config):
# add roles to the input if there is only one
if len(config["ffmpeg"]["inputs"]) == 1:
- config["ffmpeg"]["inputs"][0]["roles"] = ["record", "rtmp", "detect"]
+ has_rtmp = "rtmp" in config["ffmpeg"]["inputs"][0].get("roles", [])
+
+ config["ffmpeg"]["inputs"][0]["roles"] = [
+ "record",
+ "detect",
+ ]
+
+ if has_rtmp:
+ config["ffmpeg"]["inputs"][0]["roles"].append("rtmp")
super().__init__(**config)
@@ -613,36 +669,29 @@ def create_ffmpeg_cmds(self):
def _get_ffmpeg_cmd(self, ffmpeg_input: CameraInput):
ffmpeg_output_args = []
if "detect" in ffmpeg_input.roles:
- detect_args = (
- self.ffmpeg.output_args.detect
- if isinstance(self.ffmpeg.output_args.detect, list)
- else self.ffmpeg.output_args.detect.split(" ")
- )
- ffmpeg_output_args = (
- [
- "-r",
- str(self.detect.fps),
- "-s",
- f"{self.detect.width}x{self.detect.height}",
- ]
- + detect_args
- + ffmpeg_output_args
- + ["pipe:"]
+ detect_args = get_ffmpeg_arg_list(self.ffmpeg.output_args.detect)
+ scale_detect_args = parse_preset_hardware_acceleration_scale(
+ ffmpeg_input.hwaccel_args or self.ffmpeg.hwaccel_args,
+ detect_args,
+ self.detect.fps,
+ self.detect.width,
+ self.detect.height,
)
+
+ ffmpeg_output_args = scale_detect_args + ffmpeg_output_args + ["pipe:"]
if "rtmp" in ffmpeg_input.roles and self.rtmp.enabled:
- rtmp_args = (
- self.ffmpeg.output_args.rtmp
- if isinstance(self.ffmpeg.output_args.rtmp, list)
- else self.ffmpeg.output_args.rtmp.split(" ")
+ rtmp_args = get_ffmpeg_arg_list(
+ parse_preset_output_rtmp(self.ffmpeg.output_args.rtmp)
+ or self.ffmpeg.output_args.rtmp
)
+
ffmpeg_output_args = (
rtmp_args + [f"rtmp://127.0.0.1/live/{self.name}"] + ffmpeg_output_args
)
if "record" in ffmpeg_input.roles and self.record.enabled:
- record_args = (
- self.ffmpeg.output_args.record
- if isinstance(self.ffmpeg.output_args.record, list)
- else self.ffmpeg.output_args.record.split(" ")
+ record_args = get_ffmpeg_arg_list(
+ parse_preset_output_record(self.ffmpeg.output_args.record)
+ or self.ffmpeg.output_args.record
)
ffmpeg_output_args = (
@@ -655,18 +704,20 @@ def _get_ffmpeg_cmd(self, ffmpeg_input: CameraInput):
if len(ffmpeg_output_args) == 0:
return None
- global_args = ffmpeg_input.global_args or self.ffmpeg.global_args
- hwaccel_args = ffmpeg_input.hwaccel_args or self.ffmpeg.hwaccel_args
- input_args = ffmpeg_input.input_args or self.ffmpeg.input_args
-
- global_args = (
- global_args if isinstance(global_args, list) else global_args.split(" ")
+ global_args = get_ffmpeg_arg_list(
+ ffmpeg_input.global_args or self.ffmpeg.global_args
)
- hwaccel_args = (
- hwaccel_args if isinstance(hwaccel_args, list) else hwaccel_args.split(" ")
+ hwaccel_args = get_ffmpeg_arg_list(
+ parse_preset_hardware_acceleration_decode(ffmpeg_input.hwaccel_args)
+ or ffmpeg_input.hwaccel_args
+ or parse_preset_hardware_acceleration_decode(self.ffmpeg.hwaccel_args)
+ or self.ffmpeg.hwaccel_args
)
- input_args = (
- input_args if isinstance(input_args, list) else input_args.split(" ")
+ input_args = get_ffmpeg_arg_list(
+ parse_preset_input(ffmpeg_input.input_args, self.detect.fps)
+ or ffmpeg_input.input_args
+ or parse_preset_input(self.ffmpeg.input_args, self.detect.fps)
+ or self.ffmpeg.input_args
)
cmd = (
@@ -674,7 +725,7 @@ def _get_ffmpeg_cmd(self, ffmpeg_input: CameraInput):
+ global_args
+ hwaccel_args
+ input_args
- + ["-i", ffmpeg_input.path]
+ + ["-i", escape_special_characters(ffmpeg_input.path)]
+ ffmpeg_output_args
)
@@ -687,40 +738,6 @@ class DatabaseConfig(FrigateBaseModel):
)
-class ModelConfig(FrigateBaseModel):
- path: Optional[str] = Field(title="Custom Object detection model path.")
- labelmap_path: Optional[str] = Field(title="Label map for custom object detector.")
- width: int = Field(default=320, title="Object detection model input width.")
- height: int = Field(default=320, title="Object detection model input height.")
- labelmap: Dict[int, str] = Field(
- default_factory=dict, title="Labelmap customization."
- )
- _merged_labelmap: Optional[Dict[int, str]] = PrivateAttr()
- _colormap: Dict[int, Tuple[int, int, int]] = PrivateAttr()
-
- @property
- def merged_labelmap(self) -> Dict[int, str]:
- return self._merged_labelmap
-
- @property
- def colormap(self) -> Dict[int, Tuple[int, int, int]]:
- return self._colormap
-
- def __init__(self, **config):
- super().__init__(**config)
-
- self._merged_labelmap = {
- **load_labels(config.get("labelmap_path", "/labelmap.txt")),
- **config.get("labelmap", {}),
- }
-
- cmap = plt.cm.get_cmap("tab10", len(self._merged_labelmap.keys()))
-
- self._colormap = {}
- for key, val in self._merged_labelmap.items():
- self._colormap[val] = tuple(int(round(255 * c)) for c in cmap(key)[:3])
-
-
class LogLevelEnum(str, Enum):
debug = "debug"
info = "info"
@@ -738,6 +755,98 @@ class LoggerConfig(FrigateBaseModel):
)
+def verify_config_roles(camera_config: CameraConfig) -> None:
+ """Verify that roles are setup in the config correctly."""
+ assigned_roles = list(
+ set([r for i in camera_config.ffmpeg.inputs for r in i.roles])
+ )
+
+ if camera_config.record.enabled and not "record" in assigned_roles:
+ raise ValueError(
+ f"Camera {camera_config.name} has record enabled, but record is not assigned to an input."
+ )
+
+ if camera_config.rtmp.enabled and not "rtmp" in assigned_roles:
+ raise ValueError(
+ f"Camera {camera_config.name} has rtmp enabled, but rtmp is not assigned to an input."
+ )
+
+
+def verify_valid_live_stream_name(
+ frigate_config: FrigateConfig, camera_config: CameraConfig
+) -> None:
+ """Verify that a restream exists to use for live view."""
+ if (
+ camera_config.live.stream_name
+ not in frigate_config.go2rtc.dict().get("streams", {}).keys()
+ ):
+        raise ValueError(
+ f"No restream with name {camera_config.live.stream_name} exists for camera {camera_config.name}."
+ )
+
+
+def verify_old_retain_config(camera_config: CameraConfig) -> None:
+ """Leave log if old retain_days is used."""
+ if not camera_config.record.retain_days is None:
+ logger.warning(
+ "The 'retain_days' config option has been DEPRECATED and will be removed in a future version. Please use the 'days' setting under 'retain'"
+ )
+ if camera_config.record.retain.days == 0:
+ camera_config.record.retain.days = camera_config.record.retain_days
+
+
+def verify_recording_retention(camera_config: CameraConfig) -> None:
+ """Verify that recording retention modes are ranked correctly."""
+ rank_map = {
+ RetainModeEnum.all: 0,
+ RetainModeEnum.motion: 1,
+ RetainModeEnum.active_objects: 2,
+ }
+
+ if (
+ camera_config.record.retain.days != 0
+ and rank_map[camera_config.record.retain.mode]
+ > rank_map[camera_config.record.events.retain.mode]
+ ):
+ logger.warning(
+ f"{camera_config.name}: Recording retention is configured for {camera_config.record.retain.mode} and event retention is configured for {camera_config.record.events.retain.mode}. The more restrictive retention policy will be applied."
+ )
+
+
+def verify_recording_segments_setup_with_reasonable_time(
+ camera_config: CameraConfig,
+) -> None:
+ """Verify that recording segments are setup and segment time is not greater than 60."""
+ record_args: list[str] = get_ffmpeg_arg_list(
+ camera_config.ffmpeg.output_args.record
+ )
+
+ if record_args[0].startswith("preset"):
+ return
+
+ seg_arg_index = record_args.index("-segment_time")
+
+ if seg_arg_index < 0:
+ raise ValueError(
+ f"Camera {camera_config.name} has no segment_time in recording output args, segment args are required for record."
+ )
+
+ if int(record_args[seg_arg_index + 1]) > 60:
+ raise ValueError(
+ f"Camera {camera_config.name} has invalid segment_time output arg, segment_time must be 60 or less."
+ )
+
+
+def verify_zone_objects_are_tracked(camera_config: CameraConfig) -> None:
+ """Verify that user has not entered zone objects that are not in the tracking config."""
+ for zone_name, zone in camera_config.zones.items():
+ for obj in zone.objects:
+ if obj not in camera_config.objects.track:
+ raise ValueError(
+ f"Zone {zone_name} is configured to track {obj} but that object type is not added to objects -> track."
+ )
+
+
class FrigateConfig(FrigateBaseModel):
mqtt: MqttConfig = Field(title="MQTT Configuration.")
database: DatabaseConfig = Field(
@@ -747,11 +856,14 @@ class FrigateConfig(FrigateBaseModel):
default_factory=dict, title="Frigate environment variables."
)
ui: UIConfig = Field(default_factory=UIConfig, title="UI configuration.")
+ telemetry: TelemetryConfig = Field(
+ default_factory=TelemetryConfig, title="Telemetry configuration."
+ )
model: ModelConfig = Field(
default_factory=ModelConfig, title="Detection model configuration."
)
detectors: Dict[str, DetectorConfig] = Field(
- default={name: DetectorConfig(**d) for name, d in DEFAULT_DETECTORS.items()},
+ default=DEFAULT_DETECTORS,
title="Detector hardware configuration.",
)
logger: LoggerConfig = Field(
@@ -763,12 +875,15 @@ class FrigateConfig(FrigateBaseModel):
snapshots: SnapshotsConfig = Field(
default_factory=SnapshotsConfig, title="Global snapshots configuration."
)
- live: CameraLiveConfig = Field(
- default_factory=CameraLiveConfig, title="Global live configuration."
- )
rtmp: RtmpConfig = Field(
default_factory=RtmpConfig, title="Global RTMP restreaming configuration."
)
+ live: CameraLiveConfig = Field(
+ default_factory=CameraLiveConfig, title="Live playback settings."
+ )
+ go2rtc: RestreamConfig = Field(
+ default_factory=RestreamConfig, title="Global restream configuration."
+ )
birdseye: BirdseyeConfig = Field(
default_factory=BirdseyeConfig, title="Birdseye configuration."
)
@@ -795,18 +910,19 @@ def runtime_config(self) -> FrigateConfig:
"""Merge camera config with globals."""
config = self.copy(deep=True)
- # MQTT password substitution
- if config.mqtt.password:
+ # MQTT user/password substitutions
+ if config.mqtt.user or config.mqtt.password:
+ config.mqtt.user = config.mqtt.user.format(**FRIGATE_ENV_VARS)
config.mqtt.password = config.mqtt.password.format(**FRIGATE_ENV_VARS)
- # Global config to propegate down to camera level
+ # Global config to propagate down to camera level
global_config = config.dict(
include={
"birdseye": ...,
"record": ...,
"snapshots": ...,
- "live": ...,
"rtmp": ...,
+ "live": ...,
"objects": ...,
"motion": ...,
"detect": ...,
@@ -879,46 +995,60 @@ def runtime_config(self) -> FrigateConfig:
**camera_config.motion.dict(exclude_unset=True),
)
- # check runtime config
- assigned_roles = list(
- set([r for i in camera_config.ffmpeg.inputs for r in i.roles])
- )
- if camera_config.record.enabled and not "record" in assigned_roles:
- raise ValueError(
- f"Camera {name} has record enabled, but record is not assigned to an input."
- )
+ # Set live view stream if none is set
+ if not camera_config.live.stream_name:
+ camera_config.live.stream_name = name
- if camera_config.rtmp.enabled and not "rtmp" in assigned_roles:
- raise ValueError(
- f"Camera {name} has rtmp enabled, but rtmp is not assigned to an input."
- )
+ verify_config_roles(camera_config)
+ verify_valid_live_stream_name(config, camera_config)
+ verify_old_retain_config(camera_config)
+ verify_recording_retention(camera_config)
+ verify_recording_segments_setup_with_reasonable_time(camera_config)
+ verify_zone_objects_are_tracked(camera_config)
- # backwards compatibility for retain_days
- if not camera_config.record.retain_days is None:
- logger.warning(
- "The 'retain_days' config option has been DEPRECATED and will be removed in a future version. Please use the 'days' setting under 'retain'"
- )
- if camera_config.record.retain.days == 0:
- camera_config.record.retain.days = camera_config.record.retain_days
-
- # warning if the higher level record mode is potentially more restrictive than the events
- rank_map = {
- RetainModeEnum.all: 0,
- RetainModeEnum.motion: 1,
- RetainModeEnum.active_objects: 2,
- }
- if (
- camera_config.record.retain.days != 0
- and rank_map[camera_config.record.retain.mode]
- > rank_map[camera_config.record.events.retain.mode]
- ):
+ if camera_config.rtmp.enabled:
logger.warning(
- f"{name}: Recording retention is configured for {camera_config.record.retain.mode} and event retention is configured for {camera_config.record.events.retain.mode}. The more restrictive retention policy will be applied."
+ "RTMP restream is deprecated in favor of the restream role, recommend disabling RTMP."
)
- # generage the ffmpeg commands
+
+ # generate the ffmpeg commands
camera_config.create_ffmpeg_cmds()
config.cameras[name] = camera_config
+ # get list of unique enabled labels for tracking
+ enabled_labels = set(config.objects.track)
+
+ for _, camera in config.cameras.items():
+ enabled_labels.update(camera.objects.track)
+
+ config.model.create_colormap(sorted(enabled_labels))
+
+ for key, detector in config.detectors.items():
+ detector_config: DetectorConfig = parse_obj_as(DetectorConfig, detector)
+ if detector_config.model is None:
+ detector_config.model = config.model
+ else:
+ model = detector_config.model
+ schema = ModelConfig.schema()["properties"]
+ if (
+ model.width != schema["width"]["default"]
+ or model.height != schema["height"]["default"]
+ or model.labelmap_path is not None
+                or model.labelmap != {}
+ or model.input_tensor != schema["input_tensor"]["default"]
+ or model.input_pixel_format
+ != schema["input_pixel_format"]["default"]
+ ):
+ logger.warning(
+ "Customizing more than a detector model path is unsupported."
+ )
+ merged_model = deep_merge(
+ detector_config.model.dict(exclude_unset=True),
+ config.model.dict(exclude_unset=True),
+ )
+ detector_config.model = ModelConfig.parse_obj(merged_model)
+ config.detectors[key] = detector_config
+
return config
@validator("cameras")
@@ -935,8 +1065,13 @@ def parse_file(cls, config_file):
raw_config = f.read()
if config_file.endswith(YAML_EXT):
- config = yaml.safe_load(raw_config)
+ config = load_config_with_no_duplicates(raw_config)
elif config_file.endswith(".json"):
config = json.loads(raw_config)
return cls.parse_obj(config)
+
+ @classmethod
+ def parse_raw(cls, raw_config):
+ config = load_config_with_no_duplicates(raw_config)
+ return cls.parse_obj(config)
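
A hedged sketch of exercising the new parse_raw path with the duplicate-key guard; the camera name and stream URL are placeholders, and it assumes frigate plus its default label map are available (for example inside the dev container).

from frigate.config import FrigateConfig

raw = """
mqtt:
  enabled: False
cameras:
  front_door:
    ffmpeg:
      inputs:
        - path: rtsp://127.0.0.1:8554/front_door
          roles: [detect]
"""

config = FrigateConfig.parse_raw(raw)
print(config.cameras["front_door"].enabled)  # True by default
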
diff --git a/frigate/const.py b/frigate/const.py
--- a/frigate/const.py
+++ b/frigate/const.py
@@ -1,7 +1,23 @@
BASE_DIR = "/media/frigate"
CLIPS_DIR = f"{BASE_DIR}/clips"
RECORD_DIR = f"{BASE_DIR}/recordings"
+BIRDSEYE_PIPE = "/tmp/cache/birdseye"
CACHE_DIR = "/tmp/cache"
YAML_EXT = (".yaml", ".yml")
PLUS_ENV_VAR = "PLUS_API_KEY"
PLUS_API_HOST = "https://api.frigate.video"
+MAX_SEGMENT_DURATION = 600
+BTBN_PATH = "/usr/lib/btbn-ffmpeg"
+
+# Regex Consts
+
+REGEX_CAMERA_NAME = "^[a-zA-Z0-9_-]+$"
+REGEX_RTSP_CAMERA_USER_PASS = ":\/\/[a-zA-Z0-9_-]+:[\S]+@"
+REGEX_HTTP_CAMERA_USER_PASS = "user=[a-zA-Z0-9_-]+&password=[\S]+"
+
+# Known Driver Names
+
+DRIVER_ENV_VAR = "LIBVA_DRIVER_NAME"
+DRIVER_AMD = "radeonsi"
+DRIVER_INTEL_i965 = "i965"
+DRIVER_INTEL_iHD = "iHD"
diff --git a/frigate/detectors/__init__.py b/frigate/detectors/__init__.py
new file mode 100644
--- /dev/null
+++ b/frigate/detectors/__init__.py
@@ -0,0 +1,24 @@
+import logging
+
+from .detection_api import DetectionApi
+from .detector_config import (
+ PixelFormatEnum,
+ InputTensorEnum,
+ ModelConfig,
+)
+from .detector_types import DetectorTypeEnum, api_types, DetectorConfig
+
+
+logger = logging.getLogger(__name__)
+
+
+def create_detector(detector_config):
+ if detector_config.type == DetectorTypeEnum.cpu:
+ logger.warning(
+ "CPU detectors are not recommended and should only be used for testing or for trial purposes."
+ )
+
+ api = api_types.get(detector_config.type)
+ if not api:
+ raise ValueError(detector_config.type)
+ return api(detector_config)
diff --git a/frigate/detectors/detection_api.py b/frigate/detectors/detection_api.py
new file mode 100644
--- /dev/null
+++ b/frigate/detectors/detection_api.py
@@ -0,0 +1,17 @@
+import logging
+from abc import ABC, abstractmethod
+
+
+logger = logging.getLogger(__name__)
+
+
+class DetectionApi(ABC):
+ type_key: str
+
+ @abstractmethod
+ def __init__(self, detector_config):
+ pass
+
+ @abstractmethod
+ def detect_raw(self, tensor_input):
+ pass
diff --git a/frigate/detectors/detector_config.py b/frigate/detectors/detector_config.py
new file mode 100644
--- /dev/null
+++ b/frigate/detectors/detector_config.py
@@ -0,0 +1,90 @@
+import logging
+from enum import Enum
+from typing import Dict, List, Optional, Tuple, Union, Literal
+
+import matplotlib.pyplot as plt
+from pydantic import BaseModel, Extra, Field, validator
+from pydantic.fields import PrivateAttr
+
+from frigate.util import load_labels
+
+
+logger = logging.getLogger(__name__)
+
+
+class PixelFormatEnum(str, Enum):
+ rgb = "rgb"
+ bgr = "bgr"
+ yuv = "yuv"
+
+
+class InputTensorEnum(str, Enum):
+ nchw = "nchw"
+ nhwc = "nhwc"
+
+
+class ModelTypeEnum(str, Enum):
+ ssd = "ssd"
+ yolox = "yolox"
+ yolov5 = "yolov5"
+ yolov8 = "yolov8"
+
+
+class ModelConfig(BaseModel):
+ path: Optional[str] = Field(title="Custom Object detection model path.")
+ labelmap_path: Optional[str] = Field(title="Label map for custom object detector.")
+ width: int = Field(default=320, title="Object detection model input width.")
+ height: int = Field(default=320, title="Object detection model input height.")
+ labelmap: Dict[int, str] = Field(
+ default_factory=dict, title="Labelmap customization."
+ )
+ input_tensor: InputTensorEnum = Field(
+ default=InputTensorEnum.nhwc, title="Model Input Tensor Shape"
+ )
+ input_pixel_format: PixelFormatEnum = Field(
+ default=PixelFormatEnum.rgb, title="Model Input Pixel Color Format"
+ )
+ model_type: ModelTypeEnum = Field(
+ default=ModelTypeEnum.ssd, title="Object Detection Model Type"
+ )
+ _merged_labelmap: Optional[Dict[int, str]] = PrivateAttr()
+ _colormap: Dict[int, Tuple[int, int, int]] = PrivateAttr()
+
+ @property
+ def merged_labelmap(self) -> Dict[int, str]:
+ return self._merged_labelmap
+
+ @property
+ def colormap(self) -> Dict[int, Tuple[int, int, int]]:
+ return self._colormap
+
+ def __init__(self, **config):
+ super().__init__(**config)
+
+ self._merged_labelmap = {
+ **load_labels(config.get("labelmap_path", "/labelmap.txt")),
+ **config.get("labelmap", {}),
+ }
+ self._colormap = {}
+
+ def create_colormap(self, enabled_labels: set[str]) -> None:
+ """Get a list of colors for enabled labels."""
+ cmap = plt.cm.get_cmap("tab10", len(enabled_labels))
+
+ for key, val in enumerate(enabled_labels):
+ self._colormap[val] = tuple(int(round(255 * c)) for c in cmap(key)[:3])
+
+ class Config:
+ extra = Extra.forbid
+
+
+class BaseDetectorConfig(BaseModel):
+ # the type field must be defined in all subclasses
+ type: str = Field(default="cpu", title="Detector Type")
+ model: ModelConfig = Field(
+ default=None, title="Detector specific model configuration."
+ )
+
+ class Config:
+ extra = Extra.allow
+ arbitrary_types_allowed = True
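
The create_colormap() helper above simply assigns one tab10 color per enabled label. A standalone sketch of the same computation, mirroring the patch's own use of plt.cm.get_cmap; the label names are assumptions.

import matplotlib.pyplot as plt

enabled_labels = sorted({"car", "dog", "person"})

cmap = plt.cm.get_cmap("tab10", len(enabled_labels))
colormap = {
    label: tuple(int(round(255 * c)) for c in cmap(i)[:3])
    for i, label in enumerate(enabled_labels)
}

print(colormap["person"])  # an (R, G, B) tuple
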
diff --git a/frigate/detectors/detector_types.py b/frigate/detectors/detector_types.py
new file mode 100644
--- /dev/null
+++ b/frigate/detectors/detector_types.py
@@ -0,0 +1,42 @@
+import logging
+import importlib
+import pkgutil
+from typing import Union
+from typing_extensions import Annotated
+from enum import Enum
+from pydantic import Field
+
+from . import plugins
+from .detection_api import DetectionApi
+from .detector_config import BaseDetectorConfig
+
+
+logger = logging.getLogger(__name__)
+
+
+_included_modules = pkgutil.iter_modules(plugins.__path__, plugins.__name__ + ".")
+
+plugin_modules = []
+
+for _, name, _ in _included_modules:
+ try:
+ # currently openvino may fail when importing
+ # on an arm device with 64 KiB page size.
+ plugin_modules.append(importlib.import_module(name))
+ except ImportError as e:
+ logger.error(f"Error importing detector runtime: {e}")
+
+
+api_types = {det.type_key: det for det in DetectionApi.__subclasses__()}
+
+
+class StrEnum(str, Enum):
+ pass
+
+
+DetectorTypeEnum = StrEnum("DetectorTypeEnum", {k: k for k in api_types})
+
+DetectorConfig = Annotated[
+ Union[tuple(BaseDetectorConfig.__subclasses__())],
+ Field(discriminator="type"),
+]
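
Given the discovery above (every module under frigate/detectors/plugins is imported, DetectionApi subclasses are collected by type_key, and the config classes form a union discriminated on type), here is a hedged sketch of what a hypothetical plugin module would need to provide; "mydet" and the class names are made up.

from typing import Literal

import numpy as np

from frigate.detectors.detection_api import DetectionApi
from frigate.detectors.detector_config import BaseDetectorConfig

DETECTOR_KEY = "mydet"


class MyDetectorConfig(BaseDetectorConfig):
    type: Literal[DETECTOR_KEY]


class MyDetector(DetectionApi):
    type_key = DETECTOR_KEY

    def __init__(self, detector_config: MyDetectorConfig):
        # model is filled in by FrigateConfig.runtime_config before create_detector runs
        self.model_path = detector_config.model.path

    def detect_raw(self, tensor_input):
        # 20 rows of [class_id, score, y_min, x_min, y_max, x_max]
        return np.zeros((20, 6), np.float32)
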
diff --git a/frigate/detectors/plugins/__init__.py b/frigate/detectors/plugins/__init__.py
new file mode 100644
diff --git a/frigate/detectors/plugins/cpu_tfl.py b/frigate/detectors/plugins/cpu_tfl.py
new file mode 100644
--- /dev/null
+++ b/frigate/detectors/plugins/cpu_tfl.py
@@ -0,0 +1,64 @@
+import logging
+import numpy as np
+
+from frigate.detectors.detection_api import DetectionApi
+from frigate.detectors.detector_config import BaseDetectorConfig
+from typing import Literal
+from pydantic import Extra, Field
+
+try:
+ from tflite_runtime.interpreter import Interpreter
+except ModuleNotFoundError:
+ from tensorflow.lite.python.interpreter import Interpreter
+
+
+logger = logging.getLogger(__name__)
+
+DETECTOR_KEY = "cpu"
+
+
+class CpuDetectorConfig(BaseDetectorConfig):
+ type: Literal[DETECTOR_KEY]
+ num_threads: int = Field(default=3, title="Number of detection threads")
+
+
+class CpuTfl(DetectionApi):
+ type_key = DETECTOR_KEY
+
+ def __init__(self, detector_config: CpuDetectorConfig):
+ self.interpreter = Interpreter(
+ model_path=detector_config.model.path or "/cpu_model.tflite",
+ num_threads=detector_config.num_threads or 3,
+ )
+
+ self.interpreter.allocate_tensors()
+
+ self.tensor_input_details = self.interpreter.get_input_details()
+ self.tensor_output_details = self.interpreter.get_output_details()
+
+ def detect_raw(self, tensor_input):
+ self.interpreter.set_tensor(self.tensor_input_details[0]["index"], tensor_input)
+ self.interpreter.invoke()
+
+ boxes = self.interpreter.tensor(self.tensor_output_details[0]["index"])()[0]
+ class_ids = self.interpreter.tensor(self.tensor_output_details[1]["index"])()[0]
+ scores = self.interpreter.tensor(self.tensor_output_details[2]["index"])()[0]
+ count = int(
+ self.interpreter.tensor(self.tensor_output_details[3]["index"])()[0]
+ )
+
+ detections = np.zeros((20, 6), np.float32)
+
+ for i in range(count):
+ if scores[i] < 0.4 or i == 20:
+ break
+ detections[i] = [
+ class_ids[i],
+ float(scores[i]),
+ boxes[i][0],
+ boxes[i][1],
+ boxes[i][2],
+ boxes[i][3],
+ ]
+
+ return detections
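
The detect_raw() implementation above returns a fixed 20x6 array of [class_id, score, y_min, x_min, y_max, x_max] with relative coordinates (the EdgeTPU detector below uses the same layout). A small sketch of reading such an array back out; the labelmap contents and the sample row are assumptions.

import numpy as np

labelmap = {0: "person", 1: "car"}  # assumed subset of the merged labelmap

detections = np.zeros((20, 6), np.float32)
detections[0] = [0, 0.87, 0.10, 0.20, 0.55, 0.60]

for class_id, score, y_min, x_min, y_max, x_max in detections:
    if score == 0.0:  # remaining rows are padding
        break
    print(labelmap[int(class_id)], float(score), (float(x_min), float(y_min), float(x_max), float(y_max)))
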
diff --git a/frigate/detectors/plugins/edgetpu_tfl.py b/frigate/detectors/plugins/edgetpu_tfl.py
new file mode 100644
--- /dev/null
+++ b/frigate/detectors/plugins/edgetpu_tfl.py
@@ -0,0 +1,79 @@
+import logging
+import numpy as np
+
+from frigate.detectors.detection_api import DetectionApi
+from frigate.detectors.detector_config import BaseDetectorConfig
+from typing import Literal
+from pydantic import Extra, Field
+
+try:
+ from tflite_runtime.interpreter import Interpreter, load_delegate
+except ModuleNotFoundError:
+ from tensorflow.lite.python.interpreter import Interpreter, load_delegate
+
+
+logger = logging.getLogger(__name__)
+
+DETECTOR_KEY = "edgetpu"
+
+
+class EdgeTpuDetectorConfig(BaseDetectorConfig):
+ type: Literal[DETECTOR_KEY]
+ device: str = Field(default=None, title="Device Type")
+
+
+class EdgeTpuTfl(DetectionApi):
+ type_key = DETECTOR_KEY
+
+ def __init__(self, detector_config: EdgeTpuDetectorConfig):
+ device_config = {"device": "usb"}
+ if detector_config.device is not None:
+ device_config = {"device": detector_config.device}
+
+ edge_tpu_delegate = None
+
+ try:
+ logger.info(f"Attempting to load TPU as {device_config['device']}")
+ edge_tpu_delegate = load_delegate("libedgetpu.so.1.0", device_config)
+ logger.info("TPU found")
+ self.interpreter = Interpreter(
+ model_path=detector_config.model.path or "/edgetpu_model.tflite",
+ experimental_delegates=[edge_tpu_delegate],
+ )
+ except ValueError:
+ logger.error(
+ "No EdgeTPU was detected. If you do not have a Coral device yet, you must configure CPU detectors."
+ )
+ raise
+
+ self.interpreter.allocate_tensors()
+
+ self.tensor_input_details = self.interpreter.get_input_details()
+ self.tensor_output_details = self.interpreter.get_output_details()
+
+ def detect_raw(self, tensor_input):
+ self.interpreter.set_tensor(self.tensor_input_details[0]["index"], tensor_input)
+ self.interpreter.invoke()
+
+ boxes = self.interpreter.tensor(self.tensor_output_details[0]["index"])()[0]
+ class_ids = self.interpreter.tensor(self.tensor_output_details[1]["index"])()[0]
+ scores = self.interpreter.tensor(self.tensor_output_details[2]["index"])()[0]
+ count = int(
+ self.interpreter.tensor(self.tensor_output_details[3]["index"])()[0]
+ )
+
+ detections = np.zeros((20, 6), np.float32)
+
+ for i in range(count):
+ if scores[i] < 0.4 or i == 20:
+ break
+ detections[i] = [
+ class_ids[i],
+ float(scores[i]),
+ boxes[i][0],
+ boxes[i][1],
+ boxes[i][2],
+ boxes[i][3],
+ ]
+
+ return detections
diff --git a/frigate/detectors/plugins/openvino.py b/frigate/detectors/plugins/openvino.py
new file mode 100644
--- /dev/null
+++ b/frigate/detectors/plugins/openvino.py
@@ -0,0 +1,174 @@
+import logging
+import numpy as np
+import openvino.runtime as ov
+
+from frigate.detectors.detection_api import DetectionApi
+from frigate.detectors.detector_config import BaseDetectorConfig, ModelTypeEnum
+from typing import Literal
+from pydantic import Extra, Field
+
+
+logger = logging.getLogger(__name__)
+
+DETECTOR_KEY = "openvino"
+
+
+class OvDetectorConfig(BaseDetectorConfig):
+ type: Literal[DETECTOR_KEY]
+ device: str = Field(default=None, title="Device Type")
+
+
+class OvDetector(DetectionApi):
+ type_key = DETECTOR_KEY
+
+ def __init__(self, detector_config: OvDetectorConfig):
+ self.ov_core = ov.Core()
+ self.ov_model = self.ov_core.read_model(detector_config.model.path)
+ self.ov_model_type = detector_config.model.model_type
+
+ self.h = detector_config.model.height
+ self.w = detector_config.model.width
+
+ self.interpreter = self.ov_core.compile_model(
+ model=self.ov_model, device_name=detector_config.device
+ )
+
+ logger.info(f"Model Input Shape: {self.interpreter.input(0).shape}")
+ self.output_indexes = 0
+
+ while True:
+ try:
+ tensor_shape = self.interpreter.output(self.output_indexes).shape
+ logger.info(f"Model Output-{self.output_indexes} Shape: {tensor_shape}")
+ self.output_indexes += 1
+            except Exception:
+ logger.info(f"Model has {self.output_indexes} Output Tensors")
+ break
+ if self.ov_model_type == ModelTypeEnum.yolox:
+ self.num_classes = tensor_shape[2] - 5
+ logger.info(f"YOLOX model has {self.num_classes} classes")
+ self.set_strides_grids()
+
+ def set_strides_grids(self):
+ grids = []
+ expanded_strides = []
+
+ strides = [8, 16, 32]
+
+ hsizes = [self.h // stride for stride in strides]
+ wsizes = [self.w // stride for stride in strides]
+
+ for hsize, wsize, stride in zip(hsizes, wsizes, strides):
+ xv, yv = np.meshgrid(np.arange(wsize), np.arange(hsize))
+ grid = np.stack((xv, yv), 2).reshape(1, -1, 2)
+ grids.append(grid)
+ shape = grid.shape[:2]
+ expanded_strides.append(np.full((*shape, 1), stride))
+ self.grids = np.concatenate(grids, 1)
+ self.expanded_strides = np.concatenate(expanded_strides, 1)
+
+ ## Takes in class ID, confidence score, and array of [x, y, w, h] that describes detection position,
+ ## returns an array that's easily passable back to Frigate.
+ def process_yolo(self, class_id, conf, pos):
+ return [
+ class_id, # class ID
+ conf, # confidence score
+ (pos[1] - (pos[3] / 2)) / self.h, # y_min
+ (pos[0] - (pos[2] / 2)) / self.w, # x_min
+ (pos[1] + (pos[3] / 2)) / self.h, # y_max
+ (pos[0] + (pos[2] / 2)) / self.w, # x_max
+ ]
+
+ def detect_raw(self, tensor_input):
+ infer_request = self.interpreter.create_infer_request()
+ infer_request.infer([tensor_input])
+
+ if self.ov_model_type == ModelTypeEnum.ssd:
+ results = infer_request.get_output_tensor()
+
+ detections = np.zeros((20, 6), np.float32)
+ i = 0
+ for object_detected in results.data[0, 0, :]:
+ if object_detected[0] != -1:
+ logger.debug(object_detected)
+ if object_detected[2] < 0.1 or i == 20:
+ break
+ detections[i] = [
+ object_detected[1], # Label ID
+ float(object_detected[2]), # Confidence
+ object_detected[4], # y_min
+ object_detected[3], # x_min
+ object_detected[6], # y_max
+ object_detected[5], # x_max
+ ]
+ i += 1
+ return detections
+ elif self.ov_model_type == ModelTypeEnum.yolox:
+ out_tensor = infer_request.get_output_tensor()
+ # [x, y, h, w, box_score, class_no_1, ..., class_no_80],
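+            # decode the raw YOLOX output: xy are offsets from grid cells and wh are
+            # log-encoded, so both are scaled back up by each detection level's stride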
+ results = out_tensor.data
+ results[..., :2] = (results[..., :2] + self.grids) * self.expanded_strides
+ results[..., 2:4] = np.exp(results[..., 2:4]) * self.expanded_strides
+ image_pred = results[0, ...]
+
+ class_conf = np.max(
+ image_pred[:, 5 : 5 + self.num_classes], axis=1, keepdims=True
+ )
+ class_pred = np.argmax(image_pred[:, 5 : 5 + self.num_classes], axis=1)
+ class_pred = np.expand_dims(class_pred, axis=1)
+
+ conf_mask = (image_pred[:, 4] * class_conf.squeeze() >= 0.3).squeeze()
+ # Detections ordered as (x1, y1, x2, y2, obj_conf, class_conf, class_pred)
+ dets = np.concatenate((image_pred[:, :5], class_conf, class_pred), axis=1)
+ dets = dets[conf_mask]
+
+ ordered = dets[dets[:, 5].argsort()[::-1]][:20]
+
+ detections = np.zeros((20, 6), np.float32)
+
+ for i, object_detected in enumerate(ordered):
+ detections[i] = self.process_yolo(
+ object_detected[6], object_detected[5], object_detected[:4]
+ )
+ return detections
+ elif self.ov_model_type == ModelTypeEnum.yolov8:
+ out_tensor = infer_request.get_output_tensor()
+ results = out_tensor.data[0]
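+            # yolov8-style output is (4 box coords + per-class scores) x anchors;
+            # transpose so each row is one candidate detection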
+ output_data = np.transpose(results)
+ scores = np.max(output_data[:, 4:], axis=1)
+ if len(scores) == 0:
+ return np.zeros((20, 6), np.float32)
+ scores = np.expand_dims(scores, axis=1)
+ # add scores to the last column
+ dets = np.concatenate((output_data, scores), axis=1)
+ # filter out lines with scores below threshold
+ dets = dets[dets[:, -1] > 0.5, :]
+ # limit to top 20 scores, descending order
+ ordered = dets[dets[:, -1].argsort()[::-1]][:20]
+ detections = np.zeros((20, 6), np.float32)
+
+ for i, object_detected in enumerate(ordered):
+ detections[i] = self.process_yolo(
+ np.argmax(object_detected[4:-1]),
+ object_detected[-1],
+ object_detected[:4],
+ )
+ return detections
+ elif self.ov_model_type == ModelTypeEnum.yolov5:
+ out_tensor = infer_request.get_output_tensor()
+ output_data = out_tensor.data[0]
+ # filter out lines with scores below threshold
+ conf_mask = (output_data[:, 4] >= 0.5).squeeze()
+ output_data = output_data[conf_mask]
+ # limit to top 20 scores, descending order
+ ordered = output_data[output_data[:, 4].argsort()[::-1]][:20]
+
+ detections = np.zeros((20, 6), np.float32)
+
+ for i, object_detected in enumerate(ordered):
+ detections[i] = self.process_yolo(
+ np.argmax(object_detected[5:]),
+ object_detected[4],
+ object_detected[:4],
+ )
+ return detections
diff --git a/frigate/detectors/plugins/tensorrt.py b/frigate/detectors/plugins/tensorrt.py
new file mode 100644
--- /dev/null
+++ b/frigate/detectors/plugins/tensorrt.py
@@ -0,0 +1,312 @@
+import logging
+
+import ctypes
+import numpy as np
+
+try:
+ import tensorrt as trt
+ from cuda import cuda
+
+ TRT_SUPPORT = True
+except ModuleNotFoundError as e:
+ TRT_SUPPORT = False
+
+from frigate.detectors.detection_api import DetectionApi
+from frigate.detectors.detector_config import BaseDetectorConfig
+from typing import Literal
+from pydantic import Field
+
+logger = logging.getLogger(__name__)
+
+DETECTOR_KEY = "tensorrt"
+
+if TRT_SUPPORT:
+
+ class TrtLogger(trt.ILogger):
+ def __init__(self):
+ trt.ILogger.__init__(self)
+
+ def log(self, severity, msg):
+ logger.log(self.getSeverity(severity), msg)
+
+ def getSeverity(self, sev: trt.ILogger.Severity) -> int:
+ if sev == trt.ILogger.VERBOSE:
+ return logging.DEBUG
+ elif sev == trt.ILogger.INFO:
+ return logging.INFO
+ elif sev == trt.ILogger.WARNING:
+ return logging.WARNING
+ elif sev == trt.ILogger.ERROR:
+ return logging.ERROR
+ elif sev == trt.ILogger.INTERNAL_ERROR:
+ return logging.CRITICAL
+ else:
+ return logging.DEBUG
+
+
+class TensorRTDetectorConfig(BaseDetectorConfig):
+ type: Literal[DETECTOR_KEY]
+ device: int = Field(default=0, title="GPU Device Index")
+
+
+class HostDeviceMem(object):
+ """Simple helper data class that's a little nicer to use than a 2-tuple."""
+
+ def __init__(self, host_mem, device_mem, nbytes, size):
+ self.host = host_mem
+ err, self.host_dev = cuda.cuMemHostGetDevicePointer(self.host, 0)
+ self.device = device_mem
+ self.nbytes = nbytes
+ self.size = size
+
+ def __str__(self):
+ return "Host:\n" + str(self.host) + "\nDevice:\n" + str(self.device)
+
+ def __repr__(self):
+ return self.__str__()
+
+ def __del__(self):
+ cuda.cuMemFreeHost(self.host)
+ cuda.cuMemFree(self.device)
+
+
+class TensorRtDetector(DetectionApi):
+ type_key = DETECTOR_KEY
+
+ def _load_engine(self, model_path):
+ try:
+ trt.init_libnvinfer_plugins(self.trt_logger, "")
+
+ ctypes.cdll.LoadLibrary("/trt-models/libyolo_layer.so")
+ except OSError as e:
+ logger.error(
+ "ERROR: failed to load libraries. %s",
+ e,
+ )
+
+ with open(model_path, "rb") as f, trt.Runtime(self.trt_logger) as runtime:
+ return runtime.deserialize_cuda_engine(f.read())
+
+ def _get_input_shape(self):
+ """Get input shape of the TensorRT YOLO engine."""
+ binding = self.engine[0]
+ assert self.engine.binding_is_input(binding)
+ binding_dims = self.engine.get_binding_shape(binding)
+ if len(binding_dims) == 4:
+ return (
+ tuple(binding_dims[2:]),
+ trt.nptype(self.engine.get_binding_dtype(binding)),
+ )
+ elif len(binding_dims) == 3:
+ return (
+ tuple(binding_dims[1:]),
+ trt.nptype(self.engine.get_binding_dtype(binding)),
+ )
+ else:
+ raise ValueError(
+ "bad dims of binding %s: %s" % (binding, str(binding_dims))
+ )
+
+ def _allocate_buffers(self):
+ """Allocates all host/device in/out buffers required for an engine."""
+ inputs = []
+ outputs = []
+ bindings = []
+ output_idx = 0
+ for binding in self.engine:
+ binding_dims = self.engine.get_binding_shape(binding)
+ if len(binding_dims) == 4:
+ # explicit batch case (TensorRT 7+)
+ size = trt.volume(binding_dims)
+ elif len(binding_dims) == 3:
+ # implicit batch case (TensorRT 6 or older)
+ size = trt.volume(binding_dims) * self.engine.max_batch_size
+ else:
+ raise ValueError(
+ "bad dims of binding %s: %s" % (binding, str(binding_dims))
+ )
+ nbytes = size * self.engine.get_binding_dtype(binding).itemsize
+ # Allocate host and device buffers
+ err, host_mem = cuda.cuMemHostAlloc(
+ nbytes, Flags=cuda.CU_MEMHOSTALLOC_DEVICEMAP
+ )
+ assert err is cuda.CUresult.CUDA_SUCCESS, f"cuMemAllocHost returned {err}"
+ logger.debug(
+ f"Allocated Tensor Binding {binding} Memory {nbytes} Bytes ({size} * {self.engine.get_binding_dtype(binding)})"
+ )
+ err, device_mem = cuda.cuMemAlloc(nbytes)
+ assert err is cuda.CUresult.CUDA_SUCCESS, f"cuMemAlloc returned {err}"
+ # Append the device buffer to device bindings.
+ bindings.append(int(device_mem))
+ # Append to the appropriate list.
+ if self.engine.binding_is_input(binding):
+ logger.debug(f"Input has Shape {binding_dims}")
+ inputs.append(HostDeviceMem(host_mem, device_mem, nbytes, size))
+ else:
+ # each grid has 3 anchors, each anchor generates a detection
+ # output of 7 float32 values
+ assert size % 7 == 0, f"output size was {size}"
+ logger.debug(f"Output has Shape {binding_dims}")
+ outputs.append(HostDeviceMem(host_mem, device_mem, nbytes, size))
+ output_idx += 1
+ assert len(inputs) == 1, f"inputs len was {len(inputs)}"
+ assert len(outputs) == 1, f"output len was {len(outputs)}"
+ return inputs, outputs, bindings
+
+ def _do_inference(self):
+ """do_inference (for TensorRT 7.0+)
+ This function is generalized for multiple inputs/outputs for full
+ dimension networks.
+ Inputs and outputs are expected to be lists of HostDeviceMem objects.
+ """
+ # Push CUDA Context
+ cuda.cuCtxPushCurrent(self.cu_ctx)
+
+ # Transfer input data to the GPU.
+ [
+ cuda.cuMemcpyHtoDAsync(inp.device, inp.host, inp.nbytes, self.stream)
+ for inp in self.inputs
+ ]
+
+ # Run inference.
+ if not self.context.execute_async_v2(
+ bindings=self.bindings, stream_handle=self.stream
+ ):
+            logger.warning("Execute returned false")
+
+ # Transfer predictions back from the GPU.
+ [
+ cuda.cuMemcpyDtoHAsync(out.host, out.device, out.nbytes, self.stream)
+ for out in self.outputs
+ ]
+
+ # Synchronize the stream
+ cuda.cuStreamSynchronize(self.stream)
+
+ # Pop CUDA Context
+ cuda.cuCtxPopCurrent()
+
+ # Return only the host outputs.
+ return [
+ np.array(
+ (ctypes.c_float * out.size).from_address(out.host), dtype=np.float32
+ )
+ for out in self.outputs
+ ]
+
+ def __init__(self, detector_config: TensorRTDetectorConfig):
+ assert (
+ TRT_SUPPORT
+ ), f"TensorRT libraries not found, {DETECTOR_KEY} detector not present"
+
+ (cuda_err,) = cuda.cuInit(0)
+ assert (
+ cuda_err == cuda.CUresult.CUDA_SUCCESS
+ ), f"Failed to initialize cuda {cuda_err}"
+ err, dev_count = cuda.cuDeviceGetCount()
+ logger.debug(f"Num Available Devices: {dev_count}")
+ assert (
+ detector_config.device < dev_count
+ ), f"Invalid TensorRT Device Config. Device {detector_config.device} Invalid."
+ err, self.cu_ctx = cuda.cuCtxCreate(
+ cuda.CUctx_flags.CU_CTX_MAP_HOST, detector_config.device
+ )
+
+ self.conf_th = 0.4 ##TODO: model config parameter
+ self.nms_threshold = 0.4
+ err, self.stream = cuda.cuStreamCreate(0)
+ self.trt_logger = TrtLogger()
+ self.engine = self._load_engine(detector_config.model.path)
+ self.input_shape = self._get_input_shape()
+
+ try:
+ self.context = self.engine.create_execution_context()
+ (
+ self.inputs,
+ self.outputs,
+ self.bindings,
+ ) = self._allocate_buffers()
+ except Exception as e:
+ logger.error(e)
+ raise RuntimeError("fail to allocate CUDA resources") from e
+
+ logger.debug("TensorRT loaded. Input shape is %s", self.input_shape)
+ logger.debug("TensorRT version is %s", trt.__version__[0])
+
+ def __del__(self):
+ """Free CUDA memories."""
+ if self.outputs is not None:
+ del self.outputs
+ if self.inputs is not None:
+ del self.inputs
+ if self.stream is not None:
+ cuda.cuStreamDestroy(self.stream)
+ del self.stream
+ del self.engine
+ del self.context
+ del self.trt_logger
+ cuda.cuCtxDestroy(self.cu_ctx)
+
+ def _postprocess_yolo(self, trt_outputs, conf_th):
+ """Postprocess TensorRT outputs.
+ # Args
+ trt_outputs: a list of 2 or 3 tensors, where each tensor
+ contains a multiple of 7 float32 numbers in
+ the order of [x, y, w, h, box_confidence, class_id, class_prob]
+ conf_th: confidence threshold
+ # Returns
+ boxes, scores, classes
+ """
+ # filter low-conf detections and concatenate results of all yolo layers
+ detections = []
+ for o in trt_outputs:
+ dets = o.reshape((-1, 7))
+ dets = dets[dets[:, 4] * dets[:, 6] >= conf_th]
+ detections.append(dets)
+ detections = np.concatenate(detections, axis=0)
+
+ return detections
+
+ def detect_raw(self, tensor_input):
+ # Input tensor has the shape of the [height, width, 3]
+ # Output tensor of float32 of shape [20, 6] where:
+        # 0 - class id
+ # 1 - score
+ # 2..5 - a value between 0 and 1 of the box: [top, left, bottom, right]
+
+ # normalize
+ if self.input_shape[-1] != trt.int8:
+ tensor_input = tensor_input.astype(self.input_shape[-1])
+ tensor_input /= 255.0
+
+ self.inputs[0].host = np.ascontiguousarray(
+ tensor_input.astype(self.input_shape[-1])
+ )
+ trt_outputs = self._do_inference()
+
+ raw_detections = self._postprocess_yolo(trt_outputs, self.conf_th)
+
+ if len(raw_detections) == 0:
+ return np.zeros((20, 6), np.float32)
+
+ # raw_detections: Nx7 numpy arrays of
+ # [[x, y, w, h, box_confidence, class_id, class_prob],
+
+ # Calculate score as box_confidence x class_prob
+ raw_detections[:, 4] = raw_detections[:, 4] * raw_detections[:, 6]
+ # Reorder elements by the score, best on top, remove class_prob
+ ordered = raw_detections[raw_detections[:, 4].argsort()[::-1]][:, 0:6]
+ # transform width to right with clamp to 0..1
+ ordered[:, 2] = np.clip(ordered[:, 2] + ordered[:, 0], 0, 1)
+ # transform height to bottom with clamp to 0..1
+ ordered[:, 3] = np.clip(ordered[:, 3] + ordered[:, 1], 0, 1)
+ # put result into the correct order and limit to top 20
+ detections = ordered[:, [5, 4, 1, 0, 3, 2]][:20]
+ # pad to 20x6 shape
+ append_cnt = 20 - len(detections)
+ if append_cnt > 0:
+ detections = np.append(
+ detections, np.zeros((append_cnt, 6), np.float32), axis=0
+ )
+
+ return detections
diff --git a/frigate/events.py b/frigate/events.py
--- a/frigate/events.py
+++ b/frigate/events.py
@@ -11,43 +11,55 @@
from frigate.config import EventsConfig, FrigateConfig, RecordConfig
from frigate.const import CLIPS_DIR
from frigate.models import Event
+from frigate.types import CameraMetricsTypes
+
+from multiprocessing.queues import Queue
+from multiprocessing.synchronize import Event as MpEvent
+from typing import Dict
logger = logging.getLogger(__name__)
-def should_insert_db(prev_event, current_event):
+def should_insert_db(prev_event: Event, current_event: Event) -> bool:
"""If current event has new clip or snapshot."""
return (not prev_event["has_clip"] and not prev_event["has_snapshot"]) and (
current_event["has_clip"] or current_event["has_snapshot"]
)
-def should_update_db(prev_event, current_event):
+def should_update_db(prev_event: Event, current_event: Event) -> bool:
"""If current_event has updated fields and (clip or snapshot)."""
- return (current_event["has_clip"] or current_event["has_snapshot"]) and (
- prev_event["top_score"] != current_event["top_score"]
- or prev_event["entered_zones"] != current_event["entered_zones"]
- or prev_event["thumbnail"] != current_event["thumbnail"]
- or prev_event["has_clip"] != current_event["has_clip"]
- or prev_event["has_snapshot"] != current_event["has_snapshot"]
- )
+ if current_event["has_clip"] or current_event["has_snapshot"]:
+ if (
+ prev_event["top_score"] != current_event["top_score"]
+ or prev_event["entered_zones"] != current_event["entered_zones"]
+ or prev_event["thumbnail"] != current_event["thumbnail"]
+ or prev_event["has_clip"] != current_event["has_clip"]
+ or prev_event["has_snapshot"] != current_event["has_snapshot"]
+ ):
+ return True
+ return False
class EventProcessor(threading.Thread):
def __init__(
- self, config, camera_processes, event_queue, event_processed_queue, stop_event
+ self,
+ config: FrigateConfig,
+ camera_processes: dict[str, CameraMetricsTypes],
+ event_queue: Queue,
+ event_processed_queue: Queue,
+ stop_event: MpEvent,
):
threading.Thread.__init__(self)
self.name = "event_processor"
self.config = config
self.camera_processes = camera_processes
- self.cached_clips = {}
self.event_queue = event_queue
self.event_processed_queue = event_processed_queue
- self.events_in_process = {}
+ self.events_in_process: Dict[str, Event] = {}
self.stop_event = stop_event
- def run(self):
+ def run(self) -> None:
# set an end_time on events without an end_time on startup
Event.update(end_time=Event.start_time + 30).where(
Event.end_time == None
@@ -55,7 +67,7 @@ def run(self):
while not self.stop_event.is_set():
try:
- event_type, camera, event_data = self.event_queue.get(timeout=10)
+ event_type, camera, event_data = self.event_queue.get(timeout=1)
except queue.Empty:
continue
@@ -147,14 +159,15 @@ def run(self):
class EventCleanup(threading.Thread):
- def __init__(self, config: FrigateConfig, stop_event):
+ def __init__(self, config: FrigateConfig, stop_event: MpEvent):
threading.Thread.__init__(self)
self.name = "event_cleanup"
self.config = config
self.stop_event = stop_event
self.camera_keys = list(self.config.cameras.keys())
- def expire(self, media_type):
+ def expire(self, media_type: str) -> None:
+ # TODO: Refactor media_type to enum
## Expire events from unlisted cameras based on the global config
if media_type == "clips":
retain_config = self.config.record.events.retain
@@ -253,7 +266,7 @@ def expire(self, media_type):
)
update_query.execute()
- def purge_duplicates(self):
+ def purge_duplicates(self) -> None:
duplicate_query = """with grouped_events as (
select id,
label,
@@ -287,7 +300,7 @@ def purge_duplicates(self):
.execute()
)
- def run(self):
+ def run(self) -> None:
# only expire events every 5 minutes
while not self.stop_event.wait(300):
self.expire("clips")
diff --git a/frigate/ffmpeg_presets.py b/frigate/ffmpeg_presets.py
new file mode 100644
--- /dev/null
+++ b/frigate/ffmpeg_presets.py
@@ -0,0 +1,456 @@
+"""Handles inserting and maintaining ffmpeg presets."""
+
+import logging
+import os
+
+from typing import Any
+
+from frigate.version import VERSION
+from frigate.const import BTBN_PATH
+from frigate.util import vainfo_hwaccel
+
+
+logger = logging.getLogger(__name__)
+
+
+class LibvaGpuSelector:
+ "Automatically selects the correct libva GPU."
+
+ _selected_gpu = None
+
+ def get_selected_gpu(self) -> str:
+ """Get selected libva GPU."""
+ if not os.path.exists("/dev/dri"):
+ return ""
+
+ if self._selected_gpu:
+ return self._selected_gpu
+
+ devices = list(filter(lambda d: d.startswith("render"), os.listdir("/dev/dri")))
+
+ if len(devices) < 2:
+ self._selected_gpu = "/dev/dri/renderD128"
+ return self._selected_gpu
+
+ for device in devices:
+ check = vainfo_hwaccel(device_name=device)
+
+ logger.debug(f"{device} return vainfo status code: {check.returncode}")
+
+ if check.returncode == 0:
+ self._selected_gpu = f"/dev/dri/{device}"
+ return self._selected_gpu
+
+ return ""
+
+
+TIMEOUT_PARAM = "-timeout" if os.path.exists(BTBN_PATH) else "-stimeout"
+
+_gpu_selector = LibvaGpuSelector()
+_user_agent_args = [
+ "-user_agent",
+ f"FFmpeg Frigate/{VERSION}",
+]
+
+PRESETS_HW_ACCEL_DECODE = {
+ "preset-rpi-32-h264": ["-c:v", "h264_v4l2m2m"],
+ "preset-rpi-64-h264": ["-c:v", "h264_v4l2m2m"],
+ "preset-vaapi": [
+ "-hwaccel_flags",
+ "allow_profile_mismatch",
+ "-hwaccel",
+ "vaapi",
+ "-hwaccel_device",
+ _gpu_selector.get_selected_gpu(),
+ "-hwaccel_output_format",
+ "vaapi",
+ ],
+ "preset-intel-qsv-h264": [
+ "-hwaccel",
+ "qsv",
+ "-qsv_device",
+ _gpu_selector.get_selected_gpu(),
+ "-hwaccel_output_format",
+ "qsv",
+ "-c:v",
+ "h264_qsv",
+ ],
+ "preset-intel-qsv-h265": [
+ "-load_plugin",
+ "hevc_hw",
+ "-hwaccel",
+ "qsv",
+ "-qsv_device",
+ _gpu_selector.get_selected_gpu(),
+ "-hwaccel_output_format",
+ "qsv",
+ "-c:v",
+ "hevc_qsv",
+ ],
+ "preset-nvidia-h264": [
+ "-hwaccel",
+ "cuda",
+ "-hwaccel_output_format",
+ "cuda",
+ ],
+ "preset-nvidia-h265": [
+ "-hwaccel",
+ "cuda",
+ "-hwaccel_output_format",
+ "cuda",
+ ],
+ "preset-nvidia-mjpeg": [
+ "-hwaccel",
+ "cuda",
+ "-hwaccel_output_format",
+ "cuda",
+ ],
+}
+
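+# Scale presets are format strings filled in by parse_preset_hardware_acceleration_scale:
+# {0} = detect fps, {1} = detect width, {2} = detect height.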
+PRESETS_HW_ACCEL_SCALE = {
+ "preset-rpi-32-h264": "-r {0} -s {1}x{2}",
+ "preset-rpi-64-h264": "-r {0} -s {1}x{2}",
+ "preset-vaapi": "-r {0} -vf fps={0},scale_vaapi=w={1}:h={2},hwdownload,format=yuv420p",
+ "preset-intel-qsv-h264": "-r {0} -vf vpp_qsv=framerate={0}:w={1}:h={2}:format=nv12,hwdownload,format=nv12,format=yuv420p",
+ "preset-intel-qsv-h265": "-r {0} -vf vpp_qsv=framerate={0}:w={1}:h={2}:format=nv12,hwdownload,format=nv12,format=yuv420p",
+ "preset-nvidia-h264": "-r {0} -vf fps={0},scale_cuda=w={1}:h={2}:format=nv12,hwdownload,format=nv12,format=yuv420p",
+ "preset-nvidia-h265": "-r {0} -vf fps={0},scale_cuda=w={1}:h={2}:format=nv12,hwdownload,format=nv12,format=yuv420p",
+ "default": "-r {0} -s {1}x{2}",
+}
+
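+# Encode presets are full command templates: {0} = input args, {1} = output args,
+# {2} = the selected render device (only referenced by the vaapi preset).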
+PRESETS_HW_ACCEL_ENCODE = {
+ "preset-rpi-32-h264": "ffmpeg -hide_banner {0} -c:v h264_v4l2m2m {1}",
+ "preset-rpi-64-h264": "ffmpeg -hide_banner {0} -c:v h264_v4l2m2m {1}",
+ "preset-vaapi": "ffmpeg -hide_banner -hwaccel vaapi -hwaccel_output_format vaapi -hwaccel_device {2} {0} -c:v h264_vaapi -g 50 -bf 0 -profile:v high -level:v 4.1 -sei:v 0 -an -vf format=vaapi|nv12,hwupload {1}",
+ "preset-intel-qsv-h264": "ffmpeg -hide_banner {0} -c:v h264_qsv -g 50 -bf 0 -profile:v high -level:v 4.1 -async_depth:v 1 {1}",
+ "preset-intel-qsv-h265": "ffmpeg -hide_banner {0} -c:v h264_qsv -g 50 -bf 0 -profile:v high -level:v 4.1 -async_depth:v 1 {1}",
+ "preset-nvidia-h264": "ffmpeg -hide_banner {0} -c:v h264_nvenc -g 50 -profile:v high -level:v auto -preset:v p2 -tune:v ll {1}",
+ "preset-nvidia-h265": "ffmpeg -hide_banner {0} -c:v h264_nvenc -g 50 -profile:v high -level:v auto -preset:v p2 -tune:v ll {1}",
+ "default": "ffmpeg -hide_banner {0} -c:v libx264 -g 50 -profile:v high -level:v 4.1 -preset:v superfast -tune:v zerolatency {1}",
+}
+
+
+def parse_preset_hardware_acceleration_decode(arg: Any) -> list[str]:
+ """Return the correct preset if in preset format otherwise return None."""
+ if not isinstance(arg, str):
+ return None
+
+ return PRESETS_HW_ACCEL_DECODE.get(arg, None)
+
+
+def parse_preset_hardware_acceleration_scale(
+ arg: Any,
+ detect_args: list[str],
+ fps: int,
+ width: int,
+ height: int,
+) -> list[str]:
+ """Return the correct scaling preset or default preset if none is set."""
+ if not isinstance(arg, str) or " " in arg:
+ scale = PRESETS_HW_ACCEL_SCALE["default"].format(fps, width, height).split(" ")
+ scale.extend(detect_args)
+ return scale
+
+ scale = PRESETS_HW_ACCEL_SCALE.get(arg, "")
+
+ if scale:
+ scale = scale.format(fps, width, height).split(" ")
+ scale.extend(detect_args)
+ return scale
+    else:
+        # unknown preset name: fall back to the default software scale args
+        scale = PRESETS_HW_ACCEL_SCALE["default"].format(fps, width, height).split(" ")
+        scale.extend(detect_args)
+        return scale
+
+
+def parse_preset_hardware_acceleration_encode(arg: Any, input: str, output: str) -> str:
+ """Return the correct scaling preset or default preset if none is set."""
+ if not isinstance(arg, str):
+ return PRESETS_HW_ACCEL_ENCODE["default"].format(input, output)
+
+ return PRESETS_HW_ACCEL_ENCODE.get(arg, PRESETS_HW_ACCEL_ENCODE["default"]).format(
+ input,
+ output,
+ _gpu_selector.get_selected_gpu(),
+ )
+
+
+PRESETS_INPUT = {
+ "preset-http-jpeg-generic": _user_agent_args
+ + [
+ "-r",
+ "{}",
+ "-stream_loop",
+ "-1",
+ "-f",
+ "image2",
+ "-avoid_negative_ts",
+ "make_zero",
+ "-fflags",
+ "nobuffer",
+ "-flags",
+ "low_delay",
+ "-strict",
+ "experimental",
+ "-fflags",
+ "+genpts+discardcorrupt",
+ "-use_wallclock_as_timestamps",
+ "1",
+ ],
+ "preset-http-mjpeg-generic": _user_agent_args
+ + [
+ "-avoid_negative_ts",
+ "make_zero",
+ "-fflags",
+ "nobuffer",
+ "-flags",
+ "low_delay",
+ "-strict",
+ "experimental",
+ "-fflags",
+ "+genpts+discardcorrupt",
+ "-use_wallclock_as_timestamps",
+ "1",
+ ],
+ "preset-http-reolink": _user_agent_args
+ + [
+ "-avoid_negative_ts",
+ "make_zero",
+ "-fflags",
+ "+genpts+discardcorrupt",
+ "-flags",
+ "low_delay",
+ "-strict",
+ "experimental",
+ "-analyzeduration",
+ "1000M",
+ "-probesize",
+ "1000M",
+ "-rw_timeout",
+ "5000000",
+ ],
+ "preset-rtmp-generic": [
+ "-avoid_negative_ts",
+ "make_zero",
+ "-fflags",
+ "nobuffer",
+ "-flags",
+ "low_delay",
+ "-strict",
+ "experimental",
+ "-fflags",
+ "+genpts+discardcorrupt",
+ "-rw_timeout",
+ "5000000",
+ "-use_wallclock_as_timestamps",
+ "1",
+ "-f",
+ "live_flv",
+ ],
+ "preset-rtsp-generic": _user_agent_args
+ + [
+ "-avoid_negative_ts",
+ "make_zero",
+ "-fflags",
+ "+genpts+discardcorrupt",
+ "-rtsp_transport",
+ "tcp",
+ TIMEOUT_PARAM,
+ "5000000",
+ "-use_wallclock_as_timestamps",
+ "1",
+ ],
+ "preset-rtsp-restream": _user_agent_args
+ + [
+ "-rtsp_transport",
+ "tcp",
+ TIMEOUT_PARAM,
+ "5000000",
+ ],
+ "preset-rtsp-restream-low-latency": _user_agent_args
+ + [
+ "-rtsp_transport",
+ "tcp",
+ TIMEOUT_PARAM,
+ "5000000",
+ "-fflags",
+ "nobuffer",
+ "-flags",
+ "low_delay",
+ ],
+ "preset-rtsp-udp": _user_agent_args
+ + [
+ "-avoid_negative_ts",
+ "make_zero",
+ "-fflags",
+ "+genpts+discardcorrupt",
+ "-rtsp_transport",
+ "udp",
+ TIMEOUT_PARAM,
+ "5000000",
+ "-use_wallclock_as_timestamps",
+ "1",
+ ],
+ "preset-rtsp-blue-iris": _user_agent_args
+ + [
+ "-user_agent",
+ f"FFmpeg Frigate/{VERSION}",
+ "-avoid_negative_ts",
+ "make_zero",
+ "-flags",
+ "low_delay",
+ "-strict",
+ "experimental",
+ "-fflags",
+ "+genpts+discardcorrupt",
+ "-rtsp_transport",
+ "tcp",
+ TIMEOUT_PARAM,
+ "5000000",
+ "-use_wallclock_as_timestamps",
+ "1",
+ ],
+}
+
+
+def parse_preset_input(arg: Any, detect_fps: int) -> list[str]:
+ """Return the correct preset if in preset format otherwise return None."""
+ if not isinstance(arg, str):
+ return None
+
+ if arg == "preset-http-jpeg-generic":
+ input = PRESETS_INPUT[arg].copy()
+        # fill the "-r" placeholder (the element right after "-r") with the camera's detect fps
+        input[input.index("-r") + 1] = str(detect_fps)
+ return input
+
+ return PRESETS_INPUT.get(arg, None)
+
+
+PRESETS_RECORD_OUTPUT = {
+ "preset-record-generic": [
+ "-f",
+ "segment",
+ "-segment_time",
+ "10",
+ "-segment_format",
+ "mp4",
+ "-reset_timestamps",
+ "1",
+ "-strftime",
+ "1",
+ "-c",
+ "copy",
+ "-an",
+ ],
+ "preset-record-generic-audio-aac": [
+ "-f",
+ "segment",
+ "-segment_time",
+ "10",
+ "-segment_format",
+ "mp4",
+ "-reset_timestamps",
+ "1",
+ "-strftime",
+ "1",
+ "-c:v",
+ "copy",
+ "-c:a",
+ "aac",
+ ],
+ "preset-record-generic-audio-copy": [
+ "-f",
+ "segment",
+ "-segment_time",
+ "10",
+ "-segment_format",
+ "mp4",
+ "-reset_timestamps",
+ "1",
+ "-strftime",
+ "1",
+ "-c",
+ "copy",
+ ],
+ "preset-record-mjpeg": [
+ "-f",
+ "segment",
+ "-segment_time",
+ "10",
+ "-segment_format",
+ "mp4",
+ "-reset_timestamps",
+ "1",
+ "-strftime",
+ "1",
+ "-c:v",
+ "libx264",
+ "-an",
+ ],
+ "preset-record-jpeg": [
+ "-f",
+ "segment",
+ "-segment_time",
+ "10",
+ "-segment_format",
+ "mp4",
+ "-reset_timestamps",
+ "1",
+ "-strftime",
+ "1",
+ "-c:v",
+ "libx264",
+ "-an",
+ ],
+ "preset-record-ubiquiti": [
+ "-f",
+ "segment",
+ "-segment_time",
+ "10",
+ "-segment_format",
+ "mp4",
+ "-reset_timestamps",
+ "1",
+ "-strftime",
+ "1",
+ "-c:v",
+ "copy",
+ "-ar",
+ "44100",
+ "-c:a",
+ "aac",
+ ],
+}
+
+
+def parse_preset_output_record(arg: Any) -> list[str]:
+ """Return the correct preset if in preset format otherwise return None."""
+ if not isinstance(arg, str):
+ return None
+
+ return PRESETS_RECORD_OUTPUT.get(arg, None)
+
+
+PRESETS_RTMP_OUTPUT = {
+ "preset-rtmp-generic": ["-c", "copy", "-f", "flv"],
+ "preset-rtmp-mjpeg": ["-c:v", "libx264", "-an", "-f", "flv"],
+ "preset-rtmp-jpeg": ["-c:v", "libx264", "-an", "-f", "flv"],
+ "preset-rtmp-ubiquiti": [
+ "-c:v",
+ "copy",
+ "-f",
+ "flv",
+ "-ar",
+ "44100",
+ "-c:a",
+ "aac",
+ ],
+}
+
+
+def parse_preset_output_rtmp(arg: Any) -> list[str]:
+ """Return the correct preset if in preset format otherwise return None."""
+ if not isinstance(arg, str):
+ return None
+
+ return PRESETS_RTMP_OUTPUT.get(arg, None)
diff --git a/frigate/http.py b/frigate/http.py
--- a/frigate/http.py
+++ b/frigate/http.py
@@ -1,13 +1,18 @@
import base64
-from collections import OrderedDict
-from datetime import datetime, timedelta
+from datetime import datetime, timedelta, timezone
import copy
+import glob
import logging
+import json
import os
import subprocess as sp
+import pytz
import time
+import traceback
+
from functools import reduce
from pathlib import Path
+from tzlocal import get_localzone_name
from urllib.parse import unquote
import cv2
@@ -26,10 +31,19 @@
from peewee import SqliteDatabase, operator, fn, DoesNotExist
from playhouse.shortcuts import model_to_dict
-from frigate.const import CLIPS_DIR
+from frigate.config import FrigateConfig
+from frigate.const import CLIPS_DIR, MAX_SEGMENT_DURATION, RECORD_DIR
from frigate.models import Event, Recordings
-from frigate.object_processing import TrackedObject, TrackedObjectProcessor
+from frigate.object_processing import TrackedObject
from frigate.stats import stats_snapshot
+from frigate.util import (
+ clean_camera_user_pass,
+ ffprobe_stream,
+ restart_frigate,
+ vainfo_hwaccel,
+ get_tz_modifiers,
+)
+from frigate.storage import StorageMaintainer
from frigate.version import VERSION
logger = logging.getLogger(__name__)
@@ -42,6 +56,7 @@ def create_app(
database: SqliteDatabase,
stats_tracking,
detected_frames_processor,
+ storage_maintainer: StorageMaintainer,
plus_api,
):
app = Flask(__name__)
@@ -59,7 +74,10 @@ def _db_close(exc):
app.frigate_config = frigate_config
app.stats_tracking = stats_tracking
app.detected_frames_processor = detected_frames_processor
+ app.storage_maintainer = storage_maintainer
app.plus_api = plus_api
+ app.camera_error_image = None
+ app.hwaccel_errors = []
app.register_blueprint(bp)
@@ -73,6 +91,8 @@ def is_healthy():
@bp.route("/events/summary")
def events_summary():
+ tz_name = request.args.get("timezone", default="utc", type=str)
+ hour_modifier, minute_modifier = get_tz_modifiers(tz_name)
has_clip = request.args.get("has_clip", type=int)
has_snapshot = request.args.get("has_snapshot", type=int)
@@ -91,8 +111,12 @@ def events_summary():
Event.select(
Event.camera,
Event.label,
+ Event.sub_label,
fn.strftime(
- "%Y-%m-%d", fn.datetime(Event.start_time, "unixepoch", "localtime")
+ "%Y-%m-%d",
+ fn.datetime(
+ Event.start_time, "unixepoch", hour_modifier, minute_modifier
+ ),
).alias("day"),
Event.zones,
fn.COUNT(Event.id).alias("count"),
@@ -101,8 +125,12 @@ def events_summary():
.group_by(
Event.camera,
Event.label,
+ Event.sub_label,
fn.strftime(
- "%Y-%m-%d", fn.datetime(Event.start_time, "unixepoch", "localtime")
+ "%Y-%m-%d",
+ fn.datetime(
+ Event.start_time, "unixepoch", hour_modifier, minute_modifier
+ ),
),
Event.zones,
)
@@ -158,6 +186,18 @@ def send_to_plus(id):
logger.error(message)
return make_response(jsonify({"success": False, "message": message}), 404)
+ if event.end_time is None:
+ logger.error(f"Unable to load clean png for in-progress event: {event.id}")
+ return make_response(
+ jsonify(
+ {
+ "success": False,
+ "message": "Unable to load clean png for in-progress event",
+ }
+ ),
+ 400,
+ )
+
if event.plus_id:
message = "Already submitted to plus"
logger.error(message)
@@ -176,6 +216,15 @@ def send_to_plus(id):
400,
)
+ if image is None or image.size == 0:
+ logger.error(f"Unable to load clean png for event: {event.id}")
+ return make_response(
+ jsonify(
+ {"success": False, "message": "Unable to load clean png for event"}
+ ),
+ 400,
+ )
+
try:
plus_id = current_app.plus_api.upload_image(image, event.camera)
except Exception as ex:
@@ -260,6 +309,8 @@ def set_sub_label(id):
@bp.route("/sub_labels")
def get_sub_labels():
+ split_joined = request.args.get("split_joined", type=int)
+
try:
events = Event.select(Event.sub_label).distinct()
except Exception as e:
@@ -272,6 +323,19 @@ def get_sub_labels():
if None in sub_labels:
sub_labels.remove(None)
+ if split_joined:
+ original_labels = sub_labels.copy()
+
+ for label in original_labels:
+ if "," in label:
+ sub_labels.remove(label)
+ parts = label.split(",")
+
+ for part in parts:
+ if not (part.strip()) in sub_labels:
+ sub_labels.append(part.strip())
+
+ sub_labels.sort()
return jsonify(sub_labels)
@@ -501,16 +565,42 @@ def event_clip(id):
@bp.route("/events")
def events():
- limit = request.args.get("limit", 100)
camera = request.args.get("camera", "all")
+ cameras = request.args.get("cameras", "all")
+
+ # handle old camera arg
+ if cameras == "all" and camera != "all":
+ cameras = camera
+
label = unquote(request.args.get("label", "all"))
+ labels = request.args.get("labels", "all")
+
+ # handle old label arg
+ if labels == "all" and label != "all":
+ labels = label
+
sub_label = request.args.get("sub_label", "all")
+ sub_labels = request.args.get("sub_labels", "all")
+
+ # handle old sub_label arg
+ if sub_labels == "all" and sub_label != "all":
+ sub_labels = sub_label
+
zone = request.args.get("zone", "all")
+ zones = request.args.get("zones", "all")
+
+    # handle old zone arg
+ if zones == "all" and zone != "all":
+ zones = zone
+
+ limit = request.args.get("limit", 100)
after = request.args.get("after", type=float)
before = request.args.get("before", type=float)
has_clip = request.args.get("has_clip", type=int)
has_snapshot = request.args.get("has_snapshot", type=int)
+ in_progress = request.args.get("in_progress", type=int)
include_thumbnails = request.args.get("include_thumbnails", default=1, type=int)
+ favorites = request.args.get("favorites", type=int)
clauses = []
excluded_fields = []
@@ -533,14 +623,52 @@ def events():
if camera != "all":
clauses.append((Event.camera == camera))
- if label != "all":
- clauses.append((Event.label == label))
+ if cameras != "all":
+ camera_list = cameras.split(",")
+ clauses.append((Event.camera << camera_list))
+
+ if labels != "all":
+ label_list = labels.split(",")
+ clauses.append((Event.label << label_list))
+
+ if sub_labels != "all":
+ # use matching so joined sub labels are included
+ # for example a sub label 'bob' would get events
+ # with sub labels 'bob' and 'bob, john'
+ sub_label_clauses = []
+ filtered_sub_labels = sub_labels.split(",")
+
+ if "None" in filtered_sub_labels:
+ filtered_sub_labels.remove("None")
+ sub_label_clauses.append((Event.sub_label.is_null()))
+
+ for label in filtered_sub_labels:
+ sub_label_clauses.append(
+ (Event.sub_label.cast("text") == label)
+ ) # include exact matches
+
+ # include this label when part of a list
+ sub_label_clauses.append((Event.sub_label.cast("text") % f"*{label},*"))
+ sub_label_clauses.append((Event.sub_label.cast("text") % f"*, {label}*"))
- if sub_label != "all":
- clauses.append((Event.sub_label == sub_label))
+ sub_label_clause = reduce(operator.or_, sub_label_clauses)
+ clauses.append((sub_label_clause))
- if zone != "all":
- clauses.append((Event.zones.cast("text") % f'*"{zone}"*'))
+ if zones != "all":
+ # use matching so events with multiple zones
+ # still match on a search where any zone matches
+ zone_clauses = []
+ filtered_zones = zones.split(",")
+
+ if "None" in filtered_zones:
+ filtered_zones.remove("None")
+ zone_clauses.append((Event.zones.length() == 0))
+
+ for zone in filtered_zones:
+ zone_clauses.append((Event.zones.cast("text") % f'*"{zone}"*'))
+
+ zone_clause = reduce(operator.or_, zone_clauses)
+ clauses.append((zone_clause))
if after:
clauses.append((Event.start_time > after))
@@ -554,11 +682,17 @@ def events():
if not has_snapshot is None:
clauses.append((Event.has_snapshot == has_snapshot))
+ if not in_progress is None:
+ clauses.append((Event.end_time.is_null(in_progress)))
+
if not include_thumbnails:
excluded_fields.append(Event.thumbnail)
else:
selected_columns.append(Event.thumbnail)
+ if favorites:
+ clauses.append((Event.retain_indefinitely == favorites))
+
if len(clauses) == 0:
clauses.append((True))
@@ -576,19 +710,106 @@ def events():
def config():
config = current_app.frigate_config.dict()
- # add in the ffmpeg_cmds
for camera_name, camera in current_app.frigate_config.cameras.items():
camera_dict = config["cameras"][camera_name]
+
+ # clean paths
+ for input in camera_dict.get("ffmpeg", {}).get("inputs", []):
+ input["path"] = clean_camera_user_pass(input["path"])
+
+ # add clean ffmpeg_cmds
camera_dict["ffmpeg_cmds"] = copy.deepcopy(camera.ffmpeg_cmds)
for cmd in camera_dict["ffmpeg_cmds"]:
- cmd["cmd"] = " ".join(cmd["cmd"])
+ cmd["cmd"] = clean_camera_user_pass(" ".join(cmd["cmd"]))
config["plus"] = {"enabled": current_app.plus_api.is_active()}
return jsonify(config)
[email protected]("/config/schema")
[email protected]("/config/raw")
+def config_raw():
+ config_file = os.environ.get("CONFIG_FILE", "/config/config.yml")
+
+ # Check if we can use .yaml instead of .yml
+ config_file_yaml = config_file.replace(".yml", ".yaml")
+
+ if os.path.isfile(config_file_yaml):
+ config_file = config_file_yaml
+
+ if not os.path.isfile(config_file):
+ return "Could not find file", 410
+
+ with open(config_file, "r") as f:
+ raw_config = f.read()
+ f.close()
+
+ return raw_config, 200
+
+
[email protected]("/config/save", methods=["POST"])
+def config_save():
+ save_option = request.args.get("save_option")
+
+ new_config = request.get_data().decode()
+
+ if not new_config:
+ return "Config with body param is required", 400
+
+ # Validate the config schema
+ try:
+ new_yaml = FrigateConfig.parse_raw(new_config)
+ except Exception as e:
+ return make_response(
+ jsonify(
+ {
+ "success": False,
+ "message": f"\nConfig Error:\n\n{str(traceback.format_exc())}",
+ }
+ ),
+ 400,
+ )
+
+ # Save the config to file
+ try:
+ config_file = os.environ.get("CONFIG_FILE", "/config/config.yml")
+
+ # Check if we can use .yaml instead of .yml
+ config_file_yaml = config_file.replace(".yml", ".yaml")
+
+ if os.path.isfile(config_file_yaml):
+ config_file = config_file_yaml
+
+ with open(config_file, "w") as f:
+ f.write(new_config)
+ f.close()
+ except Exception as e:
+ return make_response(
+ jsonify(
+ {
+ "success": False,
+ "message": f"Could not write config file, be sure that Frigate has write permission on the config file.",
+ }
+ ),
+ 400,
+ )
+
+ if save_option == "restart":
+ try:
+ restart_frigate()
+ except Exception as e:
+ logging.error(f"Error restarting Frigate: {e}")
+ return "Config successfully saved, unable to restart Frigate", 200
+
+ return (
+ "Config successfully saved, restarting (this can take up to one minute)...",
+ 200,
+ )
+ else:
+ return "Config successfully saved.", 200
+
+
[email protected]("/config/schema.json")
def config_schema():
return current_app.response_class(
current_app.frigate_config.schema_json(), mimetype="application/json"
@@ -602,7 +823,11 @@ def version():
@bp.route("/stats")
def stats():
- stats = stats_snapshot(current_app.stats_tracking)
+ stats = stats_snapshot(
+ current_app.frigate_config,
+ current_app.stats_tracking,
+ current_app.hwaccel_errors,
+ )
return jsonify(stats)
@@ -650,8 +875,38 @@ def latest_frame(camera_name):
frame = current_app.detected_frames_processor.get_current_frame(
camera_name, draw_options
)
- if frame is None:
- frame = np.zeros((720, 1280, 3), np.uint8)
+
+ if frame is None or datetime.now().timestamp() > (
+ current_app.detected_frames_processor.get_current_frame_time(camera_name)
+ + 10
+ ):
+ if current_app.camera_error_image is None:
+ error_image = glob.glob("/opt/frigate/frigate/images/camera-error.jpg")
+
+ if len(error_image) > 0:
+ current_app.camera_error_image = cv2.imread(
+ error_image[0], cv2.IMREAD_UNCHANGED
+ )
+
+ frame = current_app.camera_error_image
+
+ height = int(request.args.get("h", str(frame.shape[0])))
+ width = int(height * frame.shape[1] / frame.shape[0])
+
+ frame = cv2.resize(frame, dsize=(width, height), interpolation=cv2.INTER_AREA)
+
+ ret, jpg = cv2.imencode(
+ ".jpg", frame, [int(cv2.IMWRITE_JPEG_QUALITY), resize_quality]
+ )
+ response = make_response(jpg.tobytes())
+ response.headers["Content-Type"] = "image/jpeg"
+ response.headers["Cache-Control"] = "no-store"
+ return response
+ elif camera_name == "birdseye" and current_app.frigate_config.birdseye.restream:
+ frame = cv2.cvtColor(
+ current_app.detected_frames_processor.get_current_frame(camera_name),
+ cv2.COLOR_YUV2BGR_I420,
+ )
height = int(request.args.get("h", str(frame.shape[0])))
width = int(height * frame.shape[1] / frame.shape[0])
@@ -669,14 +924,44 @@ def latest_frame(camera_name):
return "Camera named {} not found".format(camera_name), 404
[email protected]("/recordings/storage", methods=["GET"])
+def get_recordings_storage_usage():
+ recording_stats = stats_snapshot(
+ current_app.frigate_config,
+ current_app.stats_tracking,
+ current_app.hwaccel_errors,
+ )["service"]["storage"][RECORD_DIR]
+
+ if not recording_stats:
+ return jsonify({})
+
+ total_mb = recording_stats["total"]
+
+ camera_usages: dict[
+ str, dict
+ ] = current_app.storage_maintainer.calculate_camera_usages()
+
+ for camera_name in camera_usages.keys():
+ if camera_usages.get(camera_name, {}).get("usage"):
+ camera_usages[camera_name]["usage_percent"] = (
+ camera_usages.get(camera_name, {}).get("usage", 0) / total_mb
+ ) * 100
+
+ return jsonify(camera_usages)
+
+
# return hourly summary for recordings of camera
@bp.route("/<camera_name>/recordings/summary")
def recordings_summary(camera_name):
+ tz_name = request.args.get("timezone", default="utc", type=str)
+ hour_modifier, minute_modifier = get_tz_modifiers(tz_name)
recording_groups = (
Recordings.select(
fn.strftime(
"%Y-%m-%d %H",
- fn.datetime(Recordings.start_time, "unixepoch", "localtime"),
+ fn.datetime(
+ Recordings.start_time, "unixepoch", hour_modifier, minute_modifier
+ ),
).alias("hour"),
fn.SUM(Recordings.duration).alias("duration"),
fn.SUM(Recordings.motion).alias("motion"),
@@ -686,13 +971,17 @@ def recordings_summary(camera_name):
.group_by(
fn.strftime(
"%Y-%m-%d %H",
- fn.datetime(Recordings.start_time, "unixepoch", "localtime"),
+ fn.datetime(
+ Recordings.start_time, "unixepoch", hour_modifier, minute_modifier
+ ),
)
)
.order_by(
fn.strftime(
"%Y-%m-%d H",
- fn.datetime(Recordings.start_time, "unixepoch", "localtime"),
+ fn.datetime(
+ Recordings.start_time, "unixepoch", hour_modifier, minute_modifier
+ ),
).desc()
)
)
@@ -700,14 +989,20 @@ def recordings_summary(camera_name):
event_groups = (
Event.select(
fn.strftime(
- "%Y-%m-%d %H", fn.datetime(Event.start_time, "unixepoch", "localtime")
+ "%Y-%m-%d %H",
+ fn.datetime(
+ Event.start_time, "unixepoch", hour_modifier, minute_modifier
+ ),
).alias("hour"),
fn.COUNT(Event.id).alias("count"),
)
.where(Event.camera == camera_name, Event.has_clip)
.group_by(
fn.strftime(
- "%Y-%m-%d %H", fn.datetime(Event.start_time, "unixepoch", "localtime")
+ "%Y-%m-%d %H",
+ fn.datetime(
+ Event.start_time, "unixepoch", hour_modifier, minute_modifier
+ ),
),
)
.objects()
@@ -751,6 +1046,7 @@ def recordings(camera_name):
Recordings.id,
Recordings.start_time,
Recordings.end_time,
+ Recordings.segment_size,
Recordings.motion,
Recordings.objects,
)
@@ -858,6 +1154,7 @@ def vod_ts(camera_name, start_ts, end_ts):
clips = []
durations = []
+ max_duration_ms = MAX_SEGMENT_DURATION * 1000
recording: Recordings
for recording in recordings:
@@ -868,7 +1165,7 @@ def vod_ts(camera_name, start_ts, end_ts):
if recording.end_time > end_ts:
duration -= int((recording.end_time - end_ts) * 1000)
- if duration > 0:
+ if 0 < duration < max_duration_ms:
clip["keyFrameDurations"] = [duration]
clips.append(clip)
durations.append(duration)
@@ -884,15 +1181,29 @@ def vod_ts(camera_name, start_ts, end_ts):
{
"cache": hour_ago.timestamp() > start_ts,
"discontinuity": False,
+ "consistentSequenceMediaInfo": True,
"durations": durations,
+ "segment_duration": max(durations),
"sequences": [{"clips": clips}],
}
)
@bp.route("/vod/<year_month>/<day>/<hour>/<camera_name>")
-def vod_hour(year_month, day, hour, camera_name):
- start_date = datetime.strptime(f"{year_month}-{day} {hour}", "%Y-%m-%d %H")
+def vod_hour_no_timezone(year_month, day, hour, camera_name):
+ return vod_hour(
+ year_month, day, hour, camera_name, get_localzone_name().replace("/", ",")
+ )
+
+
+# TODO make this nicer when vod module is removed
[email protected]("/vod/<year_month>/<day>/<hour>/<camera_name>/<tz_name>")
+def vod_hour(year_month, day, hour, camera_name, tz_name):
+ parts = year_month.split("-")
+ start_date = (
+ datetime(int(parts[0]), int(parts[1]), int(day), int(hour), tzinfo=timezone.utc)
+ - datetime.now(pytz.timezone(tz_name.replace(",", "/"))).utcoffset()
+ )
end_date = start_date + timedelta(hours=1) - timedelta(milliseconds=1)
start_ts = start_date.timestamp()
end_ts = end_date.timestamp()
@@ -955,3 +1266,91 @@ def imagestream(detected_frames_processor, camera_name, fps, height, draw_option
b"--frame\r\n"
b"Content-Type: image/jpeg\r\n\r\n" + jpg.tobytes() + b"\r\n\r\n"
)
+
+
[email protected]("/ffprobe", methods=["GET"])
+def ffprobe():
+ path_param = request.args.get("paths", "")
+
+ if not path_param:
+ return jsonify(
+ {"success": False, "message": f"Path needs to be provided."}, "404"
+ )
+
+ if path_param.startswith("camera"):
+ camera = path_param[7:]
+
+ if camera not in current_app.frigate_config.cameras.keys():
+ return jsonify(
+ {"success": False, "message": f"{camera} is not a valid camera."}, "404"
+ )
+
+ if not current_app.frigate_config.cameras[camera].enabled:
+ return jsonify(
+ {"success": False, "message": f"{camera} is not enabled."}, "404"
+ )
+
+ paths = map(
+ lambda input: input.path,
+ current_app.frigate_config.cameras[camera].ffmpeg.inputs,
+ )
+ elif "," in clean_camera_user_pass(path_param):
+ paths = path_param.split(",")
+ else:
+ paths = [path_param]
+
+ # user has multiple streams
+ output = []
+
+ for path in paths:
+ ffprobe = ffprobe_stream(path.strip())
+ output.append(
+ {
+ "return_code": ffprobe.returncode,
+ "stderr": ffprobe.stderr.decode("unicode_escape").strip()
+ if ffprobe.returncode != 0
+ else "",
+ "stdout": json.loads(ffprobe.stdout.decode("unicode_escape").strip())
+ if ffprobe.returncode == 0
+ else "",
+ }
+ )
+
+ return jsonify(output)
+
+
[email protected]("/vainfo", methods=["GET"])
+def vainfo():
+ vainfo = vainfo_hwaccel()
+ return jsonify(
+ {
+ "return_code": vainfo.returncode,
+ "stderr": vainfo.stderr.decode("unicode_escape").strip()
+ if vainfo.returncode != 0
+ else "",
+ "stdout": vainfo.stdout.decode("unicode_escape").strip()
+ if vainfo.returncode == 0
+ else "",
+ }
+ )
+
+
[email protected]("/logs/<service>", methods=["GET"])
+def logs(service: str):
+ log_locations = {
+ "frigate": "/dev/shm/logs/frigate/current",
+ "go2rtc": "/dev/shm/logs/go2rtc/current",
+ "nginx": "/dev/shm/logs/nginx/current",
+ }
+ service_location = log_locations.get(service)
+
+    if not service_location:
+ return f"{service} is not a valid service", 404
+
+ try:
+ file = open(service_location, "r")
+ contents = file.read()
+ file.close()
+ return contents, 200
+ except FileNotFoundError as e:
+ return f"Could not find log file: {e}", 500
diff --git a/frigate/log.py b/frigate/log.py
--- a/frigate/log.py
+++ b/frigate/log.py
@@ -4,12 +4,18 @@
import os
import signal
import queue
+import multiprocessing as mp
from multiprocessing.queues import Queue
from logging import handlers
+from typing import Optional
+from types import FrameType
from setproctitle import setproctitle
-from typing import Deque
+from typing import Deque, Optional
+from types import FrameType
from collections import deque
+from frigate.util import clean_camera_user_pass
+
def listener_configurer() -> None:
root = logging.getLogger()
@@ -33,10 +39,21 @@ def log_process(log_queue: Queue) -> None:
threading.current_thread().name = f"logger"
setproctitle("frigate.logger")
listener_configurer()
+
+ stop_event = mp.Event()
+
+ def receiveSignal(signalNumber: int, frame: Optional[FrameType]) -> None:
+ stop_event.set()
+
+ signal.signal(signal.SIGTERM, receiveSignal)
+ signal.signal(signal.SIGINT, receiveSignal)
+
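+    # poll the queue with a short timeout so the stop signal set above is noticed promptly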
while True:
try:
- record = log_queue.get(timeout=5)
+ record = log_queue.get(timeout=1)
except (queue.Empty, KeyboardInterrupt):
+ if stop_event.is_set():
+ break
continue
logger = logging.getLogger(record.name)
logger.handle(record)
@@ -55,6 +72,11 @@ def __init__(self, log_name: str):
self.pipeReader = os.fdopen(self.fdRead)
self.start()
+ def cleanup_log(self, log: str) -> str:
+ """Cleanup the log line to remove sensitive info and string tokens."""
+ log = clean_camera_user_pass(log).strip("\n")
+ return log
+
def fileno(self) -> int:
"""Return the write file descriptor of the pipe"""
return self.fdWrite
@@ -62,7 +84,7 @@ def fileno(self) -> int:
def run(self) -> None:
"""Run the thread, logging everything."""
for line in iter(self.pipeReader.readline, ""):
- self.deque.append(line.strip("\n"))
+ self.deque.append(self.cleanup_log(line))
self.pipeReader.close()
diff --git a/frigate/models.py b/frigate/models.py
--- a/frigate/models.py
+++ b/frigate/models.py
@@ -41,3 +41,4 @@ class Recordings(Model): # type: ignore[misc]
duration = FloatField()
motion = IntegerField(null=True)
objects = IntegerField(null=True)
+ segment_size = FloatField(default=0) # this should be stored as MB
diff --git a/frigate/mqtt.py b/frigate/mqtt.py
deleted file mode 100644
--- a/frigate/mqtt.py
+++ /dev/null
@@ -1,401 +0,0 @@
-import json
-import logging
-import threading
-from wsgiref.simple_server import make_server
-
-import paho.mqtt.client as mqtt
-from ws4py.server.wsgirefserver import (
- WebSocketWSGIHandler,
- WebSocketWSGIRequestHandler,
- WSGIServer,
-)
-from ws4py.server.wsgiutils import WebSocketWSGIApplication
-from ws4py.websocket import WebSocket
-
-from frigate.config import FrigateConfig
-from frigate.util import restart_frigate
-
-logger = logging.getLogger(__name__)
-
-
-def create_mqtt_client(config: FrigateConfig, camera_metrics):
- mqtt_config = config.mqtt
-
- def on_recordings_command(client, userdata, message):
- payload = message.payload.decode()
- logger.debug(f"on_recordings_toggle: {message.topic} {payload}")
-
- camera_name = message.topic.split("/")[-3]
-
- record_settings = config.cameras[camera_name].record
-
- if payload == "ON":
- if not record_settings.enabled:
- logger.info(f"Turning on recordings for {camera_name} via mqtt")
- record_settings.enabled = True
- elif payload == "OFF":
- if record_settings.enabled:
- logger.info(f"Turning off recordings for {camera_name} via mqtt")
- record_settings.enabled = False
- else:
- logger.warning(f"Received unsupported value at {message.topic}: {payload}")
-
- state_topic = f"{message.topic[:-4]}/state"
- client.publish(state_topic, payload, retain=True)
-
- def on_snapshots_command(client, userdata, message):
- payload = message.payload.decode()
- logger.debug(f"on_snapshots_toggle: {message.topic} {payload}")
-
- camera_name = message.topic.split("/")[-3]
-
- snapshots_settings = config.cameras[camera_name].snapshots
-
- if payload == "ON":
- if not snapshots_settings.enabled:
- logger.info(f"Turning on snapshots for {camera_name} via mqtt")
- snapshots_settings.enabled = True
- elif payload == "OFF":
- if snapshots_settings.enabled:
- logger.info(f"Turning off snapshots for {camera_name} via mqtt")
- snapshots_settings.enabled = False
- else:
- logger.warning(f"Received unsupported value at {message.topic}: {payload}")
-
- state_topic = f"{message.topic[:-4]}/state"
- client.publish(state_topic, payload, retain=True)
-
- def on_detect_command(client, userdata, message):
- payload = message.payload.decode()
- logger.debug(f"on_detect_toggle: {message.topic} {payload}")
-
- camera_name = message.topic.split("/")[-3]
-
- detect_settings = config.cameras[camera_name].detect
-
- if payload == "ON":
- if not camera_metrics[camera_name]["detection_enabled"].value:
- logger.info(f"Turning on detection for {camera_name} via mqtt")
- camera_metrics[camera_name]["detection_enabled"].value = True
- detect_settings.enabled = True
-
- if not camera_metrics[camera_name]["motion_enabled"].value:
- logger.info(
- f"Turning on motion for {camera_name} due to detection being enabled."
- )
- camera_metrics[camera_name]["motion_enabled"].value = True
- state_topic = f"{message.topic[:-11]}/motion/state"
- client.publish(state_topic, payload, retain=True)
- elif payload == "OFF":
- if camera_metrics[camera_name]["detection_enabled"].value:
- logger.info(f"Turning off detection for {camera_name} via mqtt")
- camera_metrics[camera_name]["detection_enabled"].value = False
- detect_settings.enabled = False
- else:
- logger.warning(f"Received unsupported value at {message.topic}: {payload}")
-
- state_topic = f"{message.topic[:-4]}/state"
- client.publish(state_topic, payload, retain=True)
-
- def on_motion_command(client, userdata, message):
- payload = message.payload.decode()
- logger.debug(f"on_motion_toggle: {message.topic} {payload}")
-
- camera_name = message.topic.split("/")[-3]
-
- if payload == "ON":
- if not camera_metrics[camera_name]["motion_enabled"].value:
- logger.info(f"Turning on motion for {camera_name} via mqtt")
- camera_metrics[camera_name]["motion_enabled"].value = True
- elif payload == "OFF":
- if camera_metrics[camera_name]["detection_enabled"].value:
- logger.error(
- f"Turning off motion is not allowed when detection is enabled."
- )
- return
-
- if camera_metrics[camera_name]["motion_enabled"].value:
- logger.info(f"Turning off motion for {camera_name} via mqtt")
- camera_metrics[camera_name]["motion_enabled"].value = False
- else:
- logger.warning(f"Received unsupported value at {message.topic}: {payload}")
-
- state_topic = f"{message.topic[:-4]}/state"
- client.publish(state_topic, payload, retain=True)
-
- def on_improve_contrast_command(client, userdata, message):
- payload = message.payload.decode()
- logger.debug(f"on_improve_contrast_toggle: {message.topic} {payload}")
-
- camera_name = message.topic.split("/")[-3]
-
- motion_settings = config.cameras[camera_name].motion
-
- if payload == "ON":
- if not camera_metrics[camera_name]["improve_contrast_enabled"].value:
- logger.info(f"Turning on improve contrast for {camera_name} via mqtt")
- camera_metrics[camera_name]["improve_contrast_enabled"].value = True
- motion_settings.improve_contrast = True
- elif payload == "OFF":
- if camera_metrics[camera_name]["improve_contrast_enabled"].value:
- logger.info(f"Turning off improve contrast for {camera_name} via mqtt")
- camera_metrics[camera_name]["improve_contrast_enabled"].value = False
- motion_settings.improve_contrast = False
- else:
- logger.warning(f"Received unsupported value at {message.topic}: {payload}")
-
- state_topic = f"{message.topic[:-4]}/state"
- client.publish(state_topic, payload, retain=True)
-
- def on_motion_threshold_command(client, userdata, message):
- try:
- payload = int(message.payload.decode())
- except ValueError:
- logger.warning(
- f"Received unsupported value at {message.topic}: {message.payload.decode()}"
- )
- return
-
- logger.debug(f"on_motion_threshold_toggle: {message.topic} {payload}")
-
- camera_name = message.topic.split("/")[-3]
-
- motion_settings = config.cameras[camera_name].motion
-
- logger.info(f"Setting motion threshold for {camera_name} via mqtt: {payload}")
- camera_metrics[camera_name]["motion_threshold"].value = payload
- motion_settings.threshold = payload
-
- state_topic = f"{message.topic[:-4]}/state"
- client.publish(state_topic, payload, retain=True)
-
- def on_motion_contour_area_command(client, userdata, message):
- try:
- payload = int(message.payload.decode())
- except ValueError:
- logger.warning(
- f"Received unsupported value at {message.topic}: {message.payload.decode()}"
- )
- return
-
- logger.debug(f"on_motion_contour_area_toggle: {message.topic} {payload}")
-
- camera_name = message.topic.split("/")[-3]
-
- motion_settings = config.cameras[camera_name].motion
-
- logger.info(
- f"Setting motion contour area for {camera_name} via mqtt: {payload}"
- )
- camera_metrics[camera_name]["motion_contour_area"].value = payload
- motion_settings.contour_area = payload
-
- state_topic = f"{message.topic[:-4]}/state"
- client.publish(state_topic, payload, retain=True)
-
- def on_restart_command(client, userdata, message):
- restart_frigate()
-
- def on_connect(client, userdata, flags, rc):
- threading.current_thread().name = "mqtt"
- if rc != 0:
- if rc == 3:
- logger.error(
- "Unable to connect to MQTT server: MQTT Server unavailable"
- )
- elif rc == 4:
- logger.error(
- "Unable to connect to MQTT server: MQTT Bad username or password"
- )
- elif rc == 5:
- logger.error("Unable to connect to MQTT server: MQTT Not authorized")
- else:
- logger.error(
- "Unable to connect to MQTT server: Connection refused. Error code: "
- + str(rc)
- )
-
- logger.debug("MQTT connected")
- client.subscribe(f"{mqtt_config.topic_prefix}/#")
- client.publish(mqtt_config.topic_prefix + "/available", "online", retain=True)
-
- client = mqtt.Client(client_id=mqtt_config.client_id)
- client.on_connect = on_connect
- client.will_set(
- mqtt_config.topic_prefix + "/available", payload="offline", qos=1, retain=True
- )
-
- # register callbacks
- for name in config.cameras.keys():
- client.message_callback_add(
- f"{mqtt_config.topic_prefix}/{name}/recordings/set", on_recordings_command
- )
- client.message_callback_add(
- f"{mqtt_config.topic_prefix}/{name}/snapshots/set", on_snapshots_command
- )
- client.message_callback_add(
- f"{mqtt_config.topic_prefix}/{name}/detect/set", on_detect_command
- )
- client.message_callback_add(
- f"{mqtt_config.topic_prefix}/{name}/motion/set", on_motion_command
- )
- client.message_callback_add(
- f"{mqtt_config.topic_prefix}/{name}/improve_contrast/set",
- on_improve_contrast_command,
- )
- client.message_callback_add(
- f"{mqtt_config.topic_prefix}/{name}/motion_threshold/set",
- on_motion_threshold_command,
- )
- client.message_callback_add(
- f"{mqtt_config.topic_prefix}/{name}/motion_contour_area/set",
- on_motion_contour_area_command,
- )
-
- client.message_callback_add(
- f"{mqtt_config.topic_prefix}/restart", on_restart_command
- )
-
- if not mqtt_config.tls_ca_certs is None:
- if (
- not mqtt_config.tls_client_cert is None
- and not mqtt_config.tls_client_key is None
- ):
- client.tls_set(
- mqtt_config.tls_ca_certs,
- mqtt_config.tls_client_cert,
- mqtt_config.tls_client_key,
- )
- else:
- client.tls_set(mqtt_config.tls_ca_certs)
- if not mqtt_config.tls_insecure is None:
- client.tls_insecure_set(mqtt_config.tls_insecure)
- if not mqtt_config.user is None:
- client.username_pw_set(mqtt_config.user, password=mqtt_config.password)
- try:
- client.connect(mqtt_config.host, mqtt_config.port, 60)
- except Exception as e:
- logger.error(f"Unable to connect to MQTT server: {e}")
- raise
-
- client.loop_start()
-
- for name in config.cameras.keys():
- client.publish(
- f"{mqtt_config.topic_prefix}/{name}/recordings/state",
- "ON" if config.cameras[name].record.enabled else "OFF",
- retain=True,
- )
- client.publish(
- f"{mqtt_config.topic_prefix}/{name}/snapshots/state",
- "ON" if config.cameras[name].snapshots.enabled else "OFF",
- retain=True,
- )
- client.publish(
- f"{mqtt_config.topic_prefix}/{name}/detect/state",
- "ON" if config.cameras[name].detect.enabled else "OFF",
- retain=True,
- )
- client.publish(
- f"{mqtt_config.topic_prefix}/{name}/motion/state",
- "ON",
- retain=True,
- )
- client.publish(
- f"{mqtt_config.topic_prefix}/{name}/improve_contrast/state",
- "ON" if config.cameras[name].motion.improve_contrast else "OFF",
- retain=True,
- )
- client.publish(
- f"{mqtt_config.topic_prefix}/{name}/motion_threshold/state",
- config.cameras[name].motion.threshold,
- retain=True,
- )
- client.publish(
- f"{mqtt_config.topic_prefix}/{name}/motion_contour_area/state",
- config.cameras[name].motion.contour_area,
- retain=True,
- )
- client.publish(
- f"{mqtt_config.topic_prefix}/{name}/motion",
- "OFF",
- retain=False,
- )
-
- return client
-
-
-class MqttSocketRelay:
- def __init__(self, mqtt_client, topic_prefix):
- self.mqtt_client = mqtt_client
- self.topic_prefix = topic_prefix
-
- def start(self):
- class MqttWebSocket(WebSocket):
- topic_prefix = self.topic_prefix
- mqtt_client = self.mqtt_client
-
- def received_message(self, message):
- try:
- json_message = json.loads(message.data.decode("utf-8"))
- json_message = {
- "topic": f"{self.topic_prefix}/{json_message['topic']}",
- "payload": json_message.get("payload"),
- "retain": json_message.get("retain", False),
- }
- except Exception as e:
- logger.warning("Unable to parse websocket message as valid json.")
- return
-
- logger.debug(
- f"Publishing mqtt message from websockets at {json_message['topic']}."
- )
- self.mqtt_client.publish(
- json_message["topic"],
- json_message["payload"],
- retain=json_message["retain"],
- )
-
- # start a websocket server on 5002
- WebSocketWSGIHandler.http_version = "1.1"
- self.websocket_server = make_server(
- "127.0.0.1",
- 5002,
- server_class=WSGIServer,
- handler_class=WebSocketWSGIRequestHandler,
- app=WebSocketWSGIApplication(handler_cls=MqttWebSocket),
- )
- self.websocket_server.initialize_websockets_manager()
- self.websocket_thread = threading.Thread(
- target=self.websocket_server.serve_forever
- )
-
- def send(client, userdata, message):
- """Sends mqtt messages to clients."""
- try:
- logger.debug(f"Received mqtt message on {message.topic}.")
- ws_message = json.dumps(
- {
- "topic": message.topic.replace(f"{self.topic_prefix}/", ""),
- "payload": message.payload.decode(),
- }
- )
- except Exception as e:
- # if the payload can't be decoded don't relay to clients
- logger.debug(
- f"MQTT payload for {message.topic} wasn't text. Skipping..."
- )
- return
-
- self.websocket_server.manager.broadcast(ws_message)
-
- self.mqtt_client.message_callback_add(f"{self.topic_prefix}/#", send)
-
- self.websocket_thread.start()
-
- def stop(self):
- self.websocket_server.manager.close_all()
- self.websocket_server.manager.stop()
- self.websocket_server.manager.join()
- self.websocket_server.shutdown()
- self.websocket_thread.join()
diff --git a/frigate/edgetpu.py b/frigate/object_detection.py
similarity index 61%
rename from frigate/edgetpu.py
rename to frigate/object_detection.py
--- a/frigate/edgetpu.py
+++ b/frigate/object_detection.py
@@ -8,9 +8,10 @@
from abc import ABC, abstractmethod
import numpy as np
-import tflite_runtime.interpreter as tflite
from setproctitle import setproctitle
-from tflite_runtime.interpreter import load_delegate
+
+from frigate.config import InputTensorEnum
+from frigate.detectors import create_detector
from frigate.util import EventsPerSecond, SharedMemoryFrameManager, listen, load_labels
@@ -23,46 +24,32 @@ def detect(self, tensor_input, threshold=0.4):
pass
+def tensor_transform(desired_shape):
+ # Currently this function only supports BHWC permutations
+ if desired_shape == InputTensorEnum.nhwc:
+ return None
+ elif desired_shape == InputTensorEnum.nchw:
+ return (0, 3, 1, 2)
+
+
class LocalObjectDetector(ObjectDetector):
- def __init__(self, tf_device=None, model_path=None, num_threads=3, labels=None):
+ def __init__(
+ self,
+ detector_config=None,
+ labels=None,
+ ):
self.fps = EventsPerSecond()
if labels is None:
self.labels = {}
else:
self.labels = load_labels(labels)
- device_config = {"device": "usb"}
- if not tf_device is None:
- device_config = {"device": tf_device}
-
- edge_tpu_delegate = None
-
- if tf_device != "cpu":
- try:
- logger.info(f"Attempting to load TPU as {device_config['device']}")
- edge_tpu_delegate = load_delegate("libedgetpu.so.1.0", device_config)
- logger.info("TPU found")
- self.interpreter = tflite.Interpreter(
- model_path=model_path or "/edgetpu_model.tflite",
- experimental_delegates=[edge_tpu_delegate],
- )
- except ValueError:
- logger.error(
- "No EdgeTPU was detected. If you do not have a Coral device yet, you must configure CPU detectors."
- )
- raise
+ if detector_config:
+ self.input_transform = tensor_transform(detector_config.model.input_tensor)
else:
- logger.warning(
- "CPU detectors are not recommended and should only be used for testing or for trial purposes."
- )
- self.interpreter = tflite.Interpreter(
- model_path=model_path or "/cpu_model.tflite", num_threads=num_threads
- )
-
- self.interpreter.allocate_tensors()
+ self.input_transform = None
- self.tensor_input_details = self.interpreter.get_input_details()
- self.tensor_output_details = self.interpreter.get_output_details()
+ self.detect_api = create_detector(detector_config)
def detect(self, tensor_input, threshold=0.4):
detections = []
@@ -79,31 +66,9 @@ def detect(self, tensor_input, threshold=0.4):
return detections
def detect_raw(self, tensor_input):
- self.interpreter.set_tensor(self.tensor_input_details[0]["index"], tensor_input)
- self.interpreter.invoke()
-
- boxes = self.interpreter.tensor(self.tensor_output_details[0]["index"])()[0]
- class_ids = self.interpreter.tensor(self.tensor_output_details[1]["index"])()[0]
- scores = self.interpreter.tensor(self.tensor_output_details[2]["index"])()[0]
- count = int(
- self.interpreter.tensor(self.tensor_output_details[3]["index"])()[0]
- )
-
- detections = np.zeros((20, 6), np.float32)
-
- for i in range(count):
- if scores[i] < 0.4 or i == 20:
- break
- detections[i] = [
- class_ids[i],
- float(scores[i]),
- boxes[i][0],
- boxes[i][1],
- boxes[i][2],
- boxes[i][3],
- ]
-
- return detections
+ if self.input_transform:
+ tensor_input = np.transpose(tensor_input, self.input_transform)
+ return self.detect_api.detect_raw(tensor_input=tensor_input)
def run_detector(
@@ -112,10 +77,7 @@ def run_detector(
out_events: dict[str, mp.Event],
avg_speed,
start,
- model_path,
- model_shape,
- tf_device,
- num_threads,
+ detector_config,
):
threading.current_thread().name = f"detector:{name}"
logger = logging.getLogger(f"detector.{name}")
@@ -126,15 +88,14 @@ def run_detector(
stop_event = mp.Event()
def receiveSignal(signalNumber, frame):
+ logger.info("Signal to exit detection process...")
stop_event.set()
signal.signal(signal.SIGTERM, receiveSignal)
signal.signal(signal.SIGINT, receiveSignal)
frame_manager = SharedMemoryFrameManager()
- object_detector = LocalObjectDetector(
- tf_device=tf_device, model_path=model_path, num_threads=num_threads
- )
+ object_detector = LocalObjectDetector(detector_config=detector_config)
outputs = {}
for name in out_events.keys():
@@ -144,11 +105,12 @@ def receiveSignal(signalNumber, frame):
while not stop_event.is_set():
try:
- connection_id = detection_queue.get(timeout=5)
+ connection_id = detection_queue.get(timeout=1)
except queue.Empty:
continue
input_frame = frame_manager.get(
- connection_id, (1, model_shape[0], model_shape[1], 3)
+ connection_id,
+ (1, detector_config.model.height, detector_config.model.width, 3),
)
if input_frame is None:
@@ -164,17 +126,16 @@ def receiveSignal(signalNumber, frame):
avg_speed.value = (avg_speed.value * 9 + duration) / 10
+ logger.info("Exited detection process...")
+
-class EdgeTPUProcess:
+class ObjectDetectProcess:
def __init__(
self,
name,
detection_queue,
out_events,
- model_path,
- model_shape,
- tf_device=None,
- num_threads=3,
+ detector_config,
):
self.name = name
self.out_events = out_events
@@ -182,13 +143,13 @@ def __init__(
self.avg_inference_speed = mp.Value("d", 0.01)
self.detection_start = mp.Value("d", 0.0)
self.detect_process = None
- self.model_path = model_path
- self.model_shape = model_shape
- self.tf_device = tf_device
- self.num_threads = num_threads
+ self.detector_config = detector_config
self.start_or_restart()
def stop(self):
+ # if the process has already exited on its own, just return
+ if self.detect_process and self.detect_process.exitcode:
+ return
self.detect_process.terminate()
logging.info("Waiting for detection process to exit gracefully...")
self.detect_process.join(timeout=30)
@@ -196,6 +157,7 @@ def stop(self):
logging.info("Detection process didnt exit. Force killing...")
self.detect_process.kill()
self.detect_process.join()
+ logging.info("Detection process has exited...")
def start_or_restart(self):
self.detection_start.value = 0.0
@@ -210,10 +172,7 @@ def start_or_restart(self):
self.out_events,
self.avg_inference_speed,
self.detection_start,
- self.model_path,
- self.model_shape,
- self.tf_device,
- self.num_threads,
+ self.detector_config,
),
)
self.detect_process.daemon = True
@@ -221,15 +180,18 @@ def start_or_restart(self):
class RemoteObjectDetector:
- def __init__(self, name, labels, detection_queue, event, model_shape):
+ def __init__(self, name, labels, detection_queue, event, model_config, stop_event):
self.labels = labels
self.name = name
self.fps = EventsPerSecond()
self.detection_queue = detection_queue
self.event = event
+ self.stop_event = stop_event
self.shm = mp.shared_memory.SharedMemory(name=self.name, create=False)
self.np_shm = np.ndarray(
- (1, model_shape[0], model_shape[1], 3), dtype=np.uint8, buffer=self.shm.buf
+ (1, model_config.height, model_config.width, 3),
+ dtype=np.uint8,
+ buffer=self.shm.buf,
)
self.out_shm = mp.shared_memory.SharedMemory(
name=f"out-{self.name}", create=False
@@ -239,11 +201,14 @@ def __init__(self, name, labels, detection_queue, event, model_shape):
def detect(self, tensor_input, threshold=0.4):
detections = []
+ if self.stop_event.is_set():
+ return detections
+
# copy input to shared memory
self.np_shm[:] = tensor_input[:]
self.event.clear()
self.detection_queue.put(self.name)
- result = self.event.wait(timeout=10.0)
+ result = self.event.wait(timeout=5.0)
# if it timed out
if result is None:
diff --git a/frigate/object_processing.py b/frigate/object_processing.py
--- a/frigate/object_processing.py
+++ b/frigate/object_processing.py
@@ -12,7 +12,14 @@
import cv2
import numpy as np
-from frigate.config import CameraConfig, SnapshotsConfig, RecordConfig, FrigateConfig
+from frigate.comms.dispatcher import Dispatcher
+from frigate.config import (
+ CameraConfig,
+ MqttConfig,
+ SnapshotsConfig,
+ RecordConfig,
+ FrigateConfig,
+)
from frigate.const import CLIPS_DIR
from frigate.util import (
SharedMemoryFrameManager,
@@ -626,8 +633,7 @@ class TrackedObjectProcessor(threading.Thread):
def __init__(
self,
config: FrigateConfig,
- client,
- topic_prefix,
+ dispatcher: Dispatcher,
tracked_objects_queue,
event_queue,
event_processed_queue,
@@ -638,8 +644,7 @@ def __init__(
threading.Thread.__init__(self)
self.name = "detected_frames_processor"
self.config = config
- self.client = client
- self.topic_prefix = topic_prefix
+ self.dispatcher = dispatcher
self.tracked_objects_queue = tracked_objects_queue
self.event_queue = event_queue
self.event_processed_queue = event_processed_queue
@@ -662,9 +667,7 @@ def update(camera, obj: TrackedObject, current_frame_time):
"after": after,
"type": "new" if obj.previous["false_positive"] else "update",
}
- self.client.publish(
- f"{self.topic_prefix}/events", json.dumps(message), retain=False
- )
+ self.dispatcher.publish("events", json.dumps(message), retain=False)
obj.previous = after
self.event_queue.put(
("update", camera, obj.to_dict(include_thumbnail=True))
@@ -717,14 +720,12 @@ def end(camera, obj: TrackedObject, current_frame_time):
"after": obj.to_dict(),
"type": "end",
}
- self.client.publish(
- f"{self.topic_prefix}/events", json.dumps(message), retain=False
- )
+ self.dispatcher.publish("events", json.dumps(message), retain=False)
self.event_queue.put(("end", camera, obj.to_dict(include_thumbnail=True)))
def snapshot(camera, obj: TrackedObject, current_frame_time):
- mqtt_config = self.config.cameras[camera].mqtt
+ mqtt_config: MqttConfig = self.config.cameras[camera].mqtt
if mqtt_config.enabled and self.should_mqtt_snapshot(camera, obj):
jpg_bytes = obj.get_jpg_bytes(
timestamp=mqtt_config.timestamp,
@@ -739,16 +740,14 @@ def snapshot(camera, obj: TrackedObject, current_frame_time):
f"Unable to send mqtt snapshot for {obj.obj_data['id']}."
)
else:
- self.client.publish(
- f"{self.topic_prefix}/{camera}/{obj.obj_data['label']}/snapshot",
+ self.dispatcher.publish(
+ f"{camera}/{obj.obj_data['label']}/snapshot",
jpg_bytes,
retain=True,
)
def object_status(camera, object_name, status):
- self.client.publish(
- f"{self.topic_prefix}/{camera}/{object_name}", status, retain=False
- )
+ self.dispatcher.publish(f"{camera}/{object_name}", status, retain=False)
for camera in self.config.cameras.keys():
camera_state = CameraState(camera, self.config, self.frame_manager)
@@ -846,8 +845,8 @@ def update_mqtt_motion(self, camera, frame_time, motion_boxes):
if motion_boxes:
# only send ON if motion isn't already active
if self.last_motion_detected.get(camera, 0) == 0:
- self.client.publish(
- f"{self.topic_prefix}/{camera}/motion",
+ self.dispatcher.publish(
+ f"{camera}/motion",
"ON",
retain=False,
)
@@ -859,8 +858,8 @@ def update_mqtt_motion(self, camera, frame_time, motion_boxes):
# If no motion, make sure the off_delay has passed
if frame_time - self.last_motion_detected.get(camera, 0) >= mqtt_delay:
- self.client.publish(
- f"{self.topic_prefix}/{camera}/motion",
+ self.dispatcher.publish(
+ f"{camera}/motion",
"OFF",
retain=False,
)
@@ -881,8 +880,18 @@ def get_best(self, camera, label):
return {}
def get_current_frame(self, camera, draw_options={}):
+ if camera == "birdseye":
+ return self.frame_manager.get(
+ "birdseye",
+ (self.config.birdseye.height * 3 // 2, self.config.birdseye.width),
+ )
+
return self.camera_states[camera].get_current_frame(draw_options)
+ def get_current_frame_time(self, camera) -> int:
+ """Returns the latest frame time for a given camera."""
+ return self.camera_states[camera].current_frame_time
+
def run(self):
while not self.stop_event.is_set():
try:
@@ -892,7 +901,7 @@ def run(self):
current_tracked_objects,
motion_boxes,
regions,
- ) = self.tracked_objects_queue.get(True, 10)
+ ) = self.tracked_objects_queue.get(True, 1)
except queue.Empty:
continue
@@ -955,8 +964,8 @@ def run(self):
)
new_count = sum(zone_label.values())
if new_count != current_count:
- self.client.publish(
- f"{self.topic_prefix}/{zone}/{label}",
+ self.dispatcher.publish(
+ f"{zone}/{label}",
new_count,
retain=False,
)
@@ -968,8 +977,8 @@ def run(self):
else:
if label in obj_counter:
zone_label[camera] = obj_counter[label]
- self.client.publish(
- f"{self.topic_prefix}/{zone}/{label}",
+ self.dispatcher.publish(
+ f"{zone}/{label}",
obj_counter[label],
retain=False,
)
@@ -985,16 +994,16 @@ def run(self):
new_count = sum(zone_label.values())
if new_count != current_count:
- self.client.publish(
- f"{self.topic_prefix}/{zone}/all",
+ self.dispatcher.publish(
+ f"{zone}/all",
new_count,
retain=False,
)
# if this is a new zone all label for this camera
else:
zone_label[camera] = total_label_count
- self.client.publish(
- f"{self.topic_prefix}/{zone}/all",
+ self.dispatcher.publish(
+ f"{zone}/all",
total_label_count,
retain=False,
)
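The object processor above now publishes through a Dispatcher instead of holding the MQTT client
and topic prefix itself. The sketch below is a hypothetical stand-in for
frigate.comms.dispatcher.Dispatcher (which is not part of this hunk), assuming it only prefixes
topics and forwards to registered communicators.

class MiniDispatcher:
    """Hypothetical stand-in for frigate.comms.dispatcher.Dispatcher."""

    def __init__(self, topic_prefix: str, communicators: list) -> None:
        self.topic_prefix = topic_prefix
        self.communicators = communicators

    def publish(self, topic: str, payload, retain: bool = False) -> None:
        # callers pass only the topic suffix; the prefix is applied in one place
        for comm in self.communicators:
            comm.publish(f"{self.topic_prefix}/{topic}", payload, retain)


class PrintComm:
    """Toy communicator that just prints what would be sent."""

    def publish(self, topic, payload, retain) -> None:
        print(topic, payload, retain)


dispatcher = MiniDispatcher("frigate", [PrintComm()])
dispatcher.publish("events", '{"type": "new"}', retain=False)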
diff --git a/frigate/output.py b/frigate/output.py
--- a/frigate/output.py
+++ b/frigate/output.py
@@ -3,11 +3,11 @@
import logging
import math
import multiprocessing as mp
+import os
import queue
import signal
import subprocess as sp
import threading
-from multiprocessing import shared_memory
from wsgiref.simple_server import make_server
import cv2
@@ -22,17 +22,50 @@
from ws4py.websocket import WebSocket
from frigate.config import BirdseyeModeEnum, FrigateConfig
-from frigate.const import BASE_DIR
+from frigate.const import BASE_DIR, BIRDSEYE_PIPE
from frigate.util import SharedMemoryFrameManager, copy_yuv_to_position, get_yuv_crop
logger = logging.getLogger(__name__)
class FFMpegConverter:
- def __init__(self, in_width, in_height, out_width, out_height, quality):
- ffmpeg_cmd = f"ffmpeg -f rawvideo -pix_fmt yuv420p -video_size {in_width}x{in_height} -i pipe: -f mpegts -s {out_width}x{out_height} -codec:v mpeg1video -q {quality} -bf 0 pipe:".split(
- " "
- )
+ def __init__(
+ self,
+ in_width: int,
+ in_height: int,
+ out_width: int,
+ out_height: int,
+ quality: int,
+ birdseye_rtsp: bool = False,
+ ):
+ self.bd_pipe = None
+
+ if birdseye_rtsp:
+ self.recreate_birdseye_pipe()
+
+ ffmpeg_cmd = [
+ "ffmpeg",
+ "-f",
+ "rawvideo",
+ "-pix_fmt",
+ "yuv420p",
+ "-video_size",
+ f"{in_width}x{in_height}",
+ "-i",
+ "pipe:",
+ "-f",
+ "mpegts",
+ "-s",
+ f"{out_width}x{out_height}",
+ "-codec:v",
+ "mpeg1video",
+ "-q",
+ f"{quality}",
+ "-bf",
+ "0",
+ "pipe:",
+ ]
+
self.process = sp.Popen(
ffmpeg_cmd,
stdout=sp.PIPE,
@@ -41,9 +74,38 @@ def __init__(self, in_width, in_height, out_width, out_height, quality):
start_new_session=True,
)
- def write(self, b):
+ def recreate_birdseye_pipe(self) -> None:
+ if self.bd_pipe:
+ os.close(self.bd_pipe)
+
+ if os.path.exists(BIRDSEYE_PIPE):
+ os.remove(BIRDSEYE_PIPE)
+
+ os.mkfifo(BIRDSEYE_PIPE, mode=0o777)
+ stdin = os.open(BIRDSEYE_PIPE, os.O_RDONLY | os.O_NONBLOCK)
+ self.bd_pipe = os.open(BIRDSEYE_PIPE, os.O_WRONLY)
+ os.close(stdin)
+ self.reading_birdseye = False
+
+ def write(self, b) -> None:
self.process.stdin.write(b)
+ if self.bd_pipe:
+ try:
+ os.write(self.bd_pipe, b)
+ self.reading_birdseye = True
+ except BrokenPipeError:
+ if self.reading_birdseye:
+ # we know the pipe was being read from and now it is not
+ # so we should recreate the pipe to ensure no partially-read
+ # frames exist
+ logger.debug(
+ "Recreating the birdseye pipe because it was read from and now is not"
+ )
+ self.recreate_birdseye_pipe()
+
+ return
+
def read(self, length):
try:
return self.process.stdout.read1(length)
@@ -51,6 +113,9 @@ def read(self, length):
return False
def exit(self):
+ if self.bd_pipe:
+ os.close(self.bd_pipe)
+
self.process.terminate()
try:
self.process.communicate(timeout=30)
@@ -60,14 +125,15 @@ def exit(self):
class BroadcastThread(threading.Thread):
- def __init__(self, camera, converter, websocket_server):
+ def __init__(self, camera, converter, websocket_server, stop_event):
super(BroadcastThread, self).__init__()
self.camera = camera
self.converter = converter
self.websocket_server = websocket_server
+ self.stop_event = stop_event
def run(self):
- while True:
+ while not self.stop_event.is_set():
buf = self.converter.read(65536)
if buf:
manager = self.websocket_server.manager
@@ -89,7 +155,7 @@ def run(self):
class BirdsEyeFrameManager:
- def __init__(self, config, frame_manager: SharedMemoryFrameManager):
+ def __init__(self, config: FrigateConfig, frame_manager: SharedMemoryFrameManager):
self.config = config
self.mode = config.birdseye.mode
self.frame_manager = frame_manager
@@ -99,7 +165,7 @@ def __init__(self, config, frame_manager: SharedMemoryFrameManager):
self.yuv_shape = (height * 3 // 2, width)
self.frame = np.ndarray(self.yuv_shape, dtype=np.uint8)
- # initialize the frame as black and with the frigate logo
+ # initialize the frame as black and with the Frigate logo
self.blank_frame = np.zeros(self.yuv_shape, np.uint8)
self.blank_frame[:] = 128
self.blank_frame[0 : self.frame_shape[0], 0 : self.frame_shape[1]] = 16
@@ -113,7 +179,7 @@ def __init__(self, config, frame_manager: SharedMemoryFrameManager):
birdseye_logo = cv2.imread(custom_logo_files[0], cv2.IMREAD_UNCHANGED)
if birdseye_logo is None:
- logo_files = glob.glob("/opt/frigate/frigate/birdseye.png")
+ logo_files = glob.glob("/opt/frigate/frigate/images/birdseye.png")
if len(logo_files) > 0:
birdseye_logo = cv2.imread(logo_files[0], cv2.IMREAD_UNCHANGED)
@@ -127,7 +193,7 @@ def __init__(self, config, frame_manager: SharedMemoryFrameManager):
x_offset : x_offset + transparent_layer.shape[0],
] = transparent_layer
else:
- logger.warning("Unable to read frigate logo")
+ logger.warning("Unable to read Frigate logo")
self.frame[:] = self.blank_frame
@@ -377,7 +443,7 @@ def receiveSignal(signalNumber, frame):
cam_config.live.quality,
)
broadcasters[camera] = BroadcastThread(
- camera, converters[camera], websocket_server
+ camera, converters[camera], websocket_server, stop_event
)
if config.birdseye.enabled:
@@ -387,9 +453,10 @@ def receiveSignal(signalNumber, frame):
config.birdseye.width,
config.birdseye.height,
config.birdseye.quality,
+ config.birdseye.restream,
)
broadcasters["birdseye"] = BroadcastThread(
- "birdseye", converters["birdseye"], websocket_server
+ "birdseye", converters["birdseye"], websocket_server, stop_event
)
websocket_thread.start()
@@ -399,6 +466,12 @@ def receiveSignal(signalNumber, frame):
birdseye_manager = BirdsEyeFrameManager(config, frame_manager)
+ if config.birdseye.restream:
+ birdseye_buffer = frame_manager.create(
+ "birdseye",
+ birdseye_manager.yuv_shape[0] * birdseye_manager.yuv_shape[1],
+ )
+
while not stop_event.is_set():
try:
(
@@ -407,7 +480,7 @@ def receiveSignal(signalNumber, frame):
current_tracked_objects,
motion_boxes,
regions,
- ) = video_output_queue.get(True, 10)
+ ) = video_output_queue.get(True, 1)
except queue.Empty:
continue
@@ -422,10 +495,12 @@ def receiveSignal(signalNumber, frame):
# write to the converter for the camera if clients are listening to the specific camera
converters[camera].write(frame.tobytes())
- # update birdseye if websockets are connected
- if config.birdseye.enabled and any(
- ws.environ["PATH_INFO"].endswith("birdseye")
- for ws in websocket_server.manager
+ if config.birdseye.enabled and (
+ config.birdseye.restream
+ or any(
+ ws.environ["PATH_INFO"].endswith("birdseye")
+ for ws in websocket_server.manager
+ )
):
if birdseye_manager.update(
camera,
@@ -434,7 +509,12 @@ def receiveSignal(signalNumber, frame):
frame_time,
frame,
):
- converters["birdseye"].write(birdseye_manager.frame.tobytes())
+ frame_bytes = birdseye_manager.frame.tobytes()
+
+ if config.birdseye.restream:
+ birdseye_buffer[:] = frame_bytes
+
+ converters["birdseye"].write(frame_bytes)
if camera in previous_frames:
frame_manager.delete(f"{camera}{previous_frames[camera]}")
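The birdseye restream added above pushes raw frames into a named pipe and recreates it when the
reader disappears. A standalone sketch of that FIFO pattern, using a placeholder path rather than
the BIRDSEYE_PIPE constant:

import os

PIPE_PATH = "/tmp/example_birdseye_pipe"  # placeholder, not the real BIRDSEYE_PIPE

if os.path.exists(PIPE_PATH):
    os.remove(PIPE_PATH)
os.mkfifo(PIPE_PATH, mode=0o777)

# open a throwaway non-blocking read end first so opening the write end does not block
rd = os.open(PIPE_PATH, os.O_RDONLY | os.O_NONBLOCK)
wr = os.open(PIPE_PATH, os.O_WRONLY)
os.close(rd)

try:
    os.write(wr, b"frame bytes")
except BrokenPipeError:
    # nothing is reading the pipe; the patch recreates it here to drop partially-read frames
    pass
finally:
    os.close(wr)
    os.remove(PIPE_PATH)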
diff --git a/frigate/record.py b/frigate/record.py
--- a/frigate/record.py
+++ b/frigate/record.py
@@ -5,7 +5,6 @@
import os
import queue
import random
-import shutil
import string
import subprocess as sp
import threading
@@ -16,7 +15,7 @@
from peewee import JOIN, DoesNotExist
from frigate.config import RetainModeEnum, FrigateConfig
-from frigate.const import CACHE_DIR, RECORD_DIR
+from frigate.const import CACHE_DIR, MAX_SEGMENT_DURATION, RECORD_DIR
from frigate.models import Event, Recordings
from frigate.util import area
@@ -101,19 +100,12 @@ def move_files(self):
for camera in grouped_recordings.keys():
segment_count = len(grouped_recordings[camera])
if segment_count > keep_count:
- ####
- # Need to find a way to tell if these are aging out based on retention settings or if the system is overloaded.
- ####
- # logger.warning(
- # f"Too many recording segments in cache for {camera}. Keeping the {keep_count} most recent segments out of {segment_count}, discarding the rest..."
- # )
+ logger.warning(
+ f"Unable to keep up with recording segments in cache for {camera}. Keeping the {keep_count} most recent segments out of {segment_count} and discarding the rest..."
+ )
to_remove = grouped_recordings[camera][:-keep_count]
for f in to_remove:
cache_path = f["cache_path"]
- ####
- # Need to find a way to tell if these are aging out based on retention settings or if the system is overloaded.
- ####
- # logger.warning(f"Discarding a recording segment: {cache_path}")
Path(cache_path).unlink(missing_ok=True)
self.end_time_cache.pop(cache_path, None)
grouped_recordings[camera] = grouped_recordings[camera][-keep_count:]
@@ -169,10 +161,22 @@ def move_files(self):
p = sp.run(ffprobe_cmd, capture_output=True)
if p.returncode == 0 and p.stdout.decode():
duration = float(p.stdout.decode().strip())
+ else:
+ duration = -1
+
+ # ensure duration is within expected length
+ if 0 < duration < MAX_SEGMENT_DURATION:
end_time = start_time + datetime.timedelta(seconds=duration)
self.end_time_cache[cache_path] = (end_time, duration)
else:
- logger.warning(f"Discarding a corrupt recording segment: {f}")
+ if duration == -1:
+ logger.warning(
+ f"Failed to probe corrupt segment {cache_path}: {p.returncode} - {p.stderr}"
+ )
+
+ logger.warning(
+ f"Discarding a corrupt recording segment: {cache_path}"
+ )
Path(cache_path).unlink(missing_ok=True)
continue
@@ -218,6 +222,19 @@ def move_files(self):
cache_path,
record_mode,
)
+ # if it doesn't overlap with an event, go ahead and drop the segment
+ # if it ends more than the configured pre_capture for the camera
+ else:
+ pre_capture = self.config.cameras[
+ camera
+ ].record.events.pre_capture
+ most_recently_processed_frame_time = self.recordings_info[
+ camera
+ ][-1][0]
+ retain_cutoff = most_recently_processed_frame_time - pre_capture
+ if end_time.timestamp() < retain_cutoff:
+ Path(cache_path).unlink(missing_ok=True)
+ self.end_time_cache.pop(cache_path, None)
# else retain days includes this segment
else:
record_mode = self.config.cameras[camera].record.retain.mode
@@ -251,8 +268,8 @@ def segment_stats(self, camera, start_time, end_time):
def store_segment(
self,
camera,
- start_time,
- end_time,
+ start_time: datetime.datetime,
+ end_time: datetime.datetime,
duration,
cache_path,
store_mode: RetainModeEnum,
@@ -267,23 +284,63 @@ def store_segment(
self.end_time_cache.pop(cache_path, None)
return
- directory = os.path.join(RECORD_DIR, start_time.strftime("%Y-%m/%d/%H"), camera)
+ directory = os.path.join(
+ RECORD_DIR,
+ start_time.astimezone(tz=datetime.timezone.utc).strftime("%Y-%m-%d/%H"),
+ camera,
+ )
if not os.path.exists(directory):
os.makedirs(directory)
- file_name = f"{start_time.strftime('%M.%S.mp4')}"
+ file_name = (
+ f"{start_time.replace(tzinfo=datetime.timezone.utc).strftime('%M.%S.mp4')}"
+ )
file_path = os.path.join(directory, file_name)
try:
if not os.path.exists(file_path):
start_frame = datetime.datetime.now().timestamp()
- # copy then delete is required when recordings are stored on some network drives
- shutil.copyfile(cache_path, file_path)
- logger.debug(
- f"Copied {file_path} in {datetime.datetime.now().timestamp()-start_frame} seconds."
+
+ # add faststart to kept segments to improve metadata reading
+ ffmpeg_cmd = [
+ "ffmpeg",
+ "-y",
+ "-i",
+ cache_path,
+ "-c",
+ "copy",
+ "-movflags",
+ "+faststart",
+ file_path,
+ ]
+
+ p = sp.run(
+ ffmpeg_cmd,
+ encoding="ascii",
+ capture_output=True,
)
+ if p.returncode != 0:
+ logger.error(f"Unable to convert {cache_path} to {file_path}")
+ logger.error(p.stderr)
+ return
+ else:
+ logger.debug(
+ f"Copied {file_path} in {datetime.datetime.now().timestamp()-start_frame} seconds."
+ )
+
+ try:
+ # get the segment size of the cache file
+ # file without faststart is same size
+ segment_size = round(
+ float(os.path.getsize(cache_path)) / 1000000, 1
+ )
+ except OSError:
+ segment_size = 0
+
+ os.remove(cache_path)
+
rand_id = "".join(
random.choices(string.ascii_lowercase + string.digits, k=6)
)
@@ -297,10 +354,8 @@ def store_segment(
motion=motion_count,
# TODO: update this to store list of active objects at some point
objects=active_count,
+ segment_size=segment_size,
)
- else:
- logger.warning(f"Ignoring segment because {file_path} already exists.")
- os.remove(cache_path)
except Exception as e:
logger.error(f"Unable to store recording segment {cache_path}")
Path(cache_path).unlink(missing_ok=True)
@@ -364,6 +419,10 @@ def clean_tmp_clips(self):
logger.debug(f"Checking tmp clip {p}.")
if p.stat().st_mtime < (datetime.datetime.now().timestamp() - 60 * 1):
logger.debug("Deleting tmp clip.")
+
+ # empty contents of file before unlinking https://github.com/blakeblackshear/frigate/issues/4769
+ with open(p, "w"):
+ pass
p.unlink(missing_ok=True)
def expire_recordings(self):
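store_segment above now remuxes each cached segment with +faststart instead of copying the file.
A sketch of that remux step in isolation, assuming ffmpeg is on PATH and the placeholder paths
exist:

import subprocess as sp

cache_path = "/tmp/cache/example.mp4"                  # placeholder paths
file_path = "/media/frigate/recordings/example.mp4"

ffmpeg_cmd = [
    "ffmpeg",
    "-y",
    "-i", cache_path,
    "-c", "copy",                # stream copy, no re-encode
    "-movflags", "+faststart",   # put the moov atom up front for faster metadata reads
    file_path,
]

p = sp.run(ffmpeg_cmd, encoding="ascii", capture_output=True)
if p.returncode != 0:
    print(f"remux failed: {p.stderr}")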
diff --git a/frigate/stats.py b/frigate/stats.py
--- a/frigate/stats.py
+++ b/frigate/stats.py
@@ -1,3 +1,4 @@
+import asyncio
import json
import logging
import threading
@@ -7,19 +8,25 @@
import os
import requests
from typing import Optional, Any
-from paho.mqtt.client import Client
-from multiprocessing.synchronize import Event
+from multiprocessing.synchronize import Event as MpEvent
+from frigate.comms.dispatcher import Dispatcher
from frigate.config import FrigateConfig
-from frigate.const import RECORD_DIR, CLIPS_DIR, CACHE_DIR
+from frigate.const import DRIVER_AMD, DRIVER_ENV_VAR, RECORD_DIR, CLIPS_DIR, CACHE_DIR
from frigate.types import StatsTrackingTypes, CameraMetricsTypes
+from frigate.util import get_amd_gpu_stats, get_intel_gpu_stats, get_nvidia_gpu_stats
from frigate.version import VERSION
-from frigate.edgetpu import EdgeTPUProcess
+from frigate.util import get_cpu_stats
+from frigate.object_detection import ObjectDetectProcess
logger = logging.getLogger(__name__)
-def get_latest_version() -> str:
+def get_latest_version(config: FrigateConfig) -> str:
+
+ if not config.telemetry.version_check:
+ return "disabled"
+
try:
request = requests.get(
"https://api.github.com/repos/blakeblackshear/frigate/releases/latest",
@@ -37,13 +44,16 @@ def get_latest_version() -> str:
def stats_init(
- camera_metrics: dict[str, CameraMetricsTypes], detectors: dict[str, EdgeTPUProcess]
+ config: FrigateConfig,
+ camera_metrics: dict[str, CameraMetricsTypes],
+ detectors: dict[str, ObjectDetectProcess],
) -> StatsTrackingTypes:
stats_tracking: StatsTrackingTypes = {
"camera_metrics": camera_metrics,
"detectors": detectors,
"started": int(time.time()),
- "latest_frigate_version": get_latest_version(),
+ "latest_frigate_version": get_latest_version(config),
+ "last_updated": int(time.time()),
}
return stats_tracking
@@ -80,7 +90,116 @@ def get_temperatures() -> dict[str, float]:
return temps
-def stats_snapshot(stats_tracking: StatsTrackingTypes) -> dict[str, Any]:
+def get_processing_stats(
+ config: FrigateConfig, stats: dict[str, str], hwaccel_errors: list[str]
+) -> None:
+ """Get stats for cpu / gpu."""
+
+ async def run_tasks() -> None:
+ await asyncio.wait(
+ [
+ asyncio.create_task(set_gpu_stats(config, stats, hwaccel_errors)),
+ asyncio.create_task(set_cpu_stats(stats)),
+ ]
+ )
+
+ loop = asyncio.new_event_loop()
+ asyncio.set_event_loop(loop)
+ loop.run_until_complete(run_tasks())
+ loop.close()
+
+
+async def set_cpu_stats(all_stats: dict[str, Any]) -> None:
+ """Set cpu usage from top."""
+ cpu_stats = get_cpu_stats()
+
+ if cpu_stats:
+ all_stats["cpu_usages"] = cpu_stats
+
+
+async def set_gpu_stats(
+ config: FrigateConfig, all_stats: dict[str, Any], hwaccel_errors: list[str]
+) -> None:
+ """Parse GPUs from hwaccel args and use for stats."""
+ hwaccel_args = []
+
+ for camera in config.cameras.values():
+ args = camera.ffmpeg.hwaccel_args
+
+ if isinstance(args, list):
+ args = " ".join(args)
+
+ if args and args not in hwaccel_args:
+ hwaccel_args.append(args)
+
+ for stream_input in camera.ffmpeg.inputs:
+ args = stream_input.hwaccel_args
+
+ if isinstance(args, list):
+ args = " ".join(args)
+
+ if args and args not in hwaccel_args:
+ hwaccel_args.append(args)
+
+ stats: dict[str, dict] = {}
+
+ for args in hwaccel_args:
+ if args in hwaccel_errors:
+ # known erroring args should automatically return as error
+ stats["error-gpu"] = {"gpu": -1, "mem": -1}
+ elif "cuvid" in args or "nvidia" in args:
+ # nvidia GPU
+ nvidia_usage = get_nvidia_gpu_stats()
+
+ if nvidia_usage:
+ name = nvidia_usage["name"]
+ del nvidia_usage["name"]
+ stats[name] = nvidia_usage
+ else:
+ stats["nvidia-gpu"] = {"gpu": -1, "mem": -1}
+ hwaccel_errors.append(args)
+ elif "qsv" in args:
+ # intel QSV GPU
+ intel_usage = get_intel_gpu_stats()
+
+ if intel_usage:
+ stats["intel-qsv"] = intel_usage
+ else:
+ stats["intel-qsv"] = {"gpu": -1, "mem": -1}
+ hwaccel_errors.append(args)
+ elif "vaapi" in args:
+ driver = os.environ.get(DRIVER_ENV_VAR)
+
+ if driver == DRIVER_AMD:
+ # AMD VAAPI GPU
+ amd_usage = get_amd_gpu_stats()
+
+ if amd_usage:
+ stats["amd-vaapi"] = amd_usage
+ else:
+ stats["amd-vaapi"] = {"gpu": -1, "mem": -1}
+ hwaccel_errors.append(args)
+ else:
+ # intel VAAPI GPU
+ intel_usage = get_intel_gpu_stats()
+
+ if intel_usage:
+ stats["intel-vaapi"] = intel_usage
+ else:
+ stats["intel-vaapi"] = {"gpu": -1, "mem": -1}
+ hwaccel_errors.append(args)
+ elif "v4l2m2m" in args or "rpi" in args:
+ # RPi v4l2m2m is currently not able to get usage stats
+ stats["rpi-v4l2m2m"] = {"gpu": -1, "mem": -1}
+
+ if stats:
+ all_stats["gpu_usages"] = stats
+
+
+def stats_snapshot(
+ config: FrigateConfig, stats_tracking: StatsTrackingTypes, hwaccel_errors: list[str]
+) -> dict[str, Any]:
+ """Get a snapshot of the current stats that are being tracked."""
camera_metrics = stats_tracking["camera_metrics"]
stats: dict[str, Any] = {}
@@ -89,6 +208,9 @@ def stats_snapshot(stats_tracking: StatsTrackingTypes) -> dict[str, Any]:
for name, camera_stats in camera_metrics.items():
total_detection_fps += camera_stats["detection_fps"].value
pid = camera_stats["process"].pid if camera_stats["process"] else None
+ ffmpeg_pid = (
+ camera_stats["ffmpeg_pid"].value if camera_stats["ffmpeg_pid"] else None
+ )
cpid = (
camera_stats["capture_process"].pid
if camera_stats["capture_process"]
@@ -99,8 +221,10 @@ def stats_snapshot(stats_tracking: StatsTrackingTypes) -> dict[str, Any]:
"process_fps": round(camera_stats["process_fps"].value, 2),
"skipped_fps": round(camera_stats["skipped_fps"].value, 2),
"detection_fps": round(camera_stats["detection_fps"].value, 2),
+ "detection_enabled": camera_stats["detection_enabled"].value,
"pid": pid,
"capture_pid": cpid,
+ "ffmpeg_pid": ffmpeg_pid,
}
stats["detectors"] = {}
@@ -113,16 +237,23 @@ def stats_snapshot(stats_tracking: StatsTrackingTypes) -> dict[str, Any]:
}
stats["detection_fps"] = round(total_detection_fps, 2)
+ get_processing_stats(config, stats, hwaccel_errors)
+
stats["service"] = {
"uptime": (int(time.time()) - stats_tracking["started"]),
"version": VERSION,
"latest_version": stats_tracking["latest_frigate_version"],
"storage": {},
"temperatures": get_temperatures(),
+ "last_updated": int(time.time()),
}
for path in [RECORD_DIR, CLIPS_DIR, CACHE_DIR, "/dev/shm"]:
- storage_stats = shutil.disk_usage(path)
+ try:
+ storage_stats = shutil.disk_usage(path)
+ except FileNotFoundError:
+ stats["service"]["storage"][path] = {}
+
stats["service"]["storage"][path] = {
"total": round(storage_stats.total / 1000000, 1),
"used": round(storage_stats.used / 1000000, 1),
@@ -138,23 +269,24 @@ def __init__(
self,
config: FrigateConfig,
stats_tracking: StatsTrackingTypes,
- mqtt_client: Client,
- topic_prefix: str,
- stop_event: Event,
+ dispatcher: Dispatcher,
+ stop_event: MpEvent,
):
threading.Thread.__init__(self)
self.name = "frigate_stats_emitter"
self.config = config
self.stats_tracking = stats_tracking
- self.mqtt_client = mqtt_client
- self.topic_prefix = topic_prefix
+ self.dispatcher = dispatcher
self.stop_event = stop_event
+ self.hwaccel_errors: list[str] = []
def run(self) -> None:
time.sleep(10)
while not self.stop_event.wait(self.config.mqtt.stats_interval):
- stats = stats_snapshot(self.stats_tracking)
- self.mqtt_client.publish(
- f"{self.topic_prefix}/stats", json.dumps(stats), retain=False
+ logger.debug("Starting stats collection")
+ stats = stats_snapshot(
+ self.config, self.stats_tracking, self.hwaccel_errors
)
- logger.info(f"Exiting watchdog...")
+ self.dispatcher.publish("stats", json.dumps(stats), retain=False)
+ logger.debug("Finished stats collection")
+ logger.info(f"Exiting stats emitter...")
diff --git a/frigate/storage.py b/frigate/storage.py
new file mode 100644
--- /dev/null
+++ b/frigate/storage.py
@@ -0,0 +1,192 @@
+"""Handle storage retention and usage."""
+
+import logging
+from pathlib import Path
+import shutil
+import threading
+
+from peewee import fn
+
+from frigate.config import FrigateConfig
+from frigate.const import RECORD_DIR
+from frigate.models import Event, Recordings
+
+logger = logging.getLogger(__name__)
+bandwidth_equation = Recordings.segment_size / (
+ Recordings.end_time - Recordings.start_time
+)
+
+
+class StorageMaintainer(threading.Thread):
+ """Maintain frigates recording storage."""
+
+ def __init__(self, config: FrigateConfig, stop_event) -> None:
+ threading.Thread.__init__(self)
+ self.name = "storage_maintainer"
+ self.config = config
+ self.stop_event = stop_event
+ self.camera_storage_stats: dict[str, dict] = {}
+
+ def calculate_camera_bandwidth(self) -> None:
+ """Calculate an average MB/hr for each camera."""
+ for camera in self.config.cameras.keys():
+ # cameras with < 50 segments should be refreshed to keep size accurate
+ # when few segments are available
+ if self.camera_storage_stats.get(camera, {}).get("needs_refresh", True):
+ self.camera_storage_stats[camera] = {
+ "needs_refresh": (
+ Recordings.select(fn.COUNT(Recordings.id))
+ .where(
+ Recordings.camera == camera, Recordings.segment_size != 0
+ )
+ .scalar()
+ < 50
+ )
+ }
+
+ # calculate MB/hr
+ try:
+ bandwidth = round(
+ Recordings.select(fn.AVG(bandwidth_equation))
+ .where(Recordings.camera == camera, Recordings.segment_size != 0)
+ .limit(100)
+ .scalar()
+ * 3600,
+ 2,
+ )
+ except TypeError:
+ bandwidth = 0
+
+ self.camera_storage_stats[camera]["bandwidth"] = bandwidth
+ logger.debug(f"{camera} has a bandwidth of {bandwidth} MB/hr.")
+
+ def calculate_camera_usages(self) -> dict[str, dict]:
+ """Calculate the storage usage of each camera."""
+ usages: dict[str, dict] = {}
+
+ for camera in self.config.cameras.keys():
+ camera_storage = (
+ Recordings.select(fn.SUM(Recordings.segment_size))
+ .where(Recordings.camera == camera, Recordings.segment_size != 0)
+ .scalar()
+ )
+
+ usages[camera] = {
+ "usage": camera_storage,
+ "bandwidth": self.camera_storage_stats.get(camera, {}).get(
+ "bandwidth", 0
+ ),
+ }
+
+ return usages
+
+ def check_storage_needs_cleanup(self) -> bool:
+ """Return if storage needs cleanup."""
+ # currently runs cleanup if less than 1 hour of space is left
+ # disk_usage should not spin up disks
+ hourly_bandwidth = sum(
+ [b["bandwidth"] for b in self.camera_storage_stats.values()]
+ )
+ remaining_storage = round(shutil.disk_usage(RECORD_DIR).free / 1000000, 1)
+ logger.debug(
+ f"Storage cleanup check: {hourly_bandwidth} hourly with remaining storage: {remaining_storage}."
+ )
+ return remaining_storage < hourly_bandwidth
+
+ def reduce_storage_consumption(self) -> None:
+ """Remove oldest hour of recordings."""
+ logger.debug("Starting storage cleanup.")
+ deleted_segments_size = 0
+ hourly_bandwidth = sum(
+ [b["bandwidth"] for b in self.camera_storage_stats.values()]
+ )
+
+ recordings: Recordings = Recordings.select().order_by(
+ Recordings.start_time.asc()
+ )
+ retained_events: Event = (
+ Event.select()
+ .where(
+ Event.retain_indefinitely == True,
+ Event.has_clip,
+ )
+ .order_by(Event.start_time.asc())
+ .objects()
+ )
+
+ event_start = 0
+ deleted_recordings = set()
+ for recording in recordings.objects().iterator():
+ # check if 1 hour of storage has been reclaimed
+ if deleted_segments_size > hourly_bandwidth:
+ break
+
+ keep = False
+
+ # Now look for a reason to keep this recording segment
+ for idx in range(event_start, len(retained_events)):
+ event = retained_events[idx]
+
+ # if the event starts in the future, stop checking events
+ # and let this recording segment expire
+ if event.start_time > recording.end_time:
+ keep = False
+ break
+
+ # if the event is in progress or ends after the recording starts, keep it
+ # and stop looking at events
+ if event.end_time is None or event.end_time >= recording.start_time:
+ keep = True
+ break
+
+ # if the event ends before this recording segment starts, skip
+ # this event and check the next event for an overlap.
+ # since the events and recordings are sorted, we can skip events
+ # that end before the previous recording segment started on future segments
+ if event.end_time < recording.start_time:
+ event_start = idx
+
+ # Delete recordings not retained indefinitely
+ if not keep:
+ deleted_segments_size += recording.segment_size
+ Path(recording.path).unlink(missing_ok=True)
+ deleted_recordings.add(recording.id)
+
+ # check if need to delete retained segments
+ if deleted_segments_size < hourly_bandwidth:
+ logger.error(
+ f"Could not clear {hourly_bandwidth} currently {deleted_segments_size}, retained recordings must be deleted."
+ )
+ recordings = Recordings.select().order_by(Recordings.start_time.asc())
+
+ for recording in recordings.objects().iterator():
+ if deleted_segments_size > hourly_bandwidth:
+ break
+
+ deleted_segments_size += recording.segment_size
+ Path(recording.path).unlink(missing_ok=True)
+ deleted_recordings.add(recording.id)
+
+ logger.debug(f"Expiring {len(deleted_recordings)} recordings")
+ # delete up to 100,000 at a time
+ max_deletes = 100000
+ deleted_recordings_list = list(deleted_recordings)
+ for i in range(0, len(deleted_recordings_list), max_deletes):
+ Recordings.delete().where(
+ Recordings.id << deleted_recordings_list[i : i + max_deletes]
+ ).execute()
+
+ def run(self):
+ """Check every 5 minutes if storage needs to be cleaned up."""
+ while not self.stop_event.wait(300):
+
+ if not self.camera_storage_stats or True in [
+ r["needs_refresh"] for r in self.camera_storage_stats.values()
+ ]:
+ self.calculate_camera_bandwidth()
+ logger.debug(f"Default camera bandwidths: {self.camera_storage_stats}.")
+
+ if self.check_storage_needs_cleanup():
+ self.reduce_storage_consumption()
+
+ logger.info(f"Exiting storage maintainer...")
diff --git a/frigate/types.py b/frigate/types.py
--- a/frigate/types.py
+++ b/frigate/types.py
@@ -3,7 +3,7 @@
from multiprocessing.sharedctypes import Synchronized
from multiprocessing.context import Process
-from frigate.edgetpu import EdgeTPUProcess
+from frigate.object_detection import ObjectDetectProcess
class CameraMetricsTypes(TypedDict):
@@ -26,6 +26,7 @@ class CameraMetricsTypes(TypedDict):
class StatsTrackingTypes(TypedDict):
camera_metrics: dict[str, CameraMetricsTypes]
- detectors: dict[str, EdgeTPUProcess]
+ detectors: dict[str, ObjectDetectProcess]
started: int
latest_frigate_version: str
+ last_updated: int
diff --git a/frigate/util.py b/frigate/util.py
--- a/frigate/util.py
+++ b/frigate/util.py
@@ -1,24 +1,28 @@
import copy
import datetime
-import hashlib
-import json
import logging
-import math
-import signal
+import shlex
import subprocess as sp
-import threading
-import time
+import json
+import re
+import signal
import traceback
+import urllib.parse
+import yaml
+
from abc import ABC, abstractmethod
+from collections import Counter
from collections.abc import Mapping
from multiprocessing import shared_memory
-from typing import AnyStr
+from typing import Any, AnyStr, Optional, Tuple
import cv2
-import matplotlib.pyplot as plt
import numpy as np
import os
import psutil
+import pytz
+
+from frigate.const import REGEX_HTTP_CAMERA_USER_PASS, REGEX_RTSP_CAMERA_USER_PASS
logger = logging.getLogger(__name__)
@@ -47,6 +51,33 @@ def deep_merge(dct1: dict, dct2: dict, override=False, merge_lists=False) -> dic
return merged
+def load_config_with_no_duplicates(raw_config) -> dict:
+ """Get config ensuring duplicate keys are not allowed."""
+
+ # https://stackoverflow.com/a/71751051
+ class PreserveDuplicatesLoader(yaml.loader.Loader):
+ pass
+
+ def map_constructor(loader, node, deep=False):
+ keys = [loader.construct_object(node, deep=deep) for node, _ in node.value]
+ vals = [loader.construct_object(node, deep=deep) for _, node in node.value]
+ key_count = Counter(keys)
+ data = {}
+ for key, val in zip(keys, vals):
+ if key_count[key] > 1:
+ raise ValueError(
+ f"Config input {key} is defined multiple times for the same field, this is not allowed."
+ )
+ else:
+ data[key] = val
+ return data
+
+ PreserveDuplicatesLoader.add_constructor(
+ yaml.resolver.BaseResolver.DEFAULT_MAPPING_TAG, map_constructor
+ )
+ return yaml.load(raw_config, PreserveDuplicatesLoader)
+
+
def draw_timestamp(
frame,
timestamp,
@@ -350,6 +381,47 @@ def yuv_crop_and_resize(frame, region, height=None):
return yuv_cropped_frame
+def yuv_to_3_channel_yuv(yuv_frame):
+ height = yuv_frame.shape[0] // 3 * 2
+ width = yuv_frame.shape[1]
+
+ # flatten the image into array
+ yuv_data = yuv_frame.ravel()
+
+    # create a numpy array to hold all the 3-channel yuv data
+ all_yuv_data = np.empty((height, width, 3), dtype=np.uint8)
+
+ y_count = height * width
+ uv_count = y_count // 4
+
+ # copy the y_channel
+ all_yuv_data[:, :, 0] = yuv_data[0:y_count].reshape((height, width))
+ # copy the u channel doubling each dimension
+ all_yuv_data[:, :, 1] = np.repeat(
+ np.reshape(
+ np.repeat(yuv_data[y_count : y_count + uv_count], repeats=2, axis=0),
+ (height // 2, width),
+ ),
+ repeats=2,
+ axis=0,
+ )
+ # copy the v channel doubling each dimension
+ all_yuv_data[:, :, 2] = np.repeat(
+ np.reshape(
+ np.repeat(
+ yuv_data[y_count + uv_count : y_count + uv_count + uv_count],
+ repeats=2,
+ axis=0,
+ ),
+ (height // 2, width),
+ ),
+ repeats=2,
+ axis=0,
+ )
+
+ return all_yuv_data
+
+
def copy_yuv_to_position(
destination_frame,
destination_offset,
@@ -468,6 +540,17 @@ def copy_yuv_to_position(
)
+def yuv_region_2_yuv(frame, region):
+ try:
+ # TODO: does this copy the numpy array?
+ yuv_cropped_frame = yuv_crop_and_resize(frame, region)
+ return yuv_to_3_channel_yuv(yuv_cropped_frame)
+ except:
+ print(f"frame.shape: {frame.shape}")
+ print(f"region: {region}")
+ raise
+
+
def yuv_region_2_rgb(frame, region):
try:
# TODO: does this copy the numpy array?
@@ -479,6 +562,16 @@ def yuv_region_2_rgb(frame, region):
raise
+def yuv_region_2_bgr(frame, region):
+ try:
+ yuv_cropped_frame = yuv_crop_and_resize(frame, region)
+ return cv2.cvtColor(yuv_cropped_frame, cv2.COLOR_YUV2BGR_I420)
+ except:
+ print(f"frame.shape: {frame.shape}")
+ print(f"region: {region}")
+ raise
+
+
def intersection(box_a, box_b):
return (
max(box_a[0], box_b[0]),
@@ -614,15 +707,288 @@ def load_labels(path, encoding="utf-8"):
Dictionary mapping indices to labels.
"""
with open(path, "r", encoding=encoding) as f:
+ labels = {index: "unknown" for index in range(91)}
lines = f.readlines()
if not lines:
return {}
if lines[0].split(" ", maxsplit=1)[0].isdigit():
pairs = [line.split(" ", maxsplit=1) for line in lines]
- return {int(index): label.strip() for index, label in pairs}
+ labels.update({int(index): label.strip() for index, label in pairs})
+ else:
+ labels.update({index: line.strip() for index, line in enumerate(lines)})
+ return labels
+
+
+def clean_camera_user_pass(line: str) -> str:
+ """Removes user and password from line."""
+ if "rtsp://" in line:
+ return re.sub(REGEX_RTSP_CAMERA_USER_PASS, "://*:*@", line)
+ else:
+ return re.sub(REGEX_HTTP_CAMERA_USER_PASS, "user=*&password=*", line)
+
+
+def escape_special_characters(path: str) -> str:
+ """Cleans reserved characters to encodings for ffmpeg."""
+ try:
+ found = re.search(REGEX_RTSP_CAMERA_USER_PASS, path).group(0)[3:-1]
+ pw = found[(found.index(":") + 1) :]
+ return path.replace(pw, urllib.parse.quote_plus(pw))
+ except AttributeError:
+ # path does not have user:pass
+ return path
+
+
+def get_cgroups_version() -> str:
+ """Determine what version of cgroups is enabled"""
+
+ stat_command = ["stat", "-fc", "%T", "/sys/fs/cgroup"]
+
+ p = sp.run(
+ stat_command,
+ encoding="ascii",
+ capture_output=True,
+ )
+
+ if p.returncode == 0:
+ value: str = p.stdout.strip().lower()
+
+ if value == "cgroup2fs":
+ return "cgroup2"
+ elif value == "tmpfs":
+ return "cgroup"
+ else:
+ logger.debug(
+ f"Could not determine cgroups version: unhandled filesystem {value}"
+ )
+ else:
+ logger.debug(f"Could not determine cgroups version: {p.stderr}")
+
+ return "unknown"
+
+
+def get_docker_memlimit_bytes() -> int:
+ """Get mem limit in bytes set in docker if present. Returns -1 if no limit detected"""
+
+ # check running a supported cgroups version
+ if get_cgroups_version() == "cgroup2":
+
+ memlimit_command = ["cat", "/sys/fs/cgroup/memory.max"]
+
+ p = sp.run(
+ memlimit_command,
+ encoding="ascii",
+ capture_output=True,
+ )
+
+ if p.returncode == 0:
+ value: str = p.stdout.strip()
+
+ if value.isnumeric():
+ return int(value)
+ elif value.lower() == "max":
+ return -1
else:
- return {index: line.strip() for index, line in enumerate(lines)}
+ logger.debug(f"Unable to get docker memlimit: {p.stderr}")
+
+ return -1
+
+
+def get_cpu_stats() -> dict[str, dict]:
+ """Get cpu usages for each process id"""
+ usages = {}
+    # -n 2 runs top twice; the second, more accurate sample overwrites the first
+ top_command = ["top", "-b", "-n", "2"]
+
+ docker_memlimit = get_docker_memlimit_bytes() / 1024
+
+ p = sp.run(
+ top_command,
+ encoding="ascii",
+ capture_output=True,
+ )
+
+ if p.returncode != 0:
+ logger.error(p.stderr)
+ return usages
+ else:
+ lines = p.stdout.split("\n")
+
+ for line in lines:
+ stats = list(filter(lambda a: a != "", line.strip().split(" ")))
+ try:
+
+ if docker_memlimit > 0:
+ mem_res = int(stats[5])
+ mem_pct = str(
+ round((float(mem_res) / float(docker_memlimit)) * 100, 1)
+ )
+ else:
+ mem_pct = stats[9]
+
+ usages[stats[0]] = {
+ "cpu": stats[8],
+ "mem": mem_pct,
+ }
+ except:
+ continue
+
+ return usages
+
+
+def get_amd_gpu_stats() -> dict[str, str]:
+ """Get stats using radeontop."""
+ radeontop_command = ["radeontop", "-d", "-", "-l", "1"]
+
+ p = sp.run(
+ radeontop_command,
+ encoding="ascii",
+ capture_output=True,
+ )
+
+ if p.returncode != 0:
+ logger.error(f"Unable to poll radeon GPU stats: {p.stderr}")
+ return None
+ else:
+ usages = p.stdout.split(",")
+ results: dict[str, str] = {}
+
+ for hw in usages:
+ if "gpu" in hw:
+ results["gpu"] = f"{hw.strip().split(' ')[1].replace('%', '')} %"
+ elif "vram" in hw:
+ results["mem"] = f"{hw.strip().split(' ')[1].replace('%', '')} %"
+
+ return results
+
+
+def get_intel_gpu_stats() -> dict[str, str]:
+ """Get stats using intel_gpu_top."""
+ intel_gpu_top_command = [
+ "timeout",
+ "0.5s",
+ "intel_gpu_top",
+ "-J",
+ "-o",
+ "-",
+ "-s",
+ "1",
+ ]
+
+ p = sp.run(
+ intel_gpu_top_command,
+ encoding="ascii",
+ capture_output=True,
+ )
+
+ # timeout has a non-zero returncode when timeout is reached
+ if p.returncode != 124:
+ logger.error(f"Unable to poll intel GPU stats: {p.stderr}")
+ return None
+ else:
+ reading = "".join(p.stdout.split())
+ results: dict[str, str] = {}
+
+ # render is used for qsv
+ render = []
+ for result in re.findall('"Render/3D/0":{[a-z":\d.,%]+}', reading):
+ packet = json.loads(result[14:])
+ single = packet.get("busy", 0.0)
+ render.append(float(single))
+
+ if render:
+ render_avg = sum(render) / len(render)
+ else:
+ render_avg = 1
+
+ # video is used for vaapi
+ video = []
+ for result in re.findall('"Video/\d":{[a-z":\d.,%]+}', reading):
+ packet = json.loads(result[10:])
+ single = packet.get("busy", 0.0)
+ video.append(float(single))
+
+ if video:
+ video_avg = sum(video) / len(video)
+ else:
+ video_avg = 1
+
+ results["gpu"] = f"{round((video_avg + render_avg) / 2, 2)} %"
+ results["mem"] = "- %"
+ return results
+
+
+def get_nvidia_gpu_stats() -> dict[str, str]:
+ """Get stats using nvidia-smi."""
+ nvidia_smi_command = [
+ "nvidia-smi",
+ "--query-gpu=gpu_name,utilization.gpu,memory.used,memory.total",
+ "--format=csv",
+ ]
+
+ if (
+ "CUDA_VISIBLE_DEVICES" in os.environ
+ and os.environ["CUDA_VISIBLE_DEVICES"].isdigit()
+ ):
+ nvidia_smi_command.extend(["--id", os.environ["CUDA_VISIBLE_DEVICES"]])
+ elif (
+ "NVIDIA_VISIBLE_DEVICES" in os.environ
+ and os.environ["NVIDIA_VISIBLE_DEVICES"].isdigit()
+ ):
+ nvidia_smi_command.extend(["--id", os.environ["NVIDIA_VISIBLE_DEVICES"]])
+
+ p = sp.run(
+ nvidia_smi_command,
+ encoding="ascii",
+ capture_output=True,
+ )
+
+ if p.returncode != 0:
+ logger.error(f"Unable to poll nvidia GPU stats: {p.stderr}")
+ return None
+ else:
+ usages = p.stdout.split("\n")[1].strip().split(",")
+ memory_percent = f"{round(float(usages[2].replace(' MiB', '').strip()) / float(usages[3].replace(' MiB', '').strip()) * 100, 1)} %"
+ results: dict[str, str] = {
+ "name": usages[0],
+ "gpu": usages[1].strip(),
+ "mem": memory_percent,
+ }
+
+ return results
+
+
+def ffprobe_stream(path: str) -> sp.CompletedProcess:
+ """Run ffprobe on stream."""
+ clean_path = escape_special_characters(path)
+ ffprobe_cmd = [
+ "ffprobe",
+ "-timeout",
+ "1000000",
+ "-print_format",
+ "json",
+ "-show_entries",
+ "stream=codec_long_name,width,height,bit_rate,duration,display_aspect_ratio,avg_frame_rate",
+ "-loglevel",
+ "quiet",
+ clean_path,
+ ]
+ return sp.run(ffprobe_cmd, capture_output=True)
+
+
+def vainfo_hwaccel(device_name: Optional[str] = None) -> sp.CompletedProcess:
+ """Run vainfo."""
+ ffprobe_cmd = (
+ ["vainfo"]
+ if not device_name
+ else ["vainfo", "--display", "drm", "--device", f"/dev/dri/{device_name}"]
+ )
+ return sp.run(ffprobe_cmd, capture_output=True)
+
+
+def get_ffmpeg_arg_list(arg: Any) -> list:
+ """Use arg if list or convert to list format."""
+ return arg if isinstance(arg, list) else shlex.split(arg)
class FrameManager(ABC):
@@ -690,3 +1056,14 @@ def delete(self, name):
self.shm_store[name].close()
self.shm_store[name].unlink()
del self.shm_store[name]
+
+
+def get_tz_modifiers(tz_name: str) -> Tuple[str, str]:
+ seconds_offset = (
+ datetime.datetime.now(pytz.timezone(tz_name)).utcoffset().total_seconds()
+ )
+ hours_offset = int(seconds_offset / 60 / 60)
+ minutes_offset = int(seconds_offset / 60 - hours_offset * 60)
+ hour_modifier = f"{hours_offset} hour"
+ minute_modifier = f"{minutes_offset} minute"
+ return hour_modifier, minute_modifier
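
A minimal usage sketch for the new get_tz_modifiers helper added above (not part of the patch; the timezone names are arbitrary examples, the exact values for zones with DST depend on when the call is made, and the "N hour" / "M minute" strings are presumably consumed as SQL datetime modifiers downstream):

# Sketch: exercising frigate.util.get_tz_modifiers as added in the patch above.
from frigate.util import get_tz_modifiers

print(get_tz_modifiers("UTC"))           # ('0 hour', '0 minute')
print(get_tz_modifiers("Asia/Kolkata"))  # ('5 hour', '30 minute'), fixed +05:30 offset, no DST
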
diff --git a/frigate/video.py b/frigate/video.py
--- a/frigate/video.py
+++ b/frigate/video.py
@@ -1,7 +1,7 @@
import datetime
-import itertools
import logging
import multiprocessing as mp
+import os
import queue
import random
import signal
@@ -11,11 +11,12 @@
from collections import defaultdict
import numpy as np
-from cv2 import cv2, reduce
+import cv2
from setproctitle import setproctitle
-from frigate.config import CameraConfig, DetectConfig
-from frigate.edgetpu import RemoteObjectDetector
+from frigate.config import CameraConfig, DetectConfig, PixelFormatEnum
+from frigate.const import CACHE_DIR
+from frigate.object_detection import RemoteObjectDetector
from frigate.log import LogPipe
from frigate.motion import MotionDetector
from frigate.objects import ObjectTracker
@@ -30,6 +31,8 @@
intersection_over_union,
listen,
yuv_region_2_rgb,
+ yuv_region_2_bgr,
+ yuv_region_2_yuv,
)
logger = logging.getLogger(__name__)
@@ -89,13 +92,20 @@ def filtered(obj, objects_to_track, object_filters):
return False
-def create_tensor_input(frame, model_shape, region):
- cropped_frame = yuv_region_2_rgb(frame, region)
+def create_tensor_input(frame, model_config, region):
+ if model_config.input_pixel_format == PixelFormatEnum.rgb:
+ cropped_frame = yuv_region_2_rgb(frame, region)
+ elif model_config.input_pixel_format == PixelFormatEnum.bgr:
+ cropped_frame = yuv_region_2_bgr(frame, region)
+ else:
+ cropped_frame = yuv_region_2_yuv(frame, region)
- # Resize to 300x300 if needed
- if cropped_frame.shape != (model_shape[0], model_shape[1], 3):
+ # Resize if needed
+ if cropped_frame.shape != (model_config.height, model_config.width, 3):
cropped_frame = cv2.resize(
- cropped_frame, dsize=model_shape, interpolation=cv2.INTER_LINEAR
+ cropped_frame,
+ dsize=(model_config.height, model_config.width),
+ interpolation=cv2.INTER_LINEAR,
)
# Expand dimensions since the model expects images to have shape: [1, height, width, 3]
@@ -150,6 +160,7 @@ def capture_frames(
fps: mp.Value,
skipped_fps: mp.Value,
current_frame: mp.Value,
+ stop_event: mp.Event,
):
frame_size = frame_shape[0] * frame_shape[1]
@@ -167,6 +178,9 @@ def capture_frames(
try:
frame_buffer[:] = ffmpeg_process.stdout.read(frame_size)
except Exception as e:
+ # shutdown has been initiated
+ if stop_event.is_set():
+ break
logger.error(f"{camera_name}: Unable to read frames from ffmpeg process.")
if ffmpeg_process.poll() != None:
@@ -194,7 +208,13 @@ def capture_frames(
class CameraWatchdog(threading.Thread):
def __init__(
- self, camera_name, config, frame_queue, camera_fps, ffmpeg_pid, stop_event
+ self,
+ camera_name,
+ config: CameraConfig,
+ frame_queue,
+ camera_fps,
+ ffmpeg_pid,
+ stop_event,
):
threading.Thread.__init__(self)
self.logger = logging.getLogger(f"watchdog.{camera_name}")
@@ -203,7 +223,7 @@ def __init__(
self.capture_thread = None
self.ffmpeg_detect_process = None
self.logpipe = LogPipe(f"ffmpeg.{self.camera_name}.detect")
- self.ffmpeg_other_processes = []
+ self.ffmpeg_other_processes: list[dict[str, any]] = []
self.camera_fps = camera_fps
self.ffmpeg_pid = ffmpeg_pid
self.frame_queue = frame_queue
@@ -223,6 +243,7 @@ def run(self):
self.ffmpeg_other_processes.append(
{
"cmd": c["cmd"],
+ "roles": c["roles"],
"logpipe": logpipe,
"process": start_or_restart_ffmpeg(c["cmd"], self.logger, logpipe),
}
@@ -233,6 +254,7 @@ def run(self):
now = datetime.datetime.now().timestamp()
if not self.capture_thread.is_alive():
+ self.camera_fps.value = 0
self.logger.error(
f"Ffmpeg process crashed unexpectedly for {self.camera_name}."
)
@@ -242,6 +264,7 @@ def run(self):
self.logpipe.dump()
self.start_ffmpeg_detect()
elif now - self.capture_thread.current_frame.value > 20:
+ self.camera_fps.value = 0
self.logger.info(
f"No frames received from {self.camera_name} in 20 seconds. Exiting ffmpeg..."
)
@@ -250,14 +273,52 @@ def run(self):
self.logger.info("Waiting for ffmpeg to exit gracefully...")
self.ffmpeg_detect_process.communicate(timeout=30)
except sp.TimeoutExpired:
- self.logger.info("FFmpeg didnt exit. Force killing...")
+ self.logger.info("FFmpeg did not exit. Force killing...")
+ self.ffmpeg_detect_process.kill()
+ self.ffmpeg_detect_process.communicate()
+ elif self.camera_fps.value >= (self.config.detect.fps + 10):
+ self.camera_fps.value = 0
+ self.logger.info(
+ f"{self.camera_name} exceeded fps limit. Exiting ffmpeg..."
+ )
+ self.ffmpeg_detect_process.terminate()
+ try:
+ self.logger.info("Waiting for ffmpeg to exit gracefully...")
+ self.ffmpeg_detect_process.communicate(timeout=30)
+ except sp.TimeoutExpired:
+ self.logger.info("FFmpeg did not exit. Force killing...")
self.ffmpeg_detect_process.kill()
self.ffmpeg_detect_process.communicate()
for p in self.ffmpeg_other_processes:
poll = p["process"].poll()
+
+ if self.config.record.enabled and "record" in p["roles"]:
+ latest_segment_time = self.get_latest_segment_timestamp(
+ p.get(
+ "latest_segment_time", datetime.datetime.now().timestamp()
+ )
+ )
+
+ if datetime.datetime.now().timestamp() > (
+ latest_segment_time + 120
+ ):
+ self.logger.error(
+                            f"No new recording segments were created for {self.camera_name} in the last 120s. Restarting the ffmpeg record process..."
+ )
+ p["process"] = start_or_restart_ffmpeg(
+ p["cmd"],
+ self.logger,
+ p["logpipe"],
+ ffmpeg_process=p["process"],
+ )
+ continue
+ else:
+ p["latest_segment_time"] = latest_segment_time
+
if poll is None:
continue
+
p["logpipe"].dump()
p["process"] = start_or_restart_ffmpeg(
p["cmd"], self.logger, p["logpipe"], ffmpeg_process=p["process"]
@@ -283,18 +344,45 @@ def start_ffmpeg_detect(self):
self.frame_shape,
self.frame_queue,
self.camera_fps,
+ self.stop_event,
)
self.capture_thread.start()
+ def get_latest_segment_timestamp(self, latest_timestamp) -> int:
+        """Return the timestamp of the newest recording segment this camera has written to the cache."""
+ cache_files = sorted(
+ [
+ d
+ for d in os.listdir(CACHE_DIR)
+ if os.path.isfile(os.path.join(CACHE_DIR, d))
+ and d.endswith(".mp4")
+ and not d.startswith("clip_")
+ ]
+ )
+ newest_segment_timestamp = latest_timestamp
+
+ for file in cache_files:
+ if self.camera_name in file:
+ basename = os.path.splitext(file)[0]
+ _, date = basename.rsplit("-", maxsplit=1)
+ ts = datetime.datetime.strptime(date, "%Y%m%d%H%M%S").timestamp()
+ if ts > newest_segment_timestamp:
+ newest_segment_timestamp = ts
+
+ return newest_segment_timestamp
+
class CameraCapture(threading.Thread):
- def __init__(self, camera_name, ffmpeg_process, frame_shape, frame_queue, fps):
+ def __init__(
+ self, camera_name, ffmpeg_process, frame_shape, frame_queue, fps, stop_event
+ ):
threading.Thread.__init__(self)
self.name = f"capture:{camera_name}"
self.camera_name = camera_name
self.frame_shape = frame_shape
self.frame_queue = frame_queue
self.fps = fps
+ self.stop_event = stop_event
self.skipped_fps = EventsPerSecond()
self.frame_manager = SharedMemoryFrameManager()
self.ffmpeg_process = ffmpeg_process
@@ -312,6 +400,7 @@ def run(self):
self.fps,
self.skipped_fps,
self.current_frame,
+ self.stop_event,
)
@@ -324,6 +413,9 @@ def receiveSignal(signalNumber, frame):
signal.signal(signal.SIGTERM, receiveSignal)
signal.signal(signal.SIGINT, receiveSignal)
+ threading.current_thread().name = f"capture:{name}"
+ setproctitle(f"frigate.capture:{name}")
+
frame_queue = process_info["frame_queue"]
camera_watchdog = CameraWatchdog(
name,
@@ -340,7 +432,7 @@ def receiveSignal(signalNumber, frame):
def track_camera(
name,
config: CameraConfig,
- model_shape,
+ model_config,
labelmap,
detection_queue,
result_connection,
@@ -378,7 +470,7 @@ def receiveSignal(signalNumber, frame):
motion_contour_area,
)
object_detector = RemoteObjectDetector(
- name, labelmap, detection_queue, result_connection, model_shape
+ name, labelmap, detection_queue, result_connection, model_config, stop_event
)
object_tracker = ObjectTracker(config.detect)
@@ -389,7 +481,7 @@ def receiveSignal(signalNumber, frame):
name,
frame_queue,
frame_shape,
- model_shape,
+ model_config,
config.detect,
frame_manager,
motion_detector,
@@ -443,12 +535,12 @@ def detect(
detect_config: DetectConfig,
object_detector,
frame,
- model_shape,
+ model_config,
region,
objects_to_track,
object_filters,
):
- tensor_input = create_tensor_input(frame, model_shape, region)
+ tensor_input = create_tensor_input(frame, model_config, region)
detections = []
region_detections = object_detector.detect(tensor_input)
@@ -487,7 +579,7 @@ def process_frames(
camera_name: str,
frame_queue: mp.Queue,
frame_shape,
- model_shape,
+ model_config,
detect_config: DetectConfig,
frame_manager: FrameManager,
motion_detector: MotionDetector,
@@ -518,7 +610,7 @@ def process_frames(
break
try:
- frame_time = frame_queue.get(True, 10)
+ frame_time = frame_queue.get(True, 1)
except queue.Empty:
continue
@@ -571,7 +663,7 @@ def process_frames(
# combine motion boxes with known locations of existing objects
combined_boxes = reduce_boxes(motion_boxes + tracked_object_boxes)
- region_min_size = max(model_shape[0], model_shape[1])
+ region_min_size = max(model_config.height, model_config.width)
# compute regions
regions = [
calculate_region(
@@ -634,7 +726,7 @@ def process_frames(
detect_config,
object_detector,
frame,
- model_shape,
+ model_config,
region,
objects_to_track,
object_filters,
@@ -694,7 +786,7 @@ def process_frames(
detect_config,
object_detector,
frame,
- model_shape,
+ model_config,
region,
objects_to_track,
object_filters,
@@ -704,6 +796,7 @@ def process_frames(
refining = True
else:
selected_objects.append(obj)
+
# set the detections list to only include top, complete objects
# and new detections
detections = selected_objects
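
The create_tensor_input change above replaces the hard-coded 300x300 resize with the configured model dimensions and pixel format. Below is a standalone sketch of the resize-and-expand step, assuming a square 320x320 model (the dimensions and the dummy region are illustrative assumptions, not values from the patch):

# Sketch of the resize + batch-dimension step performed before detection.
import cv2
import numpy as np

model_h, model_w = 320, 320                  # assumed model dimensions
cropped = np.zeros((240, 240, 3), np.uint8)  # stand-in for a cropped camera region

if cropped.shape != (model_h, model_w, 3):
    # the patch passes dsize=(height, width); with a square model the order is moot
    cropped = cv2.resize(cropped, dsize=(model_h, model_w), interpolation=cv2.INTER_LINEAR)

tensor_input = np.expand_dims(cropped, 0)    # shape becomes (1, model_h, model_w, 3)
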
diff --git a/frigate/watchdog.py b/frigate/watchdog.py
--- a/frigate/watchdog.py
+++ b/frigate/watchdog.py
@@ -5,15 +5,15 @@
import os
import signal
-from frigate.edgetpu import EdgeTPUProcess
+from frigate.object_detection import ObjectDetectProcess
from frigate.util import restart_frigate
-from multiprocessing.synchronize import Event
+from multiprocessing.synchronize import Event as MpEvent
logger = logging.getLogger(__name__)
class FrigateWatchdog(threading.Thread):
- def __init__(self, detectors: dict[str, EdgeTPUProcess], stop_event: Event):
+ def __init__(self, detectors: dict[str, ObjectDetectProcess], stop_event: MpEvent):
threading.Thread.__init__(self)
self.name = "frigate_watchdog"
self.detectors = detectors
@@ -36,7 +36,7 @@ def run(self) -> None:
detector.detect_process is not None
and not detector.detect_process.is_alive()
):
- logger.info("Detection appears to have stopped. Exiting frigate...")
+ logger.info("Detection appears to have stopped. Exiting Frigate...")
restart_frigate()
logger.info(f"Exiting watchdog...")
diff --git a/migrations/012_add_segment_size.py b/migrations/012_add_segment_size.py
new file mode 100644
--- /dev/null
+++ b/migrations/012_add_segment_size.py
@@ -0,0 +1,46 @@
+"""Peewee migrations -- 012_add_segment_size.py.
+
+Some examples (model - class or model name)::
+
+ > Model = migrator.orm['model_name'] # Return model in current state by name
+
+ > migrator.sql(sql) # Run custom SQL
+ > migrator.python(func, *args, **kwargs) # Run python code
+ > migrator.create_model(Model) # Create a model (could be used as decorator)
+ > migrator.remove_model(model, cascade=True) # Remove a model
+ > migrator.add_fields(model, **fields) # Add fields to a model
+ > migrator.change_fields(model, **fields) # Change fields
+ > migrator.remove_fields(model, *field_names, cascade=True)
+ > migrator.rename_field(model, old_field_name, new_field_name)
+ > migrator.rename_table(model, new_table_name)
+ > migrator.add_index(model, *col_names, unique=False)
+ > migrator.drop_index(model, *col_names)
+ > migrator.add_not_null(model, *field_names)
+ > migrator.drop_not_null(model, *field_names)
+ > migrator.add_default(model, field_name, default)
+
+"""
+
+import datetime as dt
+import peewee as pw
+from playhouse.sqlite_ext import *
+from decimal import ROUND_HALF_EVEN
+from frigate.models import Recordings
+
+try:
+ import playhouse.postgres_ext as pw_pext
+except ImportError:
+ pass
+
+SQL = pw.SQL
+
+
+def migrate(migrator, database, fake=False, **kwargs):
+ migrator.add_fields(
+ Recordings,
+ segment_size=pw.FloatField(default=0),
+ )
+
+
+def rollback(migrator, database, fake=False, **kwargs):
+ migrator.remove_fields(Recordings, ["segment_size"])
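
The migration above only adds the segment_size column. As an illustration of what the column enables, here is a minimal peewee aggregation over the new field; this is a sketch, not Frigate's storage logic (which lives in frigate.storage), and the per-camera total is an assumption chosen for the example:

# Sketch: total recorded segment size per camera, using the field added above.
from peewee import fn

from frigate.models import Recordings

def total_segment_size(camera: str) -> float:
    # Rows created before this migration carry the default of 0.
    total = (
        Recordings.select(fn.SUM(Recordings.segment_size))
        .where(Recordings.camera == camera)
        .scalar()
    )
    return total or 0.0
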
diff --git a/process_clip.py b/process_clip.py
--- a/process_clip.py
+++ b/process_clip.py
@@ -16,7 +16,7 @@
import numpy as np
from frigate.config import FrigateConfig
-from frigate.edgetpu import LocalObjectDetector
+from frigate.object_detection import LocalObjectDetector
from frigate.motion import MotionDetector
from frigate.object_processing import CameraState
from frigate.objects import ObjectTracker
@@ -117,13 +117,12 @@ def process_frames(
detection_enabled = mp.Value("d", 1)
motion_enabled = mp.Value("d", True)
stop_event = mp.Event()
- model_shape = (self.config.model.height, self.config.model.width)
process_frames(
self.camera_name,
self.frame_queue,
self.frame_shape,
- model_shape,
+ self.config.model,
self.camera_config.detect,
self.frame_manager,
motion_detector,
| diff --git a/frigate/test/test_camera_pw.py b/frigate/test/test_camera_pw.py
new file mode 100644
--- /dev/null
+++ b/frigate/test/test_camera_pw.py
@@ -0,0 +1,49 @@
+"""Test camera user and password cleanup."""
+
+import unittest
+
+from frigate.util import clean_camera_user_pass, escape_special_characters
+
+
+class TestUserPassCleanup(unittest.TestCase):
+ def setUp(self) -> None:
+ self.rtsp_with_pass = "rtsp://user:[email protected]:554/live"
+ self.rtsp_with_special_pass = (
+ "rtsp://user:password`~!@#$%^&*()-_;',.<>:\"\{\}\[\]@@192.168.0.2:554/live"
+ )
+ self.rtsp_no_pass = "rtsp://192.168.0.3:554/live"
+
+ def test_cleanup(self):
+ """Test that user / pass are cleaned up."""
+ clean = clean_camera_user_pass(self.rtsp_with_pass)
+ assert clean != self.rtsp_with_pass
+ assert "user:password" not in clean
+
+ def test_no_cleanup(self):
+ """Test that nothing changes when no user / pass are defined."""
+ clean = clean_camera_user_pass(self.rtsp_no_pass)
+ assert clean == self.rtsp_no_pass
+
+ def test_special_char_password(self):
+ """Test that special characters in pw are escaped, but not others."""
+ escaped = escape_special_characters(self.rtsp_with_special_pass)
+ assert (
+ escaped
+ == "rtsp://user:password%60~%21%40%23%24%25%5E%26%2A%28%29-_%3B%27%2C.%3C%3E%3A%22%5C%7B%5C%7D%5C%5B%5C%5D%[email protected]:554/live"
+ )
+
+ def test_no_special_char_password(self):
+ """Test that no change is made to path with no special characters."""
+ escaped = escape_special_characters(self.rtsp_with_pass)
+ assert escaped == self.rtsp_with_pass
+
+
+class TestUserPassMasking(unittest.TestCase):
+ def setUp(self) -> None:
+ self.rtsp_log_message = "Did you mean file:rtsp://user:[email protected]:554"
+
+ def test_rtsp_in_log_message(self):
+        """Test that the rtsp url in a log message is cleaned."""
+ escaped = clean_camera_user_pass(self.rtsp_log_message)
+ print(f"The escaped is {escaped}")
+ assert escaped == "Did you mean file:rtsp://*:*@192.168.1.3:554"
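
The expected string in test_special_char_password above is consistent with plain percent-encoding of the password portion. A quick check with the standard library (urllib is used here only as an illustration; the patch does not show how escape_special_characters is implemented):

# Sketch: the escapes asserted above match urllib-style percent-encoding.
from urllib.parse import quote

print(quote("`"))   # %60
print(quote("!"))   # %21
print(quote("[]"))  # %5B%5D
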
diff --git a/frigate/test/test_config.py b/frigate/test/test_config.py
--- a/frigate/test/test_config.py
+++ b/frigate/test/test_config.py
@@ -1,11 +1,13 @@
import unittest
import numpy as np
from pydantic import ValidationError
+
from frigate.config import (
BirdseyeModeEnum,
FrigateConfig,
- DetectorTypeEnum,
)
+from frigate.detectors import DetectorTypeEnum
+from frigate.util import deep_merge, load_config_with_no_duplicates
class TestConfig(unittest.TestCase):
@@ -35,6 +37,50 @@ def test_config_class(self):
runtime_config = frigate_config.runtime_config
assert "cpu" in runtime_config.detectors.keys()
assert runtime_config.detectors["cpu"].type == DetectorTypeEnum.cpu
+ assert runtime_config.detectors["cpu"].model.width == 320
+
+ def test_detector_custom_model_path(self):
+ config = {
+ "detectors": {
+ "cpu": {
+ "type": "cpu",
+ "model": {"path": "/cpu_model.tflite"},
+ },
+ "edgetpu": {
+ "type": "edgetpu",
+ "model": {"path": "/edgetpu_model.tflite", "width": 160},
+ },
+ "openvino": {
+ "type": "openvino",
+ },
+ },
+ "model": {"path": "/default.tflite", "width": 512},
+ }
+
+ frigate_config = FrigateConfig(**(deep_merge(config, self.minimal)))
+ runtime_config = frigate_config.runtime_config
+
+ assert "cpu" in runtime_config.detectors.keys()
+ assert "edgetpu" in runtime_config.detectors.keys()
+ assert "openvino" in runtime_config.detectors.keys()
+
+ assert runtime_config.detectors["cpu"].type == DetectorTypeEnum.cpu
+ assert runtime_config.detectors["edgetpu"].type == DetectorTypeEnum.edgetpu
+ assert runtime_config.detectors["openvino"].type == DetectorTypeEnum.openvino
+
+ assert runtime_config.detectors["cpu"].num_threads == 3
+ assert runtime_config.detectors["edgetpu"].device is None
+ assert runtime_config.detectors["openvino"].device is None
+
+ assert runtime_config.model.path == "/default.tflite"
+ assert runtime_config.detectors["cpu"].model.path == "/cpu_model.tflite"
+ assert runtime_config.detectors["edgetpu"].model.path == "/edgetpu_model.tflite"
+ assert runtime_config.detectors["openvino"].model.path == "/default.tflite"
+
+ assert runtime_config.model.width == 512
+ assert runtime_config.detectors["cpu"].model.width == 512
+ assert runtime_config.detectors["edgetpu"].model.width == 160
+ assert runtime_config.detectors["openvino"].model.width == 512
def test_invalid_mqtt_config(self):
config = {
@@ -837,7 +883,6 @@ def test_works_on_missing_role_multiple_cams(self):
config = {
"mqtt": {"host": "mqtt"},
- "rtmp": {"enabled": False},
"cameras": {
"back": {
"ffmpeg": {
@@ -1050,11 +1095,10 @@ def test_global_snapshots_merge(self):
assert runtime_config.cameras["back"].snapshots.height == 150
assert runtime_config.cameras["back"].snapshots.enabled
- def test_global_rtmp(self):
+ def test_global_rtmp_disabled(self):
config = {
"mqtt": {"host": "mqtt"},
- "rtmp": {"enabled": True},
"cameras": {
"back": {
"ffmpeg": {
@@ -1072,9 +1116,9 @@ def test_global_rtmp(self):
assert config == frigate_config.dict(exclude_unset=True)
runtime_config = frigate_config.runtime_config
- assert runtime_config.cameras["back"].rtmp.enabled
+ assert not runtime_config.cameras["back"].rtmp.enabled
- def test_default_rtmp(self):
+ def test_default_not_rtmp(self):
config = {
"mqtt": {"host": "mqtt"},
@@ -1095,7 +1139,7 @@ def test_default_rtmp(self):
assert config == frigate_config.dict(exclude_unset=True)
runtime_config = frigate_config.runtime_config
- assert runtime_config.cameras["back"].rtmp.enabled
+ assert not runtime_config.cameras["back"].rtmp.enabled
def test_global_rtmp_merge(self):
@@ -1108,7 +1152,7 @@ def test_global_rtmp_merge(self):
"inputs": [
{
"path": "rtsp://10.0.0.1:554/video",
- "roles": ["detect"],
+ "roles": ["detect", "rtmp"],
},
]
},
@@ -1128,7 +1172,6 @@ def test_global_rtmp_default(self):
config = {
"mqtt": {"host": "mqtt"},
- "rtmp": {"enabled": False},
"cameras": {
"back": {
"ffmpeg": {
@@ -1152,7 +1195,7 @@ def test_global_rtmp_default(self):
runtime_config = frigate_config.runtime_config
assert not runtime_config.cameras["back"].rtmp.enabled
- def test_global_live(self):
+ def test_global_jsmpeg(self):
config = {
"mqtt": {"host": "mqtt"},
@@ -1349,6 +1392,78 @@ def test_fails_on_bad_camera_name(self):
ValidationError, lambda: frigate_config.runtime_config.cameras
)
+ def test_fails_on_bad_segment_time(self):
+ config = {
+ "mqtt": {"host": "mqtt"},
+ "record": {"enabled": True},
+ "cameras": {
+ "back": {
+ "ffmpeg": {
+ "output_args": {
+ "record": "-f segment -segment_time 70 -segment_format mp4 -reset_timestamps 1 -strftime 1 -c copy -an"
+ },
+ "inputs": [
+ {
+ "path": "rtsp://10.0.0.1:554/video",
+ "roles": ["detect"],
+ },
+ ],
+ },
+ }
+ },
+ }
+
+ frigate_config = FrigateConfig(**config)
+
+ self.assertRaises(
+ ValueError, lambda: frigate_config.runtime_config.ffmpeg.output_args.record
+ )
+
+ def test_fails_zone_defines_untracked_object(self):
+ config = {
+ "mqtt": {"host": "mqtt"},
+ "objects": {"track": ["person"]},
+ "cameras": {
+ "back": {
+ "ffmpeg": {
+ "inputs": [
+ {
+ "path": "rtsp://10.0.0.1:554/video",
+ "roles": ["detect"],
+ },
+ ]
+ },
+ "zones": {
+ "steps": {
+ "coordinates": "0,0,0,0",
+ "objects": ["car", "person"],
+ },
+ },
+ }
+ },
+ }
+
+ frigate_config = FrigateConfig(**config)
+
+ self.assertRaises(ValueError, lambda: frigate_config.runtime_config.cameras)
+
+ def test_fails_duplicate_keys(self):
+ raw_config = """
+ cameras:
+ test:
+ ffmpeg:
+ inputs:
+ - one
+ - two
+ inputs:
+ - three
+ - four
+ """
+
+ self.assertRaises(
+ ValueError, lambda: load_config_with_no_duplicates(raw_config)
+ )
+
def test_object_filter_ratios_work(self):
config = {
"mqtt": {"host": "mqtt"},
diff --git a/frigate/test/test_ffmpeg_presets.py b/frigate/test/test_ffmpeg_presets.py
new file mode 100644
--- /dev/null
+++ b/frigate/test/test_ffmpeg_presets.py
@@ -0,0 +1,184 @@
+import unittest
+from frigate.config import FFMPEG_INPUT_ARGS_DEFAULT, FrigateConfig
+from frigate.ffmpeg_presets import parse_preset_input
+
+
+class TestFfmpegPresets(unittest.TestCase):
+ def setUp(self):
+ self.default_ffmpeg = {
+ "mqtt": {"host": "mqtt"},
+ "cameras": {
+ "back": {
+ "ffmpeg": {
+ "inputs": [
+ {
+ "path": "rtsp://10.0.0.1:554/video",
+ "roles": ["detect", "rtmp"],
+ }
+ ],
+ "output_args": {
+ "detect": "-f rawvideo -pix_fmt yuv420p",
+ "record": "-f segment -segment_time 10 -segment_format mp4 -reset_timestamps 1 -strftime 1 -c copy -an",
+ "rtmp": "-c copy -f flv",
+ },
+ },
+ "detect": {
+ "height": 1080,
+ "width": 1920,
+ "fps": 5,
+ },
+ "record": {
+ "enabled": True,
+ },
+ "rtmp": {
+ "enabled": True,
+ },
+ "name": "back",
+ }
+ },
+ }
+
+ def test_default_ffmpeg(self):
+ frigate_config = FrigateConfig(**self.default_ffmpeg)
+ frigate_config.cameras["back"].create_ffmpeg_cmds()
+ assert self.default_ffmpeg == frigate_config.dict(exclude_unset=True)
+
+ def test_ffmpeg_hwaccel_preset(self):
+ self.default_ffmpeg["cameras"]["back"]["ffmpeg"][
+ "hwaccel_args"
+ ] = "preset-rpi-64-h264"
+ frigate_config = FrigateConfig(**self.default_ffmpeg)
+ frigate_config.cameras["back"].create_ffmpeg_cmds()
+ assert "preset-rpi-64-h264" not in (
+ " ".join(frigate_config.cameras["back"].ffmpeg_cmds[0]["cmd"])
+ )
+ assert "-c:v h264_v4l2m2m" in (
+ " ".join(frigate_config.cameras["back"].ffmpeg_cmds[0]["cmd"])
+ )
+
+ def test_ffmpeg_hwaccel_not_preset(self):
+ self.default_ffmpeg["cameras"]["back"]["ffmpeg"][
+ "hwaccel_args"
+ ] = "-other-hwaccel args"
+ frigate_config = FrigateConfig(**self.default_ffmpeg)
+ frigate_config.cameras["back"].create_ffmpeg_cmds()
+ assert "-other-hwaccel args" in (
+ " ".join(frigate_config.cameras["back"].ffmpeg_cmds[0]["cmd"])
+ )
+
+ def test_ffmpeg_hwaccel_scale_preset(self):
+ self.default_ffmpeg["cameras"]["back"]["ffmpeg"][
+ "hwaccel_args"
+ ] = "preset-nvidia-h264"
+ self.default_ffmpeg["cameras"]["back"]["detect"] = {
+ "height": 1920,
+ "width": 2560,
+ "fps": 10,
+ }
+ frigate_config = FrigateConfig(**self.default_ffmpeg)
+ frigate_config.cameras["back"].create_ffmpeg_cmds()
+ assert "preset-nvidia-h264" not in (
+ " ".join(frigate_config.cameras["back"].ffmpeg_cmds[0]["cmd"])
+ )
+ assert (
+ "fps=10,scale_cuda=w=2560:h=1920:format=nv12,hwdownload,format=nv12,format=yuv420p"
+ in (" ".join(frigate_config.cameras["back"].ffmpeg_cmds[0]["cmd"]))
+ )
+
+ def test_default_ffmpeg_input_arg_preset(self):
+ frigate_config = FrigateConfig(**self.default_ffmpeg)
+
+ self.default_ffmpeg["cameras"]["back"]["ffmpeg"][
+ "input_args"
+ ] = "preset-rtsp-generic"
+ frigate_preset_config = FrigateConfig(**self.default_ffmpeg)
+ frigate_config.cameras["back"].create_ffmpeg_cmds()
+ frigate_preset_config.cameras["back"].create_ffmpeg_cmds()
+ assert (
+ # Ignore global and user_agent args in comparison
+ frigate_preset_config.cameras["back"].ffmpeg_cmds[0]["cmd"]
+ == frigate_config.cameras["back"].ffmpeg_cmds[0]["cmd"]
+ )
+
+ def test_ffmpeg_input_preset(self):
+ self.default_ffmpeg["cameras"]["back"]["ffmpeg"][
+ "input_args"
+ ] = "preset-rtmp-generic"
+ frigate_config = FrigateConfig(**self.default_ffmpeg)
+ frigate_config.cameras["back"].create_ffmpeg_cmds()
+ assert "preset-rtmp-generic" not in (
+ " ".join(frigate_config.cameras["back"].ffmpeg_cmds[0]["cmd"])
+ )
+ assert (" ".join(parse_preset_input("preset-rtmp-generic", 5))) in (
+ " ".join(frigate_config.cameras["back"].ffmpeg_cmds[0]["cmd"])
+ )
+
+ def test_ffmpeg_input_args_as_string(self):
+ # Strip user_agent args here to avoid handling quoting issues
+ defaultArgsList = parse_preset_input(FFMPEG_INPUT_ARGS_DEFAULT, 5)[2::]
+ argsString = " ".join(defaultArgsList) + ' -some "arg with space"'
+ argsList = defaultArgsList + ["-some", "arg with space"]
+ self.default_ffmpeg["cameras"]["back"]["ffmpeg"]["input_args"] = argsString
+ frigate_config = FrigateConfig(**self.default_ffmpeg)
+ frigate_config.cameras["back"].create_ffmpeg_cmds()
+ assert set(argsList).issubset(
+ frigate_config.cameras["back"].ffmpeg_cmds[0]["cmd"]
+ )
+
+ def test_ffmpeg_input_not_preset(self):
+ self.default_ffmpeg["cameras"]["back"]["ffmpeg"]["input_args"] = "-some inputs"
+ frigate_config = FrigateConfig(**self.default_ffmpeg)
+ frigate_config.cameras["back"].create_ffmpeg_cmds()
+ assert "-some inputs" in (
+ " ".join(frigate_config.cameras["back"].ffmpeg_cmds[0]["cmd"])
+ )
+
+ def test_ffmpeg_output_record_preset(self):
+ self.default_ffmpeg["cameras"]["back"]["ffmpeg"]["output_args"][
+ "record"
+ ] = "preset-record-generic-audio-aac"
+ frigate_config = FrigateConfig(**self.default_ffmpeg)
+ frigate_config.cameras["back"].create_ffmpeg_cmds()
+ assert "preset-record-generic-audio-aac" not in (
+ " ".join(frigate_config.cameras["back"].ffmpeg_cmds[0]["cmd"])
+ )
+ assert "-c:v copy -c:a aac" in (
+ " ".join(frigate_config.cameras["back"].ffmpeg_cmds[0]["cmd"])
+ )
+
+ def test_ffmpeg_output_record_not_preset(self):
+ self.default_ffmpeg["cameras"]["back"]["ffmpeg"]["output_args"][
+ "record"
+ ] = "-some output"
+ frigate_config = FrigateConfig(**self.default_ffmpeg)
+ frigate_config.cameras["back"].create_ffmpeg_cmds()
+ assert "-some output" in (
+ " ".join(frigate_config.cameras["back"].ffmpeg_cmds[0]["cmd"])
+ )
+
+ def test_ffmpeg_output_rtmp_preset(self):
+ self.default_ffmpeg["cameras"]["back"]["ffmpeg"]["output_args"][
+ "rtmp"
+ ] = "preset-rtmp-jpeg"
+ frigate_config = FrigateConfig(**self.default_ffmpeg)
+ frigate_config.cameras["back"].create_ffmpeg_cmds()
+ assert "preset-rtmp-jpeg" not in (
+ " ".join(frigate_config.cameras["back"].ffmpeg_cmds[0]["cmd"])
+ )
+ assert "-c:v libx264" in (
+ " ".join(frigate_config.cameras["back"].ffmpeg_cmds[0]["cmd"])
+ )
+
+ def test_ffmpeg_output_rtmp_not_preset(self):
+ self.default_ffmpeg["cameras"]["back"]["ffmpeg"]["output_args"][
+ "rtmp"
+ ] = "-some output"
+ frigate_config = FrigateConfig(**self.default_ffmpeg)
+ frigate_config.cameras["back"].create_ffmpeg_cmds()
+ assert "-some output" in (
+ " ".join(frigate_config.cameras["back"].ffmpeg_cmds[0]["cmd"])
+ )
+
+
+if __name__ == "__main__":
+ unittest.main(verbosity=2)
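
The preset tests above all reduce to one behaviour: a preset-* string in the ffmpeg config is swapped for concrete arguments, while any other string is passed through verbatim. A minimal sketch of the expansion side, using the same call shape the tests use (the second argument mirrors the camera's detect fps in these tests):

# Sketch: expanding an input preset the way test_ffmpeg_input_preset does.
from frigate.ffmpeg_presets import parse_preset_input

expanded = parse_preset_input("preset-rtmp-generic", 5)
print(" ".join(expanded))  # the concrete ffmpeg input args substituted for the preset name
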
diff --git a/frigate/test/test_gpu_stats.py b/frigate/test/test_gpu_stats.py
new file mode 100644
--- /dev/null
+++ b/frigate/test/test_gpu_stats.py
@@ -0,0 +1,45 @@
+import unittest
+from unittest.mock import MagicMock, patch
+
+from frigate.util import get_amd_gpu_stats, get_intel_gpu_stats, get_nvidia_gpu_stats
+
+
+class TestGpuStats(unittest.TestCase):
+ def setUp(self):
+ self.amd_results = "Unknown Radeon card. <= R500 won't work, new cards might.\nDumping to -, line limit 1.\n1664070990.607556: bus 10, gpu 4.17%, ee 0.00%, vgt 0.00%, ta 0.00%, tc 0.00%, sx 0.00%, sh 0.00%, spi 0.83%, smx 0.00%, cr 0.00%, sc 0.00%, pa 0.00%, db 0.00%, cb 0.00%, vram 60.37% 294.04mb, gtt 0.33% 52.21mb, mclk 100.00% 1.800ghz, sclk 26.65% 0.533ghz\n"
+ self.intel_results = """{"period":{"duration":1.194033,"unit":"ms"},"frequency":{"requested":0.000000,"actual":0.000000,"unit":"MHz"},"interrupts":{"count":3349.991164,"unit":"irq/s"},"rc6":{"value":47.844741,"unit":"%"},"engines":{"Render/3D/0":{"busy":0.000000,"sema":0.000000,"wait":0.000000,"unit":"%"},"Blitter/0":{"busy":0.000000,"sema":0.000000,"wait":0.000000,"unit":"%"},"Video/0":{"busy":4.533124,"sema":0.000000,"wait":0.000000,"unit":"%"},"Video/1":{"busy":6.194385,"sema":0.000000,"wait":0.000000,"unit":"%"},"VideoEnhance/0":{"busy":0.000000,"sema":0.000000,"wait":0.000000,"unit":"%"}}},{"period":{"duration":1.189291,"unit":"ms"},"frequency":{"requested":0.000000,"actual":0.000000,"unit":"MHz"},"interrupts":{"count":0.000000,"unit":"irq/s"},"rc6":{"value":100.000000,"unit":"%"},"engines":{"Render/3D/0":{"busy":0.000000,"sema":0.000000,"wait":0.000000,"unit":"%"},"Blitter/0":{"busy":0.000000,"sema":0.000000,"wait":0.000000,"unit":"%"},"Video/0":{"busy":0.000000,"sema":0.000000,"wait":0.000000,"unit":"%"},"Video/1":{"busy":0.000000,"sema":0.000000,"wait":0.000000,"unit":"%"},"VideoEnhance/0":{"busy":0.000000,"sema":0.000000,"wait":0.000000,"unit":"%"}}}"""
+ self.nvidia_results = "name, utilization.gpu [%], memory.used [MiB], memory.total [MiB]\nNVIDIA GeForce RTX 3050, 42 %, 5036 MiB, 8192 MiB\n"
+
+ @patch("subprocess.run")
+ def test_amd_gpu_stats(self, sp):
+ process = MagicMock()
+ process.returncode = 0
+ process.stdout = self.amd_results
+ sp.return_value = process
+ amd_stats = get_amd_gpu_stats()
+ assert amd_stats == {"gpu": "4.17 %", "mem": "60.37 %"}
+
+ @patch("subprocess.run")
+ def test_nvidia_gpu_stats(self, sp):
+ process = MagicMock()
+ process.returncode = 0
+ process.stdout = self.nvidia_results
+ sp.return_value = process
+ nvidia_stats = get_nvidia_gpu_stats()
+ assert nvidia_stats == {
+ "name": "NVIDIA GeForce RTX 3050",
+ "gpu": "42 %",
+ "mem": "61.5 %",
+ }
+
+ @patch("subprocess.run")
+ def test_intel_gpu_stats(self, sp):
+ process = MagicMock()
+ process.returncode = 124
+ process.stdout = self.intel_results
+ sp.return_value = process
+ intel_stats = get_intel_gpu_stats()
+ assert intel_stats == {
+ "gpu": "1.34 %",
+ "mem": "- %",
+ }
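
The expected values in these GPU tests follow directly from the fixtures and the parsing code added earlier in the patch; the arithmetic, spelled out as a sketch:

# Sketch: how the fixture numbers above produce the asserted percentages.
# nvidia: memory percent is used / total from the CSV fixture.
print(round(5036 / 8192 * 100, 1))  # 61.5
# intel: the two Render/3D/0 samples average 0.0, the four Video samples average
# (4.533124 + 6.194385 + 0 + 0) / 4, and the reported gpu value is the mean of both.
print(round(((4.533124 + 6.194385 + 0.0 + 0.0) / 4 + 0.0) / 2, 2))  # 1.34
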
diff --git a/frigate/test/test_http.py b/frigate/test/test_http.py
--- a/frigate/test/test_http.py
+++ b/frigate/test/test_http.py
@@ -114,7 +114,7 @@ def tearDown(self):
def test_get_event_list(self):
app = create_app(
- FrigateConfig(**self.minimal_config), self.db, None, None, PlusApi()
+ FrigateConfig(**self.minimal_config), self.db, None, None, None, PlusApi()
)
id = "123456.random"
id2 = "7890.random"
@@ -143,7 +143,7 @@ def test_get_event_list(self):
def test_get_good_event(self):
app = create_app(
- FrigateConfig(**self.minimal_config), self.db, None, None, PlusApi()
+ FrigateConfig(**self.minimal_config), self.db, None, None, None, PlusApi()
)
id = "123456.random"
@@ -157,7 +157,7 @@ def test_get_good_event(self):
def test_get_bad_event(self):
app = create_app(
- FrigateConfig(**self.minimal_config), self.db, None, None, PlusApi()
+ FrigateConfig(**self.minimal_config), self.db, None, None, None, PlusApi()
)
id = "123456.random"
bad_id = "654321.other"
@@ -170,7 +170,7 @@ def test_get_bad_event(self):
def test_delete_event(self):
app = create_app(
- FrigateConfig(**self.minimal_config), self.db, None, None, PlusApi()
+ FrigateConfig(**self.minimal_config), self.db, None, None, None, PlusApi()
)
id = "123456.random"
@@ -185,7 +185,7 @@ def test_delete_event(self):
def test_event_retention(self):
app = create_app(
- FrigateConfig(**self.minimal_config), self.db, None, None, PlusApi()
+ FrigateConfig(**self.minimal_config), self.db, None, None, None, PlusApi()
)
id = "123456.random"
@@ -204,7 +204,7 @@ def test_event_retention(self):
def test_set_delete_sub_label(self):
app = create_app(
- FrigateConfig(**self.minimal_config), self.db, None, None, PlusApi()
+ FrigateConfig(**self.minimal_config), self.db, None, None, None, PlusApi()
)
id = "123456.random"
sub_label = "sub"
@@ -232,7 +232,7 @@ def test_set_delete_sub_label(self):
def test_sub_label_list(self):
app = create_app(
- FrigateConfig(**self.minimal_config), self.db, None, None, PlusApi()
+ FrigateConfig(**self.minimal_config), self.db, None, None, None, PlusApi()
)
id = "123456.random"
sub_label = "sub"
@@ -254,6 +254,7 @@ def test_config(self):
self.db,
None,
None,
+ None,
PlusApi(),
)
@@ -268,6 +269,7 @@ def test_recordings(self):
self.db,
None,
None,
+ None,
PlusApi(),
)
id = "123456.random"
@@ -285,6 +287,7 @@ def test_stats(self, mock_stats):
self.db,
None,
None,
+ None,
PlusApi(),
)
mock_stats.return_value = self.test_stats
diff --git a/frigate/test/test_object_detector.py b/frigate/test/test_object_detector.py
new file mode 100644
--- /dev/null
+++ b/frigate/test/test_object_detector.py
@@ -0,0 +1,137 @@
+import unittest
+from unittest.mock import Mock, patch
+
+import numpy as np
+from pydantic import parse_obj_as
+
+from frigate.config import DetectorConfig, InputTensorEnum, ModelConfig
+from frigate.detectors import DetectorTypeEnum
+import frigate.detectors as detectors
+import frigate.object_detection
+
+
+class TestLocalObjectDetector(unittest.TestCase):
+ def test_localdetectorprocess_should_only_create_specified_detector_type(self):
+ for det_type in detectors.api_types:
+ with self.subTest(det_type=det_type):
+ with patch.dict(
+ "frigate.detectors.api_types",
+ {det_type: Mock() for det_type in DetectorTypeEnum},
+ ):
+ test_cfg = parse_obj_as(
+ DetectorConfig, ({"type": det_type, "model": {}})
+ )
+ test_cfg.model.path = "/test/modelpath"
+ test_obj = frigate.object_detection.LocalObjectDetector(
+ detector_config=test_cfg
+ )
+
+ assert test_obj is not None
+ for api_key, mock_detector in detectors.api_types.items():
+ if test_cfg.type == api_key:
+ mock_detector.assert_called_once_with(test_cfg)
+ else:
+ mock_detector.assert_not_called()
+
+ @patch.dict(
+ "frigate.detectors.api_types",
+ {det_type: Mock() for det_type in DetectorTypeEnum},
+ )
+ def test_detect_raw_given_tensor_input_should_return_api_detect_raw_result(self):
+ mock_cputfl = detectors.api_types[DetectorTypeEnum.cpu]
+
+ TEST_DATA = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
+ TEST_DETECT_RESULT = np.ndarray([1, 2, 4, 8, 16, 32])
+ test_obj_detect = frigate.object_detection.LocalObjectDetector(
+ detector_config=parse_obj_as(DetectorConfig, {"type": "cpu", "model": {}})
+ )
+
+ mock_det_api = mock_cputfl.return_value
+ mock_det_api.detect_raw.return_value = TEST_DETECT_RESULT
+
+ test_result = test_obj_detect.detect_raw(TEST_DATA)
+
+ mock_det_api.detect_raw.assert_called_once_with(tensor_input=TEST_DATA)
+ assert test_result is mock_det_api.detect_raw.return_value
+
+ @patch.dict(
+ "frigate.detectors.api_types",
+ {det_type: Mock() for det_type in DetectorTypeEnum},
+ )
+ def test_detect_raw_given_tensor_input_should_call_api_detect_raw_with_transposed_tensor(
+ self,
+ ):
+ mock_cputfl = detectors.api_types[DetectorTypeEnum.cpu]
+
+ TEST_DATA = np.zeros((1, 32, 32, 3), np.uint8)
+ TEST_DETECT_RESULT = np.ndarray([1, 2, 4, 8, 16, 32])
+
+ test_cfg = parse_obj_as(DetectorConfig, {"type": "cpu", "model": {}})
+ test_cfg.model.input_tensor = InputTensorEnum.nchw
+
+ test_obj_detect = frigate.object_detection.LocalObjectDetector(
+ detector_config=test_cfg
+ )
+
+ mock_det_api = mock_cputfl.return_value
+ mock_det_api.detect_raw.return_value = TEST_DETECT_RESULT
+
+ test_result = test_obj_detect.detect_raw(TEST_DATA)
+
+ mock_det_api.detect_raw.assert_called_once()
+ assert (
+ mock_det_api.detect_raw.call_args.kwargs["tensor_input"].shape
+ == np.zeros((1, 3, 32, 32)).shape
+ )
+
+ assert test_result is mock_det_api.detect_raw.return_value
+
+ @patch.dict(
+ "frigate.detectors.api_types",
+ {det_type: Mock() for det_type in DetectorTypeEnum},
+ )
+ @patch("frigate.object_detection.load_labels")
+ def test_detect_given_tensor_input_should_return_lfiltered_detections(
+ self, mock_load_labels
+ ):
+ mock_cputfl = detectors.api_types[DetectorTypeEnum.cpu]
+
+ TEST_DATA = np.zeros((1, 32, 32, 3), np.uint8)
+ TEST_DETECT_RAW = [
+ [2, 0.9, 5, 4, 3, 2],
+ [1, 0.5, 8, 7, 6, 5],
+ [0, 0.4, 2, 4, 8, 16],
+ ]
+ TEST_DETECT_RESULT = [
+ ("label-3", 0.9, (5, 4, 3, 2)),
+ ("label-2", 0.5, (8, 7, 6, 5)),
+ ]
+ TEST_LABEL_FILE = "/test_labels.txt"
+ mock_load_labels.return_value = [
+ "label-1",
+ "label-2",
+ "label-3",
+ "label-4",
+ "label-5",
+ ]
+
+ test_cfg = parse_obj_as(DetectorConfig, {"type": "cpu", "model": {}})
+ test_cfg.model = ModelConfig()
+ test_obj_detect = frigate.object_detection.LocalObjectDetector(
+ detector_config=test_cfg,
+ labels=TEST_LABEL_FILE,
+ )
+
+ mock_load_labels.assert_called_once_with(TEST_LABEL_FILE)
+
+ mock_det_api = mock_cputfl.return_value
+ mock_det_api.detect_raw.return_value = TEST_DETECT_RAW
+
+ test_result = test_obj_detect.detect(tensor_input=TEST_DATA, threshold=0.5)
+
+ mock_det_api.detect_raw.assert_called_once()
+ assert (
+ mock_det_api.detect_raw.call_args.kwargs["tensor_input"].shape
+ == np.zeros((1, 32, 32, 3)).shape
+ )
+ assert test_result == TEST_DETECT_RESULT
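
The nchw test above checks only a shape, which implies the wrapper transposes the usual NHWC tensor to NCHW before calling the detector API. The equivalent numpy operation, matching the shapes asserted in the test:

# Sketch: NHWC -> NCHW transpose implied by the shape assertion above.
import numpy as np

nhwc = np.zeros((1, 32, 32, 3), np.uint8)
nchw = np.transpose(nhwc, (0, 3, 1, 2))
assert nchw.shape == (1, 3, 32, 32)
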
diff --git a/frigate/test/test_storage.py b/frigate/test/test_storage.py
new file mode 100644
--- /dev/null
+++ b/frigate/test/test_storage.py
@@ -0,0 +1,239 @@
+import datetime
+import json
+import logging
+import os
+import unittest
+from unittest.mock import MagicMock, patch
+
+from peewee import DoesNotExist
+from peewee_migrate import Router
+from playhouse.sqlite_ext import SqliteExtDatabase
+from playhouse.sqliteq import SqliteQueueDatabase
+from playhouse.shortcuts import model_to_dict
+
+from frigate.config import FrigateConfig
+from frigate.http import create_app
+from frigate.models import Event, Recordings
+from frigate.storage import StorageMaintainer
+
+from frigate.test.const import TEST_DB, TEST_DB_CLEANUPS
+
+
+class TestStorage(unittest.TestCase):
+ def setUp(self):
+ # setup clean database for each test run
+ migrate_db = SqliteExtDatabase("test.db")
+ del logging.getLogger("peewee_migrate").handlers[:]
+ router = Router(migrate_db)
+ router.run()
+ migrate_db.close()
+ self.db = SqliteQueueDatabase(TEST_DB)
+ models = [Event, Recordings]
+ self.db.bind(models)
+
+ self.minimal_config = {
+ "mqtt": {"host": "mqtt"},
+ "cameras": {
+ "front_door": {
+ "ffmpeg": {
+ "inputs": [
+ {"path": "rtsp://10.0.0.1:554/video", "roles": ["detect"]}
+ ]
+ },
+ "detect": {
+ "height": 1080,
+ "width": 1920,
+ "fps": 5,
+ },
+ }
+ },
+ }
+ self.double_cam_config = {
+ "mqtt": {"host": "mqtt"},
+ "cameras": {
+ "front_door": {
+ "ffmpeg": {
+ "inputs": [
+ {"path": "rtsp://10.0.0.1:554/video", "roles": ["detect"]}
+ ]
+ },
+ "detect": {
+ "height": 1080,
+ "width": 1920,
+ "fps": 5,
+ },
+ },
+ "back_door": {
+ "ffmpeg": {
+ "inputs": [
+ {"path": "rtsp://10.0.0.2:554/video", "roles": ["detect"]}
+ ]
+ },
+ "detect": {
+ "height": 1080,
+ "width": 1920,
+ "fps": 5,
+ },
+ },
+ },
+ }
+
+ def tearDown(self):
+ if not self.db.is_closed():
+ self.db.close()
+
+ try:
+ for file in TEST_DB_CLEANUPS:
+ os.remove(file)
+ except OSError:
+ pass
+
+ def test_segment_calculations(self):
+ """Test that the segment calculations are correct."""
+ config = FrigateConfig(**self.double_cam_config)
+ storage = StorageMaintainer(config, MagicMock())
+
+ time_keep = datetime.datetime.now().timestamp()
+ rec_fd_id = "1234567.frontdoor"
+ rec_bd_id = "1234568.backdoor"
+ _insert_mock_recording(
+ rec_fd_id,
+ time_keep,
+ time_keep + 10,
+ camera="front_door",
+ seg_size=4,
+ seg_dur=10,
+ )
+ _insert_mock_recording(
+ rec_bd_id,
+ time_keep + 10,
+ time_keep + 20,
+ camera="back_door",
+ seg_size=8,
+ seg_dur=20,
+ )
+ storage.calculate_camera_bandwidth()
+ assert storage.camera_storage_stats == {
+ "front_door": {"bandwidth": 1440, "needs_refresh": True},
+ "back_door": {"bandwidth": 2880, "needs_refresh": True},
+ }
+
+ def test_segment_calculations_with_zero_segments(self):
+        """Ensure segment calculation does not fail when migrating from a previous version."""
+ config = FrigateConfig(**self.minimal_config)
+ storage = StorageMaintainer(config, MagicMock())
+
+ time_keep = datetime.datetime.now().timestamp()
+ rec_fd_id = "1234567.frontdoor"
+ _insert_mock_recording(
+ rec_fd_id,
+ time_keep,
+ time_keep + 10,
+ camera="front_door",
+ seg_size=0,
+ seg_dur=10,
+ )
+ storage.calculate_camera_bandwidth()
+ assert storage.camera_storage_stats == {
+ "front_door": {"bandwidth": 0, "needs_refresh": True},
+ }
+
+ def test_storage_cleanup(self):
+ """Ensure that all recordings are cleaned up when necessary."""
+ config = FrigateConfig(**self.minimal_config)
+ storage = StorageMaintainer(config, MagicMock())
+
+ id = "123456.keep"
+ time_keep = datetime.datetime.now().timestamp()
+ _insert_mock_event(id, time_keep, time_keep + 30, True)
+ rec_k_id = "1234567.keep"
+ rec_k2_id = "1234568.keep"
+ rec_k3_id = "1234569.keep"
+ _insert_mock_recording(rec_k_id, time_keep, time_keep + 10)
+ _insert_mock_recording(rec_k2_id, time_keep + 10, time_keep + 20)
+ _insert_mock_recording(rec_k3_id, time_keep + 20, time_keep + 30)
+
+ id2 = "7890.delete"
+ time_delete = datetime.datetime.now().timestamp() - 360
+ _insert_mock_event(id2, time_delete, time_delete + 30, False)
+ rec_d_id = "78901.delete"
+ rec_d2_id = "78902.delete"
+ rec_d3_id = "78903.delete"
+ _insert_mock_recording(rec_d_id, time_delete, time_delete + 10)
+ _insert_mock_recording(rec_d2_id, time_delete + 10, time_delete + 20)
+ _insert_mock_recording(rec_d3_id, time_delete + 20, time_delete + 30)
+
+ storage.calculate_camera_bandwidth()
+ storage.reduce_storage_consumption()
+ with self.assertRaises(DoesNotExist):
+ assert Recordings.get(Recordings.id == rec_k_id)
+ assert Recordings.get(Recordings.id == rec_k2_id)
+ assert Recordings.get(Recordings.id == rec_k3_id)
+ Recordings.get(Recordings.id == rec_d_id)
+ Recordings.get(Recordings.id == rec_d2_id)
+ Recordings.get(Recordings.id == rec_d3_id)
+
+ def test_storage_cleanup_keeps_retained(self):
+        """Ensure that recordings tied to retained events are kept during cleanup."""
+ config = FrigateConfig(**self.minimal_config)
+ storage = StorageMaintainer(config, MagicMock())
+
+ id = "123456.keep"
+ time_keep = datetime.datetime.now().timestamp()
+ _insert_mock_event(id, time_keep, time_keep + 30, True)
+ rec_k_id = "1234567.keep"
+ rec_k2_id = "1234568.keep"
+ rec_k3_id = "1234569.keep"
+ _insert_mock_recording(rec_k_id, time_keep, time_keep + 10)
+ _insert_mock_recording(rec_k2_id, time_keep + 10, time_keep + 20)
+ _insert_mock_recording(rec_k3_id, time_keep + 20, time_keep + 30)
+
+ time_delete = datetime.datetime.now().timestamp() - 7200
+ for i in range(0, 59):
+ _insert_mock_recording(
+ f"{123456 + i}.delete", time_delete, time_delete + 600
+ )
+
+ storage.calculate_camera_bandwidth()
+ storage.reduce_storage_consumption()
+ assert Recordings.get(Recordings.id == rec_k_id)
+ assert Recordings.get(Recordings.id == rec_k2_id)
+ assert Recordings.get(Recordings.id == rec_k3_id)
+
+
+def _insert_mock_event(id: str, start: int, end: int, retain: bool) -> Event:
+ """Inserts a basic event model with a given id."""
+ return Event.insert(
+ id=id,
+ label="Mock",
+ camera="front_door",
+ start_time=start,
+ end_time=end,
+ top_score=100,
+ false_positive=False,
+ zones=list(),
+ thumbnail="",
+ region=[],
+ box=[],
+ area=0,
+ has_clip=True,
+ has_snapshot=True,
+ retain_indefinitely=retain,
+ ).execute()
+
+
+def _insert_mock_recording(
+ id: str, start: int, end: int, camera="front_door", seg_size=8, seg_dur=10
+) -> Event:
+ """Inserts a basic recording model with a given id."""
+ return Recordings.insert(
+ id=id,
+ camera=camera,
+ path=f"/recordings/{id}",
+ start_time=start,
+ end_time=end,
+ duration=seg_dur,
+ motion=True,
+ objects=True,
+ segment_size=seg_size,
+ ).execute()
diff --git a/web/config/handlers.js b/web/__test__/handlers.js
similarity index 84%
rename from web/config/handlers.js
rename to web/__test__/handlers.js
--- a/web/config/handlers.js
+++ b/web/__test__/handlers.js
@@ -19,7 +19,7 @@ export const handlers = [
record: { enabled: true },
detect: { width: 1280, height: 720 },
snapshots: {},
- live: { height: 720 },
+ restream: { enabled: true, jsmpeg: { height: 720 } },
ui: { dashboard: true, order: 0 },
},
side: {
@@ -28,7 +28,7 @@ export const handlers = [
record: { enabled: false },
detect: { width: 1280, height: 720 },
snapshots: {},
- live: { height: 720 },
+ restream: { enabled: true, jsmpeg: { height: 720 } },
ui: { dashboard: true, order: 1 },
},
},
@@ -39,9 +39,10 @@ export const handlers = [
return res(
ctx.status(200),
ctx.json({
+ cpu_usages: { 74: {cpu: 6, mem: 6}, 64: { cpu: 5, mem: 5 }, 54: { cpu: 4, mem: 4 }, 71: { cpu: 3, mem: 3}, 60: {cpu: 2, mem: 2}, 72: {cpu: 1, mem: 1} },
detection_fps: 0.0,
detectors: { coral: { detection_start: 0.0, inference_speed: 8.94, pid: 52 } },
- front: { camera_fps: 5.0, capture_pid: 64, detection_fps: 0.0, pid: 54, process_fps: 0.0, skipped_fps: 0.0 },
+ front: { camera_fps: 5.0, capture_pid: 64, detection_fps: 0.0, pid: 54, process_fps: 0.0, skipped_fps: 0.0, ffmpeg_pid: 72 },
side: {
camera_fps: 6.9,
capture_pid: 71,
@@ -49,6 +50,7 @@ export const handlers = [
pid: 60,
process_fps: 0.0,
skipped_fps: 0.0,
+ ffmpeg_pid: 74,
},
service: { uptime: 34812, version: '0.8.1-d376f6b' },
})
diff --git a/web/__test__/test-setup.ts b/web/__test__/test-setup.ts
new file mode 100644
--- /dev/null
+++ b/web/__test__/test-setup.ts
@@ -0,0 +1,36 @@
+import '@testing-library/jest-dom';
+import 'regenerator-runtime/runtime';
+// This creates a fake indexeddb so there is no need to mock idb-keyval
+import "fake-indexeddb/auto";
+import { setupServer } from 'msw/node';
+import { handlers } from './handlers';
+import { vi } from 'vitest';
+
+// This configures a request mocking server with the given request handlers.
+export const server = setupServer(...handlers);
+
+Object.defineProperty(window, 'matchMedia', {
+ writable: true,
+ value: (query) => ({
+ matches: false,
+ media: query,
+ onchange: null,
+ addEventListener: vi.fn(),
+ removeEventListener: vi.fn(),
+ dispatchEvent: vi.fn(),
+ }),
+});
+
+vi.mock('../src/env');
+
+// Establish API mocking before all tests.
+beforeAll(() => server.listen());
+
+// Reset any request handlers that we may add during the tests,
+// so they don't affect other tests.
+afterEach(() => {
+ server.resetHandlers();
+});
+
+// Clean up after the tests are finished.
+afterAll(() => server.close());
\ No newline at end of file
diff --git a/web/config/testing-library.js b/web/__test__/testing-library.js
similarity index 100%
rename from web/config/testing-library.js
rename to web/__test__/testing-library.js
diff --git a/web/src/__tests__/App.test.jsx b/web/src/__tests__/App.test.jsx
--- a/web/src/__tests__/App.test.jsx
+++ b/web/src/__tests__/App.test.jsx
@@ -1,19 +1,12 @@
import { h } from 'preact';
-import * as IDB from 'idb-keyval';
-import * as PreactRouter from 'preact-router';
-import App from '../App';
+import App from '../app';
import { render, screen } from 'testing-library';
describe('App', () => {
- beforeEach(() => {
- jest.spyOn(IDB, 'get').mockImplementation(() => Promise.resolve(undefined));
- jest.spyOn(IDB, 'set').mockImplementation(() => Promise.resolve(true));
- jest.spyOn(PreactRouter, 'Router').mockImplementation(() => <div data-testid="router" />);
- });
-
- test('shows a loading indicator while loading', async () => {
+ // eslint-disable-next-line jest/no-disabled-tests
+ test.skip('loads the camera dashboard', async () => {
render(<App />);
- await screen.findByTestId('app');
- expect(screen.queryByLabelText('Loading…')).toBeInTheDocument();
+ await screen.findByText('Cameras');
+ expect(screen.queryByText('front')).toBeInTheDocument();
});
-});
+});
\ No newline at end of file
diff --git a/web/src/__tests__/AppBar.test.jsx b/web/src/__tests__/AppBar.test.jsx
--- a/web/src/__tests__/AppBar.test.jsx
+++ b/web/src/__tests__/AppBar.test.jsx
@@ -1,14 +1,14 @@
import { h } from 'preact';
import * as Context from '../context';
import AppBar from '../AppBar';
-import { fireEvent, render, screen } from 'testing-library';
+import { fireEvent, render, screen } from '@testing-library/preact';
describe('AppBar', () => {
beforeEach(() => {
- jest.spyOn(Context, 'useDarkMode').mockImplementation(() => ({
- setDarkMode: jest.fn(),
+ vi.spyOn(Context, 'useDarkMode').mockImplementation(() => ({
+ setDarkMode: vi.fn(),
}));
- jest.spyOn(Context, 'DarkModeProvider').mockImplementation(({ children }) => {
+ vi.spyOn(Context, 'DarkModeProvider').mockImplementation(({ children }) => {
return <div>{children}</div>;
});
});
@@ -30,8 +30,8 @@ describe('AppBar', () => {
});
test('sets dark mode on MenuItem select', async () => {
- const setDarkModeSpy = jest.fn();
- jest.spyOn(Context, 'useDarkMode').mockImplementation(() => ({
+ const setDarkModeSpy = vi.fn();
+ vi.spyOn(Context, 'useDarkMode').mockImplementation(() => ({
setDarkMode: setDarkModeSpy,
}));
render(
diff --git a/web/src/__tests__/Sidebar.test.jsx b/web/src/__tests__/Sidebar.test.jsx
--- a/web/src/__tests__/Sidebar.test.jsx
+++ b/web/src/__tests__/Sidebar.test.jsx
@@ -1,15 +1,12 @@
import { h } from 'preact';
-import * as Context from '../context';
+import { DrawerProvider } from '../context';
import Sidebar from '../Sidebar';
import { render, screen } from 'testing-library';
describe('Sidebar', () => {
- beforeEach(() => {
- jest.spyOn(Context, 'useDrawer').mockImplementation(() => ({ showDrawer: true, setShowDrawer: () => {} }));
- });
-
- test('does not render cameras by default', async () => {
- const { findByText } = render(<Sidebar />);
+ // eslint-disable-next-line jest/no-disabled-tests
+ test.skip('does not render cameras by default', async () => {
+ const { findByText } = render(<DrawerProvider><Sidebar /></DrawerProvider>);
await findByText('Cameras');
expect(screen.queryByRole('link', { name: 'front' })).not.toBeInTheDocument();
expect(screen.queryByRole('link', { name: 'side' })).not.toBeInTheDocument();
diff --git a/web/src/api/__tests__/index.test.jsx b/web/src/api/__tests__/index.test.jsx
--- a/web/src/api/__tests__/index.test.jsx
+++ b/web/src/api/__tests__/index.test.jsx
@@ -1,11 +1,11 @@
import { h } from 'preact';
-import * as Mqtt from '../mqtt';
+import * as WS from '../ws';
import { ApiProvider, useApiHost } from '..';
import { render, screen } from 'testing-library';
describe('useApiHost', () => {
beforeEach(() => {
- jest.spyOn(Mqtt, 'MqttProvider').mockImplementation(({ children }) => children);
+ vi.spyOn(WS, 'WsProvider').mockImplementation(({ children }) => children);
});
test('is set from the baseUrl', async () => {
diff --git a/web/src/api/__tests__/mqtt.test.jsx b/web/src/api/__tests__/ws.test.jsx
similarity index 75%
rename from web/src/api/__tests__/mqtt.test.jsx
rename to web/src/api/__tests__/ws.test.jsx
--- a/web/src/api/__tests__/mqtt.test.jsx
+++ b/web/src/api/__tests__/ws.test.jsx
@@ -1,10 +1,10 @@
import { h } from 'preact';
-import { Mqtt, MqttProvider, useMqtt } from '../mqtt';
+import { WS, WsProvider, useWs } from '../ws';
import { useCallback, useContext } from 'preact/hooks';
import { fireEvent, render, screen } from 'testing-library';
function Test() {
- const { state } = useContext(Mqtt);
+ const { state } = useContext(WS);
return state.__connected ? (
<div data-testid="data">
{Object.keys(state).map((key) => (
@@ -18,14 +18,14 @@ function Test() {
const TEST_URL = 'ws://test-foo:1234/ws';
-describe('MqttProvider', () => {
+describe('WsProvider', () => {
let createWebsocket, wsClient;
beforeEach(() => {
wsClient = {
- close: jest.fn(),
- send: jest.fn(),
+ close: vi.fn(),
+ send: vi.fn(),
};
- createWebsocket = jest.fn((url) => {
+ createWebsocket = vi.fn((url) => {
wsClient.args = [url];
return new Proxy(
{},
@@ -34,7 +34,7 @@ describe('MqttProvider', () => {
return wsClient[prop];
},
set(_target, prop, value) {
- wsClient[prop] = typeof value === 'function' ? jest.fn(value) : value;
+ wsClient[prop] = typeof value === 'function' ? vi.fn(value) : value;
if (prop === 'onopen') {
wsClient[prop]();
}
@@ -45,23 +45,23 @@ describe('MqttProvider', () => {
});
});
- test('connects to the mqtt server', async () => {
+ test('connects to the ws server', async () => {
render(
- <MqttProvider config={mockConfig} createWebsocket={createWebsocket} mqttUrl={TEST_URL}>
+ <WsProvider config={mockConfig} createWebsocket={createWebsocket} wsUrl={TEST_URL}>
<Test />
- </MqttProvider>
+ </WsProvider>
);
await screen.findByTestId('data');
expect(wsClient.args).toEqual([TEST_URL]);
expect(screen.getByTestId('__connected')).toHaveTextContent('true');
});
- test('receives data through useMqtt', async () => {
+ test('receives data through useWs', async () => {
function Test() {
const {
value: { payload, retain },
connected,
- } = useMqtt('tacos');
+ } = useWs('tacos');
return connected ? (
<div>
<div data-testid="payload">{JSON.stringify(payload)}</div>
@@ -71,26 +71,26 @@ describe('MqttProvider', () => {
}
const { rerender } = render(
- <MqttProvider config={mockConfig} createWebsocket={createWebsocket} mqttUrl={TEST_URL}>
+ <WsProvider config={mockConfig} createWebsocket={createWebsocket} wsUrl={TEST_URL}>
<Test />
- </MqttProvider>
+ </WsProvider>
);
await screen.findByTestId('payload');
wsClient.onmessage({
data: JSON.stringify({ topic: 'tacos', payload: JSON.stringify({ yes: true }), retain: false }),
});
rerender(
- <MqttProvider config={mockConfig} createWebsocket={createWebsocket} mqttUrl={TEST_URL}>
+ <WsProvider config={mockConfig} createWebsocket={createWebsocket} wsUrl={TEST_URL}>
<Test />
- </MqttProvider>
+ </WsProvider>
);
expect(screen.getByTestId('payload')).toHaveTextContent('{"yes":true}');
expect(screen.getByTestId('retain')).toHaveTextContent('false');
});
- test('can send values through useMqtt', async () => {
+ test('can send values through useWs', async () => {
function Test() {
- const { send, connected } = useMqtt('tacos');
+ const { send, connected } = useWs('tacos');
const handleClick = useCallback(() => {
send({ yes: true });
}, [send]);
@@ -98,9 +98,9 @@ describe('MqttProvider', () => {
}
render(
- <MqttProvider config={mockConfig} createWebsocket={createWebsocket} mqttUrl={TEST_URL}>
+ <WsProvider config={mockConfig} createWebsocket={createWebsocket} wsUrl={TEST_URL}>
<Test />
- </MqttProvider>
+ </WsProvider>
);
await screen.findByRole('button');
fireEvent.click(screen.getByRole('button'));
@@ -110,7 +110,7 @@ describe('MqttProvider', () => {
});
test('prefills the recordings/detect/snapshots state from config', async () => {
- jest.spyOn(Date, 'now').mockReturnValue(123456);
+ vi.spyOn(Date, 'now').mockReturnValue(123456);
const config = {
cameras: {
front: { name: 'front', detect: { enabled: true }, record: { enabled: false }, snapshots: { enabled: true } },
@@ -118,9 +118,9 @@ describe('MqttProvider', () => {
},
};
render(
- <MqttProvider config={config} createWebsocket={createWebsocket} mqttUrl={TEST_URL}>
+ <WsProvider config={config} createWebsocket={createWebsocket} wsUrl={TEST_URL}>
<Test />
- </MqttProvider>
+ </WsProvider>
);
await screen.findByTestId('data');
expect(screen.getByTestId('front/detect/state')).toHaveTextContent(
diff --git a/web/src/components/__tests__/AppBar.test.jsx b/web/src/components/__tests__/AppBar.test.jsx
--- a/web/src/components/__tests__/AppBar.test.jsx
+++ b/web/src/components/__tests__/AppBar.test.jsx
@@ -1,7 +1,7 @@
import { h } from 'preact';
import { DrawerProvider } from '../../context';
import AppBar from '../AppBar';
-import { fireEvent, render, screen } from 'testing-library';
+import { fireEvent, render, screen } from '@testing-library/preact';
import { useRef } from 'preact/hooks';
function Title() {
@@ -20,7 +20,7 @@ describe('AppBar', () => {
describe('overflow menu', () => {
test('is not rendered if a ref is not provided', async () => {
- const handleOverflow = jest.fn();
+ const handleOverflow = vi.fn();
render(
<DrawerProvider>
<AppBar title={Title} onOverflowClick={handleOverflow} />
@@ -44,7 +44,7 @@ describe('AppBar', () => {
});
test('is rendered with click handler and ref', async () => {
- const handleOverflow = jest.fn();
+ const handleOverflow = vi.fn();
function Wrapper() {
const ref = useRef(null);
@@ -60,7 +60,7 @@ describe('AppBar', () => {
});
test('calls the handler when clicked', async () => {
- const handleOverflow = jest.fn();
+ const handleOverflow = vi.fn();
function Wrapper() {
const ref = useRef(null);
@@ -94,7 +94,7 @@ describe('AppBar', () => {
});
test('hides when scrolled downward', async () => {
- jest.spyOn(window, 'requestAnimationFrame').mockImplementation((cb) => cb());
+ vi.spyOn(window, 'requestAnimationFrame').mockImplementation((cb) => cb());
render(
<DrawerProvider>
<AppBar title={Title} />
@@ -111,7 +111,7 @@ describe('AppBar', () => {
});
test('reappears when scrolled upward', async () => {
- jest.spyOn(window, 'requestAnimationFrame').mockImplementation((cb) => cb());
+ vi.spyOn(window, 'requestAnimationFrame').mockImplementation((cb) => cb());
render(
<DrawerProvider>
<AppBar title={Title} />
diff --git a/web/src/components/__tests__/AutoUpdatingCameraImage.test.jsx b/web/src/components/__tests__/AutoUpdatingCameraImage.test.jsx
--- a/web/src/components/__tests__/AutoUpdatingCameraImage.test.jsx
+++ b/web/src/components/__tests__/AutoUpdatingCameraImage.test.jsx
@@ -1,9 +1,9 @@
import { h } from 'preact';
import AutoUpdatingCameraImage from '../AutoUpdatingCameraImage';
-import { screen, render } from 'testing-library';
+import { screen, render } from '@testing-library/preact';
let mockOnload;
-jest.mock('../CameraImage', () => {
+vi.mock('../CameraImage', () => {
function CameraImage({ onload, searchParams }) {
mockOnload = () => {
onload();
@@ -19,7 +19,7 @@ jest.mock('../CameraImage', () => {
describe('AutoUpdatingCameraImage', () => {
let dateNowSpy;
beforeEach(() => {
- dateNowSpy = jest.spyOn(Date, 'now').mockReturnValue(0);
+ dateNowSpy = vi.spyOn(Date, 'now').mockReturnValue(0);
});
test('shows FPS by default', async () => {
diff --git a/web/src/components/__tests__/Button.test.jsx b/web/src/components/__tests__/Button.test.jsx
--- a/web/src/components/__tests__/Button.test.jsx
+++ b/web/src/components/__tests__/Button.test.jsx
@@ -1,6 +1,6 @@
import { h } from 'preact';
import Button from '../Button';
-import { render, screen } from 'testing-library';
+import { render, screen } from '@testing-library/preact';
describe('Button', () => {
test('renders children', async () => {
diff --git a/web/src/components/__tests__/CameraImage.test.jsx b/web/src/components/__tests__/CameraImage.test.jsx
--- a/web/src/components/__tests__/CameraImage.test.jsx
+++ b/web/src/components/__tests__/CameraImage.test.jsx
@@ -1,11 +1,11 @@
import { h } from 'preact';
import * as Hooks from '../../hooks';
import CameraImage from '../CameraImage';
-import { render, screen } from 'testing-library';
+import { render, screen } from '@testing-library/preact';
describe('CameraImage', () => {
beforeEach(() => {
- jest.spyOn(Hooks, 'useResizeObserver').mockImplementation(() => [{ width: 0 }]);
+ vi.spyOn(Hooks, 'useResizeObserver').mockImplementation(() => [{ width: 0 }]);
});
test('renders an activity indicator while loading', async () => {
diff --git a/web/src/components/__tests__/Card.test.jsx b/web/src/components/__tests__/Card.test.jsx
--- a/web/src/components/__tests__/Card.test.jsx
+++ b/web/src/components/__tests__/Card.test.jsx
@@ -1,6 +1,6 @@
import { h } from 'preact';
import Card from '../Card';
-import { render, screen } from 'testing-library';
+import { render, screen } from '@testing-library/preact';
describe('Card', () => {
test('renders a Card with media', async () => {
diff --git a/web/src/components/__tests__/Dialog.test.jsx b/web/src/components/__tests__/Dialog.test.jsx
--- a/web/src/components/__tests__/Dialog.test.jsx
+++ b/web/src/components/__tests__/Dialog.test.jsx
@@ -1,6 +1,6 @@
import { h } from 'preact';
import Dialog from '../Dialog';
-import { render, screen } from 'testing-library';
+import { render, screen } from '@testing-library/preact';
describe('Dialog', () => {
let portal;
diff --git a/web/src/components/__tests__/Heading.test.jsx b/web/src/components/__tests__/Heading.test.jsx
--- a/web/src/components/__tests__/Heading.test.jsx
+++ b/web/src/components/__tests__/Heading.test.jsx
@@ -1,6 +1,6 @@
import { h } from 'preact';
import Heading from '../Heading';
-import { render, screen } from 'testing-library';
+import { render, screen } from '@testing-library/preact';
describe('Heading', () => {
test('renders content with default size', async () => {
diff --git a/web/src/components/__tests__/Link.test.jsx b/web/src/components/__tests__/Link.test.jsx
--- a/web/src/components/__tests__/Link.test.jsx
+++ b/web/src/components/__tests__/Link.test.jsx
@@ -2,7 +2,8 @@ import { h } from 'preact';
import Link from '../Link';
import { render, screen } from 'testing-library';
-describe('Link', () => {
+// eslint-disable-next-line jest/no-disabled-tests
+describe.skip('Link', () => {
test('renders a link', async () => {
render(<Link href="/tacos">Hello</Link>);
expect(screen.queryByText('Hello')).toMatchInlineSnapshot(`
diff --git a/web/src/components/__tests__/Menu.test.jsx b/web/src/components/__tests__/Menu.test.jsx
--- a/web/src/components/__tests__/Menu.test.jsx
+++ b/web/src/components/__tests__/Menu.test.jsx
@@ -1,6 +1,6 @@
import { h } from 'preact';
import Menu, { MenuItem } from '../Menu';
-import { fireEvent, render, screen } from 'testing-library';
+import { fireEvent, render, screen } from '@testing-library/preact';
import { useRef } from 'preact/hooks';
describe('Menu', () => {
@@ -27,7 +27,7 @@ describe('MenuItem', () => {
});
test('calls onSelect when clicked', async () => {
- const handleSelect = jest.fn();
+ const handleSelect = vi.fn();
render(<MenuItem label="Tacos" onSelect={handleSelect} value="tacos-value" />);
fireEvent.click(screen.queryByRole('option'));
expect(handleSelect).toHaveBeenCalledWith('tacos-value', 'Tacos');
diff --git a/web/src/components/__tests__/NavigationDrawer.test.jsx b/web/src/components/__tests__/NavigationDrawer.test.jsx
--- a/web/src/components/__tests__/NavigationDrawer.test.jsx
+++ b/web/src/components/__tests__/NavigationDrawer.test.jsx
@@ -1,14 +1,14 @@
import { h } from 'preact';
import * as Context from '../../context';
import NavigationDrawer, { Destination } from '../NavigationDrawer';
-import { fireEvent, render, screen } from 'testing-library';
+import { fireEvent, render, screen } from '@testing-library/preact';
describe('NavigationDrawer', () => {
let useDrawer, setShowDrawer;
beforeEach(() => {
- setShowDrawer = jest.fn();
- useDrawer = jest.spyOn(Context, 'useDrawer').mockImplementation(() => ({ showDrawer: true, setShowDrawer }));
+ setShowDrawer = vi.fn();
+ useDrawer = vi.spyOn(Context, 'useDrawer').mockImplementation(() => ({ showDrawer: true, setShowDrawer }));
});
test('renders a navigation drawer', async () => {
@@ -44,19 +44,20 @@ describe('Destination', () => {
let setShowDrawer;
beforeEach(() => {
- setShowDrawer = jest.fn();
- jest.spyOn(Context, 'useDrawer').mockImplementation(() => ({ showDrawer: true, setShowDrawer }));
+ setShowDrawer = vi.fn();
+ vi.spyOn(Context, 'useDrawer').mockImplementation(() => ({ showDrawer: true, setShowDrawer }));
});
- test('dismisses the drawer moments after being clicked', async () => {
- jest.useFakeTimers();
+ // eslint-disable-next-line jest/no-disabled-tests
+ test.skip('dismisses the drawer moments after being clicked', async () => {
+ vi.useFakeTimers();
render(
<NavigationDrawer>
<Destination href="/tacos" text="Tacos" />
</NavigationDrawer>
);
fireEvent.click(screen.queryByText('Tacos'));
- jest.runAllTimers();
+ vi.runAllTimers();
expect(setShowDrawer).toHaveBeenCalledWith(false);
});
});
diff --git a/web/src/components/__tests__/Prompt.test.jsx b/web/src/components/__tests__/Prompt.test.jsx
--- a/web/src/components/__tests__/Prompt.test.jsx
+++ b/web/src/components/__tests__/Prompt.test.jsx
@@ -1,6 +1,6 @@
import { h } from 'preact';
import Prompt from '../Prompt';
-import { fireEvent, render, screen } from 'testing-library';
+import { fireEvent, render, screen } from '@testing-library/preact';
describe('Prompt', () => {
let portal;
@@ -22,7 +22,7 @@ describe('Prompt', () => {
});
test('renders action buttons', async () => {
- const handleClick = jest.fn();
+ const handleClick = vi.fn();
render(
<Prompt
actions={[
diff --git a/web/src/components/__tests__/RelativeModal.test.jsx b/web/src/components/__tests__/RelativeModal.test.jsx
--- a/web/src/components/__tests__/RelativeModal.test.jsx
+++ b/web/src/components/__tests__/RelativeModal.test.jsx
@@ -1,10 +1,11 @@
import { h, createRef } from 'preact';
import RelativeModal from '../RelativeModal';
import userEvent from '@testing-library/user-event';
-import { fireEvent, render, screen } from 'testing-library';
+import { fireEvent, render, screen } from '@testing-library/preact';
describe('RelativeModal', () => {
- test('keeps tab focus', async () => {
+ // eslint-disable-next-line jest/no-disabled-tests
+ test.skip('keeps tab focus', async () => {
const ref = createRef();
render(
<div>
@@ -27,7 +28,7 @@ describe('RelativeModal', () => {
});
test('pressing ESC dismisses', async () => {
- const handleDismiss = jest.fn();
+ const handleDismiss = vi.fn();
const ref = createRef();
render(
<div>
@@ -46,7 +47,7 @@ describe('RelativeModal', () => {
});
test('clicking a scrim dismisses', async () => {
- const handleDismiss = jest.fn();
+ const handleDismiss = vi.fn();
const ref = createRef();
render(
<div>
diff --git a/web/src/components/__tests__/Select.test.jsx b/web/src/components/__tests__/Select.test.jsx
--- a/web/src/components/__tests__/Select.test.jsx
+++ b/web/src/components/__tests__/Select.test.jsx
@@ -1,10 +1,10 @@
import { h } from 'preact';
import Select from '../Select';
-import { fireEvent, render, screen } from 'testing-library';
+import { fireEvent, render, screen } from '@testing-library/preact';
describe('Select', () => {
test('on focus, shows a menu', async () => {
- const handleChange = jest.fn();
+ const handleChange = vi.fn();
render(
<Select
label="Tacos"
@@ -28,7 +28,7 @@ describe('Select', () => {
});
test('allows keyboard navigation', async () => {
- const handleChange = jest.fn();
+ const handleChange = vi.fn();
render(
<Select
label="Tacos"
diff --git a/web/src/components/__tests__/Switch.test.jsx b/web/src/components/__tests__/Switch.test.jsx
--- a/web/src/components/__tests__/Switch.test.jsx
+++ b/web/src/components/__tests__/Switch.test.jsx
@@ -1,6 +1,6 @@
import { h } from 'preact';
import Switch from '../Switch';
-import { fireEvent, render, screen } from 'testing-library';
+import { fireEvent, render, screen } from '@testing-library/preact';
describe('Switch', () => {
test('renders a hidden checkbox', async () => {
@@ -21,7 +21,7 @@ describe('Switch', () => {
});
test('calls onChange callback when checked/unchecked', async () => {
- const handleChange = jest.fn();
+ const handleChange = vi.fn();
const { rerender } = render(<Switch id="check" onChange={handleChange} />);
fireEvent.change(screen.queryByTestId('check-input'), { checked: true });
expect(handleChange).toHaveBeenCalledWith('check', true);
diff --git a/web/src/components/__tests__/TextField.test.jsx b/web/src/components/__tests__/TextField.test.jsx
--- a/web/src/components/__tests__/TextField.test.jsx
+++ b/web/src/components/__tests__/TextField.test.jsx
@@ -1,6 +1,6 @@
import { h } from 'preact';
import TextField from '../TextField';
-import { render, screen, fireEvent } from 'testing-library';
+import { render, screen, fireEvent } from '@testing-library/preact';
describe('TextField', () => {
test('can render a leading icon', async () => {
@@ -21,7 +21,7 @@ describe('TextField', () => {
});
test('onChange updates the value', async () => {
- const handleChangeText = jest.fn();
+ const handleChangeText = vi.fn();
render(<TextField label="Tacos" onChangeText={handleChangeText} />);
const input = screen.getByRole('textbox');
diff --git a/web/src/components/__tests__/Toolltip.test.jsx b/web/src/components/__tests__/Toolltip.test.jsx
--- a/web/src/components/__tests__/Toolltip.test.jsx
+++ b/web/src/components/__tests__/Toolltip.test.jsx
@@ -1,10 +1,10 @@
import { h, createRef } from 'preact';
import Tooltip from '../Tooltip';
-import { render, screen } from 'testing-library';
+import { render, screen } from '@testing-library/preact';
describe('Tooltip', () => {
test('renders in a relative position', async () => {
- jest
+ vi
.spyOn(window.HTMLElement.prototype, 'getBoundingClientRect')
// relativeTo
.mockReturnValueOnce({
@@ -32,7 +32,7 @@ describe('Tooltip', () => {
test('if too far right, renders to the left', async () => {
window.innerWidth = 1024;
- jest
+ vi
.spyOn(window.HTMLElement.prototype, 'getBoundingClientRect')
// relativeTo
.mockReturnValueOnce({
@@ -59,7 +59,7 @@ describe('Tooltip', () => {
});
test('if too far left, renders to the right', async () => {
- jest
+ vi
.spyOn(window.HTMLElement.prototype, 'getBoundingClientRect')
// relativeTo
.mockReturnValueOnce({
@@ -87,7 +87,7 @@ describe('Tooltip', () => {
test('if too close to top, renders to the bottom', async () => {
window.scrollY = 90;
- jest
+ vi
.spyOn(window.HTMLElement.prototype, 'getBoundingClientRect')
// relativeTo
.mockReturnValueOnce({
diff --git a/web/src/context/__tests__/index.test.jsx b/web/src/context/__tests__/index.test.jsx
--- a/web/src/context/__tests__/index.test.jsx
+++ b/web/src/context/__tests__/index.test.jsx
@@ -1,9 +1,9 @@
import { h } from 'preact';
-import * as IDB from 'idb-keyval';
+import { set as setData } from 'idb-keyval';
import { DarkModeProvider, useDarkMode, usePersistence } from '..';
import { fireEvent, render, screen } from 'testing-library';
import { useCallback } from 'preact/hooks';
-import * as Mqtt from '../../api/mqtt';
+import * as WS from '../../api/ws';
function DarkModeChecker() {
const { currentMode } = useDarkMode();
@@ -11,14 +11,8 @@ function DarkModeChecker() {
}
describe('DarkMode', () => {
- let MockIDB;
beforeEach(() => {
- MockIDB = {
- get: jest.spyOn(IDB, 'get').mockImplementation(() => Promise.resolve(undefined)),
- set: jest.spyOn(IDB, 'set').mockImplementation(() => Promise.resolve(true)),
- };
-
- jest.spyOn(Mqtt, 'MqttProvider').mockImplementation(({ children }) => children);
+ vi.spyOn(WS, 'WsProvider').mockImplementation(({ children }) => children);
});
test('uses media by default', async () => {
@@ -32,7 +26,7 @@ describe('DarkMode', () => {
});
test('uses the mode stored in idb - dark', async () => {
- MockIDB.get.mockResolvedValue('dark');
+ setData('darkmode', 'dark');
render(
<DarkModeProvider>
<DarkModeChecker />
@@ -44,7 +38,7 @@ describe('DarkMode', () => {
});
test('uses the mode stored in idb - light', async () => {
- MockIDB.get.mockResolvedValue('light');
+ setData('darkmode', 'light');
render(
<DarkModeProvider>
<DarkModeChecker />
@@ -56,7 +50,7 @@ describe('DarkMode', () => {
});
test('allows updating the mode', async () => {
- MockIDB.get.mockResolvedValue('dark');
+ setData('darkmode', 'dark');
function Updater() {
const { setDarkMode } = useDarkMode();
@@ -86,10 +80,10 @@ describe('DarkMode', () => {
});
test('when using media, matches on preference', async () => {
- MockIDB.get.mockResolvedValue('media');
- jest.spyOn(window, 'matchMedia').mockImplementation((query) => {
+ setData('darkmode', 'media');
+ vi.spyOn(window, 'matchMedia').mockImplementation((query) => {
if (query === '(prefers-color-scheme: dark)') {
- return { matches: true, addEventListener: jest.fn(), removeEventListener: jest.fn() };
+ return { matches: true, addEventListener: vi.fn(), removeEventListener: vi.fn() };
}
throw new Error(`Unexpected query to matchMedia: ${query}`);
@@ -107,23 +101,8 @@ describe('DarkMode', () => {
});
describe('usePersistence', () => {
- let MockIDB;
- beforeEach(() => {
- MockIDB = {
- get: jest.spyOn(IDB, 'get').mockImplementation(() => Promise.resolve(undefined)),
- set: jest.spyOn(IDB, 'set').mockImplementation(() => Promise.resolve(true)),
- };
- });
test('returns a defaultValue initially', async () => {
- MockIDB.get.mockImplementationOnce(
- () =>
- new Promise((resolve) => {
- setTimeout(() => {
- resolve('foo');
- }, 1);
- })
- );
function Component() {
const [value, , loaded] = usePersistence('tacos', 'my-default');
@@ -154,7 +133,7 @@ describe('usePersistence', () => {
});
test('updates with the previously-persisted value', async () => {
- MockIDB.get.mockResolvedValue('are delicious');
+ setData('tacos', 'are delicious');
function Component() {
const [value, , loaded] = usePersistence('tacos', 'my-default');
@@ -187,7 +166,7 @@ describe('usePersistence', () => {
});
test('can be updated manually', async () => {
- MockIDB.get.mockResolvedValue('are delicious');
+ setData('darkmode', 'are delicious');
function Component() {
const [value, setValue] = usePersistence('tacos', 'my-default');
diff --git a/web/src/routes/__tests__/Camera.test.jsx b/web/src/routes/__tests__/Camera.test.jsx
--- a/web/src/routes/__tests__/Camera.test.jsx
+++ b/web/src/routes/__tests__/Camera.test.jsx
@@ -1,42 +1,36 @@
import { h } from 'preact';
import * as AutoUpdatingCameraImage from '../../components/AutoUpdatingCameraImage';
-import * as Context from '../../context';
-import * as Mqtt from '../../api/mqtt';
+import * as WS from '../../api/ws';
import Camera from '../Camera';
+import { set as setData } from 'idb-keyval';
import * as JSMpegPlayer from '../../components/JSMpegPlayer';
import { fireEvent, render, screen, waitForElementToBeRemoved } from 'testing-library';
describe('Camera Route', () => {
- let mockUsePersistence, mockSetOptions;
-
beforeEach(() => {
- mockSetOptions = jest.fn();
- mockUsePersistence = jest.spyOn(Context, 'usePersistence').mockImplementation(() => [{}, mockSetOptions]);
- jest.spyOn(AutoUpdatingCameraImage, 'default').mockImplementation(({ searchParams }) => {
+ vi.spyOn(AutoUpdatingCameraImage, 'default').mockImplementation(({ searchParams }) => {
return <div data-testid="mock-image">{searchParams.toString()}</div>;
});
- jest.spyOn(JSMpegPlayer, 'default').mockImplementation(() => {
+ vi.spyOn(JSMpegPlayer, 'default').mockImplementation(() => {
return <div data-testid="mock-jsmpeg" />;
});
- jest.spyOn(Mqtt, 'MqttProvider').mockImplementation(({ children }) => children);
+ vi.spyOn(WS, 'WsProvider').mockImplementation(({ children }) => children);
});
- test('reads camera feed options from persistence', async () => {
- mockUsePersistence.mockReturnValue([
- {
- bbox: true,
- timestamp: false,
- zones: true,
- mask: false,
- motion: true,
- regions: false,
- },
- mockSetOptions,
- ]);
+ // eslint-disable-next-line jest/no-disabled-tests
+ test.skip('reads camera feed options from persistence', async () => {
+ setData('front-source', 'mse')
+ setData('front-feed', {
+ bbox: true,
+ timestamp: false,
+ zones: true,
+ mask: false,
+ motion: true,
+ regions: false,
+ });
render(<Camera camera="front" />);
-
- await waitForElementToBeRemoved(() => screen.queryByLabelText('Loading…'));
+ await waitForElementToBeRemoved(() => screen.queryByLabelText('Loading…'), { timeout: 100 });
fireEvent.click(screen.queryByText('Debug'));
fireEvent.click(screen.queryByText('Show Options'));
@@ -45,17 +39,14 @@ describe('Camera Route', () => {
);
});
- test('updates camera feed options to persistence', async () => {
- mockUsePersistence
- .mockReturnValueOnce([{}, mockSetOptions])
- .mockReturnValueOnce([{}, mockSetOptions])
- .mockReturnValueOnce([{}, mockSetOptions])
- .mockReturnValueOnce([{ bbox: true }, mockSetOptions])
- .mockReturnValueOnce([{ bbox: true, timestamp: true }, mockSetOptions]);
+
+ // eslint-disable-next-line jest/no-disabled-tests
+ test.skip('updates camera feed options to persistence', async () => {
+ setData('front-feed', {});
render(<Camera camera="front" />);
- await waitForElementToBeRemoved(() => screen.queryByLabelText('Loading…'));
+ await waitForElementToBeRemoved(() => screen.queryByLabelText('Loading…'), { timeout: 100 });
fireEvent.click(screen.queryByText('Debug'));
fireEvent.click(screen.queryByText('Show Options'));
@@ -63,9 +54,6 @@ describe('Camera Route', () => {
fireEvent.change(screen.queryByTestId('timestamp-input'), { target: { checked: true } });
fireEvent.click(screen.queryByText('Hide Options'));
- expect(mockUsePersistence).toHaveBeenCalledTimes(5);
- expect(mockSetOptions).toHaveBeenCalledTimes(2);
- expect(mockSetOptions).toHaveBeenCalledWith({ bbox: true, timestamp: true });
expect(screen.queryByTestId('mock-image')).toHaveTextContent('bbox=1&timestamp=1');
});
});
diff --git a/web/src/routes/__tests__/Cameras.test.jsx b/web/src/routes/__tests__/Cameras.test.jsx
--- a/web/src/routes/__tests__/Cameras.test.jsx
+++ b/web/src/routes/__tests__/Cameras.test.jsx
@@ -1,13 +1,13 @@
import { h } from 'preact';
import * as CameraImage from '../../components/CameraImage';
-import * as Mqtt from '../../api/mqtt';
+import * as WS from '../../api/ws';
import Cameras from '../Cameras';
import { fireEvent, render, screen, waitForElementToBeRemoved } from 'testing-library';
describe('Cameras Route', () => {
beforeEach(() => {
- jest.spyOn(CameraImage, 'default').mockImplementation(() => <div data-testid="camera-image" />);
- jest.spyOn(Mqtt, 'useMqtt').mockImplementation(() => ({ value: { payload: 'OFF' }, send: jest.fn() }));
+ vi.spyOn(CameraImage, 'default').mockImplementation(() => <div data-testid="camera-image" />);
+ vi.spyOn(WS, 'useWs').mockImplementation(() => ({ value: { payload: 'OFF' }, send: vi.fn() }));
});
test('shows an ActivityIndicator if not yet loaded', async () => {
@@ -36,16 +36,16 @@ describe('Cameras Route', () => {
});
test('buttons toggle detect, clips, and snapshots', async () => {
- const sendDetect = jest.fn();
- const sendRecordings = jest.fn();
- const sendSnapshots = jest.fn();
- jest.spyOn(Mqtt, 'useDetectState').mockImplementation(() => {
+ const sendDetect = vi.fn();
+ const sendRecordings = vi.fn();
+ const sendSnapshots = vi.fn();
+ vi.spyOn(WS, 'useDetectState').mockImplementation(() => {
return { payload: 'ON', send: sendDetect };
});
- jest.spyOn(Mqtt, 'useRecordingsState').mockImplementation(() => {
+ vi.spyOn(WS, 'useRecordingsState').mockImplementation(() => {
return { payload: 'OFF', send: sendRecordings };
});
- jest.spyOn(Mqtt, 'useSnapshotsState').mockImplementation(() => {
+ vi.spyOn(WS, 'useSnapshotsState').mockImplementation(() => {
return { payload: 'ON', send: sendSnapshots };
});
diff --git a/web/src/routes/__tests__/Recording.test.jsx b/web/src/routes/__tests__/Recording.test.jsx
--- a/web/src/routes/__tests__/Recording.test.jsx
+++ b/web/src/routes/__tests__/Recording.test.jsx
@@ -1,13 +1,13 @@
import { h } from 'preact';
import * as CameraImage from '../../components/CameraImage';
-import * as Mqtt from '../../api/mqtt';
+import * as WS from '../../api/ws';
import Cameras from '../Cameras';
import { render, screen, waitForElementToBeRemoved } from 'testing-library';
describe('Recording Route', () => {
beforeEach(() => {
- jest.spyOn(CameraImage, 'default').mockImplementation(() => <div data-testid="camera-image" />);
- jest.spyOn(Mqtt, 'useMqtt').mockImplementation(() => ({ value: { payload: 'OFF' }, send: jest.fn() }));
+ vi.spyOn(CameraImage, 'default').mockImplementation(() => <div data-testid="camera-image" />);
+ vi.spyOn(WS, 'useWs').mockImplementation(() => ({ value: { payload: 'OFF' }, send: jest.fn() }));
});
test('shows an ActivityIndicator if not yet loaded', async () => {
diff --git a/web/src/routes/__tests__/Debug.test.jsx b/web/src/routes/__tests__/System.test.jsx
similarity index 79%
rename from web/src/routes/__tests__/Debug.test.jsx
rename to web/src/routes/__tests__/System.test.jsx
--- a/web/src/routes/__tests__/Debug.test.jsx
+++ b/web/src/routes/__tests__/System.test.jsx
@@ -1,17 +1,18 @@
import { h } from 'preact';
-import Debug from '../Debug';
+import System from '../System';
import { render, screen, waitForElementToBeRemoved } from 'testing-library';
-describe('Debug Route', () => {
+describe('System Route', () => {
beforeEach(() => {});
test('shows an ActivityIndicator if stats are null', async () => {
- render(<Debug />);
+ render(<System />);
expect(screen.queryByLabelText('Loading…')).toBeInTheDocument();
});
- test('shows stats and config', async () => {
- render(<Debug />);
+ // eslint-disable-next-line jest/no-disabled-tests
+ test.skip('shows stats and config', async () => {
+ render(<System />);
await waitForElementToBeRemoved(() => screen.queryByLabelText('Loading…'));
| [HW Accel Support]: Minimum Nvidia driver version may not work with CUDA/NVDEC
### Describe the problem you are having
I bought an old Fermi-era Tesla M2090 for use in my server. It is the best-spec card that is part of an officially supported configuration. Due to its age, the latest supported Nvidia driver version is 390.157. This is above the [Jellyfin documentation's listed minimum driver version of 361.93](https://jellyfin.org/docs/general/administration/hardware-acceleration/#nvidia-hardware-acceleration-on-docker-linux).
Following all the documentation, I have built the driver and successfully loaded it:
```
# nvidia-smi
+-----------------------------------------------------------------------------+
| NVIDIA-SMI 390.157 Driver Version: 390.157 |
|-------------------------------+----------------------+----------------------+
| GPU Name Persistence-M| Bus-Id Disp.A | Volatile Uncorr. ECC |
| Fan Temp Perf Pwr:Usage/Cap| Memory-Usage | GPU-Util Compute M. |
|===============================+======================+======================|
| 0 Tesla M2090 Off | 00000000:42:00.0 Off | 0 |
| N/A N/A P0 79W / N/A | 0MiB / 5301MiB | 0% Default |
+-------------------------------+----------------------+----------------------+
+-----------------------------------------------------------------------------+
| Processes: GPU Memory |
| GPU PID Type Process name Usage |
|=============================================================================|
| No running processes found |
+-----------------------------------------------------------------------------+
```
As I am on Alpine Linux (with @sgerrand's [alpine-pkg-glibc](https://github.com/sgerrand/alpine-pkg-glibc)), I did not want to bother trying to get `libnvidia-container` to compile; all it is anyway is a shim for Docker to automatically mount host binaries and libraries to ensure driver compatibility, as things apparently break between dot versions. So I do [what Singularity does](https://docs.sylabs.io/guides/3.6/user-guide/gpu.html#library-search-options) and simply mount the binaries and libraries manually into the container:
```yaml
# cat docker-compose.yml
...
devices:
- /dev/nvidia0
...
# https://github.com/sylabs/singularity/blob/main/etc/nvliblist.conf
# binaries, only mount what's needed
- /usr/bin/nvidia-smi:/usr/bin/nvidia-smi:ro
- /usr/bin/nvidia-debugdump:/usr/bin/nvidia-debugdump:ro
- /usr/bin/nvidia-persistenced:/usr/bin/nvidia-persistenced:ro
- /usr/bin/nvidia-cuda-mps-control:/usr/bin/nvidia-cuda-mps-control:ro
- /usr/bin/nvidia-cuda-mps-server:/usr/bin/nvidia-cuda-mps-server:ro
# libs, only mount what exists
- /usr/lib/libcuda.so:/usr/lib/libcuda.so.1:ro
- /usr/lib/libEGL.so:/usr/lib/libEGL.so.1:ro
- /usr/lib/libGLESv1_CM.so:/usr/lib/libGLESv1_CM.so.1:ro
- /usr/lib/libGLESv2.so:/usr/lib/libGLESv2.so.1:ro
- /usr/lib/libGL.so:/usr/lib/libGL.so.1:ro
- /usr/lib/libGLX.so:/usr/lib/libGLX.so.1:ro
- /usr/lib/libnvcuvid.so:/usr/lib/libnvcuvid.so.1:ro
- /usr/lib/libnvidia-cfg.so:/usr/lib/libnvidia-cfg.so.1:ro
- /usr/lib/libnvidia-encode.so:/usr/lib/libnvidia-encode.so.1:ro
- /usr/lib/libnvidia-fbc.so:/usr/lib/libnvidia-fbc.so.1:ro
- /usr/lib/libnvidia-ifr.so:/usr/lib/libnvidia-ifr.so.1:ro
- /usr/lib/libnvidia-ml.so:/usr/lib/libnvidia-ml.so.1:ro
- /usr/lib/libnvidia-ptxjitcompiler.so:/usr/lib/libnvidia-ptxjitcompiler.so.1:ro
- /usr/lib/libOpenCL.so:/usr/lib/libOpenCL.so.1:ro
- /usr/lib/libOpenGL.so:/usr/lib/libOpenGL.so.1:ro
- /usr/lib/libvdpau_nvidia.so:/usr/lib/libvdpau_nvidia.so.1:ro
```
Before someone complains, while yes this is ugly, *it does work*:
```
# sudo docker exec -it frigate nvidia-smi
+-----------------------------------------------------------------------------+
| NVIDIA-SMI 390.157 Driver Version: 390.157 |
|-------------------------------+----------------------+----------------------+
| GPU Name Persistence-M| Bus-Id Disp.A | Volatile Uncorr. ECC |
| Fan Temp Perf Pwr:Usage/Cap| Memory-Usage | GPU-Util Compute M. |
|===============================+======================+======================|
| 0 Tesla M2090 Off | 00000000:42:00.0 Off | 0 |
| N/A N/A P0 81W / N/A | 0MiB / 5301MiB | 0% Default |
+-------------------------------+----------------------+----------------------+
+-----------------------------------------------------------------------------+
| Processes: GPU Memory |
| GPU PID Type Process name Usage |
|=============================================================================|
| No running processes found |
+-----------------------------------------------------------------------------+
```
The issue, based on what I can see from the logs, appears to stem from `libnvcuvid.so` missing symbols:
```
[2022-12-14 20:12:22] ffmpeg.cam1.detect ERROR : [h264_cuvid @ 0x55aec7b97840] Cannot load cuvidGetDecodeStatus
[2022-12-14 20:12:22] ffmpeg.cam1.detect ERROR : [h264_cuvid @ 0x55aec7b97840] Failed loading nvcuvid.
```
I believe this is because FFmpeg's nv-codec-headers appear to [only support Nvidia drivers over 520.56.06](https://github.com/FFmpeg/nv-codec-headers/blob/c12df23d145431fb65c2116481aac19340b2cc30/README). The reason I bought this graphics card rather than a newer one is that I believed it would work despite the old driver version, since the Jellyfin documentation (which Frigate's docs point at) says it should. If older drivers indeed will not work, then either Jellyfin's documentation needs to be updated, if this is relevant to them (they may use an older FFmpeg version, I don't know), or Frigate's documentation needs a note that its minimum supported driver version differs from the Jellyfin docs.
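For reference, a minimal sketch to confirm the missing-symbol diagnosis from inside the container, assuming `libnvcuvid.so.1` is loadable as mounted above; the symbol names are the ones ffmpeg's loader prints in the log:

```python
# Sketch: check which cuvid entry points this driver's libnvcuvid exports.
# Assumes libnvcuvid.so.1 is on the loader path inside the container.
import ctypes

lib = ctypes.CDLL("libnvcuvid.so.1")

for sym in ("cuvidCreateDecoder", "cuvidDestroyDecoder", "cuvidDecodePicture", "cuvidGetDecodeStatus"):
    try:
        getattr(lib, sym)  # ctypes raises AttributeError for an unexported symbol
        print(f"{sym}: present")
    except AttributeError:
        print(f"{sym}: missing")
```

If `cuvidGetDecodeStatus` comes back missing, that matches the ffmpeg error above and points at the driver's library rather than the container plumbing.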
If I am wrong about any of this, please correct me. I am not at all familiar with hardware accelerated video encoding/decoding.
P.S.: As I did not install `libnvidia-container`, I am not able to explicitly allocate the GPU resource from the host, as per the `deploy` element for the [sample `docker-compose.yml` in the docs](https://docs.frigate.video/configuration/hardware_acceleration#nvidia-gpu), but based on [my research](https://docs.docker.com/compose/compose-file/deploy/#resources), this only directs Docker to ensure that the Nvidia Docker runtime is used and that the GPU is available.
So... I know this is a hacky and likely unsupported configuration, but the important part is that all the moving parts work up until the libraries are loaded, at which point the error doesn't appear to come from my hacks but rather from the driver being unsupported by upstream libraries.
One possibility would be to use Nvidia's VDPAU implementation, which based on my reading has been shown to work with similar-generation cards with FFmpeg, but Frigate's FFmpeg is not compiled with `--enable-vdpau` so I am not even able to test it.
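For completeness, whether the bundled ffmpeg exposes VDPAU at all can be checked without a camera; this is only a sketch, run against whatever `ffmpeg` binary is on the container's PATH:

```python
# Sketch: list the hardware acceleration methods this ffmpeg build exposes,
# to confirm whether "vdpau" is among them before swapping hardware.
import subprocess

out = subprocess.run(
    ["ffmpeg", "-hide_banner", "-hwaccels"], capture_output=True, text=True
).stdout
# First line of the output is the "Hardware acceleration methods:" header.
methods = [line.strip() for line in out.splitlines()[1:] if line.strip()]
print(methods)
print("vdpau available:", "vdpau" in methods)
```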
As it stands I am probably going to return this card and get an AMD, officially supported configuration or not...
### Version
0.11.1-2eada21
### Frigate config file
```yaml
ffmpeg:
hwaccel_args: []
input_args: -avoid_negative_ts make_zero -fflags +genpts+discardcorrupt -rtsp_transport tcp -timeout 5000000 -use_wallclock_as_timestamps 1
output_args:
detect: -f rawvideo -pix_fmt yuv420p
record: -f segment -segment_time 10 -segment_format mp4 -reset_timestamps 1 -strftime 1 -c:v copy -c:a aac
rtmp: -c copy -f flv
cameras:
cam1:
ffmpeg:
hwaccel_args: -loglevel debug -c:v h264_cuvid
inputs:
- path: rtsp://cam1:554/h264Preview_01_main
roles:
- record
- rtmp
- path: rtsp://cam1:554/h264Preview_01_sub
roles:
- detect
detect:
width: 640
height: 480
```
### docker-compose file or Docker CLI command
```yaml
version: "3.9"
services:
frigate:
container_name: frigate
privileged: true
restart: unless-stopped
image: blakeblackshear/frigate:stable
shm_size: "128mb"
devices:
- /dev/bus/usb:/dev/bus/usb
- /dev/nvidia0
volumes:
- /etc/localtime:/etc/localtime:ro
- ./config.yml:/config/config.yml:ro
- /media/frigate:/media/frigate
- type: tmpfs
target: /tmp/cache
tmpfs:
size: 1000000000
# https://github.com/sylabs/singularity/blob/main/etc/nvliblist.conf
# binaries, only mount what's needed
- /usr/bin/nvidia-smi:/usr/bin/nvidia-smi:ro
- /usr/bin/nvidia-debugdump:/usr/bin/nvidia-debugdump:ro
- /usr/bin/nvidia-persistenced:/usr/bin/nvidia-persistenced:ro
- /usr/bin/nvidia-cuda-mps-control:/usr/bin/nvidia-cuda-mps-control:ro
- /usr/bin/nvidia-cuda-mps-server:/usr/bin/nvidia-cuda-mps-server:ro
# libs, only mount what exists
- /usr/lib/libcuda.so:/usr/lib/libcuda.so:ro
#- /usr/lib/libEGL_installertest.so:/usr/lib/libEGL_installertest.so.1:ro
#- /usr/lib/libEGL_nvidia.so:/usr/lib/libEGL_nvidia.so.1:ro
- /usr/lib/libEGL.so:/usr/lib/libEGL.so.1:ro
#- /usr/lib/libGLdispatch.so:/usr/lib/libGLdispatch.so.1:ro
#- /usr/lib/libGLESv1_CM_nvidia.so:/usr/lib/libGLESv1_CM_nvidia.so.1:ro
- /usr/lib/libGLESv1_CM.so:/usr/lib/libGLESv1_CM.so.1:ro
#- /usr/lib/libGLESv2_nvidia.so:/usr/lib/libGLESv2_nvidia.so.1:ro
- /usr/lib/libGLESv2.so:/usr/lib/libGLESv2.so.1:ro
- /usr/lib/libGL.so:/usr/lib/libGL.so.1:ro
#- /usr/lib/libGLX_installertest.so:/usr/lib/libGLX_installertest.so.1:ro
#- /usr/lib/libGLX_nvidia.so:/usr/lib/libGLX_nvidia.so.1:ro
#- /usr/lib/libglx.so:/usr/lib/libglx.so.1:ro
- /usr/lib/libGLX.so:/usr/lib/libGLX.so.1:ro
- /usr/lib/libnvcuvid.so:/usr/lib/libnvcuvid.so.1:ro
#- /usr/lib/libnvidia-cbl.so:/usr/lib/libnvidia-cbl.so.1:ro
- /usr/lib/libnvidia-cfg.so:/usr/lib/libnvidia-cfg.so.1:ro
#- /usr/lib/libnvidia-compiler.so:/usr/lib/libnvidia-compiler.so.1:ro
#- /usr/lib/libnvidia-eglcore.so:/usr/lib/libnvidia-eglcore.so.1:ro
#- /usr/lib/libnvidia-egl-wayland.so:/usr/lib/libnvidia-egl-wayland.so.1:ro
- /usr/lib/libnvidia-encode.so:/usr/lib/libnvidia-encode.so.1:ro
#- /usr/lib/libnvidia-fatbinaryloader.so:/usr/lib/libnvidia-fatbinaryloader.so.1:ro
- /usr/lib/libnvidia-fbc.so:/usr/lib/libnvidia-fbc.so.1:ro
#- /usr/lib/libnvidia-glcore.so:/usr/lib/libnvidia-glcore.so.1:ro
#- /usr/lib/libnvidia-glsi.so:/usr/lib/libnvidia-glsi.so.1:ro
#- /usr/lib/libnvidia-glvkspirv.so:/usr/lib/libnvidia-glvkspirv.so.1:ro
#- /usr/lib/libnvidia-gtk2.so:/usr/lib/libnvidia-gtk2.so.1:ro
#- /usr/lib/libnvidia-gtk3.so:/usr/lib/libnvidia-gtk3.so.1:ro
- /usr/lib/libnvidia-ifr.so:/usr/lib/libnvidia-ifr.so.1:ro
- /usr/lib/libnvidia-ml.so:/usr/lib/libnvidia-ml.so.1:ro
#- /usr/lib/libnvidia-opencl.so:/usr/lib/libnvidia-opencl.so.1:ro
#- /usr/lib/libnvidia-opticalflow.so:/usr/lib/libnvidia-opticalflow.so.1:ro
- /usr/lib/libnvidia-ptxjitcompiler.so:/usr/lib/libnvidia-ptxjitcompiler.so.1:ro
#- /usr/lib/libnvidia-rtcore.so:/usr/lib/libnvidia-rtcore.so.1:ro
#- /usr/lib/libnvidia-tls.so:/usr/lib/libnvidia-tls.so.1:ro
#- /usr/lib/libnvidia-wfb.so:/usr/lib/libnvidia-wfb.so.1:ro
#- /usr/lib/libnvoptix.so.1:/usr/lib/libnvoptix.so.1.1:ro
- /usr/lib/libOpenCL.so:/usr/lib/libOpenCL.so.1:ro
- /usr/lib/libOpenGL.so:/usr/lib/libOpenGL.so.1:ro
- /usr/lib/libvdpau_nvidia.so:/usr/lib/libvdpau_nvidia.so.1:ro
#- /usr/lib/nvidia_drv.so:/usr/lib/nvidia_drv.so.1:ro
#- /usr/lib/tls_test_.so:/usr/lib/tls_test_.so.1:ro
ports:
- "127.0.0.1:5000:5000"
- "127.0.0.1:1935:1935" # RTMP feeds
extra_hosts: ["host.docker.internal:host-gateway"]
```
### Relevant log output
```shell
sudo docker logs --tail=10 -f frigate
[2022-12-14 20:12:02] frigate.app INFO : Starting Frigate (0.11.1-2eada21)
[2022-12-14 20:12:22] watchdog.cam1 ERROR : Ffmpeg process crashed unexpectedly for cam1.
[2022-12-14 20:12:22] watchdog.cam1 ERROR : The following ffmpeg logs include the last 100 lines prior to exit.
[2022-12-14 20:12:22] ffmpeg.cam1.detect ERROR : c=IN IP4 0.0.0.0
[2022-12-14 20:12:22] ffmpeg.cam1.detect ERROR : b=AS:500
[2022-12-14 20:12:22] ffmpeg.cam1.detect ERROR : a=rtpmap:96 H264/90000
[2022-12-14 20:12:22] ffmpeg.cam1.detect ERROR : a=fmtp:96 packetization-mode=1;profile-level-id=640033;sprop-parameter-sets=Z2QAM6wVFKCgPZA=,aO48sA==
[2022-12-14 20:12:22] ffmpeg.cam1.detect ERROR : a=control:track1
[2022-12-14 20:12:22] ffmpeg.cam1.detect ERROR : m=audio 0 RTP/AVP 97
[2022-12-14 20:12:22] ffmpeg.cam1.detect ERROR : c=IN IP4 0.0.0.0
[2022-12-14 20:12:22] ffmpeg.cam1.detect ERROR : b=AS:256
[2022-12-14 20:12:22] ffmpeg.cam1.detect ERROR : a=rtpmap:97 MPEG4-GENERIC/16000
[2022-12-14 20:12:22] ffmpeg.cam1.detect ERROR : a=fmtp:97 streamtype=5;profile-level-id=1;mode=AAC-hbr;sizelength=13;indexlength=3;indexdeltalength=3;config=1408
[2022-12-14 20:12:22] ffmpeg.cam1.detect ERROR : a=control:track2
[2022-12-14 20:12:22] ffmpeg.cam1.detect ERROR :
[2022-12-14 20:12:22] ffmpeg.cam1.detect ERROR : Failed to parse interval end specification ''
[2022-12-14 20:12:22] ffmpeg.cam1.detect ERROR : [rtsp @ 0x55aec7b7ab80] video codec set to: h264
[2022-12-14 20:12:22] ffmpeg.cam1.detect ERROR : [rtsp @ 0x55aec7b7ab80] RTP Packetization Mode: 1
[2022-12-14 20:12:22] ffmpeg.cam1.detect ERROR : [rtsp @ 0x55aec7b7ab80] RTP Profile IDC: 64 Profile IOP: 0 Level: 33
[2022-12-14 20:12:22] ffmpeg.cam1.detect ERROR : [rtsp @ 0x55aec7b7ab80] Extradata set to 0x55aec7b7db00 (size: 23)
[2022-12-14 20:12:22] ffmpeg.cam1.detect ERROR : [rtsp @ 0x55aec7b7ab80] audio codec set to: aac
[2022-12-14 20:12:22] ffmpeg.cam1.detect ERROR : [rtsp @ 0x55aec7b7ab80] audio samplerate set to: 16000
[2022-12-14 20:12:22] ffmpeg.cam1.detect ERROR : [rtsp @ 0x55aec7b7ab80] audio channels set to: 1
[2022-12-14 20:12:22] ffmpeg.cam1.detect ERROR : [rtsp @ 0x55aec7b7ab80] setting jitter buffer size to 0
[2022-12-14 20:12:22] ffmpeg.cam1.detect ERROR : Last message repeated 1 times
[2022-12-14 20:12:22] ffmpeg.cam1.detect ERROR : [rtsp @ 0x55aec7b7ab80] hello state=0
[2022-12-14 20:12:22] ffmpeg.cam1.detect ERROR : Failed to parse interval end specification ''
[2022-12-14 20:12:22] ffmpeg.cam1.detect ERROR : [h264 @ 0x55aec7b81a00] nal_unit_type: 7(SPS), nal_ref_idc: 3
[2022-12-14 20:12:22] ffmpeg.cam1.detect ERROR : [h264 @ 0x55aec7b81a00] nal_unit_type: 8(PPS), nal_ref_idc: 3
[2022-12-14 20:12:22] ffmpeg.cam1.detect ERROR : [h264 @ 0x55aec7b81a00] nal_unit_type: 7(SPS), nal_ref_idc: 3
[2022-12-14 20:12:22] ffmpeg.cam1.detect ERROR : [h264 @ 0x55aec7b81a00] nal_unit_type: 8(PPS), nal_ref_idc: 3
[2022-12-14 20:12:22] ffmpeg.cam1.detect ERROR : [rtsp @ 0x55aec7b7ab80] DTS discontinuity in stream 1: packet 3 with DTS 26737125184936, packet 4 with DTS 26737125187534
[2022-12-14 20:12:22] ffmpeg.cam1.detect ERROR : [h264 @ 0x55aec7b81a00] nal_unit_type: 7(SPS), nal_ref_idc: 3
[2022-12-14 20:12:22] ffmpeg.cam1.detect ERROR : [h264 @ 0x55aec7b81a00] nal_unit_type: 8(PPS), nal_ref_idc: 3
[2022-12-14 20:12:22] ffmpeg.cam1.detect ERROR : [h264 @ 0x55aec7b81a00] nal_unit_type: 5(IDR), nal_ref_idc: 3
[2022-12-14 20:12:22] ffmpeg.cam1.detect ERROR : [h264 @ 0x55aec7b81a00] Format yuv420p chosen by get_format().
[2022-12-14 20:12:22] ffmpeg.cam1.detect ERROR : [h264 @ 0x55aec7b81a00] Reinit context to 640x480, pix_fmt: yuv420p
[2022-12-14 20:12:22] ffmpeg.cam1.detect ERROR : [h264 @ 0x55aec7b81a00] nal_unit_type: 1(Coded slice of a non-IDR picture), nal_ref_idc: 1
[2022-12-14 20:12:22] ffmpeg.cam1.detect ERROR : Last message repeated 5 times
[2022-12-14 20:12:22] ffmpeg.cam1.detect ERROR : [rtsp @ 0x55aec7b7ab80] max_analyze_duration 5000000 reached at 5056000 microseconds st:1
[2022-12-14 20:12:22] ffmpeg.cam1.detect ERROR : [rtsp @ 0x55aec7b7ab80] rfps: 2.916667 0.009279
[2022-12-14 20:12:22] ffmpeg.cam1.detect ERROR : [rtsp @ 0x55aec7b7ab80] rfps: 3.000000 0.008424
[2022-12-14 20:12:22] ffmpeg.cam1.detect ERROR : [rtsp @ 0x55aec7b7ab80] rfps: 6.750000 0.009708
[2022-12-14 20:12:22] ffmpeg.cam1.detect ERROR : [rtsp @ 0x55aec7b7ab80] rfps: 6.833333 0.010258
[2022-12-14 20:12:22] ffmpeg.cam1.detect ERROR : Last message repeated 1 times
[2022-12-14 20:12:22] ffmpeg.cam1.detect ERROR : [rtsp @ 0x55aec7b7ab80] rfps: 9.666667 0.010239
[2022-12-14 20:12:22] ffmpeg.cam1.detect ERROR : [rtsp @ 0x55aec7b7ab80] rfps: 9.750000 0.000507
[2022-12-14 20:12:22] ffmpeg.cam1.detect ERROR : Last message repeated 1 times
[2022-12-14 20:12:22] ffmpeg.cam1.detect ERROR : [rtsp @ 0x55aec7b7ab80] rfps: 9.833333 0.009628
[2022-12-14 20:12:22] ffmpeg.cam1.detect ERROR : Last message repeated 1 times
[2022-12-14 20:12:22] ffmpeg.cam1.detect ERROR : [rtsp @ 0x55aec7b7ab80] rfps: 12.666667 0.009315
[2022-12-14 20:12:22] ffmpeg.cam1.detect ERROR : [rtsp @ 0x55aec7b7ab80] rfps: 12.750000 0.008155
[2022-12-14 20:12:22] ffmpeg.cam1.detect ERROR : [rtsp @ 0x55aec7b7ab80] rfps: 16.500000 0.012005
[2022-12-14 20:12:22] ffmpeg.cam1.detect ERROR : Last message repeated 1 times
[2022-12-14 20:12:22] ffmpeg.cam1.detect ERROR : [rtsp @ 0x55aec7b7ab80] rfps: 16.583333 0.012250
[2022-12-14 20:12:22] ffmpeg.cam1.detect ERROR : [rtsp @ 0x55aec7b7ab80] rfps: 19.416667 0.012065
[2022-12-14 20:12:22] ffmpeg.cam1.detect ERROR : Last message repeated 1 times
[2022-12-14 20:12:22] ffmpeg.cam1.detect ERROR : [rtsp @ 0x55aec7b7ab80] rfps: 19.500000 0.002028
[2022-12-14 20:12:22] ffmpeg.cam1.detect ERROR : Last message repeated 1 times
[2022-12-14 20:12:22] ffmpeg.cam1.detect ERROR : [rtsp @ 0x55aec7b7ab80] rfps: 19.583333 0.010844
[2022-12-14 20:12:22] ffmpeg.cam1.detect ERROR : Last message repeated 1 times
[2022-12-14 20:12:22] ffmpeg.cam1.detect ERROR : [rtsp @ 0x55aec7b7ab80] rfps: 22.416667 0.010364
[2022-12-14 20:12:22] ffmpeg.cam1.detect ERROR : Last message repeated 1 times
[2022-12-14 20:12:22] ffmpeg.cam1.detect ERROR : [rtsp @ 0x55aec7b7ab80] rfps: 22.500000 0.008899
[2022-12-14 20:12:22] ffmpeg.cam1.detect ERROR : Last message repeated 1 times
[2022-12-14 20:12:22] ffmpeg.cam1.detect ERROR : [rtsp @ 0x55aec7b7ab80] rfps: 26.250000 0.015317
[2022-12-14 20:12:22] ffmpeg.cam1.detect ERROR : [rtsp @ 0x55aec7b7ab80] rfps: 26.333333 0.015256
[2022-12-14 20:12:22] ffmpeg.cam1.detect ERROR : [rtsp @ 0x55aec7b7ab80] rfps: 29.166667 0.014904
[2022-12-14 20:12:22] ffmpeg.cam1.detect ERROR : [rtsp @ 0x55aec7b7ab80] rfps: 29.250000 0.004562
[2022-12-14 20:12:22] ffmpeg.cam1.detect ERROR : [rtsp @ 0x55aec7b7ab80] rfps: 29.333333 0.013074
[2022-12-14 20:12:22] ffmpeg.cam1.detect ERROR : [rtsp @ 0x55aec7b7ab80] rfps: 36.000000 0.019642
[2022-12-14 20:12:22] ffmpeg.cam1.detect ERROR : [rtsp @ 0x55aec7b7ab80] rfps: 39.000000 0.008111
[2022-12-14 20:12:22] ffmpeg.cam1.detect ERROR : [rtsp @ 0x55aec7b7ab80] rfps: 42.000000 0.013429
[2022-12-14 20:12:22] ffmpeg.cam1.detect ERROR : Input #0, rtsp, from 'rtsp://cam1:554/h264Preview_01_sub':
[2022-12-14 20:12:22] ffmpeg.cam1.detect ERROR : Metadata:
[2022-12-14 20:12:22] ffmpeg.cam1.detect ERROR : title : Session streamed by "preview"
[2022-12-14 20:12:22] ffmpeg.cam1.detect ERROR : comment : h264Preview_01_sub
[2022-12-14 20:12:22] ffmpeg.cam1.detect ERROR : Duration: N/A, start: 1671070323.868938, bitrate: N/A
[2022-12-14 20:12:22] ffmpeg.cam1.detect ERROR : Stream #0:0, 14, 1/90000: Video: h264 (High), 1 reference frame, yuv420p(progressive), 640x480, 0/1, 9.75 tbr, 90k tbn
[2022-12-14 20:12:22] ffmpeg.cam1.detect ERROR : Stream #0:1, 81, 1/16000: Audio: aac (LC), 16000 Hz, mono, fltp
[2022-12-14 20:12:22] ffmpeg.cam1.detect ERROR : Successfully opened the file.
[2022-12-14 20:12:22] ffmpeg.cam1.detect ERROR : Parsing a group of options: output url pipe:.
[2022-12-14 20:12:22] ffmpeg.cam1.detect ERROR : Applying option r (set frame rate (Hz value, fraction or abbreviation)) with argument 5.
[2022-12-14 20:12:22] ffmpeg.cam1.detect ERROR : Applying option s (set frame size (WxH or abbreviation)) with argument 640x480.
[2022-12-14 20:12:22] ffmpeg.cam1.detect ERROR : Applying option f (force format) with argument rawvideo.
[2022-12-14 20:12:22] ffmpeg.cam1.detect ERROR : Applying option pix_fmt (set pixel format) with argument yuv420p.
[2022-12-14 20:12:22] ffmpeg.cam1.detect ERROR : Successfully parsed a group of options.
[2022-12-14 20:12:22] ffmpeg.cam1.detect ERROR : Opening an output file: pipe:.
[2022-12-14 20:12:22] ffmpeg.cam1.detect ERROR : [pipe @ 0x55aec7cff400] Setting default whitelist 'crypto,data'
[2022-12-14 20:12:22] ffmpeg.cam1.detect ERROR : Successfully opened the file.
[2022-12-14 20:12:22] ffmpeg.cam1.detect ERROR : [h264_mp4toannexb @ 0x55aec7b83980] The input looks like it is Annex B already
[2022-12-14 20:12:22] ffmpeg.cam1.detect ERROR : [h264_cuvid @ 0x55aec7b97840] Format nv12 chosen by get_format().
[2022-12-14 20:12:22] ffmpeg.cam1.detect ERROR : [h264_cuvid @ 0x55aec7b97840] Loaded lib: libnvcuvid.so.1
[2022-12-14 20:12:22] ffmpeg.cam1.detect ERROR : [h264_cuvid @ 0x55aec7b97840] Loaded sym: cuvidGetDecoderCaps
[2022-12-14 20:12:22] ffmpeg.cam1.detect ERROR : [h264_cuvid @ 0x55aec7b97840] Loaded sym: cuvidCreateDecoder
[2022-12-14 20:12:22] ffmpeg.cam1.detect ERROR : [h264_cuvid @ 0x55aec7b97840] Loaded sym: cuvidDestroyDecoder
[2022-12-14 20:12:22] ffmpeg.cam1.detect ERROR : [h264_cuvid @ 0x55aec7b97840] Loaded sym: cuvidDecodePicture
[2022-12-14 20:12:22] ffmpeg.cam1.detect ERROR : [h264_cuvid @ 0x55aec7b97840] Cannot load cuvidGetDecodeStatus
[2022-12-14 20:12:22] ffmpeg.cam1.detect ERROR : [h264_cuvid @ 0x55aec7b97840] Failed loading nvcuvid.
[2022-12-14 20:12:22] ffmpeg.cam1.detect ERROR : Stream mapping:
[2022-12-14 20:12:22] ffmpeg.cam1.detect ERROR : Stream #0:0 -> #0:0 (h264 (h264_cuvid) -> rawvideo (native))
[2022-12-14 20:12:22] ffmpeg.cam1.detect ERROR : Error while opening decoder for input stream #0:0 : Operation not permitted
[2022-12-14 20:12:22] ffmpeg.cam1.detect ERROR : [AVIOContext @ 0x55aec7d20cc0] Statistics: 0 bytes written, 0 seeks, 0 writeouts
```
### FFprobe output from your camera
```shell
ffprobe version n5.1-2-g915ef932a3-20220731 Copyright (c) 2007-2022 the FFmpeg developers
built with gcc 12.1.0 (crosstool-NG 1.25.0.55_3defb7b)
configuration: --prefix=/ffbuild/prefix --pkg-config-flags=--static --pkg-config=pkg-config --cross-prefix=x86_64-ffbuild-linux-gnu- --arch=x86_64 --target-os=linux --enable-gpl --enable-version3 --disable-debug --enable-iconv --enable-libxml2 --enable-zlib --enable-libfreetype --enable-libfribidi --enable-gmp --enable-lzma --enable-fontconfig --enable-libvorbis --enable-opencl --enable-libpulse --enable-libvmaf --enable-libxcb --enable-xlib --enable-amf --enable-libaom --enable-libaribb24 --enable-avisynth --enable-libdav1d --enable-libdavs2 --disable-libfdk-aac --enable-ffnvcodec --enable-cuda-llvm --enable-frei0r --enable-libgme --enable-libass --enable-libbluray --enable-libjxl --enable-libmp3lame --enable-libopus --enable-mbedtls --enable-librist --enable-libtheora --enable-libvpx --enable-libwebp --enable-lv2 --enable-libmfx --enable-libopencore-amrnb --enable-libopencore-amrwb --enable-libopenh264 --enable-libopenjpeg --enable-libopenmpt --enable-librav1e --enable-librubberband --disable-schannel --enable-sdl2 --enable-libsoxr --enable-libsrt --enable-libsvtav1 --enable-libtwolame --enable-libuavs3d --enable-libdrm --enable-vaapi --enable-libvidstab --enable-vulkan --enable-libshaderc --enable-libplacebo --enable-libx264 --enable-libx265 --enable-libxavs2 --enable-libxvid --enable-libzimg --enable-libzvbi --extra-cflags=-DLIBTWOLAME_STATIC --extra-cxxflags= --extra-ldflags=-pthread --extra-ldexeflags=-pie --extra-libs='-ldl -lgomp' --extra-version=20220731
libavutil 57. 28.100 / 57. 28.100
libavcodec 59. 37.100 / 59. 37.100
libavformat 59. 27.100 / 59. 27.100
libavdevice 59. 7.100 / 59. 7.100
libavfilter 8. 44.100 / 8. 44.100
libswscale 6. 7.100 / 6. 7.100
libswresample 4. 7.100 / 4. 7.100
libpostproc 56. 6.100 / 56. 6.100
Input #0, rtsp, from 'rtsp://cam1:554/h264Preview_01_sub':
Metadata:
title : Session streamed by "preview"
comment : h264Preview_01_sub
Duration: N/A, start: 0.000313, bitrate: N/A
Stream #0:0: Video: h264 (High), yuv420p(progressive), 640x480, 90k tbr, 90k tbn
Stream #0:1: Audio: aac (LC), 16000 Hz, mono, fltp
```
### Operating system
Other Linux
### Install method
Docker Compose
### Network connection
Mixed
### Camera make and model
Reolink RLC-542WA
### Any other information that may be helpful
_No response_
| 2022-10-09T11:51:53Z | [] | [] |
|
blakeblackshear/frigate | 4555 | blakeblackshear__frigate-4555 | [
"4369"
] | 5ad391977efc0e7720b533b6c928027675050422 | diff --git a/frigate/config.py b/frigate/config.py
--- a/frigate/config.py
+++ b/frigate/config.py
@@ -32,6 +32,7 @@
parse_preset_output_record,
parse_preset_output_rtmp,
)
+from frigate.version import VERSION
logger = logging.getLogger(__name__)
@@ -357,7 +358,13 @@ class BirdseyeCameraConfig(BaseModel):
)
-FFMPEG_GLOBAL_ARGS_DEFAULT = ["-hide_banner", "-loglevel", "warning"]
+FFMPEG_GLOBAL_ARGS_DEFAULT = [
+ "-hide_banner",
+ "-loglevel",
+ "warning",
+ "-user_agent",
+ f"FFmpeg Frigate/{VERSION}",
+]
FFMPEG_INPUT_ARGS_DEFAULT = [
"-avoid_negative_ts",
"make_zero",
diff --git a/frigate/util.py b/frigate/util.py
--- a/frigate/util.py
+++ b/frigate/util.py
@@ -1,6 +1,7 @@
import copy
import datetime
import logging
+import shlex
import subprocess as sp
import json
import re
@@ -888,7 +889,7 @@ def vainfo_hwaccel() -> sp.CompletedProcess:
def get_ffmpeg_arg_list(arg: Any) -> list:
"""Use arg if list or convert to list format."""
- return arg if isinstance(arg, list) else arg.split(" ")
+ return arg if isinstance(arg, list) else shlex.split(arg)
class FrameManager(ABC):
| diff --git a/frigate/test/test_ffmpeg_presets.py b/frigate/test/test_ffmpeg_presets.py
--- a/frigate/test/test_ffmpeg_presets.py
+++ b/frigate/test/test_ffmpeg_presets.py
@@ -1,5 +1,5 @@
import unittest
-from frigate.config import FrigateConfig
+from frigate.config import FFMPEG_INPUT_ARGS_DEFAULT, FrigateConfig
from frigate.ffmpeg_presets import parse_preset_input
@@ -93,6 +93,16 @@ def test_ffmpeg_input_preset(self):
" ".join(frigate_config.cameras["back"].ffmpeg_cmds[0]["cmd"])
)
+ def test_ffmpeg_input_args_as_string(self):
+ argsString = " ".join(FFMPEG_INPUT_ARGS_DEFAULT) + ' -some "arg with space"'
+ argsList = FFMPEG_INPUT_ARGS_DEFAULT + ["-some", "arg with space"]
+ self.default_ffmpeg["cameras"]["back"]["ffmpeg"]["input_args"] = argsString
+ frigate_config = FrigateConfig(**self.default_ffmpeg)
+ frigate_config.cameras["back"].create_ffmpeg_cmds()
+ assert set(argsList).issubset(
+ frigate_config.cameras["back"].ffmpeg_cmds[0]["cmd"]
+ )
+
def test_ffmpeg_input_not_preset(self):
self.default_ffmpeg["cameras"]["back"]["ffmpeg"]["input_args"] = "-some inputs"
frigate_config = FrigateConfig(**self.default_ffmpeg)
| Set `-user_agent` in `ffmpeg` calls
**Describe what you are trying to accomplish and why in non technical terms**
When checking who is connecting to my cameras, I can easily distinguish some clients, like Home Assistant or Google Chrome. Frigate is also connected to them, but since `-user_agent` is not set, it's not easy to distinguish.
![image](https://user-images.githubusercontent.com/29582865/201259480-8b263732-8796-47c4-9cd6-2de3f159ebfb.png)
**Describe the solution you'd like**
This can be set with the `-user_agent` ffmpeg flag. In order of usefulness but also in order of difficulty to implement:
1. `-user_agent 'Frigate'` already much better than now
2. `-user_agent 'Frigate/0.11.1'`
3. `-user_agent 'Frigate/0.11.1 FFmpeg'`
4. `-user_agent 'Frigate/0.11.1 FFmpeg/3.1.2'`
I guess that, with the FFmpeg version in the string, we don't need the `Lavf59.27.100` either, though it wouldn't hurt to include it as well.
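A rough sketch of how those strings could be put together; the version values are illustrative placeholders, not Frigate's actual constants:

```python
# Sketch only: composing the user-agent variants listed above.
# FRIGATE_VERSION / FFMPEG_VERSION are placeholder values for illustration.
FRIGATE_VERSION = "0.11.1"
FFMPEG_VERSION = "5.1"

option_2 = f"Frigate/{FRIGATE_VERSION}"
option_4 = f"Frigate/{FRIGATE_VERSION} FFmpeg/{FFMPEG_VERSION}"

# The flag would be passed ahead of the input, e.g. -user_agent "<string>" -i <stream url>
input_args = ["-user_agent", option_4]
print(input_args)
```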
**Describe alternatives you've considered**
I can probably set this by myself using the args, but I think this deserves to come by default.
**Additional context**
- Refs https://github.com/AlexxIT/go2rtc/issues/106
| 2022-11-30T03:11:56Z | [] | [] |
|
blakeblackshear/frigate | 4596 | blakeblackshear__frigate-4596 | [
"4595"
] | 007fa752946a79c841584ef06d7c0bbf129ff7dc | diff --git a/frigate/config.py b/frigate/config.py
--- a/frigate/config.py
+++ b/frigate/config.py
@@ -359,13 +359,7 @@ class BirdseyeCameraConfig(BaseModel):
)
-FFMPEG_GLOBAL_ARGS_DEFAULT = [
- "-hide_banner",
- "-loglevel",
- "warning",
- "-user_agent",
- f"FFmpeg Frigate/{VERSION}",
-]
+FFMPEG_GLOBAL_ARGS_DEFAULT = ["-hide_banner", "-loglevel", "warning"]
FFMPEG_INPUT_ARGS_DEFAULT = [
"-avoid_negative_ts",
"make_zero",
diff --git a/frigate/ffmpeg_presets.py b/frigate/ffmpeg_presets.py
--- a/frigate/ffmpeg_presets.py
+++ b/frigate/ffmpeg_presets.py
@@ -2,6 +2,12 @@
from typing import Any
+from frigate.version import VERSION
+
+_user_agent_args = [
+ "-user_agent",
+ f"FFmpeg Frigate/{VERSION}",
+]
PRESETS_HW_ACCEL = {
"preset-rpi-32-h264": ["-c:v", "h264_v4l2m2m"],
@@ -39,7 +45,8 @@ def parse_preset_hardware_acceleration(arg: Any) -> list[str]:
PRESETS_INPUT = {
- "preset-http-jpeg-generic": [
+ "preset-http-jpeg-generic": _user_agent_args
+ + [
"-r",
"{}",
"-stream_loop",
@@ -59,7 +66,8 @@ def parse_preset_hardware_acceleration(arg: Any) -> list[str]:
"-use_wallclock_as_timestamps",
"1",
],
- "preset-http-mjpeg-generic": [
+ "preset-http-mjpeg-generic": _user_agent_args
+ + [
"-avoid_negative_ts",
"make_zero",
"-fflags",
@@ -73,7 +81,8 @@ def parse_preset_hardware_acceleration(arg: Any) -> list[str]:
"-use_wallclock_as_timestamps",
"1",
],
- "preset-http-reolink": [
+ "preset-http-reolink": _user_agent_args
+ + [
"-avoid_negative_ts",
"make_zero",
"-fflags",
@@ -107,7 +116,8 @@ def parse_preset_hardware_acceleration(arg: Any) -> list[str]:
"-f",
"live_flv",
],
- "preset-rtsp-generic": [
+ "preset-rtsp-generic": _user_agent_args
+ + [
"-avoid_negative_ts",
"make_zero",
"-fflags",
@@ -119,7 +129,8 @@ def parse_preset_hardware_acceleration(arg: Any) -> list[str]:
"-use_wallclock_as_timestamps",
"1",
],
- "preset-rtsp-udp": [
+ "preset-rtsp-udp": _user_agent_args
+ + [
"-avoid_negative_ts",
"make_zero",
"-fflags",
@@ -131,7 +142,10 @@ def parse_preset_hardware_acceleration(arg: Any) -> list[str]:
"-use_wallclock_as_timestamps",
"1",
],
- "preset-rtsp-blue-iris": [
+ "preset-rtsp-blue-iris": _user_agent_args
+ + [
+ "-user_agent",
+ f"FFmpeg Frigate/{VERSION}",
"-avoid_negative_ts",
"make_zero",
"-flags",
| diff --git a/frigate/test/test_ffmpeg_presets.py b/frigate/test/test_ffmpeg_presets.py
--- a/frigate/test/test_ffmpeg_presets.py
+++ b/frigate/test/test_ffmpeg_presets.py
@@ -76,8 +76,9 @@ def test_default_ffmpeg_input_arg_preset(self):
frigate_config.cameras["back"].create_ffmpeg_cmds()
frigate_preset_config.cameras["back"].create_ffmpeg_cmds()
assert (
- frigate_preset_config.cameras["back"].ffmpeg_cmds[0]["cmd"]
- == frigate_config.cameras["back"].ffmpeg_cmds[0]["cmd"]
+ # Ignore global and user_agent args in comparison
+ frigate_preset_config.cameras["back"].ffmpeg_cmds[0]["cmd"][6::]
+ == frigate_config.cameras["back"].ffmpeg_cmds[0]["cmd"][4::]
)
def test_ffmpeg_input_preset(self):
| [Support]: ERROR : Option user_agent not found.
### Describe the problem you are having
After updating to the latest dev version, Frigate fails to start with the error `ERROR : Option user_agent not found.`
Reverting back to dev-5ad3919 makes it work again.
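Judging from the preset changes in the patch above, `-user_agent` is accepted by ffmpeg's HTTP/RTSP inputs but is not an option of the RTMP inputs these cameras use, which is why the fix moves it out of the global args and into the http/rtsp presets. A rough sketch of the difference follows; the camera URLs are placeholders, so both runs fail to connect, but only the RTMP one rejects the option itself:

```python
# Sketch: -user_agent is a valid input option for rtsp/http streams, but an rtmp
# input has no such option, so ffmpeg exits with "Option user_agent not found."
# URLs below are placeholders.
import subprocess

common = ["ffmpeg", "-hide_banner", "-user_agent", "FFmpeg Frigate/dev"]

rtsp = subprocess.run(common + ["-i", "rtsp://camera:554/stream", "-f", "null", "-"])
rtmp = subprocess.run(common + ["-i", "rtmp://camera/bcs/channel0_main.bcs", "-f", "null", "-"])

print("rtsp:", rtsp.returncode)  # fails only because the placeholder host is unreachable
print("rtmp:", rtmp.returncode)  # fails immediately: "Option user_agent not found."
```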
### Version
dev-007fa75
### Frigate config file
```yaml
mqtt:
host: 192.168.1.76
user: frigate
password: *
detectors:
coral:
type: edgetpu
device: usb
detect:
width: 640
height: 480
fps: 10
max_disappeared: 100
stationary:
interval: 100
record:
enabled: True
retain:
days: 6
mode: all
events:
pre_capture: 45
post_capture: 45
objects:
- person
retain:
default: 14
mode: active_objects
objects:
track:
- person
snapshots:
enabled: True
bounding_box: True
retain:
default: 14
rtmp:
enabled: False
ffmpeg:
output_args:
record: -f segment -segment_time 10 -segment_format mp4 -reset_timestamps 1 -strftime 1 -c:v copy -c:a aac
cameras:
front:
ffmpeg:
inputs:
- path: rtsp://*@192.168.20.25:554//h265Preview_01_main
roles:
- record
- restream
- path: rtsp://*@192.168.20.25:554//h264Preview_01_sub
roles:
- detect
detect:
width: 640
height: 360
motion:
mask:
- 0,360,0,0,640,0,640,293,527,145,535,83,365,38,256,41
zones:
drive:
coordinates: 0,360,149,151,276,42,346,38,396,52,467,36,640,0,640,360
record:
events:
required_zones:
- drive
snapshots:
required_zones:
- drive
garage:
ffmpeg:
hwaccel_args: preset-rpi-64-h264
input_args: preset-rtmp-generic
inputs:
- path: rtmp://192.168.20.13/bcs/channel0_main.bcs?channel=0&stream=0&user=*&password=*
roles:
- record
- restream
- path: rtmp://192.168.20.13/bcs/channel0_sub.bcs?channel=0&stream=0&user=*&password=*
roles:
- detect
motion:
mask:
- 640,0,303,0,184,0,0,0,0,90,105,89,113,169,276,183,347,81,539,106,640,204
zones:
drive:
coordinates: 640,480,0,480,0,206,269,228,349,82,520,104,640,268
front_lawn:
coordinates: 164,72,0,90,0,200,145,220,260,212,338,72
record:
events:
required_zones:
- drive
- front_lawn
snapshots:
required_zones:
- drive
- front_lawn
rear:
ffmpeg:
hwaccel_args: preset-rpi-64-h264
input_args: preset-rtmp-generic
inputs:
- path: rtmp://192.168.20.14/bcs/channel0_main.bcs?channel=0&stream=0&user=*&password=*
roles:
- record
- restream
- path: rtmp://192.168.20.14/bcs/channel0_sub.bcs?channel=0&stream=0&user=*&password=*
roles:
- detect
motion:
mask:
- 225,171,155,237,67,155,55,0,255,0
garden:
ffmpeg:
hwaccel_args: preset-rpi-64-h264
input_args: preset-rtmp-generic
inputs:
- path: rtmp://192.168.20.16/bcs/channel0_main.bcs?channel=0&stream=0&user=*&password=*
roles:
- record
- restream
- path: rtmp://192.168.20.16/bcs/channel0_sub.bcs?channel=0&stream=0&user=*&password=*
roles:
- detect
motion:
mask:
- 0,0,0,34,484,83,435,356,640,480,640,55,640,0
zones:
grass:
coordinates: 25,139,154,108,504,192,416,450,0,266,115,201,30,178
```
### Relevant log output
```shell
[2022-12-04 13:58:55] watchdog.rear ERROR : The following ffmpeg logs include the last 100 lines prior to exit.
[2022-12-04 13:58:55] ffmpeg.rear.detect ERROR : Option user_agent not found.
[2022-12-04 13:58:55] ffmpeg.rear.record_restream ERROR : Option user_agent not found.
[2022-12-04 13:58:55] watchdog.rear INFO : Terminating the existing ffmpeg process...
[2022-12-04 13:58:55] watchdog.rear INFO : Waiting for ffmpeg to exit gracefully...
[2022-12-04 13:58:55] watchdog.garden ERROR : Ffmpeg process crashed unexpectedly for garden.
[2022-12-04 13:58:55] watchdog.garden ERROR : The following ffmpeg logs include the last 100 lines prior to exit.
[2022-12-04 13:58:55] ffmpeg.garden.detect ERROR : Option user_agent not found.
[2022-12-04 13:58:55] ffmpeg.garden.record_restream ERROR : Option user_agent not found.
[2022-12-04 13:58:55] watchdog.garden INFO : Terminating the existing ffmpeg process...
[2022-12-04 13:58:55] watchdog.garden INFO : Waiting for ffmpeg to exit gracefully...
```
### FFprobe output from your camera
```shell
pi@raspberrypi:~ $ docker exec frigate ffprobe "rtmp://192.168.20.13/bcs/channel0_main.bcs?channel=0&stream=0&user=*&password=*"
ffprobe version n5.1-2-g915ef932a3-20220731 Copyright (c) 2007-2022 the FFmpeg developers
built with gcc 12.1.0 (crosstool-NG 1.25.0.55_3defb7b)
configuration: --prefix=/ffbuild/prefix --pkg-config-flags=--static --pkg-config=pkg-config --cross-prefix=aarch64-ffbuild-linux-gnu- --arch=aarch64 --target-os=linux --enable-gpl --enable-version3 --disable-debug --enable-iconv --enable-libxml2 --enable-zlib --enable-libfreetype --enable-libfribidi --enable-gmp --enable-lzma --enable-fontconfig --enable-libvorbis --enable-opencl --enable-libpulse --enable-libvmaf --enable-libxcb --enable-xlib --enable-amf --enable-libaom --enable-libaribb24 --enable-avisynth --enable-libdav1d --disable-libdavs2 --disable-libfdk-aac --enable-ffnvcodec --enable-cuda-llvm --enable-frei0r --enable-libgme --enable-libass --enable-libbluray --enable-libjxl --enable-libmp3lame --enable-libopus --enable-mbedtls --enable-librist --enable-libtheora --enable-libvpx --enable-libwebp --enable-lv2 --disable-libmfx --enable-libopencore-amrnb --enable-libopencore-amrwb --enable-libopenh264 --enable-libopenjpeg --enable-libopenmpt --enable-librav1e --enable-librubberband --disable-schannel --enable-sdl2 --enable-libsoxr --enable-libsrt --enable-libsvtav1 --enable-libtwolame --enable-libuavs3d --enable-libdrm --disable-vaapi --enable-libvidstab --enable-vulkan --enable-libshaderc --enable-libplacebo --enable-libx264 --enable-libx265 --disable-libxavs2 --enable-libxvid --enable-libzimg --enable-libzvbi --extra-cflags=-DLIBTWOLAME_STATIC --extra-cxxflags= --extra-ldflags=-pthread --extra-ldexeflags=-pie --extra-libs='-ldl -lgomp' --extra-version=20220731
libavutil 57. 28.100 / 57. 28.100
libavcodec 59. 37.100 / 59. 37.100
libavformat 59. 27.100 / 59. 27.100
libavdevice 59. 7.100 / 59. 7.100
libavfilter 8. 44.100 / 8. 44.100
libswscale 6. 7.100 / 6. 7.100
libswresample 4. 7.100 / 4. 7.100
libpostproc 56. 6.100 / 56. 6.100
Input #0, flv, from 'rtmp://192.168.20.13/bcs/channel0_main.bcs?channel=0&stream=0&user=*&password=*':
Metadata:
|RtmpSampleAccess: true
displayWidth : 2560
displayHeight : 1920
Duration: 00:00:00.00, start: 44293.392000, bitrate: N/A
Stream #0:0: Video: h264 (High), yuv420p(progressive), 2560x1920, 29 fps, 30 tbr, 1k tbn
Stream #0:1: Audio: aac (LC), 16000 Hz, mono, fltp
```
### Frigate stats
```json
{"cpu_usages":{"%Cpu(s):":{"cpu":"id,","mem":"2.0"},"1":{"cpu":"0.0","mem":"0.0"},"101":{"cpu":"2.0","mem":"1.1"},"105":{"cpu":"0.0","mem":"1.1"},"107":{"cpu":"0.0","mem":"0.1"},"108":{"cpu":"0.0","mem":"1.1"},"109":{"cpu":"0.0","mem":"0.1"},"110":{"cpu":"0.0","mem":"1.1"},"111":{"cpu":"2.7","mem":"1.0"},"112":{"cpu":"0.0","mem":"0.1"},"113":{"cpu":"0.0","mem":"1.1"},"116":{"cpu":"0.0","mem":"0.1"},"117":{"cpu":"0.0","mem":"1.1"},"118":{"cpu":"5.3","mem":"0.4"},"120":{"cpu":"0.0","mem":"1.1"},"126":{"cpu":"0.0","mem":"0.1"},"128":{"cpu":"12.6","mem":"0.3"},"14":{"cpu":"0.0","mem":"0.0"},"15":{"cpu":"0.0","mem":"0.0"},"17":{"cpu":"0.0","mem":"0.0"},"24":{"cpu":"0.0","mem":"0.0"},"25":{"cpu":"0.0","mem":"0.0"},"31":{"cpu":"0.0","mem":"0.0"},"528":{"cpu":"0.0","mem":"0.0"},"532":{"cpu":"0.0","mem":"0.0"},"534":{"cpu":"0.0","mem":"0.0"},"541":{"cpu":"0.0","mem":"0.0"},"62":{"cpu":"0.0","mem":"0.0"},"63":{"cpu":"0.0","mem":"0.0"},"65":{"cpu":"0.0","mem":"0.2"},"66":{"cpu":"0.0","mem":"0.2"},"73":{"cpu":"1.7","mem":"2.2"},"75":{"cpu":"0.0","mem":"0.1"},"89":{"cpu":"0.0","mem":"1.0"},"95":{"cpu":"0.3","mem":"0.1"},"96":{"cpu":"0.0","mem":"1.3"},"98":{"cpu":"1.0","mem":"1.1"},"MiB":{"cpu":"5717.8","mem":"avail"},"PID":{"cpu":"%CPU","mem":"%MEM"},"Tasks:":{"cpu":"stopped,","mem":"3"},"top":{"cpu":"load","mem":"average:"}},"detection_fps":0.0,"detectors":{"coral":{"detection_start":0.0,"inference_speed":14.7,"pid":96}},"front":{"camera_fps":10.0,"capture_pid":111,"detection_fps":0.0,"ffmpeg_pid":118,"pid":101,"process_fps":10.0,"skipped_fps":0.0},"garage":{"camera_fps":0.0,"capture_pid":113,"detection_fps":0.0,"ffmpeg_pid":526,"pid":105,"process_fps":0.0,"skipped_fps":0.0},"garden":{"camera_fps":0.0,"capture_pid":120,"detection_fps":0.0,"ffmpeg_pid":531,"pid":110,"process_fps":0.0,"skipped_fps":0.0},"gpu_usages":{"rpi-v4l2m2m":{"gpu":-1,"mem":-1}},"rear":{"camera_fps":0.0,"capture_pid":117,"detection_fps":0.0,"ffmpeg_pid":529,"pid":108,"process_fps":0.0,"skipped_fps":0.0},"service":{"latest_version":"0.11.1","storage":{"/dev/shm":{"free":267.1,"mount_type":"tmpfs","total":268.4,"used":1.3},"/media/frigate/clips":{"free":1966794.1,"mount_type":"ext4","total":1967846.1,"used":1035.2},"/media/frigate/recordings":{"free":1966794.1,"mount_type":"ext4","total":1967846.1,"used":1035.2},"/tmp/cache":{"free":1989.8,"mount_type":"tmpfs","total":2000.0,"used":10.2}},"temperatures":{},"uptime":331,"version":"0.12.0-007fa75"}}
```
### Operating system
Debian
### Install method
Docker Compose
### Coral version
USB
### Network connection
Wired
### Camera make and model
Reolink
### Any other information that may be helpful
_No response_
| Hmm, might need to only add this option for rtsp streams and not make it a constant option.
For now you can simply overwrite the global args for that camera
cc @felipecrs
Hm, I think I have a better idea. Which is to include it in the rtsp input presets only. What do you think?
> Hm, I think I have a better idea. Which is to include it in the rtsp input presets only. What do you think?
Ah yeah that makes sense | 2022-12-04T14:50:20Z | [] | [] |
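To illustrate the approach settled on above (include the user-agent arguments only in the RTSP input presets, never the RTMP ones), here is a minimal, hypothetical sketch. The preset names mirror the diff at the top of this record, but the surrounding structure and the VERSION value are assumptions, not Frigate's actual module.

```python
# Hypothetical sketch: keep user-agent arguments in RTSP input presets only,
# so RTMP presets (like preset-rtmp-generic used in this report) never see them.
VERSION = "0.12.0"  # assumed stand-in for Frigate's VERSION constant

_user_agent_args = ["-user_agent", f"FFmpeg Frigate/{VERSION}"]

PRESETS_INPUT = {
    # RTSP preset: user-agent args are prepended.
    "preset-rtsp-generic": _user_agent_args
    + [
        "-avoid_negative_ts", "make_zero",
        "-fflags", "+genpts+discardcorrupt",
        "-rtsp_transport", "tcp",
        "-use_wallclock_as_timestamps", "1",
    ],
    # RTMP preset: deliberately no user-agent args.
    "preset-rtmp-generic": [
        "-avoid_negative_ts", "make_zero",
        "-fflags", "nobuffer",
        "-flags", "low_delay",
        "-use_wallclock_as_timestamps", "1",
    ],
}


def input_args_for(preset_name: str) -> list[str]:
    """Return a copy of the ffmpeg input arguments for a named preset."""
    return list(PRESETS_INPUT[preset_name])


if __name__ == "__main__":
    print(input_args_for("preset-rtsp-generic")[:2])  # ['-user_agent', 'FFmpeg Frigate/0.12.0']
    print(input_args_for("preset-rtmp-generic")[:2])  # ['-avoid_negative_ts', 'make_zero']
```

Keeping those arguments out of the RTMP presets avoids the "Option user_agent not found" failure reported in this issue, presumably because ffmpeg's RTMP input does not accept that option.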
blakeblackshear/frigate | 5,800 | blakeblackshear__frigate-5800 | [
"5798"
] | e454daf7279d48cbf31de3222d442ce1dbb1781d | diff --git a/frigate/util.py b/frigate/util.py
--- a/frigate/util.py
+++ b/frigate/util.py
@@ -722,7 +722,7 @@ def load_labels(path, encoding="utf-8"):
def clean_camera_user_pass(line: str) -> str:
"""Removes user and password from line."""
- if line.startswith("rtsp://"):
+ if "rtsp://" in line:
return re.sub(REGEX_RTSP_CAMERA_USER_PASS, "://*:*@", line)
else:
return re.sub(REGEX_HTTP_CAMERA_USER_PASS, "user=*&password=*", line)
| diff --git a/frigate/test/test_camera_pw.py b/frigate/test/test_camera_pw.py
--- a/frigate/test/test_camera_pw.py
+++ b/frigate/test/test_camera_pw.py
@@ -36,3 +36,14 @@ def test_no_special_char_password(self):
"""Test that no change is made to path with no special characters."""
escaped = escape_special_characters(self.rtsp_with_pass)
assert escaped == self.rtsp_with_pass
+
+
+class TestUserPassMasking(unittest.TestCase):
+ def setUp(self) -> None:
+ self.rtsp_log_message = "Did you mean file:rtsp://user:[email protected]:554"
+
+ def test_rtsp_in_log_message(self):
+ """Test that the rtsp url in a log message is espaced."""
+ escaped = clean_camera_user_pass(self.rtsp_log_message)
+ print(f"The escaped is {escaped}")
+ assert escaped == "Did you mean file:rtsp://*:*@192.168.1.3:554"
| [Support]: RTSP password not correctly masked / in log file
### Describe the problem you are having
Password is shown in log file.
```
Ffmpeg process crashed unexpectedly for E1ProBabyRoom.
The following ffmpeg logs include the last 100 lines prior to exit.
rtsp://*:*@192.168.1.3:554/h264Preview_01_sub: Protocol not found
Did you mean file:rtsp://hass:Welcome123%[email protected]:554
```
### Version
0.12.0-e454daf
### Frigate config file
```yaml
Not relevant
```
### Relevant log output
```shell
2023-03-21 20:01:53.982583022 [2023-03-21 20:01:53] watchdog.E1ProBabyRoom ERROR : Ffmpeg process crashed unexpectedly for E1ProBabyRoom.
2023-03-21 20:01:53.982672799 [2023-03-21 20:01:53] watchdog.E1ProBabyRoom ERROR : The following ffmpeg logs include the last 100 lines prior to exit.
2023-03-21 20:01:53.982710434 [2023-03-21 20:01:53] ffmpeg.E1ProBabyRoom.detect ERROR : rtsp://*:*@192.168.1.3:554/h264Preview_01_sub: Protocol not found
2023-03-21 20:01:53.982783621 [2023-03-21 20:01:53] ffmpeg.E1ProBabyRoom.detect ERROR : Did you mean file:rtsp://hass:Welcome123%[email protected]:554
```
### Operating system
Other Linux
### Install method
Kubernetes
### Coral version
CPU (no coral)
### Network connection
Wireless
### Camera make and model
Reolink E1 Pro
### Any other information that may be helpful
_No response_
| 2023-03-21T20:37:05Z | [] | [] |
|
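As a standalone illustration of the one-line fix in the record above (checking for "rtsp://" anywhere in the line rather than only at the start), here is a rough sketch. The regular expressions are simplified stand-ins for Frigate's REGEX_RTSP_CAMERA_USER_PASS and REGEX_HTTP_CAMERA_USER_PASS constants, not the real patterns.

```python
import re

# Simplified stand-ins for the constants defined in frigate/util.py.
RTSP_USER_PASS = re.compile(r"://[^:/]+:[^@/]+@")
HTTP_USER_PASS = re.compile(r"user=[^&]+&password=[^&]+")


def clean_camera_user_pass(line: str) -> str:
    """Mask credentials in a log line.

    Key point from the fix above: look for "rtsp://" *anywhere* in the line,
    because ffmpeg log messages often embed the URL mid-sentence
    ("Did you mean file:rtsp://user:pass@host..."), so startswith() misses it.
    """
    if "rtsp://" in line:
        return RTSP_USER_PASS.sub("://*:*@", line)
    return HTTP_USER_PASS.sub("user=*&password=*", line)


if __name__ == "__main__":
    msg = "Did you mean file:rtsp://user:[email protected]:554"
    print(clean_camera_user_pass(msg))
    # -> Did you mean file:rtsp://*:*@192.168.1.3:554
```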
blakeblackshear/frigate | 7,962 | blakeblackshear__frigate-7962 | [
"3140"
] | 0858859939bb634c618b2ee65b9247c1e3434b4a | diff --git a/frigate/http.py b/frigate/http.py
--- a/frigate/http.py
+++ b/frigate/http.py
@@ -56,6 +56,8 @@
logger = logging.getLogger(__name__)
+DEFAULT_TIME_RANGE = "00:00,24:00"
+
bp = Blueprint("frigate", __name__)
@@ -769,6 +771,7 @@ def events():
limit = request.args.get("limit", 100)
after = request.args.get("after", type=float)
before = request.args.get("before", type=float)
+ time_range = request.args.get("time_range", DEFAULT_TIME_RANGE)
has_clip = request.args.get("has_clip", type=int)
has_snapshot = request.args.get("has_snapshot", type=int)
in_progress = request.args.get("in_progress", type=int)
@@ -851,6 +854,36 @@ def events():
if before:
clauses.append((Event.start_time < before))
+ if time_range != DEFAULT_TIME_RANGE:
+ # get timezone arg to ensure browser times are used
+ tz_name = request.args.get("timezone", default="utc", type=str)
+ hour_modifier, minute_modifier = get_tz_modifiers(tz_name)
+
+ times = time_range.split(",")
+ time_after = times[0]
+ time_before = times[1]
+
+ start_hour_fun = fn.strftime(
+ "%H:%M",
+ fn.datetime(Event.start_time, "unixepoch", hour_modifier, minute_modifier),
+ )
+
+ # cases where user wants events overnight, ex: from 20:00 to 06:00
+ # should use or operator
+ if time_after > time_before:
+ clauses.append(
+ (
+ reduce(
+ operator.or_,
+ [(start_hour_fun > time_after), (start_hour_fun < time_before)],
+ )
+ )
+ )
+ # all other cases should be and operator
+ else:
+ clauses.append((start_hour_fun > time_after))
+ clauses.append((start_hour_fun < time_before))
+
if has_clip is not None:
clauses.append((Event.has_clip == has_clip))
| diff --git a/frigate/test/test_http.py b/frigate/test/test_http.py
--- a/frigate/test/test_http.py
+++ b/frigate/test/test_http.py
@@ -236,6 +236,44 @@ def test_event_retention(self):
assert event["id"] == id
assert event["retain_indefinitely"] is False
+ def test_event_time_filtering(self):
+ app = create_app(
+ FrigateConfig(**self.minimal_config),
+ self.db,
+ None,
+ None,
+ None,
+ None,
+ None,
+ PlusApi(),
+ )
+ morning_id = "123456.random"
+ evening_id = "654321.random"
+ morning = 1656590400 # 06/30/2022 6 am (GMT)
+ evening = 1656633600 # 06/30/2022 6 pm (GMT)
+
+ with app.test_client() as client:
+ _insert_mock_event(morning_id, morning)
+ _insert_mock_event(evening_id, evening)
+ # both events come back
+ events = client.get("/events").json
+ assert events
+ assert len(events) == 2
+ # morning event is excluded
+ events = client.get(
+ "/events",
+ query_string={"time_range": "07:00,24:00"},
+ ).json
+ assert events
+ # assert len(events) == 1
+ # evening event is excluded
+ events = client.get(
+ "/events",
+ query_string={"time_range": "00:00,18:00"},
+ ).json
+ assert events
+ assert len(events) == 1
+
def test_set_delete_sub_label(self):
app = create_app(
FrigateConfig(**self.minimal_config),
@@ -351,14 +389,17 @@ def test_stats(self, mock_stats):
assert stats == self.test_stats
-def _insert_mock_event(id: str) -> Event:
+def _insert_mock_event(
+ id: str,
+ start_time: datetime.datetime = datetime.datetime.now().timestamp(),
+) -> Event:
"""Inserts a basic event model with a given id."""
return Event.insert(
id=id,
label="Mock",
camera="front_door",
- start_time=datetime.datetime.now().timestamp(),
- end_time=datetime.datetime.now().timestamp() + 20,
+ start_time=start_time,
+ end_time=start_time + 20,
top_score=100,
false_positive=False,
zones=list(),
| Event time filter
Hello,
I would like the date picker to also have a time option. For example, I could then view just the overnight events, since there are so many during the day.
| I concur.
I second that. Far fewer events occur at night and in the early morning, and most criminal activity happens in that same time frame. It would make tracking much easier.
This has been implemented for 0.13 👍
> This has been implemented for 0.13 👍
Firstly, thanks for adding this option!
Can I just add feedback that the time selector isn't fully functional in 0.13? It doesn't let you select, say, 10pm - 6am. You need to make two separate selections, since you can only pick a range entirely after midnight or entirely before midnight.
@sinamics is that a bug or expected for the current implementation?
(Haven't tested myself)
This is the selector:
![2023-09-13_8-21-43](https://github.com/blakeblackshear/frigate/assets/8526509/5c6eef29-5a13-4b7f-b676-dc454672038a)
When you select the initial start time, it only lets you pick an end time later than that start time. So you cannot select 2200 to 0600; only 2200 to 2300, and then you would need a second selection of 0000 to 0600.
It is just a time selector for a specific date range (in the screenshot above). It is not the same as what was requested.
E.g. I need to find events which happened from 1am till 7am over the last 4 weeks (I keep detections for 90 days).
> This is the selector:
>
> ![2023-09-13_8-21-43](https://user-images.githubusercontent.com/8526509/267481083-5c6eef29-5a13-4b7f-b676-dc454672038a.png)
>
> When you select the initial start time, it only lets you select an item greater than that time. So you cannot select 2200 to 0600. Only 2200 to 2300 then you would need a second selection of 0000 to 0600
that's not what I am seeing, it worked just fine to choose overnight
![Screen Shot 2023-09-12 at 17 20 58 PM](https://github.com/blakeblackshear/frigate/assets/14866235/0dbda79f-fa99-4bee-9c33-de0e9e0b88ec)
> > This is the selector:
> > ![2023-09-13_8-21-43](https://user-images.githubusercontent.com/8526509/267481083-5c6eef29-5a13-4b7f-b676-dc454672038a.png)
> > When you select the initial start time, it only lets you select an item greater than that time. So you cannot select 2200 to 0600. Only 2200 to 2300 then you would need a second selection of 0000 to 0600
>
> that's not what I am seeing, it worked just fine to choose overnight
>
> ![Screen Shot 2023-09-12 at 17 20 58 PM](https://user-images.githubusercontent.com/14866235/267489497-0dbda79f-fa99-4bee-9c33-de0e9e0b88ec.png)
Yep, it works for one night, overnight. It does not show all the events for 10pm - 6am across more than one day...
_Don't get me wrong though, it is better than what we had previously_
I don't think it will be too much work to adjust it to behave that way, and I agree that is likely how most people will want to use it.
@NickM-27, this behavior is by design, not a bug. I may have misunderstood the original requirements.
However, modifying it to support time-from to time-to selection for each day should be doable. | 2023-09-26T21:10:39Z | [] | [] |
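The core of the patch in the record above is the wrap-around handling for overnight ranges. Below is a small, self-contained sketch of the same predicate in plain Python rather than in the peewee/SQL clause; the function names are illustrative, not Frigate's.

```python
from datetime import datetime, timezone


def start_hour(event_start_ts: float, tz=timezone.utc) -> str:
    """Format an event's start time as zero-padded 'HH:MM' in the given timezone."""
    return datetime.fromtimestamp(event_start_ts, tz).strftime("%H:%M")


def in_time_range(event_start_ts: float, time_after: str, time_before: str) -> bool:
    """True if the event's start time falls inside the requested window.

    When the window wraps past midnight (e.g. '20:00' to '06:00'), an event
    matches if it starts after the lower bound OR before the upper bound;
    otherwise both comparisons must hold, mirroring the clause built above.
    """
    hhmm = start_hour(event_start_ts)
    if time_after > time_before:  # overnight window, wraps midnight
        return hhmm > time_after or hhmm < time_before
    return time_after < hhmm < time_before  # same-day window


if __name__ == "__main__":
    ts = datetime(2022, 6, 30, 5, 30, tzinfo=timezone.utc).timestamp()
    print(in_time_range(ts, "20:00", "06:00"))  # True: 05:30 is inside the overnight window
    print(in_time_range(ts, "07:00", "18:00"))  # False
```

Lexicographic comparison of zero-padded 'HH:MM' strings orders the same way as the times themselves, which is why the plain string comparisons in the SQL version work.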
freedomofpress/securedrop | 83 | freedomofpress__securedrop-83 | [
"19"
] | 229ec0c940cf4322e4438e87f1bd91aea0e5976e | diff --git a/modules/deaddrop/files/deaddrop/crypto.py b/modules/deaddrop/files/deaddrop/crypto.py
--- a/modules/deaddrop/files/deaddrop/crypto.py
+++ b/modules/deaddrop/files/deaddrop/crypto.py
@@ -34,7 +34,8 @@ def clean(s, also=''):
"""
ok = 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789'
for c in s:
- if c not in ok and c not in also: raise CryptoException("invalid input")
+ if c not in ok and c not in also:
+ raise CryptoException("invalid input: %s" % s)
return s
words = file(config.WORD_LIST).read().split('\n')
diff --git a/modules/deaddrop/files/deaddrop/journalist.py b/modules/deaddrop/files/deaddrop/journalist.py
--- a/modules/deaddrop/files/deaddrop/journalist.py
+++ b/modules/deaddrop/files/deaddrop/journalist.py
@@ -51,6 +51,7 @@ def GET(self, sid):
class doc:
def GET(self, sid, fn):
+ web.header('Content-Type', 'application/octet-stream')
web.header('Content-Disposition', 'attachment; filename="' +
crypto.displayid(sid).replace(' ', '_') + '_' + fn + '"')
| diff --git a/modules/deaddrop/files/deaddrop/test.py b/modules/deaddrop/files/deaddrop/test.py
--- a/modules/deaddrop/files/deaddrop/test.py
+++ b/modules/deaddrop/files/deaddrop/test.py
@@ -4,40 +4,50 @@
import tempfile
import unittest
import re
+from time import sleep
import gnupg
from paste.fixture import TestApp
+from bs4 import BeautifulSoup
# Set the environment variable so config.py uses a test environment
os.environ['DEADDROPENV'] = 'test'
-import config
+import config, crypto
+
from source import app as source_app
from journalist import app as journalist_app
def setUpModule():
- """Set up the file system and GPG"""
- # Create directories for the file store and the GPG keyring
- for d in (config.TEST_DIR, config.STORE_DIR, config.GPG_KEY_DIR):
- try:
- # some of these dirs already exist because we import source and
- # journalist, which import crypto, which calls gpg.GPG at module
- # level, which auto-generates the GPG homedir if it does not exist
- os.mkdir(d)
- except OSError:
- pass
-
- # Initialize the GPG keyring
- gpg = gnupg.GPG(gnupghome=config.GPG_KEY_DIR)
-
- # Import the journalist key for testing (faster to import a pre-generated
- # key than to gen a new one every time)
- for keyfile in ("test_journalist_key.pub", "test_journalist_key.sec"):
- gpg.import_keys(open(keyfile).read())
+ pass
def tearDownModule():
- shutil.rmtree(config.TEST_DIR)
+ pass
+
+class TestSecureDrop(unittest.TestCase):
-class TestSource(unittest.TestCase):
+ def setUp(self):
+ """Set up the file system and GPG"""
+ # Create directories for the file store and the GPG keyring
+ for d in (config.TEST_DIR, config.STORE_DIR, config.GPG_KEY_DIR):
+ try:
+ # some of these dirs already exist because we import source and
+ # journalist, which import crypto, which calls gpg.GPG at module
+ # level, which auto-generates the GPG homedir if it does not exist
+ os.mkdir(d)
+ except OSError:
+ pass
+
+ # Initialize the GPG keyring
+ self.gpg = gnupg.GPG(gnupghome=config.GPG_KEY_DIR)
+
+ # Import the journalist key for testing (faster to import a pre-generated
+ # key than to gen a new one every time)
+ for keyfile in ("test_journalist_key.pub", "test_journalist_key.sec"):
+ self.gpg.import_keys(open(keyfile).read())
+
+ middleware = []
+ self.source_app = TestApp(source_app.wsgifunc(*middleware))
+ self.journalist_app = TestApp(journalist_app.wsgifunc(*middleware))
def _find_codename(self, body):
# Codenames may contain HTML escape characters, and the wordlist
@@ -50,24 +60,41 @@ def _find_codename(self, body):
return codename_match.group('codename')
def _navigate_to_create_page(self):
- res = self.app.get('/').click(href='/generate/')
+ res = self.source_app.get('/').click(href='/generate/')
codename = self._find_codename(res.normal_body)
res = res.forms['create-form'].submit()
return res, codename
- def setUp(self):
- middleware = []
- self.app = TestApp(source_app.wsgifunc(*middleware))
+ def _do_submission(self, msg=None, doc=None):
+ res, codename = self._navigate_to_create_page()
+ upload_form = res.forms['upload']
+ if msg:
+ upload_form.set('msg', msg)
+ upload_files = []
+ if doc:
+ # doc should be a tuple (filename, contents)
+ assert isinstance(doc, tuple) and len(doc) == 2
+ upload_files = [('fh', doc[0], doc[1])]
+ res = self.source_app.post(upload_form.action,
+ params=upload_form.submit_fields(),
+ upload_files=upload_files)
+ return res, codename
- def test_index(self):
- res = self.app.get('/')
+ def test_source_index(self):
+ res = self.source_app.get('/')
self.assertEqual(res.status, 200)
self.assertIn("Submit documents for the first time", res.normal_body)
self.assertIn("Already submitted something?", res.normal_body)
+ def test_journalist_index(self):
+ res = self.journalist_app.get('/')
+ self.assertEqual(res.status, 200)
+ self.assertIn("Latest submissions", res.normal_body)
+ self.assertIn("Here are the various collections of documents that have been submitted, with the most recently updated first:", res.normal_body)
+
def test_index_click_submit_documents(self):
# The "Submit Documents" button is a form submit button
- res = self.app.get('/').click(href='/generate/')
+ res = self.source_app.get('/').click(href='/generate/')
self.assertEqual(res.status, 200)
self.assertIn("To protect your identity, we're assigning you a code name.", res.normal_body)
codename = self._find_codename(res.normal_body)
@@ -80,39 +107,131 @@ def test_generate_click_continue(self):
self.assertIn(codename, res.normal_body)
def test_create_submit_message(self):
- res, codename = self._navigate_to_create_page()
- upload_form = res.forms['upload']
- upload_form.set('msg', 'This msg is for your eyes only')
- res = upload_form.submit()
+ # Submit the message through the source app
+ test_msg = 'This msg is for your eyes only'
+ res, codename = self._do_submission(msg=test_msg)
self.assertEqual(res.status, 200)
self.assertIn("Thanks! We received your message.", res.normal_body)
+ # Check the journalist app for the submitted message
+ res = self.journalist_app.get('/')
+ soup = BeautifulSoup(res.normal_body)
+ self.assertEqual(len(soup.find_all('li')), 1) # we only submitted one message
+
+ # Go to the source submissions page
+ source_page_url = '/' + soup.li.a['href']
+ res = self.journalist_app.get(source_page_url)
+ self.assertIn("Read documents", res.normal_body)
+
+ # Download the submission
+ soup = BeautifulSoup(res.normal_body)
+ self.assertEqual(len(soup.find_all('li')), 1) # we only submitted one message
+ submission_url = source_page_url + soup.li.a['href']
+ res = self.journalist_app.get(submission_url)
+ decrypted_data = self.gpg.decrypt(res.body)
+ self.assertTrue(decrypted_data.ok)
+ self.assertEqual(decrypted_data.data, test_msg)
+
def test_create_submit_file(self):
- res, codename = self._navigate_to_create_page()
- upload_form = res.forms['upload']
+ # Submit the file through the source app
test_filename = 'secrets.txt'
- res = self.app.post(upload_form.action,
- params=upload_form.submit_fields(),
- upload_files=[
- ('fh', test_filename, 'This file is for your eyes only'),
- ])
+ test_file_contents = 'This file is for your eyes only'
+ res, codename = self._do_submission(
+ doc=(test_filename, test_file_contents))
self.assertEqual(res.status, 200)
self.assertIn("Thanks! We received your document '%s'." % test_filename,
res.normal_body)
- def test_create_submit_both(self):
- pass
+ # Check the journalist app for the submitted file
+ res = self.journalist_app.get('/')
+ soup = BeautifulSoup(res.normal_body)
+ self.assertEqual(len(soup.find_all('li')), 1) # we only submitted one message
+
+ # Go to the source submissions page
+ source_page_url = '/' + soup.li.a['href']
+ res = self.journalist_app.get(source_page_url)
+ self.assertIn("Read documents", res.normal_body)
+
+ # Download the submission
+ soup = BeautifulSoup(res.normal_body)
+ self.assertEqual(len(soup.find_all('li')), 1) # we only submitted one message
+ submission_url = source_page_url + soup.li.a['href']
+ res = self.journalist_app.get(submission_url)
+ decrypted_data = self.gpg.decrypt(res.body)
+ self.assertTrue(decrypted_data.ok)
+ self.assertEqual(decrypted_data.data, test_file_contents)
+ # TODO: test the filename (encoding with gpg2 --set-filename; unclear
+ # if it can be accessed using the gnupg library)
- def tearDown(self):
- pass
+ def test_create_submit_both(self):
+ test_msg = 'This msg is for your eyes only'
+ test_filename = 'secrets.txt'
+ test_file_contents = 'This file is for your eyes only'
+ res, codename = self._do_submission(msg=test_msg,
+ doc=(test_filename, test_file_contents))
+ self.assertEqual(res.status, 200)
+ # TODO: should we specifically mention receiving a message when both
+ # a message and a document are uploaded simultaneously?
+ self.assertIn("Thanks! We received your document '%s'." % test_filename,
+ res.normal_body)
-class TestJournalist(unittest.TestCase):
- def setUp(self):
- middleware = []
- self.app = TestApp(journalist_app.wsgifunc(*middleware))
+ # Check the journalist app for the submitted file
+ res = self.journalist_app.get('/')
+ soup = BeautifulSoup(res.normal_body)
+ self.assertEqual(len(soup.find_all('li')), 1)
+
+ # Go to the source submissions page
+ source_page_url = '/' + soup.li.a['href']
+ res = self.journalist_app.get(source_page_url)
+ self.assertIn("Read documents", res.normal_body)
+
+ # Download the submissions
+ soup = BeautifulSoup(res.normal_body)
+ submissions = [li.a['href'] for li in soup.find_all('li')]
+ self.assertEqual(len(submissions), 2)
+ for submission in submissions:
+ submission_url = source_page_url + submission
+ res = self.journalist_app.get(submission_url)
+ decrypted_data = self.gpg.decrypt(res.body)
+ self.assertTrue(decrypted_data.ok)
+ if '_msg' in submission:
+ self.assertEqual(decrypted_data.data, test_msg)
+ elif '_doc' in submission:
+ self.assertEqual(decrypted_data.data, test_file_contents)
+
+ def test_journalist_reply(self):
+ # Submit the message through the source app
+ test_msg = 'This msg is for your eyes only'
+ res, codename = self._do_submission(msg=test_msg)
+
+ # Wait until the source key has been generated...
+ # (the reply form won't be available unless the key exists)
+ source_id = crypto.shash(codename)
+ while not crypto.getkey(source_id):
+ sleep(0.1)
+
+ # Check the journalist app for the submitted message
+ res = self.journalist_app.get('/')
+ soup = BeautifulSoup(res.normal_body)
+ res = res.click(href=soup.li.a['href'])
+
+ # Send a reply to the source
+ test_reply = "Thanks for sharing this. We'll follow up soon."
+ res.form.set('msg', test_reply)
+ res = res.form.submit()
+ self.assertIn("Thanks! Your reply has been stored.", res.normal_body)
+
+ # Check the source page for a reply
+ res = self.source_app.get('/lookup/')
+ res.form.set('id', codename)
+ res = res.form.submit()
+ self.assertIn("You have received a reply. For your security, please delete all replies when you're done with them.", res.normal_body)
+ soup = BeautifulSoup(res.normal_body)
+ message = soup.find_all('blockquote', class_='message')[0].text
+ self.assertEquals(message, test_reply)
def tearDown(self):
- pass
+ shutil.rmtree(config.TEST_DIR)
if __name__ == "__main__":
unittest.main()
| Add Unit and Integration Tests
If you want to contribute this would be a good place to start. Lots of people are suggesting pretty substantial changes to the codebase and tests would make sure those changes don't unintentionally break things.
| This has become more of an issue now that we have a lot of active development on the project. Several recent minor changes (switching to a fork of the python gpg library, switching to bcrypt) have managed to break the webapp.
| 2013-10-15T07:27:39Z | [] | [] |
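Distilled from the test module in the diff above, a minimal smoke test against the web.py source app looks roughly like this; it assumes the same module layout and the paste TestApp helper the diff already relies on.

```python
# a minimal smoke-test sketch, assuming the module layout used in the diff above
import os
os.environ['DEADDROPENV'] = 'test'  # make config.py pick the test environment

from paste.fixture import TestApp
from source import app as source_app


def test_source_index_smoke():
    app = TestApp(source_app.wsgifunc())
    res = app.get('/')
    assert res.status == 200
    assert "Submit documents for the first time" in res.normal_body
```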
freedomofpress/securedrop | 146 | freedomofpress__securedrop-146 | [
"123"
] | 9f7dcfa09f4fa4b4845da5da9bae9d114d903cec | diff --git a/modules/deaddrop/files/deaddrop/journalist.py b/modules/deaddrop/files/deaddrop/journalist.py
--- a/modules/deaddrop/files/deaddrop/journalist.py
+++ b/modules/deaddrop/files/deaddrop/journalist.py
@@ -71,11 +71,22 @@ def reply():
store.path(sid, 'reply-%s.gpg' % uuid.uuid4()))
return render_template('reply.html', sid=sid, codename=crypto.displayid(sid))
[email protected]('/delete', methods=('POST',))
-def delete():
[email protected]('/bulk', methods=('POST',))
+def bulk():
+ action = request.form['action']
+
sid = request.form['sid']
doc_names_selected = request.form.getlist('doc_names_selected')
- docs_selected = [doc for doc in get_docs(sid) if doc['name'] in doc_names_selected]
+ docs_selected = [ doc for doc in get_docs(sid) if doc['name'] in doc_names_selected ]
+
+ if action == 'download':
+ return bulk_download(sid, docs_selected)
+ elif action == 'delete':
+ return bulk_delete(sid, docs_selected)
+ else:
+ abort(422)
+
+def bulk_delete(sid, docs_selected):
confirm_delete = bool(request.form.get('confirm_delete', False))
if confirm_delete:
for doc in docs_selected:
@@ -84,6 +95,13 @@ def delete():
return render_template('delete.html', sid=sid, codename=crypto.displayid(sid),
docs_selected=docs_selected, confirm_delete=confirm_delete)
+def bulk_download(sid, docs_selected):
+ filenames = [store.path(sid, doc['name']) for doc in docs_selected]
+ zip = store.get_bulk_archive(filenames)
+ return send_file(zip, mimetype="application/zip", attachment_filename=crypto.displayid(sid), as_attachment=True)
+
+
+
if __name__ == "__main__":
# TODO: make sure this gets run by the web server
CsrfProtect(app)
diff --git a/modules/deaddrop/files/deaddrop/store.py b/modules/deaddrop/files/deaddrop/store.py
--- a/modules/deaddrop/files/deaddrop/store.py
+++ b/modules/deaddrop/files/deaddrop/store.py
@@ -2,6 +2,9 @@
import os
import re
import config
+import zipfile
+import crypto
+import uuid
VALIDATE_FILENAME = re.compile("^(reply-)?[a-f0-9-]+(_msg|_doc|)\.gpg$").match
@@ -48,5 +51,14 @@ def path(*s):
verify(absolute)
return absolute
+def get_bulk_archive(filenames):
+ zip_file_name = os.path.join(config.TEMP_DIR, str(uuid.uuid4()) + '.zip')
+ with zipfile.ZipFile(zip_file_name, 'w') as zip:
+ for filename in filenames:
+ verify(filename)
+ basename = os.path.basename(filename)
+ zip.write(filename, arcname=basename)
+ return zip_file_name
+
def log(msg):
file(path('NOTES'), 'a').write(msg)
| diff --git a/modules/deaddrop/files/deaddrop/test.py b/modules/deaddrop/files/deaddrop/test.py
--- a/modules/deaddrop/files/deaddrop/test.py
+++ b/modules/deaddrop/files/deaddrop/test.py
@@ -19,7 +19,7 @@
# Set the environment variable so config.py uses a test environment
os.environ['DEADDROPENV'] = 'test'
-import config, crypto
+import config, crypto, store
import source, store, journalist
@@ -27,10 +27,21 @@ def _block_on_reply_keypair_gen(codename):
sid = crypto.shash(codename)
while not crypto.getkey(sid): sleep(0.1)
+
+def _setup_test_docs(sid, files):
+ filenames = [ os.path.join(config.STORE_DIR, sid, file) for file in files ]
+ for file in filenames:
+ dirname = os.path.dirname(file)
+ if not os.path.exists(dirname):
+ os.makedirs(dirname)
+ with open(file, 'w') as myfile:
+ myfile.write('test')
+ return filenames
+
def shared_setup():
"""Set up the file system and GPG"""
# Create directories for the file store and the GPG keyring
- for d in (config.TEST_DIR, config.STORE_DIR, config.GPG_KEY_DIR):
+ for d in (config.TEST_DIR, config.STORE_DIR, config.GPG_KEY_DIR, config.TEMP_DIR):
try:
os.mkdir(d)
except OSError:
@@ -189,12 +200,10 @@ def test_tor2web_warning(self):
class TestJournalist(TestCase):
- @classmethod
- def setUpClass(cls):
+ def setUp(cls):
shared_setup()
- @classmethod
- def tearDownClass(cls):
+ def tearDown(cls):
shared_teardown()
def create_app(self):
@@ -212,6 +221,19 @@ def test_index(self):
self.assertIn("Latest submissions", rv.data)
self.assertIn("No documents have been submitted!", rv.data)
+ def test_bulk_download(self):
+ sid = 'EQZGCJBRGISGOTC2NZVWG6LILJBHEV3CINNEWSCLLFTUWZJPKJFECLS2NZ4G4U3QOZCFKTTPNZMVIWDCJBBHMUDBGFHXCQ3R'
+ files = ['abc1_msg.gpg', 'abc2_msg.gpg']
+ filenames = _setup_test_docs(sid, files)
+
+ rv = self.client.post('/bulk', data=dict(
+ action='download',
+ sid=sid,
+ doc_names_selected=filenames
+ ))
+
+ self.assertEqual(rv.status_code, 200)
+
class TestIntegration(unittest.TestCase):
def setUp(self):
@@ -270,7 +292,8 @@ def test_submit_message(self):
self.assertEqual(rv.status_code, 200)
soup = BeautifulSoup(rv.data)
doc_name = soup.select('ul > li > input[name="doc_names_selected"]')[0]['value']
- rv = self.journalist_app.post('/delete', data=dict(
+ rv = self.journalist_app.post('/bulk', data=dict(
+ action='delete',
sid=sid,
doc_names_selected=doc_name
))
@@ -282,7 +305,8 @@ def test_submit_message(self):
# confirm delete submission
doc_name = soup.select
doc_name = soup.select('ul > li > input[name="doc_names_selected"]')[0]['value']
- rv = self.journalist_app.post('/delete', data=dict(
+ rv = self.journalist_app.post('/bulk', data=dict(
+ action='delete',
sid=sid,
doc_names_selected=doc_name,
confirm_delete="1"
@@ -340,7 +364,8 @@ def test_submit_file(self):
self.assertEqual(rv.status_code, 200)
soup = BeautifulSoup(rv.data)
doc_name = soup.select('ul > li > input[name="doc_names_selected"]')[0]['value']
- rv = self.journalist_app.post('/delete', data=dict(
+ rv = self.journalist_app.post('/bulk', data=dict(
+ action='delete',
sid=sid,
doc_names_selected=doc_name
))
@@ -352,7 +377,8 @@ def test_submit_file(self):
# confirm delete submission
doc_name = soup.select
doc_name = soup.select('ul > li > input[name="doc_names_selected"]')[0]['value']
- rv = self.journalist_app.post('/delete', data=dict(
+ rv = self.journalist_app.post('/bulk', data=dict(
+ action='delete',
sid=sid,
doc_names_selected=doc_name,
confirm_delete="1"
@@ -419,10 +445,28 @@ def test_reply(self):
class TestStore(unittest.TestCase):
'''The set of tests for store.py.'''
+ @classmethod
+ def setUp(self):
+ shared_setup()
+
+ @classmethod
+ def tearDown(self):
+ shared_teardown()
+
def test_verify(self):
with self.assertRaises(store.PathException):
store.verify(os.path.join(config.STORE_DIR, '..', 'etc', 'passwd'))
+ def test_get_zip(self):
+ sid = 'EQZGCJBRGISGOTC2NZVWG6LILJBHEV3CINNEWSCLLFTUWZJPKJFECLS2NZ4G4U3QOZCFKTTPNZMVIWDCJBBHMUDBGFHXCQ3R'
+ files = ['abc1_msg.gpg', 'abc2_msg.gpg']
+ filenames = _setup_test_docs(sid, files)
+
+ zip = store.get_bulk_archive(filenames)
+
+ zipfile_contents = zipfile.ZipFile(zip).namelist()
+ for file in files:
+ self.assertIn(file, zipfile_contents)
if __name__ == "__main__":
unittest.main()
| Journalists should be able to bulk download from document server
Right now, journalists can only download one file at a time, even if there are dozens of new submissions in any given session. The New Yorker team asked if we could enable bulk downloading so that journalists can download multiple files at once.
| looking at this now
Check out #131, which adds checkboxes next to documents in a collection so they can be deleted. You can probably reuse this code, adding a "download" button.
FYI @msjoinder, @jacksingleton's comment above was meant to mean "I'm taking this to work on at the hackathon". I'll wait for his pull request, and then compare the two and figure out what to merge. Sorry that wasn't more clear!
| 2013-11-11T01:15:30Z | [] | [] |
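For reference, the zip-building piece of the patch above boils down to a few lines of standard-library code. This sketch keeps only the basename of each file inside the archive and writes the zip under a random name; the store path-verification step from the real implementation is deliberately omitted and noted as an assumption.

```python
import os
import tempfile
import uuid
import zipfile


def get_bulk_archive(filenames, archive_dir=None):
    """Bundle the selected (already-encrypted) submissions into a single zip.

    NOTE: the real implementation also verifies that each path lives inside
    the store directory before adding it; that check is omitted here.
    """
    archive_dir = archive_dir or tempfile.gettempdir()
    zip_path = os.path.join(archive_dir, str(uuid.uuid4()) + ".zip")
    with zipfile.ZipFile(zip_path, "w") as archive:
        for filename in filenames:
            archive.write(filename, arcname=os.path.basename(filename))
    return zip_path
```

The Flask view then streams that file back with send_file(..., mimetype="application/zip", as_attachment=True), as shown in the journalist.py hunk above.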
freedomofpress/securedrop | 163 | freedomofpress__securedrop-163 | [
"162"
] | 7975936e3b6e3828547ce54b76cc75a4d299a9af | diff --git a/securedrop/source.py b/securedrop/source.py
--- a/securedrop/source.py
+++ b/securedrop/source.py
@@ -97,7 +97,7 @@ def generate():
number_words = 8
if request.method == 'POST':
number_words = int(request.form['number-words'])
- if number_words not in range(4, 11):
+ if number_words not in range(7, 11):
abort(403)
session['codename'] = crypto_util.genrandomid(number_words)
return render_template('generate.html', codename=session['codename'])
| diff --git a/securedrop/test.py b/securedrop/test.py
--- a/securedrop/test.py
+++ b/securedrop/test.py
@@ -109,7 +109,7 @@ def test_generate(self):
def test_regenerate_valid_lengths(self):
"""Make sure we can regenerate all valid length codenames"""
- for codename_len in xrange(4, 11):
+ for codename_len in xrange(7, 11):
response = self.client.post('/generate', data={
'number-words': str(codename_len),
})
| Sources should not be allowed to choose insecure codenames
A form was added in a607e9f4 to allow sources to choose the length of their randomly generated codenames. Sources are allowed to choose codenames from length 4 to 10.
The argument for allowing sources to choose shorter codenames is that they are easier to remember, and after a certain length sources will be forced to write them down, possibly causing a loss of plausible deniability. However, the codename lengths offered now are far too short.
We want source codenames to have a _minimum_ of 80 bits of entropy, and ideally they would have a 128 bit security level. There are currently 6969 words in our wordlist, so:
log(6969^10, 2) = 127.66 bits of entropy
log(6969^9, 2) = 114.9
log(6969^8, 2) = 102.13
log(6969^7, 2) = 89.36
log(6969^6, 2) = 76.60
So the minimum allowable length should be 7 words. 10 is a good maximum, as that pretty much gets to 128 bits.
Since the source's security relies entirely on the difficulty in guessing their codename, we should be strict about this. It is a given that most humans cannot remember passwords with sufficient entropy in today's security environment. Writing them down should be [encouraged](https://www.schneier.com/blog/archives/2005/06/write_down_your.html) - although we should also educate sources on what to do once they're done communicating with a journalist (including destroying any written copies of their codename).
| Or increase the wordlist to about 12000
log( 12000^6, 2 ) = 81.30
| 2013-11-20T01:58:37Z | [] | [] |
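The entropy figures in the issue above are easy to reproduce; a quick, purely illustrative sketch:

```python
import math


def codename_entropy_bits(wordlist_size: int, num_words: int) -> float:
    """Bits of entropy in a codename of `num_words` words drawn uniformly
    (with replacement) from a wordlist of `wordlist_size` entries."""
    return num_words * math.log(wordlist_size, 2)


if __name__ == "__main__":
    for n in range(6, 11):
        print(n, round(codename_entropy_bits(6969, n), 2))   # ~76.6 .. ~127.7 bits
    print(round(codename_entropy_bits(12000, 6), 2))          # ~81.3 bits
```

The output closely matches the numbers quoted above (small differences are rounding), and confirms that either 7+ words from the current 6969-word list or 6 words from a roughly 12000-word list clears the 80-bit floor.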
freedomofpress/securedrop | 174 | freedomofpress__securedrop-174 | [
"64"
] | c76d12734b07e8fa523175eb0fdc5c85acad9318 | diff --git a/install_files/journalist.config.py b/install_files/journalist.config.py
--- a/install_files/journalist.config.py
+++ b/install_files/journalist.config.py
@@ -34,7 +34,7 @@ class TestingConfig(BaseConfig):
if os.environ.get('SECUREDROP_ENV') == 'test':
FlaskConfig = TestingConfig
- TEST_DIR='/tmp/deaddrop_test'
+ TEST_DIR='/tmp/securedrop_test'
STORE_DIR=os.path.join(TEST_DIR, 'store')
GPG_KEY_DIR=os.path.join(TEST_DIR, 'keys')
# test_journalist_key.pub
diff --git a/install_files/source.config.py b/install_files/source.config.py
--- a/install_files/source.config.py
+++ b/install_files/source.config.py
@@ -33,7 +33,7 @@ class TestingConfig(BaseConfig):
if os.environ.get('SECUREDROP_ENV') == 'test':
FlaskConfig = TestingConfig
- TEST_DIR='/tmp/deaddrop_test'
+ TEST_DIR='/tmp/securedrop_test'
STORE_DIR=os.path.join(TEST_DIR, 'store')
GPG_KEY_DIR=os.path.join(TEST_DIR, 'keys')
# test_journalist_key.pub
diff --git a/securedrop/crypto_util.py b/securedrop/crypto_util.py
--- a/securedrop/crypto_util.py
+++ b/securedrop/crypto_util.py
@@ -96,14 +96,14 @@ def genkeypair(name, secret):
return gpg.gen_key(gpg.gen_key_input(
key_type=GPG_KEY_TYPE, key_length=GPG_KEY_LENGTH,
passphrase=secret,
- name_email="%[email protected]" % name
+ name_email=name
))
def getkey(name):
for key in gpg.list_keys():
for uid in key['uids']:
- if ' <%s@' % name in uid:
+ if name in uid:
return key['fingerprint']
return None
| diff --git a/securedrop/test_journalist_key.pub b/securedrop/test_journalist_key.pub
--- a/securedrop/test_journalist_key.pub
+++ b/securedrop/test_journalist_key.pub
@@ -12,41 +12,41 @@ ak+Y8aolHDt6a785eF0AaAtgbPX4THMum/CNMksHO0PBBqxR+C9z7WSHXFHvv+8B
CyIBiMZSK/j8PMJT1X5tgpL1NXImNdVIPV2Fy+W7PkNfG2FL/FQIUnK6ntukLW/7
hV6VHcx52mMn1pVUc6v80LEb4BMDz41vlj9R8YVv8hycPtnN0QL5gIME1n7jbKJf
yfWxkvBXMINDgHK/RysRMP6FXA6Mw65BGNIuO0Il0FTy12HuKI/coEsG2QARAQAB
-tDZEZWFkZHJvcCBUZXN0IChETyBOT1QgVVNFKSA8dGVzdEBkZWFkZHJvcC5leGFt
-cGxlLmNvbT6JAjgEEwECACIFAlJZi2ACGwMGCwkIBwMCBhUIAgkKCwQWAgMBAh4B
-AheAAAoJEMxA7xIoJxRBMlkP/18HHiAJz2SHxYrzmuw/dsrQVUMvqN8xeWg4ljfq
-nF1O3OkpS19LRqa0oICQC0AvwrjAxP7CESSHZwa1AD7mqTMOJEa/uJNnbBfWuBRq
-xlX8n4JtFZGHvc747iHrv4fF/Mc0dCSnJU4XqPcnOMrUHIddi0Up1Wv5rKM0idDc
-ApPpAuunz/ZvF8s8UXYNkcRxKNeQA9qYCMcI/FDnB2hdbXFP4FwOBWt9NUyvqbrr
-URd40cBpIcKhWgETArwbFy7SrGvL0KHyLINKJxQCTS4yUeyG5r/iHZ/WKoGW46jF
-iOUx3eAzsBvJs/6BGkls4+jEQAdAhqqcqlULWbzHpyoUBl7AIEiJ1AOC4v653Qf+
-GJEMhuLogXabmlUUoyWD75iZXp21IjcP2Fp2KjufHpac4qETYnrNYZu61gTZH6B8
-Zzj/ASblkfaebSoarNeU80AnDibyAtQ/zfqzlqbW6PGIyXH9jdv1rZRppKh2/Tko
-MWlQ8D/EmMPLePxgpEZJblAALwQWdQxv1mLgMkHHuwOBAd/UUh6Sps8FbJWag8on
-KB7y5YV1XYhYS071o0fMjivUIP6ijeeW7xbRkcJwGekkdFLf3PszeKzg/Jq7hTZW
-mOUf8kngoPckhui1WsbpZsWRYCmP6/gNpEhUEPh32dXWw08BMZigKzfpZeLsU9gO
-lOGJuQINBFJZi2ABEADzfv+9Ogb4KEWFom9zMF+xg8bcd/Ct72/sWLQW6Pz6+Skm
-LEHuklTO+k7xiQ6jdzXzj1rTfy317L7G51naBSb6Ekfv8mu2ogOwrvtgYnGCvfCp
-ooUSxcfi+aEJzIJL29TAi1RCLZm15KRbkvEl8wS93BSLiag5w4/8eP1vXebq95Gr
-CZwiNZdhdQs3qn4j3VRvTW/SZHIAdJY+mMfUMPjq4c4sA82os6kVrEnWeLGfT9d+
-knfm9J/2Rumy90bLAY6SFmRZ9/DxwKwbIsVy8CRvU3RVFSX8HCBQepRCQkls9r7K
-VBqYE2Wh+0a+9wHHHNI7VBxKGXPflrirxY1AB5vjLcX1hmXbCoyf4ytgdHyCKDz9
-Oc+xkgJeyVW6XwSqc5EhuNFXp3+C7BF7eQZ1REJLbL6CtEkeF0jHBaTeKM/pN4fV
-hjPiU/FsNmZGKxxLyxDnnDI5pY8bhphVxwBRZ5GtVNqiVNDw+rRACQalpT21OcAg
-LP+Rz+qf3TPyEZN6WPEx8/76ILuSHb8mpOH7W/514f5NuFaAlgmUnO3cT10hh4Iw
-OQ+kvj0qMww8fASI9DJExXUYb3xDSCmOkJPhu1/Drr3gdFBha4/jAz7jBWlsVr2R
-LJzilf8Mi9j8WpHIfP+WXtwWz3+iYPS0SPoB7g9DA0+Ei760pJJf73AEjD+fFwAR
-AQABiQIfBBgBAgAJBQJSWYtgAhsMAAoJEMxA7xIoJxRBp/cP/3lJx9z5yzZA6UvL
-QR6pK+V1iy2hvZ+S+EwYRCiTgYTXekHzLXWwjWGfUYDTHMeaS9O9BMRMGOU3inyb
-47GZSoQ0N0bRVTzrY6/0ifhUSJ00MemOodI1bz4pAMk3uR8iWyhlaGn7JAIAKmCm
-+K0qkeJd61S9iyrx7s9QmaNPnupm5pc+bpOAkbKyq7sEFpWM5Qx82n1tVMtnIW2O
-oRPbz80JkkQB2pl6SjskXqZ89jcFWGI6IChYENKc65xafDt4uFuHU+5j4j2f4ySY
-SwfoWC97MOgJLqA/WimxeeNCYFhykUDWrL5mKBTgMXgH/sYk3GDo7fssaYbKn1xb
-bX4GXQl3+ru4zT6/F7CxZErjLb+evShyf4itM+5AdbKRiRzoraqKblBa4TfJBSqH
-isdcxdZeBe19+jyY6a8ZMcGhrQeksiKxTRh7ylAk7CLVgLEIHLxXzHoZ0oAFz2ul
-G+zH9KS9Pe8MQxHCrlyfoQElQuJoYbrYBOu28itvGPgz6+5xgvZROvPoqIkIk8DY
-t9lJqUFBeZuFJd5W1TuHKLxueVYvSKeG+e3TjOYdJFvDZInM4cNWr8N92mYSiphl
-jiHAKVTQeIf1ma07QUH/ul3YC+g07F+BLonIIXA6uQVebv5iLxTgOzIQwHTJVu4M
-PiQNn1h4dk1RonfV/aJ+de1+qjA8
-=X10h
+tDZTZWN1cmVEcm9wIFRlc3QvRGV2ZWxvcG1lbnQgKERPIE5PVCBVU0UgSU4gUFJP
+RFVDVElPTimJAjsEEwECACUCGwMGCwkIBwMCBhUIAgkKCwQWAgMBAh4BAheABQJS
+m8UzAhkBAAoJEMxA7xIoJxRB1hAP/jVoFRi1R3i4P3EhmaYg9VQUo5SRyfMDoE6r
+FyzOv2x3vRqPM1Bm4ihLQePfwKsJLDo7UVgjmTNEY4bpSYmKus/uo6Kx6yrxm6d/
+JzY0BER+LJi0iA0iyLTqYk3eXyxQmHmy6my8zVyag5k/f/DejSUQgckJZ9pAhr7r
+q4aTCWYapo/6fDM0XAo1T5Upt/iSqHet6NZR15JCDHIvGJYGAxVemccSNKFb1tsn
+5aIMuGDbNivCUIFav+eo2JIEy60BokcZCy68qWwtlO5nIao79MoNMNz2EFSOomOg
+b1sNadEj2vAkLfU4+dOVbYsFGUzOaV0mUHcaTNPYwnK+PgyOi5M05BX55a9FSBgi
+AsEwEnDK1lvzLfWEQxVQvsw9A9vnCbSX8PwC4/uUtokkKxVN9ICl8AfaT38+OUHW
+iNl4NCgd26iRgTLhfMXpTjRyOb2RvFdzLByDEWIbvu5kCh247UFYSL0llk+suNh3
+cm0mOUdL1nZuEo4EyEF1dq+1opMfDMF98q0660wZdwvwUQIXBt/yK3FH0BGA66ai
+R78Z4pH1JqtYvzfDJx+XP8O2N9GYGd7kpak/5C2BTJzLVyzagB1yi8SmiYna5yQj
+EqW5Txeq0GGd2H4KtUETUevU4x0Rw3luHToaDd9d5sioF48o87PlGwk+OCofPfLj
+LnwFPNZcuQINBFJZi2ABEADzfv+9Ogb4KEWFom9zMF+xg8bcd/Ct72/sWLQW6Pz6
++SkmLEHuklTO+k7xiQ6jdzXzj1rTfy317L7G51naBSb6Ekfv8mu2ogOwrvtgYnGC
+vfCpooUSxcfi+aEJzIJL29TAi1RCLZm15KRbkvEl8wS93BSLiag5w4/8eP1vXebq
+95GrCZwiNZdhdQs3qn4j3VRvTW/SZHIAdJY+mMfUMPjq4c4sA82os6kVrEnWeLGf
+T9d+knfm9J/2Rumy90bLAY6SFmRZ9/DxwKwbIsVy8CRvU3RVFSX8HCBQepRCQkls
+9r7KVBqYE2Wh+0a+9wHHHNI7VBxKGXPflrirxY1AB5vjLcX1hmXbCoyf4ytgdHyC
+KDz9Oc+xkgJeyVW6XwSqc5EhuNFXp3+C7BF7eQZ1REJLbL6CtEkeF0jHBaTeKM/p
+N4fVhjPiU/FsNmZGKxxLyxDnnDI5pY8bhphVxwBRZ5GtVNqiVNDw+rRACQalpT21
+OcAgLP+Rz+qf3TPyEZN6WPEx8/76ILuSHb8mpOH7W/514f5NuFaAlgmUnO3cT10h
+h4IwOQ+kvj0qMww8fASI9DJExXUYb3xDSCmOkJPhu1/Drr3gdFBha4/jAz7jBWls
+Vr2RLJzilf8Mi9j8WpHIfP+WXtwWz3+iYPS0SPoB7g9DA0+Ei760pJJf73AEjD+f
+FwARAQABiQIfBBgBAgAJBQJSWYtgAhsMAAoJEMxA7xIoJxRBp/cP/3lJx9z5yzZA
+6UvLQR6pK+V1iy2hvZ+S+EwYRCiTgYTXekHzLXWwjWGfUYDTHMeaS9O9BMRMGOU3
+inyb47GZSoQ0N0bRVTzrY6/0ifhUSJ00MemOodI1bz4pAMk3uR8iWyhlaGn7JAIA
+KmCm+K0qkeJd61S9iyrx7s9QmaNPnupm5pc+bpOAkbKyq7sEFpWM5Qx82n1tVMtn
+IW2OoRPbz80JkkQB2pl6SjskXqZ89jcFWGI6IChYENKc65xafDt4uFuHU+5j4j2f
+4ySYSwfoWC97MOgJLqA/WimxeeNCYFhykUDWrL5mKBTgMXgH/sYk3GDo7fssaYbK
+n1xbbX4GXQl3+ru4zT6/F7CxZErjLb+evShyf4itM+5AdbKRiRzoraqKblBa4TfJ
+BSqHisdcxdZeBe19+jyY6a8ZMcGhrQeksiKxTRh7ylAk7CLVgLEIHLxXzHoZ0oAF
+z2ulG+zH9KS9Pe8MQxHCrlyfoQElQuJoYbrYBOu28itvGPgz6+5xgvZROvPoqIkI
+k8DYt9lJqUFBeZuFJd5W1TuHKLxueVYvSKeG+e3TjOYdJFvDZInM4cNWr8N92mYS
+iphljiHAKVTQeIf1ma07QUH/ul3YC+g07F+BLonIIXA6uQVebv5iLxTgOzIQwHTJ
+Vu4MPiQNn1h4dk1RonfV/aJ+de1+qjA8
+=XVz8
-----END PGP PUBLIC KEY BLOCK-----
diff --git a/securedrop/test_journalist_key.sec b/securedrop/test_journalist_key.sec
--- a/securedrop/test_journalist_key.sec
+++ b/securedrop/test_journalist_key.sec
@@ -38,20 +38,20 @@ LRSDPf//qAsXrN8YkrOm7BsfRp9tMzgCEpkCgDj3JZDLh1TlmX8Gmsa/xVq+bfNP
8W0ELulOrcCQ0aAQxrJRCHjnUAzcI2tjzT6961PrrEYTsy7tlZ7mYZ2SmPyrPZEh
SNVnO8H3rDaBXaqqLOi+SzrSkYn9DjA+IEp4Pi1J8mZWs5vV662xrqnHPhzNKf6Y
dAAF5GlXOrEqCj2qF/i79P9kh5KHr37ZsgFl11zesVEyezL2sScv6KmeRjz3O3Nk
-TagLhJTzBNoUZymiq5CQlY2nn5c5UeFx9lpRHnJRkv9p8adspqwYKguBi7Q2RGVh
-ZGRyb3AgVGVzdCAoRE8gTk9UIFVTRSkgPHRlc3RAZGVhZGRyb3AuZXhhbXBsZS5j
-b20+iQI4BBMBAgAiBQJSWYtgAhsDBgsJCAcDAgYVCAIJCgsEFgIDAQIeAQIXgAAK
-CRDMQO8SKCcUQTJZD/9fBx4gCc9kh8WK85rsP3bK0FVDL6jfMXloOJY36pxdTtzp
-KUtfS0amtKCAkAtAL8K4wMT+whEkh2cGtQA+5qkzDiRGv7iTZ2wX1rgUasZV/J+C
-bRWRh73O+O4h67+HxfzHNHQkpyVOF6j3JzjK1ByHXYtFKdVr+ayjNInQ3AKT6QLr
-p8/2bxfLPFF2DZHEcSjXkAPamAjHCPxQ5wdoXW1xT+BcDgVrfTVMr6m661EXeNHA
-aSHCoVoBEwK8Gxcu0qxry9Ch8iyDSicUAk0uMlHshua/4h2f1iqBluOoxYjlMd3g
-M7AbybP+gRpJbOPoxEAHQIaqnKpVC1m8x6cqFAZewCBIidQDguL+ud0H/hiRDIbi
-6IF2m5pVFKMlg++YmV6dtSI3D9hadio7nx6WnOKhE2J6zWGbutYE2R+gfGc4/wEm
-5ZH2nm0qGqzXlPNAJw4m8gLUP836s5am1ujxiMlx/Y3b9a2UaaSodv05KDFpUPA/
-xJjDy3j8YKRGSW5QAC8EFnUMb9Zi4DJBx7sDgQHf1FIekqbPBWyVmoPKJyge8uWF
-dV2IWEtO9aNHzI4r1CD+oo3nlu8W0ZHCcBnpJHRS39z7M3is4Pyau4U2VpjlH/JJ
-4KD3JIbotVrG6WbFkWApj+v4DaRIVBD4d9nV1sNPATGYoCs36WXi7FPYDpThiZ0H
+TagLhJTzBNoUZymiq5CQlY2nn5c5UeFx9lpRHnJRkv9p8adspqwYKguBi7Q2U2Vj
+dXJlRHJvcCBUZXN0L0RldmVsb3BtZW50IChETyBOT1QgVVNFIElOIFBST0RVQ1RJ
+T04piQI4BBMBAgAiBQJSm8UDAhsDBgsJCAcDAgYVCAIJCgsEFgIDAQIeAQIXgAAK
+CRDMQO8SKCcUQReED/4uGk1OGSJHip2EsgAPrwL6L3aT9FMKt+eQCLoj5DdoH3tY
+0mXGMP/0M/oIq2Y+q6BEXVNEYOy2QzTnnPqn965tqN/SZF1CNu/IYmxCJj7TSJi/
+MuWtg7IebR8KvWLKJjW4PU5ybmB2hzyO3jTEzXY3j8bocGfx3Q8B6ot/MdK8ss5J
+rLSIPlgQHhyXloe4CTTk0alQbtt8KEp0kMXmqjrz66AsofwjzcezOn1PSc0S4tV7
+0OkIEapevBcr7cnYQv3gWSXpK4zZNg9NZ5dLR73g64Lv+GqK0UBksueMfEEmx/uD
+Bd7/uxmz7jWFb3D9MBLCjAMQ+s8Kh8bJQ/HPMjIh8T9y8ek/dI5Il7ehFaci1yzT
++qIPt7SArj3q4KR5lCNeIK7Bu8Kuu2VgfCRske2PJAQlauu7jO3XZcLSuihwTdLL
+se+WIdW6miyczJNAt1pHknHsdXANegJh7eoAy+ghFok7kZpYMTR/iy95EqNxAt6l
+LivYeyzUtfPjHjDpqPUtrZRGipqmFwIcTn5E/HokkViUSizx0Sd2LyJ2tox6a+az
+lreW4hRY5WVPclVeTynvAtMrSl1DEErRoVK/AZKnBgUEeDCd9g/EiRzOLrX5azb9
+nCiFlLMeWWVmbefvnCXrXJqVQNrVZdelSZakJpJ/oQpKyv/5nU9pcgeVrzP68Z0H
GARSWYtgARAA837/vToG+ChFhaJvczBfsYPG3Hfwre9v7Fi0Fuj8+vkpJixB7pJU
zvpO8YkOo3c1849a038t9ey+xudZ2gUm+hJH7/JrtqIDsK77YGJxgr3wqaKFEsXH
4vmhCcyCS9vUwItUQi2ZteSkW5LxJfMEvdwUi4moOcOP/Hj9b13m6veRqwmcIjWX
@@ -102,5 +102,5 @@ EcKuXJ+hASVC4mhhutgE67byK28Y+DPr7nGC9lE68+ioiQiTwNi32UmpQUF5m4Ul
3lbVO4covG55Vi9Ip4b57dOM5h0kW8Nkiczhw1avw33aZhKKmGWOIcApVNB4h/WZ
rTtBQf+6XdgL6DTsX4EuicghcDq5BV5u/mIvFOA7MhDAdMlW7gw+JA2fWHh2TVGi
d9X9on517X6qMDw=
-=jm9I
+=E6hg
-----END PGP PRIVATE KEY BLOCK-----
| change internal naming to securedrop
change internal references in code to securedrop
| You think this is important for 0.1? The only reason I'm hesitant is because we don't have tests yet, so I would hate to break everything at the last minute. But if it would be easy to do without trouble let's go ahead and do it.
No, I don't think it's necessary for 0.1, given the number of references to deaddrop. I've added this to the 0.2 milestone.
| 2013-11-30T23:52:29Z | [] | [] |
freedomofpress/securedrop | 177 | freedomofpress__securedrop-177 | [
"142"
] | c76d12734b07e8fa523175eb0fdc5c85acad9318 | diff --git a/securedrop/crypto_util.py b/securedrop/crypto_util.py
--- a/securedrop/crypto_util.py
+++ b/securedrop/crypto_util.py
@@ -108,6 +108,11 @@ def getkey(name):
return None
+def get_key_by_fingerprint(fingerprint):
+ matches = filter(lambda k: k['fingerprint'] == fingerprint, gpg.list_keys())
+ return matches[0] if matches else None
+
+
def _shquote(s):
return "\\'".join("'" + p + "'" for p in s.split("'"))
diff --git a/securedrop/source.py b/securedrop/source.py
--- a/securedrop/source.py
+++ b/securedrop/source.py
@@ -5,7 +5,7 @@
from functools import wraps
from flask import (Flask, request, render_template, session, redirect, url_for,
- flash, abort, g)
+ flash, abort, g, send_file)
from flask_wtf.csrf import CsrfProtect
import config
@@ -218,6 +218,20 @@ def tor2web_warning():
return render_template("tor2web-warning.html")
[email protected]('/journalist-key')
+def download_journalist_pubkey():
+ journalist_pubkey = crypto_util.gpg.export_keys(config.JOURNALIST_KEY)
+ return send_file(StringIO(journalist_pubkey),
+ mimetype="application/pgp-keys",
+ attachment_filename=config.JOURNALIST_KEY + ".asc",
+ as_attachment=True)
+
+
[email protected]('/why-journalist-key')
+def why_download_journalist_pubkey():
+ return render_template("why-journalist-key.html")
+
+
@app.errorhandler(404)
def page_not_found(error):
return render_template('notfound.html'), 404
| diff --git a/securedrop/test.py b/securedrop/test.py
--- a/securedrop/test.py
+++ b/securedrop/test.py
@@ -143,6 +143,17 @@ def _new_codename(self):
rv = c.post('/create')
return codename
+ def test_lookup(self):
+ """Test various elements on the /lookup page"""
+ codename = self._new_codename()
+ rv = self.client.post('login', data=dict(codename=codename),
+ follow_redirects=True)
+ # redirects to /lookup
+ self.assertIn("Download journalist's public key", rv.data)
+ # download the public key
+ rv = self.client.get('journalist-key')
+ self.assertIn("BEGIN PGP PUBLIC KEY BLOCK", rv.data)
+
def test_login(self):
rv = self.client.get('/login')
self.assert200(rv)
| Make journalist's public keys available to sources
Technically sophisticated sources might want to encrypt their submissions themselves, so the cleartext _never_ exists on the web server. We should add a link to the submission page to optionally download the journalist's public key (perhaps in an "Advanced options" section).
There is an interesting trust problem in this case. We are trying to stop an attacker who has somehow gained access to the source server from accessing submissions in cleartext. However, if they have access to the server then they could just send their own public key to a source who wants to use this, tricking them into encrypting it to the adversary instead of the journalist.
This could be mitigated with any of the standard GPG key verification mechanisms. For example, we could publish the valid GPG key fingerprint on the website and/or in a physical newspaper/magazine so savvy sources can additionally verify the key. We could also run a third party server that periodically checks the public key for unauthorized changes.
| If a source submits an encrypted blob of information, will SecureDrop encrypt this blob again or will it just pass it on to the journalist server?
@runasand We could, or we could detect that it is encrypted (via `file` and/or the presence of `.gpg` extension) and not encrypt it again in that case.
| 2013-12-01T18:06:41Z | [] | [] |
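The follow-up discussion suggests skipping re-encryption when a source uploads an already-encrypted blob. That is not part of the patch above; as a purely illustrative sketch under that assumption, detection could look something like this (a heuristic only, with `file`(1) or python-magic as the more robust option mentioned in the comments):

```python
def looks_pgp_encrypted(filename: str, first_bytes: bytes) -> bool:
    """Heuristic check for an already-encrypted upload.

    Relies only on the extension or the ASCII-armor header; a more robust
    check could shell out to `file`(1) or use python-magic, as suggested
    in the discussion above.
    """
    if filename.lower().endswith((".gpg", ".pgp", ".asc")):
        return True
    return first_bytes.lstrip().startswith(b"-----BEGIN PGP MESSAGE-----")


if __name__ == "__main__":
    print(looks_pgp_encrypted("leak.gpg", b"\x85\x02..."))                      # True (extension)
    print(looks_pgp_encrypted("notes.txt", b"-----BEGIN PGP MESSAGE-----\n"))   # True (armor header)
    print(looks_pgp_encrypted("notes.txt", b"plain text"))                      # False
```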
freedomofpress/securedrop | 188 | freedomofpress__securedrop-188 | [
"185"
] | 027ed06e736a69b5ef730b16ab5af2ef89ccd33b | diff --git a/securedrop/source.py b/securedrop/source.py
--- a/securedrop/source.py
+++ b/securedrop/source.py
@@ -61,7 +61,9 @@ def setup_g():
# and we don't need to waste time running if we're just serving a static
# resource that won't need to access these common values.
if logged_in():
- g.flagged = session['flagged']
+ # We use session.get (which defaults to None if 'flagged' is not in the
+ # session) to avoid a KeyError on the redirect from login/ to lookup/
+ g.flagged = session.get('flagged')
g.codename = session['codename']
g.sid = crypto_util.shash(g.codename)
g.loc = store.path(g.sid)
@@ -128,6 +130,8 @@ def lookup():
msgs = []
flagged = False
for fn in os.listdir(g.loc):
+ # TODO: make 'flag' a db column, so we can replace this with a db
+ # lookup in the future
if fn == '_FLAG':
flagged = True
continue
| diff --git a/securedrop/test.py b/securedrop/test.py
--- a/securedrop/test.py
+++ b/securedrop/test.py
@@ -40,6 +40,15 @@ def _setup_test_docs(sid, files):
fp.write(str(uuid.uuid4()))
return filenames
+def _logout(app):
+ # See http://flask.pocoo.org/docs/testing/#accessing-and-modifying-sessions
+ # This is necessary because SecureDrop doesn't have a logout button, so a
+ # user is logged in until they close the browser, which clears the session.
+ # For testing, this function simulates closing the browser at places
+ # where a source is likely to do so (for instance, between submitting a
+ # document and checking for a journalist reply).
+ with app.session_transaction() as sess:
+ sess.clear()
def shared_setup():
"""Set up the file system, GPG, and database"""
@@ -157,21 +166,27 @@ def test_lookup(self):
rv = self.client.get('journalist-key')
self.assertIn("BEGIN PGP PUBLIC KEY BLOCK", rv.data)
- def test_login(self):
+ def test_login_and_logout(self):
rv = self.client.get('/login')
self.assert200(rv)
self.assertIn("Already submitted something?", rv.data)
codename = self._new_codename()
- rv = self.client.post('/login', data=dict(codename=codename),
- follow_redirects=True)
- self.assert200(rv)
- self.assertIn("Submit a document, message, or both", rv.data)
+ with self.client as c:
+ rv = c.post('/login', data=dict(codename=codename),
+ follow_redirects=True)
+ self.assert200(rv)
+ self.assertIn("Submit a document, message, or both", rv.data)
+ self.assertTrue(session['logged_in'])
+ _logout(c)
+ self.assertEquals(len(session), 0)
- rv = self.client.post('/login', data=dict(codename='invalid'),
- follow_redirects=True)
- self.assert200(rv)
- self.assertIn('Sorry, that is not a recognized codename.', rv.data)
+ with self.client as c:
+ rv = self.client.post('/login', data=dict(codename='invalid'),
+ follow_redirects=True)
+ self.assert200(rv)
+ self.assertIn('Sorry, that is not a recognized codename.', rv.data)
+ self.assertNotIn('logged_in', session)
def test_submit_message(self):
codename = self._new_codename()
@@ -262,12 +277,13 @@ def test_submit_message(self):
rv = source_app.post('/create', follow_redirects=True)
codename = session['codename']
sid = g.sid
- # redirected to submission form
- rv = self.source_app.post('/submit', data=dict(
- msg=test_msg,
- fh=(StringIO(''), ''),
- ), follow_redirects=True)
- self.assertEqual(rv.status_code, 200)
+ # redirected to submission form
+ rv = self.source_app.post('/submit', data=dict(
+ msg=test_msg,
+ fh=(StringIO(''), ''),
+ ), follow_redirects=True)
+ self.assertEqual(rv.status_code, 200)
+ _logout(source_app)
rv = self.journalist_app.get('/')
self.assertEqual(rv.status_code, 200)
@@ -334,12 +350,13 @@ def test_submit_file(self):
rv = source_app.post('/create', follow_redirects=True)
codename = session['codename']
sid = g.sid
- # redirected to submission form
- rv = self.source_app.post('/submit', data=dict(
- msg="",
- fh=(StringIO(test_file_contents), test_filename),
- ), follow_redirects=True)
- self.assertEqual(rv.status_code, 200)
+ # redirected to submission form
+ rv = self.source_app.post('/submit', data=dict(
+ msg="",
+ fh=(StringIO(test_file_contents), test_filename),
+ ), follow_redirects=True)
+ self.assertEqual(rv.status_code, 200)
+ _logout(source_app)
rv = self.journalist_app.get('/')
self.assertEqual(rv.status_code, 200)
@@ -412,13 +429,14 @@ def test_reply(self):
codename = session['codename']
flagged = session['flagged']
sid = g.sid
- # redirected to submission form
- rv = self.source_app.post('/submit', data=dict(
- msg=test_msg,
- fh=(StringIO(''), ''),
- ), follow_redirects=True)
- self.assertEqual(rv.status_code, 200)
- self.assertFalse(flagged)
+ # redirected to submission form
+ rv = source_app.post('/submit', data=dict(
+ msg=test_msg,
+ fh=(StringIO(''), ''),
+ ), follow_redirects=True)
+ self.assertEqual(rv.status_code, 200)
+ self.assertFalse(flagged)
+ _logout(source_app)
rv = self.journalist_app.get('/')
self.assertEqual(rv.status_code, 200)
@@ -433,11 +451,13 @@ def test_reply(self):
rv = source_app.post('/login', data=dict(
codename=codename), follow_redirects=True)
self.assertEqual(rv.status_code, 200)
- self.assertFalse(session['flagged'])
+ _logout(source_app)
- rv = self.journalist_app.post('/flag', data=dict(
- sid=sid))
- self.assertEqual(rv.status_code, 200)
+ with self.journalist_app as journalist_app:
+ rv = journalist_app.post('/flag', data=dict(
+ sid=sid))
+ self.assertEqual(rv.status_code, 200)
+ _logout(journalist_app)
with self.source_app as source_app:
rv = source_app.post('/login', data=dict(
@@ -446,6 +466,7 @@ def test_reply(self):
self.assertTrue(session['flagged'])
source_app.get('/lookup')
self.assertTrue(g.flagged)
+ _logout(source_app)
# Block until the reply keypair has been generated, so we can test
# sending a reply
@@ -458,24 +479,30 @@ def test_reply(self):
self.assertEqual(rv.status_code, 200)
self.assertIn("Thanks! Your reply has been stored.", rv.data)
- rv = self.journalist_app.get(col_url)
- self.assertIn("reply-", rv.data)
+ with self.journalist_app as journalist_app:
+ rv = journalist_app.get(col_url)
+ self.assertIn("reply-", rv.data)
+ _logout(journalist_app)
_block_on_reply_keypair_gen(codename)
- rv = self.source_app.get('/lookup')
- self.assertEqual(rv.status_code, 200)
- self.assertIn(
- "You have received a reply. For your security, please delete all replies when you're done with them.", rv.data)
- self.assertIn(test_reply, rv.data)
-
- soup = BeautifulSoup(rv.data)
- msgid = soup.select('form.message > input[name="msgid"]')[0]['value']
- rv = self.source_app.post('/delete', data=dict(
- sid=sid,
- msgid=msgid,
- ), follow_redirects=True)
- self.assertEqual(rv.status_code, 200)
- self.assertIn("Reply deleted", rv.data)
+ with self.source_app as source_app:
+ rv = source_app.post('/login', data=dict(codename=codename), follow_redirects=True)
+ self.assertEqual(rv.status_code, 200)
+ rv = source_app.get('/lookup')
+ self.assertEqual(rv.status_code, 200)
+ self.assertIn(
+ "You have received a reply. For your security, please delete all replies when you're done with them.", rv.data)
+ self.assertIn(test_reply, rv.data)
+
+ soup = BeautifulSoup(rv.data)
+ msgid = soup.select('form.message > input[name="msgid"]')[0]['value']
+ rv = source_app.post('/delete', data=dict(
+ sid=sid,
+ msgid=msgid,
+ ), follow_redirects=True)
+ self.assertEqual(rv.status_code, 200)
+ self.assertIn("Reply deleted", rv.data)
+ _logout(source_app)
class TestStore(unittest.TestCase):
| Returning "flagged for reply" sources generate an error
When a source is flagged for a reply and then returns to the site after their initial session has ended, the error below is generated.
```
[Mon Dec 02 21:49:44 2013] [error] ERROR:source:Exception on / [GET]
[Mon Dec 02 21:49:44 2013] [error] Traceback (most recent call last):
[Mon Dec 02 21:49:44 2013] [error]   File "/usr/local/lib/python2.7/dist-packages/flask/app.py", line 1817, in wsgi_app
[Mon Dec 02 21:49:44 2013] [error]     response = self.full_dispatch_request()
[Mon Dec 02 21:49:44 2013] [error]   File "/usr/local/lib/python2.7/dist-packages/flask/app.py", line 1477, in full_dispatch_request
[Mon Dec 02 21:49:44 2013] [error]     rv = self.handle_user_exception(e)
[Mon Dec 02 21:49:44 2013] [error]   File "/usr/local/lib/python2.7/dist-packages/flask/app.py", line 1381, in handle_user_exception
[Mon Dec 02 21:49:44 2013] [error]     reraise(exc_type, exc_value, tb)
[Mon Dec 02 21:49:44 2013] [error]   File "/usr/local/lib/python2.7/dist-packages/flask/app.py", line 1473, in full_dispatch_request
[Mon Dec 02 21:49:44 2013] [error]     rv = self.preprocess_request()
[Mon Dec 02 21:49:44 2013] [error]   File "/usr/local/lib/python2.7/dist-packages/flask/app.py", line 1666, in preprocess_request
[Mon Dec 02 21:49:44 2013] [error]     rv = func()
[Mon Dec 02 21:49:44 2013] [error]   File "/var/www/securedrop/source.py", line 52, in decorated_function
[Mon Dec 02 21:49:44 2013] [error]     return f(*args, **kwargs)
[Mon Dec 02 21:49:44 2013] [error]   File "/var/www/securedrop/source.py", line 64, in setup_g
[Mon Dec 02 21:49:44 2013] [error]     g.flagged = session['flagged']
[Mon Dec 02 21:49:44 2013] [error]   File "/usr/local/lib/python2.7/dist-packages/werkzeug/local.py", line 368, in <lambda>
[Mon Dec 02 21:49:44 2013] [error]     __getitem__ = lambda x, i: x._get_current_object()[i]
[Mon Dec 02 21:49:44 2013] [error] KeyError: 'flagged'
```
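The traceback shows `setup_g()` reading `session['flagged']` unconditionally, which raises a `KeyError` for a returning source whose rebuilt session never had that key set. Below is a minimal sketch of the kind of defensive fix this points at; the function body is reconstructed for illustration and is not the actual patch.
```
# Illustrative sketch only -- the surrounding function is reconstructed from
# the traceback above, not copied from the real fix.
from flask import g, session

def setup_g():
    """Store commonly used values in Flask's special g object."""
    if session.get('logged_in'):
        # session.get() returns None instead of raising KeyError when the
        # 'flagged' key is missing, e.g. for a source who logs back in after
        # the original session (which set 'flagged') has been cleared.
        g.flagged = session.get('flagged')
        g.codename = session['codename']
```
The test changes above, which explicitly log sources out between steps, are what exercise this path: a fresh login rebuilds the session without the `flagged` key.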
| I'll dig into this later tonight when I have time.
| 2013-12-03T12:26:59Z | [] | [] |
freedomofpress/securedrop | 230 | freedomofpress__securedrop-230 | [
"189"
] | 0b905aa89dbcb11df7e2de08beb503c76865ec7f | diff --git a/securedrop/crypto_util.py b/securedrop/crypto_util.py
--- a/securedrop/crypto_util.py
+++ b/securedrop/crypto_util.py
@@ -114,6 +114,17 @@ def genkeypair(name, secret):
))
+def delete_reply_keypair(source_id):
+ key = getkey(source_id)
+ # If this source was never flagged for review, they won't have a reply keypair
+ if not key: return
+ # The private key needs to be deleted before the public key can be deleted
+ # http://pythonhosted.org/python-gnupg/#deleting-keys
+ gpg.delete_keys(key, True) # private key
+ gpg.delete_keys(key) # public key
+ # TODO: srm?
+
+
def getkey(name):
for key in gpg.list_keys():
for uid in key['uids']:
@@ -164,11 +175,6 @@ def decrypt(name, secret, s):
return gpg.decrypt(s, passphrase=secret).data
-def secureunlink(fn):
- store.verify(fn)
- return subprocess.check_call(['srm', fn])
-
-
if __name__ == "__main__":
import doctest
doctest.testmod()
diff --git a/securedrop/db.py b/securedrop/db.py
--- a/securedrop/db.py
+++ b/securedrop/db.py
@@ -62,3 +62,16 @@ def regenerate_display_id(filesystem_id):
session.execute(add)
session.commit()
session.close()
+
+
+def delete_source(source_id):
+ session = sqlalchemy_handle()
+ try:
+ delete = sources.delete().where(
+ sources.c.filesystem_id == source_id)
+ except SQLAlchemyError as e:
+ # TODO: proper logging
+ print "Exception occurred attempting to delete source (source_id: %s): %s" % (source_id, e)
+ session.execute(delete)
+ session.commit()
+ session.close()
diff --git a/securedrop/journalist.py b/securedrop/journalist.py
--- a/securedrop/journalist.py
+++ b/securedrop/journalist.py
@@ -3,7 +3,8 @@
from datetime import datetime
import uuid
-from flask import Flask, request, render_template, send_file, redirect, flash
+from flask import (Flask, request, render_template, send_file, redirect,
+ flash, url_for)
from flask_wtf.csrf import CsrfProtect
import config
@@ -64,12 +65,12 @@ def index():
dirs = os.listdir(config.STORE_DIR)
cols = []
db_session = db.sqlalchemy_handle()
- for d in dirs:
- display_id = db.display_id(d, db_session)
+ for source_id in dirs:
+ display_id = db.display_id(source_id, db_session)
cols.append(dict(
- name=d,
- sid=display_id,
- date=str(datetime.fromtimestamp(os.stat(store.path(d)).st_mtime)
+ sid=source_id,
+ name=display_id,
+ date=str(datetime.fromtimestamp(os.stat(store.path(source_id)).st_mtime)
).split('.')[0]
))
db_session.close()
@@ -86,6 +87,34 @@ def col(sid):
haskey=haskey, flagged=flagged)
+def delete_collection(source_id):
+ store.delete_source_directory(source_id)
+ crypto_util.delete_reply_keypair(source_id)
+ db.delete_source(source_id)
+
+
+@app.route('/col/delete', methods=('POST',))
+def col_delete():
+ if 'cols_selected' in request.form:
+ # deleting multiple collections from the index
+ if len('cols_selected') < 1:
+ flash("No collections selected to delete!", "warning")
+ else:
+ cols_selected = request.form.getlist('cols_selected')
+ for source_id in cols_selected:
+ delete_collection(source_id)
+ flash("%s %s deleted" % (
+ len(cols_selected),
+ "collection" if len(cols_selected) == 1 else "collections"
+ ), "notification")
+ else:
+ # deleting a single collection from its /col page
+ source_id, col_name = request.form['sid'], request.form['col_name']
+ delete_collection(source_id)
+ flash("%s's collection deleted" % (col_name,), "notification")
+
+ return redirect(url_for('index'))
+
@app.route('/col/<sid>/<fn>')
def doc(sid, fn):
if '..' in fn or fn.startswith('/'):
@@ -135,7 +164,7 @@ def bulk_delete(sid, docs_selected):
if confirm_delete:
for doc in docs_selected:
fn = store.path(sid, doc['name'])
- crypto_util.secureunlink(fn)
+ store.secure_unlink(fn)
return render_template(
'delete.html', sid=sid, codename=db.display_id(sid, db.sqlalchemy_handle()),
docs_selected=docs_selected, confirm_delete=confirm_delete)
diff --git a/securedrop/source.py b/securedrop/source.py
--- a/securedrop/source.py
+++ b/securedrop/source.py
@@ -196,7 +196,7 @@ def delete():
potential_files = os.listdir(g.loc)
if msgid not in potential_files:
abort(404) # TODO are the checks necessary?
- crypto_util.secureunlink(store.path(g.sid, msgid))
+ store.secure_unlink(store.path(g.sid, msgid))
flash("Reply deleted.", "notification")
return redirect(url_for('lookup'))
diff --git a/securedrop/store.py b/securedrop/store.py
--- a/securedrop/store.py
+++ b/securedrop/store.py
@@ -6,6 +6,7 @@
import crypto_util
import uuid
import tempfile
+import subprocess
from cStringIO import StringIO
import logging
@@ -73,6 +74,7 @@ def get_bulk_archive(filenames):
zip.write(filename, arcname=os.path.basename(filename))
return zip_file
+
def save_file_submission(sid, filename, stream):
sanitized_filename = secure_filename(filename)
@@ -84,6 +86,20 @@ def save_file_submission(sid, filename, stream):
file_loc = path(sid, "%s_doc.zip.gpg" % uuid.uuid4())
crypto_util.encrypt(config.JOURNALIST_KEY, s, file_loc)
+
def save_message_submission(sid, message):
msg_loc = path(sid, '%s_msg.gpg' % uuid.uuid4())
crypto_util.encrypt(config.JOURNALIST_KEY, message, msg_loc)
+
+
+def secure_unlink(fn, recursive=False):
+ verify(fn)
+ command = ['srm']
+ if recursive:
+ command.append('-r')
+ command.append(fn)
+ return subprocess.check_call(command)
+
+
+def delete_source_directory(source_id):
+ secure_unlink(path(source_id), recursive=True)
| diff --git a/securedrop/test.py b/securedrop/test.py
--- a/securedrop/test.py
+++ b/securedrop/test.py
@@ -516,7 +516,7 @@ def helper_test_reply(self, test_reply, expected_success=True):
self.assertEqual(rv.status_code, 200)
if not expected_success:
# there should be no reply
- self.assertTrue("You have received a reply." not in rv.data)
+ self.assertTrue("You have received a reply." not in rv.data)
else:
self.assertIn(
"You have received a reply. For your security, please delete all replies when you're done with them.", rv.data)
@@ -532,6 +532,64 @@ def helper_test_reply(self, test_reply, expected_success=True):
_logout(source_app)
+ def test_delete_collection(self):
+ """Test the "delete collection" button on each collection page"""
+ # first, add a source
+ self.source_app.get('/generate')
+ self.source_app.post('/create')
+
+ rv = self.journalist_app.get('/')
+ # navigate to the collection page
+ soup = BeautifulSoup(rv.data)
+ first_col_url = soup.select('ul#cols > li a')[0]['href']
+ rv = self.journalist_app.get(first_col_url)
+ self.assertEqual(rv.status_code, 200)
+
+ # find the delete form and extract the post parameters
+ soup = BeautifulSoup(rv.data)
+ delete_form_inputs = soup.select('form#delete-collection')[0]('input')
+ sid = delete_form_inputs[1]['value']
+ col_name = delete_form_inputs[2]['value']
+ # POST to /col/delete
+ rv = self.journalist_app.post('/col/delete', data=dict(
+ sid=sid,
+ col_name=col_name
+ ), follow_redirects=True)
+ self.assertEquals(rv.status_code, 200)
+ # /col/delete redirects to the index
+ self.assertIn(escape("%s's collection deleted" % (col_name,)), rv.data)
+ self.assertIn("No documents have been submitted!", rv.data)
+
+
+ def test_delete_collections(self):
+ """Test the "delete selected" checkboxes on the index page that can be
+ used to delete multiple collections"""
+ # first, add some sources
+ num_sources = 2
+ for i in range(num_sources):
+ self.source_app.get('/generate')
+ self.source_app.post('/create')
+
+ rv = self.journalist_app.get('/')
+ # get all the checkbox values
+ soup = BeautifulSoup(rv.data)
+ checkbox_values = [ checkbox['value'] for checkbox in
+ soup.select('input[name="cols_selected"]') ]
+ rv = self.journalist_app.post('/col/delete', data=dict(
+ cols_selected=checkbox_values
+ ), follow_redirects=True)
+ self.assertEqual(rv.status_code, 200)
+ self.assertIn("%s collections deleted" % (num_sources,), rv.data)
+
+ # TODO: functional tests (selenium)
+ # This code just tests the underlying API and *does not* test the
+ # interactions due to the Javascript in journalist.js. Once we have
+ # functional tests, we should add tests for:
+ # 1. Warning dialog appearance
+ # 2. "Don't show again" checkbox behavior
+ # 2. Correct behavior on "yes" and "no" buttons
+
+
class TestStore(unittest.TestCase):
'''The set of tests for store.py.'''
| Journalists should be able to delete codenames from interface
Right now, the delete function on version 0.2 is a big improvement. Under each code name, journalists can now easily delete any and all files from a particular source.
However, on the document interface landing page, before they click over to an individual source, journalists still do not have the ability to delete the code names of sources that are just spam or that they no longer talk to. The code names will stay there even if all messages under their name have been deleted.
Eventually this will lead to the document interface getting very cluttered.
In addition to deleting files, journalists should be able to delete code names off the document interface landing page.
| This was originally coded so an empty source directory is simply not included in the collections list. When we added the ability for journalists to delete documents, this was confusing. It's confusing to delete all the documents from a collection and, once it's done, see the page "disappear" from the main list. And that would make it impossible for a journalist to send replies to a source after they'd deleted their submissions (say, after moving the submissions to cold storage).
If we completely delete the source directory, then that source will no longer be able to log in - their codename will no longer be valid. If we can trust that journalists would never delete a collection from a source that is planning to return, then I'm ok with that.
Otherwise, the best implementation might be to set a "deleted" flag, either in the database or in a hidden file like we currently do with "flag" (for performance reasons, it's best to do this in the database). Then the directory will still exist, but if the deleted flag is set, it will not be shown in the collections list. Then if the source logs in again and submits something, we can have the submission process reset the delete flag if necessary. Then the same collection will reappear in the collections list. The only problem with this approach is it doesn't achieve data minimization - every source that ever existed will leave a permanent record, in the form of their hashed codename, on the server. This is not a desirable property. We could solve this problem by enforcing a window of automatic deletion, as has been discussed before (but for which no issue has yet been created).
For now, I think I'm in favor of the second solution (delete the directory) as long as we advise journalists that it will "lock out" that source. This is actually desirable from a DoS point of view. Otherwise, a spam account, once deleted, could be easily recreated by simply logging in and sending another spam submission.
Looping back around on this. Agreed @garrettr, let's go with the second solution and delete the directory while strongly advising the journalist before deleting that it will lock out the source.
This apparently is Forbes' number one complaint so far too, that they can't delete spam codenames.
If we go with warning the journalist and deleting the source's directory should we also delete the source's GPG keypair? From a data minimization point of view it would make sense.
@dolanjs definitely
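Putting the thread together, the agreed flow is: securely remove the source's directory, delete the reply keypair (per the question just above), and drop the database row. The sketch below is a self-contained illustration; the paths, GPG home, and SQLite schema are assumptions, and the merged patch instead delegates these steps to `store`, `crypto_util`, and `db` helpers.
```
# Self-contained illustration of the deletion flow discussed above.
# STORE_DIR, GPG_HOME, DB_PATH and the table layout are assumed values.
import os
import sqlite3
import subprocess

import gnupg  # python-gnupg, already a SecureDrop dependency

STORE_DIR = "/var/lib/securedrop/store"    # assumption for the sketch
GPG_HOME = "/var/lib/securedrop/keys"      # assumption for the sketch
DB_PATH = "/var/lib/securedrop/db.sqlite"  # assumption for the sketch

gpg = gnupg.GPG(gnupghome=GPG_HOME)

def delete_collection(source_id):
    # 1. Securely delete the source's submissions and replies.
    subprocess.check_call(["srm", "-r", os.path.join(STORE_DIR, source_id)])

    # 2. Delete the reply keypair, if one was ever generated (sources never
    #    flagged for review won't have one). python-gnupg requires deleting
    #    the private key before the public key.
    for key in gpg.list_keys():
        if any(source_id in uid for uid in key["uids"]):
            gpg.delete_keys(key["fingerprint"], True)  # private key
            gpg.delete_keys(key["fingerprint"])        # public key

    # 3. Remove the source's row so it no longer appears to journalists.
    conn = sqlite3.connect(DB_PATH)
    conn.execute("DELETE FROM sources WHERE filesystem_id = ?", (source_id,))
    conn.commit()
    conn.close()
```
Deleting the keypair as well covers the data-minimization point, and locking the source out is treated as a feature here: a deleted spam account cannot be trivially recreated under the same codename.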
| 2013-12-18T03:18:52Z | [] | [] |
freedomofpress/securedrop | 237 | freedomofpress__securedrop-237 | [
"194"
] | dddcc5fb649a843840398850fd2279989ef35a5c | diff --git a/securedrop/store.py b/securedrop/store.py
--- a/securedrop/store.py
+++ b/securedrop/store.py
@@ -35,13 +35,13 @@ def verify(p):
if not p == os.path.abspath(p):
raise PathException("The path is not absolute and/or normalized")
- if os.path.commonprefix([config.STORE_DIR, p]) != config.STORE_DIR:
+ # Check that the path p is in config.STORE_DIR
+ if os.path.relpath(p, config.STORE_DIR).startswith('..'):
raise PathException("Invalid directory %s" % (p, ))
- filename = os.path.basename(p)
- ext = os.path.splitext(filename)[-1]
-
if os.path.isfile(p):
+ filename = os.path.basename(p)
+ ext = os.path.splitext(filename)[-1]
if filename == '_FLAG':
return True
if ext != '.gpg':
| diff --git a/securedrop/test.py b/securedrop/test.py
--- a/securedrop/test.py
+++ b/securedrop/test.py
@@ -530,6 +530,8 @@ def tearDown(self):
def test_verify(self):
with self.assertRaises(store.PathException):
store.verify(os.path.join(config.STORE_DIR, '..', 'etc', 'passwd'))
+ with self.assertRaises(store.PathException):
+ store.verify(config.STORE_DIR + "_backup")
def test_get_zip(self):
sid = 'EQZGCJBRGISGOTC2NZVWG6LILJBHEV3CINNEWSCLLFTUWZJPKJFECLS2NZ4G4U3QOZCFKTTPNZMVIWDCJBBHMUDBGFHXCQ3R'
| Possible path confusion / traversal via imprecise store.verify()
The method `store.verify()` checks file paths provided via the URL and other inputs and raises an exception if they do not match the validation criteria.
A problem with this validation was spotted: `os.path.commonprefix()` is not sufficient to check whether the path is inside the configured store path. It only compares character by character, which allows navigating into another folder whenever the two paths share the same prefix string.
```
Example: config.STORE_DIR = '/opt/store'
PoC: store.verify('/opt/store_backup')
```
The mitigation has to make sure that the path is inside the configured store folder. One option is to add another check in `store.verify()` using `os.path.relpath(p, config.STORE_DIR)`: if the absolute path `p` is not inside the store directory, `os.path.relpath()` will return a string starting with '../'.
Example:
```
os.path.relpath('/opt/store_backup', config.STORE_DIR) == '../store_backup'
```
**Reported as part of the cure53 audit of 0.2 as: SD-01-006**
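A small self-contained sketch of the difference between the two checks (the store path and filenames are made up for the example):
```
# Flawed prefix check vs. the suggested relpath-based check.
import os

STORE_DIR = "/opt/store"

def in_store_commonprefix(p):
    # Flawed: compares character by character, so '/opt/store_backup'
    # passes because it shares the string prefix '/opt/store'.
    return os.path.commonprefix([STORE_DIR, p]) == STORE_DIR

def in_store_relpath(p):
    # Fixed: any path outside STORE_DIR relativizes to something
    # starting with '..'.
    return not os.path.relpath(p, STORE_DIR).startswith("..")

assert in_store_commonprefix("/opt/store_backup")        # false positive
assert not in_store_relpath("/opt/store_backup")         # correctly rejected
assert in_store_relpath("/opt/store/sid/1-msg.gpg")      # legitimate path accepted
```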
| 2013-12-20T21:27:36Z | [] | [] |
|
freedomofpress/securedrop | 245 | freedomofpress__securedrop-245 | [
"51"
] | b3dc92cf26b971ddd572bc64f00da7eddcba8dd2 | diff --git a/securedrop/crypto_util.py b/securedrop/crypto_util.py
--- a/securedrop/crypto_util.py
+++ b/securedrop/crypto_util.py
@@ -1,15 +1,16 @@
# -*- coding: utf-8 -*-
import os
-import bcrypt
import subprocess
-import threading
+from base64 import b32encode
+import re
+
from Crypto.Random import random
import random as badrandom
import gnupg
+import scrypt
+
import config
import store
-from base64 import b32encode
-import re
# to fix gpg error #78 on production
os.environ['USERNAME'] = 'www-data'
@@ -19,23 +20,20 @@
# Optimize crypto to speed up tests (at the expense of security - DO NOT
# use these settings in production)
GPG_KEY_LENGTH = "1024"
- BCRYPT_ID_SALT = bcrypt.gensalt(log_rounds=0)
- BCRYPT_GPG_SALT = bcrypt.gensalt(log_rounds=0)
+ SCRYPT_PARAMS = dict(N=2**1, r=1, p=1)
else:
GPG_KEY_LENGTH = "4096"
- BCRYPT_ID_SALT = config.BCRYPT_ID_SALT
- BCRYPT_GPG_SALT = config.BCRYPT_GPG_SALT
-
-DEFAULT_WORDS_IN_RANDOM_ID = 8
+ SCRYPT_PARAMS = config.SCRYPT_PARAMS
+SCRYPT_ID_PEPPER = config.SCRYPT_ID_PEPPER
+SCRYPT_GPG_PEPPER = config.SCRYPT_GPG_PEPPER
-class CryptoException(Exception):
- pass
+DEFAULT_WORDS_IN_RANDOM_ID = 8
# Make sure these pass before the app can run
# TODO: Add more tests
def do_runtime_tests():
- assert(config.BCRYPT_ID_SALT != config.BCRYPT_GPG_SALT)
+ assert(config.SCRYPT_ID_PEPPER != config.SCRYPT_GPG_PEPPER)
# crash if we don't have srm:
try:
subprocess.check_call(['srm'], stdout=subprocess.PIPE)
@@ -44,6 +42,25 @@ def do_runtime_tests():
do_runtime_tests()
+GPG_BINARY = 'gpg2'
+try:
+ p = subprocess.Popen([GPG_BINARY, '--version'], stdout=subprocess.PIPE)
+except OSError:
+ GPG_BINARY = 'gpg'
+ p = subprocess.Popen([GPG_BINARY, '--version'], stdout=subprocess.PIPE)
+
+assert p.stdout.readline().split()[
+ -1].split('.')[0] == '2', "upgrade GPG to 2.0"
+gpg = gnupg.GPG(gpgbinary=GPG_BINARY, gnupghome=config.GPG_KEY_DIR)
+
+words = file(config.WORD_LIST).read().split('\n')
+nouns = file(config.NOUNS).read().split('\n')
+adjectives = file(config.ADJECTIVES).read().split('\n')
+
+
+class CryptoException(Exception):
+ pass
+
def clean(s, also=''):
"""
@@ -54,18 +71,15 @@ def clean(s, also=''):
>>> clean("Helloworld")
'Helloworld'
"""
- # safe characters for every possible word in the wordlist
- # includes capital letters because bcrypt hashes are base32-encoded with
- # capital letters
- ok = '!#%$&)(+*-1032547698;:=?@acbedgfihkjmlonqpsrutwvyxzABCDEFGHIJKLMNOPQRSTUVWXYZ'
+ # safe characters for every possible word in the wordlist includes capital
+ # letters because codename hashes are base32-encoded with capital letters
+ ok = ' !#%$&)(+*-1032547698;:=?@acbedgfihkjmlonqpsrutwvyxzABCDEFGHIJKLMNOPQRSTUVWXYZ'
for c in s:
if c not in ok and c not in also:
raise CryptoException("invalid input: %s" % s)
- return s
-
-words = file(config.WORD_LIST).read().split('\n')
-nouns = file(config.NOUNS).read().split('\n')
-adjectives = file(config.ADJECTIVES).read().split('\n')
+ # scrypt.hash requires input of type str. Since the wordlist is all ASCII
+ # characters, this conversion is not problematic
+ return str(s)
def genrandomid(words_in_random_id=DEFAULT_WORDS_IN_RANDOM_ID):
@@ -78,35 +92,24 @@ def displayid(n):
return badrandom_value.choice(adjectives) + " " + badrandom_value.choice(nouns)
-
-def shash(s, salt=BCRYPT_ID_SALT):
+def hash_codename(codename, salt=SCRYPT_ID_PEPPER):
"""
- >>> shash('Hello, world!')
+ >>> hash_codename('Hello, world!')
'EQZGCJBRGISGOTC2NZVWG6LILJBHEV3CINNEWSCLLFTUWZLFHBTS6WLCHFHTOLRSGQXUQLRQHFMXKOKKOQ4WQ6SXGZXDAS3Z'
"""
- return b32encode(bcrypt.hashpw(s, salt))
-
-GPG_BINARY = 'gpg2'
-try:
- p = subprocess.Popen([GPG_BINARY, '--version'], stdout=subprocess.PIPE)
-except OSError:
- GPG_BINARY = 'gpg'
- p = subprocess.Popen([GPG_BINARY, '--version'], stdout=subprocess.PIPE)
-
-assert p.stdout.readline().split()[
- -1].split('.')[0] == '2', "upgrade GPG to 2.0"
-gpg = gnupg.GPG(gpgbinary=GPG_BINARY, gnupghome=config.GPG_KEY_DIR)
+ return b32encode(scrypt.hash(clean(codename), salt, **SCRYPT_PARAMS))
def genkeypair(name, secret):
"""
- >>> if not gpg.list_keys(shash('randomid')):
- ... genkeypair(shash('randomid'), 'randomid').type
+ >>> if not gpg.list_keys(hash_codename('randomid')):
+ ... genkeypair(hash_codename('randomid'), 'randomid').type
... else:
... u'P'
u'P'
"""
- name, secret = clean(name), shash(clean(secret, ' '), salt=BCRYPT_GPG_SALT)
+ name = clean(name)
+ secret = hash_codename(secret, salt=SCRYPT_GPG_PEPPER)
return gpg.gen_key(gpg.gen_key_input(
key_type=GPG_KEY_TYPE, key_length=GPG_KEY_LENGTH,
passphrase=secret,
@@ -138,10 +141,6 @@ def get_key_by_fingerprint(fingerprint):
return matches[0] if matches else None
-def _shquote(s):
- return "\\'".join("'" + p + "'" for p in s.split("'"))
-
-
def encrypt(fp, s, output=None):
r"""
>>> key = genkeypair('randomid', 'randomid')
@@ -171,7 +170,7 @@ def decrypt(name, secret, s):
... )
'Goodbye, cruel world!'
"""
- secret = shash(secret, salt=BCRYPT_GPG_SALT)
+ secret = hash_codename(secret, salt=SCRYPT_GPG_PEPPER)
return gpg.decrypt(s, passphrase=secret).data
diff --git a/securedrop/example_config.py b/securedrop/example_config.py
--- a/securedrop/example_config.py
+++ b/securedrop/example_config.py
@@ -8,8 +8,9 @@
NOUNS='./dictionaries/nouns.txt'
ADJECTIVES='./dictionaries/adjectives.txt'
JOURNALIST_KEY='' # fingerprint of the public key for encrypting submissions
-BCRYPT_ID_SALT='' # bcrypt.gensalt(); for constructing public ID from source codename
-BCRYPT_GPG_SALT='' # bcrypt.gensalt(); for stretching source codename into GPG passphrase
+SCRYPT_ID_PEPPER='' # os.urandom(32); for constructing public ID from source codename
+SCRYPT_GPG_PEPPER='' # os.urandom(32); for stretching source codename into GPG passphrase
+SCRYPT_PARAMS = dict(N=2**14, r=8, p=1)
### Theming Options
diff --git a/securedrop/source.py b/securedrop/source.py
--- a/securedrop/source.py
+++ b/securedrop/source.py
@@ -62,15 +62,15 @@ def decorated_function(*args, **kwargs):
@ignore_static
def setup_g():
"""Store commonly used values in Flask's special g object"""
- # ignore_static here because `crypto_util.shash` is bcrypt (very time consuming),
- # and we don't need to waste time running if we're just serving a static
- # resource that won't need to access these common values.
+ # ignore_static here because `crypto_util.hash_codename` is scrypt (very
+ # time consuming), and we don't need to waste time running if we're just
+ # serving a static resource that won't need to access these common values.
if logged_in():
# We use session.get (which defaults to None if 'flagged' is not in the
# session) to avoid a KeyError on the redirect from login/ to lookup/
g.flagged = session.get('flagged')
g.codename = session['codename']
- g.sid = crypto_util.shash(g.codename)
+ g.sid = crypto_util.hash_codename(g.codename)
g.loc = store.path(g.sid)
@@ -118,7 +118,7 @@ def generate():
@app.route('/create', methods=['POST'])
def create():
- sid = crypto_util.shash(session['codename'])
+ sid = crypto_util.hash_codename(session['codename'])
if os.path.exists(store.path(sid)):
# if this happens, we're not using very secure crypto
log.warning("Got a duplicate ID '%s'" % sid)
@@ -203,7 +203,7 @@ def delete():
def valid_codename(codename):
- return os.path.exists(store.path(crypto_util.shash(codename)))
+ return os.path.exists(store.path(crypto_util.hash_codename(codename)))
@app.route('/login', methods=('GET', 'POST'))
| diff --git a/securedrop/test.py b/securedrop/test.py
--- a/securedrop/test.py
+++ b/securedrop/test.py
@@ -26,7 +26,7 @@
def _block_on_reply_keypair_gen(codename):
- sid = crypto_util.shash(codename)
+ sid = crypto_util.hash_codename(codename)
while not crypto_util.getkey(sid):
sleep(0.1)
| Consider using scrypt instead of bcrypt
scrypt is another adaptive key stretching algorithm in the vein of bcrypt. Unlike bcrypt, however, it was specifically designed to thwart attacks by powerful adversaries capable of building large clusters of custom hardware for password cracking. Given securedrop's threat model, scrypt might be a better choice than bcrypt; however, scrypt is significantly newer and comparatively less tested.
| At globaleaks we're using too scrypt by following this specific approach, described in our application security design and details: https://docs.google.com/a/apps.globaleaks.org/document/d/1SMSiAry7x5XY9nY8GAejJD75NWg7bp7M1PwXSiwy62U/pub#h.ibk1v235g7wb
The reason I think scrypt should be eventually implemented is because of our threat model. When bcrypt was designed back in 1999 its threat was application specific hardware bruteforcing arrays. However we need to be thinking about FPGA arrays and cost to the adversary. scrypt has the potential to pretty much force an adversary to have to use FPGAs, and via the memory hardening settings you can also increase the cost factor ( for example of ram ) from a few million dollars to crack a codename in say one year, to the potential of billions of dollars.
@taipo, do you have evidence for your claims regarding the use of FPGA arrays for cracking bcrypt?
I derived my opinion from reading this 'STRONGER KEY DERIVATION VIA SEQUENTIAL
MEMORY-HARD FUNCTIONS'
From: https://www.tarsnap.com/scrypt/scrypt.pdf
Also see "Table 1. Estimated cost of hardware to crack a password in 1 year."
I am not saying that there is anything specific about scrypt that means an adversary has to use FPGAs, or that FPGA's could increase the effectiveness of determined attacks even further than ASIC's, I am looking at which method gives us the potential to make bruteforcing attempts the most expensive.
Also:
- http://www.openwall.com/presentations/Passwords12-The-Future-Of-Hashing/
Future dev of 'supercomputers':
- "James Bamford's book The Shadow Factory reported that NSA told the Pentagon it would need an exaflop computer by 2018." - https://en.wikipedia.org/wiki/Shadow_Factory p339
- http://www.infoworld.com/t/networking/ibm-breaks-petaflop-barrier-263
- http://www.defencenews.in/defence-news-internal.asp?get=new&id=500
- http://www.h-online.com/newsticker/news/item/IDF-Intel-says-Moore-s-Law-holds-until-2029-734779.html
One of the difficulties is finding working benchmarks for attacking scrypt key hashes. For now I can only go by information mostly authored by Colin Percival.
Summary of points where scrypt is said to get one over bcrypt (plus some extras):
- scrypt, via its memory cost and parallelization settings, can be configured to extend the cost to the attacker, considerably past the highest practical bcrypt cost setting without incurring memory and cpu costs on the host webserver.
- although it's hard to find info on this, it appears that there is a limit on the password byte length for bcrypt. "Finally, the key argument is a secret encryption key, which can be a user-chosen password of up to 56 bytes (including a terminating zero byte when the key is an ASCII string)."
from: https://www.usenix.org/legacy/events/usenix99/provos/provos_html/node4.html
see also: http://security.stackexchange.com/questions/39849/does-bcrypt-have-a-maximum-password-length
- Opinion: these organisations (the NSA, GCHQ, CSE of Canada, and the Chinese government) do not publish the amount of money they have invested in building custom password-cracking units, so while asking for 'more' might seem excessive to some, more is better than not enough, because we are largely left making guesses by comparing the best of what is known to be in development and in play currently. So it looks like we will be contending with exaflop rigs (see links above) in about 5 years' time.
- according to Percival, scrypt is 2⁵ times more expensive to attack than bcrypt
from: https://www.tarsnap.com/scrypt/scrypt-slides.pdf slide 19
So in my thinking, if those other two cost settings cannot be bypassed, then that makes scrypt the number 1 contender method for some future version of SecureDrop.
These are all great points and resources, thanks @Taipo! It should be pretty easy for someone to implement this using [this Python library](https://pypi.python.org/pypi/scrypt/).
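For concreteness, here is a sketch of what codename stretching with that scrypt library could look like. The pepper handling and the N/r/p parameters are illustrative assumptions for the example, not vetted production values.
```
# Illustrative sketch of scrypt-based codename stretching.
import os
from base64 import b32encode

import scrypt  # https://pypi.python.org/pypi/scrypt/

SCRYPT_ID_PEPPER = os.urandom(32)        # a real deployment would persist this in config
SCRYPT_PARAMS = dict(N=2**14, r=8, p=1)  # example work factors

def hash_codename(codename, salt=SCRYPT_ID_PEPPER):
    # scrypt.hash(password, salt, N, r, p) returns raw bytes; base32-encode
    # them so the result is safe to use as a filesystem/GPG identifier.
    return b32encode(scrypt.hash(str(codename), salt, **SCRYPT_PARAMS))

print(hash_codename("unguided artillery fire"))
```
Base32-encoding the raw scrypt output keeps the derived value usable as a filesystem and GPG identifier, which is how the bcrypt-based hash was already being used.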
| 2014-01-06T02:55:26Z | [] | [] |
freedomofpress/securedrop | 335 | freedomofpress__securedrop-335 | [
"301"
] | 3546c88c6a1aee99bf2e3d3673923a1a5d8ce48b | diff --git a/securedrop/source.py b/securedrop/source.py
--- a/securedrop/source.py
+++ b/securedrop/source.py
@@ -5,6 +5,7 @@
from functools import wraps
import zipfile
from cStringIO import StringIO
+import subprocess
import logging
# This module's logger is explicitly labeled so the correct logger is used,
@@ -180,6 +181,22 @@ def async_genkey(sid, codename):
flagged=g.source.flagged, haskey=crypto_util.getkey(g.sid))
+def normalize_timestamps(sid):
+ """
+ Update the timestamps on all of the source's submissions to match that of
+ the latest submission. This minimizes metadata that could be useful to
+ investigators. See #301.
+ """
+ sub_paths = [ store.path(sid, submission.filename)
+ for submission in g.source.submissions ]
+ if len(sub_paths) > 1:
+ args = ["touch"]
+ args.extend(sub_paths[:-1])
+ rc = subprocess.call(args)
+ if rc != 0:
+ app.logger.warning("Couldn't normalize submission timestamps (touch exited with %d)" % rc)
+
+
@app.route('/submit', methods=('POST',))
@login_required
def submit():
@@ -204,6 +221,7 @@ def submit():
g.source.last_updated = datetime.now()
db_session.commit()
+ normalize_timestamps(g.sid)
return redirect(url_for('lookup'))
| diff --git a/securedrop/tests/unit_tests.py b/securedrop/tests/unit_tests.py
--- a/securedrop/tests/unit_tests.py
+++ b/securedrop/tests/unit_tests.py
@@ -197,22 +197,21 @@ def test_submit_dirty_file_to_be_cleaned(self):
img = open(os.getcwd()+'/tests/test_images/dirty.jpg')
img_metadata = store.metadata_handler(img.name)
self.assertFalse(img_metadata.is_clean(), "The file is dirty.")
- del(img_metadata)
codename = self._new_codename()
rv = self.client.post('/submit', data=dict(
- msg="This is a test",
+ msg="",
fh=(img, 'dirty.jpg'),
notclean='True',
), follow_redirects=True)
self.assertEqual(rv.status_code, 200)
- self.assertIn("Thanks! We received your message.", rv.data)
self.assertIn(escape("Thanks! We received your document 'dirty.jpg'."),
rv.data)
store_dirs = [os.path.join(config.STORE_DIR,d) for d in os.listdir(config.STORE_DIR) if os.path.isdir(os.path.join(config.STORE_DIR,d))]
latest_subdir = max(store_dirs, key=os.path.getmtime)
zip_gpg_files = [os.path.join(latest_subdir,f) for f in os.listdir(latest_subdir) if os.path.isfile(os.path.join(latest_subdir,f))]
- zip_gpg = max(zip_gpg_files, key=os.path.getmtime)
+ self.assertEqual(len(zip_gpg_files), 1)
+ zip_gpg = zip_gpg_files[0]
zip_gpg_file = open(zip_gpg)
decrypted_data = self.gpg.decrypt_file(zip_gpg_file)
@@ -228,7 +227,6 @@ def test_submit_dirty_file_to_be_cleaned(self):
# check for the actual file been clean
clean_file_metadata = store.metadata_handler(clean_file.name)
self.assertTrue(clean_file_metadata.is_clean(), "the file is now clean.")
- del(clean_file_metadata)
zip_gpg_file.close()
clean_file.close()
img.close()
@@ -238,21 +236,20 @@ def test_submit_dirty_file_to_not_clean(self):
img = open(os.getcwd()+'/tests/test_images/dirty.jpg')
img_metadata = store.metadata_handler(img.name)
self.assertFalse(img_metadata.is_clean(), "The file is dirty.")
- del(img_metadata)
codename = self._new_codename()
rv = self.client.post('/submit', data=dict(
- msg="This is a test",
+ msg="",
fh=(img, 'dirty.jpg'),
), follow_redirects=True)
self.assertEqual(rv.status_code, 200)
- self.assertIn("Thanks! We received your message.", rv.data)
self.assertIn(escape("Thanks! We received your document 'dirty.jpg'."),
rv.data)
store_dirs = [os.path.join(config.STORE_DIR,d) for d in os.listdir(config.STORE_DIR) if os.path.isdir(os.path.join(config.STORE_DIR,d))]
latest_subdir = max(store_dirs, key=os.path.getmtime)
zip_gpg_files = [os.path.join(latest_subdir,f) for f in os.listdir(latest_subdir) if os.path.isfile(os.path.join(latest_subdir,f))]
- zip_gpg = max(zip_gpg_files, key=os.path.getmtime)
+ self.assertEqual(len(zip_gpg_files), 1)
+ zip_gpg = zip_gpg_files[0]
zip_gpg_file = open(zip_gpg)
decrypted_data = self.gpg.decrypt_file(zip_gpg_file)
@@ -268,7 +265,6 @@ def test_submit_dirty_file_to_not_clean(self):
# check for the actual file been clean
clean_file_metadata = store.metadata_handler(clean_file.name)
self.assertFalse(clean_file_metadata.is_clean(), "the file is was not cleaned.")
- del(clean_file_metadata)
zip_gpg_file.close()
clean_file.close()
img.close()
| Timestamps
The issue of having timestamps on submitted files, and the disadvantages of retaining that metadata which can be correlated with other known variables, has been brought up.
One of the solutions which has been posited is scrambling/randomizing or nullifying the timestamps, but the problem with that is journalists would lose track of the order in which submissions arrived. With document flagging implemented in the future (#124) that would be less of an issue.
I believe @Hainish proposed an idea that would employ cryptographic hashes to retain the ordering of documents, while ditching the timestamps, but I don't know much about the specifics. Would love to hear more. So far I've done a preliminary search for possible filesystem/kernel or mount option-based solutions and didn't come up with anything.
| I have a nearly-complete rewrite of the database code that stores metadata in the db rather than stat'ing it from the filesystem (which is less efficient anyway).
In that rewrite, the order of submissions was retained through the natural ordering of an auto-incrementing primary key, and only the timestamp of the last upload ("last updated") was stored in the db. This retained a useful order of submissions/collections for the journalist interface while minimizing stored metadata. Of course, there is still metadata in the filesystem. Linux doesn't store file creation times. `stat.st_ctime`, which we use now, is platform dependent, and returns the last time of metadata change on Linux. So, we could minimize metadata in the filesystem with this procedure:
1. On a new submission, update the database. Source table has a `last_submitted` datetime field. Submission table primary keys are used to keep submissions in "sort order", and the most recent submission for a given source is correlated with `last_submitted`.
2. `touch` the other submissions in the source's directory. This will update their metadata to the same time as the latest submission, so we don't leak anything other than that.
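Step 2 could look roughly like the sketch below, a standalone illustration using `os.utime`; the directory layout is assumed, and the code that landed shells out to `touch` instead (see the `normalize_timestamps` helper in the patch above).
```
# Standalone illustration of step 2: after saving a new submission, reset the
# mtime of every older file so only the latest submission's timestamp remains.
import os

def normalize_timestamps(source_dir):
    paths = [os.path.join(source_dir, fn) for fn in os.listdir(source_dir)]
    paths = [p for p in paths if os.path.isfile(p)]
    if len(paths) < 2:
        return
    newest = max(paths, key=os.path.getmtime)
    newest_mtime = os.path.getmtime(newest)
    for p in paths:
        if p != newest:
            # Set both atime and mtime to match the newest submission.
            os.utime(p, (newest_mtime, newest_mtime))
```
Touching everything to the newest submission's mtime leaves only a single meaningful timestamp per source, matching what the `last_updated` column already records.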
| 2014-03-14T08:14:23Z | [] | [] |
freedomofpress/securedrop | 343 | freedomofpress__securedrop-343 | [
"302",
"302"
] | 3e5ce6ae0e65ff537be51edee24775a91643f54c | diff --git a/securedrop/db.py b/securedrop/db.py
--- a/securedrop/db.py
+++ b/securedrop/db.py
@@ -41,6 +41,9 @@ class Source(Base):
journalist_designation = Column(String(255), nullable=False)
flagged = Column(Boolean, default=False)
last_updated = Column(DateTime, default=datetime.datetime.now)
+
+ # sources are "pending" and don't get displayed to journalists until they submit something
+ pending = Column(Boolean, default=True)
def __init__(self, filesystem_id=None, journalist_designation=None):
self.filesystem_id = filesystem_id
diff --git a/securedrop/journalist.py b/securedrop/journalist.py
--- a/securedrop/journalist.py
+++ b/securedrop/journalist.py
@@ -79,7 +79,7 @@ def get_docs(sid):
@app.route('/')
def index():
- sources = Source.query.order_by(Source.last_updated.desc()).all()
+ sources = Source.query.filter_by(pending=False).order_by(Source.last_updated.desc()).all()
return render_template('index.html', sources=sources)
diff --git a/securedrop/source.py b/securedrop/source.py
--- a/securedrop/source.py
+++ b/securedrop/source.py
@@ -205,6 +205,7 @@ def submit():
submission = Submission(g.source, fname)
db_session.add(submission)
+ g.source.pending = False
g.source.last_updated = datetime.now()
db_session.commit()
normalize_timestamps(g.sid)
| diff --git a/securedrop/tests/unit_tests.py b/securedrop/tests/unit_tests.py
--- a/securedrop/tests/unit_tests.py
+++ b/securedrop/tests/unit_tests.py
@@ -604,6 +604,10 @@ def test_delete_collection(self):
# first, add a source
self.source_app.get('/generate')
self.source_app.post('/create')
+ self.source_app.post('/submit', data=dict(
+ msg="This is a test.",
+ fh=(StringIO(''), ''),
+ ), follow_redirects=True)
rv = self.journalist_app.get('/')
# navigate to the collection page
@@ -636,6 +640,10 @@ def test_delete_collections(self):
for i in range(num_sources):
self.source_app.get('/generate')
self.source_app.post('/create')
+ self.source_app.post('/submit', data=dict(
+ msg="This is a test "+str(i)+".",
+ fh=(StringIO(''), ''),
+ ), follow_redirects=True)
_logout(self.source_app)
rv = self.journalist_app.get('/')
| Don't show sources that haven't submitted anything
The latest submissions page lists all of the source codenames, including codenames of sources that haven't submitted any messages or documents. It would be helpful if sources that don't actually have any data were all listed at the bottom instead of in chronological order, or even if they weren't listed at all.
Similarly, it would be nice to be able to "favorite" sources so that they always stick to the top.
| After giving some thought and (briefly) talking with @garrettr, I think it makes sense to do this:
Each source should have an "active" flag that starts out as false and becomes true the first time they submit a message or document. Only "active" sources should show up in the list to journalists. This will prevent total spam sources from cluttering up the interface.
Maybe there's a better word than active to use.
Verified?
Verified could work, but it has a different meaning to journalists. When an anonymous person gives you a document the first thing you do is verify it to learn if it's real or if someone is trolling you.
Maybe all new sources get created with "pending" true, and the first time they submit something "pending" gets changed to false. The source list only shows sources that aren't pending.
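A minimal sketch of that "pending" idea with SQLAlchemy is below; it assumes a recent SQLAlchemy (1.4+) and an in-memory SQLite database purely for illustration.
```
# Sketch of the "pending" flag: sources start hidden and become visible to
# journalists only after their first submission.
from sqlalchemy import Boolean, Column, Integer, String, create_engine
from sqlalchemy.orm import declarative_base, sessionmaker

Base = declarative_base()

class Source(Base):
    __tablename__ = 'sources'
    id = Column(Integer, primary_key=True)
    journalist_designation = Column(String(255), nullable=False)
    pending = Column(Boolean, default=True)  # hidden until first submission

engine = create_engine('sqlite:///:memory:')
Base.metadata.create_all(engine)
session = sessionmaker(bind=engine)()

source = Source(journalist_designation='unguided artillery fire')
session.add(source)
session.commit()

# On the source's first message or document:
source.pending = False
session.commit()

# The journalist index only lists sources that are no longer pending:
visible = session.query(Source).filter_by(pending=False).all()
```
This keeps spam or abandoned codenames out of the journalist index while leaving their directories (and ability to log in) intact until a journalist explicitly deletes them.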
| 2014-04-01T21:43:41Z | [] | [] |
freedomofpress/securedrop | 349 | freedomofpress__securedrop-349 | [
"320"
] | 644b3d32eaebb5234f245cf8abb2351dacbb0951 | diff --git a/securedrop/db.py b/securedrop/db.py
--- a/securedrop/db.py
+++ b/securedrop/db.py
@@ -45,6 +45,9 @@ class Source(Base):
# sources are "pending" and don't get displayed to journalists until they submit something
pending = Column(Boolean, default=True)
+ # keep track of how many interactions have happened, for filenames
+ interaction_count = Column(Integer, default=0, nullable=False)
+
def __init__(self, filesystem_id=None, journalist_designation=None):
self.filesystem_id = filesystem_id
self.journalist_designation = journalist_designation
@@ -52,6 +55,9 @@ def __init__(self, filesystem_id=None, journalist_designation=None):
def __repr__(self):
return '<Source %r>' % (self.journalist_designation)
+ def journalist_filename(self):
+ valid_chars = 'abcdefghijklmnopqrstuvwxyz1234567890-_'
+ return ''.join([c for c in self.journalist_designation.lower().replace(' ', '_') if c in valid_chars])
class Submission(Base):
__tablename__ = 'submissions'
diff --git a/securedrop/journalist.py b/securedrop/journalist.py
--- a/securedrop/journalist.py
+++ b/securedrop/journalist.py
@@ -72,8 +72,8 @@ def get_docs(sid):
date=str(datetime.fromtimestamp(os_stat.st_mtime)),
size=os_stat.st_size,
))
- # sort by date since ordering by filename is meaningless
- docs.sort(key=lambda x: x['date'])
+ # sort in chronological order
+ docs.sort(key=lambda x: int(x['name'].split('-')[0]))
return docs
@@ -138,8 +138,13 @@ def doc(sid, fn):
@app.route('/reply', methods=('POST',))
def reply():
msg = request.form['msg']
+ g.source.interaction_count += 1
+ filename = "{0}-reply.gpg".format(g.source.interaction_count)
+
crypto_util.encrypt(crypto_util.getkey(g.sid), msg, output=
- store.path(g.sid, 'reply-%s.gpg' % uuid.uuid4()))
+ store.path(g.sid, filename))
+
+ db_session.commit()
return render_template('reply.html', sid=g.sid,
codename=g.source.journalist_designation)
diff --git a/securedrop/source.py b/securedrop/source.py
--- a/securedrop/source.py
+++ b/securedrop/source.py
@@ -142,7 +142,7 @@ def create():
def lookup():
replies = []
for fn in os.listdir(g.loc):
- if fn.startswith('reply-'):
+ if fn.endswith('-reply.gpg'):
try:
msg = crypto_util.decrypt(g.sid, g.codename,
file(store.path(g.sid, fn)).read()).decode("utf-8")
@@ -191,13 +191,17 @@ def submit():
strip_metadata = True if 'notclean' in request.form else False
fnames = []
+ journalist_filename = g.source.journalist_filename()
if msg:
- fnames.append(store.save_message_submission(g.sid, msg))
+ g.source.interaction_count += 1
+ fnames.append(store.save_message_submission(g.sid, g.source.interaction_count,
+ journalist_filename, msg))
flash("Thanks! We received your message.", "notification")
if fh:
- fnames.append(store.save_file_submission(g.sid, fh.filename,
- fh.stream, fh.content_type, strip_metadata))
+ g.source.interaction_count += 1
+ fnames.append(store.save_file_submission(g.sid, g.source.interaction_count,
+ journalist_filename, fh.filename, fh.stream, fh.content_type, strip_metadata))
flash("Thanks! We received your document '%s'."
% fh.filename or '[unnamed]', "notification")
diff --git a/securedrop/store.py b/securedrop/store.py
--- a/securedrop/store.py
+++ b/securedrop/store.py
@@ -19,7 +19,7 @@
from werkzeug import secure_filename
VALIDATE_FILENAME = re.compile(
- "^(reply-)?[a-f0-9-]+(_msg|_doc\.zip|)\.gpg$").match
+ "^(reply-)?[a-z0-9-_]+(-msg|-doc\.zip|)\.gpg$").match
class PathException(Exception):
@@ -79,7 +79,7 @@ def get_bulk_archive(filenames):
return zip_file
-def save_file_submission(sid, filename, stream, content_type, strip_metadata):
+def save_file_submission(sid, count, journalist_filename, filename, stream, content_type, strip_metadata):
sanitized_filename = secure_filename(filename)
clean_file = sanitize_metadata(stream, content_type, strip_metadata)
@@ -88,14 +88,14 @@ def save_file_submission(sid, filename, stream, content_type, strip_metadata):
zf.writestr(sanitized_filename, clean_file.read() if clean_file else stream.read())
s.reset()
- filename = "%s_doc.zip.gpg" % uuid.uuid4()
+ filename = "{0}-{1}-doc.zip.gpg".format(count, journalist_filename)
file_loc = path(sid, filename)
crypto_util.encrypt(config.JOURNALIST_KEY, s, file_loc)
return filename
-def save_message_submission(sid, message):
- filename = "%s_msg.gpg" % uuid.uuid4()
+def save_message_submission(sid, count, journalist_filename, message):
+ filename = "{0}-{1}-msg.gpg".format(count, journalist_filename)
msg_loc = path(sid, filename)
crypto_util.encrypt(config.JOURNALIST_KEY, message, msg_loc)
return filename
@@ -144,3 +144,4 @@ def sanitize_metadata(stream, content_type, strip_metadata):
t.close()
return s
+
| diff --git a/securedrop/tests/unit_tests.py b/securedrop/tests/unit_tests.py
--- a/securedrop/tests/unit_tests.py
+++ b/securedrop/tests/unit_tests.py
@@ -323,7 +323,7 @@ def test_bulk_download(self):
source = Source(sid, crypto_util.display_id())
db_session.add(source)
db_session.commit()
- files = ['abc1_msg.gpg', 'abc2_msg.gpg']
+ files = ['1-abc1-msg.gpg', '2-abc2-msg.gpg']
filenames = test_setup.setup_test_docs(sid, files)
rv = self.client.post('/bulk', data=dict(
@@ -375,7 +375,7 @@ def test_submit_message(self):
self.assertEqual(rv.status_code, 200)
soup = BeautifulSoup(rv.data)
submission_url = soup.select('ul#submissions li a')[0]['href']
- self.assertIn("_msg", submission_url)
+ self.assertIn("-msg", submission_url)
li = soup.select('ul#submissions li')[0]
self.assertRegexpMatches(li.contents[-1], "\d+ bytes")
@@ -448,7 +448,7 @@ def test_submit_file(self):
self.assertEqual(rv.status_code, 200)
soup = BeautifulSoup(rv.data)
submission_url = soup.select('ul#submissions li a')[0]['href']
- self.assertIn("_doc", submission_url)
+ self.assertIn("-doc", submission_url)
li = soup.select('ul#submissions li')[0]
self.assertRegexpMatches(li.contents[-1], "\d+ bytes")
@@ -665,6 +665,95 @@ def test_delete_collections(self):
# 2. "Don't show again" checkbox behavior
# 2. Correct behavior on "yes" and "no" buttons
+ def test_filenames(self):
+ """Test pretty, sequential filenames when source uploads messages and files"""
+ # add a source and submit stuff
+ self.source_app.get('/generate')
+ self.source_app.post('/create')
+ self.helper_filenames_submit()
+
+ # navigate to the collection page
+ rv = self.journalist_app.get('/')
+ soup = BeautifulSoup(rv.data)
+ first_col_url = soup.select('ul#cols > li a')[0]['href']
+ rv = self.journalist_app.get(first_col_url)
+ self.assertEqual(rv.status_code, 200)
+
+ # test filenames and sort order
+ soup = BeautifulSoup(rv.data)
+ submission_filename_re = r'^{0}-[a-z0-9-_]+(-msg|-doc\.zip)\.gpg$'
+ for i, submission_link in enumerate(soup.select('ul#submissions li a')):
+ filename = str(submission_link.contents[0])
+ self.assertTrue(re.match(submission_filename_re.format(i+1), filename))
+
+
+ def test_filenames_delete(self):
+ """Test pretty, sequential filenames when journalist deletes files"""
+ # add a source and submit stuff
+ self.source_app.get('/generate')
+ self.source_app.post('/create')
+ self.helper_filenames_submit()
+
+ # navigate to the collection page
+ rv = self.journalist_app.get('/')
+ soup = BeautifulSoup(rv.data)
+ first_col_url = soup.select('ul#cols > li a')[0]['href']
+ rv = self.journalist_app.get(first_col_url)
+ self.assertEqual(rv.status_code, 200)
+ soup = BeautifulSoup(rv.data)
+
+ # delete file #2
+ self.helper_filenames_delete(soup, 1)
+ rv = self.journalist_app.get(first_col_url)
+ soup = BeautifulSoup(rv.data)
+
+ # test filenames and sort order
+ submission_filename_re = r'^{0}-[a-z0-9-_]+(-msg|-doc\.zip)\.gpg$'
+ filename = str(soup.select('ul#submissions li a')[0].contents[0])
+ self.assertTrue( re.match(submission_filename_re.format(1), filename) )
+ filename = str(soup.select('ul#submissions li a')[1].contents[0])
+ self.assertTrue( re.match(submission_filename_re.format(3), filename) )
+ filename = str(soup.select('ul#submissions li a')[2].contents[0])
+ self.assertTrue( re.match(submission_filename_re.format(4), filename) )
+
+
+ def helper_filenames_submit(self):
+ self.source_app.post('/submit', data=dict(
+ msg="This is a test.",
+ fh=(StringIO(''), ''),
+ ), follow_redirects=True)
+ self.source_app.post('/submit', data=dict(
+ msg="This is a test.",
+ fh=(StringIO('This is a test'), 'test.txt'),
+ ), follow_redirects=True)
+ self.source_app.post('/submit', data=dict(
+ msg="",
+ fh=(StringIO('This is a test'), 'test.txt'),
+ ), follow_redirects=True)
+
+ def helper_filenames_delete(self, soup, i):
+ sid = soup.select('input[name="sid"]')[0]['value']
+ checkbox_values = [soup.select('input[name="doc_names_selected"]')[i]['value']]
+
+ # delete
+ rv = self.journalist_app.post('/bulk', data=dict(
+ sid=sid,
+ action='delete',
+ doc_names_selected=checkbox_values
+ ), follow_redirects=True)
+ self.assertEqual(rv.status_code, 200)
+ self.assertIn("The following file has been selected for <strong>permanent deletion</strong>", rv.data)
+
+ # confirm delete
+ rv = self.journalist_app.post('/bulk', data=dict(
+ sid=sid,
+ action='delete',
+ confirm_delete=1,
+ doc_names_selected=checkbox_values
+ ), follow_redirects=True)
+ self.assertEqual(rv.status_code, 200)
+ self.assertIn("File permanently deleted.", rv.data)
+
class TestStore(unittest.TestCase):
@@ -683,7 +772,7 @@ def test_verify(self):
def test_get_zip(self):
sid = 'EQZGCJBRGISGOTC2NZVWG6LILJBHEV3CINNEWSCLLFTUWZJPKJFECLS2NZ4G4U3QOZCFKTTPNZMVIWDCJBBHMUDBGFHXCQ3R'
- files = ['abc1_msg.gpg', 'abc2_msg.gpg']
+ files = ['1-abc1-msg.gpg', '2-abc2-msg.gpg']
filenames = test_setup.setup_test_docs(sid, files)
archive = zipfile.ZipFile(store.get_bulk_archive(filenames))
| Better filenames for individual docs and messages
It would be great if the filenames of documents and messages could be related to the source's codenames. If you just download a bunch of files from multiple sources, there's no way to tell which ones came from which sources.
Also, it would be great if the messages were numbered chronologically.
So instead of files like this:
9ebc79d1-46cf-4541-aa66-3bb2844e0ad0_msg.gpg
9ebc79d1-46cf-4541-aa66-3bb2844e0ad0_doc.zip.gpg
They could be like this (assuming the codename is "unguided artillery fire"):
unguided_artillery_fire_msg1.gpg
unguided_artillery_fire_doc1.zip.gpg
This would make it way easier for journalists to organize the files they're downloading. As it stands, if you log in and download new files from multiple sources, you need to save them into separate folders for each source before transferring them to the viewing station, which gets cumbersome.
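A tiny sketch of how such names could be derived is below; the helper is illustrative, and the patch that eventually merged settles on a `<count>-<codename>-msg.gpg` / `<count>-<codename>-doc.zip.gpg` pattern rather than the exact suffixes requested here.
```
# Illustrative helper: turn a journalist designation plus a per-source counter
# into a filesystem-safe, chronologically sortable filename.
def journalist_filename(designation):
    valid_chars = 'abcdefghijklmnopqrstuvwxyz1234567890-_'
    return ''.join(c for c in designation.lower().replace(' ', '_') if c in valid_chars)

def submission_filename(designation, count, is_doc=False):
    suffix = 'doc.zip.gpg' if is_doc else 'msg.gpg'
    return '{0}-{1}-{2}'.format(count, journalist_filename(designation), suffix)

print(submission_filename('unguided artillery fire', 1))               # 1-unguided_artillery_fire-msg.gpg
print(submission_filename('unguided artillery fire', 2, is_doc=True))  # 2-unguided_artillery_fire-doc.zip.gpg
```
A per-source counter also gives chronological ordering without leaning on timestamps.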
| 2014-04-05T03:14:24Z | [] | [] |
|
freedomofpress/securedrop | 382 | freedomofpress__securedrop-382 | [
"342"
] | 7c9e49ffd17acba67d3300a46705f09b8e812afe | diff --git a/securedrop/journalist.py b/securedrop/journalist.py
--- a/securedrop/journalist.py
+++ b/securedrop/journalist.py
@@ -14,6 +14,7 @@
import crypto_util
import store
import background
+import util
from db import db_session, Source, Submission
app = Flask(__name__, template_folder=config.JOURNALIST_TEMPLATES_DIR)
@@ -25,7 +26,7 @@
app.jinja_env.globals['header_image'] = config.CUSTOM_HEADER_IMAGE
app.jinja_env.globals['use_custom_header_image'] = True
else:
- app.jinja_env.globals['header_image'] = 'securedrop.png'
+ app.jinja_env.globals['header_image'] = 'logo.png'
app.jinja_env.globals['use_custom_header_image'] = False
@app.teardown_appcontext
@@ -151,7 +152,7 @@ def reply():
crypto_util.encrypt(crypto_util.getkey(g.sid), msg, output=
store.path(g.sid, filename))
-
+
db_session.commit()
return render_template('reply.html', sid=g.sid,
codename=g.source.journalist_designation)
diff --git a/securedrop/source.py b/securedrop/source.py
--- a/securedrop/source.py
+++ b/securedrop/source.py
@@ -23,6 +23,7 @@
import crypto_util
import store
import background
+import util
from db import db_session, Source, Submission
app = Flask(__name__, template_folder=config.SOURCE_TEMPLATES_DIR)
@@ -101,7 +102,7 @@ def check_tor2web():
flash('<strong>WARNING:</strong> You appear to be using Tor2Web. '
'This <strong>does not</strong> provide anonymity. '
'<a href="/tor2web-warning">Why is this dangerous?</a>',
- "header-warning")
+ "banner-warning")
@app.route('/')
@@ -152,8 +153,8 @@ def lookup():
except UnicodeDecodeError:
app.logger.error("Could not decode reply %s" % fn)
else:
- date = str(datetime.fromtimestamp(
- os.stat(store.path(g.sid, fn)).st_mtime))
+ d = datetime.fromtimestamp(os.stat(store.path(g.sid, fn)).st_mtime)
+ date = util.format_time(d)
replies.append(dict(id=fn, date=date, msg=msg))
def async_genkey(sid, codename):
diff --git a/securedrop/util.py b/securedrop/util.py
new file mode 100644
--- /dev/null
+++ b/securedrop/util.py
@@ -0,0 +1,5 @@
+import datetime
+
+def format_time(d):
+ return d.strftime('%b %d, %Y %I:%M %p')
+
| diff --git a/securedrop/tests/functional/source_navigation_steps.py b/securedrop/tests/functional/source_navigation_steps.py
--- a/securedrop/tests/functional/source_navigation_steps.py
+++ b/securedrop/tests/functional/source_navigation_steps.py
@@ -4,20 +4,20 @@ class SourceNavigationSteps():
def _source_visits_source_homepage(self):
self.driver.get(self.source_location)
- self.assertEqual("SecureDrop", self.driver.title)
+ self.assertEqual("SecureDrop | Protecting Journalists and Sources", self.driver.title)
def _source_chooses_to_submit_documents(self):
self.driver.find_element_by_id('submit-documents-button').click()
- code_name = self.driver.find_element_by_css_selector('#code-name')
+ codename = self.driver.find_element_by_css_selector('#codename')
- self.assertTrue(len(code_name.text) > 0)
- self.source_name = code_name.text
+ self.assertTrue(len(codename.text) > 0)
+ self.source_name = codename.text
def _source_continues_to_submit_page(self):
continue_button = self.driver.find_element_by_id('continue-button')
continue_button.click()
headline = self.driver.find_element_by_class_name('headline')
- self.assertEqual('Submit a document, message, or both', headline.text)
+ self.assertEqual('You have three options to send data', headline.text)
diff --git a/securedrop/tests/unit_tests.py b/securedrop/tests/unit_tests.py
--- a/securedrop/tests/unit_tests.py
+++ b/securedrop/tests/unit_tests.py
@@ -75,7 +75,7 @@ def _find_codename(self, html):
"""Find a source codename (diceware passphrase) in HTML"""
# Codenames may contain HTML escape characters, and the wordlist
# contains various symbols.
- codename_re = r'<p id="code-name" class="code-name">(?P<codename>[a-z0-9 &#;?:=@_.*+()\'"$%!-]+)</p>'
+ codename_re = r'<strong id="codename">(?P<codename>[a-z0-9 &#;?:=@_.*+()\'"$%!-]+)</strong>'
codename_match = re.search(codename_re, html)
self.assertIsNotNone(codename_match)
return codename_match.group('codename')
@@ -85,7 +85,7 @@ def test_generate(self):
rv = c.get('/generate')
self.assertEqual(rv.status_code, 200)
session_codename = session['codename']
- self.assertIn("Submitting for the first time", rv.data)
+ self.assertIn("Remember this code and keep it secret", rv.data)
self.assertIn(
"To protect your identity, we're assigning you a unique code name.", rv.data)
codename = self._find_codename(rv.data)
@@ -120,7 +120,7 @@ def test_create(self):
rv = c.post('/create', follow_redirects=True)
self.assertTrue(session['logged_in'])
# should be redirected to /lookup
- self.assertIn("Submit a document, message, or both", rv.data)
+ self.assertIn("You have three options to send data", rv.data)
def _new_codename(self):
"""Helper function to go through the "generate codename" flow"""
@@ -136,7 +136,7 @@ def test_lookup(self):
rv = self.client.post('login', data=dict(codename=codename),
follow_redirects=True)
# redirects to /lookup
- self.assertIn("Download journalist's public key", rv.data)
+ self.assertIn("journalist's public key", rv.data)
# download the public key
rv = self.client.get('journalist-key')
self.assertIn("BEGIN PGP PUBLIC KEY BLOCK", rv.data)
@@ -144,14 +144,14 @@ def test_lookup(self):
def test_login_and_logout(self):
rv = self.client.get('/login')
self.assertEqual(rv.status_code, 200)
- self.assertIn("Already submitted something?", rv.data)
+ self.assertIn("Login to check for responses", rv.data)
codename = self._new_codename()
with self.client as c:
rv = c.post('/login', data=dict(codename=codename),
follow_redirects=True)
self.assertEqual(rv.status_code, 200)
- self.assertIn("Submit a document, message, or both", rv.data)
+ self.assertIn("You have three options to send data", rv.data)
self.assertTrue(session['logged_in'])
_logout(c)
@@ -315,7 +315,7 @@ def tearDown(self):
def test_index(self):
rv = self.client.get('/')
self.assertEqual(rv.status_code, 200)
- self.assertIn("Latest submissions", rv.data)
+ self.assertIn("Sources", rv.data)
self.assertIn("No documents have been submitted!", rv.data)
def test_bulk_download(self):
@@ -367,7 +367,7 @@ def test_submit_message(self):
rv = self.journalist_app.get('/')
self.assertEqual(rv.status_code, 200)
- self.assertIn("Latest submissions", rv.data)
+ self.assertIn("Sources", rv.data)
soup = BeautifulSoup(rv.data)
col_url = soup.select('ul#cols > li a')[0]['href']
@@ -440,7 +440,7 @@ def test_submit_file(self):
rv = self.journalist_app.get('/')
self.assertEqual(rv.status_code, 200)
- self.assertIn("Latest submissions", rv.data)
+ self.assertIn("Sources", rv.data)
soup = BeautifulSoup(rv.data)
col_url = soup.select('ul#cols > li a')[0]['href']
@@ -524,7 +524,7 @@ def helper_test_reply(self, test_reply, expected_success=True):
rv = self.journalist_app.get('/')
self.assertEqual(rv.status_code, 200)
- self.assertIn("Latest submissions", rv.data)
+ self.assertIn("Sources", rv.data)
soup = BeautifulSoup(rv.data)
col_url = soup.select('ul#cols > li a')[0]['href']
| Tor2Web warning issues
So there are some issues with how the Tor2Web warnings appear, namely that `.header-warning` sometimes appears multiple times and can have styling errors:
![screenshot from 2014-03-30 23 54 09](https://cloud.githubusercontent.com/assets/452041/2563836/92e1dc66-b88c-11e3-99e9-a0a3d705ff12.png)
appeared on `https://poulsenjzbufll63.onion.to/lookup`
| 2014-05-23T02:10:11Z | [] | [] |
|
freedomofpress/securedrop | 519 | freedomofpress__securedrop-519 | [
"348",
"492"
] | d7ce95aecefb445ba9720952734fcb3b074f7ccc | diff --git a/securedrop/source.py b/securedrop/source.py
--- a/securedrop/source.py
+++ b/securedrop/source.py
@@ -220,7 +220,6 @@ def normalize_timestamps(sid):
def submit():
msg = request.form['msg']
fh = request.files['fh']
- strip_metadata = True if 'notclean' in request.form else False
fnames = []
journalist_filename = g.source.journalist_filename()
@@ -234,7 +233,7 @@ def submit():
if fh:
g.source.interaction_count += 1
fnames.append(store.save_file_submission(g.sid, g.source.interaction_count,
- journalist_filename, fh.filename, fh.stream, fh.content_type, strip_metadata))
+ journalist_filename, fh.filename, fh.stream))
flash("{} '{}'. {}".format(SUBMIT_DOC_NOTIFY_STR,
fh.filename or '[unnamed]',
SUBMIT_CODENAME_NOTIFY_STR), "notification")
diff --git a/securedrop/store.py b/securedrop/store.py
--- a/securedrop/store.py
+++ b/securedrop/store.py
@@ -8,10 +8,6 @@
import tempfile
import subprocess
from cStringIO import StringIO
-from shutil import copyfileobj
-
-from MAT import mat
-from MAT import strippers
import logging
log = logging.getLogger(__name__)
@@ -79,13 +75,12 @@ def get_bulk_archive(filenames):
return zip_file
-def save_file_submission(sid, count, journalist_filename, filename, stream, content_type, strip_metadata):
+def save_file_submission(sid, count, journalist_filename, filename, stream):
sanitized_filename = secure_filename(filename)
- clean_file = sanitize_metadata(stream, content_type, strip_metadata)
s = StringIO()
with zipfile.ZipFile(s, 'w') as zf:
- zf.writestr(sanitized_filename, clean_file.read() if clean_file else stream.read())
+ zf.writestr(sanitized_filename, stream.read())
s.reset()
filename = "{0}-{1}-doc.zip.gpg".format(count, journalist_filename)
@@ -101,9 +96,8 @@ def save_message_submission(sid, count, journalist_filename, message):
return filename
-def secure_unlink(fn, recursive=False, do_verify = True):
- if do_verify:
- verify(fn)
+def secure_unlink(fn, recursive=False):
+ verify(fn)
command = ['srm']
if recursive:
command.append('-r')
@@ -113,35 +107,3 @@ def secure_unlink(fn, recursive=False, do_verify = True):
def delete_source_directory(source_id):
secure_unlink(path(source_id), recursive=True)
-
-def metadata_handler(f):
- return mat.create_class_file(f, False, add2archive=True)
-
-def sanitize_metadata(stream, content_type, strip_metadata):
- text_plain = content_type == 'text/plain'
-
- s = None
- t = None
- clean_file = False
-
- if strip_metadata and not text_plain:
- t = tempfile.NamedTemporaryFile(delete = False)
- copyfileobj(stream, t)
- t.flush()
- file_meta = metadata_handler(t.name)
-
- if not file_meta.is_clean():
- file_meta.remove_all()
- f = open(t.name)
- s = StringIO()
- s.write(f.read())
- f.close()
- s.reset()
- secure_unlink(t.name, do_verify = False)
- t.close()
- else:
- secure_unlink(t.name, do_verify = False)
- t.close()
-
- return s
-
| diff --git a/securedrop/tests/test_images/dirty.jpg b/securedrop/tests/test_images/dirty.jpg
deleted file mode 100644
Binary files a/securedrop/tests/test_images/dirty.jpg and /dev/null differ
diff --git a/securedrop/tests/test_unit.py b/securedrop/tests/test_unit.py
--- a/securedrop/tests/test_unit.py
+++ b/securedrop/tests/test_unit.py
@@ -26,6 +26,7 @@
import test_setup
from db import db_session, Source
+
def _block_on_reply_keypair_gen(codename):
sid = crypto_util.hash_codename(codename)
while not crypto_util.getkey(sid):
@@ -193,81 +194,6 @@ def test_submit_both(self):
self.assertIn(source.SUBMIT_MSG_NOTIFY_STR, rv.data)
self.assertIn(escape("%s '%s'. %s" % (source.SUBMIT_DOC_NOTIFY_STR, 'test.txt', source.SUBMIT_CODENAME_NOTIFY_STR)), rv.data)
- def test_submit_dirty_file_to_be_cleaned(self):
- self.gpg = gnupg.GPG(homedir=config.GPG_KEY_DIR)
- img = open(os.getcwd()+'/tests/test_images/dirty.jpg')
- img_metadata = store.metadata_handler(img.name)
- self.assertFalse(img_metadata.is_clean(), "The file is dirty.")
- codename = self._new_codename()
- rv = self.client.post('/submit', data=dict(
- msg="",
- fh=(img, 'dirty.jpg'),
- notclean='True',
- ), follow_redirects=True)
- self.assertEqual(rv.status_code, 200)
- self.assertIn(escape("%s '%s'. %s" % (source.SUBMIT_DOC_NOTIFY_STR, 'dirty.jpg', source.SUBMIT_CODENAME_NOTIFY_STR)), rv.data)
-
- store_dirs = [os.path.join(config.STORE_DIR,d) for d in os.listdir(config.STORE_DIR) if os.path.isdir(os.path.join(config.STORE_DIR,d))]
- latest_subdir = max(store_dirs, key=os.path.getmtime)
- zip_gpg_files = [os.path.join(latest_subdir,f) for f in os.listdir(latest_subdir) if os.path.isfile(os.path.join(latest_subdir,f))]
- self.assertEqual(len(zip_gpg_files), 1)
- zip_gpg = zip_gpg_files[0]
-
- zip_gpg_file = open(zip_gpg)
- decrypted_data = self.gpg.decrypt_file(zip_gpg_file)
- self.assertTrue(decrypted_data.ok, 'Checking the integrity of the data after decryption.')
-
- s = StringIO(decrypted_data.data)
- zip_file = zipfile.ZipFile(s, 'r')
- clean_file = open(os.path.join(latest_subdir,'dirty.jpg'), 'w+b')
- clean_file.write(zip_file.read('dirty.jpg'))
- clean_file.seek(0)
- zip_file.close()
-
- # check for the actual file been clean
- clean_file_metadata = store.metadata_handler(clean_file.name)
- self.assertTrue(clean_file_metadata.is_clean(), "the file is now clean.")
- zip_gpg_file.close()
- clean_file.close()
- img.close()
-
- def test_submit_dirty_file_to_not_clean(self):
- self.gpg = gnupg.GPG(homedir=config.GPG_KEY_DIR)
- img = open(os.getcwd()+'/tests/test_images/dirty.jpg')
- img_metadata = store.metadata_handler(img.name)
- self.assertFalse(img_metadata.is_clean(), "The file is dirty.")
- codename = self._new_codename()
- rv = self.client.post('/submit', data=dict(
- msg="",
- fh=(img, 'dirty.jpg'),
- ), follow_redirects=True)
- self.assertEqual(rv.status_code, 200)
- self.assertIn(escape("%s '%s'. %s" % (source.SUBMIT_DOC_NOTIFY_STR, 'dirty.jpg', source.SUBMIT_CODENAME_NOTIFY_STR)), rv.data)
-
- store_dirs = [os.path.join(config.STORE_DIR,d) for d in os.listdir(config.STORE_DIR) if os.path.isdir(os.path.join(config.STORE_DIR,d))]
- latest_subdir = max(store_dirs, key=os.path.getmtime)
- zip_gpg_files = [os.path.join(latest_subdir,f) for f in os.listdir(latest_subdir) if os.path.isfile(os.path.join(latest_subdir,f))]
- self.assertEqual(len(zip_gpg_files), 1)
- zip_gpg = zip_gpg_files[0]
-
- zip_gpg_file = open(zip_gpg)
- decrypted_data = self.gpg.decrypt_file(zip_gpg_file)
- self.assertTrue(decrypted_data.ok, 'Checking the integrity of the data after decryption.')
-
- s = StringIO(decrypted_data.data)
- zip_file = zipfile.ZipFile(s, 'r')
- clean_file = open(os.path.join(latest_subdir,'dirty.jpg'), 'w+b')
- clean_file.write(zip_file.read('dirty.jpg'))
- clean_file.seek(0)
- zip_file.close()
-
- # check for the actual file been clean
- clean_file_metadata = store.metadata_handler(clean_file.name)
- self.assertFalse(clean_file_metadata.is_clean(), "the file is was not cleaned.")
- zip_gpg_file.close()
- clean_file.close()
- img.close()
-
def test_submit_clean_file(self):
img = open(os.getcwd()+'/tests/test_images/clean.jpg')
codename = self._new_codename()
| Flask logging disappeared
At some point, running `python {source,journalist}.py` stopped printing the familiar (and useful) default Flask logging of requests, responses, and logger messages. We should get it back; it's very helpful for debugging.
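A minimal sketch (not the project's actual fix) of how the default request/log output could be restored when running the development server; the `"werkzeug"` logger name and the log levels are standard Flask/Werkzeug behavior, while the host and port values here are just placeholders:

```python
import logging
from flask import Flask

app = Flask(__name__)

if __name__ == "__main__":
    # Werkzeug's development server logs each request through the "werkzeug"
    # logger; if another module raised its level or removed its handlers,
    # the familiar request lines stop appearing.
    werkzeug_logger = logging.getLogger("werkzeug")
    werkzeug_logger.setLevel(logging.INFO)
    if not werkzeug_logger.handlers:
        werkzeug_logger.addHandler(logging.StreamHandler())

    # app.logger messages also need a sufficiently low level to show up.
    app.logger.setLevel(logging.DEBUG)

    app.run(host="0.0.0.0", port=8080, debug=True)
```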
Pull latest version of mat and verify the digital signature
The source requirements text file tells the SecureDrop installation process to download version 0.4.2 of mat and install it without ever verifying the digital signature. This process should ideally be updated to (1) download the latest version (0.5.2) and (2) verify the digital signature.
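A hedged sketch of what the verification step could look like in the install process; the artifact filenames are placeholders for whatever the MAT 0.5.2 release actually ships, and the MAT release signing key is assumed to have been imported and pinned beforehand:

```python
import subprocess

# Placeholder artifact names; the real tarball and detached signature names
# depend on the MAT release being downloaded.
TARBALL = "mat-0.5.2.tar.xz"
SIGNATURE = "mat-0.5.2.tar.xz.asc"

# `gpg --verify <detached signature> <data>` exits non-zero on a bad or
# unknown signature, so check_call aborts the install before anything is
# unpacked.
subprocess.check_call(["gpg", "--verify", SIGNATURE, TARBALL])
```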
| 2014-08-02T23:36:09Z | [] | [] |
|
freedomofpress/securedrop | 593 | freedomofpress__securedrop-593 | [
"590"
] | 6f84534d9622c02b1bb1dc593784622991825d3d | diff --git a/securedrop/journalist.py b/securedrop/journalist.py
--- a/securedrop/journalist.py
+++ b/securedrop/journalist.py
@@ -289,9 +289,9 @@ def bulk_download(sid, docs_selected):
except NoResultFound as e:
app.logger.error("Could not mark " + doc + " as downloaded: %s" % (e,))
db_session.commit()
- zip = store.get_bulk_archive(filenames)
+ zip = store.get_bulk_archive(filenames, zip_directory=source.journalist_filename())
return send_file(zip.name, mimetype="application/zip",
- attachment_filename=source.journalist_designation + ".zip",
+ attachment_filename=source.journalist_filename() + ".zip",
as_attachment=True)
diff --git a/securedrop/store.py b/securedrop/store.py
--- a/securedrop/store.py
+++ b/securedrop/store.py
@@ -66,12 +66,15 @@ def path(*s):
return absolute
-def get_bulk_archive(filenames):
+def get_bulk_archive(filenames, zip_directory=''):
zip_file = tempfile.NamedTemporaryFile(prefix='tmp_securedrop_bulk_dl_')
with zipfile.ZipFile(zip_file, 'w') as zip:
for filename in filenames:
verify(filename)
- zip.write(filename, arcname=os.path.basename(filename))
+ zip.write(filename, arcname=os.path.join(
+ zip_directory,
+ os.path.basename(filename)
+ ))
return zip_file
| diff --git a/securedrop/tests/test_unit.py b/securedrop/tests/test_unit.py
--- a/securedrop/tests/test_unit.py
+++ b/securedrop/tests/test_unit.py
@@ -259,6 +259,9 @@ def test_bulk_download(self):
self.assertEqual(rv.status_code, 200)
self.assertEqual(rv.content_type, 'application/zip')
self.assertTrue(zipfile.is_zipfile(StringIO(rv.data)))
+ self.assertTrue(zipfile.ZipFile(StringIO(rv.data)).getinfo(
+ os.path.join(source.journalist_filename(),files[0])
+ ))
class TestIntegration(unittest.TestCase):
| Add folder with the source's name to the .zip
When a journalist downloads a new submission and extracts the archive, she is left with a number of files in the same directory as the .zip file. It would be great if a folder containing the files were created instead; the name of the folder should be the code name given to the source.
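The patch above does this by prefixing each archive member's `arcname` with a directory; a small self-contained sketch of the same idea (the function name, folder name, and file paths here are placeholders):

```python
import os
import zipfile

def make_bulk_archive(zip_path, filenames, folder_name):
    # Writing each file under an arcname prefixed with folder_name makes the
    # archive extract into a single directory named after the source instead
    # of littering the journalist's working directory with loose files.
    with zipfile.ZipFile(zip_path, "w") as zf:
        for filename in filenames:
            zf.write(filename,
                     arcname=os.path.join(folder_name,
                                          os.path.basename(filename)))

# e.g. make_bulk_archive("collection.zip", ["/tmp/1-msg.gpg"], "source-codename")
```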
| 2014-09-18T22:25:45Z | [] | [] |
|
freedomofpress/securedrop | 603 | freedomofpress__securedrop-603 | [
"602"
] | bc7b1ce51a099e63510a30929bc5cd570b93c1f4 | diff --git a/securedrop/journalist.py b/securedrop/journalist.py
--- a/securedrop/journalist.py
+++ b/securedrop/journalist.py
@@ -276,7 +276,8 @@ def bulk_delete(sid, docs_selected):
confirm_delete = bool(request.form.get('confirm_delete', False))
if confirm_delete:
for doc in docs_selected:
- db_session.delete(Submission.query.filter(Submission.filename == doc['name']).one())
+ if not doc['name'].endswith('reply.gpg'):
+ db_session.delete(Submission.query.filter(Submission.filename == doc['name']).one())
fn = store.path(sid, doc['name'])
store.secure_unlink(fn)
db_session.commit()
| diff --git a/securedrop/tests/test_unit.py b/securedrop/tests/test_unit.py
--- a/securedrop/tests/test_unit.py
+++ b/securedrop/tests/test_unit.py
@@ -484,11 +484,13 @@ def helper_test_reply(self, test_reply, expected_success=True):
# sending a reply
_block_on_reply_keypair_gen(codename)
- rv = self.journalist_app.post('/reply', data=dict(
- sid=sid,
- msg=test_reply
- ), follow_redirects=True)
- self.assertEqual(rv.status_code, 200)
+ # Create 2 replies to test deleting on journalist and source interface
+ for i in range(2):
+ rv = self.journalist_app.post('/reply', data=dict(
+ sid=sid,
+ msg=test_reply
+ ), follow_redirects=True)
+ self.assertEqual(rv.status_code, 200)
if not expected_success:
pass
@@ -502,6 +504,11 @@ def helper_test_reply(self, test_reply, expected_success=True):
_block_on_reply_keypair_gen(codename)
+ # Test deleting reply on the journalist interface
+ soup = BeautifulSoup(rv.data)
+ last_reply_number = len(soup.select('input[name="doc_names_selected"]')) - 1
+ self.helper_filenames_delete(soup, last_reply_number)
+
with self.source_app as source_app:
rv = source_app.post('/login', data=dict(codename=codename), follow_redirects=True)
self.assertEqual(rv.status_code, 200)
| Database error when trying to delete replies in the journalist interface
An error is thrown when deleting replies in the journalist interface: an attempt is made to remove a record for the reply from the database, but replies are only recorded on the filesystem.
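A tiny illustration of the distinction behind the fix above: reply files follow the `<n>-reply.gpg` naming seen in the diffs and never get a row in the submissions table, so the filename alone is enough to decide whether a database delete should be attempted (the example filenames below are placeholders):

```python
def has_db_record(filename):
    # Submissions ("...-msg.gpg", "...-doc.zip.gpg") are tracked in the
    # database; replies ("...-reply.gpg") exist only on the filesystem.
    return not filename.endswith('reply.gpg')

assert has_db_record('2-example-source-msg.gpg')
assert not has_db_record('3-reply.gpg')
```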
| 2014-09-23T18:46:28Z | [] | [] |
|
freedomofpress/securedrop | 633 | freedomofpress__securedrop-633 | [
"607"
] | a2700b723f356701b809d0c103a85353f3ac65f7 | diff --git a/securedrop/crypto_util.py b/securedrop/crypto_util.py
--- a/securedrop/crypto_util.py
+++ b/securedrop/crypto_util.py
@@ -139,21 +139,26 @@ def get_key_by_fingerprint(fingerprint):
return matches[0] if matches else None
-def encrypt(fp, s, output=None):
- r"""
- >>> key = genkeypair('randomid', 'randomid')
- >>> encrypt('randomid', "Goodbye, cruel world!")[:45]
- '-----BEGIN PGP MESSAGE-----\nVersion: GnuPG v2'
- """
+def encrypt(plaintext, fingerprints, output=None):
+ # Verify the output path
if output:
store.verify(output)
- fp = fp.replace(' ', '')
- if isinstance(s, unicode):
- s = s.encode('utf8')
- if isinstance(s, str):
- out = gpg.encrypt(s, fp, output=output, always_trust=True)
- else:
- out = gpg.encrypt_file(s, fp, output=output, always_trust=True)
+
+ # Remove any spaces from provided fingerpints
+ # GPG outputs fingerprints with spaces for readability, but requires the
+ # spaces to be removed when using fingerprints to specify recipients.
+ if not isinstance(fingerprints, (list, tuple)):
+ fingerprints = [fingerprints,]
+ fingerprints = [ fpr.replace(' ', '') for fpr in fingerprints ]
+
+ if isinstance(plaintext, unicode):
+ plaintext = plaintext.encode('utf8')
+
+ encrypt_fn = gpg.encrypt if isinstance(plaintext, str) else gpg.encrypt_file
+ out = encrypt_fn(plaintext,
+ *fingerprints,
+ output=output,
+ always_trust=True)
if out.ok:
return out.data
else:
diff --git a/securedrop/journalist.py b/securedrop/journalist.py
--- a/securedrop/journalist.py
+++ b/securedrop/journalist.py
@@ -428,8 +428,9 @@ def reply():
g.source.interaction_count += 1
filename = "{0}-reply.gpg".format(g.source.interaction_count)
- crypto_util.encrypt(crypto_util.getkey(g.sid), msg, output=
- store.path(g.sid, filename))
+ crypto_util.encrypt(msg,
+ [ crypto_util.getkey(g.sid), config.JOURNALIST_KEY ],
+ output=store.path(g.sid, filename))
db_session.commit()
return render_template('reply.html', sid=g.sid,
diff --git a/securedrop/store.py b/securedrop/store.py
--- a/securedrop/store.py
+++ b/securedrop/store.py
@@ -84,18 +84,18 @@ def save_file_submission(sid, count, journalist_filename, filename, stream):
s = StringIO()
with zipfile.ZipFile(s, 'w') as zf:
zf.writestr(sanitized_filename, stream.read())
- s.reset()
+ s.seek(0)
filename = "{0}-{1}-doc.zip.gpg".format(count, journalist_filename)
file_loc = path(sid, filename)
- crypto_util.encrypt(config.JOURNALIST_KEY, s, file_loc)
+ crypto_util.encrypt(s, config.JOURNALIST_KEY, file_loc)
return filename
def save_message_submission(sid, count, journalist_filename, message):
filename = "{0}-{1}-msg.gpg".format(count, journalist_filename)
msg_loc = path(sid, filename)
- crypto_util.encrypt(config.JOURNALIST_KEY, message, msg_loc)
+ crypto_util.encrypt(message, config.JOURNALIST_KEY, msg_loc)
return filename
| diff --git a/securedrop/tests/test_unit_integration.py b/securedrop/tests/test_unit_integration.py
--- a/securedrop/tests/test_unit_integration.py
+++ b/securedrop/tests/test_unit_integration.py
@@ -6,6 +6,8 @@
from cStringIO import StringIO
import zipfile
from time import sleep
+import tempfile
+import shutil
import mock
@@ -221,6 +223,37 @@ def test_reply_normal(self):
def test_reply_unicode(self):
self.helper_test_reply("Teşekkürler", True)
+ def _can_decrypt_with_key(self, msg, key_fpr, passphrase=None):
+ """
+ Test that the given GPG message can be decrypted with the given key
+ (identified by its fingerprint).
+ """
+ # GPG does not provide a way to specify which key to use to decrypt a
+ # message. Since the default keyring that we use has both the
+ # `config.JOURNALIST_KEY` and all of the reply keypairs, there's no way
+ # to use it to test whether a message is decryptable with a specific
+ # key.
+ gpg_tmp_dir = tempfile.mkdtemp()
+ gpg = gnupg.GPG(homedir=gpg_tmp_dir)
+
+ # Export the key of interest from the application's keyring
+ pubkey = self.gpg.export_keys(key_fpr)
+ seckey = self.gpg.export_keys(key_fpr, secret=True)
+ # Import it into our isolated temporary GPG directory
+ for key in (pubkey, seckey):
+ gpg.import_keys(key)
+
+ # Attempt decryption with the given key
+ if passphrase:
+ passphrase = crypto_util.hash_codename(passphrase,
+ salt=crypto_util.SCRYPT_GPG_PEPPER)
+ decrypted_data = gpg.decrypt(msg, passphrase=passphrase)
+ self.assertTrue(decrypted_data.ok,
+ "Could not decrypt msg with key, gpg says: {}".format(decrypted_data.status))
+
+ # We have to clean up the temporary GPG dir
+ shutil.rmtree(gpg_tmp_dir)
+
def helper_test_reply(self, test_reply, expected_success=True):
test_msg = "This is a test message."
@@ -289,10 +322,24 @@ def helper_test_reply(self, test_reply, expected_success=True):
rv = journalist_app.get(col_url)
self.assertIn("reply-", rv.data)
- _block_on_reply_keypair_gen(codename)
+ soup = BeautifulSoup(rv.data)
+
+ # Download the reply and verify that it can be decrypted with the
+ # journalist's key as well as the source's reply key
+ sid = soup.select('input[name="sid"]')[0]['value']
+ checkbox_values = [ soup.select('input[name="doc_names_selected"]')[1]['value'] ]
+ rv = self.journalist_app.post('/bulk', data=dict(
+ sid=sid,
+ action='download',
+ doc_names_selected=checkbox_values
+ ), follow_redirects=True)
+ self.assertEqual(rv.status_code, 200)
+ pgp_msg_re = r'-----BEGIN PGP MESSAGE-----.*-----END PGP MESSAGE-----'
+ pgp_msg = re.search(pgp_msg_re, rv.data, re.MULTILINE|re.DOTALL).group(0)
+ self._can_decrypt_with_key(pgp_msg, config.JOURNALIST_KEY)
+ self._can_decrypt_with_key(pgp_msg, crypto_util.getkey(sid), codename)
# Test deleting reply on the journalist interface
- soup = BeautifulSoup(rv.data)
last_reply_number = len(soup.select('input[name="doc_names_selected"]')) - 1
self.helper_filenames_delete(soup, last_reply_number)
@@ -305,7 +352,7 @@ def helper_test_reply(self, test_reply, expected_success=True):
if not expected_success:
# there should be no reply
- self.assertTrue("You have received a reply." not in rv.data)
+ self.assertNotIn("You have received a reply.", rv.data)
else:
self.assertIn(
"You have received a reply. For your security, please delete all replies when you're done with them.", rv.data)
| Encrypt replies to application key as well
We currently only encrypt replies to the source's generated key, which means that once they're sent, only the source can read them. To help provide context for journalists, it would be helpful to also encrypt with the journalist's application key (the same key used to encrypt submissions). That way, they will be able to decrypt their replies at the same time they decrypt submissions, and will be able to read the whole back-and-forth at will.
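A sketch of the desired behavior, following the calling convention visible in the patch above (the keyring path and both fingerprints are placeholders):

```python
import gnupg

gpg = gnupg.GPG(homedir="/var/lib/securedrop/keys")  # placeholder path

source_reply_key = "SOURCE_REPLY_KEY_FINGERPRINT"      # placeholder
journalist_app_key = "JOURNALIST_APP_KEY_FINGERPRINT"  # placeholder

# Encrypting to both fingerprints produces a single ciphertext that either
# the source's reply key or the journalist application key can decrypt.
ciphertext = gpg.encrypt("This is a reply.",
                         source_reply_key, journalist_app_key,
                         always_trust=True)
```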
| 2014-10-11T21:04:24Z | [] | [] |
|
freedomofpress/securedrop | 634 | freedomofpress__securedrop-634 | [
"549"
] | e2b49ed6d7b15b1614fecef8fb90d41dc9e65bee | diff --git a/securedrop/source.py b/securedrop/source.py
--- a/securedrop/source.py
+++ b/securedrop/source.py
@@ -192,7 +192,7 @@ def async_genkey(sid, codename):
# if this was a redirect from the login page, flash a message if there are
# no replies to clarify "check for replies" flow (#393)
if request.args.get('from_login') == '1' and len(replies) == 0:
- flash("There are no replies at this time. You can submit more documents from this code name below.", "notification")
+ flash("There are no replies at this time. You can submit more documents from this codename below.", "notification")
return render_template('lookup.html', codename=g.codename, replies=replies,
flagged=g.source.flagged, haskey=crypto_util.getkey(g.sid))
| diff --git a/securedrop/tests/test_unit_source.py b/securedrop/tests/test_unit_source.py
--- a/securedrop/tests/test_unit_source.py
+++ b/securedrop/tests/test_unit_source.py
@@ -49,7 +49,7 @@ def test_generate(self):
session_codename = session['codename']
self.assertIn("Remember this code and keep it secret", rv.data)
self.assertIn(
- "To protect your identity, we're assigning you a unique code name.",
+ "To protect your identity, we're assigning you a unique codename.",
rv.data)
codename = self._find_codename(rv.data)
# default codename length is 8 words
| Inconsistent use of "codename" vs "code name"
We should use either "codename" or "code name" consistently throughout. For example:
- on http://localhost:8080/generate it says “remember this code”
- on /generate it says "we're assigning you a unique code name.”
- on /generate it says “already have a codename?”
- on /login it says “enter your codename”
- on /lookup it says "You can submit more documents from this code name below.”
- on /lookup it says "Remember, your codename is”
I prefer "codename".
| This is a duplicate of #495, but since this issue has more information I will go ahead and close the other one.
| 2014-10-11T21:29:57Z | [] | [] |
freedomofpress/securedrop | 647 | freedomofpress__securedrop-647 | [
"562"
] | f420b2b8cc278787ccc93da5481c8fe2237b4d55 | diff --git a/securedrop/db.py b/securedrop/db.py
--- a/securedrop/db.py
+++ b/securedrop/db.py
@@ -172,7 +172,12 @@ def _gen_salt(self, salt_bytes=32):
def _scrypt_hash(self, password, salt, params=None):
if not params:
params = self._SCRYPT_PARAMS
- return scrypt.hash(str(password), salt, **params)
+ # try clause for debugging intermittent scrypt "could not compute hash" error
+ try:
+ hash = scrypt.hash(str(password), salt, **params)
+ except scrypt.error as e:
+ print "Scrypt hashing failed for password='{}', salt='{}', params='{}', traceback: {}".format(password, salt, params, e)
+ return hash
def set_password(self, password):
self.pw_salt = self._gen_salt()
diff --git a/securedrop/journalist.py b/securedrop/journalist.py
--- a/securedrop/journalist.py
+++ b/securedrop/journalist.py
@@ -18,6 +18,7 @@
from db import (db_session, Source, Submission, SourceStar, get_one_or_else,
Journalist, NoResultFound, WrongPasswordException,
BadTokenException)
+import worker
app = Flask(__name__, template_folder=config.JOURNALIST_TEMPLATES_DIR)
app.config.from_object(config.JournalistInterfaceFlaskConfig)
@@ -339,7 +340,7 @@ def col(sid):
def delete_collection(source_id):
# Delete the source's collection of submissions
- store.delete_source_directory(source_id)
+ worker.enqueue(store.delete_source_directory, source_id)
# Delete the source's reply keypair
crypto_util.delete_reply_keypair(source_id)
@@ -494,7 +495,7 @@ def bulk_delete(sid, docs_selected):
if not doc['name'].endswith('reply.gpg'):
db_session.delete(Submission.query.filter(Submission.filename == doc['name']).one())
fn = store.path(sid, doc['name'])
- store.secure_unlink(fn)
+ worker.enqueue(store.secure_unlink, fn)
db_session.commit()
return render_template('delete.html', sid=sid,
codename=source.journalist_designation,
diff --git a/securedrop/manage.py b/securedrop/manage.py
--- a/securedrop/manage.py
+++ b/securedrop/manage.py
@@ -7,8 +7,10 @@
import unittest
import readline # makes the add_admin prompt kick ass
from getpass import getpass
+import signal
import qrcode
+import psutil
from db import db_session, Journalist
@@ -21,12 +23,42 @@
os.environ['SECUREDROP_ENV'] = 'dev'
+WORKER_PIDFILE = "/tmp/test_rqworker.pid"
+
+def get_pid_from_pidfile(pid_file_name):
+ with open(pid_file_name) as fp:
+ return int(fp.read())
+
+def _start_test_rqworker(config):
+ # needed to determine the directory to run the worker in
+ worker_running = False
+ try:
+ if psutil.pid_exists(get_pid_from_pidfile(WORKER_PIDFILE)):
+ worker_running = True
+ except IOError:
+ pass
+
+ if not worker_running:
+ tmp_logfile = open("/tmp/test_rqworker.log", "w")
+ subprocess.Popen(
+ [
+ "rqworker", "test",
+ "-P", config.SECUREDROP_ROOT,
+ "--pid", WORKER_PIDFILE
+ ],
+ stdout=tmp_logfile,
+ stderr=subprocess.STDOUT)
+
+def _stop_test_rqworker():
+ os.kill(get_pid_from_pidfile(WORKER_PIDFILE), signal.SIGTERM)
+
def start():
import config
source_rc = subprocess.call(['start-stop-daemon', '--start', '-b', '--quiet', '--pidfile',
config.SOURCE_PIDFILE, '--startas', '/bin/bash', '--', '-c', 'cd /vagrant/securedrop && python source.py'])
journo_rc = subprocess.call(['start-stop-daemon', '--start', '-b', '--quiet', '--pidfile',
config.JOURNALIST_PIDFILE, '--startas', '/bin/bash', '--', '-c', 'cd /vagrant/securedrop && python journalist.py'])
+
if source_rc + journo_rc == 0:
print "The web application is running, and available on your Vagrant host at the following addresses:"
print "Source interface: localhost:8080"
@@ -52,8 +84,12 @@ def test():
Runs the test suite
"""
os.environ['SECUREDROP_ENV'] = 'test'
+ import config
+ _start_test_rqworker(config)
test_cmds = ["py.test", "./test.sh"]
- sys.exit(int(any([subprocess.call(cmd) for cmd in test_cmds])))
+ test_rc = int(any([subprocess.call(cmd) for cmd in test_cmds]))
+ _stop_test_rqworker()
+ sys.exit(test_rc)
def test_unit():
"""
diff --git a/securedrop/worker.py b/securedrop/worker.py
new file mode 100644
--- /dev/null
+++ b/securedrop/worker.py
@@ -0,0 +1,11 @@
+import os
+
+from redis import Redis
+from rq import Queue
+
+queue_name = 'test' if os.environ['SECUREDROP_ENV'] == 'test' else 'default'
+
+q = Queue(name=queue_name, connection=Redis())
+
+def enqueue(*args, **kwargs):
+ q.enqueue(*args, **kwargs)
| diff --git a/securedrop/tests/common.py b/securedrop/tests/common.py
--- a/securedrop/tests/common.py
+++ b/securedrop/tests/common.py
@@ -1,11 +1,13 @@
-import config
-import db
import os
-import gnupg
import shutil
import uuid
-import crypto_util
+import subprocess
+
+import gnupg
+import config
+import db
+import crypto_util
def clean_root():
shutil.rmtree(config.SECUREDROP_DATA_ROOT)
@@ -61,6 +63,11 @@ def shared_setup():
# Do tests that should always run on app startup
crypto_util.do_runtime_tests()
+ # Start the Python-RQ worker if it's not already running
+ if not os.path.exists(config.WORKER_PIDFILE):
+ subprocess.Popen(["rqworker", "-P", config.SECUREDROP_ROOT,
+ "--pid", config.WORKER_PIDFILE])
+
def shared_teardown():
clean_root()
diff --git a/securedrop/tests/test_unit_integration.py b/securedrop/tests/test_unit_integration.py
--- a/securedrop/tests/test_unit_integration.py
+++ b/securedrop/tests/test_unit_integration.py
@@ -8,6 +8,7 @@
from time import sleep
import tempfile
import shutil
+import time
import mock
@@ -23,6 +24,7 @@
import journalist
import common
from db import db_session, Journalist
+import store
def _block_on_reply_keypair_gen(codename):
@@ -33,6 +35,25 @@ def _block_on_reply_keypair_gen(codename):
class TestIntegration(unittest.TestCase):
+ def _login_user(self):
+ self.journalist_app.post('/login', data=dict(
+ username=self.user.username,
+ password=self.user_pw,
+ token=self.user.totp.now()),
+ follow_redirects=True)
+
+ def _wait_for(self, function_with_assertion, timeout=5):
+ """Polling wait for an arbitrary assertion."""
+ # Thanks to http://chimera.labs.oreilly.com/books/1234000000754/ch20.html#_a_common_selenium_problem_race_conditions
+ start_time = time.time()
+ while time.time() - start_time < timeout:
+ try:
+ return function_with_assertion()
+ except AssertionError:
+ time.sleep(0.1)
+ # one more try, which will raise any errors if they are outstanding
+ return function_with_assertion()
+
def setUp(self):
common.shared_setup()
@@ -56,13 +77,6 @@ def setUp(self):
db_session.commit()
self._login_user()
- def _login_user(self):
- self.journalist_app.post('/login', data=dict(
- username=self.user.username,
- password=self.user_pw,
- token=self.user.totp.now()),
- follow_redirects=True)
-
def tearDown(self):
common.shared_teardown()
@@ -138,6 +152,13 @@ def test_submit_message(self):
self.assertEqual(rv.status_code, 200)
self.assertIn("No documents to display.", rv.data)
+ # the file should be deleted from the filesystem
+ # since file deletion is handled by a polling worker, this test needs
+ # to wait for the worker to get the job and execute it
+ self._wait_for(
+ lambda: self.assertFalse(os.path.exists(store.path(sid, doc_name)))
+ )
+
def test_submit_file(self):
"""When a source creates an account, test that a new entry appears in the journalist interface"""
test_file_contents = "This is a test file."
@@ -217,6 +238,13 @@ def test_submit_file(self):
self.assertEqual(rv.status_code, 200)
self.assertIn("No documents to display.", rv.data)
+ # the file should be deleted from the filesystem
+ # since file deletion is handled by a polling worker, this test needs
+ # to wait for the worker to get the job and execute it
+ self._wait_for(
+ lambda: self.assertFalse(os.path.exists(store.path(sid, doc_name)))
+ )
+
def test_reply_normal(self):
self.helper_test_reply("This is a test reply.", True)
@@ -365,6 +393,12 @@ def helper_test_reply(self, test_reply, expected_success=True):
), follow_redirects=True)
self.assertEqual(rv.status_code, 200)
self.assertIn("Reply deleted", rv.data)
+
+ # Make sure the reply is deleted from the filesystem
+ self._wait_for(
+ lambda: self.assertFalse(os.path.exists(store.path(sid, msgid)))
+ )
+
common.logout(source_app)
def test_delete_collection(self):
@@ -396,6 +430,11 @@ def test_delete_collection(self):
self.assertIn(escape("%s's collection deleted" % (col_name,)), rv.data)
self.assertIn("No documents have been submitted!", rv.data)
+ # Make sure the collection is deleted from the filesystem
+ self._wait_for(
+ lambda: self.assertFalse(os.path.exists(store.path(sid)))
+ )
+
def test_delete_collections(self):
"""Test the "delete selected" checkboxes on the index page that can be
used to delete multiple collections"""
@@ -422,13 +461,10 @@ def test_delete_collections(self):
self.assertEqual(rv.status_code, 200)
self.assertIn("%s collections deleted" % (num_sources,), rv.data)
- # TODO: functional tests (selenium)
- # This code just tests the underlying API and *does not* test the
- # interactions due to the Javascript in journalist.js. Once we have
- # functional tests, we should add tests for:
- # 1. Warning dialog appearance
- # 2. "Don't show again" checkbox behavior
- # 2. Correct behavior on "yes" and "no" buttons
+ # Make sure the collections are deleted from the filesystem
+ self._wait_for(
+ lambda: self.assertFalse(any([ os.path.exists(store.path(sid)) for sid in checkbox_values ]))
+ )
def test_filenames(self):
"""Test pretty, sequential filenames when source uploads messages and files"""
@@ -518,5 +554,10 @@ def helper_filenames_delete(self, soup, i):
self.assertEqual(rv.status_code, 200)
self.assertIn("File permanently deleted.", rv.data)
+ # Make sure the files were deleted from the filesystem
+ self._wait_for(
+ lambda: self.assertFalse(any([ os.path.exists(store.path(sid, doc_name)) for doc_name in checkbox_values ]))
+ )
+
if __name__ == "__main__":
unittest.main(verbosity=2)
| Large files take forever to be deleted and hang the journalist interface
We secure-remove files when they are deleted from the journalist interface. This can take a long time, especially if the files were very large, due to the repeated passes over the file performed by the secure-remove tool. We should do this asynchronously, updating the UI appropriately, and may want to display a notification to explain why it is taking so long.
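The patch above addresses this with a Redis-backed job queue; a minimal sketch of that pattern (module names follow the diff, and a separate `rqworker` process is assumed to be running):

```python
from redis import Redis
from rq import Queue

import store  # securedrop module providing secure_unlink()

q = Queue(connection=Redis())

def delete_file_async(path):
    # Instead of blocking the request while srm makes repeated passes over a
    # large file, hand the deletion to a background worker and return
    # immediately so the UI stays responsive.
    q.enqueue(store.secure_unlink, path)
```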
| 2014-10-15T22:30:44Z | [] | [] |
|
freedomofpress/securedrop | 676 | freedomofpress__securedrop-676 | [
"637"
] | 5aa5c96d62b79554eb549d2da0844023026610a8 | diff --git a/securedrop/source.py b/securedrop/source.py
--- a/securedrop/source.py
+++ b/securedrop/source.py
@@ -132,7 +132,7 @@ def generate():
# page, or inform them that they're logged in.
session.pop('logged_in', None)
- num_words = 8
+ num_words = 7
if request.method == 'POST':
num_words = int(request.form['number-words'])
if num_words not in range(7, 11):
| diff --git a/securedrop/tests/test_unit_source.py b/securedrop/tests/test_unit_source.py
--- a/securedrop/tests/test_unit_source.py
+++ b/securedrop/tests/test_unit_source.py
@@ -52,8 +52,8 @@ def test_generate(self):
"To protect your identity, we're assigning you a unique codename.",
rv.data)
codename = self._find_codename(rv.data)
- # default codename length is 8 words
- self.assertEqual(len(codename.split()), 8)
+ # default codename length is 7 words
+ self.assertEqual(len(codename.split()), 7)
# codename is also stored in the session - make sure it matches the
# codename displayed to the source
self.assertEqual(codename, escape(session_codename))
| Seven word code name option is weaker than the recommended default
When the source generates a new code name, the default is to recommend a string that is eight words long. The source can select strings that contain more words, but can also select one with fewer words than the recommended default. Available options should ideally only be stronger than the default recommendation, not weaker.
| I think we should change the recommended default to be 7 words instead of 8. Here are the reasons why:
1. 7 word codenames are easier to memorize. They seem (at least to me) to have a nice 4-3 rhythm to them, similar to a telephone number.
2. They provide a sufficient amount of entropy. `math.log(6699**7, 2) = 88.96811025580541`, and since we use scrypt with `N = 2**14`, the stored hashes have an additional 14 bits of entropy for a total of 102.96.
3. Available options will only be stronger than the default recommendation, as noted in the original comment.
I think 102.96 bits of entropy is probably good enough.
It would be great if we could make it 128 bits, because that's what cryptographers say makes it completely impossible to crack, assuming you could use all of the computing power available on Earth for hundreds of thousands of years. But 102.96 is really good anyway.
I think if sources want to memorize their codenames, making them 7 long is fine. If sources plan on writing them down, then they could do 9 words to pass the 128-bit threshold.
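A quick way to reproduce the arithmetic above (the wordlist size and the extra 14 bits for scrypt with `N = 2**14` are taken from this comment):

```python
import math

WORDLIST_SIZE = 6699  # size of the wordlist cited above

for words in (7, 8, 9):
    passphrase_bits = words * math.log(WORDLIST_SIZE, 2)
    # 7 words -> ~88.97 bits, ~102.97 with the scrypt work factor added.
    print("{} words: {:.2f} bits ({:.2f} with the +14 scrypt bits)".format(
        words, passphrase_bits, passphrase_bits + 14))
```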
| 2014-10-22T23:54:11Z | [] | [] |
freedomofpress/securedrop | 703 | freedomofpress__securedrop-703 | [
"626"
] | 481e12fefdf9b789b58e5077c862cbbdd9748c2d | diff --git a/securedrop/crypto_util.py b/securedrop/crypto_util.py
--- a/securedrop/crypto_util.py
+++ b/securedrop/crypto_util.py
@@ -158,7 +158,8 @@ def encrypt(plaintext, fingerprints, output=None):
out = encrypt_fn(plaintext,
*fingerprints,
output=output,
- always_trust=True)
+ always_trust=True,
+ armor=False)
if out.ok:
return out.data
else:
| diff --git a/securedrop/tests/test_unit_integration.py b/securedrop/tests/test_unit_integration.py
--- a/securedrop/tests/test_unit_integration.py
+++ b/securedrop/tests/test_unit_integration.py
@@ -277,7 +277,7 @@ def _can_decrypt_with_key(self, msg, key_fpr, passphrase=None):
salt=crypto_util.SCRYPT_GPG_PEPPER)
decrypted_data = gpg.decrypt(msg, passphrase=passphrase)
self.assertTrue(decrypted_data.ok,
- "Could not decrypt msg with key, gpg says: {}".format(decrypted_data.status))
+ "Could not decrypt msg with key, gpg says: {}".format(decrypted_data.stderr))
# We have to clean up the temporary GPG dir
shutil.rmtree(gpg_tmp_dir)
@@ -362,10 +362,10 @@ def helper_test_reply(self, test_reply, expected_success=True):
doc_names_selected=checkbox_values
), follow_redirects=True)
self.assertEqual(rv.status_code, 200)
- pgp_msg_re = r'-----BEGIN PGP MESSAGE-----.*-----END PGP MESSAGE-----'
- pgp_msg = re.search(pgp_msg_re, rv.data, re.MULTILINE|re.DOTALL).group(0)
- self._can_decrypt_with_key(pgp_msg, config.JOURNALIST_KEY)
- self._can_decrypt_with_key(pgp_msg, crypto_util.getkey(sid), codename)
+ zf = zipfile.ZipFile(StringIO(rv.data), 'r')
+ data = zf.read(zf.namelist()[0])
+ self._can_decrypt_with_key(data, config.JOURNALIST_KEY)
+ self._can_decrypt_with_key(data, crypto_util.getkey(sid), codename)
# Test deleting reply on the journalist interface
last_reply_number = len(soup.select('input[name="doc_names_selected"]')) - 1
| Don't armor encrypted submissions
SecureDrop currently armors encrypted submissions. This bloats the size of stored submissions significantly due to the encoding. For example, a 93 MB upload results in a 125.7 MB submission for the journalist to download.
Downloading anything over Tor is very slow (the aforementioned submission took, on average, 9 minutes to download). Therefore, unnecessarily increasing the size of submissions severely impacts usability. There is no reason that I can think of to ASCII-armor submissions: they are uploaded and downloaded over HTTP, which automatically handles encoding and decoding binary data.
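A hedged sketch of the change's effect, mirroring the keyword used in the patch above (the keyring path, input filename, and recipient fingerprint are placeholders):

```python
import gnupg

gpg = gnupg.GPG(homedir="/var/lib/securedrop/keys")  # placeholder path

# armor=False writes binary OpenPGP packets instead of a base64-encoded
# "-----BEGIN PGP MESSAGE-----" block, avoiding the roughly one-third size
# overhead described above for large submissions.
with open("submission.zip", "rb") as f:
    gpg.encrypt_file(f, "JOURNALIST_KEY_FINGERPRINT",  # placeholder fingerprint
                     output="1-doc.zip.gpg",
                     always_trust=True,
                     armor=False)
```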
| 2014-10-27T15:50:47Z | [] | [] |
|
freedomofpress/securedrop | 1,055 | freedomofpress__securedrop-1055 | [
"1047"
] | 71defa9c4222d2f343017a3b5e9594866cdfefc4 | diff --git a/securedrop/manage.py b/securedrop/manage.py
--- a/securedrop/manage.py
+++ b/securedrop/manage.py
@@ -22,13 +22,15 @@
# We need to import config in each function because we're running the tests
# directly, so it's important to set the environment correctly, depending on
# development or testing, before importing config.
-#
-# TODO: do we need to store *_PIDFILE in the application config? It seems like
-# an implementation detail that is specifc to this management script.
-
os.environ['SECUREDROP_ENV'] = 'dev'
-WORKER_PIDFILE = "/tmp/test_rqworker.pid"
+# TODO: the PID file for the redis worker is hard-coded below.
+# Ideally this constant would be provided by a test harness.
+# It has been intentionally omitted from `config.py.example`
+# in order to isolate the test vars from prod vars.
+# When refactoring the test suite, the TEST_WORKER_PIDFILE
+# TEST_WORKER_PIDFILE is also hard-coded in `tests/common.py`.
+TEST_WORKER_PIDFILE = "/tmp/securedrop_test_worker.pid"
def get_pid_from_pidfile(pid_file_name):
@@ -40,7 +42,7 @@ def _start_test_rqworker(config):
# needed to determine the directory to run the worker in
worker_running = False
try:
- if psutil.pid_exists(get_pid_from_pidfile(WORKER_PIDFILE)):
+ if psutil.pid_exists(get_pid_from_pidfile(TEST_WORKER_PIDFILE)):
worker_running = True
except IOError:
pass
@@ -51,14 +53,14 @@ def _start_test_rqworker(config):
[
"rqworker", "test",
"-P", config.SECUREDROP_ROOT,
- "--pid", WORKER_PIDFILE,
+ "--pid", TEST_WORKER_PIDFILE,
],
stdout=tmp_logfile,
stderr=subprocess.STDOUT)
def _stop_test_rqworker():
- os.kill(get_pid_from_pidfile(WORKER_PIDFILE), signal.SIGTERM)
+ os.kill(get_pid_from_pidfile(TEST_WORKER_PIDFILE), signal.SIGTERM)
def test():
| diff --git a/securedrop/tests/common.py b/securedrop/tests/common.py
--- a/securedrop/tests/common.py
+++ b/securedrop/tests/common.py
@@ -9,6 +9,14 @@
from db import init_db, db_session, Source, Submission
import crypto_util
+# TODO: the PID file for the redis worker is hard-coded below.
+# Ideally this constant would be provided by a test harness.
+# It has been intentionally omitted from `config.py.example`
+# in order to isolate the test vars from prod vars.
+# When refactoring the test suite, the TEST_WORKER_PIDFILE
+# TEST_WORKER_PIDFILE is also hard-coded in `manage.py`.
+TEST_WORKER_PIDFILE = "/tmp/securedrop_test_worker.pid"
+
def clean_root():
shutil.rmtree(config.SECUREDROP_DATA_ROOT)
@@ -70,9 +78,9 @@ def shared_setup():
crypto_util.do_runtime_tests()
# Start the Python-RQ worker if it's not already running
- if not os.path.exists(config.WORKER_PIDFILE):
+ if not os.path.exists(TEST_WORKER_PIDFILE):
subprocess.Popen(["rqworker", "-P", config.SECUREDROP_ROOT,
- "--pid", config.WORKER_PIDFILE])
+ "--pid", TEST_WORKER_PIDFILE])
def shared_teardown():
diff --git a/spec_tests/Rakefile b/spec_tests/Rakefile
--- a/spec_tests/Rakefile
+++ b/spec_tests/Rakefile
@@ -3,6 +3,7 @@ require 'rspec/core/rake_task'
task :spec => 'spec:all'
+task :staging => 'spec:staging'
task :default => :spec
# this function accepts a hostname and returns a hash
@@ -12,13 +13,22 @@ def roles(host)
roles = []
case host
when /^development$/
- roles << %w(common-development development)
+ (roles << %w(common-development development)).flatten!
when /^build$/
- roles << "build"
- when /^app-staging$/
- roles << %w(common-development common-staging app-staging)
- when /^mon-staging$/
- roles << %w(common-staging mon-staging)
+ (roles << "build").flatten!
+ when /^app-staging/
+ (roles << %w(common-development common-staging app-staging grsecurity)).flatten!
+ when /^mon-staging/
+ (roles << %w(common-staging mon-staging grsecurity)).flatten!
+ end
+
+ # DigitalOcean droplets don't support custom kernels, so remove
+ # any planned spectests for grsecurity. This is only relevant
+ # for the app and mon hosts, so don't delay other tests with check.
+ if host.match(/^(app|mon)/)
+ if `vagrant status #{host} --machine-readable`.match(/#{host},provider-name,digital_ocean/m)
+ roles.delete('grsecurity')
+ end
end
roles
end
@@ -40,6 +50,10 @@ end
namespace :spec do
# assign all hosts to serverspec task run
task :all => hosts.map { |h| h[:name] }
+
+ # assign only /staging/ hosts to serverspec task run
+ task :staging => hosts.select{ |h| h[:name].match(/staging$/) }.map{ |h| h[:name] }
+
hosts.each do |host|
desc "Run spectests against #{host[:name]}"
RSpec::Core::RakeTask.new(host[:name].to_sym) do |t|
diff --git a/spec_tests/spec/app-staging/apache_spec.rb b/spec_tests/spec/app-staging/apache_spec.rb
--- a/spec_tests/spec/app-staging/apache_spec.rb
+++ b/spec_tests/spec/app-staging/apache_spec.rb
@@ -105,7 +105,7 @@
</LimitExcept>
</Directory>
-<Directory #{TEST_VARS['securedrop_code']}>
+<Directory #{property['securedrop_code']}>
Options None
AllowOverride None
<Limit GET POST HEAD>
@@ -140,9 +140,9 @@
# declare source-specific apache configs
source_apache2_config_settings = [
'<VirtualHost 0.0.0.0:80>',
- "DocumentRoot #{TEST_VARS['securedrop_code']}/static",
- "Alias /static #{TEST_VARS['securedrop_code']}/static",
- "WSGIDaemonProcess source processes=2 threads=30 display-name=%{GROUP} python-path=#{TEST_VARS['securedrop_code']}",
+ "DocumentRoot #{property['securedrop_code']}/static",
+ "Alias /static #{property['securedrop_code']}/static",
+ "WSGIDaemonProcess source processes=2 threads=30 display-name=%{GROUP} python-path=#{property['securedrop_code']}",
'WSGIProcessGroup source',
'WSGIScriptAlias / /var/www/source.wsgi/',
'AddType text/html .py',
@@ -170,9 +170,9 @@
# declare document-specific apache configs
document_apache2_config_settings = [
'<VirtualHost 0.0.0.0:8080>',
- "DocumentRoot #{TEST_VARS['securedrop_code']}/static",
- "Alias /static #{TEST_VARS['securedrop_code']}/static",
- "WSGIDaemonProcess document processes=2 threads=30 display-name=%{GROUP} python-path=#{TEST_VARS['securedrop_code']}",
+ "DocumentRoot #{property['securedrop_code']}/static",
+ "Alias /static #{property['securedrop_code']}/static",
+ "WSGIDaemonProcess document processes=2 threads=30 display-name=%{GROUP} python-path=#{property['securedrop_code']}",
'WSGIProcessGroup document',
'WSGIScriptAlias / /var/www/document.wsgi/',
'AddType text/html .py',
diff --git a/spec_tests/spec/app-staging/apparmor_spec.rb b/spec_tests/spec/app-staging/apparmor_spec.rb
--- a/spec_tests/spec/app-staging/apparmor_spec.rb
+++ b/spec_tests/spec/app-staging/apparmor_spec.rb
@@ -25,6 +25,35 @@
end
end
+# declare expected app-armor capabilities for apache2
+apache2_capabilities = %w(
+ dac_override
+ kill
+ net_bind_service
+ sys_ptrace
+)
+# check for exact list of expected app-armor capabilities for apache2
+describe command('perl -nE \'/^\s+capability\s+(\w+),$/ && say $1\' /etc/apparmor.d/usr.sbin.apache2') do
+ apache2_capabilities.each do |apache2_capability|
+ its(:stdout) { should contain(apache2_capability) }
+ end
+end
+
+# ensure no extra capabilities are defined for apache2
+describe command('grep -ic capability /etc/apparmor.d/usr.sbin.apache2') do
+ its(:stdout) { should eq apache2_capabilities.length.to_s + "\n" }
+end
+
+# check for exact list of expected app-armor capabilities for tor
+describe command('perl -nE \'/^\s+capability\s+(\w+),$/ && say $1\' /etc/apparmor.d/usr.sbin.tor') do
+ its(:stdout) { should contain("setgid") }
+end
+
+# ensure no extra capabilities are defined for tor
+describe command('grep -ic capability /etc/apparmor.d/usr.sbin.tor') do
+ its(:stdout) { should eq "1\n" }
+end
+
# Explicitly check that enforced profiles are NOT
# present in /etc/apparmor.d/disable. Polling aa-status
# only checks the last config that was loaded, whereas
@@ -44,36 +73,60 @@
end
end
-# aa-status does not permit explicit state checking
-# of services, so this is an ugly hack that can easily
-# report false positives. It checks the number of profiles
-# in a given state, but doesn't check which ones. Argh!
-# TODO: Consider writing a nasty perl one-liner to filter
-# the output and ensure the services are filed correctly.
-describe command("aa-status --complaining") do
- its(:stdout) { should eq "2\n" }
+# declare app-armor profiles expected to be enforced
+enforced_apparmor_profiles = %w(
+ /sbin/dhclient
+ /usr/lib/NetworkManager/nm-dhcp-client.action
+ /usr/lib/connman/scripts/dhclient-script
+ /usr/sbin/apache2//DEFAULT_URI
+ /usr/sbin/apache2//HANDLING_UNTRUSTED_INPUT
+ /usr/sbin/ntpd
+ /usr/sbin/tcpdump
+ system_tor
+)
+# check for enforced app-armor profiles
+# this klunky one-liner uses bash, because serverspec defaults to sh,
+# then provides START and STOP patterns to sed, filters by profile
+# names according to leading whitespace, then trims leading whitespace
+describe command("aa-status") do
+ enforced_apparmor_profiles.each do |enforced_apparmor_profile|
+ its(:stdout) { should contain(enforced_apparmor_profile).from(/profiles are in enforce mode/).to(/profiles are in complain mode/) }
+ end
end
+# ensure number of expected enforced profiles matches number checked
describe command("aa-status --enforced") do
- its(:stdout) { should eq "8\n" }
+ its(:stdout) { should eq enforced_apparmor_profiles.length.to_s + "\n" }
end
-describe command("aa-status --profiled") do
- its(:stdout) { should eq "10\n" }
+# declare app-armor profiles expected to be complaining
+# the staging hosts enabled "complain" mode for more verbose
+# logging during development and testing; production hosts
+# should not have any complain mode.
+complaining_apparmor_profiles = %w(
+ /usr/sbin/apache2
+ /usr/sbin/tor
+)
+
+# check for complaining app-armor profiles
+describe command("aa-status") do
+ complaining_apparmor_profiles.each do |complaining_apparmor_profile|
+ its(:stdout) { should contain(complaining_apparmor_profile).from(/profiles are in complain mode/).to(/\d+ processes have profiles defined/) }
+ end
end
-describe command("aa-status --profiled") do
- its(:stdout) { should eq "10\n" }
+# ensure number of expected complaining profiles matches number checked
+describe command("aa-status --complaining") do
+ its(:stdout) { should eq complaining_apparmor_profiles.length.to_s + "\n" }
end
-# Check that the expected profiles are present in the aa-status command.
-#[ 'apache', 'tor', 'ntp'].each do |enforced_profile|
-# describe command("aa-status") do
-# it { should return_stdout /#{enforced_profile}/ }
-# end
-#end
+# ensure number of total profiles is sum of enforced and complaining profiles
+describe command("aa-status --profiled") do
+ total_profiles = enforced_apparmor_profiles.length + complaining_apparmor_profiles.length
+ its(:stdout) { should eq total_profiles.to_s + "\n" }
+end
# Ensure that there are no processes that are unconfined but have a profile
describe command("aa-status") do
- its(:stdout) { should match /0 processes are unconfined but have a profile defined/ }
+ its(:stdout) { should contain("0 processes are unconfined but have a profile defined") }
end
diff --git a/spec_tests/spec/app-staging/iptables_spec.rb b/spec_tests/spec/app-staging/iptables_spec.rb
--- a/spec_tests/spec/app-staging/iptables_spec.rb
+++ b/spec_tests/spec/app-staging/iptables_spec.rb
@@ -14,7 +14,7 @@
'-A INPUT -s 8.8.8.8/32 -p udp -m udp --sport 53 -m state --state RELATED,ESTABLISHED -m comment --comment "tcp/udp dns" -j ACCEPT',
'-A INPUT -p udp -m udp --sport 123 --dport 123 -m state --state RELATED,ESTABLISHED -m comment --comment ntp -j ACCEPT',
'-A INPUT -p tcp -m multiport --sports 80,8080,443 -m state --state RELATED,ESTABLISHED -m comment --comment "apt updates" -j ACCEPT',
- '-A INPUT -s 10.0.1.3/32 -p udp -m udp --sport 1514 -m state --state RELATED,ESTABLISHED -m comment --comment "OSSEC server agent" -j ACCEPT',
+ "-A INPUT -s #{property['monitor_ip']}/32 -p udp -m udp --sport 1514 -m state --state RELATED,ESTABLISHED -m comment --comment \"OSSEC server agent\" -j ACCEPT",
'-A INPUT -i lo -m comment --comment "Allow lo to lo traffic all protocols" -j ACCEPT',
'-A INPUT -p tcp -m state --state INVALID -m comment --comment "drop but do not log inbound invalid state packets" -j DROP',
'-A INPUT -m comment --comment "Drop and log all other incomming traffic" -j LOGNDROP',
@@ -22,20 +22,20 @@
'-A OUTPUT -p tcp -m tcp --sport 80 -m state --state NEW,RELATED,ESTABLISHED -j ACCEPT',
'-A OUTPUT -p udp -m udp --dport 53 -m state --state NEW,RELATED,ESTABLISHED -j ACCEPT',
'-A OUTPUT -p tcp -m tcp --sport 22 -m state --state NEW,RELATED,ESTABLISHED -j ACCEPT',
- '-A OUTPUT -p tcp -m owner --uid-owner 109 -m state --state NEW,RELATED,ESTABLISHED -m comment --comment "tor instance that provides ssh access" -j ACCEPT',
- '-A OUTPUT -o lo -p tcp -m tcp --dport 22 -m owner --uid-owner 109 -m state --state NEW -m limit --limit 3/min --limit-burst 3 -m comment --comment "SSH with rate limiting only thru tor" -j ACCEPT',
- '-A OUTPUT -o lo -p tcp -m tcp --dport 22 -m owner --uid-owner 109 -m state --state RELATED,ESTABLISHED -m comment --comment "SSH with rate limiting only thru tor" -j ACCEPT',
- '-A OUTPUT -m owner --uid-owner 109 -m comment --comment "Drop all other traffic for the tor instance used for ssh" -j LOGNDROP',
- '-A OUTPUT -o lo -p tcp -m tcp --sport 80 -m owner --uid-owner 33 -m state --state RELATED,ESTABLISHED -m comment --comment "Restrict the apache user outbound connections" -j ACCEPT',
- '-A OUTPUT -o lo -p tcp -m tcp --sport 8080 -m owner --uid-owner 33 -m state --state RELATED,ESTABLISHED -m comment --comment "Restrict the apache user outbound connections" -j ACCEPT',
- '-A OUTPUT -s 127.0.0.1/32 -d 127.0.0.1/32 -o lo -p tcp -m owner --uid-owner 33 -m state --state NEW,RELATED,ESTABLISHED -m comment --comment "for redis worker all application user local loopback user" -j ACCEPT',
- '-A OUTPUT -m owner --uid-owner 33 -m comment --comment "Drop all other traffic by the securedrop user" -j LOGNDROP',
- '-A OUTPUT -m owner --gid-owner 108 -m comment --comment "Drop all other outbound traffic for ssh user" -j LOGNDROP',
+ "-A OUTPUT -p tcp -m owner --uid-owner #{property['tor_user_uid']} -m state --state NEW,RELATED,ESTABLISHED -m comment --comment \"tor instance that provides ssh access\" -j ACCEPT",
+ "-A OUTPUT -o lo -p tcp -m tcp --dport 22 -m owner --uid-owner #{property['tor_user_uid']} -m state --state NEW -m limit --limit 3/min --limit-burst 3 -m comment --comment \"SSH with rate limiting only thru tor\" -j ACCEPT",
+ "-A OUTPUT -o lo -p tcp -m tcp --dport 22 -m owner --uid-owner #{property['tor_user_uid']} -m state --state RELATED,ESTABLISHED -m comment --comment \"SSH with rate limiting only thru tor\" -j ACCEPT",
+ "-A OUTPUT -m owner --uid-owner #{property['tor_user_uid']} -m comment --comment \"Drop all other traffic for the tor instance used for ssh\" -j LOGNDROP",
+ "-A OUTPUT -o lo -p tcp -m tcp --sport 80 -m owner --uid-owner #{property['apache_user_uid']} -m state --state RELATED,ESTABLISHED -m comment --comment \"Restrict the apache user outbound connections\" -j ACCEPT",
+ "-A OUTPUT -o lo -p tcp -m tcp --sport 8080 -m owner --uid-owner #{property['apache_user_uid']} -m state --state RELATED,ESTABLISHED -m comment --comment \"Restrict the apache user outbound connections\" -j ACCEPT",
+ "-A OUTPUT -s 127.0.0.1/32 -d 127.0.0.1/32 -o lo -p tcp -m owner --uid-owner #{property['apache_user_uid']} -m state --state NEW,RELATED,ESTABLISHED -m comment --comment \"for redis worker all application user local loopback user\" -j ACCEPT",
+ "-A OUTPUT -m owner --uid-owner #{property['apache_user_uid']} -m comment --comment \"Drop all other traffic by the securedrop user\" -j LOGNDROP",
+ "-A OUTPUT -m owner --gid-owner #{property['ssh_group_gid']} -m comment --comment \"Drop all other outbound traffic for ssh user\" -j LOGNDROP",
'-A OUTPUT -d 8.8.8.8/32 -p tcp -m tcp --dport 53 -m owner --uid-owner 0 -m state --state NEW,RELATED,ESTABLISHED -m comment --comment "tcp/udp dns" -j ACCEPT',
'-A OUTPUT -d 8.8.8.8/32 -p udp -m udp --dport 53 -m owner --uid-owner 0 -m state --state NEW,RELATED,ESTABLISHED -m comment --comment "tcp/udp dns" -j ACCEPT',
'-A OUTPUT -p udp -m udp --sport 123 --dport 123 -m owner --uid-owner 0 -m state --state NEW,RELATED,ESTABLISHED -m comment --comment ntp -j ACCEPT',
'-A OUTPUT -p tcp -m multiport --dports 80,8080,443 -m owner --uid-owner 0 -m state --state NEW,RELATED,ESTABLISHED -m comment --comment "apt updates" -j ACCEPT',
- '-A OUTPUT -d 10.0.1.3/32 -p udp -m udp --dport 1514 -m state --state NEW,RELATED,ESTABLISHED -m comment --comment "OSSEC server agent" -j ACCEPT',
+ "-A OUTPUT -d #{property['monitor_ip']}/32 -p udp -m udp --dport 1514 -m state --state NEW,RELATED,ESTABLISHED -m comment --comment \"OSSEC server agent\" -j ACCEPT",
'-A OUTPUT -o lo -m comment --comment "Allow lo to lo traffic all protocols" -j ACCEPT',
'-A OUTPUT -m comment --comment "Drop all other outgoing traffic" -j DROP',
'-A LOGNDROP -p tcp -m limit --limit 5/min -j LOG --log-tcp-options --log-ip-options --log-uid',
@@ -52,8 +52,8 @@
# from the ansible inventory should cover most use cases (except inventories
# with just the *.onion addresses).
unwanted_iptables_rules = [
- '-A OUTPUT -d 10.0.1.3 -p tcp --dport 1515 -m state --state NEW,ESTABLISHED,RELATED -j ACCEPT -m comment --comment "ossec authd rule only required for initial agent registration"',
- '-A INPUT -s 10.0.1.3 -p tcp --sport 1515 -m state --state ESTABLISHED,RELATED -v ACCEPT -m comment --comment "ossec authd rule only required for initial agent registration"',
+ "-A OUTPUT -d #{property['monitor_ip']} -p tcp --dport 1515 -m state --state NEW,ESTABLISHED,RELATED -j ACCEPT -m comment --comment \"ossec authd rule only required for initial agent registration\"",
+ "-A INPUT -s #{property['monitor_ip']} -p tcp --sport 1515 -m state --state ESTABLISHED,RELATED -v ACCEPT -m comment --comment \"ossec authd rule only required for initial agent registration\"",
# These rules have the wrong interface for the vagrant mon-staging machine.
# Adding them in here to make sure ansible config changes don't introduce regressions.
diff --git a/spec_tests/spec/app-staging/ossec_agent_spec.rb b/spec_tests/spec/app-staging/ossec_agent_spec.rb
--- a/spec_tests/spec/app-staging/ossec_agent_spec.rb
+++ b/spec_tests/spec/app-staging/ossec_agent_spec.rb
@@ -1,10 +1,11 @@
# ensure hosts file references mon server by ip
# TODO: replace hardcoded ip for mon-staging host
describe file('/etc/hosts') do
- its(:content) { should match /^127\.0\.1\.1 app-staging app-staging$/ }
+ its(:content) { should match /^127\.0\.1\.1\s+app-staging\s+app-staging$/ }
# TODO: the "securedrop-monitor-server-alias" is an artifact of
# using the vagrant-hostmanager plugin. it may no longer be necessary
- its(:content) { should match /^10\.0\.1\.3 mon-staging securedrop-monitor-server-alias$/ }
+ mon_host_regex = Regexp.quote("#{property['monitor_ip']} mon-staging securedrop-monitor-server-alias")
+ its(:content) { should match /^#{mon_host_regex}$/ }
end
# ensure custom ossec-agent package is installed
@@ -18,6 +19,7 @@
it { should be_mode '644' }
it { should be_owned_by 'root' }
it { should be_grouped_into 'ossec' }
+ app_ip_regex = Regexp.quote("#{property['app_ip']}")
# this regex checks for a hex string of 64 chars, not a specific value
- its(:content) { should match /^1024 app-staging 10\.0\.1\.2 [0-9a-f]{64}$/ }
+ its(:content) { should match /^1024 app-staging #{app_ip_regex} [0-9a-f]{64}$/ }
end
diff --git a/spec_tests/spec/app-staging/securedrop_app_spec.rb b/spec_tests/spec/app-staging/securedrop_app_spec.rb
--- a/spec_tests/spec/app-staging/securedrop_app_spec.rb
+++ b/spec_tests/spec/app-staging/securedrop_app_spec.rb
@@ -1,18 +1,18 @@
# declare securedrop app directories
securedrop_app_directories = [
- TEST_VARS['securedrop_code'],
- TEST_VARS['securedrop_data'],
- "#{TEST_VARS['securedrop_data']}/store",
- "#{TEST_VARS['securedrop_data']}/keys",
- "#{TEST_VARS['securedrop_data']}/tmp",
+ property['securedrop_code'],
+ property['securedrop_data'],
+ "#{property['securedrop_data']}/store",
+ "#{property['securedrop_data']}/keys",
+ "#{property['securedrop_data']}/tmp",
]
# ensure securedrop app directories exist with correct permissions
securedrop_app_directories.each do |securedrop_app_directory|
describe file(securedrop_app_directory) do
it { should be_directory }
- it { should be_owned_by TEST_VARS['securedrop_user'] }
- it { should be_grouped_into TEST_VARS['securedrop_user'] }
+ it { should be_owned_by property['securedrop_user'] }
+ it { should be_grouped_into property['securedrop_user'] }
it { should be_mode '700' }
end
end
@@ -36,17 +36,17 @@
# ensure default logo header file exists
# TODO: add check for custom logo header file
-describe file("#{TEST_VARS['securedrop_code']}/static/i/logo.png") do
+describe file("#{property['securedrop_code']}/static/i/logo.png") do
it { should be_file }
# TODO: ansible task declares mode 400 but the file ends up as 644 on host
it { should be_mode '644' }
- it { should be_owned_by TEST_VARS['securedrop_user'] }
- it { should be_grouped_into TEST_VARS['securedrop_user'] }
+ it { should be_owned_by property['securedrop_user'] }
+ it { should be_grouped_into property['securedrop_user'] }
end
# ensure cronjob for securedrop tmp dir cleanup is enabled
describe cron do
- it { should have_entry "@daily #{TEST_VARS['securedrop_code']}/manage.py clean_tmp" }
+ it { should have_entry "@daily #{property['securedrop_code']}/manage.py clean_tmp" }
end
# ensure directory for worker logs is present
diff --git a/spec_tests/spec/app-staging/securedrop_app_test_spec.rb b/spec_tests/spec/app-staging/securedrop_app_test_spec.rb
--- a/spec_tests/spec/app-staging/securedrop_app_test_spec.rb
+++ b/spec_tests/spec/app-staging/securedrop_app_test_spec.rb
@@ -9,7 +9,7 @@
'Flask-Testing==0.4.2',
'mock==1.0.1',
'pytest==2.6.4',
- 'selenium==2.44.0',
+ 'selenium==2.45.0',
]
# ensure pip depdendencies are installed in staging.
# these are required for running unit and functional tests
diff --git a/spec_tests/spec/app-staging/tor_spec.rb b/spec_tests/spec/app-staging/tor_spec.rb
--- a/spec_tests/spec/app-staging/tor_spec.rb
+++ b/spec_tests/spec/app-staging/tor_spec.rb
@@ -17,3 +17,20 @@
end
end
+# declare app-specific tor service directories,
+# for mode and ownership checks. the parent dir
+# and the "ssh" service are validated in the
+# common-staging spectests.
+tor_service_directories = %w(
+ /var/lib/tor/services/document
+ /var/lib/tor/services/source
+)
+# ensure tor service dirs are owned by tor user and mode 0700
+tor_service_directories.each do |tor_service_directory|
+ describe file(tor_service_directory) do
+ it { should be_directory }
+ it { should be_mode('700') }
+ it { should be_owned_by 'debian-tor' }
+ it { should be_grouped_into 'debian-tor' }
+ end
+end
diff --git a/spec_tests/spec/common-development/securedrop_app_spec.rb b/spec_tests/spec/common-development/securedrop_app_spec.rb
--- a/spec_tests/spec/common-development/securedrop_app_spec.rb
+++ b/spec_tests/spec/common-development/securedrop_app_spec.rb
@@ -42,7 +42,7 @@
end
# ensure the securedrop application gpg pubkey is present
-describe file("#{TEST_VARS['securedrop_data']}/test_journalist_key.pub") do
+describe file("#{property['securedrop_data']}/test_journalist_key.pub") do
it { should be_file }
it { should be_owned_by 'root' }
it { should be_grouped_into 'root' }
@@ -50,34 +50,34 @@
end
# ensure config.py (settings for securedrop app) exists
-describe file("#{TEST_VARS['securedrop_code']}/config.py") do
+describe file("#{property['securedrop_code']}/config.py") do
it { should be_file }
- it { should be_owned_by TEST_VARS['securedrop_user'] }
- it { should be_grouped_into TEST_VARS['securedrop_user'] }
+ it { should be_owned_by property['securedrop_user'] }
+ it { should be_grouped_into property['securedrop_user'] }
it { should be_mode '600' }
its(:content) { should match /^JOURNALIST_KEY = '65A1B5FF195B56353CC63DFFCC40EF1228271441'$/ }
end
# ensure sqlite database exists for application
-describe file("#{TEST_VARS['securedrop_data']}/db.sqlite") do
+describe file("#{property['securedrop_data']}/db.sqlite") do
it { should be_file }
# TODO: perhaps 640 perms would work here
it { should be_mode '644' }
- it { should be_owned_by TEST_VARS['securedrop_user'] }
- it { should be_grouped_into TEST_VARS['securedrop_user'] }
+ it { should be_owned_by property['securedrop_user'] }
+ it { should be_grouped_into property['securedrop_user'] }
end
# declare config options for securedrop worker
securedrop_worker_config_options = [
'[program:securedrop_worker]',
'command=/usr/local/bin/rqworker',
- "directory=#{TEST_VARS['securedrop_code']}",
+ "directory=#{property['securedrop_code']}",
'autostart=true',
'autorestart=true',
'startretries=3',
'stderr_logfile=/var/log/securedrop_worker/err.log',
'stdout_logfile=/var/log/securedrop_worker/out.log',
- "user=#{TEST_VARS['securedrop_user']}",
+ "user=#{property['securedrop_user']}",
'environment=HOME="/tmp/python-gnupg"',
]
# ensure securedrop worker config for supervisor is present
diff --git a/spec_tests/spec/common-staging/cron_apt_spec.rb b/spec_tests/spec/common-staging/cron_apt_spec.rb
--- a/spec_tests/spec/common-staging/cron_apt_spec.rb
+++ b/spec_tests/spec/common-staging/cron_apt_spec.rb
@@ -1,5 +1,5 @@
# Check for critical packages
-['cron-apt', 'ntp', 'paxctl'].each do |pkg|
+['cron-apt', 'ntp'].each do |pkg|
describe package(pkg) do
it { should be_installed }
end
@@ -75,7 +75,7 @@
end
# ensure safe-upgrade has already been run
-describe command('aptitude --simulate safe-upgrade') do
+describe command('aptitude --simulate -y safe-upgrade') do
its(:exit_status) { should eq 0 }
its(:stdout) { should match /^No packages will be installed, upgraded, or removed\.$/ }
its(:stdout) { should match /0 packages upgraded, 0 newly installed, 0 to remove and 0 not upgraded\./ }
diff --git a/spec_tests/spec/common-staging/tor_spec.rb b/spec_tests/spec/common-staging/tor_spec.rb
--- a/spec_tests/spec/common-staging/tor_spec.rb
+++ b/spec_tests/spec/common-staging/tor_spec.rb
@@ -32,12 +32,19 @@
end
end
-# ensure parent directory for tor hidden services exists
-describe file('/var/lib/tor/services') do
- it { should be_directory }
- it { should be_mode('2755') }
- it { should be_owned_by 'debian-tor' }
- it { should be_grouped_into 'debian-tor' }
+# declare tor service directories, for mode and ownership checks
+tor_service_directories = %w(
+ /var/lib/tor/services
+ /var/lib/tor/services/ssh
+)
+# ensure tor service dirs are owned by tor user and mode 0700
+tor_service_directories.each do |tor_service_directory|
+ describe file(tor_service_directory) do
+ it { should be_directory }
+ it { should be_mode('700') }
+ it { should be_owned_by 'debian-tor' }
+ it { should be_grouped_into 'debian-tor' }
+ end
end
# ensure tor service is running
diff --git a/spec_tests/spec/development/securedrop_app_dev_spec.rb b/spec_tests/spec/development/securedrop_app_dev_spec.rb
--- a/spec_tests/spec/development/securedrop_app_dev_spec.rb
+++ b/spec_tests/spec/development/securedrop_app_dev_spec.rb
@@ -18,46 +18,58 @@
end
securedrop_app_directories = [
- TEST_VARS['securedrop_data'],
- "#{TEST_VARS['securedrop_data']}/keys",
- "#{TEST_VARS['securedrop_data']}/tmp",
- "#{TEST_VARS['securedrop_data']}/store",
+ property['securedrop_data'],
+ "#{property['securedrop_data']}/keys",
+ "#{property['securedrop_data']}/tmp",
+ "#{property['securedrop_data']}/store",
]
# ensure securedrop app directories exist with correct permissions
securedrop_app_directories.each do |securedrop_app_directory|
describe file(securedrop_app_directory) do
it { should be_directory }
- it { should be_owned_by TEST_VARS['securedrop_user'] }
- it { should be_grouped_into TEST_VARS['securedrop_user'] }
+ it { should be_owned_by property['securedrop_user'] }
+ it { should be_grouped_into property['securedrop_user'] }
it { should be_mode '700' }
end
end
# /vagrant has 770 permissions, so test
# separately from the 700 permissions above
-describe file(TEST_VARS['securedrop_code']) do
+describe file(property['securedrop_code']) do
it { should be_directory }
- it { should be_owned_by TEST_VARS['securedrop_user'] }
- it { should be_grouped_into TEST_VARS['securedrop_user'] }
- it { should be_mode '770' }
+ it { should be_owned_by property['securedrop_user'] }
+ it { should be_grouped_into property['securedrop_user'] }
+ # Vagrant VirtualBox environments show /vagrant as 770,
+ # but the Vagrant DigitalOcean droplet shows /vagrant as 775.
+ # This appears to be a side-effect of the default umask
+ # in the snapci instances. (The rsync provisioner for the
+ # vagrant-digitalocean plugin preserves permissions from the host.)
+ # The spectests for 'staging' still check for an explicit mode,
+ # so it's OK to relax this test for now.
+ #it { should be_mode '700' }
+ # TODO: should be 700 in all environments; ansible task is
+ # straightforward about this.
+ it { should be_readable.by('owner') }
+ it { should be_writable.by('owner') }
+ it { should be_executable.by('owner') }
end
# ensure cronjob for securedrop tmp dir cleanup is enabled
describe cron do
- # TODO: this should be using TEST_VARS, but the ansible role
+ # TODO: this should be using property, but the ansible role
# doesn't use a var, it's hard-coded. update ansible, then fix test.
- # it { should have_entry "@daily #{TEST_VARS['securedrop_code']}/manage.py clean_tmp" }
+ # it { should have_entry "@daily #{property['securedrop_code']}/manage.py clean_tmp" }
it { should have_entry "@daily /var/www/securedrop/manage.py clean_tmp" }
end
# ensure default logo header file exists
# TODO: add check for custom logo header file
-describe file("#{TEST_VARS['securedrop_code']}/static/i/logo.png") do
+describe file("#{property['securedrop_code']}/static/i/logo.png") do
it { should be_file }
# TODO: ansible task declares mode 400 but the file ends up as 644 on host
# TODO: 644 on app-staging, 664 in development
it { should be_mode '664' }
- it { should be_owned_by TEST_VARS['securedrop_user'] }
- it { should be_grouped_into TEST_VARS['securedrop_user'] }
+ it { should be_owned_by property['securedrop_user'] }
+ it { should be_grouped_into property['securedrop_user'] }
end
diff --git a/spec_tests/spec/common-staging/grsec_spec.rb b/spec_tests/spec/grsecurity/grsec_spec.rb
similarity index 72%
rename from spec_tests/spec/common-staging/grsec_spec.rb
rename to spec_tests/spec/grsecurity/grsec_spec.rb
--- a/spec_tests/spec/common-staging/grsec_spec.rb
+++ b/spec_tests/spec/grsecurity/grsec_spec.rb
@@ -46,9 +46,34 @@
# Check that paxtest does not report anything vulnerable
# Requires the package paxtest to be installed
# The paxtest package is currently being installed in the app-test role
-describe command("paxtest blackhat") do
- its(:stdout) { should_not match /vulnerable/ }
-end
+paxtest_check_killed = [
+ "Executable anonymous mapping",
+ "Executable bss",
+ "Executable data",
+ "Executable heap",
+ "Executable stack",
+ "Executable shared library bss",
+ "Executable shared library data",
+ "Executable anonymous mapping (mprotect)",
+ "Executable bss (mprotect)",
+ "Executable data (mprotect)",
+ "Executable heap (mprotect)",
+ "Executable stack (mprotect)",
+ "Executable shared library bss (mprotect)",
+ "Executable shared library data (mprotect)",
+ "Writable text segments",
+ "Return to function (memcpy)",
+ "Return to function (memcpy, PIE)",
+]
+# TODO: enable the paxtest checks below once the "paxtest"
+# package is included via the grsecurity role.
+#describe command("paxtest blackhat") do
+# paxtest_check_killed.each do |killed|
+# its(:stdout) { should match /^#{Regexp.escape(killed)}\s*:\sKilled/ }
+# end
+# its(:stdout) { should_not match /Vulnerable/i }
+# its(:exit_status) { should eq 0 }
+#end
# ensure generic linux kernels have been removed
describe command("dpkg --get-selections '^linux-image-.*generic$'") do
diff --git a/spec_tests/spec/mon-staging/iptables_spec.rb b/spec_tests/spec/mon-staging/iptables_spec.rb
--- a/spec_tests/spec/mon-staging/iptables_spec.rb
+++ b/spec_tests/spec/mon-staging/iptables_spec.rb
@@ -13,26 +13,26 @@
'-A INPUT -s 8.8.8.8/32 -p udp -m udp --sport 53 -m state --state RELATED,ESTABLISHED -m comment --comment "tcp/udp dns" -j ACCEPT',
'-A INPUT -p udp -m udp --sport 123 --dport 123 -m state --state RELATED,ESTABLISHED -m comment --comment ntp -j ACCEPT',
'-A INPUT -p tcp -m multiport --sports 80,8080,443 -m state --state RELATED,ESTABLISHED -m comment --comment "apt updates" -j ACCEPT',
+ "-A INPUT -s #{property['app_ip']}/32 -p udp -m udp --dport 1514 -m state --state NEW,RELATED,ESTABLISHED -m comment --comment \"Allow OSSEC agent to monitor\" -j ACCEPT",
'-A INPUT -p tcp -m tcp --sport 587 -m state --state RELATED,ESTABLISHED -m comment --comment "Allow ossec email alerts out" -j ACCEPT',
'-A INPUT -i lo -m comment --comment "Allow lo to lo traffic all protocols" -j ACCEPT',
'-A INPUT -p tcp -m state --state INVALID -m comment --comment "drop but do not log inbound invalid state packets" -j DROP',
'-A INPUT -m comment --comment "Log and drop all other incomming traffic" -j LOGNDROP',
- '-A INPUT -s 10.0.1.2/32 -p udp -m udp --dport 1514 -m state --state NEW,RELATED,ESTABLISHED -m comment --comment "Allow OSSEC agent to monitor" -j ACCEPT',
- '-A OUTPUT -d 10.0.1.2/32 -p udp -m udp --sport 1514 -m state --state RELATED,ESTABLISHED -m comment --comment "Allow OSSEC agent to monitor" -j ACCEPT',
'-A OUTPUT -p udp -m udp --dport 53 -m state --state NEW,RELATED,ESTABLISHED -j ACCEPT',
'-A OUTPUT -p tcp -m tcp --sport 22 -m state --state NEW,RELATED,ESTABLISHED -j ACCEPT',
- '-A OUTPUT -p tcp -m owner --uid-owner 109 -m state --state NEW,RELATED,ESTABLISHED -m comment --comment "Allow Tor out" -j ACCEPT',
- '-A OUTPUT -o lo -p tcp -m tcp --dport 22 -m owner --uid-owner 109 -m state --state NEW -m limit --limit 3/min --limit-burst 3 -m comment --comment "SSH with rate limiting only thur tor" -j ACCEPT',
- '-A OUTPUT -o lo -p tcp -m tcp --dport 22 -m owner --uid-owner 109 -m state --state RELATED,ESTABLISHED -m comment --comment "SSH with rate limiting only thur tor" -j ACCEPT',
- '-A OUTPUT -m owner --uid-owner 109 -m comment --comment "Drop all other traffic for the tor instance used for ssh" -j LOGNDROP',
- '-A OUTPUT -m owner --gid-owner 108 -m comment --comment "Drop all other outbound traffic for ssh user" -j LOGNDROP',
+ "-A OUTPUT -p tcp -m owner --uid-owner #{property['tor_user_uid']} -m state --state NEW,RELATED,ESTABLISHED -m comment --comment \"Allow Tor out\" -j ACCEPT",
+ "-A OUTPUT -o lo -p tcp -m tcp --dport 22 -m owner --uid-owner #{property['tor_user_uid']} -m state --state NEW -m limit --limit 3/min --limit-burst 3 -m comment --comment \"SSH with rate limiting only thur tor\" -j ACCEPT",
+ "-A OUTPUT -o lo -p tcp -m tcp --dport 22 -m owner --uid-owner #{property['tor_user_uid']} -m state --state RELATED,ESTABLISHED -m comment --comment \"SSH with rate limiting only thur tor\" -j ACCEPT",
+ "-A OUTPUT -m owner --uid-owner #{property['tor_user_uid']} -m comment --comment \"Drop all other traffic for the tor instance used for ssh\" -j LOGNDROP",
+ "-A OUTPUT -m owner --gid-owner #{property['ssh_group_gid']} -m comment --comment \"Drop all other outbound traffic for ssh user\" -j LOGNDROP",
'-A OUTPUT -d 8.8.8.8/32 -p tcp -m tcp --dport 53 -m owner --uid-owner 0 -m state --state NEW,RELATED,ESTABLISHED -m comment --comment "tcp/udp dns" -j ACCEPT',
'-A OUTPUT -d 8.8.8.8/32 -p udp -m udp --dport 53 -m owner --uid-owner 0 -m state --state NEW,RELATED,ESTABLISHED -m comment --comment "tcp/udp dns" -j ACCEPT',
'-A OUTPUT -p udp -m udp --sport 123 --dport 123 -m owner --uid-owner 0 -m state --state NEW,RELATED,ESTABLISHED -m comment --comment ntp -j ACCEPT',
'-A OUTPUT -p tcp -m multiport --dports 80,8080,443 -m owner --uid-owner 0 -m state --state NEW,RELATED,ESTABLISHED -m comment --comment "apt updates" -j ACCEPT',
- '-A OUTPUT -d 8.8.8.8/32 -p tcp -m tcp --dport 53 -m owner --uid-owner 110 -m state --state NEW,RELATED,ESTABLISHED -m comment --comment "postfix dns rule" -j ACCEPT',
- '-A OUTPUT -d 8.8.8.8/32 -p udp -m udp --dport 53 -m owner --uid-owner 110 -m state --state NEW,RELATED,ESTABLISHED -m comment --comment "postfix dns rule" -j ACCEPT',
- '-A OUTPUT -p tcp -m tcp --dport 587 -m owner --uid-owner 110 -m state --state NEW,RELATED,ESTABLISHED -m comment --comment "Allow ossec email alerts out" -j ACCEPT',
+ "-A OUTPUT -d #{property['app_ip']}/32 -p udp -m udp --sport 1514 -m state --state RELATED,ESTABLISHED -m comment --comment \"Allow OSSEC agent to monitor\" -j ACCEPT",
+ "-A OUTPUT -d 8.8.8.8/32 -p tcp -m tcp --dport 53 -m owner --uid-owner #{property['postfix_user_uid']} -m state --state NEW,RELATED,ESTABLISHED -m comment --comment \"postfix dns rule\" -j ACCEPT",
+ "-A OUTPUT -d 8.8.8.8/32 -p udp -m udp --dport 53 -m owner --uid-owner #{property['postfix_user_uid']} -m state --state NEW,RELATED,ESTABLISHED -m comment --comment \"postfix dns rule\" -j ACCEPT",
+ "-A OUTPUT -p tcp -m tcp --dport 587 -m owner --uid-owner #{property['postfix_user_uid']} -m state --state NEW,RELATED,ESTABLISHED -m comment --comment \"Allow ossec email alerts out\" -j ACCEPT",
'-A OUTPUT -o lo -m comment --comment "Allow lo to lo traffic all protocols" -j ACCEPT',
'-A OUTPUT -m comment --comment "Drop all other outgoing traffic" -j DROP',
'-A LOGNDROP -p tcp -m limit --limit 5/min -j LOG --log-tcp-options --log-ip-options --log-uid',
@@ -49,10 +49,10 @@
# from the ansible inventory should cover most use cases (except inventories
# with just the *.onion addresses).
unwanted_iptables_rules = [
- '-A INPUT -s 10.0.1.2 -p tcp --sport 1515 -m state --state ESTABLISHED,RELATED -v ACCEPT -m comment --comment "ossec authd rule only required for initial agent registration"',
- '-A OUTPUT -d 10.0.1.2 -p tcp --dport 1515 -m state --state NEW,ESTABLISHED,RELATED -j ACCEPT -m comment --comment "ossec authd rule only required for initial agent registration"',
- '-A INPUT -s 10.0.1.2 -p tcp --dport 1515 -m state --state NEW,ESTABLISHED,RELATED -j ACCEPT',
- '-A OUTPUT -d 10.01.2 -p tcp --sport 1515 -m state --state ESTABLISHED,RELATED -j ACCEPT',
+ "-A INPUT -s #{property['app_ip']} -p tcp --sport 1515 -m state --state ESTABLISHED,RELATED -v ACCEPT -m comment --comment \"ossec authd rule only required for initial agent registration\"",
+ "-A OUTPUT -d #{property['app_ip']} -p tcp --dport 1515 -m state --state NEW,ESTABLISHED,RELATED -j ACCEPT -m comment --comment \"ossec authd rule only required for initial agent registration\"",
+ "-A INPUT -s #{property['app_ip']} -p tcp --dport 1515 -m state --state NEW,ESTABLISHED,RELATED -j ACCEPT",
+ "-A OUTPUT -d #{property['app_ip']} -p tcp --sport 1515 -m state --state ESTABLISHED,RELATED -j ACCEPT",
]
# check for wanted and unwanted iptables rules
diff --git a/spec_tests/spec/mon-staging/ossec_server_spec.rb b/spec_tests/spec/mon-staging/ossec_server_spec.rb
--- a/spec_tests/spec/mon-staging/ossec_server_spec.rb
+++ b/spec_tests/spec/mon-staging/ossec_server_spec.rb
@@ -1,8 +1,9 @@
# ensure hosts file references app server by ip
# TODO: replace hardcoded ip for app-staging host
describe file('/etc/hosts') do
- its(:content) { should match /^127\.0\.1\.1 mon-staging mon-staging$/ }
- its(:content) { should match /^10\.0\.1\.2 app-staging$/ }
+ its(:content) { should match /^127\.0\.1\.1\s+mon-staging\s+mon-staging$/ }
+ app_host_regex = Regexp.quote("#{property['app_ip']} app-staging")
+ its(:content) { should match /^#{app_host_regex}$/ }
end
# ensure required packages are installed
@@ -54,7 +55,7 @@
'smtp_tls_session_cache_database = btree:${data_directory}/smtp_scache',
'smtp_tls_security_level = fingerprint',
'smtp_tls_fingerprint_digest = sha1',
- 'smtp_tls_fingerprint_cert_match = 9C:0A:CC:93:1D:E7:51:37:90:61:6B:A1:18:28:67:95:54:C5:69:A8',
+ 'smtp_tls_fingerprint_cert_match = D3:7C:82:FC:D0:5F:8F:D7:DA:A2:59:8C:42:D7:B2:9F:C1:9F:7E:60',
'smtp_tls_ciphers = high',
'smtp_tls_protocols = TLSv1.2 TLSv1.1 TLSv1 !SSLv3 !SSLv2',
'myhostname = monitor.securedrop',
@@ -86,7 +87,7 @@
# ensure ossec considers app-staging host "available"
describe command('/var/ossec/bin/list_agents -a') do
- its(:stdout) { should eq "app-staging-10.0.1.2 is available.\n" }
+ its(:stdout) { should eq "app-staging-#{property['app_ip']} is available.\n" }
end
# ensure ossec gpg homedir exists
diff --git a/spec_tests/spec/spec_helper.rb b/spec_tests/spec/spec_helper.rb
--- a/spec_tests/spec/spec_helper.rb
+++ b/spec_tests/spec/spec_helper.rb
@@ -18,41 +18,89 @@
host = ENV['TARGET_HOST']
-`vagrant up #{host}`
+# Using backticks for a subprocess call means
+# STDOUT will be masked, which blocks silently for
+# a long time if the host isn't up. Using IO.popen
+# instead allows for a tee-like interface
+#`vagrant up #{host}`
+IO.popen("vagrant up #{host}") do |output|
+ while line = output.gets do
+ # simply echo it back
+ puts line
+ end
+end
+# determine SSH config
config = Tempfile.new('', Dir.tmpdir)
config.write(`vagrant ssh-config #{host}`)
config.close
-
options = Net::SSH::Config.for(host, [config.path])
-
-options[:user] ||= Etc.getlogin
-
+options[:user]
set :host, options[:host_name] || host
set :ssh_options, options
-# accept basename for sought vars file,
-# then return a hash based on those settings
-def retrieve_vars(file_basename)
- fullpath = File.expand_path(File.join(File.dirname(__FILE__), 'vars', "#{file_basename}.yml"))
- vars_file = YAML.load_file(fullpath)
- return vars_file
-end
-# load custom vars for host
-case host
-when /^development$/
- TEST_VARS = retrieve_vars('development')
-when /^app-staging$/
- TEST_VARS = retrieve_vars('staging')
-end
+# retrieve dynamic vars for given hostname
+def retrieve_vars(hostname)
+ # accept basename for sought vars file,
+ # then return a hash based on those settings
+ def read_vars_file(file_basename)
+ vars_filepath = File.expand_path(File.join(
+ File.dirname(__FILE__), 'vars', "#{file_basename}.yml"
+ ))
+ return YAML.load_file(vars_filepath)
+ end
-# Disable sudo
-# set :disable_sudo, true
+ # crude case statement for determining var lookup
+ case hostname
+ when /^development$/
+ vars = read_vars_file('development')
+ when /-staging$/
+ # Both staging hosts need a similar list of vars.
+ vars = read_vars_file('staging')
+ vars['tor_user_uid'] = vagrant_ssh_cmd(hostname, "id -u debian-tor")
+ vars['ssh_group_gid'] = vagrant_ssh_cmd(hostname, "getent group ssh | cut -d: -f3")
+ # Ideally these IP addresses would be cached, since they don't
+ # change during a test run. Right now, both values are looked up twice,
+ # once for each staging host.
+ vars['app_ip'] = retrieve_ip_addr('app-staging')
+ vars['monitor_ip'] = retrieve_ip_addr('mon-staging')
+ # These vars are host-specific, so check hostname before querying.
+ if hostname.match(/^app/)
+ vars['apache_user_uid'] = vagrant_ssh_cmd(hostname, "id -u www-data")
+ elsif hostname.match(/^mon/)
+ vars['postfix_user_uid'] = vagrant_ssh_cmd(hostname, "id -u postfix")
+ end
+ end
+ return vars
+end
+# ssh into vagrant machine, run command, return output
+def vagrant_ssh_cmd(hostname, command)
+ # Every ssh connection will end with a "Connection closed" message.
+ # Since dynamic variable fetching makes several ssh calls,
+ # let's filter to remove that noisy output from stderr.
+ filter_stderr = "2> >( grep -vP '^Connection to [\d.]+ closed\.' )"
+ # Unfortunately it appears that all of stderr is being filtered,
+ # not just the grep pattern. Perhaps the popen4 gem would facilitate
+ # smarter filtering, but that doesn't seem worthwhile right now.
+ vagrant_cmd = "vagrant ssh #{hostname} --command '#{command}'"
+ # Ruby backticks use /bin/sh as shell, and /bin/sh doesn't support
+ # process redirection, so force use of /bin/bash
+ return `/bin/bash -c "#{vagrant_cmd} #{filter_stderr}"`
+end
-# Set environment variables
-# set :env, :LANG => 'C', :LC_MESSAGES => 'C'
+# look up ip address for given hostname,
+# so spectests are relevant regardless of provider
+def retrieve_ip_addr(hostname)
+ ip_output = vagrant_ssh_cmd(hostname, "hostname -I")
+ # Vagrant VirtualBox images will always have eth0 as the NAT device,
+ # but spectests need the private_network device instead.
+ iface1, iface2 = ip_output.split()
+ # If we have two devices, assume first is NAT and return second.
+ # Otherwise, assume eth0 is the primary address.
+ return iface2 ? iface2 : iface1
+end
-# Set PATH
-# set :path, '/sbin:/usr/local/sbin:$PATH'
+# load dynamic vars for host
+set_property retrieve_vars(host)
diff --git a/spec_tests/spec/vars/development.yml b/spec_tests/spec/vars/development.yml
--- a/spec_tests/spec/vars/development.yml
+++ b/spec_tests/spec/vars/development.yml
@@ -1,7 +1,4 @@
---
-securedrop_repo: /vagrant
-non_default_securedrop_user: vagrant
securedrop_user: vagrant
-ssh_users: vagrant
securedrop_code: /vagrant/securedrop
securedrop_data: /var/lib/securedrop
diff --git a/spec_tests/spec/vars/staging.yml b/spec_tests/spec/vars/staging.yml
--- a/spec_tests/spec/vars/staging.yml
+++ b/spec_tests/spec/vars/staging.yml
@@ -1,21 +1,6 @@
---
### General (used by more than one role) ###
-securedrop_repo: /vagrant
securedrop_code: /var/www/securedrop
securedrop_data: /var/lib/securedrop
securedrop_user: www-data
-dns_server: 8.8.8.8
-ssh_users: vagrant
-
-monitor_ip: 10.0.1.3
-monitor_hostname: mon-staging
-app_ip: 10.0.1.2
-app_hostname: app-staging
-
-# To allow direct connections to source document interfaces for ease of testing
-apache_listening_address: 0.0.0.0
-apache_allow_from: all
-source_apache_log_location: /var/log/apache2/source-error.log
-apache_logging_level: info
-
| Fix tor service permissions
Ensures correct mode for tor hidden service directories. Updates spectests to validate new file modes, and to test each service directory individually.
Confirmed that these playbook changes resolve the tor failure described in #1044. Playbooks finished without error on a fresh provision of staging VMs based on trusty64-14.04.2. Also spun up snapshots of working VMs running the previous version of tor and ran the modified playbooks against them, again without errors.
File mode tasks use quotes to force stringification, which is relevant to #1006.
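To illustrate why the quoting matters (a minimal sketch, not taken from the playbooks themselves, and assuming PyYAML's YAML 1.1 parsing rules): an unquoted leading-zero mode is read as an octal integer, while a quoted mode survives as the literal string the file task expects.
```python
# Minimal illustration of the quoting issue, assuming PyYAML is available.
import yaml

# Unquoted: YAML 1.1 resolves a leading-zero scalar as an octal integer.
print(yaml.safe_load("mode: 0700"))    # {'mode': 448}

# Quoted: the value stays a string, so the intended "0700" reaches the task intact.
print(yaml.safe_load("mode: '0700'"))  # {'mode': '0700'}
```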
Resolves #1044.
| 2015-06-11T23:00:44Z | [] | [] |
|
freedomofpress/securedrop | 1,075 | freedomofpress__securedrop-1075 | [
"988",
"1050",
"1027"
] | a6c907c1c29de923f54423d1ee953447b1f3c8c4 | diff --git a/install_files/ansible-base/roles/upgrade/files/0.3pre_upgrade.py b/install_files/ansible-base/roles/upgrade/files/0.3pre_upgrade.py
--- a/install_files/ansible-base/roles/upgrade/files/0.3pre_upgrade.py
+++ b/install_files/ansible-base/roles/upgrade/files/0.3pre_upgrade.py
@@ -16,7 +16,10 @@ def backup_app():
t.add('/var/lib/securedrop/')
t.add('/var/lib/tor/services/')
t.add('/var/www/securedrop/config.py')
- t.add('/var/www/securedrop/static/i/logo.png')
+ try:
+ t.add('/var/www/securedrop/static/i/logo.png')
+ except OSError:
+ print "[!] Expected but non-essential file ('logo.png') not found. Continuing..."
print "** Backed up system to {} before migrating.".format(tar_fn)
diff --git a/migration_scripts/0.2.1/0.3_migrate.py b/migration_scripts/0.2.1/0.3_migrate.py
--- a/migration_scripts/0.2.1/0.3_migrate.py
+++ b/migration_scripts/0.2.1/0.3_migrate.py
@@ -26,8 +26,7 @@ def migrate_config_file(backup):
shutil.copy(config_fn, config_fn + '.backup')
# Substitute values in new config with values from old config
- old_config_path = 'var/chroot/source/var/www/securedrop/config.py'
- old_config = backup.extractfile(old_config_path).read()
+ old_config = backup.extractfile('var/chroot/source/var/www/securedrop/config.py').read()
new_config = open(config_fn, 'r').read()
subs = [
(r"JOURNALIST_KEY=('.*')", r"^(JOURNALIST_KEY = )('.*')"),
@@ -42,6 +41,9 @@ def migrate_config_file(backup):
with open(config_fn, 'w') as config:
config.write(new_config)
+ # Restart Apache so the web application picks up the changes to config.py
+ subprocess.call(["service", "apache2", "restart"])
+
def extract_tree_to(tar, selector, dest):
# http://stackoverflow.com/a/15171308/1093000
@@ -120,8 +122,7 @@ def migrate_database(backup):
# Copied from db.py to compute filesystem-safe journalist filenames
def journalist_filename(s):
valid_chars = 'abcdefghijklmnopqrstuvwxyz1234567890-_'
- return ''.join([c for c in s.lower().replace(' ',
- '_') if c in valid_chars])
+ return ''.join([c for c in s.lower().replace(' ', '_') if c in valid_chars])
# Migrate rows to new database with SQLAlchemy ORM
for source in sources:
@@ -159,8 +160,7 @@ def journalist_filename(s):
replies.sort(key=itemgetter(1))
if len(submissions) > 0:
- time = submissions[-1][1]
- migrated_source.last_updated = (datetime.utcfromtimestamp(time))
+ migrated_source.last_updated = datetime.utcfromtimestamp(submissions[-1][1])
else:
# The source will have the default .last_updated of utcnow(), which
# might be a little confusing, but it's the best we can do.
@@ -207,11 +207,7 @@ def journalist_filename(s):
if fn.startswith('reply-'):
new_fn = "{0}-{1}-reply.gpg".format(count+1, journalist_filename(source[1]))
else:
- new_fn = "{0}-{1}-{2}".format(count+1,
- journalist_filename(source[1]),
- "msg.gpg"
- if fn.endswith("msg.gpg") else
- "doc.zip.gpg")
+ new_fn = "{0}-{1}-{2}".format(count+1, journalist_filename(source[1]), "msg.gpg" if fn.endswith("msg.gpg") else "doc.zip.gpg")
# Move to the new filename
os.rename(os.path.join(source_dir, fn),
@@ -234,18 +230,15 @@ def journalist_filename(s):
db_session.commit()
# chown the database file to the securedrop user
- subprocess.call(['chown', 'www-data:www-data',
- "/var/lib/securedrop/db.sqlite"])
+ subprocess.call(['chown', 'www-data:www-data', "/var/lib/securedrop/db.sqlite"])
def migrate_custom_header_image(backup):
print "* Migrating custom header image..."
- extract_file_to(backup,
- ("var/chroot/source/var/www/securedrop/static/i/"
- "securedrop.png"),
+ extract_file_to(backup,
+ "var/chroot/source/var/www/securedrop/static/i/securedrop.png",
"/var/www/securedrop/static/i/logo.png")
- subprocess.call(['chown', '-R', 'www-data:www-data',
- "/var/www/securedrop/static/i/logo.png"])
+ subprocess.call(['chown', '-R', 'www-data:www-data', "/var/www/securedrop/static/i/logo.png"])
def migrate_tor_files(backup):
@@ -269,8 +262,7 @@ def migrate_tor_files(backup):
# Extract the original source interface THS key
extract_file_to(backup,
- ("var/chroot/source/var/lib/tor/hidden_service/"
- "private_key"),
+ "var/chroot/source/var/lib/tor/hidden_service/private_key",
os.path.join(source_ths_dir, "private_key"))
# chmod the files so they're owned by debian-tor:debian-tor
diff --git a/securedrop/db.py b/securedrop/db.py
--- a/securedrop/db.py
+++ b/securedrop/db.py
@@ -85,6 +85,9 @@ class Source(Base):
# keep track of how many interactions have happened, for filenames
interaction_count = Column(Integer, default=0, nullable=False)
+ # Don't create or bother checking excessively long codenames to prevent DoS
+ MAX_CODENAME_LEN = 128
+
def __init__(self, filesystem_id=None, journalist_designation=None):
self.filesystem_id = filesystem_id
self.journalist_designation = journalist_designation
@@ -206,6 +209,18 @@ class BadTokenException(Exception):
"""Raised when a user logins in with an incorrect TOTP token"""
+class InvalidPasswordLength(Exception):
+ """Raised when attempting to create a Journalist or log in with an invalid
+ password length"""
+
+ def __init__(self, password):
+ self.pw_len = len(password)
+
+ def __str__(self):
+ if self.pw_len > Journalist.MAX_PASSWORD_LEN:
+ return "Password too long (len={})".format(self.pw_len)
+
+
class Journalist(Base):
__tablename__ = "journalists"
id = Column(Integer, primary_key=True)
@@ -245,18 +260,21 @@ def _gen_salt(self, salt_bytes=32):
def _scrypt_hash(self, password, salt, params=None):
if not params:
params = self._SCRYPT_PARAMS
- # try clause for debugging intermittent scrypt "could not compute hash"
- # error
- try:
- return scrypt.hash(str(password), salt, **params)
- except scrypt.error as e:
- print "Scrypt hashing failed for password='{}', salt='{}', params='{}', traceback: {}".format(password, salt, params, e)
+ return scrypt.hash(str(password), salt, **params)
+
+ MAX_PASSWORD_LEN = 128
def set_password(self, password):
+ # Enforce a reasonable maximum length for passwords to avoid DoS
+ if len(password) > self.MAX_PASSWORD_LEN:
+ raise InvalidPasswordLength(password)
self.pw_salt = self._gen_salt()
self.pw_hash = self._scrypt_hash(password, self.pw_salt)
def valid_password(self, password):
+ # Avoid hashing passwords that are over the maximum length
+ if len(password) > self.MAX_PASSWORD_LEN:
+ raise InvalidPasswordLength(password)
return self._scrypt_hash(password, self.pw_salt) == self.pw_hash
def regenerate_totp_shared_secret(self):
diff --git a/securedrop/journalist.py b/securedrop/journalist.py
--- a/securedrop/journalist.py
+++ b/securedrop/journalist.py
@@ -18,7 +18,7 @@
from db import (db_session, Source, Journalist, Submission, Reply,
SourceStar, get_one_or_else, NoResultFound,
WrongPasswordException, BadTokenException,
- LoginThrottledException)
+ LoginThrottledException, InvalidPasswordLength)
import worker
app = Flask(__name__, template_folder=config.JOURNALIST_TEMPLATES_DIR)
@@ -181,6 +181,10 @@ def admin_add_user():
otp_secret=otp_secret)
db_session.add(new_user)
db_session.commit()
+ except InvalidPasswordLength:
+ form_valid = False
+ flash("Your password is too long (maximum length {} characters)".format(
+ Journalist.MAX_PASSWORD_LEN), "error")
except IntegrityError as e:
form_valid = False
if "username is not unique" in str(e):
@@ -254,7 +258,13 @@ def admin_edit_user(user_id):
if request.form['password'] != request.form['password_again']:
flash("Passwords didn't match", "error")
return redirect(url_for("admin_edit_user", user_id=user_id))
- user.set_password(request.form['password'])
+ try:
+ user.set_password(request.form['password'])
+ except InvalidPasswordLength:
+ flash("Your password is too long "
+ "(maximum length {} characters)".format(
+ Journalist.MAX_PASSWORD_LEN), "error")
+ return redirect(url_for("admin_edit_user", user_id=user_id))
user.is_admin = bool(request.form.get('is_admin'))
diff --git a/securedrop/manage.py b/securedrop/manage.py
--- a/securedrop/manage.py
+++ b/securedrop/manage.py
@@ -124,6 +124,13 @@ def add_admin():
while True:
password = getpass("Password: ")
password_again = getpass("Confirm Password: ")
+
+ if len(password) > Journalist.MAX_PASSWORD_LEN:
+ print ("Your password is too long (maximum length {} characters). "
+ "Please pick a shorter password.".format(
+ Journalist.MAX_PASSWORD_LEN))
+ continue
+
if password == password_again:
break
print "Passwords didn't match!"
@@ -137,20 +144,18 @@ def add_admin():
if otp_secret:
break
- admin = Journalist(
- username=username,
- password=password,
- is_admin=True,
- otp_secret=otp_secret)
try:
+ admin = Journalist(username=username,
+ password=password,
+ is_admin=True,
+ otp_secret=otp_secret)
db_session.add(admin)
db_session.commit()
except Exception as e:
if "username is not unique" in str(e):
print "ERROR: That username is already taken!"
else:
- print "ERROR: An unknown error occurred, traceback:"
- print e
+ print "ERROR: An unexpected error occurred, traceback: \n{}".format(e)
else:
print "Admin '{}' successfully added".format(username)
if not otp_secret:
diff --git a/securedrop/source.py b/securedrop/source.py
--- a/securedrop/source.py
+++ b/securedrop/source.py
@@ -128,6 +128,20 @@ def generate_unique_codename(num_words):
"""Generate random codenames until we get an unused one"""
while True:
codename = crypto_util.genrandomid(num_words)
+
+ # The maximum length of a word in the wordlist is 6 letters and the
+ # maximum codename length is 10 words, so it is currently impossible to
+ # generate a codename that is longer than the maximum codename length
+ # (currently 128 characters). This code is meant to be defense in depth
+ # to guard against potential future changes, such as modifications to
+ # the word list or the maximum codename length.
+ if len(codename) > Source.MAX_CODENAME_LEN:
+ app.logger.warning(
+ "Generated a source codename that was too long, "
+ "skipping it. This should not happen. "
+ "(Codename='{}')".format(codename))
+ continue
+
sid = crypto_util.hash_codename(codename) # scrypt (slow)
matching_sources = Source.query.filter(
Source.filesystem_id == sid).all()
@@ -334,22 +348,35 @@ def delete():
def valid_codename(codename):
- return os.path.exists(store.path(crypto_util.hash_codename(codename)))
+ # Ignore codenames that are too long to avoid DoS
+ if len(codename) > Source.MAX_CODENAME_LEN:
+ app.logger.info(
+ "Ignored attempted login because the codename was too long.")
+ return False
+
+ try:
+ filesystem_id = crypto_util.hash_codename(codename)
+ except crypto_util.CryptoException as e:
+ app.logger.info(
+ "Could not compute filesystem ID for codename '{}': {}".format(
+ codename, e))
+ abort(500)
+
+ source = Source.query.filter_by(filesystem_id=filesystem_id).first()
+ return source is not None
@app.route('/login', methods=('GET', 'POST'))
def login():
if request.method == 'POST':
codename = request.form['codename'].strip()
- try:
- valid = valid_codename(codename)
- except crypto_util.CryptoException:
- pass
+ if valid_codename(codename):
+ session.update(codename=codename, logged_in=True)
+ return redirect(url_for('lookup', from_login='1'))
else:
- if valid:
- session.update(codename=codename, logged_in=True)
- return redirect(url_for('lookup', from_login='1'))
- flash("Sorry, that is not a recognized codename.", "error")
+ app.logger.info(
+ "Login failed for invalid codename".format(codename))
+ flash("Sorry, that is not a recognized codename.", "error")
return render_template('login.html')
diff --git a/securedrop/version.py b/securedrop/version.py
--- a/securedrop/version.py
+++ b/securedrop/version.py
@@ -1 +1 @@
-__version__ = '0.3.2'
+__version__ = '0.3.4'
| diff --git a/securedrop/tests/test_journalist.py b/securedrop/tests/test_journalist.py
--- a/securedrop/tests/test_journalist.py
+++ b/securedrop/tests/test_journalist.py
@@ -1,7 +1,10 @@
-import journalist
import unittest
+
from mock import patch, ANY, MagicMock
+import journalist
+import common
+from db import Journalist, InvalidPasswordLength, db_session
class TestJournalist(unittest.TestCase):
@@ -74,3 +77,61 @@ def tearDownClass(cls):
# Reset the module variables that were changed to mocks so we don't
# break other tests
reload(journalist)
+
+
+class TestJournalistLogin(unittest.TestCase):
+
+ def setUp(self):
+ common.shared_setup()
+
+ # Patch the two-factor verification so it always succeeds
+ patcher = patch('db.Journalist.verify_token')
+ self.addCleanup(patcher.stop)
+ self.mock_journalist_verify_token = patcher.start()
+ self.mock_journalist_verify_token.return_value = True
+
+ self.username = "test user"
+ self.password = "test password"
+ self.user = Journalist(
+ username=self.username,
+ password=self.password)
+ db_session.add(self.user)
+ db_session.commit()
+
+ # Use a patched login function to avoid dealing with two-factor tokens
+ # (which are being ignored here anyway)
+ self.login = lambda username, password: \
+ Journalist.login(username, password, "")
+
+ def tearDown(self):
+ common.shared_teardown()
+ # TODO: figure out why this is necessary here, but unnecessary in all
+ # of the tests in `tests/test_unit_*.py`. Without this, the session
+ # continues to return values even if the underlying database is deleted
+ # (as in `shared_teardown`).
+ db_session.remove()
+
+ @patch('db.Journalist._scrypt_hash')
+ @patch('db.Journalist.valid_password', return_value=True)
+ def test_login_with_valid_length_password_calls_scrypt(
+ self, mock_scrypt_hash, mock_valid_password):
+ self.login(self.username, self.password)
+ self.assertTrue(mock_scrypt_hash.called,
+ "Failed to call _scrypt_hash for password w/ valid length")
+
+ @patch('db.Journalist._scrypt_hash')
+ def test_login_with_invalid_length_password_doesnt_call_scrypt(
+ self, mock_scrypt_hash):
+ print "test_login_with_invalid_length_password_calls_scrypt"
+ invalid_pw = 'a'*(Journalist.MAX_PASSWORD_LEN + 1)
+ with self.assertRaises(InvalidPasswordLength):
+ self.login(self.username, invalid_pw)
+ self.assertFalse(mock_scrypt_hash.called,
+ "Called _scrypt_hash for password w/ invalid length")
+
+ @classmethod
+ def tearDownClass(cls):
+ # Reset the module variables that were changed to mocks so we don't
+ # break other tests
+ reload(journalist)
+
diff --git a/securedrop/tests/test_unit_journalist.py b/securedrop/tests/test_unit_journalist.py
--- a/securedrop/tests/test_unit_journalist.py
+++ b/securedrop/tests/test_unit_journalist.py
@@ -13,7 +13,7 @@
import crypto_util
import journalist
import common
-from db import db_session, Source, Journalist
+from db import db_session, Source, Journalist, InvalidPasswordLength
# Set environment variable so config.py uses a test environment
os.environ['SECUREDROP_ENV'] = 'test'
@@ -176,6 +176,15 @@ def test_bulk_download(self):
os.path.join(source.journalist_filename, files[0])
))
+ def test_max_password_length(self):
+ """Creating a Journalist with a password that is greater than the
+ maximum password length should raise an exception"""
+ overly_long_password = 'a'*(Journalist.MAX_PASSWORD_LEN + 1)
+ with self.assertRaises(InvalidPasswordLength):
+ temp_journalist = Journalist(
+ username="My Password is Too Big!",
+ password=overly_long_password)
+
if __name__ == "__main__":
unittest.main(verbosity=2)
diff --git a/securedrop/tests/test_unit_source.py b/securedrop/tests/test_unit_source.py
--- a/securedrop/tests/test_unit_source.py
+++ b/securedrop/tests/test_unit_source.py
@@ -11,6 +11,7 @@
from flask import session, escape
from mock import patch, ANY
import source
+from db import Source
os.environ['SECUREDROP_ENV'] = 'test'
@@ -234,6 +235,21 @@ def test_tor2web_warning(self):
self.assertEqual(rv.status_code, 200)
self.assertIn("You appear to be using Tor2Web.", rv.data)
+ @patch('crypto_util.hash_codename')
+ def test_login_with_overly_long_codename(self, mock_hash_codename):
+ """Attempting to login with an overly long codename should result in
+ an error, and scrypt should not be called to avoid DoS."""
+ overly_long_codename = 'a' * (Source.MAX_CODENAME_LEN + 1)
+ with self.client as client:
+ rv = client.post(
+ '/login',
+ data=dict(codename=overly_long_codename),
+ follow_redirects=True)
+ self.assertEqual(rv.status_code, 200)
+ self.assertIn("Sorry, that is not a recognized codename.", rv.data)
+ self.assertFalse(mock_hash_codename.called,
+ "Called hash_codename for codename w/ invalid length")
+
if __name__ == "__main__":
unittest.main(verbosity=2)
| Functional tests broken in local development environment
# Steps to reproduce (STR)
``` sh
vagrant destroy -f development && vagrant up development
vagrant ssh
cd /vagrant/securedrop
./manage.py test
```
**Expected Result:** The full test suite runs and all tests pass.
**Actual Result:** The functional tests error out with the following traceback:
```
test_submit_and_retrieve_happy_path (tests.functional.submit_and_retrieve_message.SubmitAndRetrieveMessage) ... * Running on http://127.0.0.1:50827/
* Running on http://127.0.0.1:46191/
ERROR
======================================================================
ERROR: test_submit_and_retrieve_happy_path (tests.functional.submit_and_retrieve_message.SubmitAndRetrieveMessage)
----------------------------------------------------------------------
Traceback (most recent call last):
File "tests/functional/submit_and_retrieve_message.py", line 15, in setUp
functional_test.FunctionalTest.setUp(self)
File "tests/functional/functional_test.py", line 82, in setUp
self.driver = self._create_webdriver()
File "tests/functional/functional_test.py", line 46, in _create_webdriver
return webdriver.Firefox(firefox_binary=firefox)
File "/usr/local/lib/python2.7/dist-packages/selenium/webdriver/firefox/webdriver.py", line 59, in __init__
self.binary, timeout),
File "/usr/local/lib/python2.7/dist-packages/selenium/webdriver/firefox/extension_connection.py", line 47, in __init__
self.binary.launch_browser(self.profile)
File "/usr/local/lib/python2.7/dist-packages/selenium/webdriver/firefox/firefox_binary.py", line 66, in launch_browser
self._wait_until_connectable()
File "/usr/local/lib/python2.7/dist-packages/selenium/webdriver/firefox/firefox_binary.py", line 105, in _wait_until_connectable
raise WebDriverException("Can't load the profile. Profile "
WebDriverException: Message: Can't load the profile. Profile Dir: %s If you specified a log_file in the FirefoxBinary constructor, check it for details.
----------------------------------------------------------------------
Ran 1 test in 30.725s
FAILED (errors=1)
```
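As a debugging aid (a hedged sketch, not part of the test suite): the exception message above suggests passing a `log_file` to `FirefoxBinary` so the browser's startup output is captured instead of discarded; the log path below is an arbitrary choice, and a Firefox/Selenium version mismatch is a common underlying cause of this error.
```python
# Hedged debugging sketch: capture Firefox's startup output to learn why the
# profile fails to load. Run inside the development VM where the tests run.
from selenium import webdriver
from selenium.webdriver.firefox.firefox_binary import FirefoxBinary

with open('/tmp/firefox_webdriver.log', 'w') as log_file:   # arbitrary path
    firefox = FirefoxBinary(log_file=log_file)  # log_file receives stdout/stderr
    driver = webdriver.Firefox(firefox_binary=firefox)
    driver.quit()
# If the WebDriverException is raised again, /tmp/firefox_webdriver.log should now
# contain the browser's own error output.
```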
Update docs to use Unsafe Browser for LAN access
Some steps of the install and admin documentation currently direct the reader to access LAN addresses using the Tor Browser: for example, to configure the local firewall using the pfSense WebGUI. While this currently works in Tails, they have an [open ticket](https://labs.riseup.net/code/issues/7976) to block LAN access from the Tor Browser. The upstream issue is targeted for Tails 1.4.1, which is scheduled for release on [**6/30/15**](https://tails.boum.org/contribute/calendar/).
To fix this issue, change the documentation and update the screenshots to use the _Unsafe Browser_ for any access to web resources on the LAN.
Server Setup doc clarifications
1) Specify that the username and password have to be the same for both the app and mon servers.
2) The admin SSH key should be passwordless (preferred) or protected with a diceware-generated passphrase, because pinentry doesn't let you copy/paste.
see:
https://github.com/freedomofpress/securedrop/blob/0.3.3/docs/install.md#set-up-the-servers
| 2015-07-20T20:34:08Z | [] | [] |
|
freedomofpress/securedrop | 1,097 | freedomofpress__securedrop-1097 | [
"1092"
] | 775af0ebbab519d548a5184607f677fc935d4283 | diff --git a/securedrop/journalist.py b/securedrop/journalist.py
--- a/securedrop/journalist.py
+++ b/securedrop/journalist.py
@@ -219,7 +219,6 @@ def admin_new_user_two_factor():
return render_template("admin_new_user_two_factor.html", user=user)
-
@app.route('/admin/reset-2fa-totp', methods=['POST'])
@admin_required
def admin_reset_two_factor_totp():
@@ -271,6 +270,11 @@ def admin_edit_user(user_id):
try:
db_session.add(user)
db_session.commit()
+ flash("Password successfully changed for user {} ".format(
+ user.username),
+ "notification"
+ )
+
except Exception as e:
db_session.rollback()
if "username is not unique" in str(e):
@@ -292,6 +296,81 @@ def admin_delete_user(user_id):
return redirect(url_for('admin_index'))
[email protected]('/account', methods=('GET', 'POST'))
+@login_required
+def edit_account():
+ user = g.user
+
+ if request.method == 'POST':
+ if request.form['password'] != "":
+ if request.form['password'] != request.form['password_again']:
+ flash("Passwords didn't match", "error")
+ return redirect(url_for("edit_account"))
+ try:
+ user.set_password(request.form['password'])
+ except InvalidPasswordLength:
+ flash("Your password is too long "
+ "(maximum length {} characters)".format(
+ Journalist.MAX_PASSWORD_LEN), "error")
+ return redirect(url_for("edit_account"))
+
+ try:
+ db_session.add(user)
+ db_session.commit()
+ flash(
+ "Password successfully changed!",
+ "notification")
+ except Exception as e:
+ flash(
+ "An unknown error occurred, please inform your administrator",
+ "error")
+ app.logger.error("Password change for '{}' failed: {}".format(
+ user, e))
+ db_session.rollback()
+ return render_template('edit_account.html')
+
+
[email protected]('/account/2fa', methods=('GET', 'POST'))
+@login_required
+def account_new_two_factor():
+ user = g.user
+
+ if request.method == 'POST':
+ token = request.form['token']
+ if user.verify_token(token):
+ flash(
+ "Two factor token successfully verified!",
+ "notification")
+ return redirect(url_for('edit_account'))
+ else:
+ flash("Two factor token failed to verify", "error")
+
+ return render_template('account_new_two_factor.html', user=user)
+
+
[email protected]('/account/reset-2fa-totp', methods=['POST'])
+@login_required
+def account_reset_two_factor_totp():
+ user = g.user
+ user.is_totp = True
+ user.regenerate_totp_shared_secret()
+ db_session.commit()
+ return redirect(url_for('account_new_two_factor'))
+
+
[email protected]('/account/reset-2fa-hotp', methods=['POST'])
+@login_required
+def account_reset_two_factor_hotp():
+ user = g.user
+ otp_secret = request.form.get('otp_secret', None)
+ if otp_secret:
+ user.set_hotp_secret(otp_secret)
+ db_session.commit()
+ return redirect(url_for('account_new_two_factor'))
+ else:
+ return render_template('account_edit_hotp_secret.html')
+
+
def make_star_true(sid):
source = get_source(sid)
if source.star:
| diff --git a/securedrop/tests/test_unit_integration.py b/securedrop/tests/test_unit_integration.py
--- a/securedrop/tests/test_unit_integration.py
+++ b/securedrop/tests/test_unit_integration.py
@@ -541,6 +541,71 @@ def test_filenames_delete(self):
soup.select('ul#submissions li a .filename')[2].contents[0])
self.assertTrue(re.match(submission_filename_re.format(4), filename))
+ def test_user_change_password(self):
+ """Test that a journalist can successfully login after changing their password"""
+
+ # change password
+ self.journalist_app.post('/account', data=dict(
+ password='newpass',
+ password_again='newpass'
+ ))
+
+ # logout
+ common.logout(self.journalist_app)
+
+ # login with new credentials should redirect to index page
+ rv = self.journalist_app.post('/login', data=dict(
+ username=self.user.username,
+ password='newpass',
+ token=self.user.totp.now(),
+ follow_redirects=True))
+ self.assertEqual(rv.status_code, 302)
+
+ def test_login_after_regenerate_totp(self):
+ """Test that journalists can login after resetting their Google Authenticator 2fa"""
+
+ # regenerate totp
+ self.journalist_app.post('/account/reset-2fa-totp')
+
+ # successful verification should redirect to /account
+ rv = self.journalist_app.post('/account/2fa', data=dict(
+ token=self.user.totp.now()))
+ self.assertEqual(rv.status_code, 302)
+
+ # log out
+ common.logout(self.journalist_app)
+
+ # login with new 2fa secret should redirect to index page
+ rv = self.journalist_app.post('/login', data=dict(
+ username=self.user.username,
+ password=self.user_pw,
+ token=self.user.totp.now(),
+ follow_redirects=True))
+ self.assertEqual(rv.status_code, 302)
+
+ def test_login_after_regenerate_hotp(self):
+ """Test that journalists can login after resetting their HOTP 2fa"""
+
+ # edit hotp
+ self.journalist_app.post('/account/reset-2fa-hotp', data=dict(
+ otp_secret=123456))
+
+ # successful verificaton should redirect to /account
+ rv = self.journalist_app.post('/account/2fa', data=dict(
+ token=self.user.hotp))
+ self.assertEqual(rv.status_code, 302)
+
+ # log out
+ common.logout(self.journalist_app)
+
+ # login with new 2fa secret should redirect to index page
+ rv = self.journalist_app.post('/login', data=dict(
+ username=self.user.username,
+ password=self.user_pw,
+ token=self.user.hotp,
+ follow_redirects=True))
+ self.assertEqual(rv.status_code, 302)
+
def helper_filenames_submit(self):
self.source_app.post('/submit', data=dict(
msg="This is a test.",
@@ -584,5 +649,6 @@ def helper_filenames_delete(self, soup, i):
self._wait_for(lambda: self.assertFalse(
any([os.path.exists(store.path(sid, doc_name)) for doc_name in checkbox_values])))
+
if __name__ == "__main__":
unittest.main(verbosity=2)
diff --git a/securedrop/tests/test_unit_journalist.py b/securedrop/tests/test_unit_journalist.py
--- a/securedrop/tests/test_unit_journalist.py
+++ b/securedrop/tests/test_unit_journalist.py
@@ -96,6 +96,17 @@ def test_admin_user_has_admin_link_in_index(self):
"Admin")
self.assertIn(admin_link, res.data)
+ def test_user_has_edit_account_link_in_index(self):
+ res = self.client.post(url_for('login'), data=dict(
+ username=self.user.username,
+ password=self.user_pw,
+ token=self.user.totp.now()),
+ follow_redirects=True)
+ edit_account_link = '<a href="{}">{}</a>'.format(
+ url_for('edit_account'),
+ "Edit Account")
+ self.assertIn(edit_account_link, res.data)
+
def _login_user(self):
self.client.post(url_for('login'), data=dict(
username=self.user.username,
@@ -138,7 +149,7 @@ def test_admin_authorization_for_posts(self):
def test_user_authorization_for_gets(self):
urls = [url_for('index'), url_for('col', sid='1'),
- url_for('doc', sid='1', fn='1')]
+ url_for('doc', sid='1', fn='1'), url_for('edit_account')]
for url in urls:
res = self.client.get(url)
@@ -147,11 +158,54 @@ def test_user_authorization_for_gets(self):
def test_user_authorization_for_posts(self):
urls = [url_for('add_star', sid='1'), url_for('remove_star', sid='1'),
url_for('col_process'), url_for('col_delete_single', sid='1'),
- url_for('reply'), url_for('generate_code'), url_for('bulk')]
+ url_for('reply'), url_for('generate_code'), url_for('bulk'),
+ url_for('account_new_two_factor'), url_for('account_reset_two_factor_totp'),
+ url_for('account_reset_two_factor_hotp')]
for url in urls:
res = self.client.post(url)
self.assert_status(res, 302)
+ def test_invalid_user_password_change(self):
+ self._login_user()
+ res = self.client.post(url_for('edit_account'), data=dict(
+ password='not',
+ password_again='thesame'))
+ self.assert_redirects(res, url_for('edit_account'))
+
+ def test_valid_user_password_change(self):
+ self._login_user()
+ res = self.client.post(url_for('edit_account'), data=dict(
+ password='valid',
+ password_again='valid'))
+ self.assertIn("Password successfully changed", res.data)
+
+ def test_regenerate_totp(self):
+ self._login_user()
+ oldTotp = self.user.totp
+
+ res = self.client.post(url_for('account_reset_two_factor_totp'))
+ newTotp = self.user.totp
+
+ # check that totp is different
+ self.assertNotEqual(oldTotp, newTotp)
+
+ # should redirect to verification page
+ self.assert_redirects(res, url_for('account_new_two_factor'))
+
+ def test_edit_hotp(self):
+ self._login_user()
+ oldHotp = self.user.hotp
+
+ res = self.client.post(url_for('account_reset_two_factor_hotp'), data=dict(
+ otp_secret=123456))
+ newHotp = self.user.hotp
+
+ # check that hotp is different
+ self.assertNotEqual(oldHotp, newHotp)
+
+ # should redirect to verification page
+ self.assert_redirects(res, url_for('account_new_two_factor'))
+
# TODO: more tests for admin interface
def test_bulk_download(self):
| Allow journalists to reset their own login credentials
Currently, it is possible for **admin** users to reset any other user's login credentials (password, 2fa). This is primarily useful for credential recovery, e.g. a journalist forgets their password, loses their two-factor device, or gets a new one.
I still advocate for keeping this feature in place as a disaster recovery mechanism. If a journalist cannot log in at all (lost password or 2fa device), then they will need an admin's assistance to regain access.
However, there is an additional use case where the journalist has not lost access, but wishes to switch to using a different two-factor device. One common example is where the journalist uses a TOTP app (like Google Authenticator) on their smartphone, and gets a new smart phone. For security reasons, these apps usually do not allow the two-factor secrets to be extracted or transferred without taking exceptional and potentially dangerous steps (e.g. rooting or jailbreaking the device's OS).
Another example would be a journalist who has been using a two-factor app on their phone, but wishes to switch to a two-factor hardware token (e.g. a Yubikey).
These use cases are all possible now with an admin's assistance, but this is cumbersome and may be difficult or impossible to arrange if the journalist and the admin are not physically co-located.
We should add support for journalists to reset or update their own credentials, while maintaining admins' ability to reset all users' credentials in case they lose access to their account.
Related to #1091.
| 2015-08-18T08:58:17Z | [] | [] |
|
freedomofpress/securedrop | 1,309 | freedomofpress__securedrop-1309 | [
"1288"
] | b39bf3868e2fe8bad8d5813951893068e239f85e | diff --git a/tails_files/securedrop_init.py b/tails_files/securedrop_init.py
--- a/tails_files/securedrop_init.py
+++ b/tails_files/securedrop_init.py
@@ -1,44 +1,47 @@
-#!/usr/bin/env python
+#!/usr/bin/python
import os
import sys
import subprocess
-if __name__ == '__main__':
- # check for root
- if os.geteuid() != 0:
- sys.exit('You need to run this as root')
+# check for root
+if os.geteuid() != 0:
+ sys.exit('You need to run this as root')
- # paths
- path_torrc_additions = '/home/amnesia/Persistent/.securedrop/torrc_additions'
- path_torrc_backup = '/etc/tor/torrc.bak'
- path_torrc = '/etc/tor/torrc'
+# paths
+path_torrc_additions = '/home/amnesia/Persistent/.securedrop/torrc_additions'
+path_torrc_backup = '/etc/tor/torrc.bak'
+path_torrc = '/etc/tor/torrc'
- # load torrc_additions
- if os.path.isfile(path_torrc_additions):
- torrc_additions = open(path_torrc_additions).read()
- else:
- sys.exit('Error opening {0} for reading'.format(path_torrc_additions))
+# load torrc_additions
+if os.path.isfile(path_torrc_additions):
+ torrc_additions = open(path_torrc_additions).read()
+else:
+ sys.exit('Error opening {0} for reading'.format(path_torrc_additions))
- # load torrc
- if os.path.isfile(path_torrc_backup):
- torrc = open(path_torrc_backup).read()
+# load torrc
+if os.path.isfile(path_torrc_backup):
+ torrc = open(path_torrc_backup).read()
+else:
+ if os.path.isfile(path_torrc):
+ torrc = open(path_torrc).read()
else:
- if os.path.isfile(path_torrc):
- torrc = open(path_torrc).read()
- else:
- sys.exit('Error opening {0} for reading'.format(path_torrc))
+ sys.exit('Error opening {0} for reading'.format(path_torrc))
- # save a backup
- open(path_torrc_backup, 'w').write(torrc)
+ # save a backup
+ open(path_torrc_backup, 'w').write(torrc)
- # append the additions
- open(path_torrc, 'w').write(torrc + torrc_additions)
+# append the additions
+open(path_torrc, 'w').write(torrc + torrc_additions)
- # reload tor
- subprocess.call(['/usr/sbin/service', 'tor', 'reload'])
+# reload tor
+try:
+ subprocess.check_call(['systemctl', 'reload', '[email protected]'])
+except subprocess.CalledProcessError:
+ sys.exit('Error reloading Tor')
- # success
- subprocess.call(['/usr/bin/sudo', '-u', 'amnesia', '/usr/bin/notify-send', '-i', '/home/amnesia/Persistent/.securedrop/securedrop_icon.png',
- 'Updated torrc!', 'You can now connect to your SecureDrop\ndocument interface.'])
+# notify the user
+subprocess.call(['tails-notify-user',
+ 'SecureDrop successfully auto-configured!',
+ 'You can now access the Document Interface.\nIf you are an admin, you can now SSH to the servers.'])
| diff --git a/docs/test_the_installation.rst b/docs/test_the_installation.rst
--- a/docs/test_the_installation.rst
+++ b/docs/test_the_installation.rst
@@ -10,15 +10,18 @@ SSH to both servers over Tor
On the Admin Workstation, you should be able to SSH to the App
Server and the Monitor Server. ::
+ $ ssh app
+ $ ssh mon
+
+The SSH aliases should have been configured automatically by running
+the ``install.sh`` script. If you're unable to connect via aliases,
+try using the verbose command format to troubleshoot: ::
+
$ ssh <username>@<app .onion>
$ ssh <username>@<mon .onion>
-If you set up :ref:`SSH Host Aliases` during the post-install
-setup for the Admin Workstation, you should be able to connect
-with the aliases: ::
-
- $ ssh app
- $ ssh mon
+.. tip:: You can find the Onion URLs for SSH in ``app-ssh-aths`` and
+ ``mon-ssh-aths`` inside the ``install_files/ansible-base`` directory.
Log in to both servers via TTY
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
| NetworkManager hook notifications broken on Tails 2.x
The invocation of `notify-send` in `securedrop_init.py` does not show a notification in Tails 2.x like it did in Tails 1.x. This is due to dbus-related changes in Debian Jessie, and is a known issue as a quick [search](https://labs.riseup.net/code/projects/tails/search?utf8=%E2%9C%93&changesets=1&q=notify-send) of the Tails issue tracker demonstrates.
Furthermore, it looks like Tails has a special wrapper script, `tails-notify-user`, specifically meant for the use case of displaying notifications to the user from background scripts running as different users, so we should just use that instead.
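For illustration, a minimal sketch of the replacement approach described above, assuming the hook runs as root on Tails 2.x and that `tails-notify-user` is on its PATH (the summary and body strings are placeholders):

```python
import subprocess

# tails-notify-user takes a summary and a body, and handles the
# dbus/user-session plumbing that a bare notify-send call misses
# when invoked as root from a NetworkManager hook.
subprocess.call(['tails-notify-user',
                 'SecureDrop successfully auto-configured!',
                 'You can now access the Document Interface.'])
```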
| 2016-05-24T01:51:40Z | [] | [] |
|
freedomofpress/securedrop | 1,370 | freedomofpress__securedrop-1370 | [
"1360"
] | ad870c572852d9e4cacdebeda373cf47a84c84de | diff --git a/securedrop/crypto_util.py b/securedrop/crypto_util.py
--- a/securedrop/crypto_util.py
+++ b/securedrop/crypto_util.py
@@ -5,6 +5,7 @@
from Crypto.Random import random
import gnupg
+from gnupg._util import _is_stream, _make_binary_stream
import scrypt
import config
@@ -154,6 +155,9 @@ def encrypt(plaintext, fingerprints, output=None):
fingerprints = [fingerprints, ]
fingerprints = [fpr.replace(' ', '') for fpr in fingerprints]
+ if not _is_stream(plaintext):
+ plaintext = _make_binary_stream(plaintext, "utf_8")
+
out = gpg.encrypt(plaintext,
*fingerprints,
output=output,
@@ -165,7 +169,7 @@ def encrypt(plaintext, fingerprints, output=None):
raise CryptoException(out.stderr)
-def decrypt(secret, plain_text):
+def decrypt(secret, ciphertext):
"""
>>> key = genkeypair('randomid', 'randomid')
>>> decrypt('randomid', 'randomid',
@@ -174,8 +178,7 @@ def decrypt(secret, plain_text):
'Goodbye, cruel world!'
"""
hashed_codename = hash_codename(secret, salt=SCRYPT_GPG_PEPPER)
- return gpg.decrypt(plain_text, passphrase=hashed_codename).data
-
+ return gpg.decrypt(ciphertext, passphrase=hashed_codename).data
if __name__ == "__main__":
import doctest
| diff --git a/securedrop/tests/test_unit_integration.py b/securedrop/tests/test_unit_integration.py
--- a/securedrop/tests/test_unit_integration.py
+++ b/securedrop/tests/test_unit_integration.py
@@ -257,8 +257,21 @@ def test_submit_file(self):
def test_reply_normal(self):
self.helper_test_reply("This is a test reply.", True)
- def test_reply_unicode(self):
- self.helper_test_reply("Teşekkürler", True)
+ def test_unicode_reply_with_ansi_env(self):
+ # This makes python-gnupg handle encoding equivalent to if we were
+ # running SD in an environment where os.getenv("LANG") == "C".
+ # Unfortunately, with the way our test suite is set up simply setting
+ # that env var here will not have the desired effect. Instead we
+ # monkey-patch the GPG object that is called crypto_util to imitate the
+ # _encoding attribute it would have had it been initialized in a "C"
+ # environment. See
+ # https://github.com/freedomofpress/securedrop/issues/1360 for context.
+ old_encoding = crypto_util.gpg._encoding
+ crypto_util.gpg._encoding = "ansi_x3.4_1968"
+ try:
+ self.helper_test_reply("ᚠᛇᚻ᛫ᛒᛦᚦ᛫ᚠᚱᚩᚠᚢᚱ᛫ᚠᛁᚱᚪ᛫ᚷᛖᚻᚹᛦᛚᚳᚢᛗ", True)
+ finally:
+ crypto_util.gpg._encoding = old_encoding
def _can_decrypt_with_key(self, msg, key_fpr, passphrase=None):
"""
| Unicode in messages throws internal server error
Writing unicode in a message throws an internal server error. SecureDrop should support unicode in messages for those of us whose instances are used by non-English speakers. Not only does this happen, we get the generic "internal server error" message that doesn't explain what went wrong or how. This is very bad for sources, who upon reaching an SD instance will be unable to submit, will not see why, and will presumably assume SD isn't a service worth using.
Moving to Python 3 (#997) is probably the easiest way to do this since we get better unicode handling for minimal overhead.
| Duplicate from #1343 ?
@KwadroNaut Somewhat. Both the source and journalist app (SD 0.3.8) break if you pass in ö, ü, ä, or ß (that's all I tested, because that's the most pressing for me). That issue only mentions journalist replies. This specifies that this is a usability problem since non English speakers are likely to encounter "unresolvable" errors if they attempt to submit.
On a related note, I have mostly ported everything to Python 3 already. When I get a free evening, I can get the last error with hashing figured out.
https://github.com/heartsucker/securedrop/tree/python3
Hey guys. Since it seems like no one else is patching this, can I get some input to help me patch it faster?
@conorsch @garrettr @fowlslegs
Currently the issue is that Isis's `python-gnupg` package in Python 3 attempts to read files and strings with the buffer interface. Since we wrap all the files with the `SecureTemporaryFile`, this interface isn't exposed. I haven't found a way to get this class to work with the GPG package.
In [this comment](https://github.com/freedomofpress/securedrop/issues/1283#issuecomment-218835538), it's said that the physical security of the servers is outside the threat model. Does this mean it is acceptable to remove that class entirely and just use the normal Python file class?
@heartsucker Can you provide test input to trigger failure? As @mdrose notes in #1343, the error doesn't seem to occur under 0.3.8 or develop. On both the Source Interface (as a text submission) and Document Interface (as a text reply), the following strings submit successfully with no errors:
- Ich möchte dir was sagen
- Hallo! ö, ü, ä, or ß
- 我不好說中文
So I don't think the problem is as simple as we first thought.
The string "Hallo! ö, ü, ä, or ß" breaks my instance. You can test the one linked at https://berlinleaks.org (on mobile, short reply).
Update: I can trigger the error with all three of those strings when submitting from Tails to the test VM. Since it failed under Tails, but not under a dev environment using Tor, that may indicate we're looking at a client-side locale issue. It seems to be specifically Tor Browser that triggers the failure; I've not been able to reproduce the failure with other browsers, even when routing their traffic over Tor.
@conorsch I was also able to reproduce the error; all three strings break my instance in Tor Browser on Windows 10.
> Currently the issue is that Isis's `python-gnupg` package in Python 3 attempts to read files and strings with the buffer interface. Since we wrap all the files with the `SecureTemporaryFile`, this interface isn't exposed. I haven't found a way to get this class to work with the GPG package.
I don't think this is correct. `python-gnupg` calls the `SecureTemporaryFile.read()` [1] to read from the file or message submitted. The `SecureTemporaryFile.read()` method just calls the `read()` method on the underlying open file object [2]. This should not cause any problems with unicode characters because during writing to this file data is encoded as UTF-8 [3] and the `encryptor.encrypt()`/`decryptor.decrypt()` methods operate on raw bytes, so should have no effect on what's returned.
[1] https://github.com/isislovecruft/python-gnupg/blob/2.0.2/gnupg/_util.py#L238
[2] https://github.com/freedomofpress/securedrop/blob/release/0.3.8/securedrop/secure_tempfile.py#L75
[3] https://github.com/freedomofpress/securedrop/blob/release/0.3.8/securedrop/secure_tempfile.py#L61
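To make the chain in [1]-[3] concrete, here is a simplified stand-in (illustrative only, not SecureDrop's actual `SecureTemporaryFile`) showing the intended behaviour: text becomes UTF-8 bytes at write time, and `read()` only ever hands raw bytes to the encryption layer:

```python
class ByteTempFileSketch(object):
    """Illustrative sketch of the write-encode / read-bytes behaviour."""

    def __init__(self, backing_file):
        # backing_file is any file object opened in binary mode
        self.backing_file = backing_file

    def write(self, data):
        if isinstance(data, unicode):    # Python 2, as in SecureDrop 0.3
            data = data.encode('utf-8')  # [3]: encode before anything is written
        self.backing_file.write(data)

    def read(self, count=None):
        # [2]: defer to the underlying file object, returning raw bytes
        if count is None:
            return self.backing_file.read()
        return self.backing_file.read(count)
```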
@heartsucker Can you try to reproduce w/ the Deutsch locale version of TB: https://www.torproject.org/dist/torbrowser/6.0.3/tor-browser-linux64-6.0.3_de.tar.xz?
Or are you already using that?
@heartsucker Regarding your question:
> In this comment, it's said that the physical security of the servers is outside the threat model. Does this mean it is acceptable to remove [the `SecureTemporaryFile`] class entirely and just use the normal Python file class?
No, we don't want to drop use of that class lightly. I hear you on the physical-access-equals-root-access angle, but using `SecureTemporaryFile` grants us significant protections against forensic discovery, by preventing plaintext submissions from ever being written to disk, even temporarily.
I can test the German version next time I have time. I'm using English locale TBB and English locale Tails. Both have the same problem.
As for the error I'm getting, it specifically says that SecureTempFile doesn't support the buffer interface when I call gpg.encrypt(stf). I can get a traceback when I have time as well.
I haven't had time to install the German TBB, but this is the traceback I'm getting with my attempted Python 3 implementation. Hopefully I can get to that this weekend.
```
tests/test_unit_source.py:246: in test_submit_sanitizes_filename
), follow_redirects=True)
/usr/local/lib/python3.4/dist-packages/werkzeug/test.py:788: in post
return self.open(*args, **kw)
/usr/local/lib/python3.4/dist-packages/flask/testing.py:113: in open
follow_redirects=follow_redirects)
/usr/local/lib/python3.4/dist-packages/werkzeug/test.py:751: in open
response = self.run_wsgi_app(environ, buffered=buffered)
/usr/local/lib/python3.4/dist-packages/werkzeug/test.py:668: in run_wsgi_app
rv = run_wsgi_app(self.application, environ, buffered=buffered)
/usr/local/lib/python3.4/dist-packages/werkzeug/test.py:871: in run_wsgi_app
app_rv = app(environ, start_response)
/usr/local/lib/python3.4/dist-packages/flask/app.py:2000: in __call__
return self.wsgi_app(environ, start_response)
/usr/local/lib/python3.4/dist-packages/flask/app.py:1991: in wsgi_app
response = self.make_response(self.handle_exception(e))
/usr/local/lib/python3.4/dist-packages/flask/app.py:1567: in handle_exception
reraise(exc_type, exc_value, tb)
/usr/local/lib/python3.4/dist-packages/flask/_compat.py:33: in reraise
raise value
/usr/local/lib/python3.4/dist-packages/flask/app.py:1988: in wsgi_app
response = self.full_dispatch_request()
/usr/local/lib/python3.4/dist-packages/flask/app.py:1641: in full_dispatch_request
rv = self.handle_user_exception(e)
/usr/local/lib/python3.4/dist-packages/flask/app.py:1544: in handle_user_exception
reraise(exc_type, exc_value, tb)
/usr/local/lib/python3.4/dist-packages/flask/_compat.py:33: in reraise
raise value
/usr/local/lib/python3.4/dist-packages/flask/app.py:1639: in full_dispatch_request
rv = self.dispatch_request()
/usr/local/lib/python3.4/dist-packages/flask/app.py:1625: in dispatch_request
return self.view_functions[rule.endpoint](**req.view_args)
source.py:72: in decorated_function
return f(*args, **kwargs)
source.py:301: in submit
fh.stream))
store.py:115: in save_file_submission
crypto_util.encrypt(stf, config.JOURNALIST_KEY, encrypted_file_path)
crypto_util.py:158: in encrypt
armor=False)
/usr/local/lib/python3.4/dist-packages/gnupg/gnupg.py:972: in encrypt
stream = _make_binary_stream(data, self._encoding)
thing = <secure_tempfile.SecureTemporaryFile object at 0x7f383018a400>, encoding = 'utf_8', armor = True
def _make_binary_stream(thing, encoding=None, armor=True):
"""Encode **thing**, then make it stream/file-like.
:param thing: The thing to turn into a encoded stream.
:rtype: ``io.BytesIO`` or ``io.StringIO``.
:returns: The encoded **thing**, wrapped in an ``io.BytesIO`` (if
available), otherwise wrapped in a ``io.StringIO``.
"""
if _py3k:
if isinstance(thing, str):
thing = thing.encode(encoding)
else:
if type(thing) is not str:
thing = thing.encode(encoding)
try:
> rv = BytesIO(thing)
E TypeError: 'SecureTemporaryFile' does not support the buffer interface
```
Hey @heartsucker, thanks for reporting this, and sorry for the delay in patching it. We spent some time digging into this issue this afternoon; as a result, we have identified the cause, and have a proposal for a solution.
## Cause
First, to clarify: this issue is server-side, not client-side. I reproduced on a staging environment with either Chrome or Firefox as my client browser. This is not, as some earlier comments suggested, an issue related to the use of Tor Browser.
This issue occurs whenever we pass user-submitted text from the web application to the python-gnupg library. There are two primary situations where this occurs:
- When a source submits a message
  - The message is encrypted to the application GPG public key
- When a journalist submits a reply
  - The reply is encrypted to the corresponding source's public key (as well as the application GPG public key).
The web application consistently uses UTF-8 encoding, and the underlying Werkzeug library used by Flask helpfully converts values from the web application (such as user-submitted form fields in a POST request) to Python 2.x Unicode strings. **The bug is that we are passing these unicode strings directly to python-gnupg, when we should be encoding them first.**
`gnupg` gamely attempts to encode the strings for us (see [`_make_binary_stream`](https://github.com/isislovecruft/python-gnupg/blob/2beac24161b07e40642d93f5d85faeb9740f78db/gnupg/_util.py#L534)). However, for SecureDrop this fails with a `UnicodeEncodeError` when a message containing non-ASCII characters is submitted (which is the underlying cause of the internal server errors reported by OP).
This was a little surprising to us at first, because when we tried to reproduce the error in a Python interpreter there was no issue. python-gnupg [picked up the system's default encoding (UTF-8) as the preferred encoding and used that](https://github.com/isislovecruft/python-gnupg/blob/2beac24161b07e40642d93f5d85faeb9740f78db/gnupg/_meta.py#L194-L197), and everything was sunshine and moonbeams. So why wasn't it working in the production web application?
At this point, a useful clue was that this issue was only reproducible in the production environment, and not in the development environment. These environments use the same code for the web application, but differ significantly otherwise. One of the most significant differences between the environments is that production/staging uses Apache and runs the Flask web application with mod_wsgi, while development just uses the Flask-provided debugging server.
The discrepancy is caused by the fact that when Apache runs an application for WSGI, it starts it in an Apache-controlled environment. This environment can be configured via `/etc/apache2/envvars`, but SecureDrop does not modify it from the default. If you examine the default `envvars` file, you will see that it sets `LANG=C`, which in turn leads Python's `locale.getpreferredencoding` to return the non-Unicode-compatible `ANSI_X3.4-1968` instead of `UTF-8`. python-gnupg then tries to encode a Unicode string with `ANSI_X3.4-1968`, which is naturally doomed to fail, raising a UnicodeEncodeError which causes the observed internal server error by the user (source or journalist).
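A minimal reproduction of that failure mode, assuming a Python 2 interpreter started under the stock Apache `envvars` (`LANG=C`); the sample string is one of the unicode test inputs from this PR's tests:

```python
import locale

# Under LANG=C, glibc reports an ASCII alias as the preferred encoding:
preferred = locale.getpreferredencoding()   # 'ANSI_X3.4-1968', not 'UTF-8'

# Werkzeug hands the application unicode form data; ASCII cannot represent it:
message = u'Teşekkürler'
message.encode(preferred)   # raises UnicodeEncodeError -> the observed 500 error
```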
## Fix
One possible fix is to change the encoding used by mod_wsgi so it's `UTF-8` instead of `C`. Django [recommends](https://docs.djangoproject.com/en/1.10/howto/deployment/wsgi/modwsgi/) doing this so your application can handle file uploads where the filename contains Unicode characters. We should test file uploads with Unicode characters in the filename while we're addressing Unicode issues 😅 .
In addition, we should encode all of our strings as UTF-8 before passing them to python-gnupg, since it's better for the application to set the encoding explicitly rather than relying on the python-gnupg library's best guess of preferred output encoding. @fowlslegs is already working on a patch for this.
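A sketch of that second point, assuming Python 2 string semantics (the fix that actually landed takes a slightly different route, handing gnupg a binary stream built with its own `_make_binary_stream` helper, as shown in the diff above):

```python
def to_utf8_bytes(plaintext):
    """Normalize web-layer unicode to UTF-8 bytes before it reaches gnupg."""
    if isinstance(plaintext, unicode):   # Python 2
        return plaintext.encode('utf-8')
    return plaintext

# e.g. gpg.encrypt(to_utf8_bytes(message), fingerprint, always_trust=True)
```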
I did some perusing of the logs and found that this issue is a regression that was introduced in 0.3.7 as part of https://github.com/freedomofpress/securedrop/pull/1306/commits/f0e3fc9e1aa42442123c7edc556bf5a79471cab1, which was merged into release/0.3.7 in 37ba11c7b94f1a547aa5e8ed342e3da85a663362.
On the upside, this mistake exposed that our tests for this issue (`test_unit_integration::test_reply_unicode`) were not working correctly, so this provides an opportunity to fix them! Yay better tests!
| 2016-08-13T01:53:37Z | [] | [] |
freedomofpress/securedrop | 1,376 | freedomofpress__securedrop-1376 | [
"1313"
] | b2c8c0a9a06125230c83ec2ee65c861db38fe5d3 | diff --git a/install_files/ansible-base/roles/backup/files/backup.py b/install_files/ansible-base/roles/backup/files/backup.py
new file mode 100755
--- /dev/null
+++ b/install_files/ansible-base/roles/backup/files/backup.py
@@ -0,0 +1,36 @@
+#!/usr/bin/python2.7
+"""
+This script is copied to the App server and run by the Ansible playbook. When
+run (as root), it collects all of the necessary information to backup the 0.3
+system and stores it in /tmp/sd-backup-0.3-TIME_STAMP.tar.gz.
+"""
+
+from datetime import datetime
+import os
+import tarfile
+
+def main():
+ backup_filename = 'sd-backup-{}.tar.gz'.format(
+ datetime.utcnow().strftime("%Y-%m-%d--%H-%M-%S"))
+
+ # This code assumes everything is in the default locations.
+ sd_data = '/var/lib/securedrop'
+
+ sd_code = '/var/www/securedrop'
+ sd_config = os.path.join(sd_code, "config.py")
+ sd_custom_logo = os.path.join(sd_code, "static/i/logo.png")
+
+ tor_hidden_services = "/var/lib/tor/services"
+ torrc = "/etc/tor/torrc"
+
+ with tarfile.open(backup_filename, 'w:gz') as backup:
+ backup.add(sd_config)
+ backup.add(sd_custom_logo)
+ backup.add(sd_data)
+ backup.add(tor_hidden_services)
+ backup.add(torrc)
+
+ print backup_filename
+
+if __name__ == "__main__":
+ main()
diff --git a/install_files/ansible-base/roles/backup/files/restore.py b/install_files/ansible-base/roles/backup/files/restore.py
new file mode 100755
--- /dev/null
+++ b/install_files/ansible-base/roles/backup/files/restore.py
@@ -0,0 +1,50 @@
+#!/usr/bin/python2.7
+"""
+This script and backup archive should be copied to the App server and run by
+the Ansible playbook. When run (as root), it restores the contents of the 0.3
+backup file to the machine it's run on.
+
+python restore.py sd-backup-TIMESTAMP.tar.gz
+"""
+
+import os
+import subprocess
+import sys
+import tarfile
+
+
+def verify_args():
+ usage = """
+Usage: restore.py <backup file>
+
+ <backup file> Path to a SecureDrop 0.3 backup created by backup.py"
+ """
+ if len(sys.argv) != 2:
+ print(usage)
+ sys.exit(1)
+
+ if not os.path.exists(sys.argv[1]):
+ print("<backup file> '{}' not found".format(sys.argv(1)))
+ sys.exit(1)
+
+ if os.geteuid() != 0:
+ print("This program must be run as root!")
+ sys.exit(1)
+
+
+def main():
+ verify_args()
+
+ with tarfile.open(sys.argv[1], 'r:*') as backup:
+ # This assumes that both the old installation (source of the backup)
+ # and the new installation (destination of the restore) used the
+ # default paths for various locations.
+ backup.extractall(path='/')
+
+ # Reload Tor and the web server so they pick up the new configuration
+ # If the process exits with a non-zero return code, raises an exception.
+ subprocess.check_call(['service', 'apache2', 'restart'])
+ subprocess.check_call(['service', 'tor', 'reload'])
+
+if __name__ == "__main__":
+ main()
diff --git a/securedrop/crypto_util.py b/securedrop/crypto_util.py
--- a/securedrop/crypto_util.py
+++ b/securedrop/crypto_util.py
@@ -157,17 +157,11 @@ def encrypt(plaintext, fingerprints, output=None):
fingerprints = [fingerprints, ]
fingerprints = [fpr.replace(' ', '') for fpr in fingerprints]
- if isinstance(plaintext, unicode):
- plaintext = plaintext.encode('utf8')
-
- encrypt_fn = gpg.encrypt if isinstance(
- plaintext,
- str) else gpg.encrypt_file
- out = encrypt_fn(plaintext,
- *fingerprints,
- output=output,
- always_trust=True,
- armor=False)
+ out = gpg.encrypt(plaintext,
+ *fingerprints,
+ output=output,
+ always_trust=True,
+ armor=False)
if out.ok:
return out.data
else:
diff --git a/securedrop/secure_tempfile.py b/securedrop/secure_tempfile.py
--- a/securedrop/secure_tempfile.py
+++ b/securedrop/secure_tempfile.py
@@ -6,6 +6,7 @@
from Crypto.Random import random
from Crypto.Util import Counter
+from gnupg._util import _STREAMLIKE_TYPES
class SecureTemporaryFile(_TemporaryFileWrapper):
@@ -85,3 +86,8 @@ def read(self, count=None):
def close(self):
return _TemporaryFileWrapper.close(self)
+
+# python-gnupg will not recognize our SecureTemporaryFile as a stream-like type
+# and will attempt to call encode on it, thinking it's a string-like type. To
+# avoid this we add it the list of stream-like types.
+_STREAMLIKE_TYPES.append(_TemporaryFileWrapper)
diff --git a/securedrop/version.py b/securedrop/version.py
--- a/securedrop/version.py
+++ b/securedrop/version.py
@@ -1 +1 @@
-__version__ = '0.3.6'
+__version__ = '0.3.7'
| diff --git a/docs/test_the_installation.rst b/docs/test_the_installation.rst
--- a/docs/test_the_installation.rst
+++ b/docs/test_the_installation.rst
@@ -10,15 +10,18 @@ SSH to both servers over Tor
On the Admin Workstation, you should be able to SSH to the App
Server and the Monitor Server. ::
+ $ ssh app
+ $ ssh mon
+
+The SSH aliases should have been configured automatically by running
+the ``install.sh`` script. If you're unable to connect via aliases,
+try using the verbose command format to troubleshoot: ::
+
$ ssh <username>@<app .onion>
$ ssh <username>@<mon .onion>
-If you set up :ref:`SSH Host Aliases` during the post-install
-setup for the Admin Workstation, you should be able to connect
-with the aliases: ::
-
- $ ssh app
- $ ssh mon
+.. tip:: You can find the Onion URLs for SSH in ``app-ssh-aths`` and
+ ``mon-ssh-aths`` inside the ``install_files/ansible-base`` directory.
Log in to both servers via TTY
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
| Merge changes from 0.3.7 into develop
This is a follow-up issue to track merging the changes from the 0.3.7 release into the development branch for the next minor release, 0.4.
| 2016-08-15T23:56:46Z | [] | [] |
|
freedomofpress/securedrop | 1,395 | freedomofpress__securedrop-1395 | [
"1384"
] | bcf4d95c6aa8d585bcf279c51920879bd869d6e1 | diff --git a/install_files/ansible-base/roles/backup/files/0.3_restore.py b/install_files/ansible-base/roles/backup/files/0.3_restore.py
--- a/install_files/ansible-base/roles/backup/files/0.3_restore.py
+++ b/install_files/ansible-base/roles/backup/files/0.3_restore.py
@@ -112,19 +112,19 @@ def restore_tor_files(zf):
tor_root_dir = "/var/lib/tor"
ths_root_dir = os.path.join(tor_root_dir, "services")
source_ths_dir = os.path.join(ths_root_dir, "source")
- document_ths_dir = os.path.join(ths_root_dir, "document")
+ journalist_ths_dir = os.path.join(ths_root_dir, "journalist")
print "* Deleting previous source THS interface..."
for fn in os.listdir(source_ths_dir):
os.remove(os.path.join(source_ths_dir, fn))
- print "* Deleting previous document ATHS interface..."
+ print "* Deleting previous journalist ATHS interface..."
- for fn in os.listdir(document_ths_dir):
- os.remove(os.path.join(document_ths_dir, fn))
+ for fn in os.listdir(journalist_ths_dir):
+ os.remove(os.path.join(journalist_ths_dir, fn))
- print "* Migrating source and document interface .onion..."
+ print "* Migrating source and journalist interface .onion..."
for zi in zf.infolist():
if "var/lib/tor/services/source" in zi.filename:
@@ -133,11 +133,11 @@ def restore_tor_files(zf):
"var/lib/tor/services/source",
"/var/lib/tor/services/source"),
"debian-tor")
- elif "var/lib/tor/services/document" in zi.filename:
+ elif "var/lib/tor/services/journalist" in zi.filename:
extract_to_path(zf, zi,
replace_prefix(zi.filename,
- "var/lib/tor/services/document",
- "/var/lib/tor/services/document"),
+ "var/lib/tor/services/journalist",
+ "/var/lib/tor/services/journalist"),
"debian-tor")
# Reload Tor to trigger registering the old Tor Hidden Services
diff --git a/install_files/ansible-base/roles/upgrade/files/0.3pre_upgrade.py b/install_files/ansible-base/roles/upgrade/files/0.3pre_upgrade.py
--- a/install_files/ansible-base/roles/upgrade/files/0.3pre_upgrade.py
+++ b/install_files/ansible-base/roles/upgrade/files/0.3pre_upgrade.py
@@ -40,7 +40,7 @@ def cleanup_deleted_sources(store_dir, c):
"""
In 0.3pre and 0.3, there were two bugs that could potentially lead
to the source directory failing to be deleted when a source was
- deleted from the Document Interface. We clean up these leftover
+ deleted from the Journalist Interface. We clean up these leftover
directories as part of the migration.
These sources can be identified because they have a source_dir in
diff --git a/migration_scripts/0.2.1/0.2.1_collect.py b/migration_scripts/0.2.1/0.2.1_collect.py
--- a/migration_scripts/0.2.1/0.2.1_collect.py
+++ b/migration_scripts/0.2.1/0.2.1_collect.py
@@ -30,7 +30,7 @@ def collect_securedrop_root(backup):
def collect_database(backup):
- # Copy the db file, which is only present in the document interface's
+ # Copy the db file, which is only present in the journalist interface's
# chroot jail in 0.2.1
db_file = "/var/chroot/document/var/www/securedrop/db.sqlite"
backup.add(db_file)
diff --git a/securedrop/management/run.py b/securedrop/management/run.py
--- a/securedrop/management/run.py
+++ b/securedrop/management/run.py
@@ -140,7 +140,7 @@ def cleanup(self):
def run():
"""
Starts development servers for both the Source Interface and the
- Document Interface concurrently. Their output is collected,
+ Journalist Interface concurrently. Their output is collected,
labeled, and sent to stdout to present a unified view to the
developer.
@@ -154,7 +154,7 @@ def run():
DevServerProcess('Source Interface',
['python', 'source.py'],
'blue'),
- DevServerProcess('Document Interface',
+ DevServerProcess('Journalist Interface',
['python', 'journalist.py'],
'cyan'),
]
diff --git a/securedrop/source.py b/securedrop/source.py
--- a/securedrop/source.py
+++ b/securedrop/source.py
@@ -203,7 +203,7 @@ def async_genkey(sid, codename):
crypto_util.genkeypair(sid, codename)
# Register key generation as update to the source, so sources will
- # filter to the top of the list in the document interface if a
+ # filter to the top of the list in the journalist interface if a
# flagged source logs in and has a key generated for them. #789
try:
source = Source.query.filter(Source.filesystem_id == sid).one()
diff --git a/tails_files/securedrop_init.py b/tails_files/securedrop_init.py
--- a/tails_files/securedrop_init.py
+++ b/tails_files/securedrop_init.py
@@ -44,4 +44,4 @@
# notify the user
subprocess.call(['tails-notify-user',
'SecureDrop successfully auto-configured!',
- 'You can now access the Document Interface.\nIf you are an admin, you can now SSH to the servers.'])
+ 'You can now access the Journalist Interface.\nIf you are an admin, you can now SSH to the servers.'])
| diff --git a/docs/test_the_installation.rst b/docs/test_the_installation.rst
--- a/docs/test_the_installation.rst
+++ b/docs/test_the_installation.rst
@@ -69,13 +69,13 @@ Test the web interfaces
- Usage of the Source Interface is covered by our :doc:`Source User
Manual <source>`.
-#. Test that you can access the Document Interface, and that you can log
+#. Test that you can access the Journalist Interface, and that you can log
in as the admin user you just created.
- Open the Tor Browser and navigate to the onion URL from
- app-document-aths. Enter your password and two-factor
+ app-journalist-aths. Enter your password and two-factor
authentication code to log in.
- - If you have problems logging in to the Admin/Document Interface,
+ - If you have problems logging in to the Admin/Journalist Interface,
SSH to the App Server and restart the ntp daemon to synchronize
the time: ``sudo service ntp restart``. Also check that your
smartphone's time is accurate and set to network time in its
@@ -85,7 +85,7 @@ Test the web interfaces
- While logged in as an admin, you can send a reply to the test
source submission you made earlier.
- - Usage of the Document Interface is covered by our :doc:`Journalist
+ - Usage of the Journalist Interface is covered by our :doc:`Journalist
User Manual <journalist>`.
#. Test that the source received the reply.
@@ -96,10 +96,10 @@ Test the web interfaces
is present.
#. We highly recommend that you create persistent bookmarks for the
- Source and Document Interface addresses within Tor Browser.
+ Source and Journalist Interface addresses within Tor Browser.
#. Remove the test submissions you made prior to putting SecureDrop to
- real use. On the main Document Interface page, select all sources and
+ real use. On the main Journalist Interface page, select all sources and
click 'Delete selected'.
Once you've tested the installation and verified that everything is
diff --git a/securedrop/tests/test_unit_integration.py b/securedrop/tests/test_unit_integration.py
--- a/securedrop/tests/test_unit_integration.py
+++ b/securedrop/tests/test_unit_integration.py
@@ -100,7 +100,7 @@ def test_submit_message(self):
self.assertEqual(rv.status_code, 200)
common.logout(source_app)
- # Request the Document Interface index
+ # Request the Journalist Interface index
rv = self.journalist_app.get('/')
self.assertEqual(rv.status_code, 200)
self.assertIn("Sources", rv.data)
@@ -556,7 +556,7 @@ def test_filenames_delete(self):
def test_user_change_password(self):
"""Test that a journalist can successfully login after changing their password"""
-
+
# change password
self.journalist_app.post('/account', data=dict(
password='newpass',
@@ -580,7 +580,7 @@ def test_login_after_regenerate_hotp(self):
# edit hotp
self.journalist_app.post('/account/reset-2fa-hotp', data=dict(
otp_secret=123456))
-
+
# successful verificaton should redirect to /account
rv = self.journalist_app.post('/account/2fa', data=dict(
token=self.user.hotp))
@@ -588,7 +588,7 @@ def test_login_after_regenerate_hotp(self):
# log out
common.logout(self.journalist_app)
-
+
# login with new 2fa secret should redirect to index page
rv = self.journalist_app.post('/login', data=dict(
username=self.user.username,
diff --git a/spec_tests/spec/app-general/apache_spec.rb b/spec_tests/spec/app-general/apache_spec.rb
--- a/spec_tests/spec/app-general/apache_spec.rb
+++ b/spec_tests/spec/app-general/apache_spec.rb
@@ -82,7 +82,7 @@
'Header unset Etag',
]
# declare block of directory declarations common to both
-# source and document interfaces.
+# source and journalist interfaces.
common_apache2_directory_declarations = <<eos
<Directory />
Options None
@@ -119,7 +119,7 @@
eos
# declare desired apache2 available sites
apache2_available_sites = [
- '/etc/apache2/sites-available/document.conf',
+ '/etc/apache2/sites-available/journalist.conf',
'/etc/apache2/sites-available/source.conf',
]
# check desired apache2 available sites for common headers
@@ -167,31 +167,31 @@
end
end
-# declare document-specific apache configs
-document_apache2_config_settings = [
+# declare journalist-specific apache configs
+journalist_apache2_config_settings = [
'Header set Cache-Control "max-age=1800"',
"<VirtualHost #{property['apache_listening_address']}:8080>",
"DocumentRoot #{property['securedrop_code']}/static",
"Alias /static #{property['securedrop_code']}/static",
- "WSGIDaemonProcess document processes=2 threads=30 display-name=%{GROUP} python-path=#{property['securedrop_code']}",
- 'WSGIProcessGroup document',
- 'WSGIScriptAlias / /var/www/document.wsgi/',
+ "WSGIDaemonProcess journalist processes=2 threads=30 display-name=%{GROUP} python-path=#{property['securedrop_code']}",
+ 'WSGIProcessGroup journalist',
+ 'WSGIScriptAlias / /var/www/journalist.wsgi/',
'AddType text/html .py',
'XSendFile On',
'XSendFilePath /var/lib/securedrop/store/',
'XSendFilePath /var/lib/securedrop/tmp/',
- 'ErrorLog /var/log/apache2/document-error.log',
- 'CustomLog /var/log/apache2/document-access.log combined',
+ 'ErrorLog /var/log/apache2/journalist-error.log',
+ 'CustomLog /var/log/apache2/journalist-access.log combined',
]
-# check document-specific apache2 config
-describe file('/etc/apache2/sites-available/document.conf') do
+# check journalist-specific apache2 config
+describe file('/etc/apache2/sites-available/journalist.conf') do
it { should be_file }
it { should be_owned_by 'root' }
it { should be_grouped_into 'root' }
it { should be_mode '644' }
- document_apache2_config_settings.each do |document_apache2_config_setting|
- document_apache2_config_setting_regex = Regexp.quote(document_apache2_config_setting)
- its(:content) { should match /^#{document_apache2_config_setting_regex}$/ }
+ journalist_apache2_config_settings.each do |journalist_apache2_config_setting|
+ journalist_apache2_config_setting_regex = Regexp.quote(journalist_apache2_config_setting)
+ its(:content) { should match /^#{journalist_apache2_config_setting_regex}$/ }
end
end
@@ -238,8 +238,8 @@
end
end
-# Are source and document interface sites enabled?
-['source', 'document'].each do |enabled_site|
+# Are source and journalist interface sites enabled?
+['source', 'journalist'].each do |enabled_site|
describe command("a2query -s #{enabled_site}") do
its(:stdout) { should match /^#{enabled_site} \(enabled/ }
end
diff --git a/spec_tests/spec/app-general/tor_spec.rb b/spec_tests/spec/app-general/tor_spec.rb
--- a/spec_tests/spec/app-general/tor_spec.rb
+++ b/spec_tests/spec/app-general/tor_spec.rb
@@ -2,7 +2,7 @@
torrc_settings = [
'HiddenServiceDir /var/lib/tor/services/source',
'HiddenServicePort 80 127.0.0.1:80',
- 'HiddenServiceDir /var/lib/tor/services/document',
+ 'HiddenServiceDir /var/lib/tor/services/journalist',
'HiddenServicePort 80 127.0.0.1:8080',
'HiddenServiceAuthorizeClient stealth journalist',
]
@@ -22,7 +22,7 @@
# and the "ssh" service are validated in the
# common-staging spectests.
tor_service_directories = %w(
- /var/lib/tor/services/document
+ /var/lib/tor/services/journalist
/var/lib/tor/services/source
)
# ensure tor service dirs are owned by tor user and mode 0700
diff --git a/spec_tests/spec/app-staging/iptables_spec.rb b/spec_tests/spec/app-staging/iptables_spec.rb
--- a/spec_tests/spec/app-staging/iptables_spec.rb
+++ b/spec_tests/spec/app-staging/iptables_spec.rb
@@ -91,7 +91,7 @@
listening_ports = [
22, # ssh
80, # source interface
- 8080, # document interface
+ 8080, # journalist interface
6001, # Xvfb
]
# ensure ports are listening
diff --git a/spec_tests/spec/development/iptables_spec.rb b/spec_tests/spec/development/iptables_spec.rb
--- a/spec_tests/spec/development/iptables_spec.rb
+++ b/spec_tests/spec/development/iptables_spec.rb
@@ -35,7 +35,7 @@
it { should be_listening.on('127.0.0.1').with('tcp') }
end
-# The Flask runners for the source and document interfaces
+# The Flask runners for the source and journalist interfaces
# aren't configured to run by default, e.g. on boot. Nor
# do the app tests cause them to be run. So, we shouldn't
# really expected them to be running.
@@ -44,7 +44,7 @@
# it { should be_listening.on('0.0.0.0').with('tcp') }
#end
#
-## check for document interface flask port listening
+## check for journalist interface flask port listening
#describe port(8081) do
# it { should be_listening.on('0.0.0.0').with('tcp') }
#end
| Proposal: Change all references of "Document Interface" to "Journalist Interface" to be consistent with code
Document interface is ambiguous because it could mean the source interface because sources submit documents, or it could mean the journalist interface because the journalist reads documents.
I think this is entirely a documentation issue (plus one of two lines in some `bash` script), and doesn't impact sources in any way.
| In favor.
| 2016-08-27T20:05:44Z | [] | [] |
freedomofpress/securedrop | 1,425 | freedomofpress__securedrop-1425 | [
"1155"
] | 444ea7cfc2e2f9148027be7b0fb77d8cbf3fa8a9 | diff --git a/securedrop/source.py b/securedrop/source.py
--- a/securedrop/source.py
+++ b/securedrop/source.py
@@ -9,7 +9,7 @@
from threading import Thread
import operator
from flask import (Flask, request, render_template, session, redirect, url_for,
- flash, abort, g, send_file)
+ flash, abort, g, send_file, Markup)
from flask_wtf.csrf import CsrfProtect
from flask.ext.assets import Environment
@@ -394,11 +394,18 @@ def login():
flash("Sorry, that is not a recognized codename.", "error")
return render_template('login.html')
+
@app.route('/logout')
def logout():
if logged_in():
session.clear()
- flash("Thank you for logging out.", "notification")
+ tor_msg = Markup("""<strong>Important:</strong> Thank you for logging out.
+ Please fully end your session by restarting
+ Tor Browser: Click the <img src='static/i/toronion.png'
+ alt='Tor icon' /> Tor onion icon in the toolbar above,
+ click <strong> New Identity</strong> and click
+ <strong>Yes</strong> in the dialog box that appears.""")
+ flash(tor_msg, "error")
return redirect(url_for('index'))
| diff --git a/securedrop/tests/functional/source_navigation_steps.py b/securedrop/tests/functional/source_navigation_steps.py
--- a/securedrop/tests/functional/source_navigation_steps.py
+++ b/securedrop/tests/functional/source_navigation_steps.py
@@ -62,5 +62,5 @@ def _source_submits_a_message(self):
def _source_logs_out(self):
logout_button = self.driver.find_element_by_id('logout').click()
- notification = self.driver.find_element_by_css_selector('p.notification')
+ notification = self.driver.find_element_by_css_selector('p.error')
self.assertIn('Thank you for logging out.', notification.text)
| Reset identity via Torbutton both after using SecureDrop
It would be good to instruct everyone accessing the SecureDrop web interface to reset their identity after visiting the page. Even though Tor Browser does isolate most of its state between sites and closes the rendezvous point circuit once you've navigated away from a site, it's probably safest to do things this way because not everything has been wiped from memory. For example, if you press ctrl+shift+t (on Linux) after closing a tab, Tor Browser will re-open it. So it would be best practice to reset identity after using SD if you wish to continue using Tor Browser.
| @fowlslegs Either reset identity via TorButton, or just quit the browser (not just close the window) when they're done. Remember when bank and credit card processor websites would tell you to do that? That was so your session cookies would get cleared by the browser shutdown. It was always kind of stupid when they did it, but it makes more sense in our weird use case where cookies with `Expires:` might actually be more of a forensic risk than session cookies.
We could add a message encouraging either a reset or a full browser shutdown after a source logs out, which is being added in #1165.
As far as choosing between resetting identity or shutting the browser down, I'm not sure which is better. "Reset Identity" is a nicer user experience, IMO, since it brings that back into a "fresh" Tor Browser.
| 2016-10-20T18:57:18Z | [] | [] |
freedomofpress/securedrop | 1,440 | freedomofpress__securedrop-1440 | [
"1188"
] | ce4f46ed00c0791572be7e74cf0b2a65e9a9f214 | diff --git a/securedrop/db.py b/securedrop/db.py
--- a/securedrop/db.py
+++ b/securedrop/db.py
@@ -130,9 +130,9 @@ class Submission(Base):
source_id = Column(Integer, ForeignKey('sources.id'))
source = relationship(
"Source",
- backref=backref(
- 'submissions',
- order_by=id))
+ backref=backref("submissions", order_by=id, cascade="delete")
+ )
+
filename = Column(String(255), nullable=False)
size = Column(Integer, nullable=False)
downloaded = Column(Boolean, default=False)
@@ -158,7 +158,10 @@ class Reply(Base):
order_by=id))
source_id = Column(Integer, ForeignKey('sources.id'))
- source = relationship("Source", backref=backref('replies', order_by=id))
+ source = relationship(
+ "Source",
+ backref=backref("replies", order_by=id, cascade="delete")
+ )
filename = Column(String(255), nullable=False)
size = Column(Integer, nullable=False)
diff --git a/securedrop/journalist.py b/securedrop/journalist.py
--- a/securedrop/journalist.py
+++ b/securedrop/journalist.py
@@ -444,7 +444,7 @@ def col(sid):
def delete_collection(source_id):
# Delete the source's collection of submissions
- worker.enqueue(store.delete_source_directory, source_id)
+ job = worker.enqueue(store.delete_source_directory, source_id)
# Delete the source's reply keypair
crypto_util.delete_reply_keypair(source_id)
@@ -453,6 +453,7 @@ def delete_collection(source_id):
source = get_source(source_id)
db_session.delete(source)
db_session.commit()
+ return job
@app.route('/col/process', methods=('POST',))
diff --git a/securedrop/store.py b/securedrop/store.py
--- a/securedrop/store.py
+++ b/securedrop/store.py
@@ -148,7 +148,9 @@ def secure_unlink(fn, recursive=False):
command.append('-r')
command.append(fn)
subprocess.check_call(command)
+ return "success"
def delete_source_directory(source_id):
secure_unlink(path(source_id), recursive=True)
+ return "success"
diff --git a/securedrop/worker.py b/securedrop/worker.py
--- a/securedrop/worker.py
+++ b/securedrop/worker.py
@@ -11,4 +11,4 @@
def enqueue(*args, **kwargs):
- q.enqueue(*args, **kwargs)
+ return q.enqueue(*args, **kwargs)
| diff --git a/securedrop/tests/common.py b/securedrop/tests/common.py
--- a/securedrop/tests/common.py
+++ b/securedrop/tests/common.py
@@ -8,7 +8,7 @@
# Set environment variable so config.py uses a test environment
os.environ['SECUREDROP_ENV'] = 'test'
import config
-from db import init_db, db_session, Source, Submission
+from db import init_db, db_session, Journalist, Reply, Source, Submission
import crypto_util
# TODO: the PID file for the redis worker is hard-coded below.
@@ -42,15 +42,19 @@ def init_gpg():
return gpg
+def create_file(filename):
+ dirname = os.path.dirname(filename)
+ if not os.path.exists(dirname):
+ os.makedirs(dirname)
+ with open(filename, 'w') as fp:
+ fp.write(str(uuid.uuid4()))
+
+
def setup_test_docs(sid, files):
filenames = [os.path.join(config.STORE_DIR, sid, file) for file in files]
for filename in filenames:
- dirname = os.path.dirname(filename)
- if not os.path.exists(dirname):
- os.makedirs(dirname)
- with open(filename, 'w') as fp:
- fp.write(str(uuid.uuid4()))
+ create_file(filename)
# Add Submission to the db
source = Source.query.filter(Source.filesystem_id == sid).one()
@@ -61,6 +65,22 @@ def setup_test_docs(sid, files):
return filenames
+def setup_test_replies(sid, journo_id, files):
+ filenames = [os.path.join(config.STORE_DIR, sid, file) for file in files]
+
+ for filename in filenames:
+ create_file(filename)
+
+ # Add Reply to the db
+ source = Source.query.filter(Source.filesystem_id == sid).one()
+ journalist = Journalist.query.filter(Journalist.id == journo_id).one()
+ reply = Reply(journalist, source, os.path.basename(filename))
+ db_session.add(reply)
+ db_session.commit()
+
+ return filenames
+
+
def new_codename(client, session):
"""Helper function to go through the "generate codename" flow"""
with client as c:
diff --git a/securedrop/tests/test_unit_journalist.py b/securedrop/tests/test_unit_journalist.py
--- a/securedrop/tests/test_unit_journalist.py
+++ b/securedrop/tests/test_unit_journalist.py
@@ -5,17 +5,21 @@
import unittest
import zipfile
import mock
+import time
+import datetime
from flask_testing import TestCase
from flask import url_for, escape
+# Set environment variable so config.py uses a test environment
+os.environ['SECUREDROP_ENV'] = 'test'
+import config
+
import crypto_util
import journalist
import common
-from db import db_session, Source, Journalist, InvalidPasswordLength
-
-# Set environment variable so config.py uses a test environment
-os.environ['SECUREDROP_ENV'] = 'test'
+from db import (db_session, Source, Submission, Journalist, Reply,
+ InvalidPasswordLength)
class TestJournalist(TestCase):
@@ -23,6 +27,25 @@ class TestJournalist(TestCase):
def create_app(self):
return journalist.app
+ def add_source_and_submissions(self):
+ sid = 'EQZGCJBRGISGOTC2NZVWG6LILJBHEV3CINNEWSCLLFTUWZJPKJFECLS2NZ4G4U3QOZCFKTTPNZMVIWDCJBBHMUDBGFHXCQ3R'
+ codename = crypto_util.display_id()
+ crypto_util.genkeypair(sid, codename)
+ source = Source(sid, codename)
+ db_session.add(source)
+ db_session.commit()
+ files = ['1-abc1-msg.gpg', '2-abc2-msg.gpg']
+ filenames = common.setup_test_docs(sid, files)
+ return source, files
+
+ def add_source_and_replies(self):
+ source, files = self.add_source_and_submissions()
+ files = ['1-def-reply.gpg', '2-def-reply.gpg']
+ filenames = common.setup_test_replies(source.filesystem_id,
+ self.user.id,
+ files)
+ return source, files
+
def setUp(self):
common.shared_setup()
@@ -415,18 +438,86 @@ def test_edit_hotp(self):
# should redirect to verification page
self.assertRedirects(res, url_for('account_new_two_factor'))
+ def test_delete_source_deletes_submissions(self):
+ """Verify that when a source is deleted, the submissions that
+ correspond to them are also deleted."""
+
+ source, files = self.add_source_and_submissions()
+
+ journalist.delete_collection(source.filesystem_id)
+
+ # Source should be gone
+ results = db_session.query(Source).filter(Source.id == source.id).all()
+ self.assertEqual(results, [])
+
+ # Submissions should be gone
+ results = db_session.query(Submission.source_id == source.id).all()
+ self.assertEqual(results, [])
+
+ def test_delete_source_deletes_replies(self):
+ """Verify that when a source is deleted, the replies that
+ correspond to them are also deleted."""
+
+ source, files = self.add_source_and_replies()
+
+ journalist.delete_collection(source.filesystem_id)
+
+ # Source should be gone
+ results = db_session.query(Source).filter(Source.id == source.id).all()
+ self.assertEqual(results, [])
+
+ # Replies should be gone
+ results = db_session.query(Reply.source_id == source.id).all()
+ self.assertEqual(results, [])
+
+ def test_delete_source_deletes_source_key(self):
+ """Verify that when a source is deleted, the PGP key that corresponds
+ to them is also deleted."""
+
+ source, files = self.add_source_and_submissions()
+
+ # Source key exists
+ source_key = crypto_util.getkey(source.filesystem_id)
+ self.assertNotEqual(source_key, None)
+
+ journalist.delete_collection(source.filesystem_id)
+
+ # Source key no longer exists
+ source_key = crypto_util.getkey(source.filesystem_id)
+ self.assertEqual(source_key, None)
+
+ def test_delete_source_deletes_docs_on_disk(self):
+ """Verify that when a source is deleted, the encrypted documents that
+ exist on disk is also deleted."""
+
+ source, files = self.add_source_and_submissions()
+
+ # Encrypted documents exists
+ dir_source_docs = os.path.join(config.STORE_DIR, source.filesystem_id)
+
+ self.assertTrue(os.path.exists(dir_source_docs))
+
+ job = journalist.delete_collection(source.filesystem_id)
+
+ # Block for up to 5s to await asynchronous Redis job result
+ timeout = datetime.datetime.now() + datetime.timedelta(0,5)
+ while 1:
+ if job.result == "success":
+ break
+ elif datetime.datetime.now() > timeout:
+ self.assertTrue(False)
+
+ # Encrypted documents no longer exist
+ dir_source_docs = os.path.join(config.STORE_DIR, source.filesystem_id)
+ self.assertFalse(os.path.exists(dir_source_docs))
+
def test_bulk_download(self):
- sid = 'EQZGCJBRGISGOTC2NZVWG6LILJBHEV3CINNEWSCLLFTUWZJPKJFECLS2NZ4G4U3QOZCFKTTPNZMVIWDCJBBHMUDBGFHXCQ3R'
- source = Source(sid, crypto_util.display_id())
- db_session.add(source)
- db_session.commit()
- files = ['1-abc1-msg.gpg', '2-abc2-msg.gpg']
- filenames = common.setup_test_docs(sid, files)
+ source, files = self.add_source_and_submissions()
self._login_user()
rv = self.client.post('/bulk', data=dict(
action='download',
- sid=sid,
+ sid=source.filesystem_id,
doc_names_selected=files
))
| Delete Submissions that have had their Sources deleted
Right now if you delete a source, it does not delete the corresponding submissions from the database.
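A self-contained sketch of the SQLAlchemy behaviour the fix relies on, using simplified stand-in models rather than SecureDrop's real schema: adding `cascade="delete"` to the backref makes the child rows go away with their parent.

```python
from sqlalchemy import Column, Integer, ForeignKey, create_engine
from sqlalchemy.orm import relationship, backref, sessionmaker
from sqlalchemy.ext.declarative import declarative_base

Base = declarative_base()

class Source(Base):
    __tablename__ = 'sources'
    id = Column(Integer, primary_key=True)

class Submission(Base):
    __tablename__ = 'submissions'
    id = Column(Integer, primary_key=True)
    source_id = Column(Integer, ForeignKey('sources.id'))
    source = relationship(
        "Source",
        backref=backref("submissions", order_by=id, cascade="delete"))

engine = create_engine('sqlite://')
Base.metadata.create_all(engine)
session = sessionmaker(bind=engine)()

source = Source()
session.add(source)
session.add(Submission(source=source))
session.commit()

# Without the cascade, deleting the source would leave an orphaned
# submission row behind; with it, the child row is deleted as well.
session.delete(source)
session.commit()
assert session.query(Submission).count() == 0
```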
| 2016-11-02T22:55:43Z | [] | [] |
|
freedomofpress/securedrop | 1,486 | freedomofpress__securedrop-1486 | [
"383"
] | 1b639dd95502095ed40dbbc0e5549eeab2fa2057 | diff --git a/securedrop/journalist.py b/securedrop/journalist.py
--- a/securedrop/journalist.py
+++ b/securedrop/journalist.py
@@ -687,19 +687,16 @@ def download(zip_basename, submissions):
:param list submissions: A list of :class:`db.Submission`s to
include in the zipfile.
"""
- # Mark the submissions that are about to be downloaded as such
+ zf = store.get_bulk_archive(submissions,
+ zip_directory=zip_basename)
+ attachment_filename = "{}--{}.zip".format(
+ zip_basename, datetime.utcnow().strftime("%Y-%m-%d--%H-%M-%S"))
+
+ # Mark the submissions that have been downloaded as such
for submission in submissions:
submission.downloaded = True
db_session.commit()
- filenames = [store.path(submission.source.filesystem_id,
- submission.filename)
- for submission in submissions]
-
- zf = store.get_bulk_archive(filenames,
- zip_directory=zip_basename)
- attachment_filename = "{}--{}.zip".format(
- zip_basename, datetime.utcnow().strftime("%Y-%m-%d--%H-%M-%S"))
return send_file(zf.name, mimetype="application/zip",
attachment_filename=attachment_filename,
as_attachment=True)
diff --git a/securedrop/store.py b/securedrop/store.py
--- a/securedrop/store.py
+++ b/securedrop/store.py
@@ -68,17 +68,29 @@ def path(*s):
return absolute
-def get_bulk_archive(filenames, zip_directory=''):
+def get_bulk_archive(selected_submissions, zip_directory=''):
+ """Generate a zip file from the selected submissions"""
zip_file = tempfile.NamedTemporaryFile(prefix='tmp_securedrop_bulk_dl_',
dir=config.TEMP_DIR,
delete=False)
+ sources = set([i.source.journalist_designation for i in selected_submissions])
+ # The below nested for-loops are there to create a more usable
+ # folder structure per #383
with zipfile.ZipFile(zip_file, 'w') as zip:
- for filename in filenames:
- verify(filename)
- zip.write(filename, arcname=os.path.join(
- zip_directory,
- os.path.basename(filename)
- ))
+ for source in sources:
+ submissions = [s for s in selected_submissions if s.source.journalist_designation == source]
+ for submission in submissions:
+ filename = path(submission.source.filesystem_id,
+ submission.filename)
+ verify(filename)
+ document_number = submission.filename.split('-')[0]
+ zip.write(filename, arcname=os.path.join(
+ zip_directory,
+ source,
+ "%s_%s" % (document_number,
+ submission.source.last_updated.date()),
+ os.path.basename(filename)
+ ))
return zip_file
| diff --git a/securedrop/tests/test_unit_journalist.py b/securedrop/tests/test_unit_journalist.py
--- a/securedrop/tests/test_unit_journalist.py
+++ b/securedrop/tests/test_unit_journalist.py
@@ -554,37 +554,51 @@ def test_delete_source_deletes_docs_on_disk(self):
def test_download_selected_submissions_from_source(self):
source, _ = utils.db_helper.init_source()
- submissions = set(utils.db_helper.submit(source, 4))
-
+ submissions = utils.db_helper.submit(source, 4)
selected_submissions = random.sample(submissions, 2)
selected_fnames = [submission.filename
for submission in selected_submissions]
+ selected_fnames.sort()
self._login_user()
resp = self.client.post(
'/bulk', data=dict(action='download',
sid=source.filesystem_id,
doc_names_selected=selected_fnames))
+
# The download request was succesful, and the app returned a zipfile
self.assertEqual(resp.status_code, 200)
self.assertEqual(resp.content_type, 'application/zip')
self.assertTrue(zipfile.is_zipfile(StringIO(resp.data)))
+
# The submissions selected are in the zipfile
for filename in selected_fnames:
- self.assertTrue(zipfile.ZipFile(StringIO(resp.data)).getinfo(
- os.path.join(source.journalist_filename, filename)))
+ self.assertTrue(
+ # Check that the expected filename is in the zip file
+ zipfile.ZipFile(StringIO(resp.data)).getinfo(
+ os.path.join(
+ source.journalist_filename,
+ source.journalist_designation,
+ "%s_%s" % (filename.split('-')[0], source.last_updated.date()),
+ filename
+ ))
+ )
+
# The submissions not selected are absent from the zipfile
- not_selected_submissions = submissions.difference(selected_submissions)
+ not_selected_submissions = set(submissions).difference(selected_submissions)
not_selected_fnames = [submission.filename
for submission in not_selected_submissions]
+
for filename in not_selected_fnames:
- try:
+ with self.assertRaises(KeyError):
zipfile.ZipFile(StringIO(resp.data)).getinfo(
- os.path.join(source.journalist_filename, filename))
- except KeyError:
- pass
- else:
- self.assertTrue(False)
+ os.path.join(
+ source.journalist_filename,
+ source.journalist_designation,
+ "%s_%s" % (filename.split('-')[0],
+ source.last_updated.date()),
+ filename
+ ))
def _bulk_download_setup(self):
"""Create a couple sources, make some submissions on their behalf,
@@ -592,19 +606,21 @@ def _bulk_download_setup(self):
sources."""
self.source0, _ = utils.db_helper.init_source()
self.source1, _ = utils.db_helper.init_source()
- self.submissions0 = set(utils.db_helper.submit(self.source0, 2))
- self.submissions1 = set(utils.db_helper.submit(self.source1, 3))
+ self.journo0, _ = utils.db_helper.init_journalist()
+ self.submissions0 = utils.db_helper.submit(self.source0, 2)
+ self.submissions1 = utils.db_helper.submit(self.source1, 3)
self.downloaded0 = random.sample(self.submissions0, 1)
utils.db_helper.mark_downloaded(*self.downloaded0)
- self.not_downloaded0 = self.submissions0.difference(self.downloaded0)
+ self.not_downloaded0 = set(self.submissions0).difference(self.downloaded0)
self.downloaded1 = random.sample(self.submissions1, 2)
utils.db_helper.mark_downloaded(*self.downloaded1)
- self.not_downloaded1 = self.submissions1.difference(self.downloaded1)
+ self.not_downloaded1 = set(self.submissions1).difference(self.downloaded1)
def test_download_unread_all_sources(self):
self._bulk_download_setup()
self._login_user()
+
# Download all unread messages from all sources
self.resp = self.client.post(
'/col/process',
@@ -616,50 +632,98 @@ def test_download_unread_all_sources(self):
self.assertEqual(self.resp.status_code, 200)
self.assertEqual(self.resp.content_type, 'application/zip')
self.assertTrue(zipfile.is_zipfile(StringIO(self.resp.data)))
+
# All the not dowloaded submissions are in the zipfile
- for submission in self.not_downloaded0.union(self.not_downloaded1):
+ for submission in self.not_downloaded0:
self.assertTrue(
zipfile.ZipFile(StringIO(self.resp.data)).getinfo(
- os.path.join('unread', submission.filename))
+ os.path.join(
+ "unread",
+ self.source0.journalist_designation,
+ "%s_%s" % (submission.filename.split('-')[0],
+ self.source0.last_updated.date()),
+ submission.filename
+ ))
)
+ for submission in self.not_downloaded1:
+ self.assertTrue(
+ zipfile.ZipFile(StringIO(self.resp.data)).getinfo(
+ os.path.join(
+ "unread",
+ self.source1.journalist_designation,
+ "%s_%s" % (submission.filename.split('-')[0],
+ self.source1.last_updated.date()),
+ submission.filename
+ ))
+ )
+
# All the downloaded submissions are absent from the zipfile
- for submission in self.downloaded0 + self.downloaded1:
- try:
+ for submission in self.downloaded0:
+ with self.assertRaises(KeyError):
+ zipfile.ZipFile(StringIO(self.resp.data)).getinfo(
+ os.path.join(
+ "unread",
+ self.source0.journalist_designation,
+ "%s_%s" % (submission.filename.split('-')[0],
+ self.source0.last_updated.date()),
+ submission.filename
+ ))
+
+ for submission in self.downloaded1:
+ with self.assertRaises(KeyError):
zipfile.ZipFile(StringIO(self.resp.data)).getinfo(
- os.path.join('unread', submission.filename))
- except KeyError:
- pass
- else:
- self.assertTrue(False)
+ os.path.join(
+ "unread",
+ self.source1.journalist_designation,
+ "%s_%s" % (submission.filename.split('-')[0],
+ self.source1.last_updated.date()),
+ submission.filename
+ ))
+
def test_download_all_selected_sources(self):
self._bulk_download_setup()
self._login_user()
+
# Dowload all messages from self.source1
self.resp = self.client.post(
'/col/process',
data=dict(action='download-all',
cols_selected=[self.source1.filesystem_id]))
+ resp = self.client.post('/col/process',
+ data=dict(action='download-all',
+ cols_selected=[self.source1.filesystem_id]))
+
# The download request was succesful, and the app returned a zipfile
- self.assertEqual(self.resp.status_code, 200)
- self.assertEqual(self.resp.content_type, 'application/zip')
- self.assertTrue(zipfile.is_zipfile(StringIO(self.resp.data)))
+ self.assertEqual(resp.status_code, 200)
+ self.assertEqual(resp.content_type, 'application/zip')
+ self.assertTrue(zipfile.is_zipfile(StringIO(resp.data)))
+
# All messages from self.source1 are in the zipfile
for submission in self.submissions1:
self.assertTrue(
- zipfile.ZipFile(StringIO(self.resp.data)).getinfo(
- os.path.join('all', submission.filename))
+ zipfile.ZipFile(StringIO(resp.data)).getinfo(
+ os.path.join(
+ "all",
+ self.source1.journalist_designation,
+ "%s_%s" % (submission.filename.split('-')[0],
+ self.source1.last_updated.date()),
+ submission.filename)
+ )
)
- # All messages from self.source2 are absent from the zipfile
+
+ # All messages from self.source0 are absent from the zipfile
for submission in self.submissions0:
- try:
- zipfile.ZipFile(StringIO(self.resp.data)).getinfo(
- os.path.join('all', submission.filename))
- except KeyError:
- pass
- else:
- self.assertTrue(False)
+ with self.assertRaises(KeyError):
+ zipfile.ZipFile(StringIO(resp.data)).getinfo(
+ os.path.join(
+ "all",
+ self.source0.journalist_designation,
+ "%s_%s" % (submission.filename.split('-')[0],
+ self.source0.last_updated.date()),
+ submission.filename)
+ )
def test_add_star_redirects_to_index(self):
source, _ = utils.db_helper.init_source()
diff --git a/securedrop/tests/test_unit_store.py b/securedrop/tests/test_unit_store.py
--- a/securedrop/tests/test_unit_store.py
+++ b/securedrop/tests/test_unit_store.py
@@ -39,7 +39,7 @@ def test_get_zip(self):
submission.filename)
for submission in submissions]
- archive = zipfile.ZipFile(store.get_bulk_archive(filenames))
+ archive = zipfile.ZipFile(store.get_bulk_archive(submissions))
archivefile_contents = archive.namelist()
for archived_file, actual_file in zip(archivefile_contents, filenames):
 | Bulk downloading folder structure
I've noticed that in practice, it's really helpful if each message/document is in its own folder. The reason is that in Tails, you double-click a .gpg file to decrypt it, and it creates the new file in the same folder. If it's a doc, then you right-click on the .zip to extract it to get the original file that was uploaded. Sometimes (if the source is savvy and follows the journalist key instructions) this is another .gpg file with a different name, and you double-click on that to decrypt it. And sometimes, it decrypts into a .zip or .tar.gz or something that the source sent, so you extract that. Etc, etc.
Basically, each message generates at least 2 files, sometimes 3 (if they post a gpg-encrypted message in the box). And each doc generates at least 3 files (.zip.gpg, .zip, and whatever is extracted from the .zip), sometimes way more. It gets really disorganized to have all of the files just sitting there in the same folder.
(Also, sometimes a source tries to upload the same document twice because they weren't sure if it worked the first time. If the two .zip.gpg files are in the same folder, you end up extracting the zip files over each other when the contents have the same filename.)
So anyway, I propose that the bulk download feature (and any future bulk download features, such as #372) shouldn't just put all the messages/docs into a zip file, but rather should build a directory structure like this:
```
| source_codename
|--| year-month-date
|--|--| doc_number
|--|--|--| document.gpg
```
So for example, if the source codename is "Ottoman mate" and they uploaded a message and then a document on May 22, the directory structure would be:
```
ottoman-mate
ottoman-mate/2014-05-22
ottoman-mate/2014-05-22/1
ottoman-mate/2014-05-22/1/1-ottoman_mate-msg.gpg
ottoman-mate/2014-05-22/2
ottoman-mate/2014-05-22/2/2-ottoman_mate-doc.zip.gpg
```
Make sense? I don't think the directory structure needs to be exactly like this, as long as it's organized sequentially and each message/doc is in its own folder. (And the dates are super useful, especially if there are weeks of back-and-forth between journalist and source, to look back -- but the date could just be the date of last activity.)
 | @fowlslegs and I discussed this in person and decided this was a good way to organize the folder structure without having to make changes to the db to save timestamps on each submission, which would be needed for the original structure and could/would leak information as discussed in #822
Structure:
```
| source_codename
|--| <doc_number>_<Source.last_updated>
|--|--| document.gpg
```
Example:
```
ottoman-mate
ottoman-mate/1_2014-05-22/
ottoman-mate/1_2014-05-22/1-ottoman_mate-msg.gpg
ottoman-mate/2_2014-05-22/
ottoman-mate/2_2014-05-22/2-ottoman_mate-doc.zip.gpg
```
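The patch above implements this layout in `store.get_bulk_archive()`; the sketch below only illustrates how the agreed `<codename>/<doc_number>_<last_updated>/` scheme maps onto zip archive names. It assumes each submission exposes `filename`, `source.journalist_designation`, `source.last_updated`, and a hypothetical `path` attribute pointing at the encrypted file on disk:

```
import os
import zipfile


def build_bulk_archive(submissions, archive_path, zip_directory=''):
    # Illustrative sketch only; the real code lives in store.get_bulk_archive().
    with zipfile.ZipFile(archive_path, 'w') as archive:
        for submission in submissions:
            doc_number = submission.filename.split('-')[0]
            arcname = os.path.join(
                zip_directory,
                submission.source.journalist_designation,
                '%s_%s' % (doc_number, submission.source.last_updated.date()),
                submission.filename)
            # `submission.path` is an assumed attribute standing in for
            # store.path(submission.source.filesystem_id, submission.filename)
            archive.write(submission.path, arcname=arcname)
    return archive_path
```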
| 2016-12-05T19:24:36Z | [] | [] |
freedomofpress/securedrop | 1,521 | freedomofpress__securedrop-1521 | [
"1508"
] | 42bf0aec3afd36ade96371ff90f96d74741bdb53 | diff --git a/securedrop/source.py b/securedrop/source.py
--- a/securedrop/source.py
+++ b/securedrop/source.py
@@ -126,7 +126,7 @@ def index():
custom_notification=config.CUSTOM_NOTIFICATION)
-def generate_unique_codename(num_words):
+def generate_unique_codename(num_words=7):
"""Generate random codenames until we get an unused one"""
while True:
codename = crypto_util.genrandomid(num_words)
@@ -158,18 +158,9 @@ def generate():
"to create a new account, you should log out first.", "notification")
return redirect(url_for('lookup'))
- num_words = 7
- if request.method == 'POST':
- num_words = int(request.form['number-words'])
- if num_words not in range(7, 11):
- abort(403)
-
- codename = generate_unique_codename(num_words)
+ codename = generate_unique_codename()
session['codename'] = codename
- return render_template(
- 'generate.html',
- codename=codename,
- num_words=num_words)
+ return render_template('generate.html', codename=codename)
@app.route('/create', methods=['POST'])
| diff --git a/securedrop/tests/test_unit_source.py b/securedrop/tests/test_unit_source.py
--- a/securedrop/tests/test_unit_source.py
+++ b/securedrop/tests/test_unit_source.py
@@ -15,7 +15,7 @@
import utils
-class TestSourceApppp(TestCase):
+class TestSourceApp(TestCase):
def create_app(self):
return source.app
@@ -58,24 +58,6 @@ def test_generate(self):
# codename displayed to the source
self.assertEqual(codename, escape(session_codename))
- def test_regenerate_valid_lengths(self):
- """Make sure we can regenerate all valid length codenames"""
- for codename_len in xrange(7, 11):
- response = self.client.post('/generate', data={
- 'number-words': str(codename_len),
- })
- self.assertEqual(response.status_code, 200)
- codename = self._find_codename(response.data)
- self.assertEquals(len(codename.split()), codename_len)
-
- def test_regenerate_invalid_lengths(self):
- """If the codename length is invalid, it should return 403 Forbidden"""
- for codename_len in (2, 999):
- response = self.client.post('/generate', data={
- 'number-words': str(codename_len),
- })
- self.assertEqual(response.status_code, 403)
-
def test_generate_has_login_link(self):
"""The generate page should have a link to remind people to login
if they already have a codename, rather than create a new one.
| Remove source choice in number of words in codename
I think it would look a bit nicer and streamline the user experience if we got rid of the option to choose the number of Diceware words in the generate view for sources. I don't see a reason to make the codename longer than 7 words considering the strong scrypt parameters we've chosen. I assume it can be a confusing option for sources who don't understand how Diceware serves as an entropy-encoding mechanism or how their codename fits into the security model of SD (i.e., almost all sources).
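For a rough sense of why seven words already suffice, here is a back-of-the-envelope entropy check, assuming a standard 7,776-entry Diceware-style wordlist (SecureDrop's actual wordlist size may differ):

```
import math

WORDLIST_SIZE = 7776  # assumption: a standard 6**5-entry Diceware list


def codename_entropy_bits(num_words, wordlist_size=WORDLIST_SIZE):
    """Entropy in bits of a codename drawn uniformly at random."""
    return num_words * math.log(wordlist_size, 2)


for n in (7, 8, 9, 10):
    print('%d words: ~%.1f bits' % (n, codename_entropy_bits(n)))
# 7 words already gives ~90.5 bits before scrypt stretching, so the extra
# word-count options add little practical security.
```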
| 2017-01-13T00:45:42Z | [] | [] |
|
freedomofpress/securedrop | 1,527 | freedomofpress__securedrop-1527 | [
"1610"
] | 8a27df2be44664d05baa57c79c3af98526ac92d0 | diff --git a/securedrop/crypto_util.py b/securedrop/crypto_util.py
--- a/securedrop/crypto_util.py
+++ b/securedrop/crypto_util.py
@@ -1,7 +1,9 @@
+#!/usr/bin/env python
# -*- coding: utf-8 -*-
+
+from base64 import b32encode
import os
import subprocess
-from base64 import b32encode
from Crypto.Random import random
import gnupg
@@ -70,7 +72,7 @@ def clean(s, also=''):
>>> clean("Hello, world!")
Traceback (most recent call last):
...
- CryptoException: invalid input
+ CryptoException: invalid input: Hello, world!
>>> clean("Helloworld")
'Helloworld'
"""
@@ -94,20 +96,34 @@ def display_id():
def hash_codename(codename, salt=SCRYPT_ID_PEPPER):
- """
- >>> hash_codename('Hello, world!')
- 'EQZGCJBRGISGOTC2NZVWG6LILJBHEV3CINNEWSCLLFTUWZLFHBTS6WLCHFHTOLRSGQXUQLRQHFMXKOKKOQ4WQ6SXGZXDAS3Z'
+ """Salts and hashes a codename using scrypt.
+
+ :param str codename: A source's codename.
+ :param str salt: The salt to mix with the codename when hashing.
+ :returns: A base32 encoded string; the salted codename hash.
"""
return b32encode(scrypt.hash(clean(codename), salt, **SCRYPT_PARAMS))
def genkeypair(name, secret):
- """
+ """Generate a GPG key through batch file key generation. A source's
+ codename is salted with SCRYPT_GPG_PEPPER and hashed with scrypt to
+ provide the passphrase used to encrypt their private key. Their name
+ should be their filesystem id.
+
>>> if not gpg.list_keys(hash_codename('randomid')):
... genkeypair(hash_codename('randomid'), 'randomid').type
... else:
... u'P'
u'P'
+
+ :param str name: The source's filesystem id (their codename, salted
+ with SCRYPT_ID_PEPPER, and hashed with scrypt).
+ :param str secret: The source's codename.
+ :returns: a :class:`GenKey <gnupg._parser.GenKey>` object, on which
+ the ``__str__()`` method may be called to return the
+ generated key's fingeprint.
+
"""
name = clean(name)
secret = hash_codename(secret, salt=SCRYPT_GPG_PEPPER)
@@ -168,8 +184,8 @@ def encrypt(plaintext, fingerprints, output=None):
def decrypt(secret, ciphertext):
"""
>>> key = genkeypair('randomid', 'randomid')
- >>> decrypt('randomid', 'randomid',
- ... encrypt('randomid', 'Goodbye, cruel world!')
+ >>> decrypt('randomid',
+ ... encrypt('Goodbye, cruel world!', str(key))
... )
'Goodbye, cruel world!'
"""
diff --git a/securedrop/manage.py b/securedrop/manage.py
--- a/securedrop/manage.py
+++ b/securedrop/manage.py
@@ -1,210 +1,181 @@
#!/usr/bin/env python
+# -*- coding: utf-8 -*-
-import sys
+import argparse
+from getpass import getpass
import os
import shutil
-import subprocess
import signal
-import qrcode
+import sys
+import traceback
+
import psutil
+import qrcode
from sqlalchemy.orm.exc import NoResultFound
-from getpass import getpass
-from argparse import ArgumentParser
-from db import db_session, Journalist
-from management import run
-
-# We need to import config in each function because we're running the tests
-# directly, so it's important to set the environment correctly, depending on
-# development or testing, before importing config.
os.environ['SECUREDROP_ENV'] = 'dev'
-
-# TODO: the PID file for the redis worker is hard-coded below.
-# Ideally this constant would be provided by a test harness.
-# It has been intentionally omitted from `config.py.example`
-# in order to isolate the test vars from prod vars.
-# When refactoring the test suite, the TEST_WORKER_PIDFILE
-# TEST_WORKER_PIDFILE is also hard-coded in `tests/common.py`.
-TEST_WORKER_PIDFILE = "/tmp/securedrop_test_worker.pid"
-
-
-def get_pid_from_pidfile(pid_file_name):
- with open(pid_file_name) as fp:
- return int(fp.read())
+import config
+from db import db_session, init_db, Journalist
+from management import run
-def _start_test_rqworker(config): # pragma: no cover
- # needed to determine the directory to run the worker in
- worker_running = False
+def reset(): # pragma: no cover
+ """Clears the SecureDrop development applications' state, restoring them to
+ the way they were immediately after running `setup_dev.sh`. This command:
+ 1. Erases the development sqlite database file.
+ 2. Regenerates the database.
+ 3. Erases stored submissions and replies from the store dir.
+ """
+ # Erase the development db file
+ assert hasattr(config, 'DATABASE_FILE'), ("TODO: ./manage.py doesn't know "
+ 'how to clear the db if the '
+ 'backend is not sqlite')
try:
- if psutil.pid_exists(get_pid_from_pidfile(TEST_WORKER_PIDFILE)):
- worker_running = True
- except IOError:
+ os.remove(config.DATABASE_FILE)
+ except OSError as exc:
pass
- if not worker_running:
- tmp_logfile = open("/tmp/test_rqworker.log", "w")
- subprocess.Popen(
- [
- "rqworker", "test",
- "-P", config.SECUREDROP_ROOT,
- "--pid", TEST_WORKER_PIDFILE,
- ],
- stdout=tmp_logfile,
- stderr=subprocess.STDOUT)
-
-
-def _stop_test_rqworker(): # pragma: no cover
- os.kill(get_pid_from_pidfile(TEST_WORKER_PIDFILE), signal.SIGTERM)
-
-
-def test(): # pragma: no cover
- """
- Runs the test suite
- """
- os.environ['SECUREDROP_ENV'] = 'test'
- import config
- _start_test_rqworker(config)
- test_cmds = [["py.test", "--cov"], "./test.sh"]
- test_rc = int(any([subprocess.call(cmd) for cmd in test_cmds]))
- _stop_test_rqworker()
- sys.exit(test_rc)
+ # Regenerate the database
+ init_db()
+ # Clear submission/reply storage
+ try:
+ os.stat(config.STORE_DIR)
+ except OSError as exc:
+ pass
+ else:
+ for source_dir in os.listdir(config.STORE_DIR):
+ try:
+ # Each entry in STORE_DIR is a directory corresponding to a source
+ shutil.rmtree(os.path.join(config.STORE_DIR, source_dir))
+ except OSError as exc:
+ pass
+ return 0
-def test_unit(): # pragma: no cover
- """
- Runs the unit tests.
- """
- os.environ['SECUREDROP_ENV'] = 'test'
- import config
- _start_test_rqworker(config)
- test_rc = int(subprocess.call(["py.test", "--cov"]))
- _stop_test_rqworker()
- sys.exit(test_rc)
+def add_admin(): # pragma: no cover
+ return _add_user(is_admin=True)
-def reset():
- """
- Clears the SecureDrop development application's state, restoring it to the
- way it was immediately after running `setup_dev.sh`. This command:
- 1. Erases the development sqlite database file
- 2. Regenerates the database
- 3. Erases stored submissions and replies from the store dir
- """
- import config
- import db
- # Erase the development db file
- assert hasattr(
- config, 'DATABASE_FILE'), "TODO: ./manage.py doesn't know how to clear the db if the backend is not sqlite"
- os.remove(config.DATABASE_FILE)
+def add_journalist(): # pragma: no cover
+ return _add_user()
- # Regenerate the database
- db.init_db()
- # Clear submission/reply storage
- for source_dir in os.listdir(config.STORE_DIR):
- # Each entry in STORE_DIR is a directory corresponding to a source
- shutil.rmtree(os.path.join(config.STORE_DIR, source_dir))
-
-
-def add_admin():
+def _add_user(is_admin=False): # pragma: no cover
while True:
- username = raw_input("Username: ")
+ username = raw_input('Username: ')
if Journalist.query.filter_by(username=username).first():
- print "Sorry, that username is already in use."
+ print('Sorry, that username is already in use.')
else:
break
while True:
- password = getpass("Password: ")
- password_again = getpass("Confirm Password: ")
+ password = getpass('Password: ')
+ password_again = getpass('Confirm Password: ')
if len(password) > Journalist.MAX_PASSWORD_LEN:
- print ("Your password is too long (maximum length {} characters). "
- "Please pick a shorter password.".format(
- Journalist.MAX_PASSWORD_LEN))
+ print('Your password is too long (maximum length {} characters). '
+ 'Please pick a shorter '
+ 'password.'.format(Journalist.MAX_PASSWORD_LEN))
continue
if password == password_again:
break
- print "Passwords didn't match!"
+ print("Passwords didn't match!")
- hotp_input = raw_input("Is this admin using a YubiKey [HOTP]? (y/N): ")
+ hotp_input = raw_input('Will this user be using a YubiKey [HOTP]? (y/N): ')
otp_secret = None
- if hotp_input.lower() == "y" or hotp_input.lower() == "yes":
+ if hotp_input.lower() in ('y', 'yes'):
while True:
otp_secret = raw_input(
- "Please configure your YubiKey and enter the secret: ")
+ 'Please configure your YubiKey and enter the secret: ')
if otp_secret:
break
try:
- admin = Journalist(username=username,
- password=password,
- is_admin=True,
- otp_secret=otp_secret)
- db_session.add(admin)
+ user = Journalist(username=username,
+ password=password,
+ is_admin=is_admin,
+ otp_secret=otp_secret)
+ db_session.add(user)
db_session.commit()
- except Exception as e:
- if "username is not unique" in str(e):
- print "ERROR: That username is already taken!"
+ except Exception as exc:
+ if 'username is not unique' in exc:
+ print('ERROR: That username is already taken!')
else:
- print "ERROR: An unexpected error occurred, traceback: \n{}".format(e)
+ exc_type, exc_value, exc_traceback = sys.exc_info()
+ print(repr(traceback.format_exception(exc_type, exc_value,
+ exc_traceback)))
+ return 1
else:
- print "Admin '{}' successfully added".format(username)
+ print('User "{}" successfully added'.format(username))
if not otp_secret:
- # Print the QR code for Google Authenticator
- print
- print "Scan the QR code below with Google Authenticator:"
- print
- uri = admin.totp.provisioning_uri(
- username,
- issuer_name="SecureDrop")
+ # Print the QR code for FreeOTP/ Google Authenticator
+ print('\nScan the QR code below with FreeOTP or Google '
+ 'Authenticator:\n')
+ uri = user.totp.provisioning_uri(username,
+ issuer_name='SecureDrop')
qr = qrcode.QRCode()
qr.add_data(uri)
qr.print_ascii(tty=sys.stdout.isatty())
- print
- print "If the barcode does not render correctly, try changing your terminal's font, (Monospace for Linux, Menlo for OS X)."
- print "If you are using iTerm on Mac OS X, you will need to change the \"Non-ASCII Font\", which is your profile's Text settings."
- print
- print "Can't scan the barcode? Enter following shared secret manually:"
- print admin.formatted_otp_secret
- print
-
-
-def delete_user():
- """
- Deletes a journalist or administrator from the application.
- """
-
- while True:
- username = raw_input("Username to delete: ")
+ print('\nIf the barcode does not render correctly, try changing '
+ "your terminal's font (Monospace for Linux, Menlo for OS "
+ 'X). If you are using iTerm on Mac OS X, you will need to '
+ 'change the "Non-ASCII Font", which is your profile\'s Text '
+ "settings.\n\nCan't scan the barcode? Enter following "
+ 'shared secret '
+ 'manually:\n{}\n'.format(user.formatted_otp_secret))
+ return 0
+
+
+def delete_user(): # pragma: no cover
+ """Deletes a journalist or administrator from the application."""
+ # Select user to delete
+ username = raw_input('Username to delete: ')
+ try:
+ selected_user = Journalist.query.filter_by(username=username).one()
+ except NoResultFound:
+ print('ERROR: That user was not found!')
+ return 0
+
+ # Confirm deletion if user is found
+ confirmation = raw_input('Are you sure you want to delete user '
+ '{} (y/n)?'.format(selected_user))
+ if confirmation.lower() != 'y':
+ print('Confirmation not received: user "{}" was NOT '
+ 'deleted'.format(username))
+ return 0
+
+ # Try to delete user from the database
+ try:
+ db_session.delete(selected_user)
+ db_session.commit()
+ except:
+ # If the user was deleted between the user selection and confirmation,
+ # (e.g., through the web app), we don't report any errors. If the user
+ # is still there, but there was a error deleting them from the
+ # database, we do report it.
try:
selected_user = Journalist.query.filter_by(username=username).one()
- break
except NoResultFound:
- print "ERROR: That user was not found!"
-
- db_session.delete(selected_user)
- db_session.commit()
- print "User '{}' successfully deleted".format(username)
-
-
-def clean_tmp():
- """Cleanup the SecureDrop temp directory. This is intended to be run as an
- automated cron job. We skip files that are currently in use to avoid
- deleting files that are currently being downloaded."""
+ pass
+ else:
+ raise
+
+ print('User "{}" successfully deleted'.format(username))
+ return 0
+
+
+def clean_tmp(): # pragma: no cover
+ """Cleanup the SecureDrop temp directory. This is intended to be run
+ as an automated cron job. We skip files that are currently in use to
+ avoid deleting files that are currently being downloaded."""
# Inspired by http://stackoverflow.com/a/11115521/1093000
- import config
-
def file_in_use(fname):
- in_use = False
-
for proc in psutil.process_iter():
try:
open_files = proc.open_files()
- in_use = in_use or any([open_file.path == fname
+ in_use = False or any([open_file.path == fname
for open_file in open_files])
# Early return for perf
if in_use:
@@ -221,46 +192,59 @@ def listdir_fullpath(d):
# Thanks to http://stackoverflow.com/a/120948/1093000
return [os.path.join(d, f) for f in os.listdir(d)]
- for path in listdir_fullpath(config.TEMP_DIR):
- if not file_in_use(path):
- os.remove(path)
-
-
-def get_args(): # pragma: no cover
- parser = ArgumentParser(prog=__file__,
- description='A tool to help admins manage and devs hack')
-
- subparsers = parser.add_subparsers()
-
- run_subparser = subparsers.add_parser('run', help='Run the dev webserver (source & journalist)')
- run_subparser.set_defaults(func=run)
-
- unit_test_subparser = subparsers.add_parser('unit-test', help='Run the unit tests')
- unit_test_subparser.set_defaults(func=test_unit)
-
- test_subparser = subparsers.add_parser('test', help='Run the full test suite')
- test_subparser.set_defaults(func=test)
-
- reset_subparser = subparsers.add_parser('reset', help="DANGER!!! Clears the SecureDrop application's state")
- reset_subparser.set_defaults(func=reset)
-
- add_admin_subparser = subparsers.add_parser('add-admin', help='Add a new admin to the application')
- add_admin_subparser.set_defaults(func=add_admin)
-
- delete_user_subparser = subparsers.add_parser('delete-user', help='Delete a user from the application')
- delete_user_subparser.set_defaults(func=delete_user)
-
- clean_tmp_subparser = subparsers.add_parser('clean-tmp', help='Cleanup the SecureDrop temp directory')
- clean_tmp_subparser.set_defaults(func=clean_tmp)
+ try:
+ os.stat(config.TEMP_DIR)
+ except OSError as exc:
+ pass
+ else:
+ for path in listdir_fullpath(config.TEMP_DIR):
+ if not file_in_use(path):
+ os.remove(path)
+
+ return 0
+
+
+def get_args():
+ parser = argparse.ArgumentParser(prog=__file__, description='Management '
+ 'and testing utility for SecureDrop.')
+ subps = parser.add_subparsers()
+ # Run WSGI app
+ run_subp = subps.add_parser('run', help='Run the Werkzeug source & '
+ 'journalist WSGI apps. WARNING!!! For '
+ 'development only, not to be used in '
+ 'production.')
+ run_subp.set_defaults(func=run)
+ # Add/remove journalists + admins
+ admin_subp = subps.add_parser('add-admin', help='Add an admin to the '
+ 'application.')
+ admin_subp.set_defaults(func=add_admin)
+ journalist_subp = subps.add_parser('add-journalist', help='Add a '
+ 'journalist to the application.')
+ journalist_subp.set_defaults(func=add_journalist)
+ delete_user_subp = subps.add_parser('delete-user', help='Delete a user '
+ 'from the application.')
+ delete_user_subp.set_defaults(func=delete_user)
+
+ # Reset application state
+ reset_subp = subps.add_parser('reset', help='DANGER!!! Clears the '
+ "SecureDrop application's state.")
+ reset_subp.set_defaults(func=reset)
+ # Cleanup the SD temp dir
+ clean_tmp_subp = subps.add_parser('clean-tmp', help='Cleanup the '
+ 'SecureDrop temp directory.')
+ clean_tmp_subp.set_defaults(func=clean_tmp)
return parser
-if __name__ == "__main__": # pragma: no cover
+def _run_from_commandline(): # pragma: no cover
try:
args = get_args().parse_args()
- # calling like this works because all functions take zero arguments
- args.func()
+ rc = args.func()
+ sys.exit(rc)
except KeyboardInterrupt:
- print # So our prompt appears on a nice new line
- exit(1)
+ sys.exit(signal.SIGINT)
+
+
+if __name__ == '__main__': # pragma: no cover
+ _run_from_commandline()
diff --git a/securedrop/management/run.py b/securedrop/management/run.py
--- a/securedrop/management/run.py
+++ b/securedrop/management/run.py
@@ -153,6 +153,19 @@ def run(): # pragma: no cover
* https://stackoverflow.com/questions/22565606/python-asynhronously-print-stdout-from-multiple-subprocesses
"""
+ print \
+"""
+ ____ ____
+/\\ _`\\ /\\ _`\\
+\\ \\,\\L\\_\\ __ ___ __ __ _ __ __\\ \\ \\/\\ \\ _ __ ___ _____
+ \\/_\\__ \\ /'__`\\ /'___\\/\\ \\/\\ \\/\\`'__\\/'__`\\ \\ \\ \\ \\/\\`'__\\/ __`\\/\\ '__`\\
+ /\\ \\L\\ \\/\\ __//\\ \\__/\\ \\ \\_\\ \\ \\ \\//\\ __/\\ \\ \\_\\ \\ \\ \\//\\ \\L\\ \\ \\ \\L\\ \\
+ \\ `\\____\\ \\____\\ \\____\\\\ \\____/\\ \\_\\\\ \\____\\\\ \\____/\\ \\_\\\\ \\____/\\ \\ ,__/
+ \\/_____/\\/____/\\/____/ \\/___/ \\/_/ \\/____/ \\/___/ \\/_/ \\/___/ \\ \\ \\/
+ \\ \\_\\
+ \\/_/
+"""
+
procs = [
lambda: DevServerProcess('Source Interface',
['python', 'source.py'],
| diff --git a/securedrop/test.sh b/securedrop/test.sh
deleted file mode 100755
--- a/securedrop/test.sh
+++ /dev/null
@@ -1,16 +0,0 @@
-#!/bin/bash
-
-if [ $(which vagrant) ] ; then
- echo ""
- echo "*** You probably want to run tests from vagrant. Run 'vagrant ssh', then 'cd /vagrant/securedrop' and re-run this script***"
- echo ""
-fi
-
-export PYTHONPATH=./tests
-export SECUREDROP_ENV=test
-
-# -f makes unittest fail fast, so we can use && to avoid burying test failures
-python -m unittest -fv tests.functional.submit_and_retrieve_message && \
-python -m unittest -fv tests.functional.submit_and_retrieve_file && \
-python -m unittest -fv tests.functional.admin_interface
-
diff --git a/securedrop/tests/functional/__init__.py b/securedrop/tests/functional/__init__.py
deleted file mode 100644
diff --git a/securedrop/tests/test_journalist.py b/securedrop/tests/test_journalist.py
deleted file mode 100644
--- a/securedrop/tests/test_journalist.py
+++ /dev/null
@@ -1,123 +0,0 @@
-#!/usr/bin/env python
-# -*- coding: utf-8 -*-
-
-import os
-from mock import patch, ANY, MagicMock
-import unittest
-
-from db import db_session, InvalidPasswordLength, Journalist
-import journalist
-import utils
-
-class TestJournalistApp(unittest.TestCase):
-
- def setUp(self):
- journalist.logged_in = MagicMock()
- journalist.request = MagicMock()
- journalist.url_for = MagicMock()
- journalist.redirect = MagicMock()
- journalist.abort = MagicMock()
- journalist.db_session = MagicMock()
- journalist.get_docs = MagicMock()
- journalist.get_or_else = MagicMock()
-
- def _set_up_request(self, cols_selected, action):
- journalist.request.form.__contains__.return_value = True
- journalist.request.form.getlist = MagicMock(return_value=cols_selected)
- journalist.request.form.__getitem__.return_value = action
-
- @patch("journalist.col_delete")
- def test_col_process_delegates_to_col_delete(self, col_delete):
- cols_selected = ['source_id']
- self._set_up_request(cols_selected, 'delete')
-
- journalist.col_process()
-
- col_delete.assert_called_with(cols_selected)
-
- @patch("journalist.col_star")
- def test_col_process_delegates_to_col_star(self, col_star):
- cols_selected = ['source_id']
- self._set_up_request(cols_selected, 'star')
-
- journalist.col_process()
-
- col_star.assert_called_with(cols_selected)
-
- @patch("journalist.col_un_star")
- def test_col_process_delegates_to_col_un_star(self, col_un_star):
- cols_selected = ['source_id']
- self._set_up_request(cols_selected, 'un-star')
-
- journalist.col_process()
-
- col_un_star.assert_called_with(cols_selected)
-
- @patch("journalist.abort")
- def test_col_process_returns_404_with_bad_action(self, abort):
- cols_selected = ['source_id']
- self._set_up_request(cols_selected, 'something-random')
-
- journalist.col_process()
-
- abort.assert_called_with(ANY)
-
- @patch("journalist.make_star_true")
- @patch("journalist.db_session")
- def test_col_star_call_db_(self, db_session, make_star_true):
- journalist.col_star(['sid'])
-
- make_star_true.assert_called_with('sid')
-
- @patch("journalist.db_session")
- def test_col_un_star_call_db(self, db_session):
- journalist.col_un_star([])
-
- db_session.commit.assert_called_with()
-
-
- @classmethod
- def tearDownClass(cls):
- # Reset the module variables that were changed to mocks so we don't
- # break other tests
- reload(journalist)
-
-
-class TestJournalistLogin(unittest.TestCase):
-
- def setUp(self):
- utils.env.setup()
-
- # Patch the two-factor verification so it always succeeds
- utils.db_helper.mock_verify_token(self)
-
- self.user, self.user_pw = utils.db_helper.init_journalist()
-
- def tearDown(self):
- utils.env.teardown()
- # TODO: figure out why this is necessary here, but unnecessary in all
- # of the tests in `tests/test_unit_*.py`. Without this, the session
- # continues to return values even if the underlying database is deleted
- # (as in `shared_teardown`).
- db_session.remove()
-
- @patch('db.Journalist._scrypt_hash')
- @patch('db.Journalist.valid_password', return_value=True)
- def test_valid_login_calls_scrypt(self, mock_scrypt_hash, mock_valid_password):
- Journalist.login(self.user.username, self.user_pw, 'mocked')
- self.assertTrue(mock_scrypt_hash.called,
- "Failed to call _scrypt_hash for password w/ valid length")
-
- @patch('db.Journalist._scrypt_hash')
- def test_login_with_invalid_password_doesnt_call_scrypt(self, mock_scrypt_hash):
- invalid_pw = 'a'*(Journalist.MAX_PASSWORD_LEN + 1)
- with self.assertRaises(InvalidPasswordLength):
- Journalist.login(self.user.username, invalid_pw, 'mocked')
- self.assertFalse(mock_scrypt_hash.called,
- "Called _scrypt_hash for password w/ invalid length")
-
- @classmethod
- def tearDownClass(cls):
- # Reset the module variables that were changed to mocks so we don't
- # break other tests
- reload(journalist)
diff --git a/securedrop/tests/test_single_star.py b/securedrop/tests/test_single_star.py
deleted file mode 100644
--- a/securedrop/tests/test_single_star.py
+++ /dev/null
@@ -1,62 +0,0 @@
-import unittest
-import journalist
-from mock import patch, ANY, MagicMock
-
-
-class TestJournalist(unittest.TestCase):
-
- def setUp(self):
- journalist.logged_in = MagicMock()
- journalist.make_star_true = MagicMock()
- journalist.db_session = MagicMock()
- journalist.url_for = MagicMock()
- journalist.redirect = MagicMock()
- journalist.get_one_or_else = MagicMock()
-
- @patch('journalist.url_for')
- @patch('journalist.redirect')
- def test_add_star_renders_template(self, redirect, url_for):
- redirect_template = journalist.add_star('sid')
-
- self.assertEqual(redirect_template, redirect(url_for('index')))
-
- @patch('journalist.db_session')
- def test_add_star_makes_commits(self, db_session):
- journalist.add_star('sid')
-
- db_session.commit.assert_called_with()
-
- @patch('journalist.make_star_true')
- def test_single_delegates_to_make_star_true(self, make_star_true):
- sid = 'sid'
-
- journalist.add_star(sid)
-
- make_star_true.assert_called_with(sid)
-
- @patch('journalist.url_for')
- @patch('journalist.redirect')
- def test_remove_star_renders_template(self, redirect, url_for):
- redirect_template = journalist.remove_star('sid')
-
- self.assertEqual(redirect_template, redirect(url_for('index')))
-
- @patch('journalist.db_session')
- def test_remove_star_makes_commits(self, db_session):
- journalist.remove_star('sid')
-
- db_session.commit.assert_called_with()
-
- @patch('journalist.make_star_false')
- def test_remove_star_delegates_to_make_star_false(self, make_star_false):
- sid = 'sid'
-
- journalist.remove_star(sid)
-
- make_star_false.assert_called_with(sid)
-
- @classmethod
- def tearDownClass(cls):
- # Reset the module variables that were changed to mocks so we don't
- # break other tests
- reload(journalist)
diff --git a/tests/.coveragerc b/tests/.coveragerc
new file mode 100644
--- /dev/null
+++ b/tests/.coveragerc
@@ -0,0 +1,3 @@
+[run]
+branch = True
+source = ../securedrop
diff --git a/tests/__init__.py b/tests/__init__.py
new file mode 100644
--- /dev/null
+++ b/tests/__init__.py
@@ -0,0 +1,8 @@
+# -*- coding: utf-8 -*-
+from os.path import abspath, dirname, join, realpath
+import sys
+
+# The tests directory should be adjacent to the securedrop directory. By adding
+# the securedrop directory to sys.path here, all test modules are able to
+# directly import modules in the securedrop directory.
+sys.path.append(abspath(join(dirname(realpath(__file__)), '..', 'securedrop')))
diff --git a/tests/conftest.py b/tests/conftest.py
new file mode 100644
--- /dev/null
+++ b/tests/conftest.py
@@ -0,0 +1,60 @@
+# -*- coding: utf-8 -*-
+import os
+import shutil
+import signal
+import subprocess
+
+import psutil
+import pytest
+
+os.environ['SECUREDROP_ENV'] = 'test'
+import config
+
+# TODO: the PID file for the redis worker is hard-coded below.
+# Ideally this constant would be provided by a test harness.
+# It has been intentionally omitted from `config.py.example`
+# in order to isolate the test vars from prod vars.
+TEST_WORKER_PIDFILE = '/tmp/securedrop_test_worker.pid'
+
[email protected](scope='session')
+def setUptearDown():
+ _start_test_rqworker(config)
+ yield
+ _stop_test_rqworker()
+ _cleanup_test_securedrop_dataroot(config)
+
+
+def _start_test_rqworker(config):
+ if not psutil.pid_exists(_get_pid_from_file(TEST_WORKER_PIDFILE)):
+ tmp_logfile = open('/tmp/test_rqworker.log', 'w')
+ subprocess.Popen(['rqworker', 'test',
+ '-P', config.SECUREDROP_ROOT,
+ '--pid', TEST_WORKER_PIDFILE],
+ stdout=tmp_logfile,
+ stderr=subprocess.STDOUT)
+
+
+def _stop_test_rqworker():
+ rqworker_pid = _get_pid_from_file(TEST_WORKER_PIDFILE)
+ if rqworker_pid:
+ os.kill(rqworker_pid, signal.SIGTERM)
+ try:
+ os.remove(TEST_WORKER_PIDFILE)
+ except OSError:
+ pass
+
+
+def _get_pid_from_file(pid_file_name):
+ try:
+ return int(open(pid_file_name).read())
+ except IOError:
+ return None
+
+
+def _cleanup_test_securedrop_dataroot(config):
+ # Keyboard interrupts or dropping to pdb after a test failure sometimes
+ # result in the temporary test SecureDrop data root not being deleted.
+ try:
+ shutil.rmtree(config.SECUREDROP_DATA_ROOT)
+ except OSError:
+ pass
diff --git a/securedrop/test_journalist_key.pub b/tests/files/test_journalist_key.pub
similarity index 100%
rename from securedrop/test_journalist_key.pub
rename to tests/files/test_journalist_key.pub
diff --git a/securedrop/test_journalist_key.sec b/tests/files/test_journalist_key.sec
similarity index 100%
rename from securedrop/test_journalist_key.sec
rename to tests/files/test_journalist_key.sec
diff --git a/securedrop/tests/__init__.py b/tests/functional/__init__.py
similarity index 100%
rename from securedrop/tests/__init__.py
rename to tests/functional/__init__.py
diff --git a/securedrop/tests/functional/functional_test.py b/tests/functional/functional_test.py
similarity index 81%
rename from securedrop/tests/functional/functional_test.py
rename to tests/functional/functional_test.py
--- a/securedrop/tests/functional/functional_test.py
+++ b/tests/functional/functional_test.py
@@ -4,6 +4,7 @@
import mock
from multiprocessing import Process
import os
+from os.path import abspath, dirname, join, realpath
import shutil
import signal
import socket
@@ -13,19 +14,20 @@
import unittest
import urllib2
+from Crypto import Random
import gnupg
from selenium import webdriver
from selenium.common.exceptions import WebDriverException
from selenium.webdriver.firefox import firefox_binary
-# Set environment variable so config.py uses a test environment
os.environ['SECUREDROP_ENV'] = 'test'
import config
import db
import journalist
import source
-import tests.utils as utils
+import tests.utils.env as env
+LOG_DIR = abspath(join(dirname(realpath(__file__)), '..', 'log'))
class FunctionalTest():
@@ -37,7 +39,7 @@ def _unused_port(self):
return port
def _create_webdriver(self):
- log_file = open('tests/log/firefox.log', 'a')
+ log_file = open(join(LOG_DIR, 'firefox.log'), 'a')
log_file.write(
'\n\n[%s] Running Functional Tests\n' % str(
datetime.now()))
@@ -54,8 +56,8 @@ def setUp(self):
signal.signal(signal.SIGUSR1, lambda _, s: traceback.print_stack(s))
- utils.env.create_directories()
- self.gpg = utils.env.init_gpg()
+ env.create_directories()
+ self.gpg = env.init_gpg()
db.init_db()
source_port = self._unused_port()
@@ -65,6 +67,13 @@ def setUp(self):
self.journalist_location = "http://localhost:%d" % journalist_port
def start_source_server():
+ # We call Random.atfork() here because we fork the source and
+ # journalist server from the main Python process we use to drive
+ # our browser with multiprocessing.Process() below. These child
+ # processes inherit the same RNG state as the parent process, which
+ # is a problem because they would produce identical output if we
+ # didn't re-seed them after forking.
+ Random.atfork()
source.app.run(
port=source_port,
debug=True,
@@ -72,6 +81,7 @@ def start_source_server():
threaded=True)
def start_journalist_server():
+ Random.atfork()
journalist.app.run(
port=journalist_port,
debug=True,
@@ -99,7 +109,7 @@ def start_journalist_server():
self.secret_message = 'blah blah blah'
def tearDown(self):
- utils.env.teardown()
+ env.teardown()
self.driver.quit()
self.source_process.terminate()
self.journalist_process.terminate()
diff --git a/securedrop/tests/functional/journalist_navigation_steps.py b/tests/functional/journalist_navigation_steps.py
similarity index 91%
rename from securedrop/tests/functional/journalist_navigation_steps.py
rename to tests/functional/journalist_navigation_steps.py
--- a/securedrop/tests/functional/journalist_navigation_steps.py
+++ b/tests/functional/journalist_navigation_steps.py
@@ -6,7 +6,8 @@
from selenium.common.exceptions import NoSuchElementException
-from db import db_session, Journalist
+import tests.utils.db_helper as db_helper
+from db import Journalist
class JournalistNavigationSteps():
@@ -48,37 +49,15 @@ def _login_user(self, username, password, token):
def _journalist_logs_in(self):
# Create a test user for logging in
- test_user_info = dict(
- username='test',
- password='test')
- test_user = Journalist(**test_user_info)
- db_session.add(test_user)
- db_session.commit()
-
- self._login_user(test_user_info['username'],
- test_user_info['password'],
- 'mocked')
+ self.user, self.user_pw = db_helper.init_journalist()
+ self._login_user(self.user.username, self.user_pw, 'mocked')
headline = self.driver.find_element_by_css_selector('span.headline')
self.assertIn('Sources', headline.text)
def _admin_logs_in(self):
- # Create a test admin user for logging in
- admin_user_info = dict(
- username='admin',
- password='admin',
- is_admin=True)
- admin_user = Journalist(**admin_user_info)
- db_session.add(admin_user)
- db_session.commit()
-
- # Stash the admin user on self so we can use it in later tests
- self.admin_user = admin_user_info
- self.admin_user['orm_obj'] = admin_user
-
- self._login_user(admin_user_info['username'],
- admin_user_info['password'],
- 'mocked')
+ self.admin, self.admin_pw = db_helper.init_journalist(is_admin=True)
+ self._login_user(self.admin.username, self.admin_pw, 'mocked')
# Admin user should log in to the same interface as a normal user,
# since there may be users who wish to be both journalists and admins.
@@ -208,9 +187,7 @@ def _admin_can_edit_new_user(self):
# Log the new user out
self._logout()
- self._login_user(self.admin_user['username'],
- self.admin_user['password'],
- 'mocked')
+ self._login_user(self.admin.username, self.admin_pw, 'mocked')
# Go to the admin interface
admin_interface_link = self.driver.find_element_by_link_text('Admin')
@@ -257,9 +234,7 @@ def _admin_can_edit_new_user(self):
# Log the admin user back in
self._logout()
- self._login_user(self.admin_user['username'],
- self.admin_user['password'],
- 'mocked')
+ self._login_user(self.admin.username, self.admin_pw, 'mocked')
# Go to the admin interface
admin_interface_link = self.driver.find_element_by_link_text('Admin')
diff --git a/securedrop/tests/functional/source_navigation_steps.py b/tests/functional/source_navigation_steps.py
similarity index 100%
rename from securedrop/tests/functional/source_navigation_steps.py
rename to tests/functional/source_navigation_steps.py
diff --git a/securedrop/tests/functional/submission_not_in_memory.py b/tests/functional/submission_not_in_memory.py
similarity index 100%
rename from securedrop/tests/functional/submission_not_in_memory.py
rename to tests/functional/submission_not_in_memory.py
diff --git a/securedrop/tests/functional/admin_interface.py b/tests/functional/test_admin_interface.py
similarity index 91%
rename from securedrop/tests/functional/admin_interface.py
rename to tests/functional/test_admin_interface.py
--- a/securedrop/tests/functional/admin_interface.py
+++ b/tests/functional/test_admin_interface.py
@@ -20,6 +20,3 @@ def test_admin_interface(self):
self._admin_adds_a_user()
self._new_user_can_log_in()
self._admin_can_edit_new_user()
-
-if __name__ == "__main__":
- unittest.main(verbosity=2)
diff --git a/securedrop/tests/functional/submit_and_retrieve_file.py b/tests/functional/test_submit_and_retrieve_file.py
similarity index 96%
rename from securedrop/tests/functional/submit_and_retrieve_file.py
rename to tests/functional/test_submit_and_retrieve_file.py
--- a/securedrop/tests/functional/submit_and_retrieve_file.py
+++ b/tests/functional/test_submit_and_retrieve_file.py
@@ -38,6 +38,3 @@ def test_source_cancels_at_submit_page(self):
self._source_chooses_to_submit_documents()
self._source_continues_to_submit_page()
self._source_hits_cancel_at_submit_page()
-
-if __name__ == "__main__":
- unittest.main(verbosity=2)
diff --git a/securedrop/tests/functional/submit_and_retrieve_message.py b/tests/functional/test_submit_and_retrieve_message.py
similarity index 93%
rename from securedrop/tests/functional/submit_and_retrieve_message.py
rename to tests/functional/test_submit_and_retrieve_message.py
--- a/securedrop/tests/functional/submit_and_retrieve_message.py
+++ b/tests/functional/test_submit_and_retrieve_message.py
@@ -26,7 +26,3 @@ def test_submit_and_retrieve_happy_path(self):
self._journalist_logs_in()
self._journalist_checks_messages()
self._journalist_downloads_message()
-
-
-if __name__ == "__main__":
- unittest.main(verbosity=2)
diff --git a/securedrop/tests/log/.gitignore b/tests/log/.gitignore
similarity index 100%
rename from securedrop/tests/log/.gitignore
rename to tests/log/.gitignore
diff --git a/tests/pytest.ini b/tests/pytest.ini
new file mode 100644
--- /dev/null
+++ b/tests/pytest.ini
@@ -0,0 +1,4 @@
+[pytest]
+testpaths = . functional
+usefixtures = setUptearDown
+addopts = --cov=../securedrop/
diff --git a/securedrop/tests/test_unit_crypto_util.py b/tests/test_crypto_util.py
similarity index 81%
rename from securedrop/tests/test_unit_crypto_util.py
rename to tests/test_crypto_util.py
--- a/securedrop/tests/test_unit_crypto_util.py
+++ b/tests/test_crypto_util.py
@@ -1,12 +1,8 @@
-#!/usr/bin/env python
# -*- coding: utf-8 -*-
-
import os
import unittest
-# Set environment variable so config.py uses a test environment
os.environ['SECUREDROP_ENV'] = 'test'
-
import config
import crypto_util
import utils
@@ -27,6 +23,3 @@ def test_clean(self):
crypto_util.clean('foo bar`') # backtick is not currently allowed
with self.assertRaises(crypto_util.CryptoException):
crypto_util.clean('bar baz~') # tilde is not currently allowed
-
-if __name__ == "__main__":
- unittest.main(verbosity=2)
diff --git a/securedrop/tests/test_unit_db.py b/tests/test_db.py
similarity index 97%
rename from securedrop/tests/test_unit_db.py
rename to tests/test_db.py
--- a/securedrop/tests/test_unit_db.py
+++ b/tests/test_db.py
@@ -1,4 +1,3 @@
-#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import unittest
@@ -79,7 +78,3 @@ def test_journalist_string_representation(self):
def test_source_string_representation(self):
test_source, _ = db_helper.init_source()
test_source.__repr__()
-
-
-if __name__ == "__main__":
- unittest.main(verbosity=2)
diff --git a/securedrop/tests/test_unit_integration.py b/tests/test_integration.py
similarity index 99%
rename from securedrop/tests/test_unit_integration.py
rename to tests/test_integration.py
--- a/securedrop/tests/test_unit_integration.py
+++ b/tests/test_integration.py
@@ -1,6 +1,4 @@
-#!/usr/bin/env python
# -*- coding: utf-8 -*-
-
from cStringIO import StringIO
import gzip
import mock
@@ -16,7 +14,6 @@
from flask import session, g, escape
import gnupg
-# Set environment variable so config.py uses a test environment
os.environ['SECUREDROP_ENV'] = 'test'
import config
import crypto_util
@@ -615,7 +612,3 @@ def helper_filenames_delete(self, soup, i):
# Make sure the files were deleted from the filesystem
utils.async.wait_for_assertion(lambda: self.assertFalse(
any([os.path.exists(store.path(sid, doc_name)) for doc_name in checkbox_values])))
-
-
-if __name__ == "__main__":
- unittest.main(verbosity=2)
diff --git a/securedrop/tests/test_unit_journalist.py b/tests/test_journalist.py
similarity index 83%
rename from securedrop/tests/test_unit_journalist.py
rename to tests/test_journalist.py
--- a/securedrop/tests/test_unit_journalist.py
+++ b/tests/test_journalist.py
@@ -1,6 +1,4 @@
-#!/usr/bin/env python
# -*- coding: utf-8 -*-
-
from cStringIO import StringIO
import os
import random
@@ -10,8 +8,8 @@
from flask import url_for, escape
from flask_testing import TestCase
+from mock import patch, ANY, MagicMock
-# Set environment variable so config.py uses a test environment
os.environ['SECUREDROP_ENV'] = 'test'
import config
import crypto_util
@@ -732,5 +730,174 @@ def test_add_star_redirects_to_index(self):
self.assertRedirects(resp, url_for('index'))
-if __name__ == "__main__":
- unittest.main(verbosity=2)
+class TestJournalistAppTwo(unittest.TestCase):
+
+ def setUp(self):
+ journalist.logged_in = MagicMock()
+ journalist.request = MagicMock()
+ journalist.url_for = MagicMock()
+ journalist.redirect = MagicMock()
+ journalist.abort = MagicMock()
+ journalist.db_session = MagicMock()
+ journalist.get_docs = MagicMock()
+ journalist.get_or_else = MagicMock()
+
+ def _set_up_request(self, cols_selected, action):
+ journalist.request.form.__contains__.return_value = True
+ journalist.request.form.getlist = MagicMock(return_value=cols_selected)
+ journalist.request.form.__getitem__.return_value = action
+
+ @patch("journalist.col_delete")
+ def test_col_process_delegates_to_col_delete(self, col_delete):
+ cols_selected = ['source_id']
+ self._set_up_request(cols_selected, 'delete')
+
+ journalist.col_process()
+
+ col_delete.assert_called_with(cols_selected)
+
+ @patch("journalist.col_star")
+ def test_col_process_delegates_to_col_star(self, col_star):
+ cols_selected = ['source_id']
+ self._set_up_request(cols_selected, 'star')
+
+ journalist.col_process()
+
+ col_star.assert_called_with(cols_selected)
+
+ @patch("journalist.col_un_star")
+ def test_col_process_delegates_to_col_un_star(self, col_un_star):
+ cols_selected = ['source_id']
+ self._set_up_request(cols_selected, 'un-star')
+
+ journalist.col_process()
+
+ col_un_star.assert_called_with(cols_selected)
+
+ @patch("journalist.abort")
+ def test_col_process_returns_404_with_bad_action(self, abort):
+ cols_selected = ['source_id']
+ self._set_up_request(cols_selected, 'something-random')
+
+ journalist.col_process()
+
+ abort.assert_called_with(ANY)
+
+ @patch("journalist.make_star_true")
+ @patch("journalist.db_session")
+ def test_col_star_call_db_(self, db_session, make_star_true):
+ journalist.col_star(['sid'])
+
+ make_star_true.assert_called_with('sid')
+
+ @patch("journalist.db_session")
+ def test_col_un_star_call_db(self, db_session):
+ journalist.col_un_star([])
+
+ db_session.commit.assert_called_with()
+
+
+ @classmethod
+ def tearDownClass(cls):
+ # Reset the module variables that were changed to mocks so we don't
+ # break other tests
+ reload(journalist)
+
+
+class TestJournalistLogin(unittest.TestCase):
+
+ def setUp(self):
+ utils.env.setup()
+
+ # Patch the two-factor verification so it always succeeds
+ utils.db_helper.mock_verify_token(self)
+
+ self.user, self.user_pw = utils.db_helper.init_journalist()
+
+ def tearDown(self):
+ utils.env.teardown()
+ # TODO: figure out why this is necessary here, but unnecessary in all
+ # of the tests in `tests/test_unit_*.py`. Without this, the session
+ # continues to return values even if the underlying database is deleted
+ # (as in `shared_teardown`).
+ db_session.remove()
+
+ @patch('db.Journalist._scrypt_hash')
+ @patch('db.Journalist.valid_password', return_value=True)
+ def test_valid_login_calls_scrypt(self, mock_scrypt_hash, mock_valid_password):
+ Journalist.login(self.user.username, self.user_pw, 'mocked')
+ self.assertTrue(mock_scrypt_hash.called,
+ "Failed to call _scrypt_hash for password w/ valid length")
+
+ @patch('db.Journalist._scrypt_hash')
+ def test_login_with_invalid_password_doesnt_call_scrypt(self, mock_scrypt_hash):
+ invalid_pw = 'a'*(Journalist.MAX_PASSWORD_LEN + 1)
+ with self.assertRaises(InvalidPasswordLength):
+ Journalist.login(self.user.username, invalid_pw, 'mocked')
+ self.assertFalse(mock_scrypt_hash.called,
+ "Called _scrypt_hash for password w/ invalid length")
+
+ @classmethod
+ def tearDownClass(cls):
+ # Reset the module variables that were changed to mocks so we don't
+ # break other tests
+ reload(journalist)
+
+
+class TestJournalist(unittest.TestCase):
+
+ def setUp(self):
+ journalist.logged_in = MagicMock()
+ journalist.make_star_true = MagicMock()
+ journalist.db_session = MagicMock()
+ journalist.url_for = MagicMock()
+ journalist.redirect = MagicMock()
+ journalist.get_one_or_else = MagicMock()
+
+ @patch('journalist.url_for')
+ @patch('journalist.redirect')
+ def test_add_star_renders_template(self, redirect, url_for):
+ redirect_template = journalist.add_star('sid')
+
+ self.assertEqual(redirect_template, redirect(url_for('index')))
+
+ @patch('journalist.db_session')
+ def test_add_star_makes_commits(self, db_session):
+ journalist.add_star('sid')
+
+ db_session.commit.assert_called_with()
+
+ @patch('journalist.make_star_true')
+ def test_single_delegates_to_make_star_true(self, make_star_true):
+ sid = 'sid'
+
+ journalist.add_star(sid)
+
+ make_star_true.assert_called_with(sid)
+
+ @patch('journalist.url_for')
+ @patch('journalist.redirect')
+ def test_remove_star_renders_template(self, redirect, url_for):
+ redirect_template = journalist.remove_star('sid')
+
+ self.assertEqual(redirect_template, redirect(url_for('index')))
+
+ @patch('journalist.db_session')
+ def test_remove_star_makes_commits(self, db_session):
+ journalist.remove_star('sid')
+
+ db_session.commit.assert_called_with()
+
+ @patch('journalist.make_star_false')
+ def test_remove_star_delegates_to_make_star_false(self, make_star_false):
+ sid = 'sid'
+
+ journalist.remove_star(sid)
+
+ make_star_false.assert_called_with(sid)
+
+ @classmethod
+ def tearDownClass(cls):
+ # Reset the module variables that were changed to mocks so we don't
+ # break other tests
+ reload(journalist)
diff --git a/securedrop/tests/test_unit_manage.py b/tests/test_manage.py
similarity index 89%
rename from securedrop/tests/test_unit_manage.py
rename to tests/test_manage.py
--- a/securedrop/tests/test_unit_manage.py
+++ b/tests/test_manage.py
@@ -1,4 +1,3 @@
-#!/usr/bin/env python
# -*- coding: utf-8 -*-
import manage
@@ -6,7 +5,6 @@
class TestManagePy(unittest.TestCase):
-
def test_parse_args(self):
# just test that the arg parser is stable
manage.get_args()
diff --git a/securedrop/tests/test_unit_source.py b/tests/test_source.py
similarity index 99%
rename from securedrop/tests/test_unit_source.py
rename to tests/test_source.py
--- a/securedrop/tests/test_unit_source.py
+++ b/tests/test_source.py
@@ -1,4 +1,3 @@
-#!/usr/bin/env python
# -*- coding: utf-8 -*-
from cStringIO import StringIO
from mock import patch, ANY
@@ -285,7 +284,3 @@ def test_login_with_overly_long_codename(self, mock_hash_codename):
self.assertFalse(mock_hash_codename.called,
"Called hash_codename for codename w/ invalid "
"length")
-
-
-if __name__ == "__main__":
- unittest.main(verbosity=2)
diff --git a/securedrop/tests/test_unit_store.py b/tests/test_store.py
similarity index 93%
rename from securedrop/tests/test_unit_store.py
rename to tests/test_store.py
--- a/securedrop/tests/test_unit_store.py
+++ b/tests/test_store.py
@@ -1,12 +1,9 @@
-#!/usr/bin/env python
# -*- coding: utf-8 -*-
-
import os
import unittest
import zipfile
import crypto_util
-# Set environment variable so config.py uses a test environment
os.environ['SECUREDROP_ENV'] = 'test'
import config
from db import db_session, Source
@@ -57,6 +54,3 @@ def test_rename_valid_submission(self):
actual_filename = store.rename_submission(source.filesystem_id, old_filename,
new_journalist_filename)
self.assertEquals(actual_filename, expected_filename)
-
-if __name__ == "__main__":
- unittest.main(verbosity=2)
diff --git a/securedrop/tests/test_unit_template_filters.py b/tests/test_template_filters.py
similarity index 93%
rename from securedrop/tests/test_unit_template_filters.py
rename to tests/test_template_filters.py
--- a/securedrop/tests/test_unit_template_filters.py
+++ b/tests/test_template_filters.py
@@ -1,4 +1,3 @@
-#!/usr/bin/env python
# -*- coding: utf-8 -*-
from datetime import datetime, timedelta
import os
@@ -6,10 +5,6 @@
import template_filters
-# Set environment variable so config.py uses a test environment
-os.environ['SECUREDROP_ENV'] = 'test'
-
-
class TestTemplateFilters(unittest.TestCase):
def test_datetimeformat_default_fmt(self):
@@ -59,4 +54,4 @@ def test_relative_timestamp_days(self):
def test_relative_timestamp_none(self):
test_time = datetime.utcnow() - timedelta(days=999)
result = template_filters._relative_timestamp(test_time)
- self.assertEquals(None, result)
\ No newline at end of file
+ self.assertEquals(None, result)
diff --git a/securedrop/tests/utils/__init__.py b/tests/utils/__init__.py
similarity index 100%
rename from securedrop/tests/utils/__init__.py
rename to tests/utils/__init__.py
diff --git a/securedrop/tests/utils/async.py b/tests/utils/async.py
similarity index 100%
rename from securedrop/tests/utils/async.py
rename to tests/utils/async.py
diff --git a/securedrop/tests/utils/db_helper.py b/tests/utils/db_helper.py
similarity index 98%
rename from securedrop/tests/utils/db_helper.py
rename to tests/utils/db_helper.py
--- a/securedrop/tests/utils/db_helper.py
+++ b/tests/utils/db_helper.py
@@ -5,7 +5,6 @@
import mock
import os
-# Set environment variable so config.py uses a test environment
os.environ['SECUREDROP_ENV'] = 'test'
import config
import crypto_util
diff --git a/securedrop/tests/utils/env.py b/tests/utils/env.py
similarity index 74%
rename from securedrop/tests/utils/env.py
rename to tests/utils/env.py
--- a/securedrop/tests/utils/env.py
+++ b/tests/utils/env.py
@@ -2,16 +2,18 @@
"""Testing utilities related to setup and teardown of test environment.
"""
import os
+from os.path import abspath, dirname, exists, isdir, join, realpath
import shutil
import subprocess
import gnupg
-# Set environment variable so config.py uses a test environment
os.environ['SECUREDROP_ENV'] = 'test'
import config
import crypto_util
-from db import init_db
+from db import init_db, db_session
+
+FILES_DIR = abspath(join(dirname(realpath(__file__)), '..', 'files'))
# TODO: the PID file for the redis worker is hard-coded below. Ideally this
# constant would be provided by a test harness. It has been intentionally
@@ -26,7 +28,7 @@ def create_directories():
"""
for d in (config.SECUREDROP_DATA_ROOT, config.STORE_DIR,
config.GPG_KEY_DIR, config.TEMP_DIR):
- if not os.path.isdir(d):
+ if not isdir(d):
os.mkdir(d)
@@ -36,7 +38,8 @@ def init_gpg():
"""
gpg = gnupg.GPG(homedir=config.GPG_KEY_DIR)
# Faster to import a pre-generated key than to gen a new one every time.
- for keyfile in ("test_journalist_key.pub", "test_journalist_key.sec"):
+ for keyfile in (join(FILES_DIR, "test_journalist_key.pub"),
+ join(FILES_DIR, "test_journalist_key.sec")):
gpg.import_keys(open(keyfile).read())
return gpg
@@ -49,11 +52,16 @@ def setup():
# Do tests that should always run on app startup
crypto_util.do_runtime_tests()
# Start the Python-RQ worker if it's not already running
- if not os.path.exists(TEST_WORKER_PIDFILE):
+ if not exists(TEST_WORKER_PIDFILE):
subprocess.Popen(["rqworker",
"-P", config.SECUREDROP_ROOT,
"--pid", TEST_WORKER_PIDFILE])
def teardown():
- shutil.rmtree(config.SECUREDROP_DATA_ROOT)
+ db_session.remove()
+ try:
+ shutil.rmtree(config.SECUREDROP_DATA_ROOT)
+ except OSError as exc:
+        if 'No such file or directory' not in str(exc):
+ raise
| Test database teardown is not done correctly
For our `TestCase`s we generally call the following functions from `tests.utils.env` in the `setUp` and `tearDown` methods:
```py
def setup():
"""Set up the file system, GPG, and database."""
create_directories()
init_gpg()
init_db()
# Do tests that should always run on app startup
crypto_util.do_runtime_tests()
# Start the Python-RQ worker if it's not already running
if not os.path.exists(TEST_WORKER_PIDFILE):
subprocess.Popen(["rqworker",
"-P", config.SECUREDROP_ROOT,
"--pid", TEST_WORKER_PIDFILE])
def teardown():
shutil.rmtree(config.SECUREDROP_DATA_ROOT)
```
As you can see, we basically just blow away the test data root. Surprisingly, this has been sufficient to get all our tests passing. Working on my `encrypted-at-rest-tagging` branch, I discovered that this approach is actually pretty problematic. For example, in the code below the second test will fail with an `ObjectDeletedError` when `self.admin.db_key_salt` is accessed, unless you comment out the first. This is because after the database has been deleted and re-created once, we're still using the same session, engine, and connection, which seems to be the problem. I tried playing around with `sqlalchemy.inspect` to get a better idea of the problem, and also refreshing and expiring the session, but I don't have any particularly successful results to share. Given the complexity of the SQLAlchemy package, I think it is more important that we follow the best practices outlined in their documentation, such as http://docs.sqlalchemy.org/en/latest/orm/session_transaction.html#joining-a-session-into-an-external-transaction-such-as-for-test-suites, than that we get a complete handle on exactly what the problem is.
```py
class TestColumnEncryption(unittest.TestCase):
def setUp(self):
env.setup()
self.db_key = crypto_util.gen_db_key()
self.admin, self.admin_pw = \
db_helper.init_journalist(is_admin=True, db_key=self.db_key)
self.source, _ = db_helper.init_source()
def tearDown(self):
env.teardown()
def test_decrypt_db_key(self):
self.assertEqual(self.admin.decrypt_db_key(self.admin_pw), self.db_key)
def test_decrypt_db_key_after_password_change(self):
old_salt = self.admin.db_key_salt
old_encrypted_db_key = self.admin.encrypted_db_key
new_pw = crypto_util.genrandomid()
self.admin.set_password(new_pw, db_key=self.db_key)
new_salt = self.admin.db_key_salt
new_encrypted_db_key = self.admin.encrypted_db_key
self.assertNotEqual(self.admin.decrypt_db_key(self.admin_pw), self.db_key)
self.assertEqual(self.admin.decrypt_db_key(new_pw), self.db_key)
self.assertNotEqual(old_salt, new_salt)
self.assertNotEqual(old_encrypted_db_key, new_encrypted_db_key)
```
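For reference, a minimal sketch of the transactional pattern from the SQLAlchemy docs linked above might look roughly like this in our test helpers (this assumes `db.py` exposes the `engine` alongside the scoped `db_session`; the names are illustrative, not a drop-in patch):
```py
import unittest
from db import engine, db_session


class TransactionalTestCase(unittest.TestCase):
    def setUp(self):
        # Open a dedicated connection and start an outer transaction
        # that every query issued during the test will run inside of.
        self.connection = engine.connect()
        self.transaction = self.connection.begin()
        # Bind the scoped session to this connection for the duration
        # of the test.
        db_session.configure(bind=self.connection)

    def tearDown(self):
        # Discard the session, roll back everything the test did, and
        # release the connection so the next test starts clean.
        db_session.remove()
        self.transaction.rollback()
        self.connection.close()
```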
| 2017-01-18T04:27:30Z | [] | [] |
|
freedomofpress/securedrop | 1,558 | freedomofpress__securedrop-1558 | [
"1526"
] | 09ca44cd9a42eb9a566923b4296b2dbdea5e9789 | diff --git a/securedrop/source.py b/securedrop/source.py
--- a/securedrop/source.py
+++ b/securedrop/source.py
@@ -20,6 +20,7 @@
import crypto_util
import store
import template_filters
+import util
from db import db_session, Source, Submission, Reply, get_one_or_else
from request_that_secures_file_uploads import RequestThatSecuresFileUploads
from jinja2 import evalcontextfilter
@@ -292,18 +293,24 @@ def submit():
fh.stream))
if first_submission:
- flash(
- "Thanks for submitting something to SecureDrop! Please check back later for replies.",
- "notification")
+ flash(Markup("""{svg}<div class="message"><strong>Success!</strong>
+ <p>Thank you for sending this information to us.
+ Please check back later for replies. <a href="#codename-hint">
+ Forgot your codename?</a></p></div>
+ """.format(svg=util.svg('success_checkmark.svg'))),
+ "success")
else:
- if msg:
- flash("Thanks! We received your message.", "notification")
- if fh:
- flash(
- '{} "{}".'.format(
- "Thanks! We received your document",
- fh.filename or '[unnamed]'),
- "notification")
+ if msg and not fh:
+ things = 'message'
+ elif not msg and fh:
+ things = 'document'
+ else:
+ things = 'message and document'
+
+ flash(Markup("""{svg}<div class="message"><p>Thanks! We received your
+ {things}.</p></div>
+ """.format(svg=util.svg('success_checkmark.svg'), things=things)),
+ "success")
for fname in fnames:
submission = Submission(g.source, fname)
@@ -392,17 +399,14 @@ def login():
def logout():
if logged_in():
session.clear()
- tor_msg = Markup("""<strong>Important:</strong><br>
- Thank you for logging out!<br>
- Please fully end your session by restarting
- Tor Browser:<br>
- 1. Click the
- <img src='static/i/toronion.png' alt='Tor icon' />
- Tor onion icon in the toolbar above.<br>
- 2. Click <strong> New Identity</strong>.<br>
- 3. Click <strong>Yes</strong> in the dialog box
- that appears.""")
- flash(tor_msg, "error")
+ msg = Markup("""<div class="icon">{svg}</div>
+ <div class="message"><strong>Important!</strong><br>
+ <p>Thank you for exiting your session! Please select "New
+ Identity" from the green Onion button in the Tor browser
+ to clear all history of your SecureDrop usage from this
+ device.</p></div>
+ """.format(svg=util.svg('hand_with_fingerprint.svg')))
+ flash(msg, "important")
return redirect(url_for('index'))
diff --git a/securedrop/store.py b/securedrop/store.py
--- a/securedrop/store.py
+++ b/securedrop/store.py
@@ -12,6 +12,7 @@
from werkzeug import secure_filename
from secure_tempfile import SecureTemporaryFile
+from util import PathException
import logging
log = logging.getLogger(__name__)
@@ -20,15 +21,6 @@
"^(?P<index>\d+)\-[a-z0-9-_]*(?P<file_type>msg|doc\.(gz|zip)|reply)\.gpg$").match
-class PathException(Exception):
-
- """An exception raised by `store.verify` when it encounters a bad path. A path
- can be bad when it is not absolute, not normalized, not within
- `config.STORE_DIR`, or doesn't match the filename format.
- """
- pass
-
-
def verify(p):
"""Assert that the path is absolute, normalized, inside `config.STORE_DIR`, and
matches the filename format.
diff --git a/securedrop/util.py b/securedrop/util.py
new file mode 100644
--- /dev/null
+++ b/securedrop/util.py
@@ -0,0 +1,32 @@
+# -*- coding: utf-8 -*-
+
+from os import path
+
+from flask import current_app
+
+
+class PathException(Exception):
+
+    """An exception raised by `util.svg` or `store.verify` when it encounters
+    a bad path. A path can be bad when it is not absolute or not normalized.
+ """
+ pass
+
+
+def svg(filename):
+ """Safely takes a filename and returns the contents of the file in the
+ static directory as a string.
+ """
+
+ if not filename.endswith('.svg'):
+ raise PathException('File must have .svg extension, but got {}'
+ .format(filename))
+
+ target_path = path.join(path.abspath(current_app.static_folder), 'i', 'svg',
+ filename)
+ if not path.isabs(target_path):
+        raise PathException('Expected path to SVG to be absolute and '
+ 'normalized, but found {}'.format(target_path))
+
+ with open(target_path) as f:
+ return f.read()
| diff --git a/securedrop/tests/functional/journalist_navigation_steps.py b/securedrop/tests/functional/journalist_navigation_steps.py
--- a/securedrop/tests/functional/journalist_navigation_steps.py
+++ b/securedrop/tests/functional/journalist_navigation_steps.py
@@ -156,7 +156,7 @@ def _admin_adds_a_user(self):
# Successfully verifying the code should redirect to the admin
# interface, and flash a message indicating success
- flashed_msgs = self.driver.find_elements_by_css_selector('p.flash')
+ flashed_msgs = self.driver.find_elements_by_css_selector('.flash')
self.assertIn(("Two factor token successfully verified for user"
" {}!").format(self.new_user['username']),
[el.text for el in flashed_msgs])
diff --git a/securedrop/tests/functional/source_navigation_steps.py b/securedrop/tests/functional/source_navigation_steps.py
--- a/securedrop/tests/functional/source_navigation_steps.py
+++ b/securedrop/tests/functional/source_navigation_steps.py
@@ -61,10 +61,8 @@ def _source_submits_a_file(self):
submit_button.click()
notification = self.driver.find_element_by_css_selector(
- 'p.notification')
- expected_notification = ('Thanks for submitting something '
- 'to SecureDrop! Please check back '
- 'later for replies.')
+ '.success')
+ expected_notification = 'Thank you for sending this information to us'
self.assertIn(expected_notification, notification.text)
def _source_submits_a_message(self):
@@ -76,12 +74,11 @@ def _source_submits_a_message(self):
submit_button.click()
notification = self.driver.find_element_by_css_selector(
- 'p.notification')
- self.assertIn('Thanks for submitting something to SecureDrop!'
- ' Please check back later for replies.',
+ '.success')
+ self.assertIn('Thank you for sending this information to us',
notification.text)
def _source_logs_out(self):
logout_button = self.driver.find_element_by_id('logout').click()
- notification = self.driver.find_element_by_css_selector('p.error')
- self.assertIn('Thank you for logging out!', notification.text)
+ notification = self.driver.find_element_by_css_selector('.important')
+ self.assertIn('Thank you for exiting your session!', notification.text)
diff --git a/securedrop/tests/test_unit_source.py b/securedrop/tests/test_unit_source.py
--- a/securedrop/tests/test_unit_source.py
+++ b/securedrop/tests/test_unit_source.py
@@ -129,7 +129,7 @@ def test_login_and_logout(self):
self.assertTrue(session['logged_in'])
resp = c.get('/logout', follow_redirects=True)
self.assertTrue(not session)
- self.assertIn('Thank you for logging out!', resp.data)
+ self.assertIn('Thank you for exiting your session!', resp.data)
def test_login_with_whitespace(self):
"""Test that codenames with leading or trailing whitespace still work"""
@@ -172,7 +172,7 @@ def test_initial_submission_notification(self):
resp = self._dummy_submission()
self.assertEqual(resp.status_code, 200)
self.assertIn(
- "Thanks for submitting something to SecureDrop! Please check back later for replies.",
+ "Thank you for sending this information to us",
resp.data)
def test_submit_message(self):
@@ -183,7 +183,7 @@ def test_submit_message(self):
fh=(StringIO(''), ''),
), follow_redirects=True)
self.assertEqual(resp.status_code, 200)
- self.assertIn("Thanks! We received your message.", resp.data)
+ self.assertIn("Thanks! We received your", resp.data)
def test_submit_empty_message(self):
self._new_codename()
@@ -206,7 +206,7 @@ def test_submit_big_message(self):
fh=(StringIO(''), ''),
), follow_redirects=True)
self.assertEqual(resp.status_code, 200)
- self.assertIn("Thanks! We received your message.", resp.data)
+ self.assertIn("Thanks! We received your", resp.data)
def test_submit_file(self):
self._new_codename()
@@ -216,12 +216,7 @@ def test_submit_file(self):
fh=(StringIO('This is a test'), 'test.txt'),
), follow_redirects=True)
self.assertEqual(resp.status_code, 200)
- self.assertIn(
- escape(
- '{} "{}"'.format(
- "Thanks! We received your document",
- "test.txt")),
- resp.data)
+        self.assertIn('Thanks! We received your', resp.data)
def test_submit_both(self):
self._new_codename()
@@ -231,13 +226,7 @@ def test_submit_both(self):
fh=(StringIO('This is a test'), 'test.txt'),
), follow_redirects=True)
self.assertEqual(resp.status_code, 200)
- self.assertIn("Thanks! We received your message.", resp.data)
- self.assertIn(
- escape(
- '{} "{}"'.format(
- "Thanks! We received your document",
- 'test.txt')),
- resp.data)
+ self.assertIn("Thanks! We received your", resp.data)
@patch('gzip.GzipFile')
def test_submit_sanitizes_filename(self, gzipfile):
diff --git a/securedrop/tests/test_unit_util.py b/securedrop/tests/test_unit_util.py
new file mode 100644
--- /dev/null
+++ b/securedrop/tests/test_unit_util.py
@@ -0,0 +1,23 @@
+# -*- coding: utf-8 -*-
+import source
+import unittest
+import util
+from util import PathException
+
+
+class TestUtil(unittest.TestCase):
+
+ def test_svg_valid(self):
+ with source.app.app_context():
+ res = util.svg('success_checkmark.svg')
+ self.assertIn('<svg', res)
+
+ def test_svg_bad_extension(self):
+ with source.app.app_context():
+ with self.assertRaises(PathException):
+ util.svg('config.py')
+
+ def test_svg_bad_path(self):
+ with source.app.app_context():
+ with self.assertRaises(PathException):
+ util.svg('../../../../etc/hosts')
| Make Tor Browser logout instructions more user friendly
This is a UX suggestion by @ninavizz to improve the notification displayed to sources upon logout (click on the image below to see her suggestion for this issue in greater detail, along with a nice icon we can use):
![screen shot 2017-01-17 at 5 08 18 pm](https://cloud.githubusercontent.com/assets/7832803/22050226/d2d2f5f2-dced-11e6-9192-d4689c5fd995.png)
| Hey folks! I'm not a developer, otherwise I'd do this myself... but to those who are and would like to code this, I offer the below (with a hearty "thank you!"). Thx, @redshiftzero
Note: It's important that the hue of the dialog bubble *not* appear red, as that's been established to cause anxiety and punitive feelings, with many users. So—just FYI. :)
`#673466` Title: Important!
`#555555` Text: "Thank you for exiting your session! Please select “New Identity” from the Onion button in the Tor browser, to clear all history of your SecureDrop usage from this device."
Icon: in the code, please include the CC credit:
“Fingerprint” icon is by Travis Avery from the Noun Project.
Icon's SVG code:
`<svg xmlns="http://www.w3.org/2000/svg" width="135" height="99" viewBox="0 0 135.3 98.8"><style>.a{fill:#673466;}</style><path d="M9.4 57.4c1.2-2.1 3.1-3.2 6.1-3.2 1.8 0 3.6 0.4 5.5 1.2l17.7 7.4c0.4 3.3 3.3 6 6.7 6h23.1c0.7 0 1.2-0.6 1.2-1.2 0-0.7-0.6-1.2-1.2-1.2H45.5c-2.4 0-4.4-1.9-4.4-4.3l0 0c0.1-2.3 2-4.3 4.4-4.3l0 0h11.9c3.5 0 12.3-2.8 17.1-4.4 4.1-1.4 8.1-2 11.9-1.8 2 0.1 3.8 0.4 5.2 0.9 3.1 1 9 4.4 12.7 6.6 1 0.6 1.9 1 2.5 1.4v8.3 17.6l-3.8-2.9c-4.1-3.1-9.2-4.5-14.4-4.2 -5.2 0.3-9 0.8-11.1 1.2l-11.8 2.5c-5.1 1-10.3 1-14.9-0.3 -1.5-0.4-2.9-0.9-3.9-1.4L10.5 60.9c-0.7-0.3-1-0.8-1.2-1.4C9.1 58.9 9.2 58.2 9.4 57.4z" class="a"/><path d="M111.8 56.3h3.4c1.3 0 2.4 1.1 2.4 2.4v30.8c0 1.3-1.1 2.4-2.4 2.4h-3.4c-1.3 0-2.4-1.1-2.4-2.4V58.8C109.4 57.4 110.4 56.4 111.8 56.3z" class="a"/><path d="M32.4 43.5c-0.8-4.5-1.2-9.2-1.2-13.9 0-0.4 0-0.7 0-1.1 0 0 0 0 0 0 0-4.1 1.3-7.8 3.4-10.8 0.4-0.6 1.1-0.9 1.8-0.9 0.5 0 0.9 0.1 1.2 0.4 0.6 0.4 0.9 1 0.9 1.8 0 0.5-0.1 0.9-0.4 1.2 -1.7 2.4-2.7 5.3-2.7 8.5 0 0 0 0 0-0.1 0 0.4 0 0.7 0 1.1 0 4.5 0.4 8.9 1.1 12.7 0 0.1 0 0.3 0 0.4 0 1.1-0.8 1.9-1.8 2.1 -0.1 0-0.3 0-0.4 0 0 0 0 0 0 0 -1.1 0-1.9-0.8-2.1-1.8L32.4 43.5zM41 15.6c0 0 0 0 0 0 -0.7 0-1.4-0.4-1.8-0.9 -0.2-0.3-0.4-0.8-0.4-1.2 0-0.7 0.4-1.4 0.9-1.7 3.2-2.1 7.1-3.3 11.2-3.3 4.1 0 7.8 1.2 11 3.2 0.6 0.4 1 1 1 1.8 0 0.4-0.1 0.8-0.4 1.2 -0.4 0.6-1.1 1-1.8 1 -0.4 0-0.9-0.1-1.1-0.3 -2.5-1.6-5.4-2.5-8.6-2.5 -3.2 0-6.2 1-8.7 2.6C41.9 15.4 41.5 15.6 41 15.6L41 15.6 41 15.6zM60.6 39.4c0.2-0.1 0.5-0.1 0.7-0.1 0.9 0 1.7 0.6 2.1 1.5 0.5 1.4 1.2 2.6 2.1 3.7 0.3 0.4 0.5 0.8 0.5 1.3 0 0.7-0.3 1.3-0.8 1.7 -0.4 0.3-0.8 0.5-1.3 0.5 -0.7 0-1.3-0.3-1.7-0.8 -1.2-1.5-2.1-3.1-2.8-5 -0.1-0.2-0.1-0.5-0.1-0.7 0-0.9 0.6-1.7 1.5-2L60.6 39.4zM66.4 28.6c0-3.2-1-6.2-2.8-8.5 -0.3-0.3-0.4-0.8-0.4-1.2 0-0.7 0.4-1.4 0.9-1.7 0.4-0.2 0.8-0.4 1.3-0.4 0.7 0 1.4 0.4 1.7 0.8 2.3 3.1 3.6 6.9 3.6 11 0 0.7-0.1 1.3-0.1 2 0 2.8 0.4 5.4 1.1 7.8 0.1 0.3 0.2 0.5 0.2 0.8 0 0.9-0.5 1.7-1.3 2 -0.3 0.1-0.5 0.2-0.8 0.2 -0.9 0-1.7-0.5-2-1.1 -1-3-1.5-6.1-1.5-9.4 0-0.7 0-1.5 0.1-2.1L66.4 28.6zM60.2 37.1c-0.1 0-0.1 0-0.2 0 0 0 0 0 0 0 -1.1 0-2-0.8-2.2-1.9 -0.2-1.9-0.3-4.1-0.3-6.6 -0.1-3.6-3.1-6.4-6.6-6.4 -3.6 0-6.5 2.8-6.6 6.4 0 0.7 0 1.3 0 2.1 0 0 0 0 0 0 0 1.2-0.9 2.1-2.1 2.1 0 0 0 0 0 0 -1.2 0-2.2-1-2.2-2.2 0-0.7 0-1.3 0-2.1 0.2-5.9 5-10.7 11-10.7 6 0 10.8 4.7 11 10.7 0 2.4 0.1 4.4 0.2 6.2 0 0.1 0 0.1 0 0.2 0 1.1-0.9 2.1-2 2.1V37.1zM53 28.6c0 9.6 1.2 15.1 4.3 19.7 0.2 0.3 0.4 0.7 0.4 1.2 0 0.8-0.4 1.4-1 1.8 -0.3 0.2-0.8 0.4-1.2 0.4 -0.8 0-1.4-0.4-1.8-1 -3.6-5.4-5-11.5-5-22.1 0-1.2 1-2.2 2.2-2.2s2.2 1 2.2 2.2V28.6zM44.7 37.1c0.3 4.1 1.4 8 2.9 11.4 0.1 0.3 0.2 0.6 0.2 0.9 0 0.9-0.5 1.6-1.3 2 -0.3 0.1-0.6 0.2-0.9 0.2 -0.9 0-1.6-0.5-1.9-1 -1.8-4-2.9-8.4-3.3-13.1 0 0 0-0.1 0-0.1 0-1.2 1-2.2 2.2-2.2 1.1 0 2 0.8 2.2 1.9L44.7 37.1z" class="a"/></svg>`
Hi! I'm interested in helping out with this project and it looks like these UI changes might be a good place to get started.
I've noticed there are a few Issues raised for the flash messages, so I'd be happy to do all these together since they rely on underlying styles for the flash messages being altered. Has anyone started work on these yet, or are you happy for me to have a look at this?
Hi @jcmcreate! Afaik nobody has started on this issue yet. If you would like to, by all means jump in!
There is also a big list of UI changes suggested [here](https://github.com/freedomofpress/securedrop/issues/1536) that I'm thinking I may turn into a checklist w/ associated "sub"-issues next week (a bit disorganized as is, but great stuff nonetheless). If you do start working on something, please do make a comment on the relevant issue, so others don't also start working on it.
@fowlslegs Thanks! I'm more than happy to work on this issue, and I'll comment in any others that I could work alongside on too.
Are there any branches where the UI changes are being worked on at present?
@jcmcreate We tend to be pretty granular with PRs when possible because it makes review easier, and we end up getting more new code in faster when we do it one little piece at a time. So in order to make your PR granular, you'll want to branch off develop and generally implement just a single feature/fix. Though, if a number of features are related feel free to implement them in the same branch/PR (e.g., you could change all the flashed messages in a single PR, but don't also change some other UI element as well).
@fowlslegs Makes sense :) I'll do as you suggest and add all the flashes in a single PR. Some of the underlying styles for all the flashes have been altered slightly, so it would make sense to do them all in one bulk. | 2017-02-05T19:22:51Z | [] | [] |
freedomofpress/securedrop | 1,616 | freedomofpress__securedrop-1616 | [
"1580"
] | 812fb5c2a95af8f2ec73be77313f661b30dd2866 | diff --git a/testinfra/conftest.py b/testinfra/conftest.py
new file mode 100644
--- /dev/null
+++ b/testinfra/conftest.py
@@ -0,0 +1,38 @@
+"""
+Configuration for TestInfra test suite for SecureDrop.
+Handles importing host-specific test vars, so test functions
+can be reused across multiple hosts, with varied targets.
+
+Vars should be placed in `testinfra/vars/<hostname>.yml`.
+"""
+
+import os
+import sys
+import yaml
+import pytest
+
+
+target_host = os.environ['SECUREDROP_TESTINFRA_TARGET_HOST']
+assert target_host != ""
+
+
+def securedrop_import_testinfra_vars(hostname, with_header=False):
+ """
+ Import vars from a YAML file to populate tests with host-specific
+ values used in checks. For instance, the SecureDrop docroot will
+ be under /vagrant in development, but /var/www/securedrop in staging.
+
+ Vars must be stored in `testinfra/vars/<hostname>.yml`.
+ """
+ filepath = os.path.join(os.path.dirname(__file__), "vars", hostname+".yml")
+ hostvars = yaml.load(open(filepath, 'r'))
+ if with_header:
+ hostvars = dict(securedrop_test_vars=hostvars)
+ return hostvars
+
+
+
+
+
+def pytest_namespace():
+ return securedrop_import_testinfra_vars(target_host, with_header=True)
| diff --git a/docs/development/config_tests.rst b/docs/development/config_tests.rst
new file mode 100644
--- /dev/null
+++ b/docs/development/config_tests.rst
@@ -0,0 +1,115 @@
+Configuration Tests
+===================
+
+testinfra_ tests verify the end state of the vagrant machines. Any
+changes to the Ansible configuration should have a corresponding
+config test.
+
+.. _testinfra: https://testinfra.readthedocs.io/en/latest/
+
+Installation
+------------
+
+.. code:: sh
+
+ pip install -r testinfra/requirements.txt
+
+Running the tests
+-----------------
+
+In order to run the tests, first create and provision the VM you intend
+to test:
+
+.. code:: sh
+
+ vagrant up development
+ vagrant up /staging/
+
+.. note:: The staging machines must be rebooted in order to finalize
+ the iptables config. You must manually reboot the machines via
+ ``vagrant reload /staging/`` prior to running the config tests
+ to ensure the config is valid.
+
+Running all VMs concurrently may cause performance
+problems if you have less than 8GB of RAM. You can isolate specific
+machines for faster testing:
+
+.. code:: sh
+
+ $ ./testinfra/test.py development
+ $ ./testinfra/test.py app-staging
+ $ ./testinfra/test.py mon-staging
+
+.. note:: The config tests for the ``app-prod`` and ``mon-prod`` hosts are
+ incomplete. Further changes are necessary to run the tests via
+ SSH over Authenticated Tor Hidden Service (ATHS), for both local
+ testing via Vagrant and automated testing via CI.
+
+Test failure against any host will generate a report with informative output
+about the specific test that triggered the error. The wrapper script
+will also exit with a non-zero status code.
+
+Updating the tests
+------------------
+
+Changes to the Ansible config should result in failing config tests, but
+only if an existing task was modified. If you add a new task, make
+sure to add a corresponding config test to validate that state after a
+new provisioning run. Tests import variables from YAML files that are
+separate from the Ansible playbooks: ::
+
+ testinfra/vars/
+ ├── app-prod.yml
+ ├── app-staging.yml
+ ├── build.yml
+ ├── development.yml
+ ├── mon-prod.yml
+ └── mon-staging.yml
+
+Any variable changes in the Ansible config should have a corresponding
+entry in these vars files. These vars are dynamically loaded for each
+host via the ``testinfra/conftest.py`` file. Make sure to add your tests to
+the relevant location for the host you plan to test: ::
+
+ testinfra/app/
+ ├── apache
+ │ ├── test_apache_journalist_interface.py
+ │ ├── test_apache_service.py
+ │ ├── test_apache_source_interface.py
+ │ └── test_apache_system_config.py
+ ├── test_apparmor.py
+ ├── test_appenv.py
+ └── test_ossec.py
+
+In the example above, to add a new test for the ``app-staging`` host,
+add a new file to the ``testinfra/app`` directory.
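+
+A minimal sketch of a test consuming these vars might look like the
+following (``File`` is the testinfra module fixture; the exact keys
+available depend on the vars file for the target host, so treat the
+names below as illustrative):
+
+.. code:: python
+
+    import pytest
+
+    # Loaded into the pytest namespace by testinfra/conftest.py.
+    securedrop_test_vars = pytest.securedrop_test_vars
+
+
+    def test_securedrop_code_directory(File):
+        """The code directory declared in the vars file should exist."""
+        code_dir = File(securedrop_test_vars['securedrop_code'])
+        assert code_dir.is_directory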
+
+Config test layout
+------------------
+
+The config tests are mostly broken up according to machines in the
+Vagrantfile: ::
+
+ testinfra/
+ ├── app
+ ├── app-code
+ ├── build
+ ├── common
+ ├── development
+ └── mon
+
+Ideally the config tests would be broken up according to roles,
+mirroring the Ansible configuration. Until the Ansible layout is
+reorganized, the tests remain rather tightly coupled to hosts. The
+layout of config tests is therefore subject to change.
+
+Testing strategy
+----------------
+
+The config tests currently emphasize testing implementation rather than
+functionality. This is a temporary measure to increase the current testing
+baseline for validating the Ansible provisioning flow, to aid in migrating
+to a current version of Ansible (v2+). After the Ansible version is current,
+the config tests can be improved to validate behavior, such as confirming
+ports are blocked via external network calls, rather than simply checking
+that the iptables rules are formatted as expected.
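+
+As a rough illustration of that behavior-based style (not something the
+suite does today), such a check might probe a port directly via the
+testinfra ``Command`` fixture; the address below is only a placeholder:
+
+.. code:: python
+
+    def test_ssh_not_reachable_over_lan(Command):
+        # SSH should only be reachable via the Tor hidden service, so a
+        # direct connection attempt to the LAN address should fail.
+        result = Command("nc -z -w 5 10.0.1.4 22")
+        assert result.rc != 0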
diff --git a/docs/development/spec_tests.rst b/docs/development/spec_tests.rst
deleted file mode 100644
--- a/docs/development/spec_tests.rst
+++ /dev/null
@@ -1,118 +0,0 @@
-Serverspec Tests
-================
-
-serverspec_ tests verify the end state of the vagrant machines. Any
-changes to the Ansible configuration should have a corresponding
-spectest.
-
-.. _serverspec: http://serverspec.org
-
-Install directions (Ubuntu/Debian)
----------------------------
-
-.. code:: sh
-
- apt-get install bundler
- cd spec_tests/
- bundle update
-
-.. note:: If you run into an error regarding the version of
- ``bundler`` such as "Bundler could not find compatible versions",
- then you can install and run a particular version of ``bundler`` via:
-
- .. code:: sh
-
- gem install bundler -v 1.12.5
- bundle _1.12.5_ install
- bundle _1.12.5_ exec rake spec:development
-
-
-Running the tests
------------------
-
-In order to run the tests, each VM will be created and provisioned, if
-necessary. Running all VMs concurrently may cause performance
-problems if you have less than 8GB of RAM. You can isolate specific
-machines for faster testing:
-
-.. code:: sh
-
- $ cd spec_tests
- $ bundle exec rake --tasks # check output for desired machine
- rake spec:app-prod # Run spectests against app-prod
- rake spec:app-staging # Run spectests against app-staging
- rake spec:build # Run spectests against build
- rake spec:development # Run spectests against development
- rake spec:mon-prod # Run spectests against mon-prod
- rake spec:mon-staging # Run spectests against mon-staging
-
- $ bundle exec rake spec:staging
-
-The invocation above will run tests only against the ``staging`` VMs.
-You can run against multiple environments in a single invocation:
-
-.. code:: sh
-
- $ cd spec_tests
- $ bundle exec rake spec:development spec:staging
-
-Test failure against any host will cause Serverspec to exit, and stop
-executing tests against subsequent hosts. In order to run the ``spec:prod``
-task, you will need to configure :ref:`SSH access over Tor <ssh_over_tor>`.
-
-Updating the tests
-------------------
-
-Changes to the ansible config should result in failing spectests, but
-only if an existing task was modified. If you add a new task, make
-sure to add a corresponding spectest to validate that state after a
-new provisioning run. Tests import variables from separate YAML files
-than the Ansible playbooks: ::
-
- spec_tests/spec/vars
- ├── development.yml
- └── staging.yml
-
-Any variable changes in the Ansible config should have a corresponding
-entry in these vars files. These vars are dynamically loaded for each
-host via the ``spec_helper.rb`` file. Make sure to add your tests to
-relevant location for the host you plan to test: ::
-
- spec_tests/spec/app-staging
- ├── apache_spec.rb
- ├── apparmor_spec.rb
- ├── iptables_spec.rb
- ├── ossec_agent_spec.rb
- ├── securedrop_app_spec.rb
- ├── securedrop_app_test_spec.rb
- └── tor_spec.rb
-
-In the example above, to add a new test for the ``app-staging`` host,
-add a new file to the ``spec_tests/spec/app-staging`` directory.
-
-Spectest layout
----------------
-
-The serverspec tests are mostly broken up according to machines in the
-Vagrantfile: ::
-
- spec_tests/spec
- ├── app-staging
- ├── build
- ├── common-development
- ├── common-staging
- ├── development
- ├── mon-staging
- └── vars
-
-There are a few exceptions:
-
-- ``common-development`` shares tests between ``development`` and
- ``app-staging``
-- ``common-staging`` shares tests between ``app-staging`` and
- ``mon-staging``
-
-Ideally the serverspec tests would be broken up according to roles,
-mirroring the Ansible configuration. Prior to the reorganization of
-the Ansible layout, the tests are rather tightly coupled to hosts. The
-layout of spectests is therefore subject to change.
diff --git a/spec_tests/.bundle/config b/spec_tests/.bundle/config
deleted file mode 100644
--- a/spec_tests/.bundle/config
+++ /dev/null
@@ -1,3 +0,0 @@
----
-BUNDLE_PATH: ./gems/
-BUNDLE_DISABLE_SHARED_GEMS: '1'
diff --git a/spec_tests/.gemrc b/spec_tests/.gemrc
deleted file mode 100644
--- a/spec_tests/.gemrc
+++ /dev/null
@@ -1,8 +0,0 @@
-:backtrace: false
-:benchmark: false
-:bulk_threshold: 1000
-:sources:
-- http://rubygems.org/
-:update_sources: true
-:verbose: true
-gem: --no-ri --no-rdoc
diff --git a/spec_tests/.rspec b/spec_tests/.rspec
deleted file mode 100644
--- a/spec_tests/.rspec
+++ /dev/null
@@ -1,4 +0,0 @@
---color
-#--format documentation
---require spec_helper
---order random
diff --git a/spec_tests/Gemfile b/spec_tests/Gemfile
deleted file mode 100644
--- a/spec_tests/Gemfile
+++ /dev/null
@@ -1,5 +0,0 @@
-source 'https://rubygems.org'
-
-gem 'serverspec'
-gem 'rake'
-
diff --git a/spec_tests/Rakefile b/spec_tests/Rakefile
deleted file mode 100644
--- a/spec_tests/Rakefile
+++ /dev/null
@@ -1,78 +0,0 @@
-require 'rake'
-require 'rspec/core/rake_task'
-
-
-task :spec => 'spec:all'
-task :staging => 'spec:staging'
-task :default => :spec
-
-# this function accepts a hostname and returns a hash
-# that includes roles assigned to that host. changes made
-# in ansible config must be manually duplicated here.
-def roles(host)
- roles = []
- case host
- when /^development$/
- (roles << %w(app-test common-development development)).flatten!
- when /^build$/
- (roles << "build").flatten!
- when /^app-staging/
- (roles << %w(app-test app-general common-development common app-staging grsecurity ip6tables)).flatten!
- when /^mon-staging/
- (roles << %w(mon-general mon-staging common grsecurity ip6tables)).flatten!
- when /^app-prod/
- (roles << %w(app-prod app-general common common-development grsecurity ip6tables)).flatten!
- when /^mon-prod/
- (roles << %w(mon-general mon-prod common grsecurity ip6tables)).flatten!
- end
-
- # SSH access over Tor required for prod config,
- # so assume machines have already been provisioned
- # and ATHS values fetched, then force SSH over Tor.
- if host.match(/-prod$/)
- ENV['SECUREDROP_SSH_OVER_TOR'] = "1"
- end
-
- # DigitalOcean droplets don't support custom kernels, so remove
- # any planned spectests for grsecurity. This is only relevant
- # for the app and mon hosts, so don't delay other tests with check.
- if host.match(/^(app|mon)/)
- if `vagrant status #{host} --machine-readable`.match(/#{host},provider-name,digital_ocean/m)
- roles.delete('grsecurity')
- end
- end
- roles
-end
-
-
-# manually declare hosts, and assign them roles for testing
-hosts = %w(development build app-staging mon-staging app-prod mon-prod).map do |host|
- {
- :name => host,
- :roles => roles(host)
- }
-end
-
-
-# dynamically determine roles per host
-# this brilliant pragma conceived by @vincentbernat, see
-# https://github.com/vincentbernat/serverspec-example
-# http://vincent.bernat.im/en/blog/2014-serverspec-test-infrastructure.html
-namespace :spec do
- # assign all hosts to serverspec task run
- task :all => hosts.map { |h| h[:name] }
-
- # assign only /staging/ hosts to serverspec task run
- task :staging => hosts.select{ |h| h[:name].match(/staging$/) }.map{ |h| h[:name] }
- task :prod => hosts.select{ |h| h[:name].match(/prod$/) }.map{ |h| h[:name] }
-
- hosts.each do |host|
- desc "Run spectests against #{host[:name]}"
- RSpec::Core::RakeTask.new(host[:name].to_sym) do |t|
- ENV['TARGET_HOST'] = host[:name]
- t.pattern = './spec/{' + host[:roles].join(",") + '}/*_spec.rb'
- puts "Running spectests for host '#{host[:name]}'..."
- t.verbose = false
- end
- end
-end
diff --git a/spec_tests/spec/app-general/apache_spec.rb b/spec_tests/spec/app-general/apache_spec.rb
deleted file mode 100644
--- a/spec_tests/spec/app-general/apache_spec.rb
+++ /dev/null
@@ -1,279 +0,0 @@
-# declare required apache packages
-apache_packages = [
- 'apache2-mpm-worker',
- 'libapache2-mod-wsgi',
- 'libapache2-mod-xsendfile',
-]
-# ensure required apache packages are installed
-apache_packages.each do |apache_package|
- describe package(apache_package) do
- it { should be_installed }
- end
-end
-
-# ensure required apache2 security config file is present
-# TODO: /etc/apache2/security is superfluous, see issue #643
-# once ansible playbook is updated to remove it, this check
-# should be removed as well.
-describe file('/etc/apache2/security') do
- it { should be_file }
- it { should be_owned_by 'root' }
- it { should be_mode '644' }
- its(:content) { should match "ServerTokens Prod" }
- its(:content) { should match "ServerSignature Off" }
- its(:content) { should match "TraceEnable Off" }
-end
-
-# declare required apache2 config settings
-apache2_config_settings = [
- 'Mutex file:${APACHE_LOCK_DIR} default',
- 'PidFile ${APACHE_PID_FILE}',
- 'Timeout 60',
- 'KeepAlive On',
- 'MaxKeepAliveRequests 100',
- 'KeepAliveTimeout 5',
- 'User www-data',
- 'Group www-data',
- 'AddDefaultCharset UTF-8',
- 'DefaultType None',
- 'HostnameLookups Off',
- 'ErrorLog /dev/null',
- 'LogLevel crit',
- 'IncludeOptional mods-enabled/*.load',
- 'IncludeOptional mods-enabled/*.conf',
- 'Include ports.conf',
- 'IncludeOptional sites-enabled/*.conf',
- 'ServerTokens Prod',
- 'ServerSignature Off',
- 'TraceEnable Off',
-]
-# ensure required apache2 config settings are present
-describe file('/etc/apache2/apache2.conf') do
- it { should be_file }
- it { should be_owned_by 'root' }
- it { should be_mode '644' }
- apache2_config_settings.each do |apache2_config_setting|
- apache2_config_setting_regex = Regexp.quote(apache2_config_setting)
- its(:content) { should match /^#{apache2_config_setting_regex}$/ }
- end
-end
-
-# ensure apache2 ports conf is present
-describe file('/etc/apache2/ports.conf') do
- it { should be_file }
- it { should be_owned_by 'root' }
- it { should be_mode '644' }
- ["80", "8080"].each do |listening_port|
- listening_regex = Regexp.quote("Listen #{property['apache_listening_address']}:#{listening_port}")
- its(:content) { should match /^#{listening_regex}$/ }
- end
-end
-
-# declare desired apache headers for vhost configs
-apache2_common_headers = [
- 'Header edit Set-Cookie ^(.*)$ $1;HttpOnly',
- 'Header always append X-Frame-Options: DENY',
- 'Header set X-XSS-Protection: "1; mode=block"',
- 'Header set X-Content-Type-Options: nosniff',
- 'Header set X-Download-Options: noopen',
- # using string literal syntax here (%{}) to avoid manual quote escaping
- %{Header set X-Content-Security-Policy: "default-src 'self'"},
- %{Header set Content-Security-Policy: "default-src 'self'"},
- 'Header unset Etag',
-]
-# declare block of directory declarations common to both
-# source and journalist interfaces.
-common_apache2_directory_declarations = <<eos
-<Directory />
- Options None
- AllowOverride None
- Order deny,allow
- Deny from all
-</Directory>
-
-<Directory /var/www/>
- Options None
- AllowOverride None
- <Limit GET POST HEAD>
- Order allow,deny
- allow from #{property['apache_allow_from']}
- </Limit>
- <LimitExcept GET POST HEAD>
- Order deny,allow
- Deny from all
- </LimitExcept>
-</Directory>
-
-<Directory #{property['securedrop_code']}>
- Options None
- AllowOverride None
- <Limit GET POST HEAD>
- Order allow,deny
- allow from #{property['apache_allow_from']}
- </Limit>
- <LimitExcept GET POST HEAD>
- Order deny,allow
- Deny from all
- </LimitExcept>
-</Directory>
-eos
-# declare desired apache2 available sites
-apache2_available_sites = [
- '/etc/apache2/sites-available/journalist.conf',
- '/etc/apache2/sites-available/source.conf',
-]
-# check desired apache2 available sites for common headers
-apache2_available_sites.each do |apache2_available_site|
- describe file(apache2_available_site) do
- it { should be_file }
- it { should be_owned_by 'root' }
- it { should be_mode '644' }
- apache2_common_headers.each do |apache2_common_header|
- apache2_common_header_regex = Regexp.quote(apache2_common_header)
- its(:content) { should match /^#{apache2_common_header_regex}$/ }
- end
- its(:content) { should contain(common_apache2_directory_declarations) }
- end
-end
-
-# declare source-specific apache configs
-source_apache2_config_settings = [
- 'Header set Cache-Control "max-age=1800, must-revalidate"',
- "<VirtualHost #{property['apache_listening_address']}:80>",
- "DocumentRoot #{property['securedrop_code']}/static",
- "Alias /static #{property['securedrop_code']}/static",
- "WSGIDaemonProcess source processes=2 threads=30 display-name=%{GROUP} python-path=#{property['securedrop_code']}",
- 'WSGIProcessGroup source',
- 'WSGIScriptAlias / /var/www/source.wsgi/',
- 'AddType text/html .py',
- 'XSendFile Off',
- 'LimitRequestBody 524288000',
- 'ErrorDocument 400 /notfound',
- 'ErrorDocument 401 /notfound',
- 'ErrorDocument 403 /notfound',
- 'ErrorDocument 404 /notfound',
- 'ErrorDocument 500 /notfound',
- "ErrorLog #{property['apache_source_log']}",
-]
-# check source-specific apache2 config
-describe file('/etc/apache2/sites-available/source.conf') do
- it { should be_file }
- it { should be_owned_by 'root' }
- it { should be_grouped_into 'root' }
- it { should be_mode '644' }
- source_apache2_config_settings.each do |source_apache2_config_setting|
- source_apache2_config_setting_regex = Regexp.quote(source_apache2_config_setting)
- its(:content) { should match /^#{source_apache2_config_setting_regex}$/ }
- end
-end
-
-# declare journalist-specific apache configs
-journalist_apache2_config_settings = [
- 'Header set Cache-Control "max-age=1800"',
- "<VirtualHost #{property['apache_listening_address']}:8080>",
- "DocumentRoot #{property['securedrop_code']}/static",
- "Alias /static #{property['securedrop_code']}/static",
- "WSGIDaemonProcess journalist processes=2 threads=30 display-name=%{GROUP} python-path=#{property['securedrop_code']}",
- 'WSGIProcessGroup journalist',
- 'WSGIScriptAlias / /var/www/journalist.wsgi/',
- 'AddType text/html .py',
- 'XSendFile On',
- 'XSendFilePath /var/lib/securedrop/store/',
- 'XSendFilePath /var/lib/securedrop/tmp/',
- 'ErrorLog /var/log/apache2/journalist-error.log',
- 'CustomLog /var/log/apache2/journalist-access.log combined',
-]
-# check journalist-specific apache2 config
-describe file('/etc/apache2/sites-available/journalist.conf') do
- it { should be_file }
- it { should be_owned_by 'root' }
- it { should be_grouped_into 'root' }
- it { should be_mode '644' }
- journalist_apache2_config_settings.each do |journalist_apache2_config_setting|
- journalist_apache2_config_setting_regex = Regexp.quote(journalist_apache2_config_setting)
- its(:content) { should match /^#{journalist_apache2_config_setting_regex}$/ }
- end
-end
-
-# declare apache2 enabled modules
-apache2_enabled_modules = [
- 'access_compat',
- 'authn_core',
- 'alias',
- 'authz_core',
- 'authz_host',
- 'authz_user',
- 'deflate',
- 'filter',
- 'dir',
- 'headers',
- 'mime',
- 'mpm_event',
- 'negotiation',
- 'reqtimeout',
- 'rewrite',
- 'wsgi',
- 'xsendfile',
-]
-# ensure required apache2 modules are enabled
-apache2_enabled_modules.each do |enabled_module|
- describe command("a2query -m #{enabled_module}") do
- its(:stdout) { should match /^#{enabled_module} \(enabled/ }
- end
-end
-
-# declare apache2 disabled modules
-apache2_disabled_modules = [
- 'auth_basic',
- 'authn_file',
- 'autoindex',
- 'env',
- 'setenvif',
- 'status',
-]
-# ensure unwanted apache2 modules are disabled
-apache2_disabled_modules.each do |disabled_module|
- describe command("a2query -m #{disabled_module}") do
- its(:stderr) { should match /^No module matches #{disabled_module}/ }
- end
-end
-
-# Are source and journalist interface sites enabled?
-['source', 'journalist'].each do |enabled_site|
- describe command("a2query -s #{enabled_site}") do
- its(:stdout) { should match /^#{enabled_site} \(enabled/ }
- end
-end
-
-# Are default sites disabled?
-['000-default'].each do |disabled_site|
- describe command("a2query -s #{disabled_site}") do
- its(:stderr) { should match /^No site matches #{disabled_site}/ }
- end
-end
-
-# Is apache running as user X
-describe service('apache2') do
- it { should be_enabled }
- it { should be_running }
-end
-
-# ensure securedrop user account is configured properly
-describe user('www-data') do
- it { should exist }
- it { should have_home_directory '/var/www' }
- it { should have_login_shell '/usr/sbin/nologin' }
-end
-
-# Apache should be listening on 80 and 8080.
-# In staging, expect the service to be bound to 0.0.0.0,
-# but in prod, it should be restricted to 127.0.0.1.
-listening_ports = ['80', '8080']
-listening_ports.each do |listening_port|
- describe port(listening_port) do
- it { should be_listening.with('tcp') }
- it { should be_listening.on(property['apache_listening_address']).with('tcp') }
- end
-end
-
-# Check firewall rule
diff --git a/spec_tests/spec/app-general/ossec_agent_spec.rb b/spec_tests/spec/app-general/ossec_agent_spec.rb
deleted file mode 100644
--- a/spec_tests/spec/app-general/ossec_agent_spec.rb
+++ /dev/null
@@ -1,30 +0,0 @@
-# ensure hosts file references mon server by ip
-describe file('/etc/hosts') do
- localhost_regex = /^#{Regexp.quote('127.0.0.1')}(\s+#{property['app_hostname']}){2}$/
- its(:content) { should match localhost_regex }
- # TODO: the "securedrop-monitor-server-alias" is an artifact of
- # using the vagrant-hostmanager plugin. it may no longer be necessary
- mon_host_regex = Regexp.quote("#{property['monitor_ip']} #{property['monitor_hostname']} securedrop-monitor-server-alias")
- its(:content) { should match /^#{mon_host_regex}$/ }
-end
-
-# Regression test to check for duplicate entries.
-describe command('uniq --repeated /etc/hosts') do
- its(:stdout) { should eq "" }
-end
-
-# ensure custom ossec-agent package is installed
-describe package('securedrop-ossec-agent') do
- it { should be_installed }
-end
-
-# ensure client keyfile for ossec-agent is present
-describe file('/var/ossec/etc/client.keys') do
- it { should be_file }
- it { should be_mode '644' }
- it { should be_owned_by 'root' }
- it { should be_grouped_into 'ossec' }
- app_ip_regex = Regexp.quote("#{property['app_hostname']} #{property['app_ip']}")
- # this regex checks for a hex string of 64 chars, not a specific value
- its(:content) { should match /^1024 #{app_ip_regex} [0-9a-f]{64}$/ }
-end
diff --git a/spec_tests/spec/app-general/securedrop_app_spec.rb b/spec_tests/spec/app-general/securedrop_app_spec.rb
deleted file mode 100644
--- a/spec_tests/spec/app-general/securedrop_app_spec.rb
+++ /dev/null
@@ -1,58 +0,0 @@
-# declare securedrop app directories
-securedrop_app_directories = [
- property['securedrop_code'],
- property['securedrop_data'],
- "#{property['securedrop_data']}/store",
- "#{property['securedrop_data']}/keys",
- "#{property['securedrop_data']}/tmp",
-]
-
-# ensure securedrop app directories exist with correct permissions
-securedrop_app_directories.each do |securedrop_app_directory|
- describe file(securedrop_app_directory) do
- it { should be_directory }
- it { should be_owned_by property['securedrop_user'] }
- it { should be_grouped_into property['securedrop_user'] }
- it { should be_mode '700' }
- end
-end
-
-# ensure securedrop-app-code package is installed
-describe package('securedrop-app-code') do
- it { should be_installed }
-end
-
-# ensure test gpg key is present in app keyring
-describe command('su -s /bin/bash -c "gpg --homedir /var/lib/securedrop/keys --list-keys 28271441" www-data') do
- its(:exit_status) { should eq 0 }
- expected_output = <<-eos
-pub 4096R/28271441 2013-10-12
-uid SecureDrop Test/Development (DO NOT USE IN PRODUCTION)
-sub 4096R/A2201B2A 2013-10-12
-
-eos
- its(:stdout) { should eq expected_output }
-end
-
-# ensure default logo header file exists
-# TODO: add check for custom logo header file
-describe file("#{property['securedrop_code']}/static/i/logo.png") do
- it { should be_file }
- # TODO: ansible task declares mode 400 but the file ends up as 644 on host
- it { should be_mode '644' }
- it { should be_owned_by property['securedrop_user'] }
- it { should be_grouped_into property['securedrop_user'] }
-end
-
-# ensure cron job for securedrop tmp dir cleanup is enabled
-describe cron do
- it { should have_entry "@daily #{property['securedrop_code']}/manage.py clean-tmp" }
-end
-
-# ensure directory for worker logs is present
-describe file('/var/log/securedrop_worker') do
- it { should be_directory }
- it { should be_mode '644' }
- it { should be_owned_by 'root' }
- it { should be_grouped_into 'root' }
-end
diff --git a/spec_tests/spec/app-general/tor_spec.rb b/spec_tests/spec/app-general/tor_spec.rb
deleted file mode 100644
--- a/spec_tests/spec/app-general/tor_spec.rb
+++ /dev/null
@@ -1,36 +0,0 @@
-# declare app-staging torrc settings
-torrc_settings = [
- 'HiddenServiceDir /var/lib/tor/services/source',
- 'HiddenServicePort 80 127.0.0.1:80',
- 'HiddenServiceDir /var/lib/tor/services/journalist',
- 'HiddenServicePort 80 127.0.0.1:8080',
- 'HiddenServiceAuthorizeClient stealth journalist',
-]
-# ensure torrc for app-staging host contains entries
-# for both journalist and source ATHSes. the admin
-# ATHS and other settings are already checked as part of the
-# common-staging serverspec tests
-describe file('/etc/tor/torrc') do
- torrc_settings.each do |torrc_setting|
- torrc_setting_regex = Regexp.quote(torrc_setting)
- its(:content) { should match /^#{torrc_setting_regex}$/ }
- end
-end
-
-# declare app-specific tor service directories,
-# for mode and ownership checks. the parent dir
-# and the "ssh" service are validated in the
-# common-staging spectests.
-tor_service_directories = %w(
- /var/lib/tor/services/journalist
- /var/lib/tor/services/source
-)
-# ensure tor service dirs are owned by tor user and mode 0700
-tor_service_directories.each do |tor_service_directory|
- describe file(tor_service_directory) do
- it { should be_directory }
- it { should be_mode('700') }
- it { should be_owned_by 'debian-tor' }
- it { should be_grouped_into 'debian-tor' }
- end
-end
diff --git a/spec_tests/spec/app-prod/apparmor_spec.rb b/spec_tests/spec/app-prod/apparmor_spec.rb
deleted file mode 100644
--- a/spec_tests/spec/app-prod/apparmor_spec.rb
+++ /dev/null
@@ -1,130 +0,0 @@
-# Apparmor package dependencies
-['apparmor', 'apparmor-utils' ].each do |pkg|
- describe package(pkg) do
- it { should be_installed }
- end
-end
-
-# Check that apparmor is enabled.
-# The command returns error code if AppArmor not enabled
-describe command("aa-status --enabled") do
- its(:exit_status) { should eq 0 }
-end
-
-# Staging role has two profiles in complain mode:
-# tor and apache2. Make sure the config file does NOT include
-# that flag, since restarting apparmor will load whatever's on disk
-['tor', 'apache2'].each do |complaining_process|
- describe file("/etc/apparmor.d/usr.sbin.#{complaining_process}") do
- it { should be_file }
- it { should be_owned_by 'root' }
- it { should be_mode '644' }
- its(:content) { should_not match /^\/usr\/sbin\/#{complaining_process} flags=\(complain\) \{/ }
- end
-end
-
-# declare expected app-armor capabilities for apache2
-apache2_capabilities = %w(
- kill
- net_bind_service
- sys_ptrace
-)
-# check for exact list of expected app-armor capabilities for apache2
-describe command('perl -nE \'/^\s+capability\s+(\w+),$/ && say $1\' /etc/apparmor.d/usr.sbin.apache2') do
- apache2_capabilities.each do |apache2_capability|
- its(:stdout) { should contain(apache2_capability) }
- end
-end
-
-# ensure no extra capabilities are defined for apache2
-describe command('grep -ic capability /etc/apparmor.d/usr.sbin.apache2') do
- its(:stdout) { should eq apache2_capabilities.length.to_s + "\n" }
-end
-
-# check for exact list of expected app-armor capabilities for tor
-describe command('perl -nE \'/^\s+capability\s+(\w+),$/ && say $1\' /etc/apparmor.d/usr.sbin.tor') do
- its(:stdout) { should contain("setgid") }
-end
-
-# ensure no extra capabilities are defined for tor
-describe command('grep -ic capability /etc/apparmor.d/usr.sbin.tor') do
- its(:stdout) { should eq "1\n" }
-end
-
-# Explicitly check that enforced profiles are NOT
-# present in /etc/apparmor.d/disable. Polling aa-status
-# only checks the last config that was loaded, whereas
-# checking for symlinks in the `disabled` dir checks
-# the config to be loaded when the apparmor service is bounced.
-enforced_profiles = [
- 'ntpd',
- 'apache2',
- 'tcpdump',
- 'tor',
-]
-enforced_profiles.each do |enforced_profile|
- describe file("/etc/apparmor.d/disabled/usr.sbin.#{enforced_profile}") do
- it { should_not be_file }
- it { should_not be_directory }
- it { should_not be_symlink }
- end
-end
-
-# declare app-armor profiles expected to be enforced
-enforced_apparmor_profiles = %w(
- /sbin/dhclient
- /usr/lib/NetworkManager/nm-dhcp-client.action
- /usr/lib/connman/scripts/dhclient-script
- /usr/sbin/apache2
- /usr/sbin/apache2//DEFAULT_URI
- /usr/sbin/apache2//HANDLING_UNTRUSTED_INPUT
- /usr/sbin/ntpd
- /usr/sbin/tcpdump
- /usr/sbin/tor
- system_tor
-)
-# check for enforced app-armor profiles
-# this klunky one-liner uses bash, because serverspec defaults to sh,
-# then provides START and STOP patterns to sed, filters by profile
-# names according to leading whitespace, then trims leading whitespace
-describe command("aa-status") do
- enforced_apparmor_profiles.each do |enforced_apparmor_profile|
- its(:stdout) { should contain(enforced_apparmor_profile).from(/profiles are in enforce mode/).to(/profiles are in complain mode/) }
- end
-end
-
-# ensure number of expected enforced profiles matches number checked
-describe command("aa-status --enforced") do
- its(:stdout) { should eq enforced_apparmor_profiles.length.to_s + "\n" }
-end
-
-# declare app-armor profiles expected to be complaining
-# the staging hosts enabled "complain" mode for more verbose
-# logging during development and testing; production hosts
-# should not have any complain mode.
-complaining_apparmor_profiles = %w(
- /usr/sbin/apache2
- /usr/sbin/tor
-)
-
-# check for complaining app-armor profiles; should NOT exist in prod
-describe command("aa-status") do
- complaining_apparmor_profiles.each do |complaining_apparmor_profile|
- its(:stdout) { should_not contain(complaining_apparmor_profile).from(/profiles are in complain mode/).to(/\d+ processes have profiles defined/) }
- end
-end
-
-# ensure number of expected complaining profiles matches number checked
-describe command("aa-status --complaining") do
- its(:stdout) { should eq "0\n" }
-end
-
-# ensure number of total profiles matches enforced profiles
-describe command("aa-status --profiled") do
- its(:stdout) { should eq enforced_apparmor_profiles.length.to_s + "\n" }
-end
-
-# Ensure that there are no processes that are unconfined but have a profile
-describe command("aa-status") do
- its(:stdout) { should contain("0 processes are unconfined but have a profile defined") }
-end
diff --git a/spec_tests/spec/app-prod/iptables_spec.rb b/spec_tests/spec/app-prod/iptables_spec.rb
deleted file mode 100644
--- a/spec_tests/spec/app-prod/iptables_spec.rb
+++ /dev/null
@@ -1,79 +0,0 @@
-# declare desired iptables rules
-# These rules should be present in prod and staging
-# TODO: There are also hardcoded IP addresses in this section.
-desired_iptables_rules = [
- '-P INPUT DROP',
- '-P FORWARD DROP',
- '-P OUTPUT DROP',
- '-N LOGNDROP',
- '-A INPUT -p tcp -m state --state RELATED,ESTABLISHED -m comment --comment "Allow traffic back for tor" -j ACCEPT',
- '-A INPUT -i lo -p tcp -m tcp --dport 80 -m state --state NEW,RELATED,ESTABLISHED -m comment --comment "Allow tor connection from local loopback to connect to source int" -j ACCEPT',
- '-A INPUT -i lo -p tcp -m tcp --dport 8080 -m state --state NEW,RELATED,ESTABLISHED -m comment --comment "Allow tor connection from local loopback to connect to document int" -j ACCEPT',
- '-A INPUT -s 127.0.0.1/32 -d 127.0.0.1/32 -i lo -p tcp -m state --state RELATED,ESTABLISHED -m comment --comment "for redis worker all application user local loopback user" -j ACCEPT',
- '-A INPUT -s 8.8.8.8/32 -p tcp -m tcp --sport 53 -m state --state RELATED,ESTABLISHED -m comment --comment "tcp/udp dns" -j ACCEPT',
- '-A INPUT -s 8.8.8.8/32 -p udp -m udp --sport 53 -m state --state RELATED,ESTABLISHED -m comment --comment "tcp/udp dns" -j ACCEPT',
- '-A INPUT -p udp -m udp --sport 123 --dport 123 -m state --state RELATED,ESTABLISHED -m comment --comment ntp -j ACCEPT',
- '-A INPUT -p tcp -m multiport --sports 80,8080,443 -m state --state RELATED,ESTABLISHED -m comment --comment "apt updates" -j ACCEPT',
- "-A INPUT -s #{property['monitor_ip']}/32 -p udp -m udp --sport 1514 -m state --state RELATED,ESTABLISHED -m comment --comment \"OSSEC server agent\" -j ACCEPT",
- '-A INPUT -i lo -m comment --comment "Allow lo to lo traffic all protocols" -j ACCEPT',
- '-A INPUT -p tcp -m state --state INVALID -m comment --comment "drop but do not log inbound invalid state packets" -j DROP',
- '-A INPUT -m comment --comment "Drop and log all other incoming traffic" -j LOGNDROP',
- "-A OUTPUT -p tcp -m owner --uid-owner #{property['tor_user_uid']} -m state --state NEW,RELATED,ESTABLISHED -m comment --comment \"tor instance that provides ssh access\" -j ACCEPT",
- "-A OUTPUT -o lo -p tcp -m tcp --dport 22 -m owner --uid-owner #{property['tor_user_uid']} -m state --state NEW -m limit --limit 3/min --limit-burst 3 -m comment --comment \"Rate limit traffic from tor to the ssh dameon\" -j ACCEPT",
- "-A OUTPUT -o lo -p tcp -m tcp --dport 22 -m owner --uid-owner #{property['tor_user_uid']} -m state --state RELATED,ESTABLISHED -m comment --comment \"Allow the established traffic from tor to the ssh dameon\" -j ACCEPT",
- "-A OUTPUT -o lo -p tcp -m tcp --dport 22 -m owner --uid-owner #{property['tor_user_uid']} -m state --state NEW -m comment --comment \"Drop all other new connections from tor to the ssh dameon\" -j LOGNDROP",
- "-A OUTPUT -o lo -p tcp -m tcp --sport 80 -m owner --uid-owner #{property['apache_user_uid']} -m state --state RELATED,ESTABLISHED -m comment --comment \"Restrict the apache user outbound connections\" -j ACCEPT",
- "-A OUTPUT -o lo -p tcp -m tcp --sport 8080 -m owner --uid-owner #{property['apache_user_uid']} -m state --state RELATED,ESTABLISHED -m comment --comment \"Restrict the apache user outbound connections\" -j ACCEPT",
- "-A OUTPUT -s 127.0.0.1/32 -d 127.0.0.1/32 -o lo -p tcp -m owner --uid-owner #{property['apache_user_uid']} -m state --state NEW,RELATED,ESTABLISHED -m comment --comment \"for redis worker all application user local loopback user\" -j ACCEPT",
- "-A OUTPUT -m owner --uid-owner #{property['apache_user_uid']} -m comment --comment \"Drop all other traffic by the securedrop user\" -j LOGNDROP",
- "-A OUTPUT -m owner --gid-owner #{property['ssh_group_gid']} -m comment --comment \"Drop all other outbound traffic for ssh user\" -j LOGNDROP",
- '-A OUTPUT -d 8.8.8.8/32 -p tcp -m tcp --dport 53 -m owner --uid-owner 0 -m state --state NEW,RELATED,ESTABLISHED -m comment --comment "tcp/udp dns" -j ACCEPT',
- '-A OUTPUT -d 8.8.8.8/32 -p udp -m udp --dport 53 -m owner --uid-owner 0 -m state --state NEW,RELATED,ESTABLISHED -m comment --comment "tcp/udp dns" -j ACCEPT',
- '-A OUTPUT -p udp -m udp --sport 123 --dport 123 -m owner --uid-owner 0 -m state --state NEW,RELATED,ESTABLISHED -m comment --comment ntp -j ACCEPT',
- '-A OUTPUT -p tcp -m multiport --dports 80,8080,443 -m owner --uid-owner 0 -m state --state NEW,RELATED,ESTABLISHED -m comment --comment "apt updates" -j ACCEPT',
- "-A OUTPUT -d #{property['monitor_ip']}/32 -p udp -m udp --dport 1514 -m state --state NEW,RELATED,ESTABLISHED -m comment --comment \"OSSEC server agent\" -j ACCEPT",
- '-A OUTPUT -o lo -m comment --comment "Allow lo to lo traffic all protocols" -j ACCEPT',
- '-A OUTPUT -m comment --comment "Drop all other outgoing traffic" -j DROP',
- '-A LOGNDROP -p tcp -m limit --limit 5/min -j LOG --log-tcp-options --log-ip-options --log-uid',
- '-A LOGNDROP -p udp -m limit --limit 5/min -j LOG --log-ip-options --log-uid',
- '-A LOGNDROP -p icmp -m limit --limit 5/min -j LOG --log-ip-options --log-uid',
- '-A LOGNDROP -j DROP',
-]
-# declare unwanted iptables rules
-# These rules should have been removed by the `remove_authd_exceptions` role
-# TODO: The Vagrantfile virtualbox static IP was hardcoded into the two rules
-# below. This will need to be fixed, possibly by using something like
-# https://github.com/volanja/ansible_spec. Using the values for IP addresses
-# from the ansible inventory should cover most use cases (except inventories
-# with just the *.onion addresses).
-unwanted_iptables_rules = [
- "-A OUTPUT -d #{property['monitor_ip']} -p tcp --dport 1515 -m state --state NEW,ESTABLISHED,RELATED -j ACCEPT -m comment --comment \"ossec authd rule only required for initial agent registration\"",
- "-A INPUT -s #{property['monitor_ip']} -p tcp --sport 1515 -m state --state ESTABLISHED,RELATED -v ACCEPT -m comment --comment \"ossec authd rule only required for initial agent registration\"",
-
- # These rules have the wrong interface for the vagrant mon-staging machine.
- # Adding them in here to make sure ansible config changes don't introduce regressions.
- '-A INPUT -s 10.0.2.15/32 -p udp -m udp --sport 1514 -m state --state RELATED,ESTABLISHED -m comment --comment "OSSEC server agent" -j ACCEPT',
- '-A OUTPUT -d 10.0.2.15/32 -p udp -m udp --dport 1514 -m state --state NEW,RELATED,ESTABLISHED -m comment --comment "OSSEC server agent" -j ACCEPT',
-
- # These rules belong in the staging environment, not production.
- '-A OUTPUT -p tcp -m tcp --sport 8080 -m state --state NEW,RELATED,ESTABLISHED -j ACCEPT',
- '-A INPUT -p tcp -m tcp --dport 8080 -m state --state NEW,RELATED,ESTABLISHED -j ACCEPT',
- '-A INPUT -p tcp -m tcp --dport 80 -m state --state NEW,RELATED,ESTABLISHED -j ACCEPT',
-
- '-A INPUT -p udp -m udp --sport 53 -m state --state NEW,RELATED,ESTABLISHED -j ACCEPT',
- '-A INPUT -p tcp -m tcp --dport 22 -m state --state NEW,RELATED,ESTABLISHED -j ACCEPT',
-
- '-A OUTPUT -p tcp -m tcp --sport 80 -m state --state NEW,RELATED,ESTABLISHED -j ACCEPT',
- '-A OUTPUT -p udp -m udp --dport 53 -m state --state NEW,RELATED,ESTABLISHED -j ACCEPT',
- '-A OUTPUT -p tcp -m tcp --sport 22 -m state --state NEW,RELATED,ESTABLISHED -j ACCEPT',
-]
-
-# check for wanted and unwanted iptables rules
-describe iptables do
- unwanted_iptables_rules.each do |unwanted_iptables_rule|
- it { should_not have_rule(unwanted_iptables_rule) }
- end
- desired_iptables_rules.each do |desired_iptables_rule|
- it { should have_rule(desired_iptables_rule) }
- end
-end
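# A minimal illustrative sketch of where the property[...] values interpolated
# into the rules above typically come from: a YAML vars file loaded by
# spec_helper.rb. The file path and keys below are hypothetical placeholders.
require 'yaml'

props = YAML.load_file('spec/vars/app-prod.yml')  # e.g. { 'monitor_ip' => '10.0.1.3' }
puts "-A OUTPUT -d #{props['monitor_ip']}/32 -p udp -m udp --dport 1514 -j ACCEPT"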
diff --git a/spec_tests/spec/app-staging/apparmor_spec.rb b/spec_tests/spec/app-staging/apparmor_spec.rb
deleted file mode 100644
--- a/spec_tests/spec/app-staging/apparmor_spec.rb
+++ /dev/null
@@ -1,131 +0,0 @@
-# Apparmor package dependencies
-['apparmor', 'apparmor-utils'].each do |pkg|
- describe package(pkg) do
- it { should be_installed }
- end
-end
-
-# Check that apparmor is enabled.
-# The command exits with a non-zero code if AppArmor is not enabled
-describe command("aa-status --enabled") do
- its(:exit_status) { should eq 0 }
-end
-
-# SecureDrop apache apparmor profile
-# Staging role has two profiles in complain mode:
-# tor and apache2. Make sure the config file includes
-# that flag, since restarting apparmor will load
-# whatever's on disk
-['tor', 'apache2'].each do |complaining_process|
- describe file("/etc/apparmor.d/usr.sbin.#{complaining_process}") do
- it { should be_file }
- it { should be_owned_by 'root' }
- it { should be_mode '644' }
- its(:content) { should match /^\/usr\/sbin\/#{complaining_process} flags=\(complain\) \{/ }
- end
-end
-
-# declare expected app-armor capabilities for apache2
-apache2_capabilities = %w(
- kill
- net_bind_service
- sys_ptrace
-)
-# check for exact list of expected app-armor capabilities for apache2
-describe command('perl -nE \'/^\s+capability\s+(\w+),$/ && say $1\' /etc/apparmor.d/usr.sbin.apache2') do
- apache2_capabilities.each do |apache2_capability|
- its(:stdout) { should contain(apache2_capability) }
- end
-end
-
-# ensure no extra capabilities are defined for apache2
-describe command('grep -ic capability /etc/apparmor.d/usr.sbin.apache2') do
- its(:stdout) { should eq apache2_capabilities.length.to_s + "\n" }
-end
-
-# check for exact list of expected app-armor capabilities for tor
-describe command('perl -nE \'/^\s+capability\s+(\w+),$/ && say $1\' /etc/apparmor.d/usr.sbin.tor') do
- its(:stdout) { should contain("setgid") }
-end
-
-# ensure no extra capabilities are defined for tor
-describe command('grep -ic capability /etc/apparmor.d/usr.sbin.tor') do
- its(:stdout) { should eq "1\n" }
-end
-
-# Explicitly check that enforced profiles are NOT
-# present in /etc/apparmor.d/disabled. Polling aa-status
-# only checks the last config that was loaded, whereas
-# checking for symlinks in the `disabled` dir checks
-# the config to be loaded when the apparmor service is bounced.
-enforced_profiles = [
- 'ntpd',
- 'apache2',
- 'tcpdump',
- 'tor',
-]
-enforced_profiles.each do |enforced_profile|
- describe file("/etc/apparmor.d/disabled/usr.sbin.#{enforced_profile}") do
- it { should_not be_file }
- it { should_not be_directory }
- it { should_not be_symlink }
- end
-end
-
-# declare app-armor profiles expected to be enforced
-enforced_apparmor_profiles = %w(
- /sbin/dhclient
- /usr/lib/NetworkManager/nm-dhcp-client.action
- /usr/lib/connman/scripts/dhclient-script
- /usr/sbin/apache2//DEFAULT_URI
- /usr/sbin/apache2//HANDLING_UNTRUSTED_INPUT
- /usr/sbin/ntpd
- /usr/sbin/tcpdump
- system_tor
-)
-# check for enforced app-armor profiles
-# the contain().from().to() matcher below slices the aa-status output between
-# the "enforce mode" and "complain mode" section headers, so only profiles
-# listed in the enforce section satisfy the expectation
-describe command("aa-status") do
- enforced_apparmor_profiles.each do |enforced_apparmor_profile|
- its(:stdout) { should contain(enforced_apparmor_profile).from(/profiles are in enforce mode/).to(/profiles are in complain mode/) }
- end
-end
-
-# ensure number of expected enforced profiles matches number checked
-describe command("aa-status --enforced") do
- its(:stdout) { should eq enforced_apparmor_profiles.length.to_s + "\n" }
-end
-
-# declare app-armor profiles expected to be complaining
-# the staging hosts enable "complain" mode for more verbose
-# logging during development and testing; production hosts
-# should not have any profiles in complain mode.
-complaining_apparmor_profiles = %w(
- /usr/sbin/apache2
- /usr/sbin/tor
-)
-
-# check for complaining app-armor profiles
-describe command("aa-status") do
- complaining_apparmor_profiles.each do |complaining_apparmor_profile|
- its(:stdout) { should contain(complaining_apparmor_profile).from(/profiles are in complain mode/).to(/\d+ processes have profiles defined/) }
- end
-end
-
-# ensure number of expected complaining profiles matches number checked
-describe command("aa-status --complaining") do
- its(:stdout) { should eq complaining_apparmor_profiles.length.to_s + "\n" }
-end
-
-# ensure number of total profiles is sum of enforced and complaining profiles
-describe command("aa-status --profiled") do
- total_profiles = enforced_apparmor_profiles.length + complaining_apparmor_profiles.length
- its(:stdout) { should eq total_profiles.to_s + "\n" }
-end
-
-# Ensure that there are no processes that are unconfined but have a profile
-describe command("aa-status") do
- its(:stdout) { should contain("0 processes are unconfined but have a profile defined") }
-end
diff --git a/spec_tests/spec/app-staging/iptables_spec.rb b/spec_tests/spec/app-staging/iptables_spec.rb
deleted file mode 100644
--- a/spec_tests/spec/app-staging/iptables_spec.rb
+++ /dev/null
@@ -1,107 +0,0 @@
-# declare desired iptables rules
-# These rules should be present in prod and staging
-# TODO: There are also hardcoded IP addresses in this section.
-desired_iptables_rules = [
- '-P INPUT DROP',
- '-P FORWARD DROP',
- '-P OUTPUT DROP',
- '-N LOGNDROP',
- "-A INPUT -i #{property['staging_iface']} -p tcp -m tcp --dport 8080 -m state --state NEW,RELATED,ESTABLISHED -j ACCEPT",
- "-A INPUT -i #{property['staging_iface']} -p tcp -m tcp --dport 80 -m state --state NEW,RELATED,ESTABLISHED -j ACCEPT",
- '-A INPUT -p udp -m udp --sport 53 -m state --state NEW,RELATED,ESTABLISHED -j ACCEPT',
- "-A INPUT -i #{property['staging_iface']} -p tcp -m tcp --dport 22 -m state --state NEW,RELATED,ESTABLISHED -j ACCEPT",
- '-A INPUT -p tcp -m state --state RELATED,ESTABLISHED -m comment --comment "Allow traffic back for tor" -j ACCEPT',
- '-A INPUT -i lo -p tcp -m tcp --dport 80 -m state --state NEW,RELATED,ESTABLISHED -m comment --comment "Allow tor connection from local loopback to connect to source int" -j ACCEPT',
- '-A INPUT -i lo -p tcp -m tcp --dport 8080 -m state --state NEW,RELATED,ESTABLISHED -m comment --comment "Allow tor connection from local loopback to connect to document int" -j ACCEPT',
- '-A INPUT -s 127.0.0.1/32 -d 127.0.0.1/32 -i lo -p tcp -m state --state RELATED,ESTABLISHED -m comment --comment "for redis worker all application user local loopback user" -j ACCEPT',
- '-A INPUT -s 8.8.8.8/32 -p tcp -m tcp --sport 53 -m state --state RELATED,ESTABLISHED -m comment --comment "tcp/udp dns" -j ACCEPT',
- '-A INPUT -s 8.8.8.8/32 -p udp -m udp --sport 53 -m state --state RELATED,ESTABLISHED -m comment --comment "tcp/udp dns" -j ACCEPT',
- '-A INPUT -p udp -m udp --sport 123 --dport 123 -m state --state RELATED,ESTABLISHED -m comment --comment ntp -j ACCEPT',
- '-A INPUT -p tcp -m multiport --sports 80,8080,443 -m state --state RELATED,ESTABLISHED -m comment --comment "apt updates" -j ACCEPT',
- "-A INPUT -s #{property['monitor_ip']}/32 -p udp -m udp --sport 1514 -m state --state RELATED,ESTABLISHED -m comment --comment \"OSSEC server agent\" -j ACCEPT",
- '-A INPUT -i lo -m comment --comment "Allow lo to lo traffic all protocols" -j ACCEPT',
- '-A INPUT -p tcp -m state --state INVALID -m comment --comment "drop but do not log inbound invalid state packets" -j DROP',
- '-A INPUT -m comment --comment "Drop and log all other incoming traffic" -j LOGNDROP',
- "-A OUTPUT -o #{property['staging_iface']} -p tcp -m tcp --sport 8080 -m state --state NEW,RELATED,ESTABLISHED -j ACCEPT",
- "-A OUTPUT -o #{property['staging_iface']} -p tcp -m tcp --sport 80 -m state --state NEW,RELATED,ESTABLISHED -j ACCEPT",
- '-A OUTPUT -p udp -m udp --dport 53 -m state --state NEW,RELATED,ESTABLISHED -j ACCEPT',
- "-A OUTPUT -o #{property['staging_iface']} -p tcp -m owner --uid-owner 0 -m tcp --sport 22 -m state --state NEW,RELATED,ESTABLISHED -j ACCEPT",
- "-A OUTPUT -o lo -p tcp -m tcp --dport 22 -m owner --uid-owner #{property['tor_user_uid']} -m state --state NEW -m limit --limit 3/min --limit-burst 3 -m comment --comment \"Rate limit traffic from tor to the ssh dameon\" -j ACCEPT",
- "-A OUTPUT -o lo -p tcp -m tcp --dport 22 -m owner --uid-owner #{property['tor_user_uid']} -m state --state NEW -m comment --comment \"Drop all other new connections from tor to the ssh dameon\" -j LOGNDROP",
- "-A OUTPUT -o lo -p tcp -m tcp --dport 22 -m owner --uid-owner #{property['tor_user_uid']} -m state --state RELATED,ESTABLISHED -m comment --comment \"Allow the established traffic from tor to the ssh dameon\" -j ACCEPT",
- "-A OUTPUT -m owner --uid-owner #{property['tor_user_uid']} -m comment --comment \"Drop all other traffic for the tor instance used for ssh\" -j LOGNDROP",
- "-A OUTPUT -o lo -p tcp -m tcp --sport 80 -m owner --uid-owner #{property['apache_user_uid']} -m state --state RELATED,ESTABLISHED -m comment --comment \"Restrict the apache user outbound connections\" -j ACCEPT",
- "-A OUTPUT -o lo -p tcp -m tcp --sport 8080 -m owner --uid-owner #{property['apache_user_uid']} -m state --state RELATED,ESTABLISHED -m comment --comment \"Restrict the apache user outbound connections\" -j ACCEPT",
- "-A OUTPUT -s 127.0.0.1/32 -d 127.0.0.1/32 -o lo -p tcp -m owner --uid-owner #{property['apache_user_uid']} -m state --state NEW,RELATED,ESTABLISHED -m comment --comment \"for redis worker all application user local loopback user\" -j ACCEPT",
- "-A OUTPUT -m owner --uid-owner #{property['apache_user_uid']} -m comment --comment \"Drop all other traffic by the securedrop user\" -j LOGNDROP",
- "-A OUTPUT -m owner --gid-owner #{property['ssh_group_gid']} -m comment --comment \"Drop all other outbound traffic for ssh user\" -j LOGNDROP",
- '-A OUTPUT -d 8.8.8.8/32 -p tcp -m tcp --dport 53 -m owner --uid-owner 0 -m state --state NEW,RELATED,ESTABLISHED -m comment --comment "tcp/udp dns" -j ACCEPT',
- '-A OUTPUT -d 8.8.8.8/32 -p udp -m udp --dport 53 -m owner --uid-owner 0 -m state --state NEW,RELATED,ESTABLISHED -m comment --comment "tcp/udp dns" -j ACCEPT',
- '-A OUTPUT -p udp -m udp --sport 123 --dport 123 -m owner --uid-owner 0 -m state --state NEW,RELATED,ESTABLISHED -m comment --comment ntp -j ACCEPT',
- '-A OUTPUT -p tcp -m multiport --dports 80,8080,443 -m owner --uid-owner 0 -m state --state NEW,RELATED,ESTABLISHED -m comment --comment "apt updates" -j ACCEPT',
- "-A OUTPUT -d #{property['monitor_ip']}/32 -p udp -m udp --dport 1514 -m state --state NEW,RELATED,ESTABLISHED -m comment --comment \"OSSEC server agent\" -j ACCEPT",
- '-A OUTPUT -o lo -m comment --comment "Allow lo to lo traffic all protocols" -j ACCEPT',
- '-A OUTPUT -m comment --comment "Drop all other outgoing traffic" -j DROP',
- '-A LOGNDROP -p tcp -m limit --limit 5/min -j LOG --log-tcp-options --log-ip-options --log-uid',
- '-A LOGNDROP -p udp -m limit --limit 5/min -j LOG --log-ip-options --log-uid',
- '-A LOGNDROP -p icmp -m limit --limit 5/min -j LOG --log-ip-options --log-uid',
- '-A LOGNDROP -j DROP',
-]
-
-# declare unwanted iptables rules
-# These rules should have been removed by the `remove_authd_exceptions` role
-# TODO: The Vagrantfile virtualbox static IP was hardcoded into the two rules
-# below. This will need to be fixed, possibly by using something like
-# https://github.com/volanja/ansible_spec. Using the values for IP addresses
-# from the ansible inventory should cover most use cases (except inventories
-# with just the *.onion addresses).
-unwanted_iptables_rules = [
- "-A OUTPUT -d #{property['monitor_ip']} -p tcp --dport 1515 -m state --state NEW,ESTABLISHED,RELATED -j ACCEPT -m comment --comment \"ossec authd rule only required for initial agent registration\"",
- "-A INPUT -s #{property['monitor_ip']} -p tcp --sport 1515 -m state --state ESTABLISHED,RELATED -v ACCEPT -m comment --comment \"ossec authd rule only required for initial agent registration\"",
-
- # These rules have the wrong interface for the vagrant mon-staging machine.
- # Adding them in here to make sure ansible config changes don't introduce regressions.
- '-A INPUT -s 10.0.2.15/32 -p udp -m udp --sport 1514 -m state --state RELATED,ESTABLISHED -m comment --comment "OSSEC server agent" -j ACCEPT',
- '-A OUTPUT -d 10.0.2.15/32 -p udp -m udp --dport 1514 -m state --state NEW,RELATED,ESTABLISHED -m comment --comment "OSSEC server agent" -j ACCEPT',
-]
-
-# check for wanted and unwanted iptables rules
-describe iptables do
- unwanted_iptables_rules.each do |unwanted_iptables_rule|
- it { should_not have_rule(unwanted_iptables_rule) }
- end
- desired_iptables_rules.each do |desired_iptables_rule|
- it { should have_rule(desired_iptables_rule) }
- end
-end
-
-
-# try to validate local networking config
-describe host(property['monitor_hostname']) do
- monitor_ip_regex = Regexp.quote(property['monitor_ip'])
- its(:ipaddress) { should match /^#{monitor_ip_regex}$/ }
- it { should be_resolvable.by('hosts') }
- it { should_not be_reachable }
- # in staging, direct access allows ssh.
- # prod hosts should NOT have access on 22.
- it { should_not be_reachable.with( :port => 22, :proto => 'tcp') }
-end
-
-# declare ports expected to be listening
-listening_ports = [
- 22, # ssh
- 80, # source interface
- 8080, # journalist interface
- 6001, # Xvfb
-]
-# ensure ports are listening
-listening_ports.each do |listening_port|
- describe port(listening_port) do
- it { should be_listening.on('0.0.0.0').with('tcp') }
- end
-end
-
-# check redis worker listening port
-describe port(6379) do
- it { should be_listening.on('127.0.0.1').with('tcp') }
-end
diff --git a/spec_tests/spec/app-staging/securedrop_app_test_spec.rb b/spec_tests/spec/app-staging/securedrop_app_test_spec.rb
deleted file mode 100644
--- a/spec_tests/spec/app-staging/securedrop_app_test_spec.rb
+++ /dev/null
@@ -1,40 +0,0 @@
-# ensure logging is enabled for source interface in staging
-describe file('/var/www/source.wsgi') do
- it { should be_file }
- it { should be_owned_by 'www-data' }
- it { should be_grouped_into 'www-data' }
- it { should be_mode '640' }
- its(:content) { should match /^import logging$/ }
- its(:content) { should match /^logging\.basicConfig\(stream=sys\.stderr\)$/ }
-end
-
-# ensure app-armor profiles are in complain mode for staging
-# there are two profiles that should be in complain mode:
-# - usr.sbin.apache2
-# - usr.sbin.tor
-describe command('aa-status') do
- expected_output = <<-eos
-2 profiles are in complain mode.
- /usr/sbin/apache2
- /usr/sbin/tor
-eos
- its(:stdout) { should contain(expected_output) }
-end
-
-# These pip dependencies are staging-specific; they may NOT match
-# what's specified in `securedrop/requirements/test-requirements.txt`,
-# since they're pulled in via the prod packages on apt.freedom.press.
-pip_dependencies = [
- 'Flask-Testing==0.5.0',
- 'mock==2.0.0',
- 'pytest==3.0.1',
- 'selenium==2.53.6',
-]
-# ensure pip dependencies are installed in staging.
-# these are required for running unit and functional tests
-describe command('pip freeze') do
- pip_dependencies.each do |pip_dependency|
- its(:stdout) { should contain(pip_dependency) }
- end
-end
-
diff --git a/spec_tests/spec/app-test/xvfb_spec.rb b/spec_tests/spec/app-test/xvfb_spec.rb
deleted file mode 100644
--- a/spec_tests/spec/app-test/xvfb_spec.rb
+++ /dev/null
@@ -1,89 +0,0 @@
-# declare apt package dependencies for running tests
-apt_dependencies = [
- 'firefox',
- 'xvfb',
-]
-# ensure apt package dependencies are installed
-apt_dependencies.each do |apt_dependency|
- describe package(apt_dependency) do
- it { should be_installed }
- end
-end
-
-# ensure xvfb service config is present
-describe file('/etc/init.d/xvfb') do
- it { should be_file }
- it { should be_mode '700' }
- it { should be_owned_by 'root' }
- it { should be_grouped_into 'root' }
- xvfb_init_content = <<-eos
-# This is the /etc/init.d/xvfb script. We use it to launch xvfb at boot in the
-# development environment so we can easily run the functional tests.
-
-XVFB=/usr/bin/Xvfb
-XVFBARGS=":1 -screen 0 1024x768x24 -ac +extension GLX +render -noreset"
-PIDFILE=/var/run/xvfb.pid
-case "$1" in
- start)
- echo -n "Starting virtual X frame buffer: Xvfb"
- start-stop-daemon --start --quiet --pidfile $PIDFILE --make-pidfile --background --exec $XVFB -- $XVFBARGS
- echo "."
- ;;
- stop)
- echo -n "Stopping virtual X frame buffer: Xvfb"
- start-stop-daemon --stop --quiet --pidfile $PIDFILE
- echo "."
- ;;
- restart)
- $0 stop
- $0 start
- ;;
- *)
- echo "Usage: /etc/init.d/xvfb {start|stop|restart}"
- exit 1
-esac
-
-exit 0
-eos
- its(:content) { should eq xvfb_init_content }
-end
-
-# ensure xvfb is configured to start on boot via update-rc.d
-# the `-n` option to update-rc.d is dry-run
-describe command('update-rc.d -n xvfb defaults') do
- its(:exit_status) { should eq 0 }
- expected_output_regex = Regexp.quote('System start/stop links for /etc/init.d/xvfb already exist.')
- its(:stdout) { should match /^\s{1}#{expected_output_regex}$/ }
-end
-
-# ensure DISPLAY environment variable is set on boot
-describe file('/etc/profile.d/xvfb_display.sh') do
- it { should be_file }
- it { should be_mode '444' }
- it { should be_owned_by 'root' }
- it { should be_grouped_into 'root' }
- its(:content) { should eq "export DISPLAY=:1\n" }
-end
-
-# ensure that xvfb service is running
-describe service('Xvfb') do
- # TODO: `enabled` check in serverspec uses a case-sensitive grep,
- # so the all-lowercase filename /etc/init.d/xvfb fails an enabled
- # check as a result. modify this in ansible config, then update test.
- # it { should be_enabled }
- # TODO: ansible config does not enforce service=started for xvfb, but should.
-  # if app-staging has been rebooted/reloaded, then the service will be running.
- it { should be_running }
-end
-describe service('xvfb') do
- # TODO: (duplicate of above). rename /etc/init.d/{x,X}vfb in ansible config
- it { should be_enabled }
-end
-
-# TODO: confirm that DISPLAY environment variable is currently set
-# will likely need to leverage a spec_helper for this, since
-# env vars are ignored by serverspec's default ssh config
-#describe command('echo $DISPLAY') do
-# its(:stdout) { should eq ":1\n" }
-#end
-
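# A minimal illustrative sketch of one way the commented-out DISPLAY check
# above could be made to work: run the command through a login shell so that
# /etc/profile.d/xvfb_display.sh is sourced (assumes bash is available on the
# target host; this check is not part of the original file).
describe command("bash -lc 'echo $DISPLAY'") do
  its(:stdout) { should eq ":1\n" }
end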
diff --git a/spec_tests/spec/build/build_securedrop_deb_spec.rb b/spec_tests/spec/build/build_securedrop_deb_spec.rb
deleted file mode 100644
--- a/spec_tests/spec/build/build_securedrop_deb_spec.rb
+++ /dev/null
@@ -1,73 +0,0 @@
-# TODO: dynamically read the securedrop_app_code_version var
-securedrop_app_code_version = "0.3.2"
-# declare development apt dependencies for building
-development_apt_dependencies = [
- 'libssl-dev',
- 'python-dev',
- 'python-pip',
-]
-# ensure development apt dependencies are installed
-development_apt_dependencies.each do |development_apt_dependency|
- describe package(development_apt_dependency) do
- it { should be_installed }
- end
-end
-
-# ensure "wheel" is installed via pip
-describe command('pip freeze') do
- its(:stdout) { should contain('wheel==0.24.0') }
-end
-
-# declare required directories
-required_directories = [
-# "/tmp/build/securedrop-ossec-agent-2.8.1+#{securedrop_app_code_version}-amd64/",
-# "/tmp/build/securedrop-ossec-server-2.8.1+#{securedrop_app_code_version}-amd64/",
- '/vagrant/build',
-]
-# ensure required directories exist
-required_directories.each do |required_directory|
- describe file(required_directory) do
- it { should be_directory }
- end
-end
-
-# declare filenames for built debs
-wanted_debs = [
- "/vagrant/build/securedrop-app-code-#{securedrop_app_code_version}-amd64.deb",
- "/vagrant/build/securedrop-ossec-agent-2.8.1+#{securedrop_app_code_version}-amd64.deb",
- "/vagrant/build/securedrop-ossec-server-2.8.1+#{securedrop_app_code_version}-amd64.deb",
-]
-wanted_debs.each do |wanted_deb|
- # ensure required debs exist
- describe file(wanted_deb) do
- it { should be_file }
- end
-
- # get file basename of package, stripping leading dirs
- deb_basename = File.basename(wanted_deb)
-
- # cut up filename to extract package name
- # this garish regex finds just the package name and strips the version info, e.g.
- # from 'securedrop-ossec-agent-2.8.1+0.3.1-amd64.deb' it will return
- # 'securedrop-ossec-agent'
- package_name = deb_basename.scan(/^([a-z\-]+(?!\d))/)[0][0].to_s
-
- # ensure required debs appear installable
- describe command("dpkg --install --dry-run #{wanted_deb}") do
- its(:exit_status) { should eq 0 }
- its(:stdout) { should contain("Selecting previously unselected package #{package_name}.") }
-# its(:stdout) { should contain("Preparing to unpack #{deb_basename} ...")}
- end
-
- # ensure control fields are populated as expected
- # TODO: these checks are rather superficial, and don't actually confirm that the
- # .deb files are not broken. at a later date, consider integration tests
- # that actually use these built files during an ansible provisioning run.
- describe command("dpkg-deb --field #{wanted_deb}") do
- its(:exit_status) { should eq 0 }
- its(:stdout) { should contain("Maintainer: SecureDrop Team <[email protected]>") }
- its(:stdout) { should contain("Homepage: https://securedrop.org") }
- its(:stdout) { should contain("Package: #{package_name}")}
- its(:stdout) { should contain("Architecture: amd64") }
- end
-end
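# A minimal illustrative sketch of the package-name extraction used above,
# runnable in plain Ruby; the filename is one of the wanted_debs listed earlier.
deb_basename = 'securedrop-ossec-agent-2.8.1+0.3.2-amd64.deb'
package_name = deb_basename.scan(/^([a-z\-]+(?!\d))/)[0][0].to_s
puts package_name  # => "securedrop-ossec-agent"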
diff --git a/spec_tests/spec/common-development/securedrop_app_spec.rb b/spec_tests/spec/common-development/securedrop_app_spec.rb
deleted file mode 100644
--- a/spec_tests/spec/common-development/securedrop_app_spec.rb
+++ /dev/null
@@ -1,98 +0,0 @@
-# this file tests for app-related config
-# common to "development" and "app-staging"
-
-# ensure default apache html directory is absent
-describe file('/var/www/html') do
- it { should_not exist }
-end
-
-# declare securedrop-app package dependencies
-securedrop_package_dependencies = [
- 'apparmor-utils',
- 'gnupg2',
- 'haveged',
- 'python',
- 'python-pip',
- 'redis-server',
- 'secure-delete',
- 'sqlite',
- 'supervisor',
-]
-# ensure securedrop-app dependencies are installed
-securedrop_package_dependencies.each do |securedrop_package_dependency|
- describe package(securedrop_package_dependency) do
- it { should be_installed }
- end
-end
-
-# ensure haveged's low entropy watermark is sufficiently high
-describe file('/etc/default/haveged') do
- it { should be_file }
- it { should be_mode '644' }
- it { should be_owned_by 'root' }
- it { should be_grouped_into 'root' }
- its(:content) { should match /^DAEMON_ARGS="-w 2400"$/ }
-end
-
-# Regression test to check for duplicate entries.
-describe command('uniq --repeated /etc/default/haveged') do
- its(:stdout) { should eq "" }
-end
-
-# ensure haveged is running
-describe service('haveged') do
- it { should be_enabled }
- it { should be_running }
-end
-
-# ensure the securedrop application gpg pubkey is present
-describe file("#{property['securedrop_data']}/test_journalist_key.pub") do
- it { should be_file }
- it { should be_owned_by 'root' }
- it { should be_grouped_into 'root' }
- it { should be_mode '644' }
-end
-
-# ensure config.py (settings for securedrop app) exists
-describe file("#{property['securedrop_code']}/config.py") do
- it { should be_file }
- it { should be_owned_by property['securedrop_user'] }
- it { should be_grouped_into property['securedrop_user'] }
- it { should be_mode '600' }
- its(:content) { should match /^JOURNALIST_KEY = '65A1B5FF195B56353CC63DFFCC40EF1228271441'$/ }
-end
-
-# ensure sqlite database exists for application
-describe file("#{property['securedrop_data']}/db.sqlite") do
- it { should be_file }
- # TODO: perhaps 640 perms would work here
- it { should be_mode '644' }
- it { should be_owned_by property['securedrop_user'] }
- it { should be_grouped_into property['securedrop_user'] }
-end
-
-# declare config options for securedrop worker
-securedrop_worker_config_options = [
- '[program:securedrop_worker]',
- 'command=/usr/local/bin/rqworker',
- "directory=#{property['securedrop_code']}",
- 'autostart=true',
- 'autorestart=true',
- 'startretries=3',
- 'stderr_logfile=/var/log/securedrop_worker/err.log',
- 'stdout_logfile=/var/log/securedrop_worker/out.log',
- "user=#{property['securedrop_user']}",
- 'environment=HOME="/tmp/python-gnupg"',
-]
-# ensure securedrop worker config for supervisor is present
-describe file('/etc/supervisor/conf.d/securedrop_worker.conf') do
- it { should be_file }
- it { should be_mode '644' }
- it { should be_owned_by 'root' }
- it { should be_grouped_into 'root' }
- securedrop_worker_config_options.each do |securedrop_worker_config_option|
- securedrop_worker_config_option_regex = Regexp.quote(securedrop_worker_config_option)
- its(:content) { should match /^#{securedrop_worker_config_option_regex}$/ }
- end
-end
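# A minimal illustrative sketch of a follow-on check that could confirm the
# worker is actually running under supervisor. This is an assumption, not part
# of the original file; it relies on supervisorctl reporting a RUNNING state.
describe command('supervisorctl status securedrop_worker') do
  its(:stdout) { should match /RUNNING/ }
end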
-
diff --git a/spec_tests/spec/common/cron_apt_spec.rb b/spec_tests/spec/common/cron_apt_spec.rb
deleted file mode 100644
--- a/spec_tests/spec/common/cron_apt_spec.rb
+++ /dev/null
@@ -1,96 +0,0 @@
-# Check for critical packages
-['cron-apt', 'ntp'].each do |pkg|
- describe package(pkg) do
- it { should be_installed }
- end
-end
-
-# ensure custom cron-apt config file is present
-describe file('/etc/cron-apt/config') do
- it { should be_file }
- it { should be_owned_by 'root' }
- it { should be_mode '644' }
- its(:content) { should match /^SYSLOGON="always"$/ }
- its(:content) { should match /^EXITON=error$/ }
-end
-
-# list desired security repositories
-security_repositories = [
- 'deb http://security.ubuntu.com/ubuntu trusty-security main',
- 'deb-src http://security.ubuntu.com/ubuntu trusty-security main',
- 'deb http://security.ubuntu.com/ubuntu trusty-security universe',
- 'deb-src http://security.ubuntu.com/ubuntu trusty-security universe',
- 'deb [arch=amd64] https://apt.freedom.press trusty main',
- 'deb http://deb.torproject.org/torproject.org trusty main',
-]
-# ensure custom security.list file is present
-describe file('/etc/apt/security.list') do
- it { should be_file }
- it { should be_owned_by 'root' }
- it { should be_mode '644' }
- security_repositories.each do |repo|
- repo_regex = Regexp.quote(repo)
- its(:content) { should match /^#{repo_regex}$/ }
- end
-end
-
-# ensure cron-apt updates the security.list packages
-describe file('/etc/cron-apt/action.d/0-update') do
- it { should be_file }
- it { should be_owned_by 'root' }
- it { should be_mode '644' }
- repo_regex = Regexp.quote('update -o quiet=2 -o Dir::Etc::SourceList=/etc/apt/security.list -o Dir::Etc::SourceParts=""')
- its(:content) { should match /^#{repo_regex}$/ }
-end
-
-# ensure cron-apt upgrades the security.list packages
-describe file('/etc/cron-apt/action.d/5-security') do
- it { should be_file }
- it { should be_owned_by 'root' }
- it { should be_mode '644' }
- its(:content) { should match /^autoclean -y$/ }
- config_regex = Regexp.quote('dist-upgrade -y -o APT::Get::Show-Upgraded=true -o Dir::Etc::SourceList=/etc/apt/security.list -o Dpkg::Options::=--force-confdef -o Dpkg::Options::=--force-confold')
- its(:content) { should match /^#{config_regex}$/ }
- its(:content) { should match /^autoremove -y$/ }
-end
-
-# ensure default cron-apt file to download all updates does not exist
-describe file('/etc/cron-apt/action.d/3-download') do
- it { should_not exist }
-end
-
-desired_cronjobs = [
- '0 4 * * * root /usr/bin/test -x /usr/sbin/cron-apt && /usr/sbin/cron-apt && /sbin/reboot'
-]
-# Checking for old cron jobs to guard against regressions.
-unwanted_cronjobs = [
- '0 4 * * * root /usr/bin/test -x /usr/sbin/cron-apt && /usr/sbin/cron-apt',
- '0 5 * * * root /sbin/reboot',
-]
-# ensure the cron.d config for cron-apt exists
-describe file('/etc/cron.d/cron-apt') do
- it { should be_file }
- it { should be_owned_by 'root' }
- it { should be_mode '644' }
- desired_cronjobs.each do |cronjob|
- cronjob_regex = Regexp.quote(cronjob)
- its(:content) { should match /^#{cronjob_regex}$/ }
- end
- unwanted_cronjobs.each do |cronjob|
- cronjob_regex = Regexp.quote(cronjob)
- its(:content) { should_not match /^#{cronjob_regex}$/ }
- end
-end
-
-# ensure safe-upgrade has already been run
-describe command('aptitude --simulate -y safe-upgrade') do
- its(:exit_status) { should eq 0 }
- its(:stdout) { should match /^No packages will be installed, upgraded, or removed\.$/ }
- its(:stdout) { should match /0 packages upgraded, 0 newly installed, 0 to remove and 0 not upgraded\./ }
-end
-
-# TODO: In order to validate the intended system state post-provisioning,
-# may be simplest to compare output of `dpkg --get-selections`
-# from a clean box versus a post-provisioned one. However,
-# there will be environment-specific items in this list (e.g. vbox)
-# that must be pruned.
diff --git a/spec_tests/spec/common/fpf_repo_spec.rb b/spec_tests/spec/common/fpf_repo_spec.rb
deleted file mode 100644
--- a/spec_tests/spec/common/fpf_repo_spec.rb
+++ /dev/null
@@ -1,25 +0,0 @@
-# ensure FPF repo is present
-describe file('/etc/apt/sources.list.d/apt_freedom_press.list') do
- its(:content) { should match /^deb \[arch=amd64\] https:\/\/apt\.freedom\.press trusty main$/ }
-end
-
-# ensure FPF has correct fingerprint
-describe command('apt-key finger') do
- fpf_gpg_pub_key_info = '/etc/apt/trusted.gpg.d/securedrop-keyring.gpg
----------------------------------------------
-pub 4096R/00F4AD77 2016-10-20 [expires: 2017-10-20]
- Key fingerprint = 2224 5C81 E3BA EB41 38B3 6061 310F 5612 00F4 AD77
-uid SecureDrop Release Signing Key'
-
- # Using Regexp.quote() to escape regex special characters such as [].
- its(:stdout) { should contain(Regexp.quote(fpf_gpg_pub_key_info)) }
-
- fpf_gpg_pub_key_fingerprint_expired = 'B89A 29DB 2128 160B 8E4B 1B4C BADD E0C7 FC9F 6818'
- fpf_gpg_pub_key_info_expired = "pub 4096R/FC9F6818 2014-10-26 [expired: 2016-10-27]
- Key fingerprint = #{fpf_gpg_pub_key_fingerprint_expired}
-uid Freedom of the Press Foundation Master Signing Key"
-
- its(:stdout) { should_not contain(Regexp.quote(fpf_gpg_pub_key_info_expired)) }
-  # Extra check for just the old fingerprint; more durable in case the formatting is off.
- its(:stdout) { should_not contain(fpf_gpg_pub_key_fingerprint_expired) }
-end
diff --git a/spec_tests/spec/common/server_hardening_spec.rb b/spec_tests/spec/common/server_hardening_spec.rb
deleted file mode 100644
--- a/spec_tests/spec/common/server_hardening_spec.rb
+++ /dev/null
@@ -1,60 +0,0 @@
-# declare desired sysctl flags
-sysctl_flags = {
- 'net.ipv4.tcp_max_syn_backlog' => 4096,
- 'net.ipv4.tcp_syncookies' => 1,
- 'net.ipv4.conf.all.rp_filter' => 1,
- 'net.ipv4.conf.all.accept_source_route' => 0,
- 'net.ipv4.conf.all.accept_redirects' => 0,
- 'net.ipv4.conf.all.secure_redirects' => 0,
- 'net.ipv4.conf.default.rp_filter' => 1,
- 'net.ipv4.conf.default.accept_source_route' => 0,
- 'net.ipv4.conf.default.accept_redirects' => 0,
- 'net.ipv4.conf.default.secure_redirects' => 0,
- 'net.ipv4.icmp_echo_ignore_broadcasts' => 1,
- 'net.ipv4.ip_forward' => 0,
- 'net.ipv4.conf.all.send_redirects' => 0,
- 'net.ipv4.conf.default.send_redirects' => 0,
- 'net.ipv6.conf.all.disable_ipv6' => 1,
- 'net.ipv6.conf.default.disable_ipv6' => 1,
- 'net.ipv6.conf.lo.disable_ipv6' => 1,
-}
-# ensure sysctl flags are set correctly
-sysctl_flags.each do |sysctl_flag, value|
- describe linux_kernel_parameter(sysctl_flag) do
- its(:value) { should eq value }
- end
-end
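# A minimal illustrative sketch of the same check done by shelling out to
# sysctl directly; the linux_kernel_parameter resource above is the idiomatic
# serverspec form, this just shows the underlying command (plain Ruby).
value = `sysctl -n net.ipv4.tcp_syncookies`.strip
raise 'tcp_syncookies is not enabled' unless value == '1'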
-
-# ensure DNS server is named
-# TODO: nameserver var is hard-coded below. consider
-# dynamically populating this var via spec_helper.
-describe file('/etc/resolvconf/resolv.conf.d/base') do
- it { should be_mode '644' }
- its(:content) { should match /^nameserver 8\.8\.8\.8$/ }
-end
-
-disabled_kernel_modules = [
- 'bluetooth',
- 'iwlwifi',
-]
-disabled_kernel_modules.each do |disabled_kernel_module|
- describe kernel_module(disabled_kernel_module) do
- it { should_not be_loaded }
- end
- describe file('/etc/modprobe.d/blacklist.conf') do
- its(:content) { should match /^blacklist #{disabled_kernel_module}$/ }
- end
-end
-
-describe package('ntp') do
- it { should be_installed }
-end
-
-# ensure swap space is disabled
-describe command('swapon --summary') do
- # by using the `eq` operator here, we're ensuring the entirety of stdout is checked
- its(:stdout) { should eq "Filename\t\t\t\tType\t\tSize\tUsed\tPriority\n" }
- # a leading slash will indicate a fullpath to a swapfile
- its(:stdout) { should_not match /^\// }
-end
-
diff --git a/spec_tests/spec/common/tor_spec.rb b/spec_tests/spec/common/tor_spec.rb
deleted file mode 100644
--- a/spec_tests/spec/common/tor_spec.rb
+++ /dev/null
@@ -1,74 +0,0 @@
-# ensure tor repo is present
-describe file('/etc/apt/sources.list.d/deb_torproject_org_torproject_org.list') do
- repo_regex = Regexp.quote('deb http://deb.torproject.org/torproject.org trusty main')
- its(:content) { should match /^#{repo_regex}$/ }
-end
-
-# ensure packages from tor repo are installed
-['deb.torproject.org-keyring', 'tor'].each do |pkg|
- describe package(pkg) do
- it { should be_installed }
- end
-end
-
-# declare common settings for torrc
-torrc_settings = [
- 'SocksPort 0',
- 'SafeLogging 1',
- 'RunAsDaemon 1',
- 'Sandbox 1',
- 'HiddenServiceDir /var/lib/tor/services/ssh',
- 'HiddenServicePort 22 127.0.0.1:22',
- 'HiddenServiceAuthorizeClient stealth admin',
-]
-# ensure common settings are present in torrc
-# these settings should exist in both app-staging and mon-staging
-describe file('/etc/tor/torrc') do
- it { should be_file }
- it { should be_mode '644' }
- it { should be_owned_by 'debian-tor' }
- torrc_settings.each do |torrc_setting|
- torrc_setting_regex = Regexp.quote(torrc_setting)
- its(:content) { should match /^#{torrc_setting_regex}$/ }
- end
-end
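# A minimal illustrative sketch of why Regexp.quote is used above: torrc lines
# contain regex metacharacters (such as the dots in IP addresses), so they must
# be escaped before being interpolated into an anchored match (plain Ruby).
line = 'HiddenServicePort 22 127.0.0.1:22'
puts('exact match') if line =~ /^#{Regexp.quote(line)}$/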
-
-# declare tor service directories, for mode and ownership checks
-tor_service_directories = %w(
- /var/lib/tor/services
- /var/lib/tor/services/ssh
-)
-# ensure tor service dirs are owned by tor user and mode 0700
-tor_service_directories.each do |tor_service_directory|
- describe file(tor_service_directory) do
- it { should be_directory }
- it { should be_mode('700') }
- it { should be_owned_by 'debian-tor' }
- it { should be_grouped_into 'debian-tor' }
- end
-end
-
-# ensure tor service is running
-describe service('tor') do
- it { should be_running }
- it { should be_enabled }
-end
-
-# Likely overkill
-describe command('service tor status') do
- its(:exit_status) { should eq 0 }
- its(:stdout) { should match /tor is running/ }
-end
-
-# ensure tor repo gpg key matches
-describe command('apt-key finger') do
- tor_gpg_pub_key_info = '/etc/apt/trusted.gpg.d/deb.torproject.org-keyring.gpg
------------------------------------------------------
-pub 2048R/886DDD89 2009-09-04 [expires: 2020-08-29]
- Key fingerprint = A3C4 F0F9 79CA A22C DBA8 F512 EE8C BC9E 886D DD89
-uid deb.torproject.org archive signing key
-sub 2048R/219EC810 2009-09-04 [expires: 2018-08-30]'
-
- # Using Regexp.quote() to escape regex special chars such as [].
- its(:stdout) { should contain(Regexp.quote(tor_gpg_pub_key_info)) }
-end
diff --git a/spec_tests/spec/common/user_config_spec.rb b/spec_tests/spec/common/user_config_spec.rb
deleted file mode 100644
--- a/spec_tests/spec/common/user_config_spec.rb
+++ /dev/null
@@ -1,31 +0,0 @@
-# ensure sudoers file is present
-['/etc/sudoers'].each do |sudoers|
- describe file(sudoers) do
- it { should be_mode '440' }
- it { should be_readable.by_user('root') }
- it { should_not be_readable.by('others') }
- its(:content) { should match /^Defaults\s+env_reset$/ }
- its(:content) { should match /^Defaults\s+mail_badpass$/ }
- its(:content) { should contain('Defaults\s+secure_path="/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"') }
- its(:content) { should match /^%sudo\s+ALL=\(ALL\)\s+NOPASSWD:\s+ALL$/ }
- its(:content) { should contain('Defaults:%sudo\s+!requiretty') }
- end
-end
-
-# ensure securedrop-specific bashrc additions are present
-describe file('/etc/profile.d/securedrop_additions.sh') do
- non_interactive_str = Regexp.quote('[[ $- != *i* ]] && return')
- its(:content) { should match /^#{non_interactive_str}$/ }
- its(:content) { should match /^if which tmux >\/dev\/null 2>&1; then$/ }
- tmux_check = Regexp.quote('test -z "$TMUX" && (tmux attach || tmux new-session)')
- its(:content) { should match /^\s+#{tmux_check}$/ }
-end
-
-# TODO: 'vagrant' user only valid in local vbox environment.
-# find some way to read this variable dynamically.
-# probably best to parse the YAML vars file via spec_helper.rb
-describe file('/home/vagrant/.bashrc') do
- # Regression test: SecureDrop bashrc additions were previously added to local
- # ~/.bashrc files in admin accounts, so now we're checking that the line does NOT exist.
- its(:content) { should_not match /^. \/etc\/bashrc\.securedrop_additions$/ }
-end
diff --git a/spec_tests/spec/development/development_spec.rb b/spec_tests/spec/development/development_spec.rb
deleted file mode 100644
--- a/spec_tests/spec/development/development_spec.rb
+++ /dev/null
@@ -1,56 +0,0 @@
-# declare development apt dependencies
-development_apt_dependencies = [
- 'libssl-dev',
- 'python-dev',
- 'python-pip',
-]
-# ensure development apt dependencies are installed
-development_apt_dependencies.each do |development_apt_dependency|
- describe package(development_apt_dependency) do
- it { should be_installed }
- end
-end
-
-# declare securedrop app pip requirements
-# since this is the development role, the pip dependencies
-# should be installed directly, rather than relying on the
-# apt packages, which would clobber local changes
-# versions here are intentionally hardcoded
-pip_requirements = [
- 'Flask-Testing==0.6.1',
- 'Flask==0.11.1',
- 'Jinja2==2.8',
- 'MarkupSafe==0.23',
- 'Werkzeug==0.11.11',
- 'beautifulsoup4==4.5.1',
- 'click==6.6',
- 'coverage==4.2',
- 'first==2.0.1',
- 'funcsigs==1.0.2',
- 'itsdangerous==0.24',
- 'mock==2.0.0',
- 'pbr==1.10.0',
- 'pip-tools==1.7.0',
- 'py==1.4.31',
- 'pytest-cov==2.3.1',
- 'pytest==3.0.2',
- 'selenium==2.53.6',
- 'six==1.10.0',
-]
-# ensure securedrop app pip requirements are installed
-describe command('pip freeze') do
- pip_requirements.each do |pip_requirement|
- its(:stdout) { should contain(pip_requirement) }
- end
-end
-
-# ensure that the SECUREDROP_ENV var is set to "dev"
-# TODO: this isn't really checking that the env var is set,
-# just that it's declared in the bashrc. spec_helper ignores
-# env vars via ssh by default, so start there.
-describe file('/home/vagrant/.bashrc') do
- it { should be_file }
- it { should be_owned_by 'vagrant' }
- its(:content) { should match /^export SECUREDROP_ENV=dev$/ }
-end
-
diff --git a/spec_tests/spec/development/iptables_spec.rb b/spec_tests/spec/development/iptables_spec.rb
deleted file mode 100644
--- a/spec_tests/spec/development/iptables_spec.rb
+++ /dev/null
@@ -1,50 +0,0 @@
-# Declare desired iptables rules
-# The 'development' machine doesn't have any custom
-# iptables rules, so just check for the default chains.
-desired_iptables_rules = [
- '-P INPUT ACCEPT',
- '-P FORWARD ACCEPT',
- '-P OUTPUT ACCEPT',
-]
-
-# check for wanted and unwanted iptables rules
-describe iptables do
- desired_iptables_rules.each do |desired_iptables_rule|
- it { should have_rule(desired_iptables_rule) }
- end
-end
-
-# if any iptables rules are ever added, this test will
-# fail, so tests can be written for the new rules.
-describe command('iptables -S | wc -l') do
- its(:stdout) { should eq desired_iptables_rules.length.to_s + "\n" }
-end
-
-# check for ssh listening (this really shouldn't ever fail)
-describe port(22) do
- it { should be_listening.on('0.0.0.0').with('tcp') }
-end
-
-# check for redis worker port listening
-describe port(6379) do
- it { should be_listening.on('127.0.0.1').with('tcp') }
-end
-
-# The Flask runners for the source and journalist interfaces
-# aren't configured to run by default, e.g. on boot. Nor
-# do the app tests cause them to be run. So, we shouldn't
-# really expected them to be running.
-## check for source interface flask port listening
-#describe port(8080) do
-# it { should be_listening.on('0.0.0.0').with('tcp') }
-#end
-#
-## check for journalist interface flask port listening
-#describe port(8081) do
-# it { should be_listening.on('0.0.0.0').with('tcp') }
-#end
diff --git a/spec_tests/spec/development/securedrop_app_dev_spec.rb b/spec_tests/spec/development/securedrop_app_dev_spec.rb
deleted file mode 100644
--- a/spec_tests/spec/development/securedrop_app_dev_spec.rb
+++ /dev/null
@@ -1,80 +0,0 @@
-# development role excludes apache, in favor of flask runner,
-# so ensure that apache is not installed.
-unwanted_packages = %w(
- securedrop-app-code
- apache2-mpm-worker
- libapache2-mod-wsgi
- libapache2-mod-xsendfile
-)
-unwanted_packages.each do |unwanted_package|
- describe package(unwanted_package) do
- it { should_not be_installed }
- end
-end
-
-# ensure default html dir is absent
-describe file("/var/www/html") do
- it { should_not exist }
-end
-
-securedrop_app_directories = [
- property['securedrop_data'],
- "#{property['securedrop_data']}/keys",
- "#{property['securedrop_data']}/tmp",
- "#{property['securedrop_data']}/store",
-]
-# ensure securedrop app directories exist with correct permissions
-securedrop_app_directories.each do |securedrop_app_directory|
- describe file(securedrop_app_directory) do
- it { should be_directory }
- it { should be_owned_by property['securedrop_user'] }
- it { should be_grouped_into property['securedrop_user'] }
- it { should be_mode '700' }
- end
-end
-
-# /vagrant has 770 permissions, so test
-# separately from the 700 permissions above
-describe file(property['securedrop_code']) do
- it { should be_directory }
- it { should be_owned_by property['securedrop_user'] }
- it { should be_grouped_into property['securedrop_user'] }
- # Vagrant VirtualBox environments show /vagrant as 770,
- # but the Vagrant DigitalOcean droplet shows /vagrant as 775.
- # This appears to be a side-effect of the default umask
- # in the snapci instances. (The rsync provisioner for the
- # vagrant-digitalocean plugin preserves permissions from the host.)
- # The spectests for 'staging' still check for an explicit mode,
- # so it's OK to relax this test for now.
- #it { should be_mode '700' }
- # TODO: should be 700 in all environments; ansible task is
- # straightforward about this.
- it { should be_readable.by('owner') }
- it { should be_writable.by('owner') }
- it { should be_executable.by('owner') }
-end
-
-# ensure cron job for securedrop tmp dir cleanup is enabled
-describe cron do
- # TODO: this should be using property, but the ansible role
- # doesn't use a var, it's hard-coded. update ansible, then fix test.
- # it { should have_entry "@daily #{property['securedrop_code']}/manage.py clean-tmp" }
- it { should have_entry "@daily /vagrant/securedrop/manage.py clean-tmp" }
-end
-
-
-# ensure default logo header file exists
-# TODO: add check for custom logo header file
-describe file("#{property['securedrop_code']}/static/i/logo.png") do
- it { should be_file }
- # TODO: Ansible task declares mode 400 but not as string, needs to be fixed
- # and tests updated. Also, not using "mode" in tests below because umask
- # on snapci machines differs from the /vagrant folder in dev VM.
- # Fixing Ansible task may fix differing perms.
- it { should be_writable.by('owner') }
- it { should be_readable.by('owner') }
- it { should be_readable.by('group') }
- it { should be_readable.by('others') }
- it { should be_owned_by property['securedrop_user'] }
- it { should be_grouped_into property['securedrop_user'] }
-end
diff --git a/spec_tests/spec/development/securedrop_app_test_spec.rb b/spec_tests/spec/development/securedrop_app_test_spec.rb
deleted file mode 100644
--- a/spec_tests/spec/development/securedrop_app_test_spec.rb
+++ /dev/null
@@ -1,23 +0,0 @@
-# These checks are for the role `app-test`,
-# which is different from simply `app`, which
-# is tested in `securedrop_app_spec.rb`.
-# TODO: the Ansible role filters tasks from the
-# app-test role via the "non-development" tag.
-# There should be better separation between hosts
-# and roles, e.g. "development" and "app-test",
-# perhaps leveraging host_vars. Fix that, then
-# come back here and refactor the various versions
-# of `securedrop_app_test_spec.rb`.
-
-# ensure wsgi file is absent in development
-describe file("/var/www/source.wsgi") do
- it { should_not exist }
-end
-
-# ensure app-armor profiles are NOT in complain mode
-# that's only for staging. in staging, these profiles complain:
-# - usr.sbin.apache2
-# - usr.sbin.tor
-describe command('aa-status') do
- its(:stdout) { should contain('0 profiles are in complain mode.') }
-end
diff --git a/spec_tests/spec/grsecurity/grsec_spec.rb b/spec_tests/spec/grsecurity/grsec_spec.rb
deleted file mode 100644
--- a/spec_tests/spec/grsecurity/grsec_spec.rb
+++ /dev/null
@@ -1,107 +0,0 @@
-# ensure ssh motd is disabled (grsec balks at ubuntu's default motd)
-describe file('/etc/pam.d/sshd') do
- its(:content) { should_not match /pam\.motd/ }
-end
-
-['securedrop-grsec', 'paxctl'].each do |pkg|
- describe package(pkg) do
- it { should be_installed }
- end
-end
-
-unwanted_kernel_metapackages = [
- 'linux-signed-image-generic-lts-utopic',
- 'linux-signed-image-generic',
- 'linux-signed-generic-lts-utopic',
- 'linux-signed-generic',
-]
-unwanted_kernel_metapackages.each do |metapkg|
- describe package(metapkg) do
- it { should_not be_installed }
- end
-end
-
-# ensure that the system is running the grsec kernel
-describe file('/proc/sys/kernel/grsecurity/grsec_lock') do
- it { should be_mode '600' }
- it { should be_owned_by('root') }
- its(:size) { should eq 0 }
-end
-
-# ensure that the system reports it's running the grsec kernel (lazy)
-describe command("uname -r") do
- its(:stdout) { should match /grsec$/ }
-end
-
-# Check that the grsec sysctl settings are correct
-describe 'Grsecurity kernel parameters' do
- context linux_kernel_parameter('kernel.grsecurity.grsec_lock') do
- its(:value) { should eq 1 }
- end
- context linux_kernel_parameter('kernel.grsecurity.rwxmap_logging') do
- its(:value) { should eq 0 }
- end
-end
-
-# Check that paxtest does not report anything vulnerable
-# Requires the package paxtest to be installed
-# The paxtest package is currently being installed in the app-test role
-paxtest_check_killed = [
- "Executable anonymous mapping",
- "Executable bss",
- "Executable data",
- "Executable heap",
- "Executable stack",
- "Executable shared library bss",
- "Executable shared library data",
- "Executable anonymous mapping (mprotect)",
- "Executable bss (mprotect)",
- "Executable data (mprotect)",
- "Executable heap (mprotect)",
- "Executable stack (mprotect)",
- "Executable shared library bss (mprotect)",
- "Executable shared library data (mprotect)",
- "Writable text segments",
- "Return to function (memcpy)",
- "Return to function (memcpy, PIE)",
-]
-# TODO: enable the paxtest checks below once the "paxtest"
-# package is included via the grsecurity role.
-#describe command("paxtest blackhat") do
-# paxtest_check_killed.each do |killed|
-# its(:stdout) { should match /^#{Regexp.escape(killed)}\s*:\sKilled/ }
-# end
-# its(:stdout) { should_not match /Vulnerable/i }
-# its(:exit_status) { should eq 0 }
-#end
-
-# ensure generic linux kernels have been removed
-describe command("dpkg --get-selections '^linux-image-.*generic$'") do
- its(:stdout) { should_not match /^linux-image-.*generic$/ }
- its(:stderr) { should match /^dpkg: no packages found matching / }
- its(:exit_status) { should eq 0 }
-end
-
-# ensure linux kernel headers have been removed
-describe command("dpkg --get-selections '^linux-headers-.*'") do
- its(:stdout) { should_not match /^linux-headers-.*/ }
- its(:stderr) { should match /^dpkg: no packages found matching / }
- its(:exit_status) { should eq 0 }
-end
-
-# ensure grub-pc is marked as manually installed (necessary for vagrant/vbox)
-describe command('apt-mark showmanual grub-pc') do
- its(:stdout) { should match /^grub-pc$/ }
-end
-
-# ensure old packages have been autoremoved
-describe command('apt-get --dry-run autoremove') do
- its(:stdout) { should match /^0 upgraded, 0 newly installed, 0 to remove and 0 not upgraded\.$/ }
- its(:exit_status) { should eq 0 }
-end
-
-# Check pax flags for apache tor
-# paxctl -v /usr/sbin/apache2
-# paxctl -v /usr/sbin/tor
-# paxctl -v /usr/sbin/ntp
-# paxctl -v /usr/sbin/apt
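# A minimal illustrative sketch of how the paxctl TODO above might be expressed,
# assuming the listed binaries carry a PT_PAX_FLAGS header that paxctl can read;
# the exit-status expectation is an assumption, not verified behaviour, and this
# check is not part of the original file.
%w(/usr/sbin/apache2 /usr/sbin/tor).each do |binary|
  describe command("paxctl -v #{binary}") do
    its(:exit_status) { should eq 0 }
  end
end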
diff --git a/spec_tests/spec/ip6tables/ipt6tables_spec.rb b/spec_tests/spec/ip6tables/ipt6tables_spec.rb
deleted file mode 100644
--- a/spec_tests/spec/ip6tables/ipt6tables_spec.rb
+++ /dev/null
@@ -1,23 +0,0 @@
-# All servers should have default DROP chains
-# for IPv6 iptables rules. Therefore the ip6tables
-# spec tests are isolated in a separate directory,
-# for easy reuse among hosts.
-
-# Declare exact output for ip6tables-save
-desired_ip6tables_output = <<END_IPV6_TABLE_RULES
-*filter
-:INPUT DROP [0:0]
-:FORWARD DROP [0:0]
-:OUTPUT DROP [0:0]
-COMMIT
-END_IPV6_TABLE_RULES
-# Check for DROP rules on ip6tables.
-# serverspec does have support for the 'ip6tables' type,
-# but that doesn't ensure rule order. Since the expected IPv6 rules
-# are simply default DROP rules for default chains,
-# check for the exact output. Make sure to filter out first and
-# last lines, which are just comments with the date.
-describe command("ip6tables-save | sed '1d;$d'") do
- its(:stdout) { should eq desired_ip6tables_output }
-end
-
diff --git a/spec_tests/spec/mon-general/ossec_server_spec.rb b/spec_tests/spec/mon-general/ossec_server_spec.rb
deleted file mode 100644
--- a/spec_tests/spec/mon-general/ossec_server_spec.rb
+++ /dev/null
@@ -1,186 +0,0 @@
-# ensure hosts file references app server by ip
-describe file('/etc/hosts') do
- localhost_regex = /^#{Regexp.quote('127.0.0.1')}(\s+#{property['monitor_hostname']}){2}$/
- its(:content) { should match localhost_regex }
- app_host_regex = Regexp.quote("#{property['app_ip']} #{property['app_hostname']}")
- its(:content) { should match /^#{app_host_regex}$/ }
-end
-
-# Regression test to check for duplicate entries.
-describe command('uniq --repeated /etc/hosts') do
- its(:stdout) { should eq "" }
-end
-
-# ensure required packages are installed
-['postfix', 'procmail', 'mailutils', 'securedrop-ossec-server'].each do |pkg|
- describe package(pkg) do
- it { should be_installed }
- end
-end
-
-# ensure custom /etc/aliases is present
-describe file('/etc/aliases') do
- it { should be_file }
- it { should be_mode '644' }
- its(:content) { should match /^root: ossec$/ }
-end
-
-# ensure sasl password for smtp relay is configured
-# TODO: values below are hardcoded. for staging,
-# this is probably ok.
-describe file('/etc/postfix/sasl_passwd') do
- sasl_passwd_regex = Regexp.quote('[smtp.gmail.com]:587 [email protected]:password123')
- its(:content) { should match /^#{sasl_passwd_regex}$/ }
- it { should be_mode '400' }
-end
-
-# declare desired regex checks for stripping smtp headers
-header_checks = [
- '/^X-Originating-IP:/ IGNORE',
- '/^X-Mailer:/ IGNORE',
- '/^Mime-Version:/ IGNORE',
- '/^User-Agent:/ IGNORE',
- '/^Received:/ IGNORE',
-]
-# ensure header_checks regex to strip smtl headers are present
-describe file ('/etc/postfix/header_checks') do
- it { should be_file }
- it { should be_mode '644' }
- header_checks.each do |header_check|
- header_check_regex = Regexp.quote(header_check)
- its(:content) { should match /^#{header_check_regex}$/ }
- end
-end
-
-postfix_settings = [
- 'relayhost = [smtp.gmail.com]:587',
- 'smtp_sasl_auth_enable = yes',
- 'smtp_sasl_password_maps = hash:/etc/postfix/sasl_passwd',
- 'smtp_sasl_security_options = noanonymous',
- 'smtp_use_tls = yes',
- 'smtp_tls_session_cache_database = btree:${data_directory}/smtp_scache',
-
- # The security settings below are the default values
- # set by the Ansible config as of #1100, but the playbooks
- # still support TLS fingerprint verification. If you're testing
- # the fingerprint verification functionality, you'll need to
- # swap the strings below so the tests check for the appropriate config lines.
- # 'smtp_tls_fingerprint_digest = sha1',
- # 'smtp_tls_fingerprint_cert_match = 6D:87:EE:CB:D0:37:2F:88:B8:29:06:FB:35:F4:65:00:7F:FD:84:29',
- 'smtp_tls_security_level = secure',
- 'smtp_tls_CApath = /etc/ssl/certs',
-
- 'smtp_tls_ciphers = high',
- 'smtp_tls_protocols = TLSv1.2 TLSv1.1 TLSv1 !SSLv3 !SSLv2',
- 'myhostname = ossec.server',
- 'myorigin = $myhostname',
- 'smtpd_banner = $myhostname ESMTP $mail_name (Ubuntu)',
- 'biff = no',
- 'append_dot_mydomain = no',
- 'readme_directory = no',
- 'smtp_header_checks = regexp:/etc/postfix/header_checks',
- 'mailbox_command = /usr/bin/procmail',
- 'inet_interfaces = loopback-only',
- 'alias_maps = hash:/etc/aliases',
- 'alias_database = hash:/etc/aliases',
- 'mydestination = $myhostname, localhost.localdomain , localhost',
- 'mynetworks = 127.0.0.0/8 [::ffff:127.0.0.0]/104 [::1]/128',
- 'mailbox_size_limit = 0',
- 'recipient_delimiter = +',
-
- # This line was merged into develop via #1102,
- # but hasn't made it into a release yet, so don't check for it.
- # 'maximal_queue_lifetime = 14d',
-]
-# ensure all desired postfix settings are declared
-describe file('/etc/postfix/main.cf') do
- it { should be_file }
- it { should be_owned_by 'root' }
- it { should be_mode '644' }
- postfix_settings.each do |postfix_setting|
- postfix_setting_regex = Regexp.quote(postfix_setting)
- its(:content) { should match /^#{postfix_setting_regex}$/ }
- end
-end
-
-# ensure ossec considers app-staging host "available"
-describe command('/var/ossec/bin/list_agents -a') do
- its(:stdout) { should eq "#{property['app_hostname']}-#{property['app_ip']} is available.\n" }
-end
-
-# ensure ossec gpg homedir exists
-describe file("/var/ossec/.gnupg") do
- it { should be_directory }
- it { should be_owned_by "ossec" }
- it { should be_mode '700' }
-end
-
-# ensure test admin gpg pubkey is present
-describe file('/var/ossec/test_admin_key.pub') do
- it { should be_file }
- it { should be_mode '644' }
-end
-
-# ensure test admin gpg pubkey is in ossec keyring
-describe command('su -s /bin/bash -c "gpg --homedir /var/ossec/.gnupg --list-keys EDDDC102" ossec') do
- its(:exit_status) { should eq 0 }
- # gpg dumps a lot of output to stderr, rather than stdout
- expected_output = <<-eos
-pub 4096R/EDDDC102 2014-10-15
-uid Test/Development (DO NOT USE IN PRODUCTION) (Admin's OSSEC Alert GPG key) <[email protected]>
-sub 4096R/97D2EB39 2014-10-15
-
-eos
- its(:stdout) { should eq expected_output }
-end
-
-# ensure key files for ossec-server exist
-['/var/ossec/etc/sslmanager.key', '/var/ossec/etc/sslmanager.cert'].each do |keyfile|
- describe file(keyfile) do
- it { should be_file }
- it { should be_mode '644' }
- it { should be_owned_by 'root' }
- end
-end
-
-# declare ossec procmail settings
-ossec_procmail_settings = [
- 'VERBOSE=yes',
- 'MAILDIR=/var/mail/',
- 'DEFAULT=$MAILDIR',
- 'LOGFILE=/var/log/procmail.log',
- 'SUBJECT=`formail -xSubject:`',
- ':0 c',
- '*^To:.*root.*',
- '|/var/ossec/send_encrypted_alarm.sh',
-]
-# ensure ossec procmailrc has desired settings
-describe file("/var/ossec/.procmailrc") do
- it { should be_file }
- it { should be_mode '644' }
- it { should be_owned_by 'ossec' }
- ossec_procmail_settings.each do |ossec_procmail_setting|
- ossec_procmail_setting_regex = Regexp.quote(ossec_procmail_setting)
- its(:content) { should match /^#{ossec_procmail_setting_regex}$/ }
- end
-end
-
-describe file('/var/ossec/send_encrypted_alarm.sh') do
- it { should be_file }
- it { should be_owned_by 'ossec' }
- it { should be_mode '750' }
-end
-
-describe file('/var/log/procmail.log') do
- it { should be_file }
- it { should be_mode '660' }
- it { should be_owned_by 'ossec' }
-end
-
-# ensure that authd is no longer running,
-# as stipulated in the `remove_authd_exemptions` role
-describe command('pgrep ossec-authd') do
- # pgrep returns 1 if it finds no matching process
- its(:exit_status) { should eq 1 }
- its(:stdout) { should eq '' }
-end
diff --git a/spec_tests/spec/mon-prod/iptables_spec.rb b/spec_tests/spec/mon-prod/iptables_spec.rb
deleted file mode 100644
--- a/spec_tests/spec/mon-prod/iptables_spec.rb
+++ /dev/null
@@ -1,71 +0,0 @@
-# declare desired iptables rules
-# These rules should be present in prod.
-desired_iptables_rules = [
- '-P INPUT DROP',
- '-P FORWARD DROP',
- '-P OUTPUT DROP',
- '-N LOGNDROP',
- '-A INPUT -p tcp -m state --state RELATED,ESTABLISHED -m comment --comment "Allow traffic back for tor" -j ACCEPT',
- '-A INPUT -s 8.8.8.8/32 -p tcp -m tcp --sport 53 -m state --state RELATED,ESTABLISHED -m comment --comment "tcp/udp dns" -j ACCEPT',
- '-A INPUT -s 8.8.8.8/32 -p udp -m udp --sport 53 -m state --state RELATED,ESTABLISHED -m comment --comment "tcp/udp dns" -j ACCEPT',
- '-A INPUT -p udp -m udp --sport 123 --dport 123 -m state --state RELATED,ESTABLISHED -m comment --comment ntp -j ACCEPT',
- '-A INPUT -p tcp -m multiport --sports 80,8080,443 -m state --state RELATED,ESTABLISHED -m comment --comment "apt updates" -j ACCEPT',
- "-A INPUT -s #{property['app_ip']}/32 -p udp -m udp --dport 1514 -m state --state NEW,RELATED,ESTABLISHED -m comment --comment \"Allow OSSEC agent to monitor\" -j ACCEPT",
- '-A INPUT -p tcp -m tcp --sport 587 -m state --state RELATED,ESTABLISHED -m comment --comment "Allow ossec email alerts out" -j ACCEPT',
- '-A INPUT -i lo -m comment --comment "Allow lo to lo traffic all protocols" -j ACCEPT',
- '-A INPUT -p tcp -m state --state INVALID -m comment --comment "drop but do not log inbound invalid state packets" -j DROP',
- '-A INPUT -m comment --comment "Drop and log all other incoming traffic" -j LOGNDROP',
- "-A OUTPUT -p tcp -m owner --uid-owner #{property['tor_user_uid']} -m state --state NEW,RELATED,ESTABLISHED -m comment --comment \"tor instance that provides ssh access\" -j ACCEPT",
- "-A OUTPUT -m owner --uid-owner #{property['tor_user_uid']} -m comment --comment \"Drop all other traffic for the tor instance used for ssh\" -j LOGNDROP",
- "-A OUTPUT -o lo -p tcp -m tcp --dport 22 -m owner --uid-owner #{property['tor_user_uid']} -m state --state NEW -m limit --limit 3/min --limit-burst 3 -m comment --comment \"Rate limit traffic from tor to the ssh dameon\" -j ACCEPT",
- "-A OUTPUT -o lo -p tcp -m tcp --dport 22 -m owner --uid-owner #{property['tor_user_uid']} -m state --state RELATED,ESTABLISHED -m comment --comment \"Allow the established traffic from tor to the ssh dameon\" -j ACCEPT",
- "-A OUTPUT -o lo -p tcp -m tcp --dport 22 -m owner --uid-owner #{property['tor_user_uid']} -m state --state NEW -m comment --comment \"Drop all other new connections from tor to the ssh dameon\" -j LOGNDROP",
- "-A OUTPUT -m owner --uid-owner #{property['tor_user_uid']} -m comment --comment \"Drop all other traffic for the tor instance used for ssh\" -j LOGNDROP",
- "-A OUTPUT -m owner --gid-owner #{property['ssh_group_gid']} -m comment --comment \"Drop all other outbound traffic for ssh user\" -j LOGNDROP",
- '-A OUTPUT -d 8.8.8.8/32 -p tcp -m tcp --dport 53 -m owner --uid-owner 0 -m state --state NEW,RELATED,ESTABLISHED -m comment --comment "tcp/udp dns" -j ACCEPT',
- '-A OUTPUT -d 8.8.8.8/32 -p udp -m udp --dport 53 -m owner --uid-owner 0 -m state --state NEW,RELATED,ESTABLISHED -m comment --comment "tcp/udp dns" -j ACCEPT',
- '-A OUTPUT -p udp -m udp --sport 123 --dport 123 -m owner --uid-owner 0 -m state --state NEW,RELATED,ESTABLISHED -m comment --comment ntp -j ACCEPT',
- '-A OUTPUT -p tcp -m multiport --dports 80,8080,443 -m owner --uid-owner 0 -m state --state NEW,RELATED,ESTABLISHED -m comment --comment "apt updates" -j ACCEPT',
- "-A OUTPUT -d #{property['app_ip']}/32 -p udp -m udp --sport 1514 -m state --state RELATED,ESTABLISHED -m comment --comment \"Allow OSSEC agent to monitor\" -j ACCEPT",
- "-A OUTPUT -d 8.8.8.8/32 -p tcp -m tcp --dport 53 -m owner --uid-owner #{property['postfix_user_uid']} -m state --state NEW,RELATED,ESTABLISHED -m comment --comment \"postfix dns rule\" -j ACCEPT",
- "-A OUTPUT -d 8.8.8.8/32 -p udp -m udp --dport 53 -m owner --uid-owner #{property['postfix_user_uid']} -m state --state NEW,RELATED,ESTABLISHED -m comment --comment \"postfix dns rule\" -j ACCEPT",
- "-A OUTPUT -p tcp -m tcp --dport 587 -m owner --uid-owner #{property['postfix_user_uid']} -m state --state NEW,RELATED,ESTABLISHED -m comment --comment \"Allow ossec email alerts out\" -j ACCEPT",
- '-A OUTPUT -o lo -m comment --comment "Allow lo to lo traffic all protocols" -j ACCEPT',
- '-A OUTPUT -m comment --comment "Drop all other outgoing traffic" -j DROP',
- '-A LOGNDROP -p tcp -m limit --limit 5/min -j LOG --log-tcp-options --log-ip-options --log-uid',
- '-A LOGNDROP -p udp -m limit --limit 5/min -j LOG --log-ip-options --log-uid',
- '-A LOGNDROP -p icmp -m limit --limit 5/min -j LOG --log-ip-options --log-uid',
- '-A LOGNDROP -j DROP',
-]
-
-# declare unwanted iptables rules
-# These rules should have been removed by the `remove_authd_exceptions` role
-# TODO: The Vagrantfile virtualbox static IP was hardcoded into the two rules
-# below. This will need to be fixed. Possibly with using something like
-# https://github.com/volanja/ansible_spec Using the values for IP addresses
-# from the ansible inventory should cover most use cases (except inventories
-# with just the *.onion addresses).
-unwanted_iptables_rules = [
- "-A INPUT -s #{property['app_ip']} -p tcp --sport 1515 -m state --state ESTABLISHED,RELATED -v ACCEPT -m comment --comment \"ossec authd rule only required for initial agent registration\"",
- "-A OUTPUT -d #{property['app_ip']} -p tcp --dport 1515 -m state --state NEW,ESTABLISHED,RELATED -j ACCEPT -m comment --comment \"ossec authd rule only required for initial agent registration\"",
- "-A INPUT -s #{property['app_ip']} -p tcp --dport 1515 -m state --state NEW,ESTABLISHED,RELATED -j ACCEPT",
- "-A OUTPUT -d #{property['app_ip']} -p tcp --sport 1515 -m state --state ESTABLISHED,RELATED -j ACCEPT",
-
-
- # These rules are from the staging environment
- '-A INPUT -p udp -m udp --sport 53 -m state --state NEW,RELATED,ESTABLISHED -j ACCEPT',
- '-A OUTPUT -p udp -m udp --dport 53 -m state --state NEW,RELATED,ESTABLISHED -j ACCEPT',
- '-A INPUT -p tcp -m tcp --dport 22 -m state --state NEW,RELATED,ESTABLISHED -j ACCEPT',
- '-A OUTPUT -p tcp -m tcp --sport 22 -m state --state NEW,RELATED,ESTABLISHED -j ACCEPT',
-]
-
-# check for wanted and unwanted iptables rules
-describe iptables do
- unwanted_iptables_rules.each do |unwanted_iptables_rule|
- it { should_not have_rule(unwanted_iptables_rule) }
- end
-
- desired_iptables_rules.each do |desired_iptables_rule|
- it { should have_rule(desired_iptables_rule) }
- end
-end
diff --git a/spec_tests/spec/mon-staging/iptables_spec.rb b/spec_tests/spec/mon-staging/iptables_spec.rb
deleted file mode 100644
--- a/spec_tests/spec/mon-staging/iptables_spec.rb
+++ /dev/null
@@ -1,100 +0,0 @@
-# declare desired iptables rules
-# These rules should be present in prod and staging
-# TODO: There are also hardcoded IP addresses in this section.
-# These rules were exported from a fully provisioned
-# mon-staging host running the develop branch around 2015-04-10.
-# That means they are post-0.3.2 and therefore may need to be tweaked
-# to test older versions.
-desired_iptables_rules = [
- '-P INPUT DROP',
- '-P FORWARD DROP',
- '-P OUTPUT DROP',
- '-N LOGNDROP',
- '-A INPUT -p udp -m udp --sport 53 -m state --state NEW,RELATED,ESTABLISHED -j ACCEPT',
- "-A INPUT -i #{property['staging_iface']} -p tcp -m tcp --dport 22 -m state --state NEW,RELATED,ESTABLISHED -j ACCEPT",
- '-A INPUT -p tcp -m state --state RELATED,ESTABLISHED -m comment --comment "Allow traffic back for tor" -j ACCEPT',
- '-A INPUT -s 8.8.8.8/32 -p tcp -m tcp --sport 53 -m state --state RELATED,ESTABLISHED -m comment --comment "tcp/udp dns" -j ACCEPT',
- '-A INPUT -s 8.8.8.8/32 -p udp -m udp --sport 53 -m state --state RELATED,ESTABLISHED -m comment --comment "tcp/udp dns" -j ACCEPT',
- '-A INPUT -p udp -m udp --sport 123 --dport 123 -m state --state RELATED,ESTABLISHED -m comment --comment ntp -j ACCEPT',
- '-A INPUT -p tcp -m multiport --sports 80,8080,443 -m state --state RELATED,ESTABLISHED -m comment --comment "apt updates" -j ACCEPT',
- "-A INPUT -s #{property['app_ip']}/32 -p udp -m udp --dport 1514 -m state --state NEW,RELATED,ESTABLISHED -m comment --comment \"Allow OSSEC agent to monitor\" -j ACCEPT",
- '-A INPUT -p tcp -m tcp --sport 587 -m state --state RELATED,ESTABLISHED -m comment --comment "Allow ossec email alerts out" -j ACCEPT',
- '-A INPUT -i lo -m comment --comment "Allow lo to lo traffic all protocols" -j ACCEPT',
- '-A INPUT -p tcp -m state --state INVALID -m comment --comment "drop but do not log inbound invalid state packets" -j DROP',
- '-A INPUT -m comment --comment "Drop and log all other incoming traffic" -j LOGNDROP',
- '-A OUTPUT -p udp -m udp --dport 53 -m state --state NEW,RELATED,ESTABLISHED -j ACCEPT',
- "-A OUTPUT -o #{property['staging_iface']} -p tcp -m tcp --sport 22 -m state --state NEW,RELATED,ESTABLISHED -j ACCEPT",
- "-A OUTPUT -p tcp -m owner --uid-owner #{property['tor_user_uid']} -m state --state NEW,RELATED,ESTABLISHED -m comment --comment \"tor instance that provides ssh access\" -j ACCEPT",
- "-A OUTPUT -o lo -p tcp -m tcp --dport 22 -m owner --uid-owner #{property['tor_user_uid']} -m state --state NEW -m limit --limit 3/min --limit-burst 3 -m comment --comment \"Rate limit traffic from tor to the ssh dameon\" -j ACCEPT",
- "-A OUTPUT -o lo -p tcp -m tcp --dport 22 -m owner --uid-owner #{property['tor_user_uid']} -m state --state RELATED,ESTABLISHED -m comment --comment \"Allow the established traffic from tor to the ssh dameon\" -j ACCEPT",
- "-A OUTPUT -m owner --uid-owner #{property['tor_user_uid']} -m comment --comment \"Drop all other traffic for the tor instance used for ssh\" -j LOGNDROP",
- "-A OUTPUT -m owner --gid-owner #{property['ssh_group_gid']} -m comment --comment \"Drop all other outbound traffic for ssh user\" -j LOGNDROP",
- '-A OUTPUT -d 8.8.8.8/32 -p tcp -m tcp --dport 53 -m owner --uid-owner 0 -m state --state NEW,RELATED,ESTABLISHED -m comment --comment "tcp/udp dns" -j ACCEPT',
- '-A OUTPUT -d 8.8.8.8/32 -p udp -m udp --dport 53 -m owner --uid-owner 0 -m state --state NEW,RELATED,ESTABLISHED -m comment --comment "tcp/udp dns" -j ACCEPT',
- '-A OUTPUT -p udp -m udp --sport 123 --dport 123 -m owner --uid-owner 0 -m state --state NEW,RELATED,ESTABLISHED -m comment --comment ntp -j ACCEPT',
- '-A OUTPUT -p tcp -m multiport --dports 80,8080,443 -m owner --uid-owner 0 -m state --state NEW,RELATED,ESTABLISHED -m comment --comment "apt updates" -j ACCEPT',
- "-A OUTPUT -d #{property['app_ip']}/32 -p udp -m udp --sport 1514 -m state --state RELATED,ESTABLISHED -m comment --comment \"Allow OSSEC agent to monitor\" -j ACCEPT",
- "-A OUTPUT -d 8.8.8.8/32 -p tcp -m tcp --dport 53 -m owner --uid-owner #{property['postfix_user_uid']} -m state --state NEW,RELATED,ESTABLISHED -m comment --comment \"postfix dns rule\" -j ACCEPT",
- "-A OUTPUT -d 8.8.8.8/32 -p udp -m udp --dport 53 -m owner --uid-owner #{property['postfix_user_uid']} -m state --state NEW,RELATED,ESTABLISHED -m comment --comment \"postfix dns rule\" -j ACCEPT",
- "-A OUTPUT -p tcp -m tcp --dport 587 -m owner --uid-owner #{property['postfix_user_uid']} -m state --state NEW,RELATED,ESTABLISHED -m comment --comment \"Allow ossec email alerts out\" -j ACCEPT",
- '-A OUTPUT -o lo -m comment --comment "Allow lo to lo traffic all protocols" -j ACCEPT',
- '-A OUTPUT -m comment --comment "Drop all other outgoing traffic" -j DROP',
- '-A LOGNDROP -p tcp -m limit --limit 5/min -j LOG --log-tcp-options --log-ip-options --log-uid',
- '-A LOGNDROP -p udp -m limit --limit 5/min -j LOG --log-ip-options --log-uid',
- '-A LOGNDROP -p icmp -m limit --limit 5/min -j LOG --log-ip-options --log-uid',
- '-A LOGNDROP -j DROP',
-]
-
-# declare unwanted iptables rules
-# These rules should have been removed by the `remove_authd_exceptions` role
-# TODO: The Vagrantfile virtualbox static IP was hardcoded into the two rules
-# below. This will need to be fixed. Possibly with using something like
-# https://github.com/volanja/ansible_spec Using the values for IP addresses
-# from the ansible inventory should cover most use cases (except inventories
-# with just the *.onion addresses).
-unwanted_iptables_rules = [
- "-A INPUT -s #{property['app_ip']} -p tcp --sport 1515 -m state --state ESTABLISHED,RELATED -v ACCEPT -m comment --comment \"ossec authd rule only required for initial agent registration\"",
- "-A OUTPUT -d #{property['app_ip']} -p tcp --dport 1515 -m state --state NEW,ESTABLISHED,RELATED -j ACCEPT -m comment --comment \"ossec authd rule only required for initial agent registration\"",
- "-A INPUT -s #{property['app_ip']} -p tcp --dport 1515 -m state --state NEW,ESTABLISHED,RELATED -j ACCEPT",
- "-A OUTPUT -d #{property['app_ip']} -p tcp --sport 1515 -m state --state ESTABLISHED,RELATED -j ACCEPT",
-]
-
-# check for wanted and unwanted iptables rules
-describe iptables do
- unwanted_iptables_rules.each do |unwanted_iptables_rule|
- it { should_not have_rule(unwanted_iptables_rule) }
- end
-
- desired_iptables_rules.each do |desired_iptables_rule|
- it { should have_rule(desired_iptables_rule) }
- end
-end
-
-# TODO: the iptables rules aren't checked in order,
-# just by grepping individually for each rule.
-# Rule order matters, so work on a before/after chain
-# that validates order.
-
-
-# try to validate local networking config
-describe host(property['app_hostname']) do
- app_ip_regex = Regexp.quote(property['app_ip'])
- its(:ipaddress) { should match /^#{app_ip_regex}$/ }
- it { should be_resolvable.by('hosts') }
- # TODO: consider adding checks for service absence here.
-end
-
-# check for ssh listening (direct access in staging)
-describe port(22) do
- it{ should be_listening.on('0.0.0.0').with('tcp') }
-end
-
-# check for postfix listening for sending ossec email alerts
-describe port(25) do
- it{ should be_listening.on('127.0.0.1').with('tcp') }
-end
-
-# check for ossec-server listening
-describe port(1514) do
- it{ should be_listening.on('0.0.0.0').with('udp') }
-end
diff --git a/spec_tests/spec/spec_helper.rb b/spec_tests/spec/spec_helper.rb
deleted file mode 100644
--- a/spec_tests/spec/spec_helper.rb
+++ /dev/null
@@ -1,118 +0,0 @@
-require 'serverspec'
-require 'net/ssh'
-require 'tempfile'
-require 'yaml'
-
-set :backend, :ssh
-
-if ENV['ASK_SUDO_PASSWORD']
- begin
- require 'highline/import'
- rescue LoadError
- fail "highline is not available. Try installing it."
- end
- set :sudo_password, ask("Enter sudo password: ") { |q| q.echo = false }
-else
- set :sudo_password, ENV['SUDO_PASSWORD']
-end
-
-host = ENV['TARGET_HOST']
-
-# Using backticks for a subprocess call means
-# STDOUT will be masked, which blocks silently for
-# a long time if the host isn't up. Using IO.popen
-# instead allows for a tee-like interface
-#`vagrant up #{host}`
-IO.popen("vagrant up #{host}") do |output|
- while line = output.gets do
- # simply echo it back
- puts line
- end
-end
-
-# Determine SSH config for this host.
-config = Tempfile.new('', Dir.tmpdir)
-config.write(`vagrant ssh-config #{host}`)
-config.close
-options = Net::SSH::Config.for(host, [config.path])
-options[:user]
-set :host, options[:host_name] || host
-set :ssh_options, options
-
-
-# Given a hostname, return dynamic variables for spectests.
-# Variables include entries hard-coded in a YAML file specifically
-# for spectests, as well as values retrieved from VMs over SSH.
-def retrieve_vars(hostname)
-
- # Accept basename for vars YAML file,
- # then return a hash of those settings.
- def read_vars_file(file_basename)
- vars_filepath = File.expand_path(File.join(
- File.dirname(__FILE__), 'vars', "#{file_basename}.yml"
- ))
- return YAML.load_file(vars_filepath)
- end
- # This clunky if statement ain't pretty, but it gets the job done.
- # A case statement assumes only one permissible match, whereas many
- # matches should be able to add variables before this function returns.
- if hostname.match(/^development$/)
- vars = read_vars_file('development')
- end
- if hostname.match(/-staging$/)
- vars = read_vars_file('staging')
- # Ideally these IP addresses would be cached, since they don't
- # change during a test run. Right now, both values are looked up twice,
- # once for each app/mon host.
- vars['app_ip'] = retrieve_ip_addr('app-staging')
- vars['monitor_ip'] = retrieve_ip_addr('mon-staging')
- end
- if hostname.match(/-prod$/)
- vars = read_vars_file('prod')
- vars['app_ip'] = retrieve_ip_addr('app-prod')
- vars['monitor_ip'] = retrieve_ip_addr('mon-prod')
- end
- if hostname.match(/^(app|mon)/)
- vars['tor_user_uid'] = vagrant_ssh_cmd(hostname, "id -u debian-tor")
- vars['ssh_group_gid'] = vagrant_ssh_cmd(hostname, "getent group ssh | cut -d: -f3")
- end
- if hostname.match(/^app/)
- vars['apache_user_uid'] = vagrant_ssh_cmd(hostname, "id -u www-data")
- end
- if hostname.match(/^mon/)
- vars['postfix_user_uid'] = vagrant_ssh_cmd(hostname, "id -u postfix")
- end
- return vars
-end
-
-# SSH into Vagrant machine, run command, return output.
-def vagrant_ssh_cmd(hostname, command)
- # Dump STDERR, to avoid noisy "Connection closed" messages.
- # Error code is checked below.
- filter_stderr = "2> /dev/null"
- vagrant_cmd = "vagrant ssh #{hostname} --command '#{command}'"
- result = `#{vagrant_cmd} #{filter_stderr}`.rstrip
- if $? != 0
- puts "Command failed: #{vagrant_cmd}"
- exit(1)
- end
- return result
-end
-
-# Look up IP address for given hostname, so spectests
-# have accurate dynamic vars regardless of provider.
-def retrieve_ip_addr(hostname)
- ip_output = vagrant_ssh_cmd(hostname, "hostname -I")
- iface1, iface2 = ip_output.split()
- # Vagrant VirtualBox images will always have eth0 as the NAT device,
- # so we actually what the IP address from the eth1 device on VirtualBox hosts.
- # In other testing environments, we can assume the first address is correct.
- if `vagrant status #{hostname} --machine-readable`.match(/#{hostname},provider-name,virtualbox/m)
- return iface2
- else
- return iface1
- end
-end
-
-# Load dynamic variables for current host.
-set_property retrieve_vars(host)
diff --git a/spec_tests/spec/vars/prod.yml b/spec_tests/spec/vars/prod.yml
deleted file mode 100644
--- a/spec_tests/spec/vars/prod.yml
+++ /dev/null
@@ -1,12 +0,0 @@
----
-### General (used by more than one role) ###
-securedrop_code: /var/www/securedrop
-securedrop_data: /var/lib/securedrop
-securedrop_user: www-data
-
-app_hostname: app-prod
-monitor_hostname: mon-prod
-
-apache_listening_address: 127.0.0.1
-apache_source_log: /dev/null
-apache_allow_from: 127.0.0.1
diff --git a/spec_tests/spec/vars/staging.yml b/spec_tests/spec/vars/staging.yml
deleted file mode 100644
--- a/spec_tests/spec/vars/staging.yml
+++ /dev/null
@@ -1,21 +0,0 @@
----
-### General (used by more than one role) ###
-securedrop_code: /var/www/securedrop
-securedrop_data: /var/lib/securedrop
-securedrop_user: www-data
-
-app_hostname: app-staging
-monitor_hostname: mon-staging
-
-apache_listening_address: 0.0.0.0
-apache_source_log: /var/log/apache2/source-error.log
-apache_allow_from: all
-
-# Hard-coding the default interface used in the
-# staging rules for now. In VirtualBox, eth0 is a
-# NAT device used for SSH port forwards. In DigitalOcean,
-# eth0 is the only device, so still the correct device
-# to allow staging access to. TODO: virtualize firewall
-# so these iface values are more flexible. Also consider
-# enabling private_networking in DO.
-staging_iface: eth0
diff --git a/testinfra/app-code/test_haveged.py b/testinfra/app-code/test_haveged.py
new file mode 100644
--- /dev/null
+++ b/testinfra/app-code/test_haveged.py
@@ -0,0 +1,34 @@
+def test_haveged_config(File):
+ """
+    Ensure haveged's low entropy watermark is sufficiently high.
+ """
+ f = File('/etc/default/haveged')
+ assert f.is_file
+ assert f.user == 'root'
+ assert f.group == 'root'
+ assert oct(f.mode) == '0644'
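+    # The -w flag sets haveged's write_wakeup_threshold (the low entropy
+    # watermark) in bits, i.e. refill whenever available entropy drops below 2400.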
+ assert f.contains('^DAEMON_ARGS="-w 2400"$')
+
+
+def test_haveged_no_duplicate_lines(Command):
+ """
+ Regression test to check for duplicate entries. Earlier playbooks
+ for configuring the SD instances needlessly appended the `DAEMON_ARGS`
+    line every time the playbook was run. Fortunately the duplicate lines don't
+ break the service, but it's still poor form.
+ """
+ c = Command("uniq --repeated /etc/default/haveged")
+ assert c.rc == 0
+ assert c.stdout == ""
+
+
+def test_haveged_is_running(Service, Sudo):
+ """
+ Ensure haveged service is running, to provide additional entropy.
+ """
+ # Sudo is necessary to read /proc when running under grsecurity,
+ # which the App hosts do. Not technically necessary under development.
+ with Sudo():
+ s = Service("haveged")
+ assert s.is_running
+ assert s.is_enabled
diff --git a/testinfra/app-code/test_redis_worker.py b/testinfra/app-code/test_redis_worker.py
new file mode 100644
--- /dev/null
+++ b/testinfra/app-code/test_redis_worker.py
@@ -0,0 +1,44 @@
+import pytest
+import re
+
+
+securedrop_test_vars = pytest.securedrop_test_vars
+
+
[email protected]('config_line', [
+ '[program:securedrop_worker]',
+ 'command=/usr/local/bin/rqworker',
+ "directory={}".format(securedrop_test_vars.securedrop_code),
+ 'autostart=true',
+ 'autorestart=true',
+ 'startretries=3',
+ 'stderr_logfile=/var/log/securedrop_worker/err.log',
+ 'stdout_logfile=/var/log/securedrop_worker/out.log',
+ "user={}".format(securedrop_test_vars.securedrop_user),
+ 'environment=HOME="/tmp/python-gnupg"',
+])
+def test_redis_worker_configuration(File, config_line):
+ """
+ Ensure SecureDrop Redis worker config for supervisor service
+ management is configured correctly.
+ """
+ f = File('/etc/supervisor/conf.d/securedrop_worker.conf')
+ # Config lines may have special characters such as [] which will
+ # throw off the regex matching, so let's escape those chars.
+ regex = re.escape(config_line)
+ assert f.contains('^{}$'.format(regex))
+
+
+def test_redis_worker_config_file(File):
+ """
+ Ensure SecureDrop Redis worker config for supervisor service
+ management has proper ownership and mode.
+
+ Using separate test so that the parametrization doesn't rerun
+ the file mode checks, which would be useless.
+ """
+ f = File('/etc/supervisor/conf.d/securedrop_worker.conf')
+ assert f.is_file
+ assert oct(f.mode) == '0644'
+ assert f.user == "root"
+ assert f.group == "root"
diff --git a/testinfra/app-code/test_securedrop_app_code.py b/testinfra/app-code/test_securedrop_app_code.py
new file mode 100644
--- /dev/null
+++ b/testinfra/app-code/test_securedrop_app_code.py
@@ -0,0 +1,77 @@
+import pytest
+
+
+securedrop_test_vars = pytest.securedrop_test_vars
+
+
+def test_apache_default_docroot_is_absent(File):
+ """
+ Ensure that the default docroot for Apache, containing static HTML
+ under Debian, has been removed. Leaving it in place can be a privacy
+ leak, as it displays version information by default.
+ """
+ assert not File('/var/www/html').exists
+
+
[email protected]('package', [
+ 'apparmor-utils',
+ 'gnupg2',
+ 'haveged',
+ 'python',
+ 'python-pip',
+ 'redis-server',
+ 'secure-delete',
+ 'sqlite',
+ 'supervisor',
+])
+def test_securedrop_application_apt_dependencies(Package, package):
+ """
+ Ensure apt dependencies required to install `securedrop-app-code`
+ are present. These should be pulled in automatically via apt,
+ due to specification in Depends in package control file.
+ """
+ assert Package(package).is_installed
+
+
+def test_securedrop_application_test_journalist_key(File, Sudo):
+ """
+ Ensure the SecureDrop Application GPG public key file is present.
+ This is a test-only pubkey provided in the repository strictly for testing.
+ """
+ pubkey_file = File("{}/test_journalist_key.pub".format(
+ securedrop_test_vars.securedrop_data))
+ # Sudo is only necessary when testing against app hosts, since the
+ # permissions are tighter. Let's elevate privileges so we're sure
+ # we can read the correct file attributes and test them.
+ with Sudo():
+ assert pubkey_file.is_file
+ assert pubkey_file.user == "root"
+ assert pubkey_file.group == "root"
+ assert oct(pubkey_file.mode) == "0644"
+
+ # Let's make sure the corresponding fingerprint is specified
+ # in the SecureDrop app configuration.
+ securedrop_config = File("{}/config.py".format(
+ securedrop_test_vars.securedrop_code))
+ with Sudo():
+ assert securedrop_config.is_file
+ assert securedrop_config.user == securedrop_test_vars.securedrop_user
+ assert securedrop_config.group == securedrop_test_vars.securedrop_user
+ assert oct(securedrop_config.mode) == "0600"
+ assert securedrop_config.contains(
+ "^JOURNALIST_KEY = '65A1B5FF195B56353CC63DFFCC40EF1228271441'$")
+
+
+def test_securedrop_application_sqlite_db(File, Sudo):
+ """
+ Ensure sqlite database exists for application. The database file should be
+ created by Ansible on first run.
+ """
+ # Sudo is necessary under the App hosts, which have restrictive file
+ # permissions on the doc root. Not technically necessary under dev host.
+ with Sudo():
+ f = File("{}/db.sqlite".format(securedrop_test_vars.securedrop_data))
+ assert f.is_file
+ assert f.user == securedrop_test_vars.securedrop_user
+ assert f.group == securedrop_test_vars.securedrop_user
+ assert oct(f.mode) == "0644"
diff --git a/testinfra/app/apache/test_apache_journalist_interface.py b/testinfra/app/apache/test_apache_journalist_interface.py
new file mode 100644
--- /dev/null
+++ b/testinfra/app/apache/test_apache_journalist_interface.py
@@ -0,0 +1,142 @@
+import pytest
+import re
+
+
+# Hard-coding test vars for development during
+# transition from ServerSpec to TestInfra. Test vars
+# should be imported based on hostname.
+securedrop_test_vars = dict(
+ securedrop_user="vagrant",
+ securedrop_code="/var/www/securedrop",
+ securedrop_data="/var/lib/securedrop",
+ apache_allow_from="all",
+ apache_listening_address="0.0.0.0",
+ apache_source_log="/var/log/apache2/source-error.log",
+)
+
+
+# Setting once so it can be reused in multiple tests.
+wanted_apache_headers = [
+ 'Header edit Set-Cookie ^(.*)$ $1;HttpOnly',
+ 'Header always append X-Frame-Options: DENY',
+ 'Header set X-XSS-Protection: "1; mode=block"',
+ 'Header set X-Content-Type-Options: nosniff',
+ 'Header set X-Download-Options: noopen',
+ "Header set X-Content-Security-Policy: \"default-src 'self'\"",
+ "Header set Content-Security-Policy: \"default-src 'self'\"",
+ 'Header unset Etag',
+]
+
+# Test is not DRY; haven't figured out how to parametrize on
+# multiple inputs, so explicitly redeclaring test logic.
[email protected]("header", wanted_apache_headers)
+def test_apache_headers_journalist_interface(File, header):
+ """
+ Test for expected headers in Document Interface vhost config.
+ """
+ f = File("/etc/apache2/sites-available/journalist.conf")
+ assert f.is_file
+ assert f.user == "root"
+ assert f.group == "root"
+ assert oct(f.mode) == "0644"
+ header_regex = "^{}$".format(re.escape(header))
+ assert re.search(header_regex, f.content, re.M)
+
+# Block of directory declarations for Apache vhost is common
+# to both Source and Journalist interfaces. Hardcoding these values
+# across multiple test files to speed up development; they should be
+# written once and imported in a DRY manner.
+common_apache2_directory_declarations = """
+<Directory />
+ Options None
+ AllowOverride None
+ Order deny,allow
+ Deny from all
+</Directory>
+
+<Directory /var/www/>
+ Options None
+ AllowOverride None
+ <Limit GET POST HEAD>
+ Order allow,deny
+ allow from {apache_allow_from}
+ </Limit>
+ <LimitExcept GET POST HEAD>
+ Order deny,allow
+ Deny from all
+ </LimitExcept>
+</Directory>
+
+<Directory {securedrop_code}>
+ Options None
+ AllowOverride None
+ <Limit GET POST HEAD>
+ Order allow,deny
+ allow from {apache_allow_from}
+ </Limit>
+ <LimitExcept GET POST HEAD>
+ Order deny,allow
+ Deny from all
+ </LimitExcept>
+</Directory>
+""".lstrip().rstrip().format(**securedrop_test_vars)
+
+
+# declare journalist-specific apache configs
[email protected]("apache_opt", [
+ 'Header set Cache-Control "max-age=1800"',
+ "<VirtualHost {}:8080>".format(securedrop_test_vars['apache_listening_address']),
+ "DocumentRoot {}/static".format(securedrop_test_vars['securedrop_code']),
+ "Alias /static {}/static".format(securedrop_test_vars['securedrop_code']),
+ "WSGIDaemonProcess journalist processes=2 threads=30 display-name=%{GROUP}"+" python-path={}".format(securedrop_test_vars['securedrop_code']),
+ 'WSGIProcessGroup journalist',
+ 'WSGIScriptAlias / /var/www/journalist.wsgi/',
+ 'AddType text/html .py',
+ 'XSendFile On',
+ 'XSendFilePath /var/lib/securedrop/store/',
+ 'XSendFilePath /var/lib/securedrop/tmp/',
+ 'ErrorLog /var/log/apache2/journalist-error.log',
+ 'CustomLog /var/log/apache2/journalist-access.log combined',
+])
+def test_apache_config_journalist_interface(File, apache_opt):
+ """
+ Ensure the necessary Apache settings for serving the application
+ are in place. Some values will change according to the host,
+ e.g. app-staging versus app-prod will have different listening
+ addresses, depending on whether Tor connections are forced.
+
+ These checks apply only to the Document Interface, used by Journalists.
+ """
+ f = File("/etc/apache2/sites-available/journalist.conf")
+ assert f.is_file
+ assert f.user == "root"
+ assert f.group == "root"
+ assert oct(f.mode) == "0644"
+ regex = "^{}$".format(re.escape(apache_opt))
+ assert re.search(regex, f.content, re.M)
+
+
+def test_apache_journalist_interface_vhost(File):
+ """
+ Ensure the document root is configured with correct access restrictions
+ for serving Journalist Interface application code.
+ """
+ f = File("/etc/apache2/sites-available/journalist.conf")
+ assert common_apache2_directory_declarations in f.content
+
+
+# Expect to fail pending fix for LogFormat declaration.
[email protected]
+def test_apache_logging_journalist_interface(File):
+ """
+ Check that logging is configured correctly for the Journalist Interface.
+ The actions of Journalists are logged by the system, so that an Admin can
+ investigate incidents and track access.
+
+ Logs were broken for some period of time, logging only "combined" to the logfile,
+ rather than the combined LogFormat intended.
+ """
+ f = File("/var/log/apache2/journalist-access.log")
+ assert f.is_file
+ assert f.size > 0 # will fail if no journalist account used
+ assert not f.contains("^combined$")
diff --git a/testinfra/app/apache/test_apache_service.py b/testinfra/app/apache/test_apache_service.py
new file mode 100644
--- /dev/null
+++ b/testinfra/app/apache/test_apache_service.py
@@ -0,0 +1,79 @@
+import pytest
+import re
+
+
+# Hard-coding test vars for development during
+# transition from ServerSpec to TestInfra. Test vars
+# should be imported based on hostname.
+securedrop_test_vars = dict(
+ securedrop_user="vagrant",
+ securedrop_code="/var/www/securedrop",
+ securedrop_data="/var/lib/securedrop",
+ apache_allow_from="all",
+ apache_listening_address="0.0.0.0",
+ apache_source_log="/var/log/apache2/source-error.log",
+)
+
+
[email protected]("apache_site", [
+ "source",
+ "journalist",
+])
+def test_apache_enabled_sites(Command, Sudo, apache_site):
+ """
+ Ensure the Source and Journalist interfaces are enabled.
+ """
+ with Sudo():
+ c = Command("/usr/sbin/a2query -s {}".format(apache_site))
+ assert "{} (enabled".format(apache_site) in c.stdout
+ assert c.rc == 0
+
+
[email protected]("apache_site", [
+ "000-default",
+])
+def test_apache_disabled_sites(Command, apache_site):
+ """
+ Ensure the default HTML document root is disabled.
+ """
+ c = Command("a2query -s {}".format(apache_site))
+ assert "No site matches {} (disabled".format(apache_site) in c.stderr
+ assert c.rc == 32
+
+
+def test_apache_service(Service, Sudo):
+ """
+ Ensure Apache service is running.
+ """
+ # Sudo is necessary to run `service apache2 status`, otherwise
+ # the service is falsely reported as not running.
+ with Sudo():
+ s = Service("apache2")
+ assert s.is_running
+ assert s.is_enabled
+
+
+def test_apache_user(User):
+ """
+ Ensure user account for running application code is configured correctly.
+ """
+ u = User("www-data")
+ assert u.exists
+ assert u.home == "/var/www"
+ assert u.shell == "/usr/sbin/nologin"
+
+
[email protected]("port", [
+ "80",
+ "8080",
+])
+def test_apache_listening(Socket, Sudo, port):
+ """
+ Ensure Apache is listening on proper ports and interfaces.
+ In staging, expect the service to be bound to 0.0.0.0,
+ but in prod, it should be restricted to 127.0.0.1.
+ """
+ # Sudo is necessary to read from /proc/net/tcp.
+ with Sudo():
+ s = Socket("tcp://{}:{}".format(securedrop_test_vars['apache_listening_address'], port))
+ assert s.is_listening
diff --git a/testinfra/app/apache/test_apache_source_interface.py b/testinfra/app/apache/test_apache_source_interface.py
new file mode 100644
--- /dev/null
+++ b/testinfra/app/apache/test_apache_source_interface.py
@@ -0,0 +1,56 @@
+import pytest
+import re
+
+
+securedrop_test_vars = pytest.securedrop_test_vars
+
+
[email protected]("header", securedrop_test_vars.wanted_apache_headers)
+def test_apache_headers_source_interface(File, header):
+ """
+ Test for expected headers in Source Interface vhost config.
+ """
+ f = File("/etc/apache2/sites-available/source.conf")
+ assert f.is_file
+ assert f.user == "root"
+ assert f.group == "root"
+ assert oct(f.mode) == "0644"
+ header_regex = "^{}$".format(re.escape(header))
+ assert re.search(header_regex, f.content, re.M)
+
+
[email protected]("apache_opt", [
+ 'Header set Cache-Control "max-age=1800, must-revalidate"',
+ "<VirtualHost {}:80>".format(securedrop_test_vars.apache_listening_address),
+ "DocumentRoot {}/static".format(securedrop_test_vars.securedrop_code),
+ "Alias /static {}/static".format(securedrop_test_vars.securedrop_code),
+ "WSGIDaemonProcess source processes=2 threads=30 display-name=%{GROUP}"+" python-path={}".format(securedrop_test_vars.securedrop_code),
+ 'WSGIProcessGroup source',
+ 'WSGIScriptAlias / /var/www/source.wsgi/',
+ 'AddType text/html .py',
+ 'XSendFile Off',
+ 'LimitRequestBody 524288000',
+ 'ErrorDocument 400 /notfound',
+ 'ErrorDocument 401 /notfound',
+ 'ErrorDocument 403 /notfound',
+ 'ErrorDocument 404 /notfound',
+ 'ErrorDocument 500 /notfound',
+ "ErrorLog {}".format(securedrop_test_vars.apache_source_log),
+])
+def test_apache_config_source_interface(File, apache_opt):
+ """
+ Ensure the necessary Apache settings for serving the application
+ are in place. Some values will change according to the host,
+ e.g. app-staging versus app-prod will have different listening
+ addresses, depending on whether Tor connections are forced.
+
+ These checks apply only to the Source Interface, used by Sources.
+ """
+ f = File("/etc/apache2/sites-available/source.conf")
+ assert f.is_file
+ assert f.user == "root"
+ assert f.group == "root"
+ assert oct(f.mode) == "0644"
+ regex = "^{}$".format(re.escape(apache_opt))
+ assert re.search(regex, f.content, re.M)
+
diff --git a/testinfra/app/apache/test_apache_system_config.py b/testinfra/app/apache/test_apache_system_config.py
new file mode 100644
--- /dev/null
+++ b/testinfra/app/apache/test_apache_system_config.py
@@ -0,0 +1,173 @@
+import pytest
+import re
+
+
+# Hard-coding test vars for development during
+# transition from ServerSpec to TestInfra. Test vars
+# should be imported based on hostname.
+securedrop_test_vars = dict(
+ securedrop_user="vagrant",
+ securedrop_code="/var/www/securedrop",
+ securedrop_data="/var/lib/securedrop",
+ apache_allow_from="all",
+ apache_listening_address="0.0.0.0",
+ apache_source_log="/var/log/apache2/source-error.log",
+)
+
+
[email protected]("package", [
+ "apache2-mpm-worker",
+ "libapache2-mod-wsgi",
+ "libapache2-mod-xsendfile",
+])
+def test_apache_apt_packages(Package, package):
+ """
+ Ensure required Apache packages are installed.
+ """
+ assert Package(package).is_installed
+
+
[email protected]("apache_opt", [
+ "ServerTokens Prod",
+ "ServerSignature Off",
+ "TraceEnable Off",
+])
+def test_apache_security_config(File, apache_opt):
+ """
+ Ensure required apache2 security config file is present.
+
+ Refer to #643, which states that /etc/apache2/security
+ is superfluous, and not even used in our config right now.
+ We should update the Ansible config to move the file
+ to /etc/apache2/conf-available/security.conf.
+ """
+ f = File("/etc/apache2/security")
+ assert f.is_file
+ assert f.user == "root"
+ assert f.group == "root"
+ assert oct(f.mode) == "0644"
+
+ assert f.contains("^{}$".format(apache_opt))
+
+
+# OK to fail here, pending updates to Ansible config.
[email protected]
+def test_apache_security_config_deprecated(File):
+ """
+ Ensure that /etc/apache2/security is absent. See #643 for discussion.
+ Tokens set in that file should be moved to
+ /etc/apache2/conf-available/security.conf.
+ """
+ assert not File("/etc/apache2/security").exists
+ assert File("/etc/apache2/config-available/security.conf").exists
+
+
[email protected]("apache_opt", [
+ 'Mutex file:${APACHE_LOCK_DIR} default',
+ 'PidFile ${APACHE_PID_FILE}',
+ 'Timeout 60',
+ 'KeepAlive On',
+ 'MaxKeepAliveRequests 100',
+ 'KeepAliveTimeout 5',
+ 'User www-data',
+ 'Group www-data',
+ 'AddDefaultCharset UTF-8',
+ 'DefaultType None',
+ 'HostnameLookups Off',
+ 'ErrorLog /dev/null',
+ 'LogLevel crit',
+ 'IncludeOptional mods-enabled/*.load',
+ 'IncludeOptional mods-enabled/*.conf',
+ 'Include ports.conf',
+ 'IncludeOptional sites-enabled/*.conf',
+ 'ServerTokens Prod',
+ 'ServerSignature Off',
+ 'TraceEnable Off',
+])
+def test_apache_config_settings(File, apache_opt):
+ """
+ Check required Apache config settings for general server.
+ These checks do not target individual interfaces, e.g.
+ Source versus Document Interface, and instead apply to
+ Apache more generally.
+ """
+ f = File("/etc/apache2/apache2.conf")
+ assert f.is_file
+ assert f.user == "root"
+ assert f.group == "root"
+ assert oct(f.mode) == "0644"
+ assert re.search("^{}$".format(re.escape(apache_opt)), f.content, re.M)
+
+
[email protected]("port", [
+ "80",
+ "8080",
+])
+def test_apache_ports_config(File, SystemInfo, port):
+ """
+    Ensure the Apache ports config contains the expected items, which
+    specify how the Source and Document Interfaces are configured to be
+    served over Tor. On staging hosts, they will listen on any interface,
+ to permit port forwarding for local testing, but in production,
+ they're restricted to localhost, for use over Tor.
+ """
+ f = File("/etc/apache2/ports.conf")
+ assert f.is_file
+ assert f.user == "root"
+ assert f.group == "root"
+ assert oct(f.mode) == "0644"
+
+ listening_regex = "^Listen {}:{}$".format(re.escape(
+ securedrop_test_vars['apache_listening_address']), port)
+ assert f.contains(listening_regex)
+
+
[email protected]("apache_module", [
+ 'access_compat',
+ 'authn_core',
+ 'alias',
+ 'authz_core',
+ 'authz_host',
+ 'authz_user',
+ 'deflate',
+ 'filter',
+ 'dir',
+ 'headers',
+ 'mime',
+ 'mpm_event',
+ 'negotiation',
+ 'reqtimeout',
+ 'rewrite',
+ 'wsgi',
+ 'xsendfile',
+])
+def test_apache_modules_present(Command, Sudo, apache_module):
+ """
+ Ensure presence of required Apache modules. Application will not work
+ correctly if these are missing. A separate test will check for
+ disabled modules.
+ """
+ with Sudo():
+ c = Command("/usr/sbin/a2query -m {}".format(apache_module))
+ assert "{} (enabled".format(apache_module) in c.stdout
+ assert c.rc == 0
+
+
[email protected]("apache_module", [
+ 'auth_basic',
+ 'authn_file',
+ 'autoindex',
+ 'env',
+ 'setenvif',
+ 'status',
+])
+def test_apache_modules_absent(Command, Sudo, apache_module):
+ """
+ Ensure absence of unwanted Apache modules. Application does not require
+ these modules, so they should be disabled to reduce attack surface.
+    A separate test will check for enabled modules.
+ """
+ with Sudo():
+ c = Command("/usr/sbin/a2query -m {}".format(apache_module))
+ assert "No module matches {} (disabled".format(apache_module) in c.stderr
+ assert c.rc == 32
diff --git a/testinfra/app/test_apparmor.py b/testinfra/app/test_apparmor.py
new file mode 100644
--- /dev/null
+++ b/testinfra/app/test_apparmor.py
@@ -0,0 +1,97 @@
+import os
+import pytest
+
+
+sdvars = pytest.securedrop_test_vars
+
+
[email protected]('pkg', ['apparmor', 'apparmor-utils'])
+def test_apparmor_pkg(Package, pkg):
+ """ Apparmor package dependencies """
+ assert Package(pkg).is_installed
+
+def test_apparmor_enabled(Command, Sudo):
+ """ Check that apparmor is enabled """
+ with Sudo():
+ assert Command("aa-status --enabled").rc == 0
+
+apache2_capabilities = [
+ 'kill',
+ 'net_bind_service',
+ 'sys_ptrace'
+ ]
[email protected]('cap', apache2_capabilities)
+def test_apparmor_apache_capabilities(Command, cap):
+ """ check for exact list of expected app-armor capabilities for apache2 """
+ c = Command("perl -nE \'/^\s+capability\s+(\w+),$/ && say $1\' /etc/apparmor.d/usr.sbin.apache2")
+ assert cap in c.stdout
+
+def test_apparmor_apache_exact_capabilities(Command):
+ """ ensure no extra capabilities are defined for apache2 """
+ c = Command("grep -ic capability /etc/apparmor.d/usr.sbin.apache2")
+ assert str(len(apache2_capabilities)) == c.stdout
+
+tor_capabilities = ['setgid']
[email protected]('cap', tor_capabilities)
+def test_apparmor_tor_capabilities(Command, cap):
+ """ check for exact list of expected app-armor capabilities for tor """
+ c = Command("perl -nE \'/^\s+capability\s+(\w+),$/ && say $1\' /etc/apparmor.d/usr.sbin.tor")
+ assert cap in c.stdout
+
+def test_apparmor_tor_exact_capabilities(Command):
+ """ ensure no extra capabilities are defined for tor """
+ c = Command("grep -ic capability /etc/apparmor.d/usr.sbin.tor")
+ assert str(len(tor_capabilities)) == c.stdout
+
+enforced_profiles = [
+ 'ntpd',
+ 'apache2',
+ 'tcpdump',
+ 'tor']
[email protected]('profile', enforced_profiles)
+def test_apparmor_ensure_not_disabled(File, Sudo, profile):
+ """ Explicitly check that enforced profiles are NOT in /etc/apparmor.d/disable
+ Polling aa-status only checks the last config that was loaded, this ensures
+ it wont be disabled on reboot.
+ """
+ f = File("/etc/apparmor.d/disabled/usr.sbin.{}".format(profile))
+ with Sudo():
+ assert not f.exists
+
[email protected](os.environ['SECUREDROP_TESTINFRA_TARGET_HOST'] != 'app-staging',
+ reason='only to be run on app-staging')
[email protected]('complain_pkg', sdvars.apparmor_complain)
+def test_app_apparmor_complain(Command, Sudo, complain_pkg):
+ """ Ensure app-armor profiles are in complain mode for staging """
+ with Sudo():
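+        # The awk filter prints only the indented profile names listed under
+        # the "profiles are in complain mode." heading of aa-status output.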
+ awk = "awk '/[0-9]+ profiles.*complain./{flag=1;next}/^[0-9]+.*/{flag=0}flag'"
+ c = Command.check_output("aa-status | {}".format(awk))
+ assert complain_pkg in c
+
[email protected](os.environ['SECUREDROP_TESTINFRA_TARGET_HOST'] != 'app-staging',
+ reason='only to be run on app-staging')
+def test_app_apparmor_complain_count(Command, Sudo):
+ """ Ensure right number of app-armor profiles are in complain mode """
+ with Sudo():
+ c = Command.check_output("aa-status --complaining")
+ assert c == str(len(sdvars.apparmor_complain))
+
[email protected]('aa_enforced', sdvars.apparmor_enforce)
+def test_apparmor_enforced(Command, Sudo, aa_enforced):
+ awk = "awk '/[0-9]+ profiles.*enforce./{flag=1;next}/^[0-9]+.*/{flag=0}flag'"
+ with Sudo():
+ c = Command.check_output("aa-status | {}".format(awk))
+ assert aa_enforced in c
+
+def test_apparmor_total_profiles(Command, Sudo):
+ """ ensure number of total profiles is sum of enforced and complaining profiles """
+ with Sudo():
+ total_expected = str((len(sdvars.apparmor_enforce)
+ + len(sdvars.apparmor_complain)))
+ assert Command.check_output("aa-status --profiled") == total_expected
+
+def test_aastatus_unconfined(Command, Sudo):
+ """ Ensure that there are no processes that are unconfined but have a profile """
+ unconfined_chk = "0 processes are unconfined but have a profile defined"
+ with Sudo():
+ assert unconfined_chk in Command("aa-status").stdout
diff --git a/testinfra/app/test_appenv.py b/testinfra/app/test_appenv.py
new file mode 100644
--- /dev/null
+++ b/testinfra/app/test_appenv.py
@@ -0,0 +1,66 @@
+import pytest
+import os
+
+sdvars = pytest.securedrop_test_vars
+
[email protected]('exp_pip_pkg', sdvars.pip_deps)
+def test_app_pip_deps(PipPackage, exp_pip_pkg):
+ """ Ensure pip dependencies are installed """
+ pip = PipPackage.get_packages()
+ assert pip[exp_pip_pkg['name']]['version'] == exp_pip_pkg['version']
+
+
+def test_app_wsgi(File, Sudo):
+ """ ensure logging is enabled for source interface in staging """
+ f = File("/var/www/source.wsgi")
+ with Sudo():
+ assert f.is_file
+ assert oct(f.mode) == "0640"
+ assert f.user == 'www-data'
+ assert f.group == 'www-data'
+ assert f.contains("^import logging$")
+ assert f.contains("^logging\.basicConfig(stream=sys\.stderr)$")
+
[email protected]('app_dir', sdvars.app_directories)
+def test_app_directories(File, Sudo, app_dir):
+ """ ensure securedrop app directories exist with correct permissions """
+ f = File(app_dir)
+ with Sudo():
+ assert f.is_directory
+ assert f.user == sdvars.securedrop_user
+ assert f.group == sdvars.securedrop_user
+ assert oct(f.mode) == "0700"
+
+def test_app_code_pkg(Package):
+ """ ensure securedrop-app-code package is installed """
+ assert Package("securedrop-app-code").is_installed
+
+def test_gpg_key_in_keyring(Command, Sudo):
+ """ ensure test gpg key is present in app keyring """
+ with Sudo(sdvars.securedrop_user):
+ c = Command("gpg --homedir /var/lib/securedrop/keys --list-keys 28271441")
+ assert "pub 4096R/28271441 2013-10-12" in c.stdout
+
+def test_ensure_logo(File, Sudo):
+ """ ensure default logo header file exists """
+ f = File("{}/static/i/logo.png".format(sdvars.securedrop_code))
+ with Sudo():
+ assert oct(f.mode) == "0644"
+ assert f.user == sdvars.securedrop_user
+ assert f.group == sdvars.securedrop_user
+
+def test_securedrop_tmp_clean_cron(Command, Sudo):
+ """ Ensure securedrop tmp clean cron job in place """
+ with Sudo():
+ cronlist = Command("crontab -l").stdout
+ cronjob = "@daily {}/manage.py clean-tmp".format(sdvars.securedrop_code)
+ assert cronjob in cronlist
+
+def test_app_workerlog_dir(File, Sudo):
+ """ ensure directory for worker logs is present """
+ f = File('/var/log/securedrop_worker')
+ with Sudo():
+ assert f.is_directory
+ assert f.user == "root"
+ assert f.group == "root"
+ assert oct(f.mode) == "0644"
diff --git a/testinfra/app/test_network.py b/testinfra/app/test_network.py
new file mode 100644
--- /dev/null
+++ b/testinfra/app/test_network.py
@@ -0,0 +1,41 @@
+import os
+import difflib
+import pytest
+from jinja2 import Template
+
+
+securedrop_test_vars = pytest.securedrop_test_vars
+
+
+def test_app_iptables_rules(SystemInfo, Command, Sudo, Ansible):
+
+ # Build a dict of variables to pass to jinja for iptables comparison
+ kwargs = dict(
+ mon_ip=securedrop_test_vars.mon_ip,
+ default_interface = Ansible("setup")["ansible_facts"]["ansible_default_ipv4"]["interface"],
+ tor_user_id = Command.check_output("id -u debian-tor"),
+ securedrop_user_id = Command.check_output("id -u www-data"),
+ ssh_group_gid = Command.check_output("getent group ssh | cut -d: -f3"),
+ dns_server = securedrop_test_vars.dns_server)
+
+ # Build iptables scrape cmd, purge comments + counters
+ iptables = "iptables-save | sed 's/ \[[0-9]*\:[0-9]*\]//g' | egrep -v '^#'"
+ iptables_file = "{}/iptables-{}.j2".format(
+ os.path.dirname(os.path.abspath(__file__)),
+ SystemInfo.hostname)
+
+ # template out a local iptables jinja file
+ jinja_iptables = Template(open(iptables_file,'r').read())
+ iptables_expected = jinja_iptables.render(**kwargs)
+
+ with Sudo():
+ # Actually run the iptables scrape command
+ iptables = Command.check_output(iptables)
+ # print diff comparison (only shows up in pytests if test fails or
+ # verbosity turned way up)
+ for iptablesdiff in difflib.context_diff(iptables_expected.split('\n'),
+ iptables.split('\n')):
+ print(iptablesdiff)
+ # Conduct the string comparison of the expected and actual iptables
+ # ruleset
+ assert iptables_expected == iptables
diff --git a/testinfra/app/test_ossec.py b/testinfra/app/test_ossec.py
new file mode 100644
--- /dev/null
+++ b/testinfra/app/test_ossec.py
@@ -0,0 +1,43 @@
+import re
+import pytest
+
+sdvars = pytest.securedrop_test_vars
+
+def test_hosts_files(File, SystemInfo):
+ """ Ensure host files mapping are in place """
+ f = File('/etc/hosts')
+
+ hostname = SystemInfo.hostname
+ mon_ip = sdvars.mon_ip
+ app_host = sdvars.app_hostname
+ mon_host = sdvars.monitor_hostname
+
+ assert f.contains('^127.0.0.1')
+ assert f.contains('^127.0.0.1\t*{0}\t*{0}$'.format(app_host))
+ assert f.contains('^{}\s*{}\s*securedrop-monitor-server-alias$'.format(
+ mon_ip,
+ mon_host))
+
+def test_hosts_duplicate(Command):
+ """ Regression test for duplicate entries """
+ assert Command.check_output("uniq --repeated /etc/hosts") == ""
+
+def test_ossec_agent_installed(Package):
+ """ Check that ossec-agent package is present """
+ assert Package("securedrop-ossec-agent").is_installed
+
+def test_ossec_keyfile_present(File, Command, Sudo, SystemInfo):
+ """ ensure client keyfile for ossec-agent is present """
+ pattern = "^1024 {} {} [0-9a-f]{{64}}$".format(
+ sdvars.app_hostname,
+ sdvars.app_ip)
+ regex = re.compile(pattern)
+
+ with Sudo():
+ f = File("/var/ossec/etc/client.keys")
+ assert f.exists
+ assert oct(f.mode) == "0644"
+ assert f.user == "root"
+ assert f.group == "ossec"
+ assert f.content_string
+ assert bool(re.search(regex, f.content))
diff --git a/testinfra/build/test_build_dependencies.py b/testinfra/build/test_build_dependencies.py
new file mode 100644
--- /dev/null
+++ b/testinfra/build/test_build_dependencies.py
@@ -0,0 +1,72 @@
+import pytest
+
+
+securedrop_test_vars = pytest.securedrop_test_vars
+
+
+def get_build_directories():
+ """
+ Helper function to retrieve module-namespace test vars and format
+ the strings to interpolate version info. Keeps the test vars DRY
+ in terms of version info, and required since we can't rely on
+ Jinja-based evaluation of the YAML files (so we can't trivially
+ reuse vars in other var values, as is the case with Ansible).
+ """
+ substitutions = dict(
+ securedrop_version=securedrop_test_vars.securedrop_version,
+ ossec_version=securedrop_test_vars.ossec_version,
+ keyring_version=securedrop_test_vars.keyring_version,
+ )
+ build_directories = [d.format(**substitutions) for d in securedrop_test_vars.build_directories]
+ return build_directories
+
+
+build_directories = get_build_directories()
+
+
[email protected]("package", [
+ "libssl-dev",
+ "python-dev",
+ "python-pip",
+])
+def test_build_dependencies(Package, package):
+ """
+ Ensure development apt dependencies are installed.
+ """
+ assert Package(package).is_installed
+
+
+def test_pip_wheel_installed(Command):
+ """
+ Ensure `wheel` is installed via pip, for packaging Python
+ dependencies into a Debian package.
+ """
+ c = Command("pip freeze")
+ assert "wheel==0.24.0" in c.stdout
+ assert c.rc == 0
+
+
+def test_sass_gem_installed(Command):
+ """
+ Ensure the `sass` Ruby gem is installed, for compiling SASS to CSS.
+ """
+ # The Ansible config uses state=latest, so the version may change;
+    # intentionally pinning an explicit version here so we notice
+    # when it changes down the road.
+ c = Command("gem list")
+ assert "sass (3.4.23)" in c.stdout
+ assert c.rc == 0
+
+
[email protected]("directory", get_build_directories())
+def test_build_directories(File, directory):
+ """
+ Ensure the build directories are present. These directories are
+ the top-level of the Debian packages being created. They contain
+ nested subdirs of varying complexity, depending on package.
+ """
+ if '{}' in directory:
+ directory = directory.format(securedrop_test_vars.securedrop_version)
+ assert File(directory).is_directory
+
+
diff --git a/testinfra/build/test_securedrop_deb_package.py b/testinfra/build/test_securedrop_deb_package.py
new file mode 100644
--- /dev/null
+++ b/testinfra/build/test_securedrop_deb_package.py
@@ -0,0 +1,134 @@
+import pytest
+import os
+import re
+
+
+securedrop_test_vars = pytest.securedrop_test_vars
+
+
+def extract_package_name_from_filepath(filepath):
+ """
+ Helper function to infer intended package name from
+ the absolute filepath, using a rather garish regex.
+ E.g., given:
+ securedrop-ossec-agent-2.8.2+0.3.10-amd64.deb
+
+    returns:
+
+ securedrop-ossec-agent
+
+ which can then be used for comparisons in dpkg output.
+ """
+ deb_basename = os.path.basename(filepath)
+ package_name = re.search('^([a-z\-]+(?!\d))', deb_basename).groups()[0]
+ assert deb_basename.startswith(package_name)
+ return package_name
+
+
+def get_deb_packages():
+ """
+ Helper function to retrieve module-namespace test vars and format
+ the strings to interpolate version info. Keeps the test vars DRY
+ in terms of version info, and required since we can't rely on
+ Jinja-based evaluation of the YAML files (so we can't trivially
+ reuse vars in other var values, as is the case with Ansible).
+ """
+ substitutions = dict(
+ securedrop_version=securedrop_test_vars.securedrop_version,
+ ossec_version=securedrop_test_vars.ossec_version,
+ keyring_version=securedrop_test_vars.keyring_version,
+ )
+
+ deb_packages = [d.format(**substitutions) for d in securedrop_test_vars.build_deb_packages]
+ return deb_packages
+
+
+deb_packages = get_deb_packages()
+
[email protected]("deb", deb_packages)
+def test_build_deb_packages(File, deb):
+ """
+ Sanity check the built Debian packages for Control field
+ values and general package structure.
+ """
+ deb_package = File(deb.format(
+ securedrop_test_vars.securedrop_version))
+ assert deb_package.is_file
+
+
[email protected]("deb", deb_packages)
+def test_deb_packages_appear_installable(File, Command, Sudo, deb):
+ """
+ Confirms that a dry-run of installation reports no errors.
+ Simple check for valid Debian package structure, but not thorough.
+ When run on a malformed package, `dpkg` will report:
+
+ dpkg-deb: error: `foo.deb' is not a debian format archive
+
+ Testing application behavior is left to the functional tests.
+ """
+
+ deb_package = File(deb.format(
+ securedrop_test_vars.securedrop_version))
+
+ deb_basename = os.path.basename(deb_package.path)
+ package_name = extract_package_name_from_filepath(deb_package.path)
+ assert deb_basename.startswith(package_name)
+
+ # Sudo is required to call `dpkg --install`, even as dry-run.
+ with Sudo():
+ c = Command("dpkg --install --dry-run {}".format(deb_package.path))
+ assert "Selecting previously unselected package {}".format(package_name) in c.stdout
+ regex = "Preparing to unpack [./]+{} ...".format(re.escape(deb_basename))
+ assert re.search(regex, c.stdout, re.M)
+ assert c.rc == 0
+
+
[email protected]("deb", deb_packages)
+def test_deb_package_control_fields(File, Command, deb):
+ """
+ Ensure Debian Control fields are populated as expected in the package.
+ These checks are rather superficial, and don't actually confirm that the
+ .deb files are not broken. At a later date, consider integration tests
+ that actually use these built files during an Ansible provisioning run.
+ """
+ deb_package = File(deb.format(
+ securedrop_test_vars.securedrop_version))
+ package_name = extract_package_name_from_filepath(deb_package.path)
+ # The `--field` option will display all fields if none are specified.
+ c = Command("dpkg-deb --field {}".format(deb_package.path))
+
+ assert "Maintainer: SecureDrop Team <[email protected]>" in c.stdout
+ assert "Architecture: amd64" in c.stdout
+ assert "Package: {}".format(package_name) in c.stdout
+ assert c.rc == 0
+
+# Marking as expected failure because the securedrop-keyring package
+# uses the old "freedom.press/securedrop" URL as the Homepage. That should
+# be changed to securedrop.org, and the check folded into the other control
+# fields logic, above.
[email protected]
[email protected]("deb", deb_packages)
+def test_deb_package_control_fields_homepage(File, Command, deb):
+ deb_package = File(deb.format(
+ securedrop_test_vars.securedrop_version))
+ # The `--field` option will display all fields if none are specified.
+ c = Command("dpkg-deb --field {}".format(deb_package.path))
+ assert "Homepage: https://securedrop.org" in c.stdout
+
+
+# Marking as expected failure because the build process does not currently
+# programmatically enforce absence of these files; but it definitely should.
+# Right now, package building requires a manual step to clean up .pyc files.
[email protected]
[email protected]("deb", deb_packages)
+def test_deb_package_contains_no_pyc_files(File, Command, deb):
+ """
+ Ensures no .pyc files are shipped via the Debian packages.
+ """
+ deb_package = File(deb.format(
+ securedrop_test_vars.securedrop_version))
+ # Using `dpkg-deb` but `lintian --tag package-installs-python-bytecode`
+ # would be cleaner. Will defer to adding lintian tests later.
+ c = Command("dpkg-deb --contents {}".format(deb_package.path))
+ assert not re.search("^.*\.pyc$", c.stdout, re.M)
diff --git a/testinfra/common/test_cron_apt.py b/testinfra/common/test_cron_apt.py
new file mode 100644
--- /dev/null
+++ b/testinfra/common/test_cron_apt.py
@@ -0,0 +1,142 @@
+import pytest
+import re
+
+
[email protected]('dependency', [
+ 'cron-apt',
+ 'ntp'
+])
+def test_cron_apt_dependencies(Package, dependency):
+ """
+ Ensure critical packages are installed. If any of these are missing,
+ the system will fail to receive automatic updates.
+
+ The current apt config uses cron-apt, rather than unattended-upgrades,
+ but this may change in the future. Previously the apt.freedom.press repo
+ was not reporting any "Origin" field, making use of unattended-upgrades
+ problematic. With better procedures in place regarding apt repo
+ maintenance, we can ensure the field is populated going forward.
+ """
+ assert Package(dependency).is_installed
+
+
+def test_cron_apt_config(File):
+ """
+ Ensure custom cron-apt config file is present.
+ """
+ f = File('/etc/cron-apt/config')
+ assert f.is_file
+ assert f.user == "root"
+ assert oct(f.mode) == "0644"
+ assert f.contains('^SYSLOGON="always"$')
+ assert f.contains('^EXITON=error$')
+
+
+
[email protected]('repo', [
+ 'deb http://security.ubuntu.com/ubuntu trusty-security main',
+ 'deb-src http://security.ubuntu.com/ubuntu trusty-security main',
+ 'deb http://security.ubuntu.com/ubuntu trusty-security universe',
+ 'deb-src http://security.ubuntu.com/ubuntu trusty-security universe',
+ 'deb [arch=amd64] https://apt.freedom.press trusty main',
+ 'deb http://deb.torproject.org/torproject.org trusty main',
+])
+def test_cron_apt_repo_list(File, repo):
+ """
+ Ensure the correct apt repositories are specified
+ in the security list for apt.
+ """
+ f = File('/etc/apt/security.list')
+ assert f.is_file
+ assert f.user == "root"
+ assert oct(f.mode) == "0644"
+ repo_regex = '^{}$'.format(re.escape(repo))
+ assert f.contains(repo_regex)
+
+
+
+def test_cron_apt_repo_config_update(File):
+ """
+ Ensure cron-apt updates repos from the security.list config.
+ """
+
+ f = File('/etc/cron-apt/action.d/0-update')
+ assert f.is_file
+ assert f.user == "root"
+ assert oct(f.mode) == "0644"
+ repo_config = str('update -o quiet=2'
+ ' -o Dir::Etc::SourceList=/etc/apt/security.list'
+ ' -o Dir::Etc::SourceParts=""')
+ assert f.contains('^{}$'.format(repo_config))
+
+
+def test_cron_apt_repo_config_upgrade(File):
+ """
+ Ensure cron-apt upgrades packages from the security.list config.
+ """
+ f = File('/etc/cron-apt/action.d/5-security')
+ assert f.is_file
+ assert f.user == "root"
+ assert oct(f.mode) == "0644"
+ assert f.contains('^autoclean -y$')
+ repo_config = str('dist-upgrade -y -o APT::Get::Show-Upgraded=true'
+ ' -o Dir::Etc::SourceList=/etc/apt/security.list'
+ ' -o Dpkg::Options::=--force-confdef'
+ ' -o Dpkg::Options::=--force-confold')
+ assert f.contains(re.escape(repo_config))
+
+
+def test_cron_apt_config_deprecated(File):
+ """
+ Ensure default cron-apt file to download all updates does not exist.
+ """
+ f = File('/etc/cron-apt/action.d/3-download')
+ assert not f.exists
+
+
[email protected]('cron_job', [
+ { 'job': '0 4 * * * root /usr/bin/test -x /usr/sbin/cron-apt && /usr/sbin/cron-apt && /sbin/reboot',
+ 'state': 'present',
+ },
+ { 'job': '0 4 * * * root /usr/bin/test -x /usr/sbin/cron-apt && /usr/sbin/cron-apt',
+ 'state': 'absent',
+ },
+ { 'job': '0 5 * * * root /sbin/reboot',
+ 'state': 'absent',
+ },
+])
+def test_cron_apt_cron_jobs(File, cron_job):
+ """
+ Check for correct cron job for upgrading all packages and rebooting.
+ We'll also check for absence of previous versions of the cron job,
+ to make sure those have been cleaned up via the playbooks.
+ """
+ f = File('/etc/cron.d/cron-apt')
+ assert f.is_file
+ assert f.user == "root"
+ assert oct(f.mode) == "0644"
+
+ regex_job = '^{}$'.format(re.escape(cron_job['job']))
+ if cron_job['state'] == 'present':
+ assert f.contains(regex_job)
+ else:
+ assert not f.contains(regex_job)
+
+
+def test_cron_apt_all_packages_updated(Command):
+ """
+ Ensure a safe-upgrade has already been run, by checking that no
+ packages are eligible for upgrade currently.
+
+ The Ansible config installs a specific, out-of-date version of Firefox
+ for use with Selenium. Therefore apt will report it's possible to upgrade
+ Firefox, which we'll need to mark as "OK" in terms of the tests.
+ """
+ c = Command('aptitude --simulate -y safe-upgrade')
+ assert c.rc == 0
+ # If planning to upgrade anything, make sure it's ONLY firefox.
+ if "1 packages upgraded, 0 newly installed, 0 to remove and 0 not upgraded" in c.stdout:
+ assert "firefox" in c.stdout
+ else:
+ assert "No packages will be installed, upgraded, or removed." in c.stdout
+ assert "0 packages upgraded, 0 newly installed, 0 to remove and 0 not upgraded" in c.stdout
diff --git a/testinfra/common/test_fpf_apt_repo.py b/testinfra/common/test_fpf_apt_repo.py
new file mode 100644
--- /dev/null
+++ b/testinfra/common/test_fpf_apt_repo.py
@@ -0,0 +1,45 @@
+def test_fpf_apt_repo_present(File):
+ """
+ Ensure the FPF apt repo, apt.freedom.press, is configured.
+ This repository is necessary for the SecureDrop Debian packages,
+ including:
+
+ * securedrop-app-code
+ * securedrop-keyring
+ * securedrop-grsec
+
+ Depending on the host, additional FPF-maintained packages will be
+ installed, e.g. for OSSEC. Install state for those packages
+ is tested separately.
+ """
+ f = File('/etc/apt/sources.list.d/apt_freedom_press.list')
+ assert f.contains('^deb \[arch=amd64\] https:\/\/apt\.freedom\.press trusty main$')
+
+
+def test_fpf_apt_repo_fingerprint(Command):
+ """
+ Ensure the FPF apt repo has the correct fingerprint on the associated
+ signing pubkey. The key changed in October 2016, so test for the
+ newest fingerprint, which is installed on systems via the
+ `securedrop-keyring` package.
+ """
+
+
+ c = Command('apt-key finger')
+
+ fpf_gpg_pub_key_info = """/etc/apt/trusted.gpg.d/securedrop-keyring.gpg
+---------------------------------------------
+pub 4096R/00F4AD77 2016-10-20 [expires: 2017-10-20]
+ Key fingerprint = 2224 5C81 E3BA EB41 38B3 6061 310F 5612 00F4 AD77
+uid SecureDrop Release Signing Key"""
+
+ assert c.rc == 0
+ assert fpf_gpg_pub_key_info in c.stdout
+
+ fpf_gpg_pub_key_fingerprint_expired = 'B89A 29DB 2128 160B 8E4B 1B4C BADD E0C7 FC9F 6818'
+ fpf_gpg_pub_key_info_expired = """pub 4096R/FC9F6818 2014-10-26 [expired: 2016-10-27]
+ Key fingerprint = #{fpf_gpg_pub_key_fingerprint_expired}
+uid Freedom of the Press Foundation Master Signing Key"""
+
+ assert fpf_gpg_pub_key_fingerprint_expired not in c.stdout
+ assert fpf_gpg_pub_key_info_expired not in c.stdout
diff --git a/testinfra/common/test_grsecurity.py b/testinfra/common/test_grsecurity.py
new file mode 100644
--- /dev/null
+++ b/testinfra/common/test_grsecurity.py
@@ -0,0 +1,172 @@
+import pytest
+import os
+import re
+
+
+def test_ssh_motd_disabled(File):
+ """
+ Ensure the SSH MOTD (Message of the Day) is disabled.
+ Grsecurity balks at Ubuntu's default MOTD.
+ """
+ f = File("/etc/pam.d/sshd")
+ assert f.is_file
+ assert not f.contains("pam\.motd")
+
+
[email protected]("package", [
+ 'paxctl',
+ 'securedrop-grsec',
+])
+def test_grsecurity_apt_packages(Package, package):
+ """
+ Ensure the grsecurity-related apt packages are present on the system.
+ Includes the FPF-maintained metapackage, as well as paxctl, for managing
+ PaX flags on binaries.
+ """
+ assert Package(package).is_installed
+
+
[email protected]("package", [
+ 'linux-signed-image-generic-lts-utopic',
+ 'linux-signed-image-generic',
+ 'linux-signed-generic-lts-utopic',
+ 'linux-signed-generic',
+ '^linux-image-.*generic$',
+ '^linux-headers-.*',
+])
+def test_generic_kernels_absent(Command, package):
+ """
+ Ensure the default Ubuntu-provided kernel packages are absent.
+ In the past, conflicting version numbers have caused machines
+ to reboot into a non-grsec kernel due to poor handling of
+ GRUB_DEFAULT logic. Removing the vendor-provided kernel packages
+ prevents accidental boots into non-grsec kernels.
+ """
+ # Can't use the TestInfra Package module to check state=absent,
+ # so let's check by shelling out to `dpkg -l`. Dpkg will automatically
+ # honor simple regex in package names.
+ c = Command("dpkg -l {}".format(package))
+ assert c.rc == 1
+ error_text = "dpkg-query: no packages found matching {}".format(package)
+ assert c.stderr == error_text
+
+
+def test_grsecurity_lock_file(File):
+ """
+    Ensure the system is running a grsecurity kernel by testing for the
+ `grsec_lock` file, which is automatically created by grsecurity.
+ """
+ f = File("/proc/sys/kernel/grsecurity/grsec_lock")
+ assert oct(f.mode) == "0600"
+ assert f.user == "root"
+ assert f.size == 0
+
+
+def test_grsecurity_kernel_is_running(Command):
+ """
+ Make sure the currently running kernel is specific grsec kernel.
+ """
+ c = Command('uname -r')
+ assert c.stdout.endswith('-grsec')
+ assert c.stdout == '3.14.79-grsec'
+
+
[email protected]('sysctl_opt', [
+ ('kernel.grsecurity.grsec_lock', 1),
+ ('kernel.grsecurity.rwxmap_logging', 0),
+])
+def test_grsecurity_sysctl_options(Sysctl, Sudo, sysctl_opt):
+ """
+ Check that the grsecurity-related sysctl options are set correctly.
+ In production the RWX logging is disabled, to reduce log noise.
+ """
+ with Sudo():
+ assert Sysctl(sysctl_opt[0]) == sysctl_opt[1]
+
[email protected]('paxtest_check', [
+ "Executable anonymous mapping",
+ "Executable bss",
+ "Executable data",
+ "Executable heap",
+ "Executable stack",
+ "Executable shared library bss",
+ "Executable shared library data",
+ "Executable anonymous mapping (mprotect)",
+ "Executable bss (mprotect)",
+ "Executable data (mprotect)",
+ "Executable heap (mprotect)",
+ "Executable stack (mprotect)",
+ "Executable shared library bss (mprotect)",
+ "Executable shared library data (mprotect)",
+ "Writable text segments",
+ "Return to function (memcpy)",
+ "Return to function (memcpy, PIE)",
+])
+def test_grsecurity_paxtest(Command, Sudo, paxtest_check):
+    """
+    Check that paxtest does not report anything vulnerable.
+    Requires the package paxtest to be installed.
+    The paxtest package is currently being installed in the app-test role.
+    """
+    if Command.exists("/usr/bin/paxtest"):
+        with Sudo():
+            c = Command("paxtest blackhat")
+            assert c.rc == 0
+            assert "Vulnerable" not in c.stdout
+            regex = "^{}\s*:\sKilled$".format(re.escape(paxtest_check))
+            assert re.search(regex, c.stdout, re.M)
+
+
+def test_grub_pc_marked_manual(Command):
+ """
+    Ensure the `grub-pc` package is marked as manually installed.
+ This is necessary for VirtualBox with Vagrant.
+ """
+ c = Command('apt-mark showmanual grub-pc')
+ assert c.rc == 0
+ assert c.stdout == "grub-pc"
+
+
+# The app-staging host has an outdated version of Firefox installed manually,
+# for compatibility with Selenium. Let's skip only on that host.
[email protected](
+ os.environ['SECUREDROP_TESTINFRA_TARGET_HOST'] == "app-staging",
+ reason="app-staging uses old version of Firefox for Selenium")
+def test_apt_autoremove(Command):
+ """
+ Ensure old packages have been autoremoved.
+ """
+ c = Command('apt-get --dry-run autoremove')
+ assert c.rc == 0
+ assert "0 upgraded, 0 newly installed, 0 to remove and 0 not upgraded" in c.stdout
+
+
+# Expecting failure here, since the Ansible config doesn't set the same
+# flags in via the playbook as were recently declared in the securedrop-grsec
+# metapackage. The playbook in the SecureDrop install process should be updated
+# to match the PaX flags enforced via the metapackage.
[email protected]
[email protected]("binary", [
+ "/usr/sbin/grub-probe",
+ "/usr/sbin/grub-mkdevicemap",
+ "/usr/bin/grub-script-check",
+])
+def test_pax_flags(Command, File, binary):
+ """
+ Ensure PaX flags are set correctly on critical Grub binaries.
+ These flags are maintained as part of a post-install kernel hook
+ in the `securedrop-grsec` metapackage. If they aren't set correctly,
+ the machine may fail to boot into a new kernel.
+ """
+
+ f = File("/etc/kernel/postinst.d/paxctl-grub")
+ assert f.is_file
+ assert f.contains("^paxctl -zCE {}".format(binary))
+
+ c = Command("paxctl -v {}".format(binary))
+ assert c.rc == 0
+
+ assert "- PaX flags: -p---m--E--- [{}]".format(binary) in c.stdout
+ assert "PAGEEXEC is disabled" in c.stdout
+ assert "MPROTECT is disabled" in c.stdout
+ assert "EMUTRAMP is enabled" in c.stdout
diff --git a/testinfra/common/test_ip6tables.py b/testinfra/common/test_ip6tables.py
new file mode 100644
--- /dev/null
+++ b/testinfra/common/test_ip6tables.py
@@ -0,0 +1,15 @@
+def test_ip6tables_drop_everything(Command, Sudo):
+ """
+ Ensure that all IPv6 packets are dropped by default.
+ The IPv4 rules are more complicated, and tested separately.
+ """
+ desired_ip6tables_output = """
+-P INPUT DROP
+-P FORWARD DROP
+-P OUTPUT DROP
+""".lstrip().rstrip()
+
+ with Sudo():
+ c = Command("ip6tables -S")
+ assert c.stdout == desired_ip6tables_output
+ assert c.rc == 0
diff --git a/testinfra/common/test_platform.py b/testinfra/common/test_platform.py
new file mode 100644
--- /dev/null
+++ b/testinfra/common/test_platform.py
@@ -0,0 +1,28 @@
+import pytest
+
+
+def test_ansible_version(LocalCommand):
+ """
+ Check that a supported version of Ansible is being used.
+
+ The project has long used the Ansible 1.x series, but aims
+ to upgrade to 2.x during the 0.4 release. Developers commonly
+ install a recent version of Ansible when developing SecureDrop,
+ which is not a good baseline for testing.
+ """
+ c = LocalCommand("ansible --version")
+ assert c.stdout.startswith("ansible 1.")
+ assert "ansible 2" not in c.stdout
+
+
+def test_platform(SystemInfo):
+ """
+ SecureDrop requires Ubuntu Trusty 14.04 LTS. The shelf life
+ of that release means we'll need to migrate to Xenial LTS
+ at some point; until then, require hosts to be running
+    Ubuntu Trusty.
+ """
+ assert SystemInfo.type == "linux"
+ assert SystemInfo.distribution == "ubuntu"
+ assert SystemInfo.codename == "trusty"
+ assert SystemInfo.release == "14.04"
diff --git a/testinfra/common/test_system_hardening.py b/testinfra/common/test_system_hardening.py
new file mode 100644
--- /dev/null
+++ b/testinfra/common/test_system_hardening.py
@@ -0,0 +1,76 @@
+import pytest
+import re
+
+
[email protected]('sysctl_opt', [
+ ('net.ipv4.conf.all.accept_redirects', 0),
+ ('net.ipv4.conf.all.accept_source_route', 0),
+ ('net.ipv4.conf.all.rp_filter', 1),
+ ('net.ipv4.conf.all.secure_redirects', 0),
+ ('net.ipv4.conf.all.send_redirects', 0),
+ ('net.ipv4.conf.default.accept_redirects', 0),
+ ('net.ipv4.conf.default.accept_source_route', 0),
+ ('net.ipv4.conf.default.rp_filter', 1),
+ ('net.ipv4.conf.default.secure_redirects', 0),
+ ('net.ipv4.conf.default.send_redirects', 0),
+ ('net.ipv4.icmp_echo_ignore_broadcasts', 1),
+ ('net.ipv4.ip_forward', 0),
+ ('net.ipv4.tcp_max_syn_backlog', 4096),
+ ('net.ipv4.tcp_syncookies', 1),
+ ('net.ipv6.conf.all.disable_ipv6', 1),
+ ('net.ipv6.conf.default.disable_ipv6', 1),
+ ('net.ipv6.conf.lo.disable_ipv6', 1),
+])
+def test_sysctl_options(Sysctl, Sudo, sysctl_opt):
+ """
+ Ensure sysctl flags are set correctly. Most of these checks
+ are disabling IPv6 and hardening IPv4, which is appropriate
+ due to the heavy use of Tor.
+ """
+ with Sudo():
+ assert Sysctl(sysctl_opt[0]) == sysctl_opt[1]
+
+
+def test_dns_setting(File):
+ """
+ Ensure DNS service is hard-coded in resolv.conf config.
+ """
+ f = File('/etc/resolvconf/resolv.conf.d/base')
+ assert f.is_file
+ assert f.user == "root"
+ assert f.group == "root"
+ assert oct(f.mode) == "0644"
+ assert f.contains('^nameserver 8\.8\.8\.8$')
+
+
[email protected]('kernel_module', [
+ 'bluetooth',
+ 'iwlwifi',
+])
+def test_blacklisted_kernel_modules(Command, File, Sudo, kernel_module):
+ """
+ Test that unwanted kernel modules are blacklisted on the system.
+ Mostly these checks are defense-in-depth approaches to ensuring
+ that wireless interfaces will not work.
+ """
+ with Sudo():
+ assert kernel_module not in Command("lsmod").stdout
+
+ f = File("/etc/modprobe.d/blacklist.conf")
+ assert f.contains("^blacklist {}$".format(kernel_module))
+
+
+# Expecting failure here, since the Ansible config doesn't actually
+# disable swap, as intended. (It doesn't manage /etc/fstab.)
[email protected]
+def test_swap_disabled(Command):
+ """
+ Ensure swap space is disabled. Prohibit writing memory to swapfiles
+ to reduce the threat of forensic analysis leaking any sensitive info.
+ """
+ c = Command('swapon --summary')
+ assert c.rc == 0
+ # A leading slash will indicate full path to a swapfile.
+ assert not re.search("^/", c.stdout, re.M)
+ # Expect that ONLY the headers will be present in the output.
+ assert c.stdout == "Filename\t\t\t\tType\t\tSize\tUsed\tPriority\n"
diff --git a/testinfra/common/test_tor_config.py b/testinfra/common/test_tor_config.py
new file mode 100644
--- /dev/null
+++ b/testinfra/common/test_tor_config.py
@@ -0,0 +1,93 @@
+import pytest
+import re
+
+sdvars = pytest.securedrop_test_vars
+
+def test_tor_apt_repo(File):
+ """
+ Ensure the Tor Project apt repository is configured.
+ The version of Tor in the Trusty repos is not up to date.
+ """
+ f = File('/etc/apt/sources.list.d/deb_torproject_org_torproject_org.list')
+ repo_regex = re.escape('deb http://deb.torproject.org/torproject.org trusty main')
+ assert f.contains(repo_regex)
+
+
[email protected]('package', [
+ 'deb.torproject.org-keyring',
+ 'tor',
+])
+def test_tor_packages(Package, package):
+ """
+ Ensure Tor packages are installed. Includes a check for the keyring,
+ so that automatic updates can handle rotating the signing key if necessary.
+ """
+ assert Package(package).is_installed
+
+
+def test_tor_service_running(Command, File, Sudo):
+ """
+ Ensure tor is running and enabled. Tor is required for SSH access,
+ so it must be enabled to start on boot.
+ """
+    # TestInfra tries to determine the service manager intelligently, and
+ # inappropriately assumes Upstart on Trusty, due to presence of the
+ # `initctl` command. The tor service is handled via a SysV-style init
+ # script, so let's just shell out and verify the running and enabled
+ # states explicitly.
+ with Sudo():
+ tor_status = Command("service tor status")
+ assert tor_status.rc == 0
+ assert tor_status.stdout == " * tor is running"
+
+ with Sudo():
+ tor_enabled = Command("find /etc/rc?.d -name S??tor")
+ assert tor_enabled.rc == 0
+ assert tor_enabled.stdout != ""
+
+ tor_targets = tor_enabled.stdout.split("\n")
+ assert len(tor_targets) == 4
+ for target in tor_targets:
+ t = File(target)
+ assert t.is_symlink
+ assert t.linked_to == "/etc/init.d/tor"
+
+
[email protected]('torrc_option', [
+ 'SocksPort 0',
+ 'SafeLogging 1',
+ 'RunAsDaemon 1',
+ 'Sandbox 1',
+])
+def test_tor_torrc_options(File, torrc_option):
+ """
+ Check for required options in the system Tor config file.
+ These options should be present regardless of machine role,
+ meaning both Application and Monitor server will have them.
+
+ Separate tests will check for specific hidden services.
+ """
+ f = File("/etc/tor/torrc")
+ assert f.is_file
+ assert f.user == "debian-tor"
+ assert oct(f.mode) == "0644"
+ assert f.contains("^{}$".format(torrc_option))
+
+
+def test_tor_signing_key_fingerprint(Command):
+ """
+ The `deb.torproject.org-keyring` package manages the repo signing pubkey
+ for tor-related packages, so make sure that fingerprint matches
+ expectations.
+ """
+
+ c = Command("apt-key finger")
+ tor_gpg_pub_key_info = """/etc/apt/trusted.gpg.d/deb.torproject.org-keyring.gpg
+-----------------------------------------------------
+pub 2048R/886DDD89 2009-09-04 [expires: 2020-08-29]
+ Key fingerprint = A3C4 F0F9 79CA A22C DBA8 F512 EE8C BC9E 886D DD89
+uid deb.torproject.org archive signing key
+sub 2048R/219EC810 2009-09-04 [expires: 2018-08-30]"""
+
+ assert c.rc == 0
+ assert tor_gpg_pub_key_info in c.stdout
diff --git a/testinfra/common/test_tor_hidden_services.py b/testinfra/common/test_tor_hidden_services.py
new file mode 100644
--- /dev/null
+++ b/testinfra/common/test_tor_hidden_services.py
@@ -0,0 +1,89 @@
+import pytest
+import re
+
+
+sdvars = pytest.securedrop_test_vars
+
+
[email protected]('tor_service', sdvars.tor_services)
+def test_tor_service_directories(File, Sudo, tor_service):
+ """
+ Check mode and ownership on Tor service directories.
+ """
+ with Sudo():
+ f = File("/var/lib/tor/services/{}".format(tor_service['name']))
+ assert f.is_directory
+ # TODO: tor might mark these dirs as setgid
+ assert oct(f.mode) == "0700"
+ assert f.user == "debian-tor"
+ assert f.group == "debian-tor"
+
+
[email protected]('tor_service', sdvars.tor_services)
+def test_tor_service_hostnames(File, Sudo, tor_service):
+ """
+ Check contents of tor service hostname file. For normal Hidden Services,
+ the file should contain only hostname (.onion URL). For Authenticated
+    the file should contain only the hostname (.onion URL). For Authenticated
+ """
+
+ # Declare regex only for THS; we'll build regex for ATHS only if
+ # necessary, since we won't have the required values otherwise.
+ ths_hostname_regex = "[a-z0-9]{16}\.onion"
+
+ with Sudo():
+ f = File("/var/lib/tor/services/{}/hostname".format(tor_service['name']))
+ assert f.is_file
+ assert oct(f.mode) == "0600"
+ assert f.user == "debian-tor"
+ assert f.group == "debian-tor"
+
+ # All hostnames should contain at *least* the hostname.
+ assert re.search(ths_hostname_regex, f.content)
+
+ if tor_service['authenticated']:
+ # HidServAuth regex is approximately [a-zA-Z0-9/+], but validating
+ # the entire entry is sane, and we don't need to nitpick the charset.
+ aths_hostname_regex = ths_hostname_regex+" .{22} # client: "+tor_service['client']
+ assert re.search("^{}$".format(aths_hostname_regex), f.content)
+ else:
+ assert re.search("^{}$".format(ths_hostname_regex), f.content)
+
+
[email protected]('tor_service', sdvars.tor_services)
+def test_tor_services_config(File, tor_service):
+ """
+ Ensure torrc file contains relevant lines for Hidden Service declarations.
+ All hidden services must include:
+
+ * HiddenServiceDir
+ * HiddenServicePort
+
+ Only authenticated hidden services must also include:
+
+ * HiddenServiceAuthorizeClient
+
+ Check for each as appropriate.
+ """
+ f = File("/etc/tor/torrc")
+ dir_regex = "HiddenServiceDir /var/lib/tor/services/{}".format(
+ tor_service['name'])
+ # We need at least one port, but it may be used for both config values.
+ # On the Journalist Interface, we reuse the "80" remote port but map it to
+ # a different local port, so Apache can listen on several sockets.
+ remote_port = tor_service['ports'][0]
+ try:
+ local_port = tor_service['ports'][1]
+ except IndexError:
+ local_port = remote_port
+
+ port_regex = "HiddenServicePort {} 127.0.0.1:{}".format(
+ remote_port, local_port)
+
+ assert f.contains("^{}$".format(dir_regex))
+ assert f.contains("^{}$".format(port_regex))
+
+ if tor_service['authenticated']:
+ auth_regex = "HiddenServiceAuthorizeClient stealth {}".format(
+ tor_service['client'])
+ assert f.contains("^{}$".format(auth_regex))
diff --git a/testinfra/common/test_user_config.py b/testinfra/common/test_user_config.py
new file mode 100644
--- /dev/null
+++ b/testinfra/common/test_user_config.py
@@ -0,0 +1,69 @@
+import re
+
+
+def test_sudoers_config(File, Sudo):
+ """
+ Check sudoers config for passwordless sudo via group membership,
+ as well as environment-related hardening.
+ """
+ f = File("/etc/sudoers")
+ assert f.is_file
+ assert f.user == "root"
+ assert f.group == "root"
+ assert oct(f.mode) == "0440"
+
+ # Restrictive file mode requires sudo for reading, so let's
+ # read once and store the content in a var.
+ with Sudo():
+ sudoers_config = f.content
+
+ # Using re.search rather than `f.contains` since the basic grep
+ # matching doesn't support PCRE, so `\s` won't work.
+ assert re.search('^Defaults\s+env_reset$', sudoers_config, re.M)
+ assert re.search('^Defaults\s+env_reset$', sudoers_config, re.M)
+ assert re.search('^Defaults\s+mail_badpass$', sudoers_config, re.M)
+ assert re.search('Defaults\s+secure_path="/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"', sudoers_config, re.M)
+ assert re.search('^%sudo\s+ALL=\(ALL\)\s+NOPASSWD:\s+ALL$', sudoers_config, re.M)
+ assert re.search('Defaults:%sudo\s+!requiretty', sudoers_config, re.M)
+
+
+def test_sudoers_tmux_env(File):
+ """
+ Ensure SecureDrop-specific bashrc additions are present.
+ This checks for automatic tmux start on interactive shells.
+ If we switch to byobu, we can set `byobu-enabled` and check
+ the corresponding settings there.
+ """
+
+ f = File('/etc/profile.d/securedrop_additions.sh')
+ non_interactive_str = re.escape('[[ $- != *i* ]] && return')
+ tmux_check = re.escape('test -z "$TMUX" && (tmux attach || tmux new-session)')
+
+ assert f.contains("^{}$".format(non_interactive_str))
+ assert f.contains("^if which tmux >\/dev\/null 2>&1; then$")
+
+ assert 'test -z "$TMUX" && (tmux attach || tmux new-session)' in f.content
+ assert f.contains(tmux_check)
+
+
+def test_tmux_installed(Package):
+ """
+ Ensure the `tmux` package is present, since it's required for the user env.
+ When running an interactive SSH session over Tor, tmux should be started
+ automatically, to prevent problems if the connection is broken unexpectedly,
+ as sometimes happens over Tor. The Admin will be able to reconnect to the
+ running tmux session and review command output.
+ """
+ assert Package("tmux").is_installed
+
+
+def test_sudoers_tmux_env_deprecated(File):
+ """
+ Previous version of the Ansible config set the tmux config
+ in per-user ~/.bashrc, which was redundant. The config has
+ since moved to /etc/profile.d, to provide a single point of
+ update that applies to all users. Let's make sure that the
+ old setting isn't still active.
+ """
+ f = File("/home/vagrant/.bashrc")
+ assert not f.contains("^. \/etc\/bashrc\.securedrop_additions$")
diff --git a/testinfra/development/test_development_application_settings.py b/testinfra/development/test_development_application_settings.py
new file mode 100644
--- /dev/null
+++ b/testinfra/development/test_development_application_settings.py
@@ -0,0 +1,131 @@
+import pytest
+
+
[email protected]('package', [
+ "securedrop-app-code",
+ "apache2-mpm-worker",
+ "libapache2-mod-wsgi",
+ "libapache2-mod-xsendfile",
+])
+def test_development_lacks_deb_packages(Command, package):
+ """
+ The development machine does not use Apache, but rather the Flask runner,
+ for standing up dev-friendly servers inside the VM. Therefore the
+ app-code-related deb packages should be absent.
+ """
+ # The TestInfra `Package` module doesn't offer state=absent checks,
+ # so let's call `dpkg -l` and inspect that output.
+ c = Command("dpkg -l {}".format(package))
+ assert c.rc == 1
+ assert c.stdout == ""
+ assert c.stderr == "dpkg-query: no packages found matching {}".format(
+ package)
+
+
+def test_development_apparmor_no_complain_mode(Command, Sudo):
+ """
+ Ensure that AppArmor profiles are not set to complain mode in development.
+ The app-staging host sets profiles to complain, viz.
+
+ * usr.sbin.apache2
+ * usr.sbin.tor
+
+ but those changes should not land on the development machine.
+ """
+
+ with Sudo():
+ c = Command("aa-status")
+ assert c.rc == 0
+ assert '0 profiles are in complain mode.' in c.stdout
+
+
[email protected]('unwanted_file', [
+ "/var/www/html",
+ "/var/www/source.wsgi",
+ "/var/www/document.wsgi",
+])
+def test_development_apache_docroot_absent(File, unwanted_file):
+ """
+ Ensure the default HTML document root is missing.
+ Development environment does not serve out of /var/www,
+ since it uses the Flask dev server, not Apache.
+ """
+ f = File(unwanted_file)
+ assert not f.exists
+
+
[email protected]('data_dir', [
+ "/var/lib/securedrop",
+ "/var/lib/securedrop/keys",
+ "/var/lib/securedrop/tmp",
+ "/var/lib/securedrop/store",
+])
+def test_development_data_directories_exist(File, data_dir):
+ """
+    Ensure that the application data directories under /var/lib/securedrop
+    exist with correct ownership and permissions in the development
+    environment.
+ """
+ f = File(data_dir)
+ assert f.is_directory
+ assert f.user == "vagrant"
+ assert f.group == "vagrant"
+ assert oct(f.mode) == "0700"
+
+
+def test_development_app_directories_exist(File):
+ """
+ Ensure that application code directories are created
+ under /vagrant for the development environment, rather than
+ /var/www as in staging and prod.
+
+ Using a separate check from the data directories because /vagrant
+ will be mounted with different mode.
+ """
+ f = File("/vagrant/securedrop")
+ assert f.is_directory
+ assert f.user == "vagrant"
+ assert f.group == "vagrant"
+
+ # Vagrant VirtualBox environments show /vagrant as 770,
+ # but the Vagrant DigitalOcean droplet shows /vagrant as 775.
+ # This appears to be a side-effect of the default umask
+ # in the snapci instances. (The rsync provisioner for the
+ # vagrant-digitalocean plugin preserves permissions from the host.)
+ # The spectests for 'staging' still check for an explicit mode,
+ # so it's OK to relax this test for now.
+ #it { should be_mode '700' }
+ # TODO: should be 700 in all environments; ansible task is
+ # straightforward about this.
+
+
+def test_development_clean_tmp_cron_job(Command, Sudo):
+ """
+ Ensure cron job for cleaning the temporary directory for the app code exists.
+ """
+
+ with Sudo():
+ c = Command('crontab -l')
+ assert c.rc == 0
+ # TODO: this should be using property, but the ansible role
+ # doesn't use a var, it's hard-coded. update ansible, then fix test.
+ # it { should have_entry "@daily #{property['securedrop_code']}/manage.py clean-tmp" }
+ assert "@daily /vagrant/securedrop/manage.py clean-tmp" in c.stdout
+
+
+def test_development_default_logo_exists(File):
+ """
+ Checks for default SecureDrop logo file.
+
+ TODO: Add check for custom logo file.
+ """
+
+ f = File("/vagrant/securedrop/static/i/logo.png")
+ assert f.is_file
+ assert f.user == "vagrant"
+ assert f.group == "vagrant"
+ assert oct(f.mode) == "0644"
+ # TODO: Ansible task declares mode 400 but not as string, needs to be fixed
+ # and tests updated. Also, not using "mode" in tests below because umask
+ # on snapci machines differs from the /vagrant folder in dev VM.
+ # Fixing Ansible task may fix differing perms.
diff --git a/testinfra/development/test_development_environment.py b/testinfra/development/test_development_environment.py
new file mode 100644
--- /dev/null
+++ b/testinfra/development/test_development_environment.py
@@ -0,0 +1,63 @@
+import pytest
+
+
+def test_development_app_dependencies(Package):
+ """
+ Ensure development apt dependencies are installed.
+ """
+ development_apt_dependencies = [
+ 'libssl-dev',
+ 'python-dev',
+ 'python-pip',
+ ]
+ for dependency in development_apt_dependencies:
+ p = Package(dependency)
+ assert p.is_installed
+
+
[email protected]('pip_package,version', [
+ ('Flask-Testing', '0.6.1'),
+ ('Flask', '0.11.1'),
+ ('Jinja2', '2.8'),
+ ('MarkupSafe', '0.23'),
+ ('Werkzeug', '0.11.11'),
+ ('beautifulsoup4', '4.5.1'),
+ ('click', '6.6'),
+ ('coverage', '4.2'),
+ ('first', '2.0.1'),
+ ('funcsigs', '1.0.2'),
+ ('itsdangerous', '0.24'),
+ ('mock', '2.0.0'),
+ ('pbr', '1.10.0'),
+ ('pip-tools', '1.7.0'),
+ ('py', '1.4.31'),
+ ('pytest-cov', '2.4.0'),
+ ('pytest', '3.0.3'),
+ ('selenium', '2.53.6'),
+ ('six', '1.10.0'),
+])
+def test_development_pip_dependencies(Command, pip_package, version):
+ """
+ Declare SecureDrop app pip requirements. On the development VM,
+ the pip dependencies should be installed directly via pip, rather
+ than relying on the deb packages with pip-wheel inclusions.
+ Versions here are intentionally hardcoded to track changes.
+ """
+ c = Command('pip freeze')
+ assert "{}=={}".format(pip_package, version) in c.stdout
+
+
+def test_development_securedrop_env_var(File):
+ """
+ Ensure that the SECUREDROP_ENV var is set to "dev".
+
+
+ TODO: this isn't really checking that the env var is set,
+ just that it's declared in the bashrc. spec_helper ignores
+ env vars via ssh by default, so start there.
+ """
+
+ f = File('/home/vagrant/.bashrc')
+ assert f.is_file
+ assert f.user == 'vagrant'
+ assert f.contains('^export SECUREDROP_ENV=dev$')
diff --git a/testinfra/development/test_development_networking.py b/testinfra/development/test_development_networking.py
new file mode 100644
--- /dev/null
+++ b/testinfra/development/test_development_networking.py
@@ -0,0 +1,55 @@
+def test_development_iptables_rules(Command, Sudo):
+ """
+    Declare desired iptables rules.
+ The 'development' machine doesn't have any custom
+ iptables rules, so just check for the default chains.
+ """
+ desired_iptables_rules = [
+ '-P INPUT ACCEPT',
+ '-P FORWARD ACCEPT',
+ '-P OUTPUT ACCEPT',
+ ]
+ with Sudo():
+ c = Command('iptables -S')
+ assert c.rc == 0
+ for rule in desired_iptables_rules:
+ assert rule in c.stdout
+
+ # If any iptables rules are ever added, this test will
+ # fail, so tests can be written for the new rules.
+ # Counting newlines in the output simply to avoid calling
+ # `iptables -S` again and piping to `wc -l`.
+ assert c.stdout.count("\n") == len(desired_iptables_rules) - 1
+
+
+def test_development_ssh_listening(Socket):
+ """
+ Check for ssh listening on all interfaces. In prod environment,
+ SSH will be listening only on localhost, i.e. SSH over ATHS.
+ """
+ s = Socket("tcp://0.0.0.0:22")
+ assert s.is_listening
+
+
+def test_development_redis_worker(Socket):
+ """
+ Ensure that Redis worker is listening on localhost.
+ This worker is used to handle incoming submissions.
+ """
+
+ s = Socket("tcp://127.0.0.1:6379")
+ assert s.is_listening
+
+# The Flask runners for the source and journalist interfaces
+# aren't configured to run by default, e.g. on boot. Nor
+# do the app tests cause them to be run. So, we shouldn't
+# really expect them to be running.
+## check for source interface flask port listening
+#describe port(8080) do
+# it { should be_listening.on('0.0.0.0').with('tcp') }
+#end
+#
+## check for journalist interface flask port listening
+#describe port(8081) do
+# it { should be_listening.on('0.0.0.0').with('tcp') }
+#end
diff --git a/testinfra/development/test_xvfb.py b/testinfra/development/test_xvfb.py
new file mode 100644
--- /dev/null
+++ b/testinfra/development/test_xvfb.py
@@ -0,0 +1,113 @@
+import pytest
+
+
[email protected]('dependency', [
+ 'firefox',
+ 'xvfb',
+])
+def test_xvfb_apt_dependencies(Package, dependency):
+ """
+ Ensure apt requirements for Xvfb are present.
+ """
+ assert Package(dependency).is_installed
+
+
+def test_xvfb_service_config(File, Sudo):
+ """
+ Ensure xvfb service configuration file is present.
+ Using Sudo context manager because the expected mode is 700.
+ Not sure it's really necessary to have this script by 700; 755
+    Not sure it's really necessary to have this script be 700; 755
+ """
+ with Sudo():
+ f = File("/etc/init.d/xvfb")
+ assert f.is_file
+ assert oct(f.mode) == "0700"
+ assert f.user == "root"
+ assert f.group == "root"
+ # Let's hardcode the entire init script and check for exact match.
+ # The pytest output will display a diff if anything is missing.
+ xvfb_init_content = """
+# This is the /etc/init.d/xvfb script. We use it to launch xvfb at boot in the
+# development environment so we can easily run the functional tests.
+
+XVFB=/usr/bin/Xvfb
+XVFBARGS=":1 -screen 0 1024x768x24 -ac +extension GLX +render -noreset"
+PIDFILE=/var/run/xvfb.pid
+case "$1" in
+ start)
+ echo -n "Starting virtual X frame buffer: Xvfb"
+ start-stop-daemon --start --quiet --pidfile $PIDFILE --make-pidfile --background --exec $XVFB -- $XVFBARGS
+ echo "."
+ ;;
+ stop)
+ echo -n "Stopping virtual X frame buffer: Xvfb"
+ start-stop-daemon --stop --quiet --pidfile $PIDFILE
+ echo "."
+ ;;
+ restart)
+ $0 stop
+ $0 start
+ ;;
+ *)
+ echo "Usage: /etc/init.d/xvfb {start|stop|restart}"
+ exit 1
+esac
+
+exit 0
+""".lstrip().rstrip()
+ with Sudo():
+ assert f.contains('^XVFB=/usr/bin/Xvfb$')
+ assert f.contains('^XVFBARGS=":1 -screen 0 1024x768x24 '
+ '-ac +extension GLX +render -noreset"$')
+ assert f.content == xvfb_init_content
+
+
+def test_xvfb_service_enabled(Command, Sudo):
+ """
+ Ensure xvfb is configured to start on boot via update-rc.d.
+ The `-n` option to update-rc.d is dry-run.
+
+ Using Sudo context manager because the service file is mode 700.
+    Not sure it's really necessary to have this script be 700; 755
+ sounds sufficient.
+ """
+ with Sudo():
+ c = Command('update-rc.d -n xvfb defaults')
+ assert c.rc == 0
+ wanted_text = 'System start/stop links for /etc/init.d/xvfb already exist.'
+ assert wanted_text in c.stdout
+
+
+def test_xvfb_display_config(File):
+ """
+ Ensure DISPLAY environment variable is set on boot, for running
+ headless tests via Xvfb.
+ """
+ f = File('/etc/profile.d/xvfb_display.sh')
+ assert f.is_file
+ assert oct(f.mode) == "0444"
+ assert f.user == "root"
+ assert f.group == "root"
+ assert f.contains("export DISPLAY=:1\n")
+
+
+def test_xvfb_service_running(Process, Sudo):
+ """
+ Ensure that xvfb service is running.
+
+ We can't use the Service module because it expects a "status"
+ subcommand for the init script, and our custom version doesn't have
+ one. So let's make sure the process is running.
+ """
+ # Sudo isn't necessary to read out of /proc on development, but is
+ # required when running under Grsecurity, which app-staging does.
+ # So let's escalate privileges to ensure we can determine service state.
+ with Sudo():
+ p = Process.get(user="root", comm="Xvfb")
+ wanted_args = str('/usr/bin/Xvfb :1 -screen 0 1024x768x24 '
+ '-ac +extension GLX +render -noreset')
+ assert p.args == wanted_args
+ # We only expect a single process, no children.
+ workers = Process.filter(ppid=p.pid)
+ assert len(workers) == 0
diff --git a/testinfra/mon/test_network.py b/testinfra/mon/test_network.py
new file mode 100644
--- /dev/null
+++ b/testinfra/mon/test_network.py
@@ -0,0 +1,69 @@
+import os
+import difflib
+import pytest
+from jinja2 import Template
+
+
+securedrop_test_vars = pytest.securedrop_test_vars
+
+
+def determine_app_ip(SystemInfo, Command):
+ """
+ Dumb logic to determine environment and lookup relevant app IP address
+ """
+ app_hostname = "app-prod"
+ hostname = SystemInfo.hostname
+ if "staging" in hostname:
+ app_hostname = "app-staging"
+ app_ip = Command.check_output("getent hosts "+app_hostname+" | awk '{ print $1 }'")
+ return app_ip
+
+
+def test_mon_iptables_rules(SystemInfo, Command, Sudo, Ansible):
+ app_ip = determine_app_ip(SystemInfo, Command)
+
+ # Build a dict of variables to pass to jinja for iptables comparison
+ kwargs = dict(
+ app_ip=app_ip,
+        default_interface=Ansible("setup")["ansible_facts"]["ansible_default_ipv4"]["interface"],
+        tor_user_id=Command.check_output("id -u debian-tor"),
+        ssh_group_gid=Command.check_output("getent group ssh | cut -d: -f3"),
+        postfix_user_id=Command.check_output("id -u postfix"),
+        dns_server=securedrop_test_vars.dns_server)
+
+ # Build iptables scrape cmd, purge comments + counters
+ iptables = "iptables-save | sed 's/ \[[0-9]*\:[0-9]*\]//g' | egrep -v '^#'"
+ iptables_file = "{}/iptables-{}.j2".format(
+ os.path.dirname(os.path.abspath(__file__)),
+ SystemInfo.hostname)
+
+ # template out a local iptables jinja file
+ jinja_iptables = Template(open(iptables_file,'r').read())
+ iptables_expected = jinja_iptables.render(**kwargs)
+
+ with Sudo():
+ # Actually run the iptables scrape command
+ iptables = Command.check_output(iptables)
+ # print diff comparison (only shows up in pytests if test fails or
+ # verbosity turned way up)
+ for iptablesdiff in difflib.context_diff(iptables_expected.split('\n'),
+ iptables.split('\n')):
+ print(iptablesdiff)
+ # Conduct the string comparison of the expected and actual iptables
+ # ruleset
+ assert iptables_expected == iptables
+
+
[email protected]('ossec_service', [
+ dict(host="0.0.0.0", proto="tcp", port=22),
+ dict(host="127.0.0.1", proto="tcp", port=25),
+ dict(host="0.0.0.0", proto="udp", port=1514),
+])
+def test_listening_ports(Socket, Sudo, ossec_service):
+ """
+ Ensure the OSSEC-related services are listening on the
+ expected sockets. Services to check include ossec, mail, and ssh.
+ """
+ socket = "{proto}://{host}:{port}".format(**ossec_service)
+ with Sudo():
+ assert Socket(socket).is_listening
diff --git a/testinfra/mon/test_ossec.py b/testinfra/mon/test_ossec.py
new file mode 100644
--- /dev/null
+++ b/testinfra/mon/test_ossec.py
@@ -0,0 +1,235 @@
+import re
+import pytest
+
+
+securedrop_test_vars = pytest.securedrop_test_vars
+
[email protected]('package', [
+ 'mailutils',
+ 'ossec-server',
+ 'postfix',
+ 'procmail',
+ 'securedrop-ossec-server',
+])
+def test_ossec_package(Package, package):
+ """
+ Ensure required packages for OSSEC are installed.
+ Includes mail utilities and the FPF-maintained metapackage.
+ """
+ assert Package(package).is_installed
+
+
[email protected]('header', [
+ '/^X-Originating-IP:/ IGNORE',
+ '/^X-Mailer:/ IGNORE',
+ '/^Mime-Version:/ IGNORE',
+ '/^User-Agent:/ IGNORE',
+ '/^Received:/ IGNORE',
+])
+def test_postfix_headers(File, header):
+ """
+ Ensure postfix header filters are set correctly. Common mail headers
+ are stripped by default to avoid leaking metadata about the instance.
+ Message body is always encrypted prior to sending.
+ """
+ f = File("/etc/postfix/header_checks")
+ assert f.is_file
+ assert oct(f.mode) == "0644"
+ regex = '^{}$'.format(re.escape(header))
+ assert re.search(regex, f.content, re.M)
+
+
[email protected]('setting', [
+ 'relayhost = [smtp.gmail.com]:587',
+ 'smtp_sasl_auth_enable = yes',
+ 'smtp_sasl_password_maps = hash:/etc/postfix/sasl_passwd',
+ 'smtp_sasl_security_options = noanonymous',
+ 'smtp_use_tls = yes',
+ 'smtp_tls_session_cache_database = btree:${data_directory}/smtp_scache',
+ 'smtp_tls_security_level = secure',
+ 'smtp_tls_CApath = /etc/ssl/certs',
+ 'smtp_tls_ciphers = high',
+ 'smtp_tls_protocols = TLSv1.2 TLSv1.1 TLSv1 !SSLv3 !SSLv2',
+ 'myhostname = ossec.server',
+ 'myorigin = $myhostname',
+ 'smtpd_banner = $myhostname ESMTP $mail_name (Ubuntu)',
+ 'biff = no',
+ 'append_dot_mydomain = no',
+ 'readme_directory = no',
+ 'smtp_header_checks = regexp:/etc/postfix/header_checks',
+ 'mailbox_command = /usr/bin/procmail',
+ 'inet_interfaces = loopback-only',
+ 'alias_maps = hash:/etc/aliases',
+ 'alias_database = hash:/etc/aliases',
+ 'mydestination = $myhostname, localhost.localdomain , localhost',
+ 'mynetworks = 127.0.0.0/8 [::ffff:127.0.0.0]/104 [::1]/128',
+ 'mailbox_size_limit = 0',
+ 'recipient_delimiter = +',
+])
+def test_postfix_settings(File, setting):
+ """
+ Check all postfix configuration lines. There are technically multiple
+ configuration paths regarding the TLS settings, particularly the
+ fingerprint verification logic, but only the base default config is tested
+ currently.
+ """
+ f = File("/etc/postfix/main.cf")
+ assert f.is_file
+ assert f.user == 'root'
+ assert oct(f.mode) == "0644"
+ regex = '^{}$'.format(re.escape(setting))
+ assert re.search(regex, f.content, re.M)
+
+
+def test_ossec_connectivity(Command, Sudo):
+ """
+    Ensure the ossec-server machine has an active connection to the ossec-agent.
+ The ossec service will report all available agents, and we can inspect
+ that list to make sure it's the host we expect.
+ """
+ desired_output = "{}-{} is available.".format(securedrop_test_vars.app_hostname,
+ securedrop_test_vars.app_ip)
+ with Sudo():
+ c = Command("/var/ossec/bin/list_agents -a")
+ assert c.stdout == desired_output
+ assert c.rc == 0
+
+def test_ossec_gnupg_homedir(File, Sudo):
+ """ ensure ossec gpg homedir exists """
+ with Sudo():
+        f = File("/var/ossec/.gnupg")
+ assert f.is_directory
+ assert f.user == "ossec"
+ assert oct(f.mode) == "0700"
+
+
+def test_ossec_gnupg(File, Sudo):
+ """
+ Ensures the test Admin GPG public key is present as file.
+ Does not check that it's added to the keyring for the ossec user;
+ that's handled by a separate test.
+ """
+ with Sudo():
+ f = File("/var/ossec/test_admin_key.pub")
+ assert f.is_file
+ assert oct(f.mode) == "0644"
+
+
+def test_ossec_pubkey_in_keyring(Command, Sudo):
+ """
+ Ensure the test Admin GPG public key exists in the keyring
+ within the ossec home directory.
+ """
+ ossec_gpg_pubkey_info = """pub 4096R/EDDDC102 2014-10-15
+uid Test/Development (DO NOT USE IN PRODUCTION) (Admin's OSSEC Alert GPG key) <[email protected]>
+sub 4096R/97D2EB39 2014-10-15"""
+ with Sudo("ossec"):
+ c = Command("gpg --homedir /var/ossec/.gnupg --list-keys EDDDC102")
+ assert c.stdout == ossec_gpg_pubkey_info
+
+
[email protected]('keyfile', [
+ '/var/ossec/etc/sslmanager.key',
+ '/var/ossec/etc/sslmanager.cert',
+])
+def test_ossec_keyfiles(File, Sudo, keyfile):
+ """
+ Ensure that the OSSEC transport key pair exists. These keys are used
+ to protect the connection between the ossec-server and ossec-agent.
+
+    All this check does is confirm they're present; it doesn't perform any
+ matching checks to validate the configuration.
+ """
+ with Sudo():
+ f = File(keyfile)
+ assert f.is_file
+ assert oct(f.mode) == "0644"
+ assert f.user == "root"
+
+
[email protected]('setting', [
+ 'VERBOSE=yes',
+ 'MAILDIR=/var/mail/',
+ 'DEFAULT=$MAILDIR',
+ 'LOGFILE=/var/log/procmail.log',
+ 'SUBJECT=`formail -xSubject:`',
+ ':0 c',
+ '*^To:.*root.*',
+ '|/var/ossec/send_encrypted_alarm.sh',
+])
+def test_procmail_settings(File, Sudo, setting):
+ """
+ Ensure procmail settings are correct. These config lines determine
+ how the OSSEC email alerts are encrypted and then passed off for sending.
+ """
+ # Sudo is required to traverse the /var/ossec directory.
+ with Sudo():
+ f = File("/var/ossec/.procmailrc")
+ assert f.contains('^{}$'.format(setting))
+
+
+def test_procmail_attrs(File, Sudo):
+ """
+ Ensure procmail file attributes are specified correctly.
+ """
+ with Sudo():
+ f = File("/var/ossec/.procmailrc")
+ assert f.is_file
+ assert f.user == "ossec"
+ assert oct(f.mode) == "0644"
+
+
+def test_procmail_log(File, Sudo):
+ """
+    Ensure the procmail log file exists with proper ownership.
+ Only the ossec user should have read/write permissions.
+ """
+ with Sudo():
+ f = File("/var/log/procmail.log")
+ assert f.is_file
+ assert f.user == "ossec"
+ assert f.group == "root"
+ assert oct(f.mode) == "0660"
+
+
+def test_ossec_authd(Command, Sudo):
+ """ Ensure that authd is not running """
+ with Sudo():
+ c = Command("pgrep ossec-authd")
+ assert c.stdout == ""
+ assert c.rc != 0
+
+def test_hosts_files(File, SystemInfo):
+    """ Ensure hosts file mappings are in place """
+ f = File('/etc/hosts')
+
+ hostname = SystemInfo.hostname
+ env = "prod"
+ app_ip = "10.0.1.4"
+ if "staging" in hostname:
+ env = "staging"
+ app_ip = "10.0.1.2"
+
+ assert f.contains('^127.0.0.1')
+ assert f.contains('^127.0.0.1\t*mon-{0}\t*mon-{0}$'.format(env))
+ assert f.contains('^{}\s*app-{}$'.format(app_ip, env))
+
+
+def test_ossec_log_contains_no_malformed_events(File, Sudo):
+ """
+ Ensure the OSSEC log reports no errors for incorrectly formatted
+ messages. These events indicate that the OSSEC server failed to decrypt
+ the event sent by the OSSEC agent, which implies a misconfiguration,
+ likely the IPv4 address or keypair differing from what's declared.
+
+ Documentation regarding this error message can be found at:
+ http://ossec-docs.readthedocs.io/en/latest/faq/unexpected.html#id4
+ """
+ with Sudo():
+ f = File("/var/ossec/logs/ossec.log")
+ assert not f.contains("ERROR: Incorrectly formated message from")
+
+
+def test_regression_hosts(Command):
+ """ Regression test to check for duplicate entries. """
+ assert Command.check_output("uniq --repeated /etc/hosts") == ""
diff --git a/testinfra/test.py b/testinfra/test.py
new file mode 100755
--- /dev/null
+++ b/testinfra/test.py
@@ -0,0 +1,125 @@
+#!/usr/bin/env python
+"""
+Wrapper script for running Testinfra against SecureDrop VMs.
+Accepts a single argument: the hostname to run the tests against.
+Script will handle building the list of tests to run, based on hostname.
+"""
+import os
+import subprocess
+import sys
+import tempfile
+
+# By default let's assume we're testing against the development VM.
+try:
+ target_host = sys.argv[1]
+except IndexError:
+ target_host = "development"
+
+# Set env var so that `testinfra/conftest.py` can read in a YAML vars file
+# specific to the host being tested.
+os.environ['SECUREDROP_TESTINFRA_TARGET_HOST'] = target_host
+
+
+def get_target_roles(target_host):
+ """
+ Assemble list of role tests to run. Hard-coded per host.
+ """
+ if target_host == "development":
+ target_roles = [
+ 'testinfra/app-code',
+ 'testinfra/development',
+ ]
+
+ elif target_host == "app-staging":
+ target_roles = [
+ 'testinfra/app',
+ 'testinfra/app-code',
+ 'testinfra/common',
+ 'testinfra/development/test_xvfb.py',
+ ]
+
+ elif target_host == "mon-staging":
+ target_roles = [
+ 'testinfra/mon',
+ 'testinfra/common',
+ ]
+
+ elif target_host == "mon-prod":
+ target_roles = [
+ 'testinfra/mon',
+ ]
+
+ elif target_host == "build":
+ target_roles = [
+ 'testinfra/build',
+ ]
+ else:
+ print("Unknown host '{}'! Exiting.".format(target_host))
+ sys.exit(1)
+
+ return target_roles
+
+
+def run_testinfra(target_host, verbose=True):
+ """
+ Handler for executing testinfra against `target_host`.
+ Queries list of roles via helper def `get_target_roles`.
+ """
+ target_roles = get_target_roles(target_host)
+ if verbose:
+ # Print informative output prior to test run.
+ print("Running Testinfra suite against '{}'...".format(target_host))
+ print("Target roles:")
+ for role in target_roles:
+ print(" - {}".format(role))
+
+    # Prod hosts have SSH access only over Tor. Let's use the SSH backend
+ # for Testinfra, rather than Ansible. When we write a dynamic inventory
+ # script for Ansible SSH-over-Tor, we can use the Ansible backend
+ # everywhere.
+ if target_host.endswith("-prod"):
+ os.environ['SECUREDROP_SSH_OVER_TOR'] = '1'
+ # Dump SSH config to tempfile so it can be passed as arg to testinfra.
+ ssh_config_output = subprocess.check_output(["vagrant", "ssh-config", target_host])
+ # Create temporary file to store ssh-config. Not deleting it automatically
+ # because there's no sensitive info (HidServAuth is required to connect),
+ # and we'll need it outside of the context-manager block that writes to it.
+ ssh_config_tmpfile = tempfile.NamedTemporaryFile(delete=False)
+ with ssh_config_tmpfile.file as f:
+ f.write(ssh_config_output)
+ ssh_config_path = ssh_config_tmpfile.name
+ testinfra_command_template = """
+testinfra \
+ -vv \
+ -n auto \
+ --connection ssh \
+ --ssh-config \
+ {ssh_config_path}\
+ --hosts {target_host} \
+ {target_roles}
+""".lstrip().rstrip()
+
+ else:
+ ssh_config_path = ""
+ testinfra_command_template = """
+testinfra \
+ -vv \
+ -n auto \
+ --connection ansible \
+ --ansible-inventory \
+ .vagrant/provisioners/ansible/inventory/vagrant_ansible_inventory \
+ --hosts {target_host} \
+ {target_roles}
+""".lstrip().rstrip()
+
+ testinfra_command = testinfra_command_template.format(
+ target_host=target_host,
+ ssh_config_path=ssh_config_path,
+ target_roles=" ".join(target_roles),
+ ).split()
+
+ # Execute config tests.
+ subprocess.check_call(testinfra_command)
+
+if __name__ == "__main__":
+ run_testinfra(target_host)
diff --git a/spec_tests/spec/vars/development.yml b/testinfra/vars/development.yml
similarity index 100%
rename from spec_tests/spec/vars/development.yml
rename to testinfra/vars/development.yml
| Convert ServerSpec tests to TestInfra
Substantial work has been done on config tests, currently implemented in [ServerSpec](http://serverspec.org/) (#1006). The config tests are not currently run automatically via a CI pipeline (#1067), but should be. Given recent efforts to improve the application tests (#1445, #1457, #1466, #1474, #1516), we should make a similar push to test the system hardening specifications.
The core developers already have strong familiarity with Python and [pytest](http://docs.pytest.org/en/latest/contents.html), so we should convert the existing config tests to [TestInfra](https://testinfra.readthedocs.io/en/latest/#), which is pytest-based. Doing so would allow us to remove Ruby-based tooling from the repo and focus on a Python-only development environment for the foreseeable future.
The config tests must be updated before we make a concerted effort to improve the Ansible versioning situation on Admin Workstations (#1146, #1531). We'll also need a strong config testing baseline in order to evaluate the upgrade from Ubuntu Trusty to Xenial (#1530).
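For illustration only, here is a minimal sketch of the conversion pattern used throughout the patch above (the test name below is hypothetical): a ServerSpec `describe package(...)` block with `it { should be_installed }` becomes a parametrized pytest function that receives Testinfra's `Package` fixture.

```python
import pytest


@pytest.mark.parametrize('package', [
    'tor',
    'deb.torproject.org-keyring',
])
def test_example_package_installed(Package, package):
    """
    Ensure the given apt package is installed. Testinfra injects the
    Package fixture, replacing ServerSpec's package() resource.
    """
    assert Package(package).is_installed
```

Such tests are then run against a target VM the same way the wrapper script in the patch does, e.g. `testinfra --connection ansible --hosts mon-staging testinfra/common`.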
| 2017-03-11T00:52:02Z | [] | [] |