in_source_id
stringlengths
13
58
issue
stringlengths
3
241k
before_files
listlengths
0
3
after_files
listlengths
0
3
pr_diff
stringlengths
109
107M
huggingface__transformers-9427
Improve coverage of the documentation Currently, some public classes are not documented anywhere because we didn't create the corresponding doc pages. Those missing pages are: - Benchmark classes - Bert Japanese - Data collators If someone feels like working on one of those, please tag yourself with a comment on this issue. Once the objects are properly documented, they can be removed from the `SHOULD_BE_DOCUMENTED` constant in [this file](https://github.com/huggingface/transformers/blob/1310e1a758edc8e89ec363db76863c771fbeb1de/utils/check_repo.py#L374).
[ { "content": "# coding=utf-8\n# Copyright 2020 The HuggingFace Inc. team.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport importlib\nimport inspect\nimport os\nimport re\nfrom pathlib import Path\n\n\n# All paths are set with the intent you should run this script from the root of the repo with the command\n# python utils/check_repo.py\nPATH_TO_TRANSFORMERS = \"src/transformers\"\nPATH_TO_TESTS = \"tests\"\nPATH_TO_DOC = \"docs/source\"\n\n# Update this list for models that are not tested with a comment explaining the reason it should not be.\n# Being in this list is an exception and should **not** be the rule.\nIGNORE_NON_TESTED = [\n # models to ignore for not tested\n \"LEDEncoder\", # Building part of bigger (tested) model.\n \"LEDDecoder\", # Building part of bigger (tested) model.\n \"BartDecoder\", # Building part of bigger (tested) model.\n \"BartEncoder\", # Building part of bigger (tested) model.\n \"BertLMHeadModel\", # Needs to be setup as decoder.\n \"BlenderbotSmallEncoder\", # Building part of bigger (tested) model.\n \"BlenderbotSmallDecoder\", # Building part of bigger (tested) model.\n \"BlenderbotEncoder\", # Building part of bigger (tested) model.\n \"BlenderbotDecoder\", # Building part of bigger (tested) model.\n \"MBartEncoder\", # Building part of bigger (tested) model.\n \"MBartDecoder\", # Building part of bigger (tested) model.\n \"PegasusEncoder\", # Building part of bigger (tested) model.\n \"PegasusDecoder\", # Building part of bigger (tested) model.\n \"DPREncoder\", # Building part of bigger (tested) model.\n \"DPRSpanPredictor\", # Building part of bigger (tested) model.\n \"ProphetNetDecoderWrapper\", # Building part of bigger (tested) model.\n \"ReformerForMaskedLM\", # Needs to be setup as decoder.\n \"T5Stack\", # Building part of bigger (tested) model.\n \"TFDPREncoder\", # Building part of bigger (tested) model.\n \"TFDPRSpanPredictor\", # Building part of bigger (tested) model.\n \"TFElectraMainLayer\", # Building part of bigger (tested) model (should it be a TFPreTrainedModel ?)\n \"TFRobertaForMultipleChoice\", # TODO: fix\n]\n\n# Update this list with test files that don't have a tester with a `all_model_classes` variable and which don't\n# trigger the common tests.\nTEST_FILES_WITH_NO_COMMON_TESTS = [\n \"test_modeling_camembert.py\",\n \"test_modeling_flax_bert.py\",\n \"test_modeling_flax_roberta.py\",\n \"test_modeling_mbart.py\",\n \"test_modeling_mt5.py\",\n \"test_modeling_pegasus.py\",\n \"test_modeling_tf_camembert.py\",\n \"test_modeling_tf_mt5.py\",\n \"test_modeling_tf_xlm_roberta.py\",\n \"test_modeling_xlm_prophetnet.py\",\n \"test_modeling_xlm_roberta.py\",\n]\n\n# Update this list for models that are not in any of the auto MODEL_XXX_MAPPING. Being in this list is an exception and\n# should **not** be the rule.\nIGNORE_NON_AUTO_CONFIGURED = [\n # models to ignore for model xxx mapping\n \"LEDEncoder\",\n \"LEDDecoder\",\n \"BartDecoder\",\n \"BartEncoder\",\n \"BlenderbotSmallEncoder\",\n \"BlenderbotSmallDecoder\",\n \"BlenderbotEncoder\",\n \"BlenderbotDecoder\",\n \"DPRContextEncoder\",\n \"DPREncoder\",\n \"DPRReader\",\n \"DPRSpanPredictor\",\n \"FlaubertForQuestionAnswering\",\n \"FunnelBaseModel\",\n \"GPT2DoubleHeadsModel\",\n \"MT5EncoderModel\",\n \"MBartEncoder\",\n \"MBartDecoder\",\n \"OpenAIGPTDoubleHeadsModel\",\n \"PegasusEncoder\",\n \"PegasusDecoder\",\n \"ProphetNetDecoder\",\n \"ProphetNetEncoder\",\n \"ProphetNetDecoderWrapper\",\n \"RagModel\",\n \"RagSequenceForGeneration\",\n \"RagTokenForGeneration\",\n \"T5Stack\",\n \"T5EncoderModel\",\n \"TFDPRContextEncoder\",\n \"TFDPREncoder\",\n \"TFDPRReader\",\n \"TFDPRSpanPredictor\",\n \"TFFunnelBaseModel\",\n \"TFGPT2DoubleHeadsModel\",\n \"TFMT5EncoderModel\",\n \"TFOpenAIGPTDoubleHeadsModel\",\n \"TFT5EncoderModel\",\n \"XLMForQuestionAnswering\",\n \"XLMProphetNetDecoder\",\n \"XLMProphetNetEncoder\",\n \"XLNetForQuestionAnswering\",\n]\n\n# This is to make sure the transformers module imported is the one in the repo.\nspec = importlib.util.spec_from_file_location(\n \"transformers\",\n os.path.join(PATH_TO_TRANSFORMERS, \"__init__.py\"),\n submodule_search_locations=[PATH_TO_TRANSFORMERS],\n)\ntransformers = spec.loader.load_module()\n\n\n# If some modeling modules should be ignored for all checks, they should be added in the nested list\n# _ignore_modules of this function.\ndef get_model_modules():\n \"\"\" Get the model modules inside the transformers library. \"\"\"\n _ignore_modules = [\n \"modeling_auto\",\n \"modeling_encoder_decoder\",\n \"modeling_marian\",\n \"modeling_mmbt\",\n \"modeling_outputs\",\n \"modeling_retribert\",\n \"modeling_utils\",\n \"modeling_flax_auto\",\n \"modeling_flax_utils\",\n \"modeling_transfo_xl_utilities\",\n \"modeling_tf_auto\",\n \"modeling_tf_outputs\",\n \"modeling_tf_pytorch_utils\",\n \"modeling_tf_utils\",\n \"modeling_tf_transfo_xl_utilities\",\n ]\n modules = []\n for model in dir(transformers.models):\n # There are some magic dunder attributes in the dir, we ignore them\n if not model.startswith(\"__\"):\n model_module = getattr(transformers.models, model)\n for submodule in dir(model_module):\n if submodule.startswith(\"modeling\") and submodule not in _ignore_modules:\n modeling_module = getattr(model_module, submodule)\n if inspect.ismodule(modeling_module):\n modules.append(modeling_module)\n return modules\n\n\ndef get_models(module):\n \"\"\" Get the objects in module that are models.\"\"\"\n models = []\n model_classes = (transformers.PreTrainedModel, transformers.TFPreTrainedModel)\n for attr_name in dir(module):\n if \"Pretrained\" in attr_name or \"PreTrained\" in attr_name:\n continue\n attr = getattr(module, attr_name)\n if isinstance(attr, type) and issubclass(attr, model_classes) and attr.__module__ == module.__name__:\n models.append((attr_name, attr))\n return models\n\n\n# If some test_modeling files should be ignored when checking models are all tested, they should be added in the\n# nested list _ignore_files of this function.\ndef get_model_test_files():\n \"\"\" Get the model test files.\"\"\"\n _ignore_files = [\n \"test_modeling_common\",\n \"test_modeling_encoder_decoder\",\n \"test_modeling_marian\",\n \"test_modeling_tf_common\",\n ]\n test_files = []\n for filename in os.listdir(PATH_TO_TESTS):\n if (\n os.path.isfile(f\"{PATH_TO_TESTS}/{filename}\")\n and filename.startswith(\"test_modeling\")\n and not os.path.splitext(filename)[0] in _ignore_files\n ):\n test_files.append(filename)\n return test_files\n\n\n# This is a bit hacky but I didn't find a way to import the test_file as a module and read inside the tester class\n# for the all_model_classes variable.\ndef find_tested_models(test_file):\n \"\"\" Parse the content of test_file to detect what's in all_model_classes\"\"\"\n # This is a bit hacky but I didn't find a way to import the test_file as a module and read inside the class\n with open(os.path.join(PATH_TO_TESTS, test_file), \"r\", encoding=\"utf-8\", newline=\"\\n\") as f:\n content = f.read()\n all_models = re.findall(r\"all_model_classes\\s+=\\s+\\(\\s*\\(([^\\)]*)\\)\", content)\n # Check with one less parenthesis\n if len(all_models) == 0:\n all_models = re.findall(r\"all_model_classes\\s+=\\s+\\(([^\\)]*)\\)\", content)\n if len(all_models) > 0:\n model_tested = []\n for entry in all_models:\n for line in entry.split(\",\"):\n name = line.strip()\n if len(name) > 0:\n model_tested.append(name)\n return model_tested\n\n\ndef check_models_are_tested(module, test_file):\n \"\"\" Check models defined in module are tested in test_file.\"\"\"\n defined_models = get_models(module)\n tested_models = find_tested_models(test_file)\n if tested_models is None:\n if test_file in TEST_FILES_WITH_NO_COMMON_TESTS:\n return\n return [\n f\"{test_file} should define `all_model_classes` to apply common tests to the models it tests. \"\n + \"If this intentional, add the test filename to `TEST_FILES_WITH_NO_COMMON_TESTS` in the file \"\n + \"`utils/check_repo.py`.\"\n ]\n failures = []\n for model_name, _ in defined_models:\n if model_name not in tested_models and model_name not in IGNORE_NON_TESTED:\n failures.append(\n f\"{model_name} is defined in {module.__name__} but is not tested in \"\n + f\"{os.path.join(PATH_TO_TESTS, test_file)}. Add it to the all_model_classes in that file.\"\n + \"If common tests should not applied to that model, add its name to `IGNORE_NON_TESTED`\"\n + \"in the file `utils/check_repo.py`.\"\n )\n return failures\n\n\ndef check_all_models_are_tested():\n \"\"\" Check all models are properly tested.\"\"\"\n modules = get_model_modules()\n test_files = get_model_test_files()\n failures = []\n for module in modules:\n test_file = f\"test_{module.__name__.split('.')[-1]}.py\"\n if test_file not in test_files:\n failures.append(f\"{module.__name__} does not have its corresponding test file {test_file}.\")\n new_failures = check_models_are_tested(module, test_file)\n if new_failures is not None:\n failures += new_failures\n if len(failures) > 0:\n raise Exception(f\"There were {len(failures)} failures:\\n\" + \"\\n\".join(failures))\n\n\ndef get_all_auto_configured_models():\n \"\"\" Return the list of all models in at least one auto class.\"\"\"\n result = set() # To avoid duplicates we concatenate all model classes in a set.\n for attr_name in dir(transformers.models.auto.modeling_auto):\n if attr_name.startswith(\"MODEL_\") and attr_name.endswith(\"MAPPING\"):\n result = result | set(getattr(transformers.models.auto.modeling_auto, attr_name).values())\n for attr_name in dir(transformers.models.auto.modeling_tf_auto):\n if attr_name.startswith(\"TF_MODEL_\") and attr_name.endswith(\"MAPPING\"):\n result = result | set(getattr(transformers.models.auto.modeling_tf_auto, attr_name).values())\n return [cls.__name__ for cls in result]\n\n\ndef check_models_are_auto_configured(module, all_auto_models):\n \"\"\" Check models defined in module are each in an auto class.\"\"\"\n defined_models = get_models(module)\n failures = []\n for model_name, _ in defined_models:\n if model_name not in all_auto_models and model_name not in IGNORE_NON_AUTO_CONFIGURED:\n failures.append(\n f\"{model_name} is defined in {module.__name__} but is not present in any of the auto mapping. \"\n \"If that is intended behavior, add its name to `IGNORE_NON_AUTO_CONFIGURED` in the file \"\n \"`utils/check_repo.py`.\"\n )\n return failures\n\n\ndef check_all_models_are_auto_configured():\n \"\"\" Check all models are each in an auto class.\"\"\"\n modules = get_model_modules()\n all_auto_models = get_all_auto_configured_models()\n failures = []\n for module in modules:\n new_failures = check_models_are_auto_configured(module, all_auto_models)\n if new_failures is not None:\n failures += new_failures\n if len(failures) > 0:\n raise Exception(f\"There were {len(failures)} failures:\\n\" + \"\\n\".join(failures))\n\n\n_re_decorator = re.compile(r\"^\\s*@(\\S+)\\s+$\")\n\n\ndef check_decorator_order(filename):\n \"\"\" Check that in the test file `filename` the slow decorator is always last.\"\"\"\n with open(filename, \"r\", encoding=\"utf-8\", newline=\"\\n\") as f:\n lines = f.readlines()\n decorator_before = None\n errors = []\n for i, line in enumerate(lines):\n search = _re_decorator.search(line)\n if search is not None:\n decorator_name = search.groups()[0]\n if decorator_before is not None and decorator_name.startswith(\"parameterized\"):\n errors.append(i)\n decorator_before = decorator_name\n elif decorator_before is not None:\n decorator_before = None\n return errors\n\n\ndef check_all_decorator_order():\n \"\"\" Check that in all test files, the slow decorator is always last.\"\"\"\n errors = []\n for fname in os.listdir(PATH_TO_TESTS):\n if fname.endswith(\".py\"):\n filename = os.path.join(PATH_TO_TESTS, fname)\n new_errors = check_decorator_order(filename)\n errors += [f\"- {filename}, line {i}\" for i in new_errors]\n if len(errors) > 0:\n msg = \"\\n\".join(errors)\n raise ValueError(\n f\"The parameterized decorator (and its variants) should always be first, but this is not the case in the following files:\\n{msg}\"\n )\n\n\ndef find_all_documented_objects():\n \"\"\" Parse the content of all doc files to detect which classes and functions it documents\"\"\"\n documented_obj = []\n for doc_file in Path(PATH_TO_DOC).glob(\"**/*.rst\"):\n with open(doc_file, \"r\", encoding=\"utf-8\", newline=\"\\n\") as f:\n content = f.read()\n raw_doc_objs = re.findall(r\"(?:autoclass|autofunction):: transformers.(\\S+)\\s+\", content)\n documented_obj += [obj.split(\".\")[-1] for obj in raw_doc_objs]\n return documented_obj\n\n\n# One good reason for not being documented is to be deprecated. Put in this list deprecated objects.\nDEPRECATED_OBJECTS = [\n \"AutoModelWithLMHead\",\n \"BartPretrainedModel\",\n \"GlueDataset\",\n \"GlueDataTrainingArguments\",\n \"LineByLineTextDataset\",\n \"LineByLineWithRefDataset\",\n \"LineByLineWithSOPTextDataset\",\n \"PretrainedBartModel\",\n \"PretrainedFSMTModel\",\n \"SingleSentenceClassificationProcessor\",\n \"SquadDataTrainingArguments\",\n \"SquadDataset\",\n \"SquadExample\",\n \"SquadFeatures\",\n \"SquadV1Processor\",\n \"SquadV2Processor\",\n \"TFAutoModelWithLMHead\",\n \"TFBartPretrainedModel\",\n \"TextDataset\",\n \"TextDatasetForNextSentencePrediction\",\n \"glue_compute_metrics\",\n \"glue_convert_examples_to_features\",\n \"glue_output_modes\",\n \"glue_processors\",\n \"glue_tasks_num_labels\",\n \"squad_convert_examples_to_features\",\n \"xnli_compute_metrics\",\n \"xnli_output_modes\",\n \"xnli_processors\",\n \"xnli_tasks_num_labels\",\n]\n\n# Exceptionally, some objects should not be documented after all rules passed.\n# ONLY PUT SOMETHING IN THIS LIST AS A LAST RESORT!\nUNDOCUMENTED_OBJECTS = [\n \"AddedToken\", # This is a tokenizers class.\n \"BasicTokenizer\", # Internal, should never have been in the main init.\n \"DPRPretrainedReader\", # Like an Encoder.\n \"ModelCard\", # Internal type.\n \"SqueezeBertModule\", # Internal building block (should have been called SqueezeBertLayer)\n \"TFDPRPretrainedReader\", # Like an Encoder.\n \"TransfoXLCorpus\", # Internal type.\n \"WordpieceTokenizer\", # Internal, should never have been in the main init.\n \"absl\", # External module\n \"add_end_docstrings\", # Internal, should never have been in the main init.\n \"add_start_docstrings\", # Internal, should never have been in the main init.\n \"cached_path\", # Internal used for downloading models.\n \"convert_tf_weight_name_to_pt_weight_name\", # Internal used to convert model weights\n \"logger\", # Internal logger\n \"logging\", # External module\n]\n\n# This list should be empty. Objects in it should get their own doc page.\nSHOULD_HAVE_THEIR_OWN_PAGE = [\n # bert-japanese\n \"BertJapaneseTokenizer\",\n \"CharacterTokenizer\",\n \"MecabTokenizer\",\n # Phoebus\n \"PhobertTokenizer\",\n # Benchmarks\n \"PyTorchBenchmark\",\n \"PyTorchBenchmarkArguments\",\n \"TensorFlowBenchmark\",\n \"TensorFlowBenchmarkArguments\",\n]\n\n\ndef ignore_undocumented(name):\n \"\"\"Rules to determine if `name` should be undocumented.\"\"\"\n # NOT DOCUMENTED ON PURPOSE.\n # Magic attributes are not documented.\n if name.startswith(\"__\"):\n return True\n # Constants uppercase are not documented.\n if name.isupper():\n return True\n # PreTrainedModels / Encoders / Decoders / Layers / Embeddings / Attention are not documented.\n if (\n name.endswith(\"PreTrainedModel\")\n or name.endswith(\"Decoder\")\n or name.endswith(\"Encoder\")\n or name.endswith(\"Layer\")\n or name.endswith(\"Embeddings\")\n or name.endswith(\"Attention\")\n ):\n return True\n # Submodules are not documented.\n if os.path.isdir(os.path.join(PATH_TO_TRANSFORMERS, name)) or os.path.isfile(\n os.path.join(PATH_TO_TRANSFORMERS, f\"{name}.py\")\n ):\n return True\n # All load functions are not documented.\n if name.startswith(\"load_tf\") or name.startswith(\"load_pytorch\"):\n return True\n # is_xxx_available functions are not documented.\n if name.startswith(\"is_\") and name.endswith(\"_available\"):\n return True\n # Deprecated objects are not documented.\n if name in DEPRECATED_OBJECTS or name in UNDOCUMENTED_OBJECTS:\n return True\n # MMBT model does not really work.\n if name.startswith(\"MMBT\"):\n return True\n\n # NOT DOCUMENTED BUT NOT ON PURPOSE, SHOULD BE FIXED!\n # All data collators should be documented\n if name.startswith(\"DataCollator\") or name.endswith(\"data_collator\"):\n return True\n if name in SHOULD_HAVE_THEIR_OWN_PAGE:\n return True\n return False\n\n\ndef check_all_objects_are_documented():\n \"\"\" Check all models are properly documented.\"\"\"\n documented_objs = find_all_documented_objects()\n undocumented_objs = [c for c in dir(transformers) if c not in documented_objs and not ignore_undocumented(c)]\n if len(undocumented_objs) > 0:\n raise Exception(\n \"The following objects are in the public init so should be documented:\\n - \"\n + \"\\n - \".join(undocumented_objs)\n )\n\n\ndef check_repo_quality():\n \"\"\" Check all models are properly tested and documented.\"\"\"\n print(\"Checking all models are properly tested.\")\n check_all_decorator_order()\n check_all_models_are_tested()\n print(\"Checking all objects are properly documented.\")\n check_all_objects_are_documented()\n print(\"Checking all models are in at least one auto class.\")\n check_all_models_are_auto_configured()\n\n\nif __name__ == \"__main__\":\n check_repo_quality()\n", "path": "utils/check_repo.py" } ]
[ { "content": "# coding=utf-8\n# Copyright 2020 The HuggingFace Inc. team.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport importlib\nimport inspect\nimport os\nimport re\nfrom pathlib import Path\n\n\n# All paths are set with the intent you should run this script from the root of the repo with the command\n# python utils/check_repo.py\nPATH_TO_TRANSFORMERS = \"src/transformers\"\nPATH_TO_TESTS = \"tests\"\nPATH_TO_DOC = \"docs/source\"\n\n# Update this list for models that are not tested with a comment explaining the reason it should not be.\n# Being in this list is an exception and should **not** be the rule.\nIGNORE_NON_TESTED = [\n # models to ignore for not tested\n \"LEDEncoder\", # Building part of bigger (tested) model.\n \"LEDDecoder\", # Building part of bigger (tested) model.\n \"BartDecoder\", # Building part of bigger (tested) model.\n \"BartEncoder\", # Building part of bigger (tested) model.\n \"BertLMHeadModel\", # Needs to be setup as decoder.\n \"BlenderbotSmallEncoder\", # Building part of bigger (tested) model.\n \"BlenderbotSmallDecoder\", # Building part of bigger (tested) model.\n \"BlenderbotEncoder\", # Building part of bigger (tested) model.\n \"BlenderbotDecoder\", # Building part of bigger (tested) model.\n \"MBartEncoder\", # Building part of bigger (tested) model.\n \"MBartDecoder\", # Building part of bigger (tested) model.\n \"PegasusEncoder\", # Building part of bigger (tested) model.\n \"PegasusDecoder\", # Building part of bigger (tested) model.\n \"DPREncoder\", # Building part of bigger (tested) model.\n \"DPRSpanPredictor\", # Building part of bigger (tested) model.\n \"ProphetNetDecoderWrapper\", # Building part of bigger (tested) model.\n \"ReformerForMaskedLM\", # Needs to be setup as decoder.\n \"T5Stack\", # Building part of bigger (tested) model.\n \"TFDPREncoder\", # Building part of bigger (tested) model.\n \"TFDPRSpanPredictor\", # Building part of bigger (tested) model.\n \"TFElectraMainLayer\", # Building part of bigger (tested) model (should it be a TFPreTrainedModel ?)\n \"TFRobertaForMultipleChoice\", # TODO: fix\n]\n\n# Update this list with test files that don't have a tester with a `all_model_classes` variable and which don't\n# trigger the common tests.\nTEST_FILES_WITH_NO_COMMON_TESTS = [\n \"test_modeling_camembert.py\",\n \"test_modeling_flax_bert.py\",\n \"test_modeling_flax_roberta.py\",\n \"test_modeling_mbart.py\",\n \"test_modeling_mt5.py\",\n \"test_modeling_pegasus.py\",\n \"test_modeling_tf_camembert.py\",\n \"test_modeling_tf_mt5.py\",\n \"test_modeling_tf_xlm_roberta.py\",\n \"test_modeling_xlm_prophetnet.py\",\n \"test_modeling_xlm_roberta.py\",\n]\n\n# Update this list for models that are not in any of the auto MODEL_XXX_MAPPING. Being in this list is an exception and\n# should **not** be the rule.\nIGNORE_NON_AUTO_CONFIGURED = [\n # models to ignore for model xxx mapping\n \"LEDEncoder\",\n \"LEDDecoder\",\n \"BartDecoder\",\n \"BartEncoder\",\n \"BlenderbotSmallEncoder\",\n \"BlenderbotSmallDecoder\",\n \"BlenderbotEncoder\",\n \"BlenderbotDecoder\",\n \"DPRContextEncoder\",\n \"DPREncoder\",\n \"DPRReader\",\n \"DPRSpanPredictor\",\n \"FlaubertForQuestionAnswering\",\n \"FunnelBaseModel\",\n \"GPT2DoubleHeadsModel\",\n \"MT5EncoderModel\",\n \"MBartEncoder\",\n \"MBartDecoder\",\n \"OpenAIGPTDoubleHeadsModel\",\n \"PegasusEncoder\",\n \"PegasusDecoder\",\n \"ProphetNetDecoder\",\n \"ProphetNetEncoder\",\n \"ProphetNetDecoderWrapper\",\n \"RagModel\",\n \"RagSequenceForGeneration\",\n \"RagTokenForGeneration\",\n \"T5Stack\",\n \"T5EncoderModel\",\n \"TFDPRContextEncoder\",\n \"TFDPREncoder\",\n \"TFDPRReader\",\n \"TFDPRSpanPredictor\",\n \"TFFunnelBaseModel\",\n \"TFGPT2DoubleHeadsModel\",\n \"TFMT5EncoderModel\",\n \"TFOpenAIGPTDoubleHeadsModel\",\n \"TFT5EncoderModel\",\n \"XLMForQuestionAnswering\",\n \"XLMProphetNetDecoder\",\n \"XLMProphetNetEncoder\",\n \"XLNetForQuestionAnswering\",\n]\n\n# This is to make sure the transformers module imported is the one in the repo.\nspec = importlib.util.spec_from_file_location(\n \"transformers\",\n os.path.join(PATH_TO_TRANSFORMERS, \"__init__.py\"),\n submodule_search_locations=[PATH_TO_TRANSFORMERS],\n)\ntransformers = spec.loader.load_module()\n\n\n# If some modeling modules should be ignored for all checks, they should be added in the nested list\n# _ignore_modules of this function.\ndef get_model_modules():\n \"\"\" Get the model modules inside the transformers library. \"\"\"\n _ignore_modules = [\n \"modeling_auto\",\n \"modeling_encoder_decoder\",\n \"modeling_marian\",\n \"modeling_mmbt\",\n \"modeling_outputs\",\n \"modeling_retribert\",\n \"modeling_utils\",\n \"modeling_flax_auto\",\n \"modeling_flax_utils\",\n \"modeling_transfo_xl_utilities\",\n \"modeling_tf_auto\",\n \"modeling_tf_outputs\",\n \"modeling_tf_pytorch_utils\",\n \"modeling_tf_utils\",\n \"modeling_tf_transfo_xl_utilities\",\n ]\n modules = []\n for model in dir(transformers.models):\n # There are some magic dunder attributes in the dir, we ignore them\n if not model.startswith(\"__\"):\n model_module = getattr(transformers.models, model)\n for submodule in dir(model_module):\n if submodule.startswith(\"modeling\") and submodule not in _ignore_modules:\n modeling_module = getattr(model_module, submodule)\n if inspect.ismodule(modeling_module):\n modules.append(modeling_module)\n return modules\n\n\ndef get_models(module):\n \"\"\" Get the objects in module that are models.\"\"\"\n models = []\n model_classes = (transformers.PreTrainedModel, transformers.TFPreTrainedModel)\n for attr_name in dir(module):\n if \"Pretrained\" in attr_name or \"PreTrained\" in attr_name:\n continue\n attr = getattr(module, attr_name)\n if isinstance(attr, type) and issubclass(attr, model_classes) and attr.__module__ == module.__name__:\n models.append((attr_name, attr))\n return models\n\n\n# If some test_modeling files should be ignored when checking models are all tested, they should be added in the\n# nested list _ignore_files of this function.\ndef get_model_test_files():\n \"\"\" Get the model test files.\"\"\"\n _ignore_files = [\n \"test_modeling_common\",\n \"test_modeling_encoder_decoder\",\n \"test_modeling_marian\",\n \"test_modeling_tf_common\",\n ]\n test_files = []\n for filename in os.listdir(PATH_TO_TESTS):\n if (\n os.path.isfile(f\"{PATH_TO_TESTS}/{filename}\")\n and filename.startswith(\"test_modeling\")\n and not os.path.splitext(filename)[0] in _ignore_files\n ):\n test_files.append(filename)\n return test_files\n\n\n# This is a bit hacky but I didn't find a way to import the test_file as a module and read inside the tester class\n# for the all_model_classes variable.\ndef find_tested_models(test_file):\n \"\"\" Parse the content of test_file to detect what's in all_model_classes\"\"\"\n # This is a bit hacky but I didn't find a way to import the test_file as a module and read inside the class\n with open(os.path.join(PATH_TO_TESTS, test_file), \"r\", encoding=\"utf-8\", newline=\"\\n\") as f:\n content = f.read()\n all_models = re.findall(r\"all_model_classes\\s+=\\s+\\(\\s*\\(([^\\)]*)\\)\", content)\n # Check with one less parenthesis\n if len(all_models) == 0:\n all_models = re.findall(r\"all_model_classes\\s+=\\s+\\(([^\\)]*)\\)\", content)\n if len(all_models) > 0:\n model_tested = []\n for entry in all_models:\n for line in entry.split(\",\"):\n name = line.strip()\n if len(name) > 0:\n model_tested.append(name)\n return model_tested\n\n\ndef check_models_are_tested(module, test_file):\n \"\"\" Check models defined in module are tested in test_file.\"\"\"\n defined_models = get_models(module)\n tested_models = find_tested_models(test_file)\n if tested_models is None:\n if test_file in TEST_FILES_WITH_NO_COMMON_TESTS:\n return\n return [\n f\"{test_file} should define `all_model_classes` to apply common tests to the models it tests. \"\n + \"If this intentional, add the test filename to `TEST_FILES_WITH_NO_COMMON_TESTS` in the file \"\n + \"`utils/check_repo.py`.\"\n ]\n failures = []\n for model_name, _ in defined_models:\n if model_name not in tested_models and model_name not in IGNORE_NON_TESTED:\n failures.append(\n f\"{model_name} is defined in {module.__name__} but is not tested in \"\n + f\"{os.path.join(PATH_TO_TESTS, test_file)}. Add it to the all_model_classes in that file.\"\n + \"If common tests should not applied to that model, add its name to `IGNORE_NON_TESTED`\"\n + \"in the file `utils/check_repo.py`.\"\n )\n return failures\n\n\ndef check_all_models_are_tested():\n \"\"\" Check all models are properly tested.\"\"\"\n modules = get_model_modules()\n test_files = get_model_test_files()\n failures = []\n for module in modules:\n test_file = f\"test_{module.__name__.split('.')[-1]}.py\"\n if test_file not in test_files:\n failures.append(f\"{module.__name__} does not have its corresponding test file {test_file}.\")\n new_failures = check_models_are_tested(module, test_file)\n if new_failures is not None:\n failures += new_failures\n if len(failures) > 0:\n raise Exception(f\"There were {len(failures)} failures:\\n\" + \"\\n\".join(failures))\n\n\ndef get_all_auto_configured_models():\n \"\"\" Return the list of all models in at least one auto class.\"\"\"\n result = set() # To avoid duplicates we concatenate all model classes in a set.\n for attr_name in dir(transformers.models.auto.modeling_auto):\n if attr_name.startswith(\"MODEL_\") and attr_name.endswith(\"MAPPING\"):\n result = result | set(getattr(transformers.models.auto.modeling_auto, attr_name).values())\n for attr_name in dir(transformers.models.auto.modeling_tf_auto):\n if attr_name.startswith(\"TF_MODEL_\") and attr_name.endswith(\"MAPPING\"):\n result = result | set(getattr(transformers.models.auto.modeling_tf_auto, attr_name).values())\n return [cls.__name__ for cls in result]\n\n\ndef check_models_are_auto_configured(module, all_auto_models):\n \"\"\" Check models defined in module are each in an auto class.\"\"\"\n defined_models = get_models(module)\n failures = []\n for model_name, _ in defined_models:\n if model_name not in all_auto_models and model_name not in IGNORE_NON_AUTO_CONFIGURED:\n failures.append(\n f\"{model_name} is defined in {module.__name__} but is not present in any of the auto mapping. \"\n \"If that is intended behavior, add its name to `IGNORE_NON_AUTO_CONFIGURED` in the file \"\n \"`utils/check_repo.py`.\"\n )\n return failures\n\n\ndef check_all_models_are_auto_configured():\n \"\"\" Check all models are each in an auto class.\"\"\"\n modules = get_model_modules()\n all_auto_models = get_all_auto_configured_models()\n failures = []\n for module in modules:\n new_failures = check_models_are_auto_configured(module, all_auto_models)\n if new_failures is not None:\n failures += new_failures\n if len(failures) > 0:\n raise Exception(f\"There were {len(failures)} failures:\\n\" + \"\\n\".join(failures))\n\n\n_re_decorator = re.compile(r\"^\\s*@(\\S+)\\s+$\")\n\n\ndef check_decorator_order(filename):\n \"\"\" Check that in the test file `filename` the slow decorator is always last.\"\"\"\n with open(filename, \"r\", encoding=\"utf-8\", newline=\"\\n\") as f:\n lines = f.readlines()\n decorator_before = None\n errors = []\n for i, line in enumerate(lines):\n search = _re_decorator.search(line)\n if search is not None:\n decorator_name = search.groups()[0]\n if decorator_before is not None and decorator_name.startswith(\"parameterized\"):\n errors.append(i)\n decorator_before = decorator_name\n elif decorator_before is not None:\n decorator_before = None\n return errors\n\n\ndef check_all_decorator_order():\n \"\"\" Check that in all test files, the slow decorator is always last.\"\"\"\n errors = []\n for fname in os.listdir(PATH_TO_TESTS):\n if fname.endswith(\".py\"):\n filename = os.path.join(PATH_TO_TESTS, fname)\n new_errors = check_decorator_order(filename)\n errors += [f\"- {filename}, line {i}\" for i in new_errors]\n if len(errors) > 0:\n msg = \"\\n\".join(errors)\n raise ValueError(\n f\"The parameterized decorator (and its variants) should always be first, but this is not the case in the following files:\\n{msg}\"\n )\n\n\ndef find_all_documented_objects():\n \"\"\" Parse the content of all doc files to detect which classes and functions it documents\"\"\"\n documented_obj = []\n for doc_file in Path(PATH_TO_DOC).glob(\"**/*.rst\"):\n with open(doc_file, \"r\", encoding=\"utf-8\", newline=\"\\n\") as f:\n content = f.read()\n raw_doc_objs = re.findall(r\"(?:autoclass|autofunction):: transformers.(\\S+)\\s+\", content)\n documented_obj += [obj.split(\".\")[-1] for obj in raw_doc_objs]\n return documented_obj\n\n\n# One good reason for not being documented is to be deprecated. Put in this list deprecated objects.\nDEPRECATED_OBJECTS = [\n \"AutoModelWithLMHead\",\n \"BartPretrainedModel\",\n \"GlueDataset\",\n \"GlueDataTrainingArguments\",\n \"LineByLineTextDataset\",\n \"LineByLineWithRefDataset\",\n \"LineByLineWithSOPTextDataset\",\n \"PretrainedBartModel\",\n \"PretrainedFSMTModel\",\n \"SingleSentenceClassificationProcessor\",\n \"SquadDataTrainingArguments\",\n \"SquadDataset\",\n \"SquadExample\",\n \"SquadFeatures\",\n \"SquadV1Processor\",\n \"SquadV2Processor\",\n \"TFAutoModelWithLMHead\",\n \"TFBartPretrainedModel\",\n \"TextDataset\",\n \"TextDatasetForNextSentencePrediction\",\n \"glue_compute_metrics\",\n \"glue_convert_examples_to_features\",\n \"glue_output_modes\",\n \"glue_processors\",\n \"glue_tasks_num_labels\",\n \"squad_convert_examples_to_features\",\n \"xnli_compute_metrics\",\n \"xnli_output_modes\",\n \"xnli_processors\",\n \"xnli_tasks_num_labels\",\n]\n\n# Exceptionally, some objects should not be documented after all rules passed.\n# ONLY PUT SOMETHING IN THIS LIST AS A LAST RESORT!\nUNDOCUMENTED_OBJECTS = [\n \"AddedToken\", # This is a tokenizers class.\n \"BasicTokenizer\", # Internal, should never have been in the main init.\n \"DPRPretrainedReader\", # Like an Encoder.\n \"ModelCard\", # Internal type.\n \"SqueezeBertModule\", # Internal building block (should have been called SqueezeBertLayer)\n \"TFDPRPretrainedReader\", # Like an Encoder.\n \"TransfoXLCorpus\", # Internal type.\n \"WordpieceTokenizer\", # Internal, should never have been in the main init.\n \"absl\", # External module\n \"add_end_docstrings\", # Internal, should never have been in the main init.\n \"add_start_docstrings\", # Internal, should never have been in the main init.\n \"cached_path\", # Internal used for downloading models.\n \"convert_tf_weight_name_to_pt_weight_name\", # Internal used to convert model weights\n \"logger\", # Internal logger\n \"logging\", # External module\n]\n\n# This list should be empty. Objects in it should get their own doc page.\nSHOULD_HAVE_THEIR_OWN_PAGE = [\n # bert-japanese\n \"BertJapaneseTokenizer\",\n \"CharacterTokenizer\",\n \"MecabTokenizer\",\n # Benchmarks\n \"PyTorchBenchmark\",\n \"PyTorchBenchmarkArguments\",\n \"TensorFlowBenchmark\",\n \"TensorFlowBenchmarkArguments\",\n]\n\n\ndef ignore_undocumented(name):\n \"\"\"Rules to determine if `name` should be undocumented.\"\"\"\n # NOT DOCUMENTED ON PURPOSE.\n # Magic attributes are not documented.\n if name.startswith(\"__\"):\n return True\n # Constants uppercase are not documented.\n if name.isupper():\n return True\n # PreTrainedModels / Encoders / Decoders / Layers / Embeddings / Attention are not documented.\n if (\n name.endswith(\"PreTrainedModel\")\n or name.endswith(\"Decoder\")\n or name.endswith(\"Encoder\")\n or name.endswith(\"Layer\")\n or name.endswith(\"Embeddings\")\n or name.endswith(\"Attention\")\n ):\n return True\n # Submodules are not documented.\n if os.path.isdir(os.path.join(PATH_TO_TRANSFORMERS, name)) or os.path.isfile(\n os.path.join(PATH_TO_TRANSFORMERS, f\"{name}.py\")\n ):\n return True\n # All load functions are not documented.\n if name.startswith(\"load_tf\") or name.startswith(\"load_pytorch\"):\n return True\n # is_xxx_available functions are not documented.\n if name.startswith(\"is_\") and name.endswith(\"_available\"):\n return True\n # Deprecated objects are not documented.\n if name in DEPRECATED_OBJECTS or name in UNDOCUMENTED_OBJECTS:\n return True\n # MMBT model does not really work.\n if name.startswith(\"MMBT\"):\n return True\n\n # NOT DOCUMENTED BUT NOT ON PURPOSE, SHOULD BE FIXED!\n # All data collators should be documented\n if name.startswith(\"DataCollator\") or name.endswith(\"data_collator\"):\n return True\n if name in SHOULD_HAVE_THEIR_OWN_PAGE:\n return True\n return False\n\n\ndef check_all_objects_are_documented():\n \"\"\" Check all models are properly documented.\"\"\"\n documented_objs = find_all_documented_objects()\n undocumented_objs = [c for c in dir(transformers) if c not in documented_objs and not ignore_undocumented(c)]\n if len(undocumented_objs) > 0:\n raise Exception(\n \"The following objects are in the public init so should be documented:\\n - \"\n + \"\\n - \".join(undocumented_objs)\n )\n\n\ndef check_repo_quality():\n \"\"\" Check all models are properly tested and documented.\"\"\"\n print(\"Checking all models are properly tested.\")\n check_all_decorator_order()\n check_all_models_are_tested()\n print(\"Checking all objects are properly documented.\")\n check_all_objects_are_documented()\n print(\"Checking all models are in at least one auto class.\")\n check_all_models_are_auto_configured()\n\n\nif __name__ == \"__main__\":\n check_repo_quality()\n", "path": "utils/check_repo.py" } ]
diff --git a/docs/source/index.rst b/docs/source/index.rst index 43b73efcb446..35b801278a61 100644 --- a/docs/source/index.rst +++ b/docs/source/index.rst @@ -388,6 +388,7 @@ TensorFlow and/or Flax. model_doc/gpt model_doc/gpt2 model_doc/pegasus + model_doc/phobert model_doc/prophetnet model_doc/rag model_doc/reformer diff --git a/docs/source/model_doc/phobert.rst b/docs/source/model_doc/phobert.rst new file mode 100644 index 000000000000..5ef99b40801d --- /dev/null +++ b/docs/source/model_doc/phobert.rst @@ -0,0 +1,59 @@ +.. + Copyright 2020 The HuggingFace Team. All rights reserved. + + Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with + the License. You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on + an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the + specific language governing permissions and limitations under the License. + +PhoBERT +----------------------------------------------------------------------------------------------------------------------- + +Overview +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +The PhoBERT model was proposed in `PhoBERT: Pre-trained language models for Vietnamese +<https://www.aclweb.org/anthology/2020.findings-emnlp.92.pdf>`__ by Dat Quoc Nguyen, Anh Tuan Nguyen. + +The abstract from the paper is the following: + +*We present PhoBERT with two versions, PhoBERT-base and PhoBERT-large, the first public large-scale monolingual +language models pre-trained for Vietnamese. Experimental results show that PhoBERT consistently outperforms the recent +best pre-trained multilingual model XLM-R (Conneau et al., 2020) and improves the state-of-the-art in multiple +Vietnamese-specific NLP tasks including Part-of-speech tagging, Dependency parsing, Named-entity recognition and +Natural language inference.* + +Example of use: + +.. code-block:: + + import torch + from transformers import AutoModel, AutoTokenizer + + phobert = AutoModel.from_pretrained("vinai/phobert-base") + tokenizer = AutoTokenizer.from_pretrained("vinai/phobert-base") + + # INPUT TEXT MUST BE ALREADY WORD-SEGMENTED! + line = "Tôi là sinh_viên trường đại_học Công_nghệ ." + + input_ids = torch.tensor([tokenizer.encode(line)]) + + with torch.no_grad(): + features = phobert(input_ids) # Models outputs are now tuples + + ## With TensorFlow 2.0+: + # from transformers import TFAutoModel + # phobert = TFAutoModel.from_pretrained("vinai/phobert-base") + + +The original code can be found `here <https://github.com/VinAIResearch/PhoBERT>`__. + +PhobertTokenizer +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +.. autoclass:: transformers.PhobertTokenizer + :members: diff --git a/utils/check_repo.py b/utils/check_repo.py index 0f6f9db8aa24..aefac35684a4 100644 --- a/utils/check_repo.py +++ b/utils/check_repo.py @@ -402,8 +402,6 @@ def find_all_documented_objects(): "BertJapaneseTokenizer", "CharacterTokenizer", "MecabTokenizer", - # Phoebus - "PhobertTokenizer", # Benchmarks "PyTorchBenchmark", "PyTorchBenchmarkArguments",
mozilla__bugbug-3334
Use information on how a bug is filed as a feature This could be especially useful for the Spam model. https://bugzilla.mozilla.org/show_bug.cgi?id=1565403
[ { "content": "# -*- coding: utf-8 -*-\n# This Source Code Form is subject to the terms of the Mozilla Public\n# License, v. 2.0. If a copy of the MPL was not distributed with this file,\n# You can obtain one at http://mozilla.org/MPL/2.0/.\n\nimport xgboost\nfrom imblearn.over_sampling import BorderlineSMOTE\nfrom sklearn.compose import ColumnTransformer\nfrom sklearn.feature_extraction import DictVectorizer\nfrom sklearn.pipeline import Pipeline\n\nfrom bugbug import bug_features, bugzilla, feature_cleanup, utils\nfrom bugbug.model import BugModel\n\n\nclass SpamBugModel(BugModel):\n def __init__(self, lemmatization=False):\n BugModel.__init__(self, lemmatization)\n\n self.sampler = BorderlineSMOTE(random_state=0)\n self.calculate_importance = False\n\n feature_extractors = [\n bug_features.has_str(),\n bug_features.has_regression_range(),\n bug_features.severity(),\n bug_features.has_crash_signature(),\n bug_features.has_url(),\n bug_features.whiteboard(),\n bug_features.product(),\n # TODO: We would like to use the component at the time of filing too,\n # but we can't because the rollback script doesn't support changes to\n # components yet.\n # bug_features.component(),\n bug_features.num_words_title(),\n bug_features.num_words_comments(),\n bug_features.keywords(),\n bug_features.priority(),\n bug_features.version(),\n bug_features.target_milestone(),\n bug_features.has_attachment(),\n bug_features.platform(),\n bug_features.op_sys(),\n ]\n\n cleanup_functions = [\n feature_cleanup.fileref(),\n feature_cleanup.url(),\n feature_cleanup.synonyms(),\n ]\n\n self.extraction_pipeline = Pipeline(\n [\n (\n \"bug_extractor\",\n bug_features.BugExtractor(\n feature_extractors, cleanup_functions, rollback=True\n ),\n ),\n (\n \"union\",\n ColumnTransformer(\n [\n (\"data\", DictVectorizer(), \"data\"),\n (\"title\", self.text_vectorizer(min_df=0.0001), \"title\"),\n (\n \"comments\",\n self.text_vectorizer(min_df=0.0001),\n \"comments\",\n ),\n ]\n ),\n ),\n ]\n )\n\n self.clf = xgboost.XGBClassifier(n_jobs=utils.get_physical_cpu_count())\n self.clf.set_params(predictor=\"cpu_predictor\")\n\n def get_labels(self):\n classes = {}\n\n for bug_data in bugzilla.get_bugs(include_invalid=True):\n bug_id = bug_data[\"id\"]\n\n # Skip bugs filed by Mozillians, since we are sure they are not spam.\n if \"@mozilla\" in bug_data[\"creator\"]:\n continue\n\n # A bug that was moved out of 'Invalid Bugs' is definitely a legitimate bug.\n for history in bug_data[\"history\"]:\n for change in history[\"changes\"]:\n if (\n change[\"field_name\"] == \"product\"\n and change[\"removed\"] == \"Invalid Bugs\"\n ):\n classes[bug_id] = 0\n\n # A fixed bug is definitely a legitimate bug.\n if bug_data[\"resolution\"] == \"FIXED\":\n classes[bug_id] = 0\n\n # A bug in the 'Invalid Bugs' product is definitely a spam bug.\n elif bug_data[\"product\"] == \"Invalid Bugs\":\n classes[bug_id] = 1\n\n print(\n \"{} bugs are classified as non-spam\".format(\n sum(1 for label in classes.values() if label == 0)\n )\n )\n print(\n \"{} bugs are classified as spam\".format(\n sum(1 for label in classes.values() if label == 1)\n )\n )\n\n return classes, [0, 1]\n\n def items_gen(self, classes):\n # Overwriting this method to add include_invalid=True to get_bugs to\n # include spam bugs.\n return (\n (bug, classes[bug[\"id\"]])\n for bug in bugzilla.get_bugs(include_invalid=True)\n if bug[\"id\"] in classes\n )\n\n def get_feature_names(self):\n return self.extraction_pipeline.named_steps[\"union\"].get_feature_names_out()\n\n def overwrite_classes(self, bugs, classes, probabilities):\n for i, bug in enumerate(bugs):\n if \"@mozilla\" in bug[\"creator\"]:\n if probabilities:\n classes[i] = [1.0, 0.0]\n else:\n classes[i] = 0\n\n return classes\n", "path": "bugbug/models/spambug.py" } ]
[ { "content": "# -*- coding: utf-8 -*-\n# This Source Code Form is subject to the terms of the Mozilla Public\n# License, v. 2.0. If a copy of the MPL was not distributed with this file,\n# You can obtain one at http://mozilla.org/MPL/2.0/.\n\nimport xgboost\nfrom imblearn.over_sampling import BorderlineSMOTE\nfrom sklearn.compose import ColumnTransformer\nfrom sklearn.feature_extraction import DictVectorizer\nfrom sklearn.pipeline import Pipeline\n\nfrom bugbug import bug_features, bugzilla, feature_cleanup, utils\nfrom bugbug.model import BugModel\n\n\nclass SpamBugModel(BugModel):\n def __init__(self, lemmatization=False):\n BugModel.__init__(self, lemmatization)\n\n self.sampler = BorderlineSMOTE(random_state=0)\n self.calculate_importance = False\n\n feature_extractors = [\n bug_features.has_str(),\n bug_features.has_regression_range(),\n bug_features.severity(),\n bug_features.has_crash_signature(),\n bug_features.has_url(),\n bug_features.whiteboard(),\n bug_features.product(),\n # TODO: We would like to use the component at the time of filing too,\n # but we can't because the rollback script doesn't support changes to\n # components yet.\n # bug_features.component(),\n bug_features.num_words_title(),\n bug_features.num_words_comments(),\n bug_features.keywords(),\n bug_features.priority(),\n bug_features.version(),\n bug_features.target_milestone(),\n bug_features.has_attachment(),\n bug_features.platform(),\n bug_features.op_sys(),\n bug_features.filed_via(),\n ]\n\n cleanup_functions = [\n feature_cleanup.fileref(),\n feature_cleanup.url(),\n feature_cleanup.synonyms(),\n ]\n\n self.extraction_pipeline = Pipeline(\n [\n (\n \"bug_extractor\",\n bug_features.BugExtractor(\n feature_extractors, cleanup_functions, rollback=True\n ),\n ),\n (\n \"union\",\n ColumnTransformer(\n [\n (\"data\", DictVectorizer(), \"data\"),\n (\"title\", self.text_vectorizer(min_df=0.0001), \"title\"),\n (\n \"comments\",\n self.text_vectorizer(min_df=0.0001),\n \"comments\",\n ),\n ]\n ),\n ),\n ]\n )\n\n self.clf = xgboost.XGBClassifier(n_jobs=utils.get_physical_cpu_count())\n self.clf.set_params(predictor=\"cpu_predictor\")\n\n def get_labels(self):\n classes = {}\n\n for bug_data in bugzilla.get_bugs(include_invalid=True):\n bug_id = bug_data[\"id\"]\n\n # Skip bugs filed by Mozillians, since we are sure they are not spam.\n if \"@mozilla\" in bug_data[\"creator\"]:\n continue\n\n # A bug that was moved out of 'Invalid Bugs' is definitely a legitimate bug.\n for history in bug_data[\"history\"]:\n for change in history[\"changes\"]:\n if (\n change[\"field_name\"] == \"product\"\n and change[\"removed\"] == \"Invalid Bugs\"\n ):\n classes[bug_id] = 0\n\n # A fixed bug is definitely a legitimate bug.\n if bug_data[\"resolution\"] == \"FIXED\":\n classes[bug_id] = 0\n\n # A bug in the 'Invalid Bugs' product is definitely a spam bug.\n elif bug_data[\"product\"] == \"Invalid Bugs\":\n classes[bug_id] = 1\n\n print(\n \"{} bugs are classified as non-spam\".format(\n sum(1 for label in classes.values() if label == 0)\n )\n )\n print(\n \"{} bugs are classified as spam\".format(\n sum(1 for label in classes.values() if label == 1)\n )\n )\n\n return classes, [0, 1]\n\n def items_gen(self, classes):\n # Overwriting this method to add include_invalid=True to get_bugs to\n # include spam bugs.\n return (\n (bug, classes[bug[\"id\"]])\n for bug in bugzilla.get_bugs(include_invalid=True)\n if bug[\"id\"] in classes\n )\n\n def get_feature_names(self):\n return self.extraction_pipeline.named_steps[\"union\"].get_feature_names_out()\n\n def overwrite_classes(self, bugs, classes, probabilities):\n for i, bug in enumerate(bugs):\n if \"@mozilla\" in bug[\"creator\"]:\n if probabilities:\n classes[i] = [1.0, 0.0]\n else:\n classes[i] = 0\n\n return classes\n", "path": "bugbug/models/spambug.py" } ]
diff --git a/bugbug/models/spambug.py b/bugbug/models/spambug.py index f37362028e..9c10a614b3 100644 --- a/bugbug/models/spambug.py +++ b/bugbug/models/spambug.py @@ -41,6 +41,7 @@ def __init__(self, lemmatization=False): bug_features.has_attachment(), bug_features.platform(), bug_features.op_sys(), + bug_features.filed_via(), ] cleanup_functions = [
sopel-irc__sopel-1325
[Bugzilla] Error calling shutdown method for module bugzilla:None Noticed this in my logs. Bugzilla shutdown throwing none. On Sopel 6.5.3, Python 3.5.3. ``` Ping timeout reached after 120 seconds, closing connection Calling shutdown for 2 modules. calling reddit.shutdown calling bugzilla.shutdown Error calling shutdown method for module bugzilla:None Closed! Warning: Disconnected. Reconnecting in 20 seconds... Welcome to Sopel. Loading modules... ```
[ { "content": "# coding=utf-8\n\"\"\"Bugzilla issue reporting module\n\nCopyright 2013-2015, Embolalia, embolalia.com\nLicensed under the Eiffel Forum License 2.\n\"\"\"\nfrom __future__ import unicode_literals, absolute_import, print_function, division\n\nimport re\n\nimport xmltodict\n\nfrom sopel import web, tools\nfrom sopel.config.types import StaticSection, ListAttribute\nfrom sopel.logger import get_logger\nfrom sopel.module import rule\n\n\nregex = None\nLOGGER = get_logger(__name__)\n\n\nclass BugzillaSection(StaticSection):\n domains = ListAttribute('domains')\n \"\"\"The domains of the Bugzilla instances from which to get information.\"\"\"\n\n\ndef configure(config):\n config.define_section('bugzilla', BugzillaSection)\n config.bugzilla.configure_setting(\n 'domains',\n 'Enter the domains of the Bugzillas you want extra information '\n 'from (e.g. bugzilla.gnome.org)'\n )\n\n\ndef setup(bot):\n global regex\n bot.config.define_section('bugzilla', BugzillaSection)\n\n if not bot.config.bugzilla.domains:\n return\n if not bot.memory.contains('url_callbacks'):\n bot.memory['url_callbacks'] = tools.SopelMemory()\n\n domains = '|'.join(bot.config.bugzilla.domains)\n regex = re.compile((r'https?://(%s)'\n '(/show_bug.cgi\\?\\S*?)'\n '(id=\\d+)')\n % domains)\n bot.memory['url_callbacks'][regex] = show_bug\n\n\ndef shutdown(bot):\n del bot.memory['url_callbacks'][regex]\n\n\n@rule(r'.*https?://(\\S+?)'\n '(/show_bug.cgi\\?\\S*?)'\n '(id=\\d+).*')\ndef show_bug(bot, trigger, match=None):\n \"\"\"Show information about a Bugzilla bug.\"\"\"\n match = match or trigger\n domain = match.group(1)\n if domain not in bot.config.bugzilla.domains:\n return\n url = 'https://%s%sctype=xml&%s' % match.groups()\n data = web.get(url, dont_decode=True)\n bug = xmltodict.parse(data).get('bugzilla').get('bug')\n error = bug.get('@error', None) # error=\"NotPermitted\"\n\n if error:\n LOGGER.warning('Bugzilla error: %s' % error)\n bot.say('[BUGZILLA] Unable to get infomation for '\n 'linked bug (%s)' % error)\n return\n\n message = ('[BUGZILLA] %s | Product: %s | Component: %s | Version: %s | ' +\n 'Importance: %s | Status: %s | Assigned to: %s | ' +\n 'Reported: %s | Modified: %s')\n\n resolution = bug.get('resolution')\n if resolution is not None:\n status = bug.get('bug_status') + ' ' + resolution\n else:\n status = bug.get('bug_status')\n\n assigned_to = bug.get('assigned_to')\n if isinstance(assigned_to, dict):\n assigned_to = assigned_to.get('@name')\n\n message = message % (\n bug.get('short_desc'), bug.get('product'),\n bug.get('component'), bug.get('version'),\n (bug.get('priority') + ' ' + bug.get('bug_severity')),\n status, assigned_to, bug.get('creation_ts'),\n bug.get('delta_ts'))\n bot.say(message)\n", "path": "sopel/modules/bugzilla.py" } ]
[ { "content": "# coding=utf-8\n\"\"\"Bugzilla issue reporting module\n\nCopyright 2013-2015, Embolalia, embolalia.com\nLicensed under the Eiffel Forum License 2.\n\"\"\"\nfrom __future__ import unicode_literals, absolute_import, print_function, division\n\nimport re\n\nimport xmltodict\n\nfrom sopel import web, tools\nfrom sopel.config.types import StaticSection, ListAttribute\nfrom sopel.logger import get_logger\nfrom sopel.module import rule\n\n\nregex = None\nLOGGER = get_logger(__name__)\n\n\nclass BugzillaSection(StaticSection):\n domains = ListAttribute('domains')\n \"\"\"The domains of the Bugzilla instances from which to get information.\"\"\"\n\n\ndef configure(config):\n config.define_section('bugzilla', BugzillaSection)\n config.bugzilla.configure_setting(\n 'domains',\n 'Enter the domains of the Bugzillas you want extra information '\n 'from (e.g. bugzilla.gnome.org)'\n )\n\n\ndef setup(bot):\n global regex\n bot.config.define_section('bugzilla', BugzillaSection)\n\n if not bot.config.bugzilla.domains:\n return\n if not bot.memory.contains('url_callbacks'):\n bot.memory['url_callbacks'] = tools.SopelMemory()\n\n domains = '|'.join(bot.config.bugzilla.domains)\n regex = re.compile((r'https?://(%s)'\n '(/show_bug.cgi\\?\\S*?)'\n '(id=\\d+)')\n % domains)\n bot.memory['url_callbacks'][regex] = show_bug\n\n\ndef shutdown(bot):\n try:\n del bot.memory['url_callbacks'][regex]\n except KeyError:\n # bot.config.bugzilla.domains was probably just empty on startup\n # everything's daijoubu\n pass\n\n\n@rule(r'.*https?://(\\S+?)'\n '(/show_bug.cgi\\?\\S*?)'\n '(id=\\d+).*')\ndef show_bug(bot, trigger, match=None):\n \"\"\"Show information about a Bugzilla bug.\"\"\"\n match = match or trigger\n domain = match.group(1)\n if domain not in bot.config.bugzilla.domains:\n return\n url = 'https://%s%sctype=xml&%s' % match.groups()\n data = web.get(url, dont_decode=True)\n bug = xmltodict.parse(data).get('bugzilla').get('bug')\n error = bug.get('@error', None) # error=\"NotPermitted\"\n\n if error:\n LOGGER.warning('Bugzilla error: %s' % error)\n bot.say('[BUGZILLA] Unable to get infomation for '\n 'linked bug (%s)' % error)\n return\n\n message = ('[BUGZILLA] %s | Product: %s | Component: %s | Version: %s | ' +\n 'Importance: %s | Status: %s | Assigned to: %s | ' +\n 'Reported: %s | Modified: %s')\n\n resolution = bug.get('resolution')\n if resolution is not None:\n status = bug.get('bug_status') + ' ' + resolution\n else:\n status = bug.get('bug_status')\n\n assigned_to = bug.get('assigned_to')\n if isinstance(assigned_to, dict):\n assigned_to = assigned_to.get('@name')\n\n message = message % (\n bug.get('short_desc'), bug.get('product'),\n bug.get('component'), bug.get('version'),\n (bug.get('priority') + ' ' + bug.get('bug_severity')),\n status, assigned_to, bug.get('creation_ts'),\n bug.get('delta_ts'))\n bot.say(message)\n", "path": "sopel/modules/bugzilla.py" } ]
diff --git a/sopel/modules/bugzilla.py b/sopel/modules/bugzilla.py index c87ff08e09..bb054bc68b 100644 --- a/sopel/modules/bugzilla.py +++ b/sopel/modules/bugzilla.py @@ -52,7 +52,12 @@ def setup(bot): def shutdown(bot): - del bot.memory['url_callbacks'][regex] + try: + del bot.memory['url_callbacks'][regex] + except KeyError: + # bot.config.bugzilla.domains was probably just empty on startup + # everything's daijoubu + pass @rule(r'.*https?://(\S+?)'
beetbox__beets-3774
FetchArt crashes with TypeError for particular album I've imported hundreds of albums just fine using `fetchart` in `auto` mode, and then this one came along and crashed the import. The issue in short: ``` File "/usr/local/lib/python3.7/dist-packages/beetsplug/fetchart.py", line 416, in get self.API_ALBUMS + album.mb_releasegroupid, TypeError: can only concatenate str (not "int") to str ``` So it seems `album.mb_releasegroupid` is an `int`. This seems like a bug. ### Problem Running this command in verbose (`-vv`) mode: ```sh $ beet -vv im --set=genre=Vaporwave Vektroid\ -\ Color\ Ocean\ Road/ ``` Led to this problem: ``` user configuration: /media/droppie/libraries/music/.meta/beets/config.yaml data directory: /media/droppie/libraries/music/.meta/beets plugin paths: /opt/whatlastgenre/plugin/beets/beetsplug Sending event: pluginload library database: /media/droppie/libraries/music/.meta/beets/library.db library directory: /media/droppie/libraries/music Sending event: library_opened Sending event: import_begin Sending event: import_task_created Sending event: import_task_start Looking up: /media/droppie/data/music/Vektroid - Color Ocean Road Tagging Vektroid - Color Ocean Road No album ID found. Search terms: Vektroid - Color Ocean Road Album might be VA: False Searching for MusicBrainz releases with: {'release': 'color ocean road', 'artist': 'vektroid', 'tracks': '6'} Requesting MusicBrainz release acfcb884-a136-44d3-a537-65194f07bf59 primary MB release type: album Sending event: albuminfo_received Candidate: Vektroid - Color Ocean Road (acfcb884-a136-44d3-a537-65194f07bf59) Computing track assignment... ...done. Success. Distance: 0.19 Requesting MusicBrainz release 42fa86c0-c28c-4850-ba6a-b2696808dabf primary MB release type: album Sending event: albuminfo_received Candidate: Vektroid - RE•SET (42fa86c0-c28c-4850-ba6a-b2696808dabf) Computing track assignment... ...done. Success. Distance: 0.65 Requesting MusicBrainz release 6a832aac-a80b-4db2-a1d7-dc65d92a982d primary MB release type: album Sending event: albuminfo_received Candidate: Vektroid - Telnet Complete (6a832aac-a80b-4db2-a1d7-dc65d92a982d) Computing track assignment... ...done. Success. Distance: 0.76 Requesting MusicBrainz release 043e9481-f864-4676-9888-924f026fa3dd primary MB release type: album Sending event: albuminfo_received Candidate: Vektroid - Seed & Synthetic Earth (043e9481-f864-4676-9888-924f026fa3dd) Computing track assignment... ...done. Success. Distance: 0.67 Requesting MusicBrainz release 12ec78f1-169a-408c-9d9e-a699901d133c primary MB release type: broadcast secondary MB release type(s): dj-mix Sending event: albuminfo_received Candidate: Vektroid - FACT Mix 619: Vektroid (Sept '17) (12ec78f1-169a-408c-9d9e-a699901d133c) Computing track assignment... ...done. Success. Distance: 0.65 discogs: Searching for master release 1520897 discogs: hit rate limit, waiting for 0.9732460975646973 seconds discogs: Searching for master release 1520897 discogs: hit rate limit, waiting for 0.9869840145111084 seconds discogs: Searching for master release 1520897 discogs: hit rate limit, waiting for 0.9847149848937988 seconds Sending event: albuminfo_received Candidate: Vektroid - Color Ocean Road (4820006) Computing track assignment... ...done. Success. Distance: 0.22 Sending event: albuminfo_received Candidate: Vektroid - Color Ocean Road (13385176) Computing track assignment... ...done. Success. Distance: 0.05 Sending event: albuminfo_received Candidate: Vektroid - Color Ocean Road (13395427) Computing track assignment... ...done. Success. Distance: 0.22 Evaluating 8 candidates. /media/droppie/data/music/Vektroid - Color Ocean Road (6 items) Sending event: before_choose_candidate Tagging: Vektroid - Color Ocean Road URL: https://www.discogs.com/Vektroid-Color-Ocean-Road/release/13385176 (Similarity: 95.2%) (source) (Discogs, File, 2012, US, PrismCorp) [A]pply, More candidates, Skip, Use as-is, as Tracks, Group albums, Enter search, enter Id, aBort, eDit, edit Candidates? Sending event: import_task_choice Sending event: import_task_apply Replacing item 2040: /media/droppie/data/music/Vektroid - Color Ocean Road/01 Shalom.mp3 Sending event: database_change Sending event: item_removed Replacing item 2041: /media/droppie/data/music/Vektroid - Color Ocean Road/02 Color Ocean.mp3 Sending event: database_change Sending event: item_removed Replacing item 2042: /media/droppie/data/music/Vektroid - Color Ocean Road/03 Seafoam Island.mp3 Sending event: database_change Sending event: item_removed Replacing item 2043: /media/droppie/data/music/Vektroid - Color Ocean Road/04 Sushi Plaza.mp3 Sending event: database_change Sending event: item_removed Replacing item 2044: /media/droppie/data/music/Vektroid - Color Ocean Road/05 Mango _ Fuji.mp3 Sending event: database_change Sending event: item_removed Replacing item 2045: /media/droppie/data/music/Vektroid - Color Ocean Road/06 Om Namo Ocean Road.mp3 Sending event: database_change Sending event: database_change Sending event: item_removed 6 of 6 items replaced Sending event: database_change Sending event: database_change Sending event: database_change Sending event: database_change Sending event: database_change Sending event: database_change Sending event: database_change Sending event: database_change Sending event: database_change Sending event: database_change Sending event: database_change Sending event: database_change Sending event: database_change Sending event: database_change Sending event: database_change Sending event: database_change Sending event: database_change Sending event: database_change Sending event: database_change Sending event: database_change Sending event: database_change Reimported album: added 1602800480.7681532, flexible attributes [] from album 185 for /media/droppie/data/music/Vektroid - Color Ocean Road Reimported item added 1602800480.7766094 from item 2040 for /media/droppie/data/music/Vektroid - Color Ocean Road/01 Shalom.mp3 Reimported item flexible attributes ['track_alt', 'data_source', 'track_alt', 'data_source'] from item 2040 for /media/droppie/data/music/Vektroid - Color Ocean Road/01 Shalom.mp3 Sending event: database_change Reimported item added 1602800480.7842014 from item 2041 for /media/droppie/data/music/Vektroid - Color Ocean Road/02 Color Ocean.mp3 Reimported item flexible attributes ['track_alt', 'data_source', 'track_alt', 'data_source'] from item 2041 for /media/droppie/data/music/Vektroid - Color Ocean Road/02 Color Ocean.mp3 Sending event: database_change Reimported item added 1602800480.7913542 from item 2042 for /media/droppie/data/music/Vektroid - Color Ocean Road/03 Seafoam Island.mp3 Reimported item flexible attributes ['track_alt', 'data_source', 'track_alt', 'data_source'] from item 2042 for /media/droppie/data/music/Vektroid - Color Ocean Road/03 Seafoam Island.mp3 Sending event: database_change Reimported item added 1602800480.7984622 from item 2043 for /media/droppie/data/music/Vektroid - Color Ocean Road/04 Sushi Plaza.mp3 Reimported item flexible attributes ['track_alt', 'data_source', 'track_alt', 'data_source'] from item 2043 for /media/droppie/data/music/Vektroid - Color Ocean Road/04 Sushi Plaza.mp3 Sending event: database_change Reimported item added 1602800480.8071747 from item 2044 for /media/droppie/data/music/Vektroid - Color Ocean Road/05 Mango _ Fuji.mp3 Reimported item flexible attributes ['track_alt', 'data_source', 'track_alt', 'data_source'] from item 2044 for /media/droppie/data/music/Vektroid - Color Ocean Road/05 Mango _ Fuji.mp3 Sending event: database_change Reimported item added 1602800480.8171701 from item 2045 for /media/droppie/data/music/Vektroid - Color Ocean Road/06 Om Namo Ocean Road.mp3 Reimported item flexible attributes ['track_alt', 'data_source', 'track_alt', 'data_source'] from item 2045 for /media/droppie/data/music/Vektroid - Color Ocean Road/06 Om Namo Ocean Road.mp3 Sending event: database_change Set field genre=Vaporwave for /media/droppie/data/music/Vektroid - Color Ocean Road Sending event: database_change Sending event: database_change Sending event: database_change Sending event: database_change Sending event: database_change Sending event: database_change Sending event: database_change fetchart: trying source filesystem for album Vektroid - Color Ocean Road fetchart: trying source coverart for album Vektroid - Color Ocean Road fetchart: downloading image: https://coverartarchive.org/release/13385176/front fetchart: not a supported image: image/x-None fetchart: trying source coverart for album Vektroid - Color Ocean Road fetchart: downloading image: https://coverartarchive.org/release-group/1520897/front fetchart: not a supported image: image/x-None fetchart: trying source itunes for album Vektroid - Color Ocean Road fetchart: getting URL: https://itunes.apple.com/search?term=Vektroid+Color+Ocean+Road&entity=album&media=music&limit=200 fetchart: iTunes search for 'Vektroid Color Ocean Road' got no results fetchart: trying source amazon for album Vektroid - Color Ocean Road fetchart: trying source albumart for album Vektroid - Color Ocean Road fetchart: trying source wikipedia for album Vektroid - Color Ocean Road fetchart: getting URL: https://dbpedia.org/sparql?format=application%2Fsparql-results%2Bjson&timeout=2500&query=PREFIX+rdf%3A+%3Chttp%3A%2F%2Fwww.w3.org%2F1999%2F02%2F22-rdf-syntax-ns%23%3E%0A+++++++++++++++++PREFIX+dbpprop%3A+%3Chttp%3A%2F%2Fdbpedia.org%2Fproperty%2F%3E%0A+++++++++++++++++PREFIX+owl%3A+%3Chttp%3A%2F%2Fdbpedia.org%2Fontology%2F%3E%0A+++++++++++++++++PREFIX+rdfs%3A+%3Chttp%3A%2F%2Fwww.w3.org%2F2000%2F01%2Frdf-schema%23%3E%0A+++++++++++++++++PREFIX+foaf%3A+%3Chttp%3A%2F%2Fxmlns.com%2Ffoaf%2F0.1%2F%3E%0A%0A+++++++++++++++++SELECT+DISTINCT+%3FpageId+%3FcoverFilename+WHERE+%7B%0A+++++++++++++++++++%3Fsubject+owl%3AwikiPageID+%3FpageId+.%0A+++++++++++++++++++%3Fsubject+dbpprop%3Aname+%3Fname+.%0A+++++++++++++++++++%3Fsubject+rdfs%3Alabel+%3Flabel+.%0A+++++++++++++++++++%7B+%3Fsubject+dbpprop%3Aartist+%3Fartist+%7D%0A+++++++++++++++++++++UNION%0A+++++++++++++++++++%7B+%3Fsubject+owl%3Aartist+%3Fartist+%7D%0A+++++++++++++++++++%7B+%3Fartist+foaf%3Aname+%22Vektroid%22%40en+%7D%0A+++++++++++++++++++++UNION%0A+++++++++++++++++++%7B+%3Fartist+dbpprop%3Aname+%22Vektroid%22%40en+%7D%0A+++++++++++++++++++%3Fsubject+rdf%3Atype+%3Chttp%3A%2F%2Fdbpedia.org%2Fontology%2FAlbum%3E+.%0A+++++++++++++++++++%3Fsubject+dbpprop%3Acover+%3FcoverFilename+.%0A+++++++++++++++++++FILTER+%28+regex%28%3Fname%2C+%22Color+Ocean+Road%22%2C+%22i%22%29+%29%0A++++++++++++++++++%7D%0A+++++++++++++++++Limit+1 fetchart: wikipedia: album not found on dbpedia fetchart: trying source fanarttv for album Vektroid - Color Ocean Road Traceback (most recent call last): File "/usr/local/bin/beet", line 10, in <module> sys.exit(main()) File "/usr/local/lib/python3.7/dist-packages/beets/ui/__init__.py", line 1266, in main _raw_main(args) File "/usr/local/lib/python3.7/dist-packages/beets/ui/__init__.py", line 1253, in _raw_main subcommand.func(lib, suboptions, subargs) File "/usr/local/lib/python3.7/dist-packages/beets/ui/commands.py", line 955, in import_func import_files(lib, paths, query) File "/usr/local/lib/python3.7/dist-packages/beets/ui/commands.py", line 925, in import_files session.run() File "/usr/local/lib/python3.7/dist-packages/beets/importer.py", line 329, in run pl.run_parallel(QUEUE_SIZE) File "/usr/local/lib/python3.7/dist-packages/beets/util/pipeline.py", line 445, in run_parallel six.reraise(exc_info[0], exc_info[1], exc_info[2]) File "/usr/lib/python3/dist-packages/six.py", line 693, in reraise raise value File "/usr/local/lib/python3.7/dist-packages/beets/util/pipeline.py", line 312, in run out = self.coro.send(msg) File "/usr/local/lib/python3.7/dist-packages/beets/util/pipeline.py", line 194, in coro func(*(args + (task,))) File "/usr/local/lib/python3.7/dist-packages/beets/importer.py", line 1511, in plugin_stage func(session, task) File "/usr/local/lib/python3.7/dist-packages/beets/plugins.py", line 143, in wrapper return func(*args, **kwargs) File "/usr/local/lib/python3.7/dist-packages/beetsplug/fetchart.py", line 854, in fetch_art candidate = self.art_for_album(task.album, task.paths, local) File "/usr/local/lib/python3.7/dist-packages/beetsplug/fetchart.py", line 920, in art_for_album for candidate in source.get(album, self, paths): File "/usr/local/lib/python3.7/dist-packages/beetsplug/fetchart.py", line 416, in get self.API_ALBUMS + album.mb_releasegroupid, TypeError: can only concatenate str (not "int") to str ``` ### Setup * OS: Raspbian Buster * Python version: 3.7.3 * beets version: 1.4.9 * Turning off plugins made problem go away (yes/no): n/a My configuration (output of `beet config`) is: ```yaml library: /media/droppie/libraries/music/.meta/beets/library.db directory: /media/droppie/libraries/music original_date: yes per_disc_numbering: no va_name: "\U0001F465 Various Artists" import: write: no copy: no move: no link: yes resume: ask incremental: yes quiet_fallback: skip none_rec_action: ask timid: yes default_action: apply log: /var/log/beets/import.log languages: - en detail: no group_albums: no autotag: yes duplicate_action: ask incremental_skip_later: no set_fields: genre: "\U0001F3A7 New" pluginpath: /opt/whatlastgenre/plugin/beets/beetsplug plugins: discogs edit fetchart bucket replaygain info follow wlg types types: rating: int edit: itemfields: track artist title genre albumartist albumfields: albumartist album genre rating fetchart: sources: - filesystem - coverart: release releasegroup - itunes - '*' auto: yes art_filename: cover.jpg store_source: yes bucket: bucket_year: ['1930s', '1940s', '1950s', '1960s', '1970s', '1980s', '1990s', '2000s', '2010s', '2020s'] paths: default: %bucket{$year}/$year - $albumartist - $album/$track - $title comp: %bucket{$year}/$year - $albumartist - $album/$track - $artist - $title replaygain: backend: gstreamer auto: no overwrite: no targetlevel: 89 follow: auto: no email: [email protected] password: XXX userid: XXX wlg: auto: no force: no count: 3 separator: ', ' web: host: 0.0.0.0 cors: '*' discogs: user_token: XXX ```
[ { "content": "# -*- coding: utf-8 -*-\n# This file is part of beets.\n# Copyright 2016, Adrian Sampson.\n#\n# Permission is hereby granted, free of charge, to any person obtaining\n# a copy of this software and associated documentation files (the\n# \"Software\"), to deal in the Software without restriction, including\n# without limitation the rights to use, copy, modify, merge, publish,\n# distribute, sublicense, and/or sell copies of the Software, and to\n# permit persons to whom the Software is furnished to do so, subject to\n# the following conditions:\n#\n# The above copyright notice and this permission notice shall be\n# included in all copies or substantial portions of the Software.\n\n\"\"\"Representation of type information for DBCore model fields.\n\"\"\"\nfrom __future__ import division, absolute_import, print_function\n\nfrom . import query\nfrom beets.util import str2bool\nimport six\n\nif not six.PY2:\n buffer = memoryview # sqlite won't accept memoryview in python 2\n\n\n# Abstract base.\n\nclass Type(object):\n \"\"\"An object encapsulating the type of a model field. Includes\n information about how to store, query, format, and parse a given\n field.\n \"\"\"\n\n sql = u'TEXT'\n \"\"\"The SQLite column type for the value.\n \"\"\"\n\n query = query.SubstringQuery\n \"\"\"The `Query` subclass to be used when querying the field.\n \"\"\"\n\n model_type = six.text_type\n \"\"\"The Python type that is used to represent the value in the model.\n\n The model is guaranteed to return a value of this type if the field\n is accessed. To this end, the constructor is used by the `normalize`\n and `from_sql` methods and the `default` property.\n \"\"\"\n\n @property\n def null(self):\n \"\"\"The value to be exposed when the underlying value is None.\n \"\"\"\n return self.model_type()\n\n def format(self, value):\n \"\"\"Given a value of this type, produce a Unicode string\n representing the value. This is used in template evaluation.\n \"\"\"\n if value is None:\n value = self.null\n # `self.null` might be `None`\n if value is None:\n value = u''\n if isinstance(value, bytes):\n value = value.decode('utf-8', 'ignore')\n\n return six.text_type(value)\n\n def parse(self, string):\n \"\"\"Parse a (possibly human-written) string and return the\n indicated value of this type.\n \"\"\"\n try:\n return self.model_type(string)\n except ValueError:\n return self.null\n\n def normalize(self, value):\n \"\"\"Given a value that will be assigned into a field of this\n type, normalize the value to have the appropriate type. This\n base implementation only reinterprets `None`.\n \"\"\"\n if value is None:\n return self.null\n else:\n # TODO This should eventually be replaced by\n # `self.model_type(value)`\n return value\n\n def from_sql(self, sql_value):\n \"\"\"Receives the value stored in the SQL backend and return the\n value to be stored in the model.\n\n For fixed fields the type of `value` is determined by the column\n type affinity given in the `sql` property and the SQL to Python\n mapping of the database adapter. For more information see:\n https://www.sqlite.org/datatype3.html\n https://docs.python.org/2/library/sqlite3.html#sqlite-and-python-types\n\n Flexible fields have the type affinity `TEXT`. This means the\n `sql_value` is either a `buffer`/`memoryview` or a `unicode` object`\n and the method must handle these in addition.\n \"\"\"\n if isinstance(sql_value, buffer):\n sql_value = bytes(sql_value).decode('utf-8', 'ignore')\n if isinstance(sql_value, six.text_type):\n return self.parse(sql_value)\n else:\n return self.normalize(sql_value)\n\n def to_sql(self, model_value):\n \"\"\"Convert a value as stored in the model object to a value used\n by the database adapter.\n \"\"\"\n return model_value\n\n\n# Reusable types.\n\nclass Default(Type):\n null = None\n\n\nclass Integer(Type):\n \"\"\"A basic integer type.\n \"\"\"\n sql = u'INTEGER'\n query = query.NumericQuery\n model_type = int\n\n def normalize(self, value):\n try:\n return self.model_type(round(float(value)))\n except ValueError:\n return self.null\n except TypeError:\n return self.null\n\n\nclass PaddedInt(Integer):\n \"\"\"An integer field that is formatted with a given number of digits,\n padded with zeroes.\n \"\"\"\n def __init__(self, digits):\n self.digits = digits\n\n def format(self, value):\n return u'{0:0{1}d}'.format(value or 0, self.digits)\n\n\nclass NullPaddedInt(PaddedInt):\n \"\"\"Same as `PaddedInt`, but does not normalize `None` to `0.0`.\n \"\"\"\n null = None\n\n\nclass ScaledInt(Integer):\n \"\"\"An integer whose formatting operation scales the number by a\n constant and adds a suffix. Good for units with large magnitudes.\n \"\"\"\n def __init__(self, unit, suffix=u''):\n self.unit = unit\n self.suffix = suffix\n\n def format(self, value):\n return u'{0}{1}'.format((value or 0) // self.unit, self.suffix)\n\n\nclass Id(Integer):\n \"\"\"An integer used as the row id or a foreign key in a SQLite table.\n This type is nullable: None values are not translated to zero.\n \"\"\"\n null = None\n\n def __init__(self, primary=True):\n if primary:\n self.sql = u'INTEGER PRIMARY KEY'\n\n\nclass Float(Type):\n \"\"\"A basic floating-point type. The `digits` parameter specifies how\n many decimal places to use in the human-readable representation.\n \"\"\"\n sql = u'REAL'\n query = query.NumericQuery\n model_type = float\n\n def __init__(self, digits=1):\n self.digits = digits\n\n def format(self, value):\n return u'{0:.{1}f}'.format(value or 0, self.digits)\n\n\nclass NullFloat(Float):\n \"\"\"Same as `Float`, but does not normalize `None` to `0.0`.\n \"\"\"\n null = None\n\n\nclass String(Type):\n \"\"\"A Unicode string type.\n \"\"\"\n sql = u'TEXT'\n query = query.SubstringQuery\n\n\nclass Boolean(Type):\n \"\"\"A boolean type.\n \"\"\"\n sql = u'INTEGER'\n query = query.BooleanQuery\n model_type = bool\n\n def format(self, value):\n return six.text_type(bool(value))\n\n def parse(self, string):\n return str2bool(string)\n\n\n# Shared instances of common types.\nDEFAULT = Default()\nINTEGER = Integer()\nPRIMARY_ID = Id(True)\nFOREIGN_ID = Id(False)\nFLOAT = Float()\nNULL_FLOAT = NullFloat()\nSTRING = String()\nBOOLEAN = Boolean()\n", "path": "beets/dbcore/types.py" } ]
[ { "content": "# -*- coding: utf-8 -*-\n# This file is part of beets.\n# Copyright 2016, Adrian Sampson.\n#\n# Permission is hereby granted, free of charge, to any person obtaining\n# a copy of this software and associated documentation files (the\n# \"Software\"), to deal in the Software without restriction, including\n# without limitation the rights to use, copy, modify, merge, publish,\n# distribute, sublicense, and/or sell copies of the Software, and to\n# permit persons to whom the Software is furnished to do so, subject to\n# the following conditions:\n#\n# The above copyright notice and this permission notice shall be\n# included in all copies or substantial portions of the Software.\n\n\"\"\"Representation of type information for DBCore model fields.\n\"\"\"\nfrom __future__ import division, absolute_import, print_function\n\nfrom . import query\nfrom beets.util import str2bool\nimport six\n\nif not six.PY2:\n buffer = memoryview # sqlite won't accept memoryview in python 2\n\n\n# Abstract base.\n\nclass Type(object):\n \"\"\"An object encapsulating the type of a model field. Includes\n information about how to store, query, format, and parse a given\n field.\n \"\"\"\n\n sql = u'TEXT'\n \"\"\"The SQLite column type for the value.\n \"\"\"\n\n query = query.SubstringQuery\n \"\"\"The `Query` subclass to be used when querying the field.\n \"\"\"\n\n model_type = six.text_type\n \"\"\"The Python type that is used to represent the value in the model.\n\n The model is guaranteed to return a value of this type if the field\n is accessed. To this end, the constructor is used by the `normalize`\n and `from_sql` methods and the `default` property.\n \"\"\"\n\n @property\n def null(self):\n \"\"\"The value to be exposed when the underlying value is None.\n \"\"\"\n return self.model_type()\n\n def format(self, value):\n \"\"\"Given a value of this type, produce a Unicode string\n representing the value. This is used in template evaluation.\n \"\"\"\n if value is None:\n value = self.null\n # `self.null` might be `None`\n if value is None:\n value = u''\n if isinstance(value, bytes):\n value = value.decode('utf-8', 'ignore')\n\n return six.text_type(value)\n\n def parse(self, string):\n \"\"\"Parse a (possibly human-written) string and return the\n indicated value of this type.\n \"\"\"\n try:\n return self.model_type(string)\n except ValueError:\n return self.null\n\n def normalize(self, value):\n \"\"\"Given a value that will be assigned into a field of this\n type, normalize the value to have the appropriate type. This\n base implementation only reinterprets `None`.\n \"\"\"\n if value is None:\n return self.null\n else:\n # TODO This should eventually be replaced by\n # `self.model_type(value)`\n return value\n\n def from_sql(self, sql_value):\n \"\"\"Receives the value stored in the SQL backend and return the\n value to be stored in the model.\n\n For fixed fields the type of `value` is determined by the column\n type affinity given in the `sql` property and the SQL to Python\n mapping of the database adapter. For more information see:\n https://www.sqlite.org/datatype3.html\n https://docs.python.org/2/library/sqlite3.html#sqlite-and-python-types\n\n Flexible fields have the type affinity `TEXT`. This means the\n `sql_value` is either a `buffer`/`memoryview` or a `unicode` object`\n and the method must handle these in addition.\n \"\"\"\n if isinstance(sql_value, buffer):\n sql_value = bytes(sql_value).decode('utf-8', 'ignore')\n if isinstance(sql_value, six.text_type):\n return self.parse(sql_value)\n else:\n return self.normalize(sql_value)\n\n def to_sql(self, model_value):\n \"\"\"Convert a value as stored in the model object to a value used\n by the database adapter.\n \"\"\"\n return model_value\n\n\n# Reusable types.\n\nclass Default(Type):\n null = None\n\n\nclass Integer(Type):\n \"\"\"A basic integer type.\n \"\"\"\n sql = u'INTEGER'\n query = query.NumericQuery\n model_type = int\n\n def normalize(self, value):\n try:\n return self.model_type(round(float(value)))\n except ValueError:\n return self.null\n except TypeError:\n return self.null\n\n\nclass PaddedInt(Integer):\n \"\"\"An integer field that is formatted with a given number of digits,\n padded with zeroes.\n \"\"\"\n def __init__(self, digits):\n self.digits = digits\n\n def format(self, value):\n return u'{0:0{1}d}'.format(value or 0, self.digits)\n\n\nclass NullPaddedInt(PaddedInt):\n \"\"\"Same as `PaddedInt`, but does not normalize `None` to `0.0`.\n \"\"\"\n null = None\n\n\nclass ScaledInt(Integer):\n \"\"\"An integer whose formatting operation scales the number by a\n constant and adds a suffix. Good for units with large magnitudes.\n \"\"\"\n def __init__(self, unit, suffix=u''):\n self.unit = unit\n self.suffix = suffix\n\n def format(self, value):\n return u'{0}{1}'.format((value or 0) // self.unit, self.suffix)\n\n\nclass Id(Integer):\n \"\"\"An integer used as the row id or a foreign key in a SQLite table.\n This type is nullable: None values are not translated to zero.\n \"\"\"\n null = None\n\n def __init__(self, primary=True):\n if primary:\n self.sql = u'INTEGER PRIMARY KEY'\n\n\nclass Float(Type):\n \"\"\"A basic floating-point type. The `digits` parameter specifies how\n many decimal places to use in the human-readable representation.\n \"\"\"\n sql = u'REAL'\n query = query.NumericQuery\n model_type = float\n\n def __init__(self, digits=1):\n self.digits = digits\n\n def format(self, value):\n return u'{0:.{1}f}'.format(value or 0, self.digits)\n\n\nclass NullFloat(Float):\n \"\"\"Same as `Float`, but does not normalize `None` to `0.0`.\n \"\"\"\n null = None\n\n\nclass String(Type):\n \"\"\"A Unicode string type.\n \"\"\"\n sql = u'TEXT'\n query = query.SubstringQuery\n\n def normalize(self, value):\n if value is None:\n return self.null\n else:\n return self.model_type(value)\n\n\nclass Boolean(Type):\n \"\"\"A boolean type.\n \"\"\"\n sql = u'INTEGER'\n query = query.BooleanQuery\n model_type = bool\n\n def format(self, value):\n return six.text_type(bool(value))\n\n def parse(self, string):\n return str2bool(string)\n\n\n# Shared instances of common types.\nDEFAULT = Default()\nINTEGER = Integer()\nPRIMARY_ID = Id(True)\nFOREIGN_ID = Id(False)\nFLOAT = Float()\nNULL_FLOAT = NullFloat()\nSTRING = String()\nBOOLEAN = Boolean()\n", "path": "beets/dbcore/types.py" } ]
diff --git a/beets/dbcore/types.py b/beets/dbcore/types.py index 5aa2b98127..c85eb1a50f 100644 --- a/beets/dbcore/types.py +++ b/beets/dbcore/types.py @@ -207,6 +207,12 @@ class String(Type): sql = u'TEXT' query = query.SubstringQuery + def normalize(self, value): + if value is None: + return self.null + else: + return self.model_type(value) + class Boolean(Type): """A boolean type. diff --git a/docs/changelog.rst b/docs/changelog.rst index a530cbac13..528707fe08 100644 --- a/docs/changelog.rst +++ b/docs/changelog.rst @@ -266,6 +266,10 @@ Fixes: the current track in the queue. Thanks to :user:`aereaux`. :bug:`3722` +* String-typed fields are now normalized to string values, avoiding an + occasional crash when using both the :doc:`/plugins/fetchart` and the + :doc:`/plugins/discogs` together. + :bug:`3773` :bug:`3774` * Fix a bug causing PIL to generate poor quality JPEGs when resizing artwork. :bug:`3743` diff --git a/test/test_lyrics.py b/test/test_lyrics.py index 833b86b3ad..95b094e98e 100644 --- a/test/test_lyrics.py +++ b/test/test_lyrics.py @@ -48,71 +48,72 @@ def setUp(self): lyrics.LyricsPlugin() def test_search_artist(self): - item = Item(artist='Alice ft. Bob', title='song') - self.assertIn(('Alice ft. Bob', ['song']), + item = Item(artist=u'Alice ft. Bob', title=u'song') + self.assertIn((u'Alice ft. Bob', [u'song']), lyrics.search_pairs(item)) - self.assertIn(('Alice', ['song']), + self.assertIn((u'Alice', [u'song']), lyrics.search_pairs(item)) - item = Item(artist='Alice feat Bob', title='song') - self.assertIn(('Alice feat Bob', ['song']), + item = Item(artist=u'Alice feat Bob', title=u'song') + self.assertIn((u'Alice feat Bob', [u'song']), lyrics.search_pairs(item)) - self.assertIn(('Alice', ['song']), + self.assertIn((u'Alice', [u'song']), lyrics.search_pairs(item)) - item = Item(artist='Alice feat. Bob', title='song') - self.assertIn(('Alice feat. Bob', ['song']), + item = Item(artist=u'Alice feat. Bob', title=u'song') + self.assertIn((u'Alice feat. Bob', [u'song']), lyrics.search_pairs(item)) - self.assertIn(('Alice', ['song']), + self.assertIn((u'Alice', [u'song']), lyrics.search_pairs(item)) - item = Item(artist='Alice feats Bob', title='song') - self.assertIn(('Alice feats Bob', ['song']), + item = Item(artist=u'Alice feats Bob', title=u'song') + self.assertIn((u'Alice feats Bob', [u'song']), lyrics.search_pairs(item)) - self.assertNotIn(('Alice', ['song']), + self.assertNotIn((u'Alice', [u'song']), lyrics.search_pairs(item)) - item = Item(artist='Alice featuring Bob', title='song') - self.assertIn(('Alice featuring Bob', ['song']), + item = Item(artist=u'Alice featuring Bob', title=u'song') + self.assertIn((u'Alice featuring Bob', [u'song']), lyrics.search_pairs(item)) - self.assertIn(('Alice', ['song']), + self.assertIn((u'Alice', [u'song']), lyrics.search_pairs(item)) - item = Item(artist='Alice & Bob', title='song') - self.assertIn(('Alice & Bob', ['song']), + item = Item(artist=u'Alice & Bob', title=u'song') + self.assertIn((u'Alice & Bob', [u'song']), lyrics.search_pairs(item)) - self.assertIn(('Alice', ['song']), + self.assertIn((u'Alice', [u'song']), lyrics.search_pairs(item)) - item = Item(artist='Alice and Bob', title='song') - self.assertIn(('Alice and Bob', ['song']), + item = Item(artist=u'Alice and Bob', title=u'song') + self.assertIn((u'Alice and Bob', [u'song']), lyrics.search_pairs(item)) - self.assertIn(('Alice', ['song']), + self.assertIn((u'Alice', [u'song']), lyrics.search_pairs(item)) - item = Item(artist='Alice and Bob', title='song') - self.assertEqual(('Alice and Bob', ['song']), + item = Item(artist=u'Alice and Bob', title=u'song') + self.assertEqual((u'Alice and Bob', [u'song']), list(lyrics.search_pairs(item))[0]) def test_search_artist_sort(self): - item = Item(artist='CHVRCHΞS', title='song', artist_sort='CHVRCHES') - self.assertIn(('CHVRCHΞS', ['song']), + item = Item(artist=u'CHVRCHΞS', title=u'song', artist_sort=u'CHVRCHES') + self.assertIn((u'CHVRCHΞS', [u'song']), lyrics.search_pairs(item)) - self.assertIn(('CHVRCHES', ['song']), + self.assertIn((u'CHVRCHES', [u'song']), lyrics.search_pairs(item)) # Make sure that the original artist name is still the first entry - self.assertEqual(('CHVRCHΞS', ['song']), + self.assertEqual((u'CHVRCHΞS', [u'song']), list(lyrics.search_pairs(item))[0]) - item = Item(artist='横山克', title='song', artist_sort='Masaru Yokoyama') - self.assertIn(('横山克', ['song']), + item = Item(artist=u'横山克', title=u'song', + artist_sort=u'Masaru Yokoyama') + self.assertIn((u'横山克', [u'song']), lyrics.search_pairs(item)) - self.assertIn(('Masaru Yokoyama', ['song']), + self.assertIn((u'Masaru Yokoyama', [u'song']), lyrics.search_pairs(item)) # Make sure that the original artist name is still the first entry - self.assertEqual(('横山克', ['song']), + self.assertEqual((u'横山克', [u'song']), list(lyrics.search_pairs(item))[0]) def test_search_pairs_multi_titles(self):
zenml-io__zenml-2271
Update `sklearn` Integration to Support Versions >1.3.0 and Resolve MLflow Autologging Issues ## Open Source Contributors Welcomed! Please comment below if you would like to work on this issue! ### Contact Details [Optional] [email protected] ### What happened? The current ZenML Sklearn integration is restricted to versions of Sklearn <1.3.0, as defined in `src/zenml/integrations/sklearn/__init__.py`. However, the release of Sklearn 1.3.0 necessitates an update to this constraint. Additionally, this Sklearn version upgrade appears to cause issues with MLflow autologging, likely due to compatibility conflicts. ### Task Description Update the Sklearn integration in ZenML to support Sklearn versions >1.3.0. Additionally, identify and resolve any issues arising in MLflow autologging due to this version update. ### Expected Outcome - The Sklearn integration in ZenML should allow for the use of Sklearn versions >1.3.0. - Any compatibility issues, especially with MLflow autologging, should be identified and resolved. - Ensure that all tests, including CI pipelines, pass with the updated Sklearn version. ### Steps to Implement - Modify the Sklearn version constraint in src/zenml/integrations/sklearn/__init__.py to allow for versions >1.3.0. - Investigate and identify the root cause of the issues with MLflow autologging when using Sklearn 1.3.0. - Implement necessary fixes or updates to ensure compatibility with the new Sklearn version. - Thoroughly test the changes, especially focusing on MLflow autologging functionality. - Update documentation and examples as necessary to reflect the support for the new Sklearn version. ### Additional Context This update is crucial for keeping ZenML compatible with the latest machine learning tools and libraries, ensuring that users can leverage the newest features and improvements in Sklearn. ### Code of Conduct - [ ] I agree to follow this project's Code of Conduct
[ { "content": "# Copyright (c) ZenML GmbH 2021. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at:\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express\n# or implied. See the License for the specific language governing\n# permissions and limitations under the License.\n\"\"\"Initialization of the sklearn integration.\"\"\"\n\nfrom zenml.integrations.constants import SKLEARN\nfrom zenml.integrations.integration import Integration\n\n\nclass SklearnIntegration(Integration):\n \"\"\"Definition of sklearn integration for ZenML.\"\"\"\n\n NAME = SKLEARN\n REQUIREMENTS = [\"scikit-learn<1.3\"]\n\n @classmethod\n def activate(cls) -> None:\n \"\"\"Activates the integration.\"\"\"\n from zenml.integrations.sklearn import materializers # noqa\n\n\nSklearnIntegration.check_installation()\n", "path": "src/zenml/integrations/sklearn/__init__.py" } ]
[ { "content": "# Copyright (c) ZenML GmbH 2021. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at:\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express\n# or implied. See the License for the specific language governing\n# permissions and limitations under the License.\n\"\"\"Initialization of the sklearn integration.\"\"\"\n\nfrom zenml.integrations.constants import SKLEARN\nfrom zenml.integrations.integration import Integration\n\n\nclass SklearnIntegration(Integration):\n \"\"\"Definition of sklearn integration for ZenML.\"\"\"\n\n NAME = SKLEARN\n REQUIREMENTS = [\"scikit-learn>1.3\"]\n\n @classmethod\n def activate(cls) -> None:\n \"\"\"Activates the integration.\"\"\"\n from zenml.integrations.sklearn import materializers # noqa\n\n\nSklearnIntegration.check_installation()\n", "path": "src/zenml/integrations/sklearn/__init__.py" } ]
diff --git a/docs/book/user-guide/starter-guide/create-an-ml-pipeline.md b/docs/book/user-guide/starter-guide/create-an-ml-pipeline.md index 2b898d45565..882c5334f36 100644 --- a/docs/book/user-guide/starter-guide/create-an-ml-pipeline.md +++ b/docs/book/user-guide/starter-guide/create-an-ml-pipeline.md @@ -127,7 +127,7 @@ zenml integration install sklearn -y In this case, ZenML has an integration with `sklearn` so you can use the ZenML CLI to install the right version directly. {% hint style="info" %} -The `zenml integration install sklearn` command is simply doing a `pip install sklearn<1.3` behind the scenes. If something goes wrong, one can always use `zenml integration requirements sklearn` to see which requirements are compatible and install using pip (or any other tool) directly. +The `zenml integration install sklearn` command is simply doing a `pip install sklearn>1.3` behind the scenes. If something goes wrong, one can always use `zenml integration requirements sklearn` to see which requirements are compatible and install using pip (or any other tool) directly. {% endhint %} ### Define a data loader with multiple outputs diff --git a/src/zenml/integrations/sklearn/__init__.py b/src/zenml/integrations/sklearn/__init__.py index 9a9b1fed77e..3245e301367 100644 --- a/src/zenml/integrations/sklearn/__init__.py +++ b/src/zenml/integrations/sklearn/__init__.py @@ -21,7 +21,7 @@ class SklearnIntegration(Integration): """Definition of sklearn integration for ZenML.""" NAME = SKLEARN - REQUIREMENTS = ["scikit-learn<1.3"] + REQUIREMENTS = ["scikit-learn>1.3"] @classmethod def activate(cls) -> None: diff --git a/tests/conftest.py b/tests/conftest.py index e35ad22a452..b6f90f58f05 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -20,7 +20,7 @@ from uuid import uuid4 import pytest -from py._builtin import execfile +from pytest import File from pytest_mock import MockerFixture from tests.harness.environment import TestEnvironment @@ -325,9 +325,7 @@ def virtualenv( "tests" ) - execfile( - str(activate_this_file), dict(__file__=str(activate_this_file)) - ) + File(str(activate_this_file), dict(__file__=str(activate_this_file))) # Set new system executable sys.executable = tmp_path / env_bin_dir / "python" @@ -346,7 +344,7 @@ def virtualenv( "your virtual environment to run integration " "tests" ) - execfile(str(activate_this_f), dict(__file__=str(activate_this_f))) + File(str(activate_this_f), dict(__file__=str(activate_this_f))) else: yield ""
conda__conda-5124
export toposort for conda-build export toposort for conda-build
[ { "content": "# -*- coding: utf-8 -*-\nfrom __future__ import absolute_import, division, print_function, unicode_literals\n\nfrom functools import partial\nfrom logging import getLogger\nfrom warnings import warn\n\nlog = getLogger(__name__)\n\nfrom . import CondaError # NOQA\nCondaError = CondaError\n\nfrom . import compat, plan # NOQA\ncompat, plan = compat, plan\n\nfrom .api import get_index # NOQA\nget_index = get_index\n\nfrom .cli.common import specs_from_args, spec_from_line, specs_from_url # NOQA\nfrom .cli.conda_argparse import add_parser_prefix, add_parser_channels # NOQA\nadd_parser_channels, add_parser_prefix = add_parser_channels, add_parser_prefix\nspecs_from_args, spec_from_line = specs_from_args, spec_from_line\nspecs_from_url = specs_from_url\n\nfrom .cli.conda_argparse import ArgumentParser # NOQA\nArgumentParser = ArgumentParser\n\nfrom .common.compat import PY3, StringIO, input, iteritems, string_types, text_type # NOQA\nPY3, StringIO, input, iteritems, string_types, text_type = PY3, StringIO, input, iteritems, string_types, text_type # NOQA\nfrom .gateways.connection import CondaSession # NOQA\nCondaSession = CondaSession\n\nfrom .gateways.disk.link import lchmod # NOQA\nlchmod = lchmod\n\nfrom .fetch import TmpDownload # NOQA\nTmpDownload = TmpDownload\nhandle_proxy_407 = lambda x, y: warn(\"handle_proxy_407 is deprecated. \"\n \"Now handled by CondaSession.\")\nfrom .core.index import dist_str_in_index, fetch_index # NOQA\ndist_str_in_index, fetch_index = dist_str_in_index, fetch_index\nfrom .core.package_cache import download, rm_fetched # NOQA\ndownload, rm_fetched = download, rm_fetched\n\nfrom .install import package_cache, prefix_placeholder, rm_rf, symlink_conda # NOQA\npackage_cache, prefix_placeholder, rm_rf, symlink_conda = package_cache, prefix_placeholder, rm_rf, symlink_conda # NOQA\n\nfrom .gateways.disk.delete import delete_trash, move_to_trash # NOQA\ndelete_trash, move_to_trash = delete_trash, move_to_trash\n\nfrom .core.linked_data import is_linked, linked, linked_data # NOQA\nis_linked, linked, linked_data = is_linked, linked, linked_data\n\nfrom .misc import untracked, walk_prefix # NOQA\nuntracked, walk_prefix = untracked, walk_prefix\n\nfrom .resolve import MatchSpec, NoPackagesFound, Resolve, Unsatisfiable, normalized_version # NOQA\nMatchSpec, NoPackagesFound, Resolve = MatchSpec, NoPackagesFound, Resolve\nUnsatisfiable, normalized_version = Unsatisfiable, normalized_version\n\nfrom .signature import KEYS, KEYS_DIR, hash_file, verify # NOQA\nKEYS, KEYS_DIR = KEYS, KEYS_DIR\nhash_file, verify = hash_file, verify\n\nfrom .utils import hashsum_file, human_bytes, memoized, unix_path_to_win, win_path_to_unix, url_path # NOQA\nhashsum_file, human_bytes = hashsum_file, human_bytes\nmemoized, unix_path_to_win = memoized, unix_path_to_win\nwin_path_to_unix, url_path = win_path_to_unix, url_path\n\nfrom .gateways.disk.read import compute_md5sum # NOQA\nmd5_file = compute_md5sum\n\nfrom .config import sys_rc_path # NOQA\nsys_rc_path = sys_rc_path\n\nfrom .models.version import VersionOrder # NOQA\nVersionOrder = VersionOrder\n\nimport conda.base.context # NOQA\nfrom .base.context import get_prefix as context_get_prefix, non_x86_linux_machines # NOQA\nnon_x86_linux_machines = non_x86_linux_machines\n\nfrom ._vendor.auxlib.entity import EntityEncoder # NOQA\nEntityEncoder = EntityEncoder\nfrom .base.constants import DEFAULT_CHANNELS, DEFAULT_CHANNELS_WIN, DEFAULT_CHANNELS_UNIX # NOQA\nDEFAULT_CHANNELS, DEFAULT_CHANNELS_WIN, DEFAULT_CHANNELS_UNIX = DEFAULT_CHANNELS, DEFAULT_CHANNELS_WIN, DEFAULT_CHANNELS_UNIX # NOQA\nget_prefix = partial(context_get_prefix, conda.base.context.context)\nget_default_urls = lambda: DEFAULT_CHANNELS\n\narch_name = conda.base.context.context.arch_name\nbinstar_upload = conda.base.context.context.anaconda_upload\nbits = conda.base.context.context.bits\ndefault_prefix = conda.base.context.context.default_prefix\ndefault_python = conda.base.context.context.default_python\nenvs_dirs = conda.base.context.context.envs_dirs\npkgs_dirs = conda.base.context.context.pkgs_dirs\nplatform = conda.base.context.context.platform\nroot_dir = conda.base.context.context.root_prefix\nroot_writable = conda.base.context.context.root_writable\nsubdir = conda.base.context.context.subdir\nfrom .models.channel import get_conda_build_local_url # NOQA\nget_rc_urls = lambda: list(conda.base.context.context.channels)\nget_local_urls = lambda: list(get_conda_build_local_url()) or []\nload_condarc = lambda fn: conda.base.context.reset_context([fn])\nfrom .exceptions import PaddingError # NOQA\nPaddingError = PaddingError\nfrom .gateways.disk.link import CrossPlatformStLink # NOQA\nCrossPlatformStLink = CrossPlatformStLink\n\nfrom .models.enums import FileMode # NOQA\nFileMode = FileMode\nfrom .models.enums import PathType # NOQA\nPathType = PathType\n\n\nif PY3:\n import configparser # NOQA # pragma: py2 no cover\nelse:\n import ConfigParser as configparser # NOQA # pragma: py3 no cover\nconfigparser = configparser\n\n\nfrom .compat import TemporaryDirectory # NOQA\nTemporaryDirectory = TemporaryDirectory\n\nfrom .gateways.subprocess import ACTIVE_SUBPROCESSES, subprocess_call # NOQA\nACTIVE_SUBPROCESSES, subprocess_call = ACTIVE_SUBPROCESSES, subprocess_call\n\nfrom .core.repodata import cache_fn_url # NOQA\ncache_fn_url = cache_fn_url\n\n\nclass Completer(object):\n def get_items(self):\n return self._get_items()\n\n def __contains__(self, item):\n return True\n\n def __iter__(self):\n return iter(self.get_items())\n\nclass InstalledPackages(object): pass # NOQA\n", "path": "conda/exports.py" } ]
[ { "content": "# -*- coding: utf-8 -*-\nfrom __future__ import absolute_import, division, print_function, unicode_literals\n\nfrom functools import partial\nfrom logging import getLogger\nfrom warnings import warn\n\nlog = getLogger(__name__)\n\nfrom . import CondaError # NOQA\nCondaError = CondaError\n\nfrom . import compat, plan # NOQA\ncompat, plan = compat, plan\n\nfrom .api import get_index # NOQA\nget_index = get_index\n\nfrom .cli.common import specs_from_args, spec_from_line, specs_from_url # NOQA\nfrom .cli.conda_argparse import add_parser_prefix, add_parser_channels # NOQA\nadd_parser_channels, add_parser_prefix = add_parser_channels, add_parser_prefix\nspecs_from_args, spec_from_line = specs_from_args, spec_from_line\nspecs_from_url = specs_from_url\n\nfrom .cli.conda_argparse import ArgumentParser # NOQA\nArgumentParser = ArgumentParser\n\nfrom .common.compat import PY3, StringIO, input, iteritems, string_types, text_type # NOQA\nPY3, StringIO, input, iteritems, string_types, text_type = PY3, StringIO, input, iteritems, string_types, text_type # NOQA\nfrom .gateways.connection import CondaSession # NOQA\nCondaSession = CondaSession\n\nfrom .common.toposort import _toposort\n_toposort = _toposort\n\nfrom .gateways.disk.link import lchmod # NOQA\nlchmod = lchmod\n\nfrom .fetch import TmpDownload # NOQA\nTmpDownload = TmpDownload\nhandle_proxy_407 = lambda x, y: warn(\"handle_proxy_407 is deprecated. \"\n \"Now handled by CondaSession.\")\nfrom .core.index import dist_str_in_index, fetch_index # NOQA\ndist_str_in_index, fetch_index = dist_str_in_index, fetch_index\nfrom .core.package_cache import download, rm_fetched # NOQA\ndownload, rm_fetched = download, rm_fetched\n\nfrom .install import package_cache, prefix_placeholder, rm_rf, symlink_conda # NOQA\npackage_cache, prefix_placeholder, rm_rf, symlink_conda = package_cache, prefix_placeholder, rm_rf, symlink_conda # NOQA\n\nfrom .gateways.disk.delete import delete_trash, move_to_trash # NOQA\ndelete_trash, move_to_trash = delete_trash, move_to_trash\n\nfrom .core.linked_data import is_linked, linked, linked_data # NOQA\nis_linked, linked, linked_data = is_linked, linked, linked_data\n\nfrom .misc import untracked, walk_prefix # NOQA\nuntracked, walk_prefix = untracked, walk_prefix\n\nfrom .resolve import MatchSpec, NoPackagesFound, Resolve, Unsatisfiable, normalized_version # NOQA\nMatchSpec, NoPackagesFound, Resolve = MatchSpec, NoPackagesFound, Resolve\nUnsatisfiable, normalized_version = Unsatisfiable, normalized_version\n\nfrom .signature import KEYS, KEYS_DIR, hash_file, verify # NOQA\nKEYS, KEYS_DIR = KEYS, KEYS_DIR\nhash_file, verify = hash_file, verify\n\nfrom .utils import hashsum_file, human_bytes, memoized, unix_path_to_win, win_path_to_unix, url_path # NOQA\nhashsum_file, human_bytes = hashsum_file, human_bytes\nmemoized, unix_path_to_win = memoized, unix_path_to_win\nwin_path_to_unix, url_path = win_path_to_unix, url_path\n\nfrom .gateways.disk.read import compute_md5sum # NOQA\nmd5_file = compute_md5sum\n\nfrom .config import sys_rc_path # NOQA\nsys_rc_path = sys_rc_path\n\nfrom .models.version import VersionOrder # NOQA\nVersionOrder = VersionOrder\n\nimport conda.base.context # NOQA\nfrom .base.context import get_prefix as context_get_prefix, non_x86_linux_machines # NOQA\nnon_x86_linux_machines = non_x86_linux_machines\n\nfrom ._vendor.auxlib.entity import EntityEncoder # NOQA\nEntityEncoder = EntityEncoder\nfrom .base.constants import DEFAULT_CHANNELS, DEFAULT_CHANNELS_WIN, DEFAULT_CHANNELS_UNIX # NOQA\nDEFAULT_CHANNELS, DEFAULT_CHANNELS_WIN, DEFAULT_CHANNELS_UNIX = DEFAULT_CHANNELS, DEFAULT_CHANNELS_WIN, DEFAULT_CHANNELS_UNIX # NOQA\nget_prefix = partial(context_get_prefix, conda.base.context.context)\nget_default_urls = lambda: DEFAULT_CHANNELS\n\narch_name = conda.base.context.context.arch_name\nbinstar_upload = conda.base.context.context.anaconda_upload\nbits = conda.base.context.context.bits\ndefault_prefix = conda.base.context.context.default_prefix\ndefault_python = conda.base.context.context.default_python\nenvs_dirs = conda.base.context.context.envs_dirs\npkgs_dirs = conda.base.context.context.pkgs_dirs\nplatform = conda.base.context.context.platform\nroot_dir = conda.base.context.context.root_prefix\nroot_writable = conda.base.context.context.root_writable\nsubdir = conda.base.context.context.subdir\nfrom .models.channel import get_conda_build_local_url # NOQA\nget_rc_urls = lambda: list(conda.base.context.context.channels)\nget_local_urls = lambda: list(get_conda_build_local_url()) or []\nload_condarc = lambda fn: conda.base.context.reset_context([fn])\nfrom .exceptions import PaddingError # NOQA\nPaddingError = PaddingError\nfrom .gateways.disk.link import CrossPlatformStLink # NOQA\nCrossPlatformStLink = CrossPlatformStLink\n\nfrom .models.enums import FileMode # NOQA\nFileMode = FileMode\nfrom .models.enums import PathType # NOQA\nPathType = PathType\n\n\nif PY3:\n import configparser # NOQA # pragma: py2 no cover\nelse:\n import ConfigParser as configparser # NOQA # pragma: py3 no cover\nconfigparser = configparser\n\n\nfrom .compat import TemporaryDirectory # NOQA\nTemporaryDirectory = TemporaryDirectory\n\nfrom .gateways.subprocess import ACTIVE_SUBPROCESSES, subprocess_call # NOQA\nACTIVE_SUBPROCESSES, subprocess_call = ACTIVE_SUBPROCESSES, subprocess_call\n\nfrom .core.repodata import cache_fn_url # NOQA\ncache_fn_url = cache_fn_url\n\n\nclass Completer(object):\n def get_items(self):\n return self._get_items()\n\n def __contains__(self, item):\n return True\n\n def __iter__(self):\n return iter(self.get_items())\n\nclass InstalledPackages(object): pass # NOQA\n", "path": "conda/exports.py" } ]
diff --git a/conda/exports.py b/conda/exports.py index 803ec103916..c14f048ec90 100644 --- a/conda/exports.py +++ b/conda/exports.py @@ -30,6 +30,9 @@ from .gateways.connection import CondaSession # NOQA CondaSession = CondaSession +from .common.toposort import _toposort +_toposort = _toposort + from .gateways.disk.link import lchmod # NOQA lchmod = lchmod
pyodide__pyodide-717
Calling yaml.load() without Loader=... is deprecated For each built packages there is now the following deprecation warning , ``` pyodide_build/common.py:27: YAMLLoadWarning: calling yaml.load() without Loader=... is deprecated, as the default Loader is unsafe. Please read https://msg.pyyaml.org/load for full details. return yaml.load(fd) ``` it would be nice to fix this.
[ { "content": "from pathlib import Path\nfrom typing import Optional, Set\n\n\nROOTDIR = Path(__file__).parents[1].resolve() / \"tools\"\nHOSTPYTHON = ROOTDIR / \"..\" / \"cpython\" / \"build\" / \"3.8.2\" / \"host\"\nTARGETPYTHON = ROOTDIR / \"..\" / \"cpython\" / \"installs\" / \"python-3.8.2\"\nDEFAULTCFLAGS = \"\"\nDEFAULTLDFLAGS = \" \".join(\n [\n \"-O3\",\n \"-s\",\n \"BINARYEN_METHOD='native-wasm'\",\n \"-Werror\",\n \"-s\",\n \"EMULATED_FUNCTION_POINTERS=1\",\n \"-s\",\n \"EMULATE_FUNCTION_POINTER_CASTS=1\",\n \"-s\",\n \"SIDE_MODULE=1\",\n \"-s\",\n \"WASM=1\",\n \"--memory-init-file\",\n \"0\",\n ]\n)\n\n\ndef parse_package(package):\n # Import yaml here because pywasmcross needs to run in the built native\n # Python, which won't have PyYAML\n import yaml\n\n # TODO: Validate against a schema\n with open(package) as fd:\n return yaml.load(fd)\n\n\ndef _parse_package_subset(query: Optional[str]) -> Optional[Set[str]]:\n \"\"\"Parse the list of packages specified with PYODIDE_PACKAGES env var.\n\n Also add the list of mandatory packages: ['micropip', 'distlib']\n\n Returns:\n a set of package names to build or None.\n \"\"\"\n if query is None:\n return None\n packages = query.split(\",\")\n packages = [el.strip() for el in packages]\n packages = [\"micropip\", \"distlib\"] + packages\n return set(packages)\n", "path": "pyodide_build/common.py" } ]
[ { "content": "from pathlib import Path\nfrom typing import Optional, Set\n\n\nROOTDIR = Path(__file__).parents[1].resolve() / \"tools\"\nHOSTPYTHON = ROOTDIR / \"..\" / \"cpython\" / \"build\" / \"3.8.2\" / \"host\"\nTARGETPYTHON = ROOTDIR / \"..\" / \"cpython\" / \"installs\" / \"python-3.8.2\"\nDEFAULTCFLAGS = \"\"\nDEFAULTLDFLAGS = \" \".join(\n [\n \"-O3\",\n \"-s\",\n \"BINARYEN_METHOD='native-wasm'\",\n \"-Werror\",\n \"-s\",\n \"EMULATED_FUNCTION_POINTERS=1\",\n \"-s\",\n \"EMULATE_FUNCTION_POINTER_CASTS=1\",\n \"-s\",\n \"SIDE_MODULE=1\",\n \"-s\",\n \"WASM=1\",\n \"--memory-init-file\",\n \"0\",\n ]\n)\n\n\ndef parse_package(package):\n # Import yaml here because pywasmcross needs to run in the built native\n # Python, which won't have PyYAML\n import yaml\n\n # TODO: Validate against a schema\n with open(package) as fd:\n return yaml.safe_load(fd)\n\n\ndef _parse_package_subset(query: Optional[str]) -> Optional[Set[str]]:\n \"\"\"Parse the list of packages specified with PYODIDE_PACKAGES env var.\n\n Also add the list of mandatory packages: ['micropip', 'distlib']\n\n Returns:\n a set of package names to build or None.\n \"\"\"\n if query is None:\n return None\n packages = query.split(\",\")\n packages = [el.strip() for el in packages]\n packages = [\"micropip\", \"distlib\"] + packages\n return set(packages)\n", "path": "pyodide_build/common.py" } ]
diff --git a/pyodide_build/common.py b/pyodide_build/common.py index 7d6752c94ac..de955a32beb 100644 --- a/pyodide_build/common.py +++ b/pyodide_build/common.py @@ -33,7 +33,7 @@ def parse_package(package): # TODO: Validate against a schema with open(package) as fd: - return yaml.load(fd) + return yaml.safe_load(fd) def _parse_package_subset(query: Optional[str]) -> Optional[Set[str]]:
pretix__pretix-1777
log level I'm probably missing something, so please bear with me. I followed the "small-scale manual" deployment guide. `python -m pretix runperiodic` logs celery success messages at the info level: `INFO 2019-01-03 20:49:47,479 celery.app.trace trace Task pretix.base.services.quotas.refresh_quota_caches[817c903c-ea12-491f-aa65-23b89e59075a] succeeded in 0.006468222010880709s: None INFO 2019-01-03 20:49:47,606 celery.app.trace trace Task pretix.base.services.waitinglist.assign_automatically[db93ed55-5e42-4278-8647-3deefa0ea8dc] succeeded in 0.08206735923886299s: 0 ` This means I get a useless email from the cron job twice an hour, unless I set `loglevel` in `pretix/settings.py` to `WARNING`. Is there a way to override the log level in pretix.cfg that I'm overlooking? log level I'm probably missing something, so please bear with me. I followed the "small-scale manual" deployment guide. `python -m pretix runperiodic` logs celery success messages at the info level: `INFO 2019-01-03 20:49:47,479 celery.app.trace trace Task pretix.base.services.quotas.refresh_quota_caches[817c903c-ea12-491f-aa65-23b89e59075a] succeeded in 0.006468222010880709s: None INFO 2019-01-03 20:49:47,606 celery.app.trace trace Task pretix.base.services.waitinglist.assign_automatically[db93ed55-5e42-4278-8647-3deefa0ea8dc] succeeded in 0.08206735923886299s: 0 ` This means I get a useless email from the cron job twice an hour, unless I set `loglevel` in `pretix/settings.py` to `WARNING`. Is there a way to override the log level in pretix.cfg that I'm overlooking?
[ { "content": "import configparser\nimport logging\nimport os\nimport sys\nfrom urllib.parse import urlparse\n\nimport django.conf.locale\nfrom django.utils.crypto import get_random_string\nfrom kombu import Queue\nfrom pkg_resources import iter_entry_points\nfrom pycountry import currencies\n\nfrom . import __version__\n\nfrom django.contrib.messages import constants as messages # NOQA\nfrom django.utils.translation import gettext_lazy as _ # NOQA\n\nconfig = configparser.RawConfigParser()\nif 'PRETIX_CONFIG_FILE' in os.environ:\n config.read_file(open(os.environ.get('PRETIX_CONFIG_FILE'), encoding='utf-8'))\nelse:\n config.read(['/etc/pretix/pretix.cfg', os.path.expanduser('~/.pretix.cfg'), 'pretix.cfg'],\n encoding='utf-8')\n\nCONFIG_FILE = config\nBASE_DIR = os.path.dirname(os.path.dirname(__file__))\nDATA_DIR = config.get('pretix', 'datadir', fallback=os.environ.get('DATA_DIR', 'data'))\nLOG_DIR = os.path.join(DATA_DIR, 'logs')\nMEDIA_ROOT = os.path.join(DATA_DIR, 'media')\nPROFILE_DIR = os.path.join(DATA_DIR, 'profiles')\n\nif not os.path.exists(DATA_DIR):\n os.mkdir(DATA_DIR)\nif not os.path.exists(LOG_DIR):\n os.mkdir(LOG_DIR)\nif not os.path.exists(MEDIA_ROOT):\n os.mkdir(MEDIA_ROOT)\n\nif config.has_option('django', 'secret'):\n SECRET_KEY = config.get('django', 'secret')\nelse:\n SECRET_FILE = os.path.join(DATA_DIR, '.secret')\n if os.path.exists(SECRET_FILE):\n with open(SECRET_FILE, 'r') as f:\n SECRET_KEY = f.read().strip()\n else:\n chars = 'abcdefghijklmnopqrstuvwxyz0123456789!@#$%^&*(-_=+)'\n SECRET_KEY = get_random_string(50, chars)\n with open(SECRET_FILE, 'w') as f:\n os.chmod(SECRET_FILE, 0o600)\n try:\n os.chown(SECRET_FILE, os.getuid(), os.getgid())\n except AttributeError:\n pass # os.chown is not available on Windows\n f.write(SECRET_KEY)\n\n# Adjustable settings\n\ndebug_fallback = \"runserver\" in sys.argv\nDEBUG = config.getboolean('django', 'debug', fallback=debug_fallback)\nLOG_CSP = config.getboolean('pretix', 'csp_log', fallback=True)\n\nPDFTK = config.get('tools', 'pdftk', fallback=None)\n\nPRETIX_AUTH_BACKENDS = config.get('pretix', 'auth_backends', fallback='pretix.base.auth.NativeAuthBackend').split(',')\n\ndb_backend = config.get('database', 'backend', fallback='sqlite3')\nif db_backend == 'postgresql_psycopg2':\n db_backend = 'postgresql'\nDATABASE_IS_GALERA = config.getboolean('database', 'galera', fallback=False)\nif DATABASE_IS_GALERA and 'mysql' in db_backend:\n db_options = {\n 'init_command': 'SET SESSION wsrep_sync_wait = 1;'\n }\nelse:\n db_options = {}\n\nif 'mysql' in db_backend:\n db_options['charset'] = 'utf8mb4'\nJSON_FIELD_AVAILABLE = db_backend in ('mysql', 'postgresql')\n\nDATABASES = {\n 'default': {\n 'ENGINE': 'django.db.backends.' + db_backend,\n 'NAME': config.get('database', 'name', fallback=os.path.join(DATA_DIR, 'db.sqlite3')),\n 'USER': config.get('database', 'user', fallback=''),\n 'PASSWORD': config.get('database', 'password', fallback=''),\n 'HOST': config.get('database', 'host', fallback=''),\n 'PORT': config.get('database', 'port', fallback=''),\n 'CONN_MAX_AGE': 0 if db_backend == 'sqlite3' else 120,\n 'OPTIONS': db_options,\n 'TEST': {\n 'CHARSET': 'utf8mb4',\n 'COLLATION': 'utf8mb4_unicode_ci',\n } if 'mysql' in db_backend else {}\n }\n}\nDATABASE_REPLICA = 'default'\nif config.has_section('replica'):\n DATABASE_REPLICA = 'replica'\n DATABASES['replica'] = {\n 'ENGINE': 'django.db.backends.' + db_backend,\n 'NAME': config.get('replica', 'name', fallback=DATABASES['default']['NAME']),\n 'USER': config.get('replica', 'user', fallback=DATABASES['default']['USER']),\n 'PASSWORD': config.get('replica', 'password', fallback=DATABASES['default']['PASSWORD']),\n 'HOST': config.get('replica', 'host', fallback=DATABASES['default']['HOST']),\n 'PORT': config.get('replica', 'port', fallback=DATABASES['default']['PORT']),\n 'CONN_MAX_AGE': 0 if db_backend == 'sqlite3' else 120,\n 'OPTIONS': db_options,\n 'TEST': {\n 'CHARSET': 'utf8mb4',\n 'COLLATION': 'utf8mb4_unicode_ci',\n } if 'mysql' in db_backend else {}\n }\n DATABASE_ROUTERS = ['pretix.helpers.database.ReplicaRouter']\n\nSTATIC_URL = config.get('urls', 'static', fallback='/static/')\n\nMEDIA_URL = config.get('urls', 'media', fallback='/media/')\n\nPRETIX_INSTANCE_NAME = config.get('pretix', 'instance_name', fallback='pretix.de')\nPRETIX_REGISTRATION = config.getboolean('pretix', 'registration', fallback=True)\nPRETIX_PASSWORD_RESET = config.getboolean('pretix', 'password_reset', fallback=True)\nPRETIX_LONG_SESSIONS = config.getboolean('pretix', 'long_sessions', fallback=True)\nPRETIX_ADMIN_AUDIT_COMMENTS = config.getboolean('pretix', 'audit_comments', fallback=False)\nPRETIX_OBLIGATORY_2FA = config.getboolean('pretix', 'obligatory_2fa', fallback=False)\nPRETIX_SESSION_TIMEOUT_RELATIVE = 3600 * 3\nPRETIX_SESSION_TIMEOUT_ABSOLUTE = 3600 * 12\nPRETIX_PRIMARY_COLOR = '#8E44B3'\n\nSITE_URL = config.get('pretix', 'url', fallback='http://localhost')\nif SITE_URL.endswith('/'):\n SITE_URL = SITE_URL[:-1]\n\nCSRF_TRUSTED_ORIGINS = [urlparse(SITE_URL).hostname]\n\nTRUST_X_FORWARDED_FOR = config.get('pretix', 'trust_x_forwarded_for', fallback=False)\n\nif config.get('pretix', 'trust_x_forwarded_proto', fallback=False):\n SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')\n\nPRETIX_PLUGINS_DEFAULT = config.get('pretix', 'plugins_default',\n fallback='pretix.plugins.sendmail,pretix.plugins.statistics,pretix.plugins.checkinlists,pretix.plugins.autocheckin')\nPRETIX_PLUGINS_EXCLUDE = config.get('pretix', 'plugins_exclude', fallback='').split(',')\n\nFETCH_ECB_RATES = config.getboolean('pretix', 'ecb_rates', fallback=True)\n\nDEFAULT_CURRENCY = config.get('pretix', 'currency', fallback='EUR')\nCURRENCIES = list(currencies)\nCURRENCY_PLACES = {\n # default is 2\n 'BIF': 0,\n 'CLP': 0,\n 'DJF': 0,\n 'GNF': 0,\n 'JPY': 0,\n 'KMF': 0,\n 'KRW': 0,\n 'MGA': 0,\n 'PYG': 0,\n 'RWF': 0,\n 'VND': 0,\n 'VUV': 0,\n 'XAF': 0,\n 'XOF': 0,\n 'XPF': 0,\n}\n\nALLOWED_HOSTS = ['*']\n\nLANGUAGE_CODE = config.get('locale', 'default', fallback='en')\nTIME_ZONE = config.get('locale', 'timezone', fallback='UTC')\n\nMAIL_FROM = SERVER_EMAIL = DEFAULT_FROM_EMAIL = config.get(\n 'mail', 'from', fallback='pretix@localhost')\nEMAIL_HOST = config.get('mail', 'host', fallback='localhost')\nEMAIL_PORT = config.getint('mail', 'port', fallback=25)\nEMAIL_HOST_USER = config.get('mail', 'user', fallback='')\nEMAIL_HOST_PASSWORD = config.get('mail', 'password', fallback='')\nEMAIL_USE_TLS = config.getboolean('mail', 'tls', fallback=False)\nEMAIL_USE_SSL = config.getboolean('mail', 'ssl', fallback=False)\nEMAIL_SUBJECT_PREFIX = '[pretix] '\n\nADMINS = [('Admin', n) for n in config.get('mail', 'admins', fallback='').split(\",\") if n]\n\nMETRICS_ENABLED = config.getboolean('metrics', 'enabled', fallback=False)\nMETRICS_USER = config.get('metrics', 'user', fallback=\"metrics\")\nMETRICS_PASSPHRASE = config.get('metrics', 'passphrase', fallback=\"\")\n\nCACHES = {\n 'default': {\n 'BACKEND': 'pretix.helpers.cache.CustomDummyCache',\n }\n}\nREAL_CACHE_USED = False\nSESSION_ENGINE = None\n\nHAS_MEMCACHED = config.has_option('memcached', 'location')\nif HAS_MEMCACHED:\n REAL_CACHE_USED = True\n CACHES['default'] = {\n 'BACKEND': 'django.core.cache.backends.memcached.PyLibMCCache',\n 'LOCATION': config.get('memcached', 'location'),\n }\n\nHAS_REDIS = config.has_option('redis', 'location')\nif HAS_REDIS:\n CACHES['redis'] = {\n \"BACKEND\": \"django_redis.cache.RedisCache\",\n \"LOCATION\": config.get('redis', 'location'),\n \"OPTIONS\": {\n \"CLIENT_CLASS\": \"django_redis.client.DefaultClient\",\n }\n }\n CACHES['redis_sessions'] = {\n \"BACKEND\": \"django_redis.cache.RedisCache\",\n \"LOCATION\": config.get('redis', 'location'),\n \"TIMEOUT\": 3600 * 24 * 30,\n \"OPTIONS\": {\n \"CLIENT_CLASS\": \"django_redis.client.DefaultClient\",\n }\n }\n if not HAS_MEMCACHED:\n CACHES['default'] = CACHES['redis']\n REAL_CACHE_USED = True\n if config.getboolean('redis', 'sessions', fallback=False):\n SESSION_ENGINE = \"django.contrib.sessions.backends.cache\"\n SESSION_CACHE_ALIAS = \"redis_sessions\"\n\nif not SESSION_ENGINE:\n if REAL_CACHE_USED:\n SESSION_ENGINE = \"django.contrib.sessions.backends.cached_db\"\n else:\n SESSION_ENGINE = \"django.contrib.sessions.backends.db\"\n\nHAS_CELERY = config.has_option('celery', 'broker')\nif HAS_CELERY:\n CELERY_BROKER_URL = config.get('celery', 'broker')\n CELERY_RESULT_BACKEND = config.get('celery', 'backend')\nelse:\n CELERY_TASK_ALWAYS_EAGER = True\n\nSESSION_COOKIE_DOMAIN = config.get('pretix', 'cookie_domain', fallback=None)\n\nCACHE_TICKETS_HOURS = config.getint('cache', 'tickets', fallback=24 * 3)\n\nENTROPY = {\n 'order_code': config.getint('entropy', 'order_code', fallback=5),\n 'ticket_secret': config.getint('entropy', 'ticket_secret', fallback=32),\n 'voucher_code': config.getint('entropy', 'voucher_code', fallback=16),\n 'giftcard_secret': config.getint('entropy', 'giftcard_secret', fallback=12),\n}\n\n# Internal settings\nPRETIX_EMAIL_NONE_VALUE = '[email protected]'\n\nSTATIC_ROOT = os.path.join(os.path.dirname(__file__), 'static.dist')\n\nSESSION_COOKIE_NAME = 'pretix_session'\nLANGUAGE_COOKIE_NAME = 'pretix_language'\nCSRF_COOKIE_NAME = 'pretix_csrftoken'\nSESSION_COOKIE_HTTPONLY = True\n\nINSTALLED_APPS = [\n 'django.contrib.auth',\n 'django.contrib.contenttypes',\n 'django.contrib.sessions',\n 'django.contrib.messages',\n 'django.contrib.staticfiles',\n 'pretix.base',\n 'pretix.control',\n 'pretix.presale',\n 'pretix.multidomain',\n 'pretix.api',\n 'pretix.helpers',\n 'rest_framework',\n 'django_filters',\n 'compressor',\n 'bootstrap3',\n 'djangoformsetjs',\n 'pretix.plugins.banktransfer',\n 'pretix.plugins.stripe',\n 'pretix.plugins.paypal',\n 'pretix.plugins.ticketoutputpdf',\n 'pretix.plugins.sendmail',\n 'pretix.plugins.statistics',\n 'pretix.plugins.reports',\n 'pretix.plugins.checkinlists',\n 'pretix.plugins.pretixdroid',\n 'pretix.plugins.badges',\n 'pretix.plugins.manualpayment',\n 'pretix.plugins.returnurl',\n 'django_markup',\n 'django_otp',\n 'django_otp.plugins.otp_totp',\n 'django_otp.plugins.otp_static',\n 'statici18n',\n 'django_countries',\n 'hijack',\n 'compat',\n 'oauth2_provider',\n 'phonenumber_field'\n]\n\ntry:\n import django_extensions # noqa\n INSTALLED_APPS.append('django_extensions')\nexcept ImportError:\n pass\n\nPLUGINS = []\nfor entry_point in iter_entry_points(group='pretix.plugin', name=None):\n if entry_point.module_name in PRETIX_PLUGINS_EXCLUDE:\n continue\n PLUGINS.append(entry_point.module_name)\n INSTALLED_APPS.append(entry_point.module_name)\n\nHIJACK_AUTHORIZE_STAFF = True\n\n\nREST_FRAMEWORK = {\n 'DEFAULT_PERMISSION_CLASSES': [\n 'pretix.api.auth.permission.EventPermission',\n ],\n 'DEFAULT_PAGINATION_CLASS': 'rest_framework.pagination.PageNumberPagination',\n 'DEFAULT_VERSIONING_CLASS': 'rest_framework.versioning.NamespaceVersioning',\n 'PAGE_SIZE': 50,\n 'DEFAULT_AUTHENTICATION_CLASSES': (\n 'pretix.api.auth.token.TeamTokenAuthentication',\n 'pretix.api.auth.device.DeviceTokenAuthentication',\n 'rest_framework.authentication.SessionAuthentication',\n 'oauth2_provider.contrib.rest_framework.OAuth2Authentication',\n ),\n 'DEFAULT_RENDERER_CLASSES': (\n 'rest_framework.renderers.JSONRenderer',\n ),\n 'EXCEPTION_HANDLER': 'pretix.api.exception.custom_exception_handler',\n 'UNICODE_JSON': False\n}\n\n\nCORE_MODULES = {\n \"pretix.base\",\n \"pretix.presale\",\n \"pretix.control\",\n \"pretix.plugins.checkinlists\",\n}\n\nMIDDLEWARE = [\n 'pretix.api.middleware.IdempotencyMiddleware',\n 'pretix.multidomain.middlewares.MultiDomainMiddleware',\n 'django.middleware.common.CommonMiddleware',\n 'pretix.multidomain.middlewares.SessionMiddleware',\n 'pretix.multidomain.middlewares.CsrfViewMiddleware',\n 'django.contrib.auth.middleware.AuthenticationMiddleware',\n 'django.contrib.messages.middleware.MessageMiddleware',\n 'django.middleware.clickjacking.XFrameOptionsMiddleware',\n 'pretix.control.middleware.PermissionMiddleware',\n 'pretix.control.middleware.AuditLogMiddleware',\n 'pretix.base.middleware.LocaleMiddleware',\n 'pretix.base.middleware.SecurityMiddleware',\n 'pretix.presale.middleware.EventMiddleware',\n 'pretix.api.middleware.ApiScopeMiddleware',\n]\n\ntry:\n import debug_toolbar # noqa\n if DEBUG:\n INSTALLED_APPS.append('debug_toolbar.apps.DebugToolbarConfig')\n MIDDLEWARE.insert(0, 'debug_toolbar.middleware.DebugToolbarMiddleware')\nexcept ImportError:\n pass\n\n\nif METRICS_ENABLED:\n MIDDLEWARE.insert(MIDDLEWARE.index('django.middleware.common.CommonMiddleware') + 1,\n 'pretix.helpers.metrics.middleware.MetricsMiddleware')\n\n\nPROFILING_RATE = config.getfloat('django', 'profile', fallback=0) # Percentage of requests to profile\nif PROFILING_RATE > 0:\n if not os.path.exists(PROFILE_DIR):\n os.mkdir(PROFILE_DIR)\n MIDDLEWARE.insert(0, 'pretix.helpers.profile.middleware.CProfileMiddleware')\n\n\n# Security settings\nX_FRAME_OPTIONS = 'DENY'\n\n# URL settings\nROOT_URLCONF = 'pretix.multidomain.maindomain_urlconf'\n\nWSGI_APPLICATION = 'pretix.wsgi.application'\n\nUSE_I18N = True\nUSE_L10N = True\nUSE_TZ = True\n\nLOCALE_PATHS = [\n os.path.join(os.path.dirname(__file__), 'locale'),\n]\nif config.has_option('languages', 'path'):\n LOCALE_PATHS.insert(0, config.get('languages', 'path'))\n\nFORMAT_MODULE_PATH = [\n 'pretix.helpers.formats',\n]\n\nALL_LANGUAGES = [\n ('en', _('English')),\n ('de', _('German')),\n ('de-informal', _('German (informal)')),\n ('ar', _('Arabic')),\n ('zh-hans', _('Chinese (simplified)')),\n ('da', _('Danish')),\n ('nl', _('Dutch')),\n ('nl-informal', _('Dutch (informal)')),\n ('fr', _('French')),\n ('el', _('Greek')),\n ('it', _('Italian')),\n ('lv', _('Latvian')),\n ('pl', _('Polish')),\n ('pt-br', _('Portuguese (Brazil)')),\n ('ru', _('Russian')),\n ('es', _('Spanish')),\n ('tr', _('Turkish')),\n]\nLANGUAGES_OFFICIAL = {\n 'en', 'de', 'de-informal'\n}\nLANGUAGES_INCUBATING = {\n 'pt-br', 'pl',\n} - set(config.get('languages', 'allow_incubating', fallback='').split(','))\nLANGUAGES_RTL = {\n 'ar', 'hw'\n}\n\nif DEBUG:\n LANGUAGES = ALL_LANGUAGES\nelse:\n LANGUAGES = [(k, v) for k, v in ALL_LANGUAGES if k not in LANGUAGES_INCUBATING]\n\n\nEXTRA_LANG_INFO = {\n 'de-informal': {\n 'bidi': False,\n 'code': 'de-informal',\n 'name': 'German (informal)',\n 'name_local': 'Deutsch',\n 'public_code': 'de',\n },\n 'nl-informal': {\n 'bidi': False,\n 'code': 'nl-informal',\n 'name': 'Dutch (informal)',\n 'name_local': 'Nederlands',\n 'public_code': 'nl',\n },\n 'fr': {\n 'bidi': False,\n 'code': 'fr',\n 'name': 'French',\n 'name_local': 'Français'\n },\n 'lv': {\n 'bidi': False,\n 'code': 'lv',\n 'name': 'Latvian',\n 'name_local': 'Latviešu'\n },\n}\n\ndjango.conf.locale.LANG_INFO.update(EXTRA_LANG_INFO)\n\n\nAUTH_USER_MODEL = 'pretixbase.User'\nLOGIN_URL = 'control:auth.login'\nLOGIN_URL_CONTROL = 'control:auth.login'\nCSRF_FAILURE_VIEW = 'pretix.base.views.errors.csrf_failure'\n\ntemplate_loaders = (\n 'django.template.loaders.filesystem.Loader',\n 'django.template.loaders.app_directories.Loader',\n)\nif not DEBUG:\n template_loaders = (\n ('django.template.loaders.cached.Loader', template_loaders),\n )\n\nTEMPLATES = [\n {\n 'BACKEND': 'django.template.backends.django.DjangoTemplates',\n 'DIRS': [\n os.path.join(DATA_DIR, 'templates'),\n os.path.join(BASE_DIR, 'templates'),\n ],\n 'OPTIONS': {\n 'context_processors': [\n 'django.contrib.auth.context_processors.auth',\n 'django.template.context_processors.debug',\n 'django.template.context_processors.i18n',\n 'django.template.context_processors.media',\n \"django.template.context_processors.request\",\n 'django.template.context_processors.static',\n 'django.template.context_processors.tz',\n 'django.contrib.messages.context_processors.messages',\n 'pretix.base.context.contextprocessor',\n 'pretix.control.context.contextprocessor',\n 'pretix.presale.context.contextprocessor',\n ],\n 'loaders': template_loaders\n },\n },\n]\n\nSTATICFILES_FINDERS = (\n 'django.contrib.staticfiles.finders.FileSystemFinder',\n 'django.contrib.staticfiles.finders.AppDirectoriesFinder',\n 'compressor.finders.CompressorFinder',\n)\n\nSTATICFILES_DIRS = [\n os.path.join(BASE_DIR, 'pretix/static')\n] if os.path.exists(os.path.join(BASE_DIR, 'pretix/static')) else []\n\nSTATICI18N_ROOT = os.path.join(BASE_DIR, \"pretix/static\")\n\nSTATICFILES_STORAGE = 'django.contrib.staticfiles.storage.ManifestStaticFilesStorage'\n\n# if os.path.exists(os.path.join(DATA_DIR, 'static')):\n# STATICFILES_DIRS.insert(0, os.path.join(DATA_DIR, 'static'))\n\nCOMPRESS_PRECOMPILERS = (\n ('text/x-scss', 'django_libsass.SassCompiler'),\n)\n\nCOMPRESS_ENABLED = COMPRESS_OFFLINE = not debug_fallback\n\nCOMPRESS_CSS_FILTERS = (\n # CssAbsoluteFilter is incredibly slow, especially when dealing with our _flags.scss\n # However, we don't need it if we consequently use the static() function in Sass\n # 'compressor.filters.css_default.CssAbsoluteFilter',\n 'compressor.filters.cssmin.CSSCompressorFilter',\n)\n\n# Debug toolbar\nDEBUG_TOOLBAR_PATCH_SETTINGS = False\n\n\nDEBUG_TOOLBAR_CONFIG = {\n 'JQUERY_URL': '',\n}\n\nINTERNAL_IPS = ('127.0.0.1', '::1')\n\nMESSAGE_TAGS = {\n messages.INFO: 'alert-info',\n messages.ERROR: 'alert-danger',\n messages.WARNING: 'alert-warning',\n messages.SUCCESS: 'alert-success',\n}\nMESSAGE_STORAGE = 'django.contrib.messages.storage.session.SessionStorage'\n\nloglevel = 'DEBUG' if DEBUG else 'INFO'\n\nLOGGING = {\n 'version': 1,\n 'disable_existing_loggers': False,\n 'formatters': {\n 'default': {\n 'format': '%(levelname)s %(asctime)s %(name)s %(module)s %(message)s'\n },\n },\n 'filters': {\n 'require_admin_enabled': {\n '()': 'pretix.helpers.logs.AdminExistsFilter',\n }\n },\n 'handlers': {\n 'console': {\n 'level': loglevel,\n 'class': 'logging.StreamHandler',\n 'formatter': 'default'\n },\n 'csp_file': {\n 'level': loglevel,\n 'class': 'logging.FileHandler',\n 'filename': os.path.join(LOG_DIR, 'csp.log'),\n 'formatter': 'default'\n },\n 'file': {\n 'level': loglevel,\n 'class': 'logging.FileHandler',\n 'filename': os.path.join(LOG_DIR, 'pretix.log'),\n 'formatter': 'default'\n },\n 'mail_admins': {\n 'level': 'ERROR',\n 'class': 'django.utils.log.AdminEmailHandler',\n 'filters': ['require_admin_enabled']\n },\n 'null': {\n 'class': 'logging.NullHandler',\n },\n },\n 'loggers': {\n '': {\n 'handlers': ['file', 'console'],\n 'level': loglevel,\n 'propagate': True,\n },\n 'django.request': {\n 'handlers': ['file', 'console', 'mail_admins'],\n 'level': loglevel,\n 'propagate': True,\n },\n 'pretix.security.csp': {\n 'handlers': ['csp_file'],\n 'level': loglevel,\n 'propagate': False,\n },\n 'django.security': {\n 'handlers': ['file', 'console', 'mail_admins'],\n 'level': loglevel,\n 'propagate': True,\n },\n 'django.security.DisallowedHost': {\n 'handlers': ['null'],\n 'propagate': False,\n },\n 'django.db.backends': {\n 'handlers': ['file', 'console'],\n 'level': 'INFO', # Do not output all the queries\n 'propagate': True,\n }\n },\n}\n\nSENTRY_ENABLED = False\nif config.has_option('sentry', 'dsn') and not any(c in sys.argv for c in ('shell', 'shell_scoped', 'shell_plus')):\n import sentry_sdk\n from sentry_sdk.integrations.celery import CeleryIntegration\n from sentry_sdk.integrations.logging import (\n LoggingIntegration, ignore_logger,\n )\n\n from .sentry import PretixSentryIntegration, setup_custom_filters\n\n SENTRY_ENABLED = True\n sentry_sdk.init(\n dsn=config.get('sentry', 'dsn'),\n integrations=[\n PretixSentryIntegration(),\n CeleryIntegration(),\n LoggingIntegration(\n level=logging.INFO,\n event_level=logging.CRITICAL\n )\n ],\n environment=SITE_URL,\n release=__version__,\n send_default_pii=False,\n )\n ignore_logger('pretix.base.tasks')\n ignore_logger('django.security.DisallowedHost')\n setup_custom_filters()\n\nCELERY_TASK_SERIALIZER = 'json'\nCELERY_RESULT_SERIALIZER = 'json'\nCELERY_TASK_DEFAULT_QUEUE = 'default'\nCELERY_TASK_QUEUES = (\n Queue('default', routing_key='default.#'),\n Queue('checkout', routing_key='checkout.#'),\n Queue('mail', routing_key='mail.#'),\n Queue('background', routing_key='background.#'),\n Queue('notifications', routing_key='notifications.#'),\n)\nCELERY_TASK_ROUTES = ([\n ('pretix.base.services.cart.*', {'queue': 'checkout'}),\n ('pretix.base.services.orders.*', {'queue': 'checkout'}),\n ('pretix.base.services.mail.*', {'queue': 'mail'}),\n ('pretix.base.services.update_check.*', {'queue': 'background'}),\n ('pretix.base.services.quotas.*', {'queue': 'background'}),\n ('pretix.base.services.waitinglist.*', {'queue': 'background'}),\n ('pretix.base.services.notifications.*', {'queue': 'notifications'}),\n ('pretix.api.webhooks.*', {'queue': 'notifications'}),\n ('pretix.presale.style.*', {'queue': 'background'}),\n ('pretix.plugins.banktransfer.*', {'queue': 'background'}),\n],)\n\nBOOTSTRAP3 = {\n 'success_css_class': '',\n 'field_renderers': {\n 'default': 'bootstrap3.renderers.FieldRenderer',\n 'inline': 'bootstrap3.renderers.InlineFieldRenderer',\n 'control': 'pretix.control.forms.renderers.ControlFieldRenderer',\n 'checkout': 'pretix.presale.forms.renderers.CheckoutFieldRenderer',\n },\n}\n\nAUTH_PASSWORD_VALIDATORS = [\n {\n 'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',\n },\n {\n 'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',\n },\n {\n 'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',\n },\n {\n 'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',\n },\n]\nOAUTH2_PROVIDER_APPLICATION_MODEL = 'pretixapi.OAuthApplication'\nOAUTH2_PROVIDER_GRANT_MODEL = 'pretixapi.OAuthGrant'\nOAUTH2_PROVIDER_ACCESS_TOKEN_MODEL = 'pretixapi.OAuthAccessToken'\nOAUTH2_PROVIDER_REFRESH_TOKEN_MODEL = 'pretixapi.OAuthRefreshToken'\nOAUTH2_PROVIDER = {\n 'SCOPES': {\n 'read': _('Read access'),\n 'write': _('Write access'),\n },\n 'OAUTH2_VALIDATOR_CLASS': 'pretix.api.oauth.Validator',\n 'ALLOWED_REDIRECT_URI_SCHEMES': ['https'] if not DEBUG else ['http', 'https'],\n 'ACCESS_TOKEN_EXPIRE_SECONDS': 3600 * 24,\n 'ROTATE_REFRESH_TOKEN': False,\n\n}\n\nCOUNTRIES_OVERRIDE = {\n 'XK': _('Kosovo'),\n}\n", "path": "src/pretix/settings.py" } ]
[ { "content": "import configparser\nimport logging\nimport os\nimport sys\nfrom urllib.parse import urlparse\n\nimport django.conf.locale\nfrom django.utils.crypto import get_random_string\nfrom kombu import Queue\nfrom pkg_resources import iter_entry_points\nfrom pycountry import currencies\n\nfrom . import __version__\n\nfrom django.contrib.messages import constants as messages # NOQA\nfrom django.utils.translation import gettext_lazy as _ # NOQA\n\nconfig = configparser.RawConfigParser()\nif 'PRETIX_CONFIG_FILE' in os.environ:\n config.read_file(open(os.environ.get('PRETIX_CONFIG_FILE'), encoding='utf-8'))\nelse:\n config.read(['/etc/pretix/pretix.cfg', os.path.expanduser('~/.pretix.cfg'), 'pretix.cfg'],\n encoding='utf-8')\n\nCONFIG_FILE = config\nBASE_DIR = os.path.dirname(os.path.dirname(__file__))\nDATA_DIR = config.get('pretix', 'datadir', fallback=os.environ.get('DATA_DIR', 'data'))\nLOG_DIR = os.path.join(DATA_DIR, 'logs')\nMEDIA_ROOT = os.path.join(DATA_DIR, 'media')\nPROFILE_DIR = os.path.join(DATA_DIR, 'profiles')\n\nif not os.path.exists(DATA_DIR):\n os.mkdir(DATA_DIR)\nif not os.path.exists(LOG_DIR):\n os.mkdir(LOG_DIR)\nif not os.path.exists(MEDIA_ROOT):\n os.mkdir(MEDIA_ROOT)\n\nif config.has_option('django', 'secret'):\n SECRET_KEY = config.get('django', 'secret')\nelse:\n SECRET_FILE = os.path.join(DATA_DIR, '.secret')\n if os.path.exists(SECRET_FILE):\n with open(SECRET_FILE, 'r') as f:\n SECRET_KEY = f.read().strip()\n else:\n chars = 'abcdefghijklmnopqrstuvwxyz0123456789!@#$%^&*(-_=+)'\n SECRET_KEY = get_random_string(50, chars)\n with open(SECRET_FILE, 'w') as f:\n os.chmod(SECRET_FILE, 0o600)\n try:\n os.chown(SECRET_FILE, os.getuid(), os.getgid())\n except AttributeError:\n pass # os.chown is not available on Windows\n f.write(SECRET_KEY)\n\n# Adjustable settings\n\ndebug_fallback = \"runserver\" in sys.argv\nDEBUG = config.getboolean('django', 'debug', fallback=debug_fallback)\nLOG_CSP = config.getboolean('pretix', 'csp_log', fallback=True)\n\nPDFTK = config.get('tools', 'pdftk', fallback=None)\n\nPRETIX_AUTH_BACKENDS = config.get('pretix', 'auth_backends', fallback='pretix.base.auth.NativeAuthBackend').split(',')\n\ndb_backend = config.get('database', 'backend', fallback='sqlite3')\nif db_backend == 'postgresql_psycopg2':\n db_backend = 'postgresql'\nDATABASE_IS_GALERA = config.getboolean('database', 'galera', fallback=False)\nif DATABASE_IS_GALERA and 'mysql' in db_backend:\n db_options = {\n 'init_command': 'SET SESSION wsrep_sync_wait = 1;'\n }\nelse:\n db_options = {}\n\nif 'mysql' in db_backend:\n db_options['charset'] = 'utf8mb4'\nJSON_FIELD_AVAILABLE = db_backend in ('mysql', 'postgresql')\n\nDATABASES = {\n 'default': {\n 'ENGINE': 'django.db.backends.' + db_backend,\n 'NAME': config.get('database', 'name', fallback=os.path.join(DATA_DIR, 'db.sqlite3')),\n 'USER': config.get('database', 'user', fallback=''),\n 'PASSWORD': config.get('database', 'password', fallback=''),\n 'HOST': config.get('database', 'host', fallback=''),\n 'PORT': config.get('database', 'port', fallback=''),\n 'CONN_MAX_AGE': 0 if db_backend == 'sqlite3' else 120,\n 'OPTIONS': db_options,\n 'TEST': {\n 'CHARSET': 'utf8mb4',\n 'COLLATION': 'utf8mb4_unicode_ci',\n } if 'mysql' in db_backend else {}\n }\n}\nDATABASE_REPLICA = 'default'\nif config.has_section('replica'):\n DATABASE_REPLICA = 'replica'\n DATABASES['replica'] = {\n 'ENGINE': 'django.db.backends.' + db_backend,\n 'NAME': config.get('replica', 'name', fallback=DATABASES['default']['NAME']),\n 'USER': config.get('replica', 'user', fallback=DATABASES['default']['USER']),\n 'PASSWORD': config.get('replica', 'password', fallback=DATABASES['default']['PASSWORD']),\n 'HOST': config.get('replica', 'host', fallback=DATABASES['default']['HOST']),\n 'PORT': config.get('replica', 'port', fallback=DATABASES['default']['PORT']),\n 'CONN_MAX_AGE': 0 if db_backend == 'sqlite3' else 120,\n 'OPTIONS': db_options,\n 'TEST': {\n 'CHARSET': 'utf8mb4',\n 'COLLATION': 'utf8mb4_unicode_ci',\n } if 'mysql' in db_backend else {}\n }\n DATABASE_ROUTERS = ['pretix.helpers.database.ReplicaRouter']\n\nSTATIC_URL = config.get('urls', 'static', fallback='/static/')\n\nMEDIA_URL = config.get('urls', 'media', fallback='/media/')\n\nPRETIX_INSTANCE_NAME = config.get('pretix', 'instance_name', fallback='pretix.de')\nPRETIX_REGISTRATION = config.getboolean('pretix', 'registration', fallback=True)\nPRETIX_PASSWORD_RESET = config.getboolean('pretix', 'password_reset', fallback=True)\nPRETIX_LONG_SESSIONS = config.getboolean('pretix', 'long_sessions', fallback=True)\nPRETIX_ADMIN_AUDIT_COMMENTS = config.getboolean('pretix', 'audit_comments', fallback=False)\nPRETIX_OBLIGATORY_2FA = config.getboolean('pretix', 'obligatory_2fa', fallback=False)\nPRETIX_SESSION_TIMEOUT_RELATIVE = 3600 * 3\nPRETIX_SESSION_TIMEOUT_ABSOLUTE = 3600 * 12\nPRETIX_PRIMARY_COLOR = '#8E44B3'\n\nSITE_URL = config.get('pretix', 'url', fallback='http://localhost')\nif SITE_URL.endswith('/'):\n SITE_URL = SITE_URL[:-1]\n\nCSRF_TRUSTED_ORIGINS = [urlparse(SITE_URL).hostname]\n\nTRUST_X_FORWARDED_FOR = config.get('pretix', 'trust_x_forwarded_for', fallback=False)\n\nif config.get('pretix', 'trust_x_forwarded_proto', fallback=False):\n SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')\n\nPRETIX_PLUGINS_DEFAULT = config.get('pretix', 'plugins_default',\n fallback='pretix.plugins.sendmail,pretix.plugins.statistics,pretix.plugins.checkinlists,pretix.plugins.autocheckin')\nPRETIX_PLUGINS_EXCLUDE = config.get('pretix', 'plugins_exclude', fallback='').split(',')\n\nFETCH_ECB_RATES = config.getboolean('pretix', 'ecb_rates', fallback=True)\n\nDEFAULT_CURRENCY = config.get('pretix', 'currency', fallback='EUR')\nCURRENCIES = list(currencies)\nCURRENCY_PLACES = {\n # default is 2\n 'BIF': 0,\n 'CLP': 0,\n 'DJF': 0,\n 'GNF': 0,\n 'JPY': 0,\n 'KMF': 0,\n 'KRW': 0,\n 'MGA': 0,\n 'PYG': 0,\n 'RWF': 0,\n 'VND': 0,\n 'VUV': 0,\n 'XAF': 0,\n 'XOF': 0,\n 'XPF': 0,\n}\n\nALLOWED_HOSTS = ['*']\n\nLANGUAGE_CODE = config.get('locale', 'default', fallback='en')\nTIME_ZONE = config.get('locale', 'timezone', fallback='UTC')\n\nMAIL_FROM = SERVER_EMAIL = DEFAULT_FROM_EMAIL = config.get(\n 'mail', 'from', fallback='pretix@localhost')\nEMAIL_HOST = config.get('mail', 'host', fallback='localhost')\nEMAIL_PORT = config.getint('mail', 'port', fallback=25)\nEMAIL_HOST_USER = config.get('mail', 'user', fallback='')\nEMAIL_HOST_PASSWORD = config.get('mail', 'password', fallback='')\nEMAIL_USE_TLS = config.getboolean('mail', 'tls', fallback=False)\nEMAIL_USE_SSL = config.getboolean('mail', 'ssl', fallback=False)\nEMAIL_SUBJECT_PREFIX = '[pretix] '\n\nADMINS = [('Admin', n) for n in config.get('mail', 'admins', fallback='').split(\",\") if n]\n\nMETRICS_ENABLED = config.getboolean('metrics', 'enabled', fallback=False)\nMETRICS_USER = config.get('metrics', 'user', fallback=\"metrics\")\nMETRICS_PASSPHRASE = config.get('metrics', 'passphrase', fallback=\"\")\n\nCACHES = {\n 'default': {\n 'BACKEND': 'pretix.helpers.cache.CustomDummyCache',\n }\n}\nREAL_CACHE_USED = False\nSESSION_ENGINE = None\n\nHAS_MEMCACHED = config.has_option('memcached', 'location')\nif HAS_MEMCACHED:\n REAL_CACHE_USED = True\n CACHES['default'] = {\n 'BACKEND': 'django.core.cache.backends.memcached.PyLibMCCache',\n 'LOCATION': config.get('memcached', 'location'),\n }\n\nHAS_REDIS = config.has_option('redis', 'location')\nif HAS_REDIS:\n CACHES['redis'] = {\n \"BACKEND\": \"django_redis.cache.RedisCache\",\n \"LOCATION\": config.get('redis', 'location'),\n \"OPTIONS\": {\n \"CLIENT_CLASS\": \"django_redis.client.DefaultClient\",\n }\n }\n CACHES['redis_sessions'] = {\n \"BACKEND\": \"django_redis.cache.RedisCache\",\n \"LOCATION\": config.get('redis', 'location'),\n \"TIMEOUT\": 3600 * 24 * 30,\n \"OPTIONS\": {\n \"CLIENT_CLASS\": \"django_redis.client.DefaultClient\",\n }\n }\n if not HAS_MEMCACHED:\n CACHES['default'] = CACHES['redis']\n REAL_CACHE_USED = True\n if config.getboolean('redis', 'sessions', fallback=False):\n SESSION_ENGINE = \"django.contrib.sessions.backends.cache\"\n SESSION_CACHE_ALIAS = \"redis_sessions\"\n\nif not SESSION_ENGINE:\n if REAL_CACHE_USED:\n SESSION_ENGINE = \"django.contrib.sessions.backends.cached_db\"\n else:\n SESSION_ENGINE = \"django.contrib.sessions.backends.db\"\n\nHAS_CELERY = config.has_option('celery', 'broker')\nif HAS_CELERY:\n CELERY_BROKER_URL = config.get('celery', 'broker')\n CELERY_RESULT_BACKEND = config.get('celery', 'backend')\nelse:\n CELERY_TASK_ALWAYS_EAGER = True\n\nSESSION_COOKIE_DOMAIN = config.get('pretix', 'cookie_domain', fallback=None)\n\nCACHE_TICKETS_HOURS = config.getint('cache', 'tickets', fallback=24 * 3)\n\nENTROPY = {\n 'order_code': config.getint('entropy', 'order_code', fallback=5),\n 'ticket_secret': config.getint('entropy', 'ticket_secret', fallback=32),\n 'voucher_code': config.getint('entropy', 'voucher_code', fallback=16),\n 'giftcard_secret': config.getint('entropy', 'giftcard_secret', fallback=12),\n}\n\n# Internal settings\nPRETIX_EMAIL_NONE_VALUE = '[email protected]'\n\nSTATIC_ROOT = os.path.join(os.path.dirname(__file__), 'static.dist')\n\nSESSION_COOKIE_NAME = 'pretix_session'\nLANGUAGE_COOKIE_NAME = 'pretix_language'\nCSRF_COOKIE_NAME = 'pretix_csrftoken'\nSESSION_COOKIE_HTTPONLY = True\n\nINSTALLED_APPS = [\n 'django.contrib.auth',\n 'django.contrib.contenttypes',\n 'django.contrib.sessions',\n 'django.contrib.messages',\n 'django.contrib.staticfiles',\n 'pretix.base',\n 'pretix.control',\n 'pretix.presale',\n 'pretix.multidomain',\n 'pretix.api',\n 'pretix.helpers',\n 'rest_framework',\n 'django_filters',\n 'compressor',\n 'bootstrap3',\n 'djangoformsetjs',\n 'pretix.plugins.banktransfer',\n 'pretix.plugins.stripe',\n 'pretix.plugins.paypal',\n 'pretix.plugins.ticketoutputpdf',\n 'pretix.plugins.sendmail',\n 'pretix.plugins.statistics',\n 'pretix.plugins.reports',\n 'pretix.plugins.checkinlists',\n 'pretix.plugins.pretixdroid',\n 'pretix.plugins.badges',\n 'pretix.plugins.manualpayment',\n 'pretix.plugins.returnurl',\n 'django_markup',\n 'django_otp',\n 'django_otp.plugins.otp_totp',\n 'django_otp.plugins.otp_static',\n 'statici18n',\n 'django_countries',\n 'hijack',\n 'compat',\n 'oauth2_provider',\n 'phonenumber_field'\n]\n\ntry:\n import django_extensions # noqa\n INSTALLED_APPS.append('django_extensions')\nexcept ImportError:\n pass\n\nPLUGINS = []\nfor entry_point in iter_entry_points(group='pretix.plugin', name=None):\n if entry_point.module_name in PRETIX_PLUGINS_EXCLUDE:\n continue\n PLUGINS.append(entry_point.module_name)\n INSTALLED_APPS.append(entry_point.module_name)\n\nHIJACK_AUTHORIZE_STAFF = True\n\n\nREST_FRAMEWORK = {\n 'DEFAULT_PERMISSION_CLASSES': [\n 'pretix.api.auth.permission.EventPermission',\n ],\n 'DEFAULT_PAGINATION_CLASS': 'rest_framework.pagination.PageNumberPagination',\n 'DEFAULT_VERSIONING_CLASS': 'rest_framework.versioning.NamespaceVersioning',\n 'PAGE_SIZE': 50,\n 'DEFAULT_AUTHENTICATION_CLASSES': (\n 'pretix.api.auth.token.TeamTokenAuthentication',\n 'pretix.api.auth.device.DeviceTokenAuthentication',\n 'rest_framework.authentication.SessionAuthentication',\n 'oauth2_provider.contrib.rest_framework.OAuth2Authentication',\n ),\n 'DEFAULT_RENDERER_CLASSES': (\n 'rest_framework.renderers.JSONRenderer',\n ),\n 'EXCEPTION_HANDLER': 'pretix.api.exception.custom_exception_handler',\n 'UNICODE_JSON': False\n}\n\n\nCORE_MODULES = {\n \"pretix.base\",\n \"pretix.presale\",\n \"pretix.control\",\n \"pretix.plugins.checkinlists\",\n}\n\nMIDDLEWARE = [\n 'pretix.api.middleware.IdempotencyMiddleware',\n 'pretix.multidomain.middlewares.MultiDomainMiddleware',\n 'django.middleware.common.CommonMiddleware',\n 'pretix.multidomain.middlewares.SessionMiddleware',\n 'pretix.multidomain.middlewares.CsrfViewMiddleware',\n 'django.contrib.auth.middleware.AuthenticationMiddleware',\n 'django.contrib.messages.middleware.MessageMiddleware',\n 'django.middleware.clickjacking.XFrameOptionsMiddleware',\n 'pretix.control.middleware.PermissionMiddleware',\n 'pretix.control.middleware.AuditLogMiddleware',\n 'pretix.base.middleware.LocaleMiddleware',\n 'pretix.base.middleware.SecurityMiddleware',\n 'pretix.presale.middleware.EventMiddleware',\n 'pretix.api.middleware.ApiScopeMiddleware',\n]\n\ntry:\n import debug_toolbar # noqa\n if DEBUG:\n INSTALLED_APPS.append('debug_toolbar.apps.DebugToolbarConfig')\n MIDDLEWARE.insert(0, 'debug_toolbar.middleware.DebugToolbarMiddleware')\nexcept ImportError:\n pass\n\n\nif METRICS_ENABLED:\n MIDDLEWARE.insert(MIDDLEWARE.index('django.middleware.common.CommonMiddleware') + 1,\n 'pretix.helpers.metrics.middleware.MetricsMiddleware')\n\n\nPROFILING_RATE = config.getfloat('django', 'profile', fallback=0) # Percentage of requests to profile\nif PROFILING_RATE > 0:\n if not os.path.exists(PROFILE_DIR):\n os.mkdir(PROFILE_DIR)\n MIDDLEWARE.insert(0, 'pretix.helpers.profile.middleware.CProfileMiddleware')\n\n\n# Security settings\nX_FRAME_OPTIONS = 'DENY'\n\n# URL settings\nROOT_URLCONF = 'pretix.multidomain.maindomain_urlconf'\n\nWSGI_APPLICATION = 'pretix.wsgi.application'\n\nUSE_I18N = True\nUSE_L10N = True\nUSE_TZ = True\n\nLOCALE_PATHS = [\n os.path.join(os.path.dirname(__file__), 'locale'),\n]\nif config.has_option('languages', 'path'):\n LOCALE_PATHS.insert(0, config.get('languages', 'path'))\n\nFORMAT_MODULE_PATH = [\n 'pretix.helpers.formats',\n]\n\nALL_LANGUAGES = [\n ('en', _('English')),\n ('de', _('German')),\n ('de-informal', _('German (informal)')),\n ('ar', _('Arabic')),\n ('zh-hans', _('Chinese (simplified)')),\n ('da', _('Danish')),\n ('nl', _('Dutch')),\n ('nl-informal', _('Dutch (informal)')),\n ('fr', _('French')),\n ('el', _('Greek')),\n ('it', _('Italian')),\n ('lv', _('Latvian')),\n ('pl', _('Polish')),\n ('pt-br', _('Portuguese (Brazil)')),\n ('ru', _('Russian')),\n ('es', _('Spanish')),\n ('tr', _('Turkish')),\n]\nLANGUAGES_OFFICIAL = {\n 'en', 'de', 'de-informal'\n}\nLANGUAGES_INCUBATING = {\n 'pt-br', 'pl',\n} - set(config.get('languages', 'allow_incubating', fallback='').split(','))\nLANGUAGES_RTL = {\n 'ar', 'hw'\n}\n\nif DEBUG:\n LANGUAGES = ALL_LANGUAGES\nelse:\n LANGUAGES = [(k, v) for k, v in ALL_LANGUAGES if k not in LANGUAGES_INCUBATING]\n\n\nEXTRA_LANG_INFO = {\n 'de-informal': {\n 'bidi': False,\n 'code': 'de-informal',\n 'name': 'German (informal)',\n 'name_local': 'Deutsch',\n 'public_code': 'de',\n },\n 'nl-informal': {\n 'bidi': False,\n 'code': 'nl-informal',\n 'name': 'Dutch (informal)',\n 'name_local': 'Nederlands',\n 'public_code': 'nl',\n },\n 'fr': {\n 'bidi': False,\n 'code': 'fr',\n 'name': 'French',\n 'name_local': 'Français'\n },\n 'lv': {\n 'bidi': False,\n 'code': 'lv',\n 'name': 'Latvian',\n 'name_local': 'Latviešu'\n },\n}\n\ndjango.conf.locale.LANG_INFO.update(EXTRA_LANG_INFO)\n\n\nAUTH_USER_MODEL = 'pretixbase.User'\nLOGIN_URL = 'control:auth.login'\nLOGIN_URL_CONTROL = 'control:auth.login'\nCSRF_FAILURE_VIEW = 'pretix.base.views.errors.csrf_failure'\n\ntemplate_loaders = (\n 'django.template.loaders.filesystem.Loader',\n 'django.template.loaders.app_directories.Loader',\n)\nif not DEBUG:\n template_loaders = (\n ('django.template.loaders.cached.Loader', template_loaders),\n )\n\nTEMPLATES = [\n {\n 'BACKEND': 'django.template.backends.django.DjangoTemplates',\n 'DIRS': [\n os.path.join(DATA_DIR, 'templates'),\n os.path.join(BASE_DIR, 'templates'),\n ],\n 'OPTIONS': {\n 'context_processors': [\n 'django.contrib.auth.context_processors.auth',\n 'django.template.context_processors.debug',\n 'django.template.context_processors.i18n',\n 'django.template.context_processors.media',\n \"django.template.context_processors.request\",\n 'django.template.context_processors.static',\n 'django.template.context_processors.tz',\n 'django.contrib.messages.context_processors.messages',\n 'pretix.base.context.contextprocessor',\n 'pretix.control.context.contextprocessor',\n 'pretix.presale.context.contextprocessor',\n ],\n 'loaders': template_loaders\n },\n },\n]\n\nSTATICFILES_FINDERS = (\n 'django.contrib.staticfiles.finders.FileSystemFinder',\n 'django.contrib.staticfiles.finders.AppDirectoriesFinder',\n 'compressor.finders.CompressorFinder',\n)\n\nSTATICFILES_DIRS = [\n os.path.join(BASE_DIR, 'pretix/static')\n] if os.path.exists(os.path.join(BASE_DIR, 'pretix/static')) else []\n\nSTATICI18N_ROOT = os.path.join(BASE_DIR, \"pretix/static\")\n\nSTATICFILES_STORAGE = 'django.contrib.staticfiles.storage.ManifestStaticFilesStorage'\n\n# if os.path.exists(os.path.join(DATA_DIR, 'static')):\n# STATICFILES_DIRS.insert(0, os.path.join(DATA_DIR, 'static'))\n\nCOMPRESS_PRECOMPILERS = (\n ('text/x-scss', 'django_libsass.SassCompiler'),\n)\n\nCOMPRESS_ENABLED = COMPRESS_OFFLINE = not debug_fallback\n\nCOMPRESS_CSS_FILTERS = (\n # CssAbsoluteFilter is incredibly slow, especially when dealing with our _flags.scss\n # However, we don't need it if we consequently use the static() function in Sass\n # 'compressor.filters.css_default.CssAbsoluteFilter',\n 'compressor.filters.cssmin.CSSCompressorFilter',\n)\n\n# Debug toolbar\nDEBUG_TOOLBAR_PATCH_SETTINGS = False\n\n\nDEBUG_TOOLBAR_CONFIG = {\n 'JQUERY_URL': '',\n}\n\nINTERNAL_IPS = ('127.0.0.1', '::1')\n\nMESSAGE_TAGS = {\n messages.INFO: 'alert-info',\n messages.ERROR: 'alert-danger',\n messages.WARNING: 'alert-warning',\n messages.SUCCESS: 'alert-success',\n}\nMESSAGE_STORAGE = 'django.contrib.messages.storage.session.SessionStorage'\n\nloglevel = 'DEBUG' if DEBUG else config.get('pretix', 'loglevel', fallback='INFO')\n\nLOGGING = {\n 'version': 1,\n 'disable_existing_loggers': False,\n 'formatters': {\n 'default': {\n 'format': '%(levelname)s %(asctime)s %(name)s %(module)s %(message)s'\n },\n },\n 'filters': {\n 'require_admin_enabled': {\n '()': 'pretix.helpers.logs.AdminExistsFilter',\n }\n },\n 'handlers': {\n 'console': {\n 'level': loglevel,\n 'class': 'logging.StreamHandler',\n 'formatter': 'default'\n },\n 'csp_file': {\n 'level': loglevel,\n 'class': 'logging.FileHandler',\n 'filename': os.path.join(LOG_DIR, 'csp.log'),\n 'formatter': 'default'\n },\n 'file': {\n 'level': loglevel,\n 'class': 'logging.FileHandler',\n 'filename': os.path.join(LOG_DIR, 'pretix.log'),\n 'formatter': 'default'\n },\n 'mail_admins': {\n 'level': 'ERROR',\n 'class': 'django.utils.log.AdminEmailHandler',\n 'filters': ['require_admin_enabled']\n },\n 'null': {\n 'class': 'logging.NullHandler',\n },\n },\n 'loggers': {\n '': {\n 'handlers': ['file', 'console'],\n 'level': loglevel,\n 'propagate': True,\n },\n 'django.request': {\n 'handlers': ['file', 'console', 'mail_admins'],\n 'level': loglevel,\n 'propagate': True,\n },\n 'pretix.security.csp': {\n 'handlers': ['csp_file'],\n 'level': loglevel,\n 'propagate': False,\n },\n 'django.security': {\n 'handlers': ['file', 'console', 'mail_admins'],\n 'level': loglevel,\n 'propagate': True,\n },\n 'django.security.DisallowedHost': {\n 'handlers': ['null'],\n 'propagate': False,\n },\n 'django.db.backends': {\n 'handlers': ['file', 'console'],\n 'level': 'INFO', # Do not output all the queries\n 'propagate': True,\n }\n },\n}\n\nSENTRY_ENABLED = False\nif config.has_option('sentry', 'dsn') and not any(c in sys.argv for c in ('shell', 'shell_scoped', 'shell_plus')):\n import sentry_sdk\n from sentry_sdk.integrations.celery import CeleryIntegration\n from sentry_sdk.integrations.logging import (\n LoggingIntegration, ignore_logger,\n )\n\n from .sentry import PretixSentryIntegration, setup_custom_filters\n\n SENTRY_ENABLED = True\n sentry_sdk.init(\n dsn=config.get('sentry', 'dsn'),\n integrations=[\n PretixSentryIntegration(),\n CeleryIntegration(),\n LoggingIntegration(\n level=logging.INFO,\n event_level=logging.CRITICAL\n )\n ],\n environment=SITE_URL,\n release=__version__,\n send_default_pii=False,\n )\n ignore_logger('pretix.base.tasks')\n ignore_logger('django.security.DisallowedHost')\n setup_custom_filters()\n\nCELERY_TASK_SERIALIZER = 'json'\nCELERY_RESULT_SERIALIZER = 'json'\nCELERY_TASK_DEFAULT_QUEUE = 'default'\nCELERY_TASK_QUEUES = (\n Queue('default', routing_key='default.#'),\n Queue('checkout', routing_key='checkout.#'),\n Queue('mail', routing_key='mail.#'),\n Queue('background', routing_key='background.#'),\n Queue('notifications', routing_key='notifications.#'),\n)\nCELERY_TASK_ROUTES = ([\n ('pretix.base.services.cart.*', {'queue': 'checkout'}),\n ('pretix.base.services.orders.*', {'queue': 'checkout'}),\n ('pretix.base.services.mail.*', {'queue': 'mail'}),\n ('pretix.base.services.update_check.*', {'queue': 'background'}),\n ('pretix.base.services.quotas.*', {'queue': 'background'}),\n ('pretix.base.services.waitinglist.*', {'queue': 'background'}),\n ('pretix.base.services.notifications.*', {'queue': 'notifications'}),\n ('pretix.api.webhooks.*', {'queue': 'notifications'}),\n ('pretix.presale.style.*', {'queue': 'background'}),\n ('pretix.plugins.banktransfer.*', {'queue': 'background'}),\n],)\n\nBOOTSTRAP3 = {\n 'success_css_class': '',\n 'field_renderers': {\n 'default': 'bootstrap3.renderers.FieldRenderer',\n 'inline': 'bootstrap3.renderers.InlineFieldRenderer',\n 'control': 'pretix.control.forms.renderers.ControlFieldRenderer',\n 'checkout': 'pretix.presale.forms.renderers.CheckoutFieldRenderer',\n },\n}\n\nAUTH_PASSWORD_VALIDATORS = [\n {\n 'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',\n },\n {\n 'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',\n },\n {\n 'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',\n },\n {\n 'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',\n },\n]\nOAUTH2_PROVIDER_APPLICATION_MODEL = 'pretixapi.OAuthApplication'\nOAUTH2_PROVIDER_GRANT_MODEL = 'pretixapi.OAuthGrant'\nOAUTH2_PROVIDER_ACCESS_TOKEN_MODEL = 'pretixapi.OAuthAccessToken'\nOAUTH2_PROVIDER_REFRESH_TOKEN_MODEL = 'pretixapi.OAuthRefreshToken'\nOAUTH2_PROVIDER = {\n 'SCOPES': {\n 'read': _('Read access'),\n 'write': _('Write access'),\n },\n 'OAUTH2_VALIDATOR_CLASS': 'pretix.api.oauth.Validator',\n 'ALLOWED_REDIRECT_URI_SCHEMES': ['https'] if not DEBUG else ['http', 'https'],\n 'ACCESS_TOKEN_EXPIRE_SECONDS': 3600 * 24,\n 'ROTATE_REFRESH_TOKEN': False,\n\n}\n\nCOUNTRIES_OVERRIDE = {\n 'XK': _('Kosovo'),\n}\n", "path": "src/pretix/settings.py" } ]
diff --git a/doc/admin/config.rst b/doc/admin/config.rst index 7cf43b540d4..c6de356efe5 100644 --- a/doc/admin/config.rst +++ b/doc/admin/config.rst @@ -97,6 +97,9 @@ Example:: ``csp_log`` Log violations of the Content Security Policy (CSP). Defaults to ``on``. + +``loglevel`` + Set console and file loglevel (``DEBUG``, ``INFO``, ``WARNING``, ``ERROR`` or ``CRITICAL``). Defaults to ``INFO``. Locale settings --------------- diff --git a/src/pretix/settings.py b/src/pretix/settings.py index 8fff1d0ea7f..0bf158c4937 100644 --- a/src/pretix/settings.py +++ b/src/pretix/settings.py @@ -561,7 +561,7 @@ } MESSAGE_STORAGE = 'django.contrib.messages.storage.session.SessionStorage' -loglevel = 'DEBUG' if DEBUG else 'INFO' +loglevel = 'DEBUG' if DEBUG else config.get('pretix', 'loglevel', fallback='INFO') LOGGING = { 'version': 1,
ivy-llc__ivy-22920
eigvals
[ { "content": "# local\nimport ivy\nfrom ivy.functional.frontends.numpy.func_wrapper import (\n to_ivy_arrays_and_back,\n from_zero_dim_arrays_to_scalar,\n)\n\n\n@to_ivy_arrays_and_back\ndef eig(a):\n return ivy.eig(a)\n\n\n@to_ivy_arrays_and_back\n@from_zero_dim_arrays_to_scalar\ndef eigh(a, /, UPLO=\"L\"):\n return ivy.eigh(a, UPLO=UPLO)\n\n\n@to_ivy_arrays_and_back\n@from_zero_dim_arrays_to_scalar\ndef eigvalsh(a, /, UPLO=\"L\"):\n return ivy.eigvalsh(a, UPLO=UPLO)\n", "path": "ivy/functional/frontends/numpy/linalg/matrix_eigenvalues.py" } ]
[ { "content": "# local\nimport ivy\nfrom ivy.functional.frontends.numpy.func_wrapper import (\n to_ivy_arrays_and_back,\n from_zero_dim_arrays_to_scalar,\n)\n\n\n@to_ivy_arrays_and_back\ndef eig(a):\n return ivy.eig(a)\n\n\n@to_ivy_arrays_and_back\n@from_zero_dim_arrays_to_scalar\ndef eigh(a, /, UPLO=\"L\"):\n return ivy.eigh(a, UPLO=UPLO)\n\n\n@to_ivy_arrays_and_back\ndef eigvals(a):\n return ivy.eig(a)[0]\n\n\n@to_ivy_arrays_and_back\n@from_zero_dim_arrays_to_scalar\ndef eigvalsh(a, /, UPLO=\"L\"):\n return ivy.eigvalsh(a, UPLO=UPLO)\n", "path": "ivy/functional/frontends/numpy/linalg/matrix_eigenvalues.py" } ]
diff --git a/ivy/functional/frontends/numpy/linalg/matrix_eigenvalues.py b/ivy/functional/frontends/numpy/linalg/matrix_eigenvalues.py index d0b98b00264c7..6c3a6e61f1265 100644 --- a/ivy/functional/frontends/numpy/linalg/matrix_eigenvalues.py +++ b/ivy/functional/frontends/numpy/linalg/matrix_eigenvalues.py @@ -17,6 +17,11 @@ def eigh(a, /, UPLO="L"): return ivy.eigh(a, UPLO=UPLO) +@to_ivy_arrays_and_back +def eigvals(a): + return ivy.eig(a)[0] + + @to_ivy_arrays_and_back @from_zero_dim_arrays_to_scalar def eigvalsh(a, /, UPLO="L"): diff --git a/ivy_tests/test_ivy/test_frontends/test_numpy/test_linalg/test_matrix_eigenvalues.py b/ivy_tests/test_ivy/test_frontends/test_numpy/test_linalg/test_matrix_eigenvalues.py index d240cef1dc48b..b76655cf0897d 100644 --- a/ivy_tests/test_ivy/test_frontends/test_numpy/test_linalg/test_matrix_eigenvalues.py +++ b/ivy_tests/test_ivy/test_frontends/test_numpy/test_linalg/test_matrix_eigenvalues.py @@ -130,6 +130,55 @@ def test_numpy_eigh( ) +@handle_frontend_test( + fn_tree="numpy.linalg.eigvals", + dtype_and_x=helpers.dtype_and_values( + available_dtypes=helpers.get_dtypes("float"), + min_value=0, + max_value=10, + shape=helpers.ints(min_value=2, max_value=4).map(lambda x: tuple([x, x])), + ).filter( + lambda x: "float16" not in x[0] + and "bfloat16" not in x[0] + and np.linalg.cond(x[1][0]) < 1 / sys.float_info.epsilon + and np.linalg.det(np.asarray(x[1][0])) != 0 + ), + test_with_out=st.just(False), +) +def test_numpy_eigvals( + dtype_and_x, + on_device, + fn_tree, + frontend, + test_flags, + backend_fw, +): + dtype, x = dtype_and_x + ret, frontend_ret = helpers.test_frontend_function( + input_dtypes=dtype, + backend_to_test=backend_fw, + frontend=frontend, + test_flags=test_flags, + fn_tree=fn_tree, + on_device=on_device, + test_values=False, + a=x, + ) + with BackendHandler.update_backend(backend_fw) as ivy_backend: + ret = np.sort( + np.array([ivy_backend.to_numpy(x).astype(np.float128) for x in ret]) + ) + frontend_ret = np.sort(np.array([x.astype(np.float128) for x in frontend_ret])) + assert_all_close( + ret_np=ret, + ret_from_gt_np=frontend_ret, + backend=backend_fw, + ground_truth_backend=frontend, + atol=1e-2, + rtol=1e-2, + ) + + # eigvalsh @handle_frontend_test( fn_tree="numpy.linalg.eigvalsh",
meltano__meltano-6609
bug: `meltano state list` with pattern - no such option ### Meltano Version 2.3.0 ### Python Version 3.8 ### Bug scope CLI (options, error messages, logging, etc.) ### Operating System Mac ### Description It looks like the `--pattern` argument thats in the docs https://docs.meltano.com/reference/command-line-interface#list isnt available on the CLI. ``` (meltano) Patricks-MBP:data pnadolny$ meltano --version meltano, version 2.3.0 (meltano) Patricks-MBP:data pnadolny$ meltano state list --pattern '*tap-gitlab*' 2022-07-25T21:31:25.438941Z [info ] Environment 'userdev' is active Usage: meltano state list [OPTIONS] [PATTERN] Try 'meltano state list --help' for help. Error: No such option: --pattern ``` ### Code _No response_
[ { "content": "\"\"\"State management in CLI.\"\"\"\nfrom __future__ import annotations\n\nimport json\nimport re\nfrom datetime import datetime as dt\nfrom functools import partial, reduce, wraps\nfrom operator import xor\n\nimport click\nimport structlog\n\nfrom meltano.cli.params import pass_project\nfrom meltano.core.block.parser import BlockParser\nfrom meltano.core.db import project_engine\nfrom meltano.core.job import Payload\nfrom meltano.core.project import Project\nfrom meltano.core.state_service import InvalidJobStateError, StateService\n\nfrom . import cli\nfrom .utils import InstrumentedCmd, InstrumentedGroup\n\nSTATE_SERVICE_KEY = \"state_service\"\n\nlogger = structlog.getLogger(__name__)\n\n\nclass MutuallyExclusiveOptionsError(Exception):\n \"\"\"Occurs when mutually exclusive options are provided incorrectly.\"\"\"\n\n def __init__(self, *options: str) -> None:\n \"\"\"Instantiate the error.\n\n Args:\n options: the mutually exclusive options that were incorrectly provided.\n \"\"\"\n super().__init__(*options)\n self.options = options\n\n def __str__(self) -> str:\n \"\"\"Represent the error as a string.\"\"\"\n return f\"Must provide exactly one of: {','.join(self.options)}\"\n\n\ndef _prompt_for_confirmation(prompt):\n \"\"\"Wrap destructive CLI commands which should prompt the user for confirmation.\"\"\"\n\n def wrapper(func):\n fun = click.option(\n \"--force\", is_flag=True, help=\"Don't prompt for confirmation.\"\n )(func)\n\n @wraps(func)\n def _wrapper(force=False, *args, **kwargs):\n if force or click.confirm(prompt):\n return fun(*args, **kwargs, force=force)\n else:\n click.secho(\"Aborting.\", fg=\"red\")\n\n return _wrapper\n\n return wrapper\n\n\nprompt_for_confirmation = partial(\n _prompt_for_confirmation, prompt=\"This is a destructive command. Continue?\"\n)\n\n\ndef state_service_from_state_id(project: Project, state_id: str) -> StateService | None:\n \"\"\"Instantiate by parsing a state_id.\"\"\"\n state_id_re = re.compile(r\"^(?P<env>.+)\\:(?P<tap>.+)-to-(?P<target>.+)$\")\n match = state_id_re.match(state_id)\n if match:\n # If the state_id matches convention (i.e., job has been run via \"meltano run\"),\n # try parsing into BlockSet.\n # This way, we get BlockSet validation and raise an error if no\n # plugin in the BlockSet has \"state\" capability\n try:\n if not project.active_environment:\n logger.warn(\n f\"Running state operation for environment '{match.group('env')}' outside of an environment\"\n )\n elif project.active_environment.name != match.group(\"env\"):\n logger.warn(\n f\"Environment '{match.group('env')}' used in state operation does not match current environment '{project.active_environment.name}'.\"\n )\n project.activate_environment(match.group(\"env\"))\n blocks = [match.group(\"tap\"), match.group(\"target\")]\n parser = BlockParser(logger, project, blocks)\n return next(parser.find_blocks()).state_service\n except Exception:\n logger.warn(\"No plugins found for provided state_id.\")\n # If provided state_id does not match convention (i.e., run via \"meltano elt\"),\n # use the standalone StateService in the CLI context.\n return None\n\n\[email protected](cls=InstrumentedGroup, name=\"state\", short_help=\"Manage Singer state.\")\[email protected]_context\n@pass_project(migrate=True)\ndef meltano_state(project: Project, ctx: click.Context):\n \"\"\"\n Manage state.\n\n \\b\\nRead more at https://docs.meltano.com/reference/command-line-interface#state\n \"\"\"\n _, sessionmaker = project_engine(project)\n session = sessionmaker()\n ctx.obj[STATE_SERVICE_KEY] = StateService(session) # noqa: WPS204\n\n\n@meltano_state.command(cls=InstrumentedCmd, name=\"list\")\[email protected](\"pattern\", required=False)\[email protected]_context\n@pass_project()\ndef list_state(\n project: Project, ctx: click.Context, pattern: str | None\n): # noqa: WPS125\n \"\"\"List all state_ids for this project.\n\n Optionally pass a glob-style pattern to filter state_ids by.\n \"\"\"\n state_service = ctx.obj[STATE_SERVICE_KEY]\n ctx.obj[\"legacy_tracker\"].track_meltano_state(\"list\")\n states = state_service.list_state(pattern)\n if states:\n for state_id, state in states.items():\n if state:\n try:\n state_service.validate_state(json.dumps(state))\n except (InvalidJobStateError, json.decoder.JSONDecodeError):\n click.secho(state_id, fg=\"red\")\n else:\n click.secho(state_id, fg=\"green\")\n else:\n click.secho(state_id, fg=\"yellow\")\n else:\n logger.info(\"No state IDs found.\")\n\n\n@meltano_state.command(cls=InstrumentedCmd, name=\"copy\")\n@prompt_for_confirmation(\n prompt=\"This will overwrite state for the destination. Continue?\"\n)\[email protected](\"src-state-id\", type=str)\[email protected](\"dst-state-id\", type=str)\n@pass_project(migrate=True)\[email protected]_context\ndef copy_state(\n ctx: click.Context,\n project: Project,\n src_state_id: str,\n dst_state_id: str,\n force: bool,\n):\n \"\"\"Copy state to another job id.\"\"\"\n # Retrieve state for copying\n state_service = (\n state_service_from_state_id(project, src_state_id) or ctx.obj[STATE_SERVICE_KEY]\n )\n ctx.obj[\"legacy_tracker\"].track_meltano_state(\"copy\", dst_state_id)\n\n state_service.copy_state(src_state_id, dst_state_id)\n\n logger.info(\n f\"State for {dst_state_id} was successfully copied from {src_state_id} at {dt.utcnow():%Y-%m-%d %H:%M:%S}.\" # noqa: WPS323\n )\n\n\n@meltano_state.command(cls=InstrumentedCmd, name=\"move\")\n@prompt_for_confirmation(\n prompt=\"This will clear the source state and overwrite destination state. Continue?\"\n)\[email protected](\"src-state-id\", type=str)\[email protected](\"dst-state-id\", type=str)\n@pass_project(migrate=True)\[email protected]_context\ndef move_state(\n ctx: click.Context,\n project: Project,\n src_state_id: str,\n dst_state_id: str,\n force: bool,\n):\n \"\"\"Move state to another job id, clearing the original.\"\"\"\n # Retrieve state for moveing\n state_service = (\n state_service_from_state_id(project, dst_state_id) or ctx.obj[STATE_SERVICE_KEY]\n )\n ctx.obj[\"legacy_tracker\"].track_meltano_state(\"move\", dst_state_id)\n\n state_service.move_state(src_state_id, dst_state_id)\n\n logger.info(\n f\"State for {src_state_id} was successfully moved to {dst_state_id} at {dt.utcnow():%Y-%m-%d %H:%M:%S}.\" # noqa: WPS323\n )\n\n\n@meltano_state.command(cls=InstrumentedCmd, name=\"merge\")\[email protected](\n \"--from-state-id\",\n type=str,\n help=\"Merge state from an existing state ID.\",\n)\[email protected](\n \"--input-file\",\n type=click.Path(exists=True),\n help=\"Merge state from a JSON file containing Singer state.\",\n)\[email protected](\"state-id\", type=str)\[email protected](\"state\", type=str, required=False)\n@pass_project(migrate=True)\[email protected]_context\ndef merge_state(\n ctx: click.Context,\n project: Project,\n state_id: str,\n state: str | None,\n input_file: click.Path | None,\n from_state_id: str | None,\n):\n \"\"\"Add bookmarks to existing state.\"\"\"\n state_service = (\n state_service_from_state_id(project, state_id) or ctx.obj[STATE_SERVICE_KEY]\n )\n ctx.obj[\"legacy_tracker\"].track_meltano_state(\"merge\", state_id)\n mutually_exclusive_options = [\"--input-file\", \"STATE\", \"--from-state-id\"]\n if not reduce(xor, map(bool, [state, input_file, from_state_id])):\n raise MutuallyExclusiveOptionsError(*mutually_exclusive_options)\n elif input_file:\n with open(input_file) as state_f:\n state_service.add_state(\n state_id, state_f.read(), payload_flags=Payload.INCOMPLETE_STATE\n )\n elif state:\n state_service.add_state(state_id, state, payload_flags=Payload.INCOMPLETE_STATE)\n elif from_state_id:\n state_service.merge_state(from_state_id, state_id)\n logger.info(\n f\"State for {state_id} was successfully merged at {dt.utcnow():%Y-%m-%d %H:%M:%S}.\" # noqa: WPS323\n )\n\n\n@meltano_state.command(cls=InstrumentedCmd, name=\"set\")\n@prompt_for_confirmation(\n prompt=\"This will overwrite the state's current value. Continue?\"\n)\[email protected](\n \"--input-file\",\n type=click.Path(exists=True),\n help=\"Set state from json file containing Singer state.\",\n)\[email protected](\"state-id\")\[email protected](\"state\", type=str, required=False)\n@pass_project(migrate=True)\[email protected]_context\ndef set_state(\n ctx: click.Context,\n project: Project,\n state_id: str,\n state: str | None,\n input_file: click.Path | None,\n force: bool,\n):\n \"\"\"Set state.\"\"\"\n state_service = (\n state_service_from_state_id(project, state_id) or ctx.obj[STATE_SERVICE_KEY]\n )\n ctx.obj[\"legacy_tracker\"].track_meltano_state(\"set\", state_id)\n if not reduce(xor, map(bool, [state, input_file])):\n raise MutuallyExclusiveOptionsError(\"--input-file\", \"STATE\")\n elif input_file:\n with open(input_file) as state_f:\n state_service.set_state(state_id, state_f.read())\n elif state:\n state_service.set_state(state_id, state)\n logger.info(\n f\"State for {state_id} was successfully set at {dt.utcnow():%Y-%m-%d %H:%M:%S}.\" # noqa: WPS323\n )\n\n\n@meltano_state.command(cls=InstrumentedCmd, name=\"get\") # noqa: WPS46\[email protected](\"state-id\")\n@pass_project(migrate=True)\[email protected]_context\ndef get_state(ctx: click.Context, project: Project, state_id: str): # noqa: WPS463\n \"\"\"Get state.\"\"\"\n state_service = (\n state_service_from_state_id(project, state_id) or ctx.obj[STATE_SERVICE_KEY]\n )\n ctx.obj[\"legacy_tracker\"].track_meltano_state(\"get\", state_id)\n retrieved_state = state_service.get_state(state_id)\n click.echo(json.dumps(retrieved_state))\n\n\n@meltano_state.command(cls=InstrumentedCmd, name=\"clear\")\n@prompt_for_confirmation(prompt=\"This will clear state for the job. Continue?\")\[email protected](\"state-id\")\n@pass_project(migrate=True)\[email protected]_context\ndef clear_state(ctx: click.Context, project: Project, state_id: str, force: bool):\n \"\"\"Clear state.\"\"\"\n state_service = (\n state_service_from_state_id(project, state_id) or ctx.obj[STATE_SERVICE_KEY]\n )\n ctx.obj[\"legacy_tracker\"].track_meltano_state(\"clear\", state_id)\n state_service.clear_state(state_id)\n", "path": "src/meltano/cli/state.py" } ]
[ { "content": "\"\"\"State management in CLI.\"\"\"\nfrom __future__ import annotations\n\nimport json\nimport re\nfrom datetime import datetime as dt\nfrom functools import partial, reduce, wraps\nfrom operator import xor\n\nimport click\nimport structlog\n\nfrom meltano.cli.params import pass_project\nfrom meltano.core.block.parser import BlockParser\nfrom meltano.core.db import project_engine\nfrom meltano.core.job import Payload\nfrom meltano.core.project import Project\nfrom meltano.core.state_service import InvalidJobStateError, StateService\n\nfrom . import cli\nfrom .utils import InstrumentedCmd, InstrumentedGroup\n\nSTATE_SERVICE_KEY = \"state_service\"\n\nlogger = structlog.getLogger(__name__)\n\n\nclass MutuallyExclusiveOptionsError(Exception):\n \"\"\"Occurs when mutually exclusive options are provided incorrectly.\"\"\"\n\n def __init__(self, *options: str) -> None:\n \"\"\"Instantiate the error.\n\n Args:\n options: the mutually exclusive options that were incorrectly provided.\n \"\"\"\n super().__init__(*options)\n self.options = options\n\n def __str__(self) -> str:\n \"\"\"Represent the error as a string.\"\"\"\n return f\"Must provide exactly one of: {','.join(self.options)}\"\n\n\ndef _prompt_for_confirmation(prompt):\n \"\"\"Wrap destructive CLI commands which should prompt the user for confirmation.\"\"\"\n\n def wrapper(func):\n fun = click.option(\n \"--force\", is_flag=True, help=\"Don't prompt for confirmation.\"\n )(func)\n\n @wraps(func)\n def _wrapper(force=False, *args, **kwargs):\n if force or click.confirm(prompt):\n return fun(*args, **kwargs, force=force)\n else:\n click.secho(\"Aborting.\", fg=\"red\")\n\n return _wrapper\n\n return wrapper\n\n\nprompt_for_confirmation = partial(\n _prompt_for_confirmation, prompt=\"This is a destructive command. Continue?\"\n)\n\n\ndef state_service_from_state_id(project: Project, state_id: str) -> StateService | None:\n \"\"\"Instantiate by parsing a state_id.\"\"\"\n state_id_re = re.compile(r\"^(?P<env>.+)\\:(?P<tap>.+)-to-(?P<target>.+)$\")\n match = state_id_re.match(state_id)\n if match:\n # If the state_id matches convention (i.e., job has been run via \"meltano run\"),\n # try parsing into BlockSet.\n # This way, we get BlockSet validation and raise an error if no\n # plugin in the BlockSet has \"state\" capability\n try:\n if not project.active_environment:\n logger.warn(\n f\"Running state operation for environment '{match.group('env')}' outside of an environment\"\n )\n elif project.active_environment.name != match.group(\"env\"):\n logger.warn(\n f\"Environment '{match.group('env')}' used in state operation does not match current environment '{project.active_environment.name}'.\"\n )\n project.activate_environment(match.group(\"env\"))\n blocks = [match.group(\"tap\"), match.group(\"target\")]\n parser = BlockParser(logger, project, blocks)\n return next(parser.find_blocks()).state_service\n except Exception:\n logger.warn(\"No plugins found for provided state_id.\")\n # If provided state_id does not match convention (i.e., run via \"meltano elt\"),\n # use the standalone StateService in the CLI context.\n return None\n\n\[email protected](cls=InstrumentedGroup, name=\"state\", short_help=\"Manage Singer state.\")\[email protected]_context\n@pass_project(migrate=True)\ndef meltano_state(project: Project, ctx: click.Context):\n \"\"\"\n Manage state.\n\n \\b\\nRead more at https://docs.meltano.com/reference/command-line-interface#state\n \"\"\"\n _, sessionmaker = project_engine(project)\n session = sessionmaker()\n ctx.obj[STATE_SERVICE_KEY] = StateService(session) # noqa: WPS204\n\n\n@meltano_state.command(cls=InstrumentedCmd, name=\"list\")\[email protected](\"--pattern\", type=str, help=\"Filter state IDs by pattern.\")\[email protected]_context\n@pass_project()\ndef list_state(\n project: Project, ctx: click.Context, pattern: str | None\n): # noqa: WPS125\n \"\"\"List all state_ids for this project.\n\n Optionally pass a glob-style pattern to filter state_ids by.\n \"\"\"\n state_service = ctx.obj[STATE_SERVICE_KEY]\n ctx.obj[\"legacy_tracker\"].track_meltano_state(\"list\")\n states = state_service.list_state(pattern)\n if states:\n for state_id, state in states.items():\n if state:\n try:\n state_service.validate_state(json.dumps(state))\n except (InvalidJobStateError, json.decoder.JSONDecodeError):\n click.secho(state_id, fg=\"red\")\n else:\n click.secho(state_id, fg=\"green\")\n else:\n click.secho(state_id, fg=\"yellow\")\n else:\n logger.info(\"No state IDs found.\")\n\n\n@meltano_state.command(cls=InstrumentedCmd, name=\"copy\")\n@prompt_for_confirmation(\n prompt=\"This will overwrite state for the destination. Continue?\"\n)\[email protected](\"src-state-id\", type=str)\[email protected](\"dst-state-id\", type=str)\n@pass_project(migrate=True)\[email protected]_context\ndef copy_state(\n ctx: click.Context,\n project: Project,\n src_state_id: str,\n dst_state_id: str,\n force: bool,\n):\n \"\"\"Copy state to another job id.\"\"\"\n # Retrieve state for copying\n state_service = (\n state_service_from_state_id(project, src_state_id) or ctx.obj[STATE_SERVICE_KEY]\n )\n ctx.obj[\"legacy_tracker\"].track_meltano_state(\"copy\", dst_state_id)\n\n state_service.copy_state(src_state_id, dst_state_id)\n\n logger.info(\n f\"State for {dst_state_id} was successfully copied from {src_state_id} at {dt.utcnow():%Y-%m-%d %H:%M:%S}.\" # noqa: WPS323\n )\n\n\n@meltano_state.command(cls=InstrumentedCmd, name=\"move\")\n@prompt_for_confirmation(\n prompt=\"This will clear the source state and overwrite destination state. Continue?\"\n)\[email protected](\"src-state-id\", type=str)\[email protected](\"dst-state-id\", type=str)\n@pass_project(migrate=True)\[email protected]_context\ndef move_state(\n ctx: click.Context,\n project: Project,\n src_state_id: str,\n dst_state_id: str,\n force: bool,\n):\n \"\"\"Move state to another job id, clearing the original.\"\"\"\n # Retrieve state for moveing\n state_service = (\n state_service_from_state_id(project, dst_state_id) or ctx.obj[STATE_SERVICE_KEY]\n )\n ctx.obj[\"legacy_tracker\"].track_meltano_state(\"move\", dst_state_id)\n\n state_service.move_state(src_state_id, dst_state_id)\n\n logger.info(\n f\"State for {src_state_id} was successfully moved to {dst_state_id} at {dt.utcnow():%Y-%m-%d %H:%M:%S}.\" # noqa: WPS323\n )\n\n\n@meltano_state.command(cls=InstrumentedCmd, name=\"merge\")\[email protected](\n \"--from-state-id\",\n type=str,\n help=\"Merge state from an existing state ID.\",\n)\[email protected](\n \"--input-file\",\n type=click.Path(exists=True),\n help=\"Merge state from a JSON file containing Singer state.\",\n)\[email protected](\"state-id\", type=str)\[email protected](\"state\", type=str, required=False)\n@pass_project(migrate=True)\[email protected]_context\ndef merge_state(\n ctx: click.Context,\n project: Project,\n state_id: str,\n state: str | None,\n input_file: click.Path | None,\n from_state_id: str | None,\n):\n \"\"\"Add bookmarks to existing state.\"\"\"\n state_service = (\n state_service_from_state_id(project, state_id) or ctx.obj[STATE_SERVICE_KEY]\n )\n ctx.obj[\"legacy_tracker\"].track_meltano_state(\"merge\", state_id)\n mutually_exclusive_options = [\"--input-file\", \"STATE\", \"--from-state-id\"]\n if not reduce(xor, map(bool, [state, input_file, from_state_id])):\n raise MutuallyExclusiveOptionsError(*mutually_exclusive_options)\n elif input_file:\n with open(input_file) as state_f:\n state_service.add_state(\n state_id, state_f.read(), payload_flags=Payload.INCOMPLETE_STATE\n )\n elif state:\n state_service.add_state(state_id, state, payload_flags=Payload.INCOMPLETE_STATE)\n elif from_state_id:\n state_service.merge_state(from_state_id, state_id)\n logger.info(\n f\"State for {state_id} was successfully merged at {dt.utcnow():%Y-%m-%d %H:%M:%S}.\" # noqa: WPS323\n )\n\n\n@meltano_state.command(cls=InstrumentedCmd, name=\"set\")\n@prompt_for_confirmation(\n prompt=\"This will overwrite the state's current value. Continue?\"\n)\[email protected](\n \"--input-file\",\n type=click.Path(exists=True),\n help=\"Set state from json file containing Singer state.\",\n)\[email protected](\"state-id\")\[email protected](\"state\", type=str, required=False)\n@pass_project(migrate=True)\[email protected]_context\ndef set_state(\n ctx: click.Context,\n project: Project,\n state_id: str,\n state: str | None,\n input_file: click.Path | None,\n force: bool,\n):\n \"\"\"Set state.\"\"\"\n state_service = (\n state_service_from_state_id(project, state_id) or ctx.obj[STATE_SERVICE_KEY]\n )\n ctx.obj[\"legacy_tracker\"].track_meltano_state(\"set\", state_id)\n if not reduce(xor, map(bool, [state, input_file])):\n raise MutuallyExclusiveOptionsError(\"--input-file\", \"STATE\")\n elif input_file:\n with open(input_file) as state_f:\n state_service.set_state(state_id, state_f.read())\n elif state:\n state_service.set_state(state_id, state)\n logger.info(\n f\"State for {state_id} was successfully set at {dt.utcnow():%Y-%m-%d %H:%M:%S}.\" # noqa: WPS323\n )\n\n\n@meltano_state.command(cls=InstrumentedCmd, name=\"get\") # noqa: WPS46\[email protected](\"state-id\")\n@pass_project(migrate=True)\[email protected]_context\ndef get_state(ctx: click.Context, project: Project, state_id: str): # noqa: WPS463\n \"\"\"Get state.\"\"\"\n state_service = (\n state_service_from_state_id(project, state_id) or ctx.obj[STATE_SERVICE_KEY]\n )\n ctx.obj[\"legacy_tracker\"].track_meltano_state(\"get\", state_id)\n retrieved_state = state_service.get_state(state_id)\n click.echo(json.dumps(retrieved_state))\n\n\n@meltano_state.command(cls=InstrumentedCmd, name=\"clear\")\n@prompt_for_confirmation(prompt=\"This will clear state for the job. Continue?\")\[email protected](\"state-id\")\n@pass_project(migrate=True)\[email protected]_context\ndef clear_state(ctx: click.Context, project: Project, state_id: str, force: bool):\n \"\"\"Clear state.\"\"\"\n state_service = (\n state_service_from_state_id(project, state_id) or ctx.obj[STATE_SERVICE_KEY]\n )\n ctx.obj[\"legacy_tracker\"].track_meltano_state(\"clear\", state_id)\n state_service.clear_state(state_id)\n", "path": "src/meltano/cli/state.py" } ]
diff --git a/src/meltano/cli/state.py b/src/meltano/cli/state.py index 04e22bf147..30e7a7fcd1 100644 --- a/src/meltano/cli/state.py +++ b/src/meltano/cli/state.py @@ -111,7 +111,7 @@ def meltano_state(project: Project, ctx: click.Context): @meltano_state.command(cls=InstrumentedCmd, name="list") [email protected]("pattern", required=False) [email protected]("--pattern", type=str, help="Filter state IDs by pattern.") @click.pass_context @pass_project() def list_state( diff --git a/tests/meltano/cli/test_state.py b/tests/meltano/cli/test_state.py index af0403d5e0..c0b2f4c170 100644 --- a/tests/meltano/cli/test_state.py +++ b/tests/meltano/cli/test_state.py @@ -87,7 +87,7 @@ def test_list_pattern( ): with mock.patch("meltano.cli.state.StateService", return_value=state_service): for (pattern, expected_result) in patterns_with_expected_results: - result = cli_runner.invoke(cli, ["state", "list", pattern]) + result = cli_runner.invoke(cli, ["state", "list", "--pattern", pattern]) assert_cli_runner(result) assert self.get_result_set(result) == expected_result
pypa__setuptools-3307
[Docs] Clarify that "Keywords" page is an API reference for `setuptools.setup` ### Summary https://setuptools.readthedocs.io/en/latest/references/keywords.html has no indicators what those keywords are for. It also doesn't show up if you search for "setuptools.setup" in the sidebar search, and is generally innaccessible via search engines as well. It also has poor formatting, likely due to mismanaged indentation on the page. ### OS / Environment _No response_ ### Additional Information It would be good to: - have an introductory section in the page, describing what it is for -- explicitly mention "setuptools.setup" in this as well. - provide anchors for each of the keyword arguments. - fix the indentation for the list of various arguments. ### Code of Conduct - [X] I agree to follow the PSF Code of Conduct
[ { "content": "extensions = ['sphinx.ext.autodoc', 'jaraco.packaging.sphinx', 'rst.linker']\n\nmaster_doc = \"index\"\n\nlink_files = {\n '../CHANGES.rst': dict(\n using=dict(\n BB='https://bitbucket.org',\n GH='https://github.com',\n ),\n replace=[\n dict(\n pattern=r'(?<!\\w)PR #(?P<pull>\\d+)',\n url='{package_url}/pull/{pull}',\n ),\n dict(\n pattern=r'(?<!\\w)(Issue )?#(?P<issue>\\d+)',\n url='{package_url}/issues/{issue}',\n ),\n dict(\n pattern=r'BB Pull Request ?#(?P<bb_pull_request>\\d+)',\n url='{BB}/pypa/setuptools/pull-request/{bb_pull_request}',\n ),\n dict(\n pattern=r'Distribute #(?P<distribute>\\d+)',\n url='{BB}/tarek/distribute/issue/{distribute}',\n ),\n dict(\n pattern=r'Buildout #(?P<buildout>\\d+)',\n url='{GH}/buildout/buildout/issues/{buildout}',\n ),\n dict(\n pattern=r'Old Setuptools #(?P<old_setuptools>\\d+)',\n url='http://bugs.python.org/setuptools/issue{old_setuptools}',\n ),\n dict(\n pattern=r'Jython #(?P<jython>\\d+)',\n url='http://bugs.jython.org/issue{jython}',\n ),\n dict(\n pattern=r'(Python #|bpo-)(?P<python>\\d+)',\n url='http://bugs.python.org/issue{python}',\n ),\n dict(\n pattern=r'Interop #(?P<interop>\\d+)',\n url='{GH}/pypa/interoperability-peps/issues/{interop}',\n ),\n dict(\n pattern=r'Pip #(?P<pip>\\d+)',\n url='{GH}/pypa/pip/issues/{pip}',\n ),\n dict(\n pattern=r'Packaging #(?P<packaging>\\d+)',\n url='{GH}/pypa/packaging/issues/{packaging}',\n ),\n dict(\n pattern=r'[Pp]ackaging (?P<packaging_ver>\\d+(\\.\\d+)+)',\n url='{GH}/pypa/packaging/blob/{packaging_ver}/CHANGELOG.rst',\n ),\n dict(\n pattern=r'(?<![`/\\w])PEP[- ](?P<pep_number>\\d+)',\n url='https://www.python.org/dev/peps/pep-{pep_number:0>4}/',\n ),\n dict(\n pattern=r'setuptools_svn #(?P<setuptools_svn>\\d+)',\n url='{GH}/jaraco/setuptools_svn/issues/{setuptools_svn}',\n ),\n dict(\n pattern=r'pypa/(?P<issue_repo>[\\-\\.\\w]+)#(?P<issue_number>\\d+)',\n url='{GH}/pypa/{issue_repo}/issues/{issue_number}',\n ),\n dict(\n pattern=r'pypa/(?P<commit_repo>[\\-\\.\\w]+)@(?P<commit_number>[\\da-f]+)',\n url='{GH}/pypa/{commit_repo}/commit/{commit_number}',\n ),\n dict(\n pattern=r'^(?m)((?P<scm_version>v?\\d+(\\.\\d+){1,2}))\\n[-=]+\\n',\n with_scm='{text}\\n{rev[timestamp]:%d %b %Y}\\n',\n ),\n ],\n ),\n}\n\n# Be strict about any broken references:\nnitpicky = True\n\n# Include Python intersphinx mapping to prevent failures\n# jaraco/skeleton#51\nextensions += ['sphinx.ext.intersphinx']\nintersphinx_mapping = {\n 'python': ('https://docs.python.org/3', None),\n}\n\nintersphinx_mapping.update({\n 'pypa-build': ('https://pypa-build.readthedocs.io/en/latest/', None)\n})\n\n# Add support for linking usernames\ngithub_url = 'https://github.com'\ngithub_repo_org = 'pypa'\ngithub_repo_name = 'setuptools'\ngithub_repo_slug = f'{github_repo_org}/{github_repo_name}'\ngithub_repo_url = f'{github_url}/{github_repo_slug}'\ngithub_sponsors_url = f'{github_url}/sponsors'\nextlinks = {\n 'user': (f'{github_sponsors_url}/%s', '@'), # noqa: WPS323\n 'pypi': ('https://pypi.org/project/%s', '%s'), # noqa: WPS323\n 'wiki': ('https://wikipedia.org/wiki/%s', '%s'), # noqa: WPS323\n}\nextensions += ['sphinx.ext.extlinks']\n\n# Ref: https://github.com/python-attrs/attrs/pull/571/files\\\n# #diff-85987f48f1258d9ee486e3191495582dR82\ndefault_role = 'any'\n\n# HTML theme\nhtml_theme = 'furo'\nhtml_logo = \"images/logo.svg\"\n\nhtml_theme_options = {\n \"sidebar_hide_name\": True,\n \"light_css_variables\": {\n \"color-brand-primary\": \"#336790\", # \"blue\"\n \"color-brand-content\": \"#336790\",\n },\n \"dark_css_variables\": {\n \"color-brand-primary\": \"#E5B62F\", # \"yellow\"\n \"color-brand-content\": \"#E5B62F\",\n },\n}\n\n# Add support for inline tabs\nextensions += ['sphinx_inline_tabs']\n\n# Support for distutils\n\n# Ref: https://stackoverflow.com/a/30624034/595220\nnitpick_ignore = [\n ('c:func', 'SHGetSpecialFolderPath'), # ref to MS docs\n ('envvar', 'DISTUTILS_DEBUG'), # undocumented\n ('envvar', 'HOME'), # undocumented\n ('envvar', 'PLAT'), # undocumented\n ('py:attr', 'CCompiler.language_map'), # undocumented\n ('py:attr', 'CCompiler.language_order'), # undocumented\n ('py:class', 'distutils.dist.Distribution'), # undocumented\n ('py:class', 'distutils.extension.Extension'), # undocumented\n ('py:class', 'BorlandCCompiler'), # undocumented\n ('py:class', 'CCompiler'), # undocumented\n ('py:class', 'CygwinCCompiler'), # undocumented\n ('py:class', 'distutils.dist.DistributionMetadata'), # undocumented\n ('py:class', 'FileList'), # undocumented\n ('py:class', 'IShellLink'), # ref to MS docs\n ('py:class', 'MSVCCompiler'), # undocumented\n ('py:class', 'OptionDummy'), # undocumented\n ('py:class', 'UnixCCompiler'), # undocumented\n ('py:exc', 'CompileError'), # undocumented\n ('py:exc', 'DistutilsExecError'), # undocumented\n ('py:exc', 'DistutilsFileError'), # undocumented\n ('py:exc', 'LibError'), # undocumented\n ('py:exc', 'LinkError'), # undocumented\n ('py:exc', 'PreprocessError'), # undocumented\n ('py:func', 'distutils.CCompiler.new_compiler'), # undocumented\n # undocumented:\n ('py:func', 'distutils.dist.DistributionMetadata.read_pkg_file'),\n ('py:func', 'distutils.file_util._copy_file_contents'), # undocumented\n ('py:func', 'distutils.log.debug'), # undocumented\n ('py:func', 'distutils.spawn.find_executable'), # undocumented\n ('py:func', 'distutils.spawn.spawn'), # undocumented\n # TODO: check https://docutils.rtfd.io in the future\n ('py:mod', 'docutils'), # there's no Sphinx site documenting this\n]\n\n# Allow linking objects on other Sphinx sites seamlessly:\nintersphinx_mapping.update(\n python2=('https://docs.python.org/2', None),\n python=('https://docs.python.org/3', None),\n)\n\n# Add support for the unreleased \"next-version\" change notes\nextensions += ['sphinxcontrib.towncrier']\n# Extension needs a path from here to the towncrier config.\ntowncrier_draft_working_directory = '..'\n# Avoid an empty section for unpublished changes.\ntowncrier_draft_include_empty = False\n\nextensions += ['jaraco.tidelift']\n\n# Add icons (aka \"favicons\") to documentation\nextensions += ['sphinx-favicon']\nhtml_static_path = ['images'] # should contain the folder with icons\n\n# List of dicts with <link> HTML attributes\n# static-file points to files in the html_static_path (href is computed)\nfavicons = [\n { # \"Catch-all\" goes first, otherwise some browsers will overwrite\n \"rel\": \"icon\",\n \"type\": \"image/svg+xml\",\n \"static-file\": \"logo-symbol-only.svg\",\n \"sizes\": \"any\"\n },\n { # Version with thicker strokes for better visibility at smaller sizes\n \"rel\": \"icon\",\n \"type\": \"image/svg+xml\",\n \"static-file\": \"favicon.svg\",\n \"sizes\": \"16x16 24x24 32x32 48x48\"\n },\n # rel=\"apple-touch-icon\" does not support SVG yet\n]\n\nintersphinx_mapping['pip'] = 'https://pip.pypa.io/en/latest', None\nintersphinx_mapping['PyPUG'] = ('https://packaging.python.org/en/latest/', None)\nintersphinx_mapping['packaging'] = ('https://packaging.pypa.io/en/latest/', None)\nintersphinx_mapping['importlib-resources'] = (\n 'https://importlib-resources.readthedocs.io/en/latest', None\n)\n", "path": "docs/conf.py" } ]
[ { "content": "extensions = ['sphinx.ext.autodoc', 'jaraco.packaging.sphinx', 'rst.linker']\n\nmaster_doc = \"index\"\n\nlink_files = {\n '../CHANGES.rst': dict(\n using=dict(\n BB='https://bitbucket.org',\n GH='https://github.com',\n ),\n replace=[\n dict(\n pattern=r'(?<!\\w)PR #(?P<pull>\\d+)',\n url='{package_url}/pull/{pull}',\n ),\n dict(\n pattern=r'(?<!\\w)(Issue )?#(?P<issue>\\d+)',\n url='{package_url}/issues/{issue}',\n ),\n dict(\n pattern=r'BB Pull Request ?#(?P<bb_pull_request>\\d+)',\n url='{BB}/pypa/setuptools/pull-request/{bb_pull_request}',\n ),\n dict(\n pattern=r'Distribute #(?P<distribute>\\d+)',\n url='{BB}/tarek/distribute/issue/{distribute}',\n ),\n dict(\n pattern=r'Buildout #(?P<buildout>\\d+)',\n url='{GH}/buildout/buildout/issues/{buildout}',\n ),\n dict(\n pattern=r'Old Setuptools #(?P<old_setuptools>\\d+)',\n url='http://bugs.python.org/setuptools/issue{old_setuptools}',\n ),\n dict(\n pattern=r'Jython #(?P<jython>\\d+)',\n url='http://bugs.jython.org/issue{jython}',\n ),\n dict(\n pattern=r'(Python #|bpo-)(?P<python>\\d+)',\n url='http://bugs.python.org/issue{python}',\n ),\n dict(\n pattern=r'Interop #(?P<interop>\\d+)',\n url='{GH}/pypa/interoperability-peps/issues/{interop}',\n ),\n dict(\n pattern=r'Pip #(?P<pip>\\d+)',\n url='{GH}/pypa/pip/issues/{pip}',\n ),\n dict(\n pattern=r'Packaging #(?P<packaging>\\d+)',\n url='{GH}/pypa/packaging/issues/{packaging}',\n ),\n dict(\n pattern=r'[Pp]ackaging (?P<packaging_ver>\\d+(\\.\\d+)+)',\n url='{GH}/pypa/packaging/blob/{packaging_ver}/CHANGELOG.rst',\n ),\n dict(\n pattern=r'(?<![`/\\w])PEP[- ](?P<pep_number>\\d+)',\n url='https://www.python.org/dev/peps/pep-{pep_number:0>4}/',\n ),\n dict(\n pattern=r'setuptools_svn #(?P<setuptools_svn>\\d+)',\n url='{GH}/jaraco/setuptools_svn/issues/{setuptools_svn}',\n ),\n dict(\n pattern=r'pypa/(?P<issue_repo>[\\-\\.\\w]+)#(?P<issue_number>\\d+)',\n url='{GH}/pypa/{issue_repo}/issues/{issue_number}',\n ),\n dict(\n pattern=r'pypa/(?P<commit_repo>[\\-\\.\\w]+)@(?P<commit_number>[\\da-f]+)',\n url='{GH}/pypa/{commit_repo}/commit/{commit_number}',\n ),\n dict(\n pattern=r'^(?m)((?P<scm_version>v?\\d+(\\.\\d+){1,2}))\\n[-=]+\\n',\n with_scm='{text}\\n{rev[timestamp]:%d %b %Y}\\n',\n ),\n ],\n ),\n}\n\n# Be strict about any broken references:\nnitpicky = True\n\n# Include Python intersphinx mapping to prevent failures\n# jaraco/skeleton#51\nextensions += ['sphinx.ext.intersphinx']\nintersphinx_mapping = {\n 'python': ('https://docs.python.org/3', None),\n}\n\nintersphinx_mapping.update({\n 'pypa-build': ('https://pypa-build.readthedocs.io/en/latest/', None)\n})\n\n# Add support for linking usernames\ngithub_url = 'https://github.com'\ngithub_repo_org = 'pypa'\ngithub_repo_name = 'setuptools'\ngithub_repo_slug = f'{github_repo_org}/{github_repo_name}'\ngithub_repo_url = f'{github_url}/{github_repo_slug}'\ngithub_sponsors_url = f'{github_url}/sponsors'\nextlinks = {\n 'user': (f'{github_sponsors_url}/%s', '@'), # noqa: WPS323\n 'pypi': ('https://pypi.org/project/%s', '%s'), # noqa: WPS323\n 'wiki': ('https://wikipedia.org/wiki/%s', '%s'), # noqa: WPS323\n}\nextensions += ['sphinx.ext.extlinks']\n\n# Ref: https://github.com/python-attrs/attrs/pull/571/files\\\n# #diff-85987f48f1258d9ee486e3191495582dR82\ndefault_role = 'any'\n\n# HTML theme\nhtml_theme = 'furo'\nhtml_logo = \"images/logo.svg\"\n\nhtml_theme_options = {\n \"sidebar_hide_name\": True,\n \"light_css_variables\": {\n \"color-brand-primary\": \"#336790\", # \"blue\"\n \"color-brand-content\": \"#336790\",\n },\n \"dark_css_variables\": {\n \"color-brand-primary\": \"#E5B62F\", # \"yellow\"\n \"color-brand-content\": \"#E5B62F\",\n },\n}\n\n# Redirect old docs so links and references in the ecosystem don't break\nextensions += ['sphinx_reredirects']\nredirects = {\n \"userguide/keywords\": \"/deprecated/changed_keywords.html\",\n}\n\n# Add support for inline tabs\nextensions += ['sphinx_inline_tabs']\n\n# Support for distutils\n\n# Ref: https://stackoverflow.com/a/30624034/595220\nnitpick_ignore = [\n ('c:func', 'SHGetSpecialFolderPath'), # ref to MS docs\n ('envvar', 'DISTUTILS_DEBUG'), # undocumented\n ('envvar', 'HOME'), # undocumented\n ('envvar', 'PLAT'), # undocumented\n ('py:attr', 'CCompiler.language_map'), # undocumented\n ('py:attr', 'CCompiler.language_order'), # undocumented\n ('py:class', 'distutils.dist.Distribution'), # undocumented\n ('py:class', 'distutils.extension.Extension'), # undocumented\n ('py:class', 'BorlandCCompiler'), # undocumented\n ('py:class', 'CCompiler'), # undocumented\n ('py:class', 'CygwinCCompiler'), # undocumented\n ('py:class', 'distutils.dist.DistributionMetadata'), # undocumented\n ('py:class', 'FileList'), # undocumented\n ('py:class', 'IShellLink'), # ref to MS docs\n ('py:class', 'MSVCCompiler'), # undocumented\n ('py:class', 'OptionDummy'), # undocumented\n ('py:class', 'UnixCCompiler'), # undocumented\n ('py:exc', 'CompileError'), # undocumented\n ('py:exc', 'DistutilsExecError'), # undocumented\n ('py:exc', 'DistutilsFileError'), # undocumented\n ('py:exc', 'LibError'), # undocumented\n ('py:exc', 'LinkError'), # undocumented\n ('py:exc', 'PreprocessError'), # undocumented\n ('py:func', 'distutils.CCompiler.new_compiler'), # undocumented\n # undocumented:\n ('py:func', 'distutils.dist.DistributionMetadata.read_pkg_file'),\n ('py:func', 'distutils.file_util._copy_file_contents'), # undocumented\n ('py:func', 'distutils.log.debug'), # undocumented\n ('py:func', 'distutils.spawn.find_executable'), # undocumented\n ('py:func', 'distutils.spawn.spawn'), # undocumented\n # TODO: check https://docutils.rtfd.io in the future\n ('py:mod', 'docutils'), # there's no Sphinx site documenting this\n]\n\n# Allow linking objects on other Sphinx sites seamlessly:\nintersphinx_mapping.update(\n python2=('https://docs.python.org/2', None),\n python=('https://docs.python.org/3', None),\n)\n\n# Add support for the unreleased \"next-version\" change notes\nextensions += ['sphinxcontrib.towncrier']\n# Extension needs a path from here to the towncrier config.\ntowncrier_draft_working_directory = '..'\n# Avoid an empty section for unpublished changes.\ntowncrier_draft_include_empty = False\n\nextensions += ['jaraco.tidelift']\n\n# Add icons (aka \"favicons\") to documentation\nextensions += ['sphinx-favicon']\nhtml_static_path = ['images'] # should contain the folder with icons\n\n# List of dicts with <link> HTML attributes\n# static-file points to files in the html_static_path (href is computed)\nfavicons = [\n { # \"Catch-all\" goes first, otherwise some browsers will overwrite\n \"rel\": \"icon\",\n \"type\": \"image/svg+xml\",\n \"static-file\": \"logo-symbol-only.svg\",\n \"sizes\": \"any\"\n },\n { # Version with thicker strokes for better visibility at smaller sizes\n \"rel\": \"icon\",\n \"type\": \"image/svg+xml\",\n \"static-file\": \"favicon.svg\",\n \"sizes\": \"16x16 24x24 32x32 48x48\"\n },\n # rel=\"apple-touch-icon\" does not support SVG yet\n]\n\nintersphinx_mapping['pip'] = 'https://pip.pypa.io/en/latest', None\nintersphinx_mapping['PyPUG'] = ('https://packaging.python.org/en/latest/', None)\nintersphinx_mapping['packaging'] = ('https://packaging.pypa.io/en/latest/', None)\nintersphinx_mapping['importlib-resources'] = (\n 'https://importlib-resources.readthedocs.io/en/latest', None\n)\n", "path": "docs/conf.py" } ]
diff --git a/changelog.d/3307.doc.rst b/changelog.d/3307.doc.rst new file mode 100644 index 0000000000..cdab873785 --- /dev/null +++ b/changelog.d/3307.doc.rst @@ -0,0 +1,4 @@ +Added introduction to references/keywords +Added deprecation tags to test kwargs +Moved userguide/keywords to deprecated section +Clarified in deprecated doc what keywords came from distutils and which were added or changed by setuptools diff --git a/docs/conf.py b/docs/conf.py index 4ebb521cf6..1023539c62 100644 --- a/docs/conf.py +++ b/docs/conf.py @@ -129,6 +129,12 @@ }, } +# Redirect old docs so links and references in the ecosystem don't break +extensions += ['sphinx_reredirects'] +redirects = { + "userguide/keywords": "/deprecated/changed_keywords.html", +} + # Add support for inline tabs extensions += ['sphinx_inline_tabs'] diff --git a/docs/deprecated/changed_keywords.rst b/docs/deprecated/changed_keywords.rst new file mode 100644 index 0000000000..7db372f2fe --- /dev/null +++ b/docs/deprecated/changed_keywords.rst @@ -0,0 +1,112 @@ +New and Changed ``setup()`` Keywords +==================================== + +This document tracks historical differences between ``setuptools`` and +``distutils``. + +Since ``distutils`` was scheduled for removal from the standard library in +Python 3.12, and ``setuptools`` started its adoption, these differences became less +relevant. +Please check :doc:`/references/keywords` for a complete list of keyword +arguments that can be passed to the ``setuptools.setup()`` function and +a their full description. + +.. tab:: Supported by both ``distutils`` and ``setuptoools`` + + ``name`` string + + ``version`` string + + ``description`` string + + ``long_description`` string + + ``long_description_content_type`` string + + ``author`` string + + ``author_email`` string + + ``maintainer`` string + + ``maintainer_email`` string + + ``url`` string + + ``download_url`` string + + ``packages`` list + + ``py_modules`` list + + ``scripts`` list + + ``ext_package`` string + + ``ext_modules`` list + + ``classifiers`` list + + ``distclass`` Distribution subclass + + ``script_name`` string + + ``script_args`` list + + ``options`` dictionary + + ``license`` string + + ``license_file`` string **deprecated** + + ``license_files`` list + + ``keywords`` string or list + + ``platforms`` list + + ``cmdclass`` dictionary + + ``data_files`` list **deprecated** + + ``package_dir`` dictionary + + ``requires`` string or list **deprecated** + + ``obsoletes`` list **deprecated** + + ``provides`` list + +.. tab:: Added or changed by ``setuptoools`` + + ``include_package_data`` bool + + ``exclude_package_data`` dictionary + + ``package_data`` dictionary + + ``zip_safe`` bool + + ``install_requires`` string or list + + ``entry_points`` dictionary + + ``extras_require`` dictionary + + ``python_requires`` string + + ``setup_requires`` string or list **deprecated** + + ``dependency_links`` list **deprecated** + + ``namespace_packages`` list + + ``test_suite`` string or function **deprecated** + + ``tests_require`` string or list **deprecated** + + ``test_loader`` class **deprecated** + + ``eager_resources`` list + + ``project_urls`` dictionary diff --git a/docs/deprecated/index.rst b/docs/deprecated/index.rst index 59fc7befd0..8169b3b70c 100644 --- a/docs/deprecated/index.rst +++ b/docs/deprecated/index.rst @@ -13,6 +13,7 @@ objectives. .. toctree:: :maxdepth: 1 + changed_keywords python_eggs easy_install distutils/index diff --git a/docs/references/keywords.rst b/docs/references/keywords.rst index c26b9d497d..f231b2b8ee 100644 --- a/docs/references/keywords.rst +++ b/docs/references/keywords.rst @@ -2,123 +2,210 @@ Keywords ======== +The following are keywords ``setuptools.setup()`` accepts. +They allow configuring the build process for a Python distribution or adding +metadata via a ``setup.py`` script placed at the root of your project. +All of them are optional; you do not have to supply them unless you need the +associated ``setuptools`` feature. + +Metadata and configuration supplied via ``setup()`` is complementary to (and +may be overwritten by) the information present in ``setup.cfg`` and ``pyproject.toml``. +Some important metadata, such as ``name`` and ``version``, may assume +a default *degenerate* value if not specified. + +Users are strongly encouraged to use a declarative config either via +:doc:`setup.cfg </userguide/declarative_config>` or :doc:`pyproject.toml +</userguide/pyproject_config>` and only rely on ``setup.py`` if they need to +tap into special behaviour that requires scripting (such as building C +extensions). + +.. note:: + When using declarative configs via ``pyproject.toml`` users can still keep a + very simple ``setup.py`` just to ensure editable installs are supported, for + example:: + + from setuptools import setup + + setup() + + Future versions of ``setuptools`` may support editable installs even + without ``setup.py``. + + +.. _keyword/name: + ``name`` A string specifying the name of the package. +.. _keyword/version: + ``version`` A string specifying the version number of the package. +.. _keyword/description: + ``description`` A string describing the package in a single line. +.. _keyword/long_description: + ``long_description`` A string providing a longer description of the package. +.. _keyword/long_description_content_type: + ``long_description_content_type`` A string specifying the content type is used for the ``long_description`` (e.g. ``text/markdown``) +.. _keyword/author: + ``author`` A string specifying the author of the package. +.. _keyword/author_email: + ``author_email`` A string specifying the email address of the package author. +.. _keyword/maintainer: + ``maintainer`` A string specifying the name of the current maintainer, if different from the author. Note that if the maintainer is provided, setuptools will use it as the author in ``PKG-INFO``. +.. _keyword/maintainer_email: + ``maintainer_email`` A string specifying the email address of the current maintainer, if different from the author. +.. _keyword/url: + ``url`` A string specifying the URL for the package homepage. +.. _keyword/download_url: + ``download_url`` A string specifying the URL to download the package. +.. _keyword/packages: + ``packages`` A list of strings specifying the packages that setuptools will manipulate. +.. _keyword/py_modules: + ``py_modules`` A list of strings specifying the modules that setuptools will manipulate. +.. _keyword/scripts: + ``scripts`` A list of strings specifying the standalone script files to be built and installed. +.. _keyword/ext_package: + ``ext_package`` A string specifying the base package name for the extensions provided by this package. +.. _keyword/ext_modules: + ``ext_modules`` A list of instances of ``setuptools.Extension`` providing the list of Python extensions to be built. +.. _keyword/classifiers: + ``classifiers`` A list of strings describing the categories for the package. +.. _keyword/distclass: + ``distclass`` A subclass of ``Distribution`` to use. +.. _keyword/script_name: + ``script_name`` A string specifying the name of the setup.py script -- defaults to ``sys.argv[0]`` +.. _keyword/script_args: + ``script_args`` A list of strings defining the arguments to supply to the setup script. +.. _keyword/options: + ``options`` A dictionary providing the default options for the setup script. +.. _keyword/license: + ``license`` A string specifying the license of the package. -``license_file`` +.. _keyword/license_file: +``license_file`` .. warning:: ``license_file`` is deprecated. Use ``license_files`` instead. -``license_files`` +.. _keyword/license_files: +``license_files`` A list of glob patterns for license related files that should be included. If neither ``license_file`` nor ``license_files`` is specified, this option defaults to ``LICEN[CS]E*``, ``COPYING*``, ``NOTICE*``, and ``AUTHORS*``. +.. _keyword/keywords: + ``keywords`` A list of strings or a comma-separated string providing descriptive meta-data. See: `PEP 0314`_. .. _PEP 0314: https://www.python.org/dev/peps/pep-0314/ +.. _keyword/platforms: + ``platforms`` A list of strings or comma-separated string. +.. _keyword/cmdclass: + ``cmdclass`` A dictionary providing a mapping of command names to ``Command`` subclasses. -``data_files`` +.. _keyword/data_files: +``data_files`` .. warning:: ``data_files`` is deprecated. It does not work with wheels, so it should be avoided. A list of strings specifying the data files to install. +.. _keyword/package_dir: + ``package_dir`` A dictionary providing a mapping of package to directory names. -``requires`` +.. _keyword/requires: +``requires`` .. warning:: ``requires`` is superseded by ``install_requires`` and should not be used anymore. -``obsoletes`` +.. _keyword/obsoletes: +``obsoletes`` .. warning:: ``obsoletes`` is currently ignored by ``pip``. @@ -135,8 +222,9 @@ Keywords e.g. Gorgon 2.3 gets subsumed into Torqued Python 1.0. When you install Torqued Python, the Gorgon distribution should be removed. -``provides`` +.. _keyword/provides: +``provides`` .. warning:: ``provides`` is currently ignored by ``pip``. @@ -164,7 +252,7 @@ Keywords Each package may be followed by an environment marker after a semicolon (e.g. ``foo; os_name == "posix"``). -.. Below are setuptools keywords, above are distutils +.. _keyword/include_package_data: ``include_package_data`` If set to ``True``, this tells ``setuptools`` to automatically include any @@ -172,12 +260,16 @@ Keywords your ``MANIFEST.in`` file. For more information, see the section on :ref:`Including Data Files`. +.. _keyword/exclude_package_data: + ``exclude_package_data`` A dictionary mapping package names to lists of glob patterns that should be *excluded* from your package directories. You can use this to trim back any excess files included by ``include_package_data``. For a complete description and examples, see the section on :ref:`Including Data Files`. +.. _keyword/package_data: + ``package_data`` A dictionary mapping package names to lists of glob patterns. For a complete description and examples, see the section on :ref:`Including Data @@ -187,17 +279,23 @@ Keywords in source control or are files that you don't want to include in your source distribution.) +.. _keyword/zip_safe: + ``zip_safe`` A boolean (True or False) flag specifying whether the project can be safely installed and run from a zip file. If this argument is not supplied, the ``bdist_egg`` command will have to analyze all of your project's contents for possible problems each time it builds an egg. +.. _keyword/install_requires: + ``install_requires`` A string or list of strings specifying what other distributions need to be installed when this one is. See the section on :ref:`Declaring Dependencies` for details and examples of the format of this argument. +.. _keyword/entry_points: + ``entry_points`` A dictionary mapping entry point group names to strings or lists of strings defining the entry points. Entry points are used to support dynamic @@ -206,28 +304,33 @@ Keywords of this argument. In addition, this keyword is used to support :ref:`Automatic Script Creation <entry_points>`. +.. _keyword/extras_require: + ``extras_require`` A dictionary mapping names of "extras" (optional features of your project) to strings or lists of strings specifying what other distributions must be installed to support those features. See the section on :ref:`Declaring Dependencies` for details and examples of the format of this argument. +.. _keyword/python_requires: + ``python_requires`` A string corresponding to a version specifier (as defined in PEP 440) for the Python version, used to specify the Requires-Python defined in PEP 345. -``setup_requires`` +.. _keyword/setup_requires: +``setup_requires`` .. warning:: - Using ``setup_requires`` is discouraged in favor of `PEP-518`_ + Using ``setup_requires`` is discouraged in favor of :pep:`518`. A string or list of strings specifying what other distributions need to be present in order for the *setup script* to run. ``setuptools`` will - attempt to obtain these (even going so far as to download them using - ``EasyInstall``) before processing the rest of the setup script or commands. - This argument is needed if you are using distutils extensions as part of - your build process; for example, extensions that process setup() arguments - and turn them into EGG-INFO metadata files. + attempt to obtain these before processing the + rest of the setup script or commands. This argument is needed if you + are using distutils extensions as part of your build process; for + example, extensions that process setup() arguments and turn them into + EGG-INFO metadata files. (Note: projects listed in ``setup_requires`` will NOT be automatically installed on the system where the setup script is being run. They are @@ -236,18 +339,18 @@ Keywords when the setup script is run, you should add them to ``install_requires`` **and** ``setup_requires``.) -.. _PEP-518: http://www.python.org/dev/peps/pep-0518/ +.. _keyword/dependency_links: ``dependency_links`` - .. warning:: ``dependency_links`` is deprecated. It is not supported anymore by pip. A list of strings naming URLs to be searched when satisfying dependencies. These links will be used if needed to install packages specified by ``setup_requires`` or ``tests_require``. They will also be written into - the egg's metadata for use by tools like EasyInstall to use when installing - an ``.egg`` file. + the egg's metadata for use during install by tools that support them. + +.. _keyword/namespace_packages: ``namespace_packages`` A list of strings naming the project's "namespace packages". A namespace @@ -261,6 +364,8 @@ Keywords does not contain any code other than a namespace declaration. See the section on :ref:`Namespace Packages` for more information. +.. _keyword/test_suite: + ``test_suite`` A string naming a ``unittest.TestCase`` subclass (or a package or module containing one or more of them, or a method of such a subclass), or naming @@ -270,27 +375,36 @@ Keywords added to the tests to be run. If the named suite is a package, any submodules and subpackages are recursively added to the overall test suite. - Specifying this argument enables use of the :ref:`test` command to run the + Specifying this argument enables use of the :ref:`test <test>` command to run the specified test suite, e.g. via ``setup.py test``. See the section on the - :ref:`test` command below for more details. + :ref:`test <test>` command below for more details. + + .. warning:: + .. deprecated:: 41.5.0 + The test command will be removed in a future version of ``setuptools``, + alongside any test configuration parameter. - New in 41.5.0: Deprecated the test command. +.. _keyword/tests_require: ``tests_require`` If your project's tests need one or more additional packages besides those needed to install it, you can use this option to specify them. It should be a string or list of strings specifying what other distributions need to be present for the package's tests to run. When you run the ``test`` - command, ``setuptools`` will attempt to obtain these (even going - so far as to download them using ``EasyInstall``). Note that these - required projects will *not* be installed on the system where the tests - are run, but only downloaded to the project's setup directory if they're - not already installed locally. + command, ``setuptools`` will attempt to obtain these. + Note that these required projects will *not* be installed on + the system where the tests are run, but only downloaded to the project's setup + directory if they're not already installed locally. - New in 41.5.0: Deprecated the test command. + .. warning:: + .. deprecated:: 41.5.0 + The test command will be removed in a future version of ``setuptools``, + alongside any test configuration parameter. .. _test_loader: +.. _keyword/test_loader: + ``test_loader`` If you would like to use a different way of finding tests to run than what setuptools normally uses, you can specify a module name and class name in @@ -312,7 +426,12 @@ Keywords as long as you use the ``tests_require`` option to ensure that the package containing the loader class is available when the ``test`` command is run. - New in 41.5.0: Deprecated the test command. + .. warning:: + .. deprecated:: 41.5.0 + The test command will be removed in a future version of ``setuptools``, + alongside any test configuration parameter. + +.. _keyword/eager_resources: ``eager_resources`` A list of strings naming resources that should be extracted together, if @@ -330,6 +449,8 @@ Keywords mess with it. For more details on how this argument works, see the section below on :ref:`Automatic Resource Extraction`. +.. _keyword/project_urls: + ``project_urls`` An arbitrary map of URL names to hyperlinks, allowing more extensible documentation of where various resources can be found than the simple diff --git a/docs/userguide/index.rst b/docs/userguide/index.rst index 49655acdd0..74e9b1e4ce 100644 --- a/docs/userguide/index.rst +++ b/docs/userguide/index.rst @@ -32,7 +32,6 @@ quickstart provides an overview of the new workflow. extension declarative_config pyproject_config - keywords commands functionalities_rewrite miscellaneous diff --git a/docs/userguide/keywords.rst b/docs/userguide/keywords.rst deleted file mode 100644 index 5388ffea7f..0000000000 --- a/docs/userguide/keywords.rst +++ /dev/null @@ -1,163 +0,0 @@ -New and Changed ``setup()`` Keywords -==================================== - -The following keyword arguments to ``setup()`` are added or changed by -``setuptools``. All of them are optional; you do not have to supply them -unless you need the associated ``setuptools`` feature. - -``include_package_data`` - If set to ``True``, this tells ``setuptools`` to automatically include any - data files it finds inside your package directories that are specified by - your ``MANIFEST.in`` file. For more information, see the section on - :ref:`Including Data Files`. - -``exclude_package_data`` - A dictionary mapping package names to lists of glob patterns that should - be *excluded* from your package directories. You can use this to trim back - any excess files included by ``include_package_data``. For a complete - description and examples, see the section on :ref:`Including Data Files`. - -``package_data`` - A dictionary mapping package names to lists of glob patterns. For a - complete description and examples, see the section on :ref:`Including - Data Files`. You do not need to use this option if you are using - ``include_package_data``, unless you need to add e.g. files that are - generated by your setup script and build process. (And are therefore not - in source control or are files that you don't want to include in your - source distribution.) - -``zip_safe`` - A boolean (True or False) flag specifying whether the project can be - safely installed and run from a zip file. If this argument is not - supplied, the ``bdist_egg`` command will have to analyze all of your - project's contents for possible problems each time it builds an egg. - -``install_requires`` - A string or list of strings specifying what other distributions need to - be installed when this one is. See the section on :ref:`Declaring - Dependencies` for details and examples of the format of this argument. - -``entry_points`` - A dictionary mapping entry point group names to strings or lists of strings - defining the entry points. Entry points are used to support dynamic - discovery of services or plugins provided by a project. See :ref:`Dynamic - Discovery of Services and Plugins` for details and examples of the format - of this argument. In addition, this keyword is used to support - :ref:`Automatic Script Creation <entry_points>`. - -``extras_require`` - A dictionary mapping names of "extras" (optional features of your project) - to strings or lists of strings specifying what other distributions must be - installed to support those features. See the section on :ref:`Declaring - Dependencies` for details and examples of the format of this argument. - -``python_requires`` - A string corresponding to a version specifier (as defined in PEP 440) for - the Python version, used to specify the Requires-Python defined in PEP 345. - -``setup_requires`` - A string or list of strings specifying what other distributions need to - be present in order for the *setup script* to run. ``setuptools`` will - attempt to obtain these (using pip if available) before processing the - rest of the setup script or commands. This argument is needed if you - are using distutils extensions as part of your build process; for - example, extensions that process setup() arguments and turn them into - EGG-INFO metadata files. - - (Note: projects listed in ``setup_requires`` will NOT be automatically - installed on the system where the setup script is being run. They are - simply downloaded to the ./.eggs directory if they're not locally available - already. If you want them to be installed, as well as being available - when the setup script is run, you should add them to ``install_requires`` - **and** ``setup_requires``.) - -``dependency_links`` - A list of strings naming URLs to be searched when satisfying dependencies. - These links will be used if needed to install packages specified by - ``setup_requires`` or ``tests_require``. They will also be written into - the egg's metadata for use during install by tools that support them. - -``namespace_packages`` - A list of strings naming the project's "namespace packages". A namespace - package is a package that may be split across multiple project - distributions. For example, Zope 3's ``zope`` package is a namespace - package, because subpackages like ``zope.interface`` and ``zope.publisher`` - may be distributed separately. The egg runtime system can automatically - merge such subpackages into a single parent package at runtime, as long - as you declare them in each project that contains any subpackages of the - namespace package, and as long as the namespace package's ``__init__.py`` - does not contain any code other than a namespace declaration. See the - section below on :ref:`Namespace Packages` for more information. - -``test_suite`` - A string naming a ``unittest.TestCase`` subclass (or a package or module - containing one or more of them, or a method of such a subclass), or naming - a function that can be called with no arguments and returns a - ``unittest.TestSuite``. If the named suite is a module, and the module - has an ``additional_tests()`` function, it is called and the results are - added to the tests to be run. If the named suite is a package, any - submodules and subpackages are recursively added to the overall test suite. - - Specifying this argument enables use of the :ref:`test <test>` command to run the - specified test suite, e.g. via ``setup.py test``. See the section on the - :ref:`test <test>` command below for more details. - - New in 41.5.0: Deprecated the test command. - -``tests_require`` - If your project's tests need one or more additional packages besides those - needed to install it, you can use this option to specify them. It should - be a string or list of strings specifying what other distributions need to - be present for the package's tests to run. When you run the ``test`` - command, ``setuptools`` will attempt to obtain these (using pip if - available). Note that these required projects will *not* be installed on - the system where the tests are run, but only downloaded to the project's setup - directory if they're not already installed locally. - - New in 41.5.0: Deprecated the test command. - -.. _test_loader: - -``test_loader`` - If you would like to use a different way of finding tests to run than what - setuptools normally uses, you can specify a module name and class name in - this argument. The named class must be instantiable with no arguments, and - its instances must support the ``loadTestsFromNames()`` method as defined - in the Python ``unittest`` module's ``TestLoader`` class. Setuptools will - pass only one test "name" in the ``names`` argument: the value supplied for - the ``test_suite`` argument. The loader you specify may interpret this - string in any way it likes, as there are no restrictions on what may be - contained in a ``test_suite`` string. - - The module name and class name must be separated by a ``:``. The default - value of this argument is ``"setuptools.command.test:ScanningLoader"``. If - you want to use the default ``unittest`` behavior, you can specify - ``"unittest:TestLoader"`` as your ``test_loader`` argument instead. This - will prevent automatic scanning of submodules and subpackages. - - The module and class you specify here may be contained in another package, - as long as you use the ``tests_require`` option to ensure that the package - containing the loader class is available when the ``test`` command is run. - - New in 41.5.0: Deprecated the test command. - -``eager_resources`` - A list of strings naming resources that should be extracted together, if - any of them is needed, or if any C extensions included in the project are - imported. This argument is only useful if the project will be installed as - a zipfile, and there is a need to have all of the listed resources be - extracted to the filesystem *as a unit*. Resources listed here - should be "/"-separated paths, relative to the source root, so to list a - resource ``foo.png`` in package ``bar.baz``, you would include the string - ``bar/baz/foo.png`` in this argument. - - If you only need to obtain resources one at a time, or you don't have any C - extensions that access other files in the project (such as data files or - shared libraries), you probably do NOT need this argument and shouldn't - mess with it. For more details on how this argument works, see the section - below on :ref:`Automatic Resource Extraction`. - -``project_urls`` - An arbitrary map of URL names to hyperlinks, allowing more extensible - documentation of where various resources can be found than the simple - ``url`` and ``download_url`` options provide. diff --git a/docs/userguide/package_discovery.rst b/docs/userguide/package_discovery.rst index 38119bc6fa..0854a709b8 100644 --- a/docs/userguide/package_discovery.rst +++ b/docs/userguide/package_discovery.rst @@ -6,7 +6,7 @@ Package Discovery and Namespace Package .. note:: a full specification for the keyword supplied to ``setup.cfg`` or - ``setup.py`` can be found at :doc:`keywords reference <keywords>` + ``setup.py`` can be found at :doc:`keywords reference </references/keywords>` .. note:: the examples provided here are only to demonstrate the functionality diff --git a/setup.cfg b/setup.cfg index 4b386243a4..158919e55c 100644 --- a/setup.cfg +++ b/setup.cfg @@ -93,6 +93,7 @@ docs = pygments-github-lexers==0.0.5 sphinx-favicon sphinx-inline-tabs + sphinx-reredirects sphinxcontrib-towncrier furo
iterative__dvc-1757
typo in docs super minor typo: $dvc repro --help -c CWD, --cwd CWD Directory within your repo to **reroduce** from. dvc --version 0.30.1
[ { "content": "from __future__ import unicode_literals\n\nimport os\n\nimport dvc.logger as logger\nfrom dvc.command.base import CmdBase\nfrom dvc.command.status import CmdDataStatus\nfrom dvc.exceptions import DvcException\n\n\nclass CmdRepro(CmdBase):\n def run(self):\n recursive = not self.args.single_item\n saved_dir = os.path.realpath(os.curdir)\n if self.args.cwd:\n os.chdir(self.args.cwd)\n\n # Dirty hack so the for loop below can at least enter once\n if self.args.all_pipelines:\n self.args.targets = [None]\n elif not self.args.targets:\n self.args.targets = self.default_targets\n\n ret = 0\n for target in self.args.targets:\n try:\n stages = self.repo.reproduce(\n target,\n recursive=recursive,\n force=self.args.force,\n dry=self.args.dry,\n interactive=self.args.interactive,\n pipeline=self.args.pipeline,\n all_pipelines=self.args.all_pipelines,\n ignore_build_cache=self.args.ignore_build_cache,\n no_commit=self.args.no_commit,\n )\n\n if len(stages) == 0:\n logger.info(CmdDataStatus.UP_TO_DATE_MSG)\n\n if self.args.metrics:\n self.repo.metrics.show()\n except DvcException:\n logger.error()\n ret = 1\n break\n\n os.chdir(saved_dir)\n return ret\n\n\ndef add_parser(subparsers, parent_parser):\n REPRO_HELP = \"Reproduce DVC file. Default file name - 'Dvcfile'.\"\n repro_parser = subparsers.add_parser(\n \"repro\",\n parents=[parent_parser],\n description=REPRO_HELP,\n help=REPRO_HELP,\n )\n repro_parser.add_argument(\n \"targets\", nargs=\"*\", help=\"DVC file to reproduce.\"\n )\n repro_parser.add_argument(\n \"-f\",\n \"--force\",\n action=\"store_true\",\n default=False,\n help=\"Reproduce even if dependencies were not changed.\",\n )\n repro_parser.add_argument(\n \"-s\",\n \"--single-item\",\n action=\"store_true\",\n default=False,\n help=\"Reproduce only single data item without recursive dependencies \"\n \"check.\",\n )\n repro_parser.add_argument(\n \"-c\",\n \"--cwd\",\n default=os.path.curdir,\n help=\"Directory within your repo to reroduce from.\",\n )\n repro_parser.add_argument(\n \"-m\",\n \"--metrics\",\n action=\"store_true\",\n default=False,\n help=\"Show metrics after reproduction.\",\n )\n repro_parser.add_argument(\n \"--dry\",\n action=\"store_true\",\n default=False,\n help=\"Only print the commands that would be executed without \"\n \"actually executing.\",\n )\n repro_parser.add_argument(\n \"-i\",\n \"--interactive\",\n action=\"store_true\",\n default=False,\n help=\"Ask for confirmation before reproducing each stage.\",\n )\n repro_parser.add_argument(\n \"-p\",\n \"--pipeline\",\n action=\"store_true\",\n default=False,\n help=\"Reproduce the whole pipeline that the specified stage file \"\n \"belongs to.\",\n )\n repro_parser.add_argument(\n \"-P\",\n \"--all-pipelines\",\n action=\"store_true\",\n default=False,\n help=\"Reproduce all pipelines in the repo.\",\n )\n repro_parser.add_argument(\n \"--ignore-build-cache\",\n action=\"store_true\",\n default=False,\n help=\"Reproduce all descendants of a changed stage even if their \"\n \"direct dependencies didn't change.\",\n )\n repro_parser.add_argument(\n \"--no-commit\",\n action=\"store_true\",\n default=False,\n help=\"Don't put files/directories into cache.\",\n )\n repro_parser.set_defaults(func=CmdRepro)\n", "path": "dvc/command/repro.py" } ]
[ { "content": "from __future__ import unicode_literals\n\nimport os\n\nimport dvc.logger as logger\nfrom dvc.command.base import CmdBase\nfrom dvc.command.status import CmdDataStatus\nfrom dvc.exceptions import DvcException\n\n\nclass CmdRepro(CmdBase):\n def run(self):\n recursive = not self.args.single_item\n saved_dir = os.path.realpath(os.curdir)\n if self.args.cwd:\n os.chdir(self.args.cwd)\n\n # Dirty hack so the for loop below can at least enter once\n if self.args.all_pipelines:\n self.args.targets = [None]\n elif not self.args.targets:\n self.args.targets = self.default_targets\n\n ret = 0\n for target in self.args.targets:\n try:\n stages = self.repo.reproduce(\n target,\n recursive=recursive,\n force=self.args.force,\n dry=self.args.dry,\n interactive=self.args.interactive,\n pipeline=self.args.pipeline,\n all_pipelines=self.args.all_pipelines,\n ignore_build_cache=self.args.ignore_build_cache,\n no_commit=self.args.no_commit,\n )\n\n if len(stages) == 0:\n logger.info(CmdDataStatus.UP_TO_DATE_MSG)\n\n if self.args.metrics:\n self.repo.metrics.show()\n except DvcException:\n logger.error()\n ret = 1\n break\n\n os.chdir(saved_dir)\n return ret\n\n\ndef add_parser(subparsers, parent_parser):\n REPRO_HELP = \"Reproduce DVC file. Default file name - 'Dvcfile'.\"\n repro_parser = subparsers.add_parser(\n \"repro\",\n parents=[parent_parser],\n description=REPRO_HELP,\n help=REPRO_HELP,\n )\n repro_parser.add_argument(\n \"targets\", nargs=\"*\", help=\"DVC file to reproduce.\"\n )\n repro_parser.add_argument(\n \"-f\",\n \"--force\",\n action=\"store_true\",\n default=False,\n help=\"Reproduce even if dependencies were not changed.\",\n )\n repro_parser.add_argument(\n \"-s\",\n \"--single-item\",\n action=\"store_true\",\n default=False,\n help=\"Reproduce only single data item without recursive dependencies \"\n \"check.\",\n )\n repro_parser.add_argument(\n \"-c\",\n \"--cwd\",\n default=os.path.curdir,\n help=\"Directory within your repo to reproduce from.\",\n )\n repro_parser.add_argument(\n \"-m\",\n \"--metrics\",\n action=\"store_true\",\n default=False,\n help=\"Show metrics after reproduction.\",\n )\n repro_parser.add_argument(\n \"--dry\",\n action=\"store_true\",\n default=False,\n help=\"Only print the commands that would be executed without \"\n \"actually executing.\",\n )\n repro_parser.add_argument(\n \"-i\",\n \"--interactive\",\n action=\"store_true\",\n default=False,\n help=\"Ask for confirmation before reproducing each stage.\",\n )\n repro_parser.add_argument(\n \"-p\",\n \"--pipeline\",\n action=\"store_true\",\n default=False,\n help=\"Reproduce the whole pipeline that the specified stage file \"\n \"belongs to.\",\n )\n repro_parser.add_argument(\n \"-P\",\n \"--all-pipelines\",\n action=\"store_true\",\n default=False,\n help=\"Reproduce all pipelines in the repo.\",\n )\n repro_parser.add_argument(\n \"--ignore-build-cache\",\n action=\"store_true\",\n default=False,\n help=\"Reproduce all descendants of a changed stage even if their \"\n \"direct dependencies didn't change.\",\n )\n repro_parser.add_argument(\n \"--no-commit\",\n action=\"store_true\",\n default=False,\n help=\"Don't put files/directories into cache.\",\n )\n repro_parser.set_defaults(func=CmdRepro)\n", "path": "dvc/command/repro.py" } ]
diff --git a/dvc/command/repro.py b/dvc/command/repro.py index 05b82e879f..8e582196fa 100644 --- a/dvc/command/repro.py +++ b/dvc/command/repro.py @@ -80,7 +80,7 @@ def add_parser(subparsers, parent_parser): "-c", "--cwd", default=os.path.curdir, - help="Directory within your repo to reroduce from.", + help="Directory within your repo to reproduce from.", ) repro_parser.add_argument( "-m",
localstack__localstack-4741
question: ENABLE_CONFIG_UPDATES for docker image ### Is there an existing issue for this? - [x] I have searched the existing issues and read the documentation ### Question Seems like ENABLE_CONFIG_UPDATES=1 works only in host mode. Is there way to enable it for docker ? ### Anything else? _No response_
[ { "content": "import json\nimport logging\nimport os\nimport platform\nimport re\nimport socket\nimport subprocess\nimport tempfile\nimport time\nfrom os.path import expanduser\n\nimport six\nfrom boto3 import Session\n\nfrom localstack.constants import (\n AWS_REGION_US_EAST_1,\n DEFAULT_BUCKET_MARKER_LOCAL,\n DEFAULT_DEVELOP_PORT,\n DEFAULT_LAMBDA_CONTAINER_REGISTRY,\n DEFAULT_PORT_EDGE,\n DEFAULT_SERVICE_PORTS,\n FALSE_STRINGS,\n LOCALHOST,\n LOCALHOST_IP,\n LOG_LEVELS,\n TRACE_LOG_LEVELS,\n TRUE_STRINGS,\n)\n\n# keep track of start time, for performance debugging\nload_start_time = time.time()\n\n\ndef eval_log_type(env_var_name):\n \"\"\"get the log type from environment variable\"\"\"\n ls_log = os.environ.get(env_var_name, \"\").lower().strip()\n return ls_log if ls_log in LOG_LEVELS else False\n\n\ndef is_env_true(env_var_name):\n \"\"\"Whether the given environment variable has a truthy value.\"\"\"\n return os.environ.get(env_var_name, \"\").lower().strip() in TRUE_STRINGS\n\n\ndef is_env_not_false(env_var_name):\n \"\"\"Whether the given environment variable is empty or has a truthy value.\"\"\"\n return os.environ.get(env_var_name, \"\").lower().strip() not in FALSE_STRINGS\n\n\n# java options to Lambda\nLAMBDA_JAVA_OPTS = os.environ.get(\"LAMBDA_JAVA_OPTS\", \"\").strip()\n\n# limit in which to kinesalite will start throwing exceptions\nKINESIS_SHARD_LIMIT = os.environ.get(\"KINESIS_SHARD_LIMIT\", \"\").strip() or \"100\"\n\n# delay in kinesalite response when making changes to streams\nKINESIS_LATENCY = os.environ.get(\"KINESIS_LATENCY\", \"\").strip() or \"500\"\n\n# Kinesis provider - either \"kinesis-mock\" or \"kinesalite\"\nKINESIS_PROVIDER = os.environ.get(\"KINESIS_PROVIDER\") or \"kinesis-mock\"\n\n# default AWS region\nif \"DEFAULT_REGION\" not in os.environ:\n os.environ[\"DEFAULT_REGION\"] = os.environ.get(\"AWS_DEFAULT_REGION\") or AWS_REGION_US_EAST_1\nDEFAULT_REGION = os.environ[\"DEFAULT_REGION\"]\n\n# Whether or not to handle lambda event sources as synchronous invocations\nSYNCHRONOUS_SNS_EVENTS = is_env_true(\"SYNCHRONOUS_SNS_EVENTS\")\nSYNCHRONOUS_SQS_EVENTS = is_env_true(\"SYNCHRONOUS_SQS_EVENTS\")\nSYNCHRONOUS_API_GATEWAY_EVENTS = is_env_not_false(\"SYNCHRONOUS_API_GATEWAY_EVENTS\")\nSYNCHRONOUS_KINESIS_EVENTS = is_env_not_false(\"SYNCHRONOUS_KINESIS_EVENTS\")\nSYNCHRONOUS_DYNAMODB_EVENTS = is_env_not_false(\"SYNCHRONOUS_DYNAMODB_EVENTS\")\n\n# randomly inject faults to Kinesis\nKINESIS_ERROR_PROBABILITY = float(os.environ.get(\"KINESIS_ERROR_PROBABILITY\", \"\").strip() or 0.0)\n\n# randomly inject faults to DynamoDB\nDYNAMODB_ERROR_PROBABILITY = float(os.environ.get(\"DYNAMODB_ERROR_PROBABILITY\", \"\").strip() or 0.0)\nDYNAMODB_READ_ERROR_PROBABILITY = float(\n os.environ.get(\"DYNAMODB_READ_ERROR_PROBABILITY\", \"\").strip() or 0.0\n)\nDYNAMODB_WRITE_ERROR_PROBABILITY = float(\n os.environ.get(\"DYNAMODB_WRITE_ERROR_PROBABILITY\", \"\").strip() or 0.0\n)\n\n# JAVA EE heap size for dynamodb\nDYNAMODB_HEAP_SIZE = os.environ.get(\"DYNAMODB_HEAP_SIZE\", \"\").strip() or \"256m\"\n\n# expose services on a specific host externally\nHOSTNAME_EXTERNAL = os.environ.get(\"HOSTNAME_EXTERNAL\", \"\").strip() or LOCALHOST\n\n# expose SQS on a specific port externally\nSQS_PORT_EXTERNAL = int(os.environ.get(\"SQS_PORT_EXTERNAL\") or 0)\n\n# name of the host under which the LocalStack services are available\nLOCALSTACK_HOSTNAME = os.environ.get(\"LOCALSTACK_HOSTNAME\", \"\").strip() or LOCALHOST\n\n# host under which the LocalStack services are available from Lambda Docker containers\nHOSTNAME_FROM_LAMBDA = os.environ.get(\"HOSTNAME_FROM_LAMBDA\", \"\").strip()\n\n# whether to remotely copy the lambda code or locally mount a volume\nLAMBDA_REMOTE_DOCKER = is_env_true(\"LAMBDA_REMOTE_DOCKER\")\n\n# Marker name to indicate that a bucket represents the local file system. This is used for testing\n# Serverless applications where we mount the Lambda code directly into the container from the host OS.\nBUCKET_MARKER_LOCAL = (\n os.environ.get(\"BUCKET_MARKER_LOCAL\", \"\").strip() or DEFAULT_BUCKET_MARKER_LOCAL\n)\n\n# network that the docker lambda container will be joining\nLAMBDA_DOCKER_NETWORK = os.environ.get(\"LAMBDA_DOCKER_NETWORK\", \"\").strip()\n\n# custom DNS server that the docker lambda container will use\nLAMBDA_DOCKER_DNS = os.environ.get(\"LAMBDA_DOCKER_DNS\", \"\").strip()\n\n# additional flags passed to Lambda Docker run/create commands\nLAMBDA_DOCKER_FLAGS = os.environ.get(\"LAMBDA_DOCKER_FLAGS\", \"\").strip()\n\n# default container registry for lambda execution images\nLAMBDA_CONTAINER_REGISTRY = (\n os.environ.get(\"LAMBDA_CONTAINER_REGISTRY\", \"\").strip() or DEFAULT_LAMBDA_CONTAINER_REGISTRY\n)\n\n# whether to remove containers after Lambdas finished executing\nLAMBDA_REMOVE_CONTAINERS = (\n os.environ.get(\"LAMBDA_REMOVE_CONTAINERS\", \"\").lower().strip() not in FALSE_STRINGS\n)\n\n# directory for persisting data\nDATA_DIR = os.environ.get(\"DATA_DIR\", \"\").strip()\n\n# folder for temporary files and data\nTMP_FOLDER = os.path.join(tempfile.gettempdir(), \"localstack\")\n\n# create folders\nfor folder in [DATA_DIR, TMP_FOLDER]:\n if folder and not os.path.exists(folder):\n try:\n os.makedirs(folder)\n except Exception:\n # this can happen due to a race condition when starting\n # multiple processes in parallel. Should be safe to ignore\n pass\n\n# fix for Mac OS, to be able to mount /var/folders in Docker\nif TMP_FOLDER.startswith(\"/var/folders/\") and os.path.exists(\"/private%s\" % TMP_FOLDER):\n TMP_FOLDER = \"/private%s\" % TMP_FOLDER\n\n# temporary folder of the host (required when running in Docker). Fall back to local tmp folder if not set\nHOST_TMP_FOLDER = os.environ.get(\"HOST_TMP_FOLDER\", TMP_FOLDER)\n\n# whether to enable verbose debug logging\nLS_LOG = eval_log_type(\"LS_LOG\")\nDEBUG = is_env_true(\"DEBUG\") or LS_LOG in TRACE_LOG_LEVELS\n\n# whether to enable debugpy\nDEVELOP = is_env_true(\"DEVELOP\")\n\n# PORT FOR DEBUGGER\nDEVELOP_PORT = int(os.environ.get(\"DEVELOP_PORT\", \"\").strip() or DEFAULT_DEVELOP_PORT)\n\n# whether to make debugpy wait for a debbuger client\nWAIT_FOR_DEBUGGER = is_env_true(\"WAIT_FOR_DEBUGGER\")\n\n# whether to use SSL encryption for the services\n# TODO: this is deprecated and should be removed (edge port supports HTTP/HTTPS multiplexing)\nUSE_SSL = is_env_true(\"USE_SSL\")\n\n# whether to use the legacy single-region mode, defined via DEFAULT_REGION\nUSE_SINGLE_REGION = is_env_true(\"USE_SINGLE_REGION\")\n\n# whether to run in TF compatibility mode for TF integration tests\n# (e.g., returning verbatim ports for ELB resources, rather than edge port 4566, etc.)\nTF_COMPAT_MODE = is_env_true(\"TF_COMPAT_MODE\")\n\n# default encoding used to convert strings to byte arrays (mainly for Python 3 compatibility)\nDEFAULT_ENCODING = \"utf-8\"\n\n# path to local Docker UNIX domain socket\nDOCKER_SOCK = os.environ.get(\"DOCKER_SOCK\", \"\").strip() or \"/var/run/docker.sock\"\n\n# additional flags to pass to \"docker run\" when starting the stack in Docker\nDOCKER_FLAGS = os.environ.get(\"DOCKER_FLAGS\", \"\").strip()\n\n# command used to run Docker containers (e.g., set to \"sudo docker\" to run as sudo)\nDOCKER_CMD = os.environ.get(\"DOCKER_CMD\", \"\").strip() or \"docker\"\n\n# use the command line docker client instead of the new sdk version, might get removed in the future\nLEGACY_DOCKER_CLIENT = is_env_true(\"LEGACY_DOCKER_CLIENT\")\n\n# whether to forward edge requests in-memory (instead of via proxy servers listening on backend ports)\n# TODO: this will likely become the default and may get removed in the future\nFORWARD_EDGE_INMEM = True\n# Default bind address for the edge service\nEDGE_BIND_HOST = os.environ.get(\"EDGE_BIND_HOST\", \"\").strip() or \"127.0.0.1\"\n# port number for the edge service, the main entry point for all API invocations\nEDGE_PORT = int(os.environ.get(\"EDGE_PORT\") or 0) or DEFAULT_PORT_EDGE\n# fallback port for non-SSL HTTP edge service (in case HTTPS edge service cannot be used)\nEDGE_PORT_HTTP = int(os.environ.get(\"EDGE_PORT_HTTP\") or 0)\n# optional target URL to forward all edge requests to\nEDGE_FORWARD_URL = os.environ.get(\"EDGE_FORWARD_URL\", \"\").strip()\n\n# IP of the docker bridge used to enable access between containers\nDOCKER_BRIDGE_IP = os.environ.get(\"DOCKER_BRIDGE_IP\", \"\").strip()\n\n# whether to enable API-based updates of configuration variables at runtime\nENABLE_CONFIG_UPDATES = is_env_true(\"ENABLE_CONFIG_UPDATES\")\n\n# CORS settings\nDISABLE_CORS_CHECKS = is_env_true(\"DISABLE_CORS_CHECKS\")\nDISABLE_CUSTOM_CORS_S3 = is_env_true(\"DISABLE_CUSTOM_CORS_S3\")\nDISABLE_CUSTOM_CORS_APIGATEWAY = is_env_true(\"DISABLE_CUSTOM_CORS_APIGATEWAY\")\nEXTRA_CORS_ALLOWED_HEADERS = os.environ.get(\"EXTRA_CORS_ALLOWED_HEADERS\", \"\").strip()\nEXTRA_CORS_EXPOSE_HEADERS = os.environ.get(\"EXTRA_CORS_EXPOSE_HEADERS\", \"\").strip()\nEXTRA_CORS_ALLOWED_ORIGINS = os.environ.get(\"EXTRA_CORS_ALLOWED_ORIGINS\", \"\").strip()\n\n# whether to disable publishing events to the API\nDISABLE_EVENTS = is_env_true(\"DISABLE_EVENTS\")\nDEBUG_ANALYTICS = is_env_true(\"DEBUG_ANALYTICS\")\n\n# whether to skip downloading additional infrastructure components (e.g., custom Elasticsearch versions)\nSKIP_INFRA_DOWNLOADS = os.environ.get(\"SKIP_INFRA_DOWNLOADS\", \"\").strip()\n\n# Adding Stepfunctions default port\nLOCAL_PORT_STEPFUNCTIONS = int(os.environ.get(\"LOCAL_PORT_STEPFUNCTIONS\") or 8083)\n# Stepfunctions lambda endpoint override\nSTEPFUNCTIONS_LAMBDA_ENDPOINT = os.environ.get(\"STEPFUNCTIONS_LAMBDA_ENDPOINT\", \"\").strip()\n\n# path prefix for windows volume mounting\nWINDOWS_DOCKER_MOUNT_PREFIX = os.environ.get(\"WINDOWS_DOCKER_MOUNT_PREFIX\", \"/host_mnt\")\n\n# name of the main Docker container\nMAIN_CONTAINER_NAME = os.environ.get(\"MAIN_CONTAINER_NAME\", \"\").strip() or \"localstack_main\"\n\n# the latest commit id of the repository when the docker image was created\nLOCALSTACK_BUILD_GIT_HASH = os.environ.get(\"LOCALSTACK_BUILD_GIT_HASH\", \"\").strip() or None\n\n# the date on which the docker image was created\nLOCALSTACK_BUILD_DATE = os.environ.get(\"LOCALSTACK_BUILD_DATE\", \"\").strip() or None\n\n# whether to skip S3 presign URL signature validation (TODO: currently enabled, until all issues are resolved)\nS3_SKIP_SIGNATURE_VALIDATION = is_env_not_false(\"S3_SKIP_SIGNATURE_VALIDATION\")\n\n# whether to skip waiting for the infrastructure to shut down, or exit immediately\nFORCE_SHUTDOWN = is_env_not_false(\"FORCE_SHUTDOWN\")\n\n# whether the in_docker check should always return true\nOVERRIDE_IN_DOCKER = is_env_true(\"OVERRIDE_IN_DOCKER\")\n\n# whether to return mocked success responses for still unimplemented API methods\nMOCK_UNIMPLEMENTED = is_env_true(\"MOCK_UNIMPLEMENTED\")\n\n\ndef has_docker():\n try:\n with open(os.devnull, \"w\") as devnull:\n subprocess.check_output(\"docker ps\", stderr=devnull, shell=True)\n return True\n except Exception:\n return False\n\n\ndef is_linux():\n return platform.system() == \"Linux\"\n\n\n# whether to use Lambda functions in a Docker container\nLAMBDA_EXECUTOR = os.environ.get(\"LAMBDA_EXECUTOR\", \"\").strip()\nif not LAMBDA_EXECUTOR:\n LAMBDA_EXECUTOR = \"docker\"\n if not has_docker():\n LAMBDA_EXECUTOR = \"local\"\n\n# Fallback URL to use when a non-existing Lambda is invoked. If this matches\n# `dynamodb://<table_name>`, then the invocation is recorded in the corresponding\n# DynamoDB table. If this matches `http(s)://...`, then the Lambda invocation is\n# forwarded as a POST request to that URL.\nLAMBDA_FALLBACK_URL = os.environ.get(\"LAMBDA_FALLBACK_URL\", \"\").strip()\n# Forward URL used to forward any Lambda invocations to an external\n# endpoint (can use useful for advanced test setups)\nLAMBDA_FORWARD_URL = os.environ.get(\"LAMBDA_FORWARD_URL\", \"\").strip()\n# Time in seconds to wait at max while extracting Lambda code.\n# By default it is 25 seconds for limiting the execution time\n# to avoid client/network timeout issues\nLAMBDA_CODE_EXTRACT_TIME = int(os.environ.get(\"LAMBDA_CODE_EXTRACT_TIME\") or 25)\n\n# A comma-delimited string of stream names and its corresponding shard count to\n# initialize during startup.\n# For example: \"my-first-stream:1,my-other-stream:2,my-last-stream:1\"\nKINESIS_INITIALIZE_STREAMS = os.environ.get(\"KINESIS_INITIALIZE_STREAMS\", \"\").strip()\n\n# list of environment variable names used for configuration.\n# Make sure to keep this in sync with the above!\n# Note: do *not* include DATA_DIR in this list, as it is treated separately\nCONFIG_ENV_VARS = [\n \"SERVICES\",\n \"HOSTNAME\",\n \"HOSTNAME_EXTERNAL\",\n \"LOCALSTACK_HOSTNAME\",\n \"LAMBDA_FALLBACK_URL\",\n \"LAMBDA_EXECUTOR\",\n \"LAMBDA_REMOTE_DOCKER\",\n \"LAMBDA_DOCKER_NETWORK\",\n \"LAMBDA_REMOVE_CONTAINERS\",\n \"USE_SSL\",\n \"USE_SINGLE_REGION\",\n \"DEBUG\",\n \"KINESIS_ERROR_PROBABILITY\",\n \"DYNAMODB_ERROR_PROBABILITY\",\n \"DYNAMODB_READ_ERROR_PROBABILITY\",\n \"DYNAMODB_WRITE_ERROR_PROBABILITY\",\n \"DOCKER_BRIDGE_IP\",\n \"DEFAULT_REGION\",\n \"LAMBDA_JAVA_OPTS\",\n \"LOCALSTACK_API_KEY\",\n \"LAMBDA_CONTAINER_REGISTRY\",\n \"TEST_AWS_ACCOUNT_ID\",\n \"DISABLE_EVENTS\",\n \"EDGE_PORT\",\n \"LS_LOG\",\n \"EDGE_PORT_HTTP\",\n \"EDGE_FORWARD_URL\",\n \"SKIP_INFRA_DOWNLOADS\",\n \"STEPFUNCTIONS_LAMBDA_ENDPOINT\",\n \"WINDOWS_DOCKER_MOUNT_PREFIX\",\n \"HOSTNAME_FROM_LAMBDA\",\n \"LOG_LICENSE_ISSUES\",\n \"SYNCHRONOUS_API_GATEWAY_EVENTS\",\n \"SYNCHRONOUS_KINESIS_EVENTS\",\n \"BUCKET_MARKER_LOCAL\",\n \"SYNCHRONOUS_SNS_EVENTS\",\n \"SYNCHRONOUS_SQS_EVENTS\",\n \"SYNCHRONOUS_DYNAMODB_EVENTS\",\n \"DYNAMODB_HEAP_SIZE\",\n \"MAIN_CONTAINER_NAME\",\n \"LAMBDA_DOCKER_DNS\",\n \"PERSISTENCE_SINGLE_FILE\",\n \"S3_SKIP_SIGNATURE_VALIDATION\",\n \"DEVELOP\",\n \"DEVELOP_PORT\",\n \"WAIT_FOR_DEBUGGER\",\n \"KINESIS_INITIALIZE_STREAMS\",\n \"TF_COMPAT_MODE\",\n \"LAMBDA_DOCKER_FLAGS\",\n \"LAMBDA_FORWARD_URL\",\n \"LAMBDA_CODE_EXTRACT_TIME\",\n \"THUNDRA_APIKEY\",\n \"THUNDRA_AGENT_JAVA_VERSION\",\n \"THUNDRA_AGENT_NODE_VERSION\",\n \"THUNDRA_AGENT_PYTHON_VERSION\",\n \"DISABLE_CORS_CHECKS\",\n \"DISABLE_CUSTOM_CORS_S3\",\n \"DISABLE_CUSTOM_CORS_APIGATEWAY\",\n \"EXTRA_CORS_ALLOWED_HEADERS\",\n \"EXTRA_CORS_EXPOSE_HEADERS\",\n \"EXTRA_CORS_ALLOWED_ORIGINS\",\n]\n\nfor key, value in six.iteritems(DEFAULT_SERVICE_PORTS):\n clean_key = key.upper().replace(\"-\", \"_\")\n CONFIG_ENV_VARS += [\n clean_key + \"_BACKEND\",\n clean_key + \"_PORT\",\n clean_key + \"_PORT_EXTERNAL\",\n ]\n\n\ndef ping(host):\n \"\"\"Returns True if host responds to a ping request\"\"\"\n is_windows = platform.system().lower() == \"windows\"\n ping_opts = \"-n 1\" if is_windows else \"-c 1\"\n args = \"ping %s %s\" % (ping_opts, host)\n return (\n subprocess.call(args, shell=not is_windows, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n == 0\n )\n\n\ndef in_docker():\n \"\"\"\n Returns True if running in a docker container, else False\n Ref. https://docs.docker.com/config/containers/runmetrics/#control-groups\n \"\"\"\n if OVERRIDE_IN_DOCKER:\n return True\n if os.path.exists(\"/.dockerenv\"):\n return True\n if not os.path.exists(\"/proc/1/cgroup\"):\n return False\n try:\n if any(\n [\n os.path.exists(\"/sys/fs/cgroup/memory/docker/\"),\n any(\n [\n \"docker-\" in file_names\n for file_names in os.listdir(\"/sys/fs/cgroup/memory/system.slice\")\n ]\n ),\n os.path.exists(\"/sys/fs/cgroup/docker/\"),\n any(\n [\n \"docker-\" in file_names\n for file_names in os.listdir(\"/sys/fs/cgroup/system.slice/\")\n ]\n ),\n ]\n ):\n return False\n except Exception:\n pass\n with open(\"/proc/1/cgroup\", \"rt\") as ifh:\n os_hostname = open(\"/etc/hostname\", \"rt\").read().strip()\n content = ifh.read()\n if os_hostname in content or \"docker\" in content:\n return True\n return False\n\n\nis_in_docker = in_docker()\nis_in_linux = is_linux()\n\n# determine IP of Docker bridge\nif not DOCKER_BRIDGE_IP:\n DOCKER_BRIDGE_IP = \"172.17.0.1\"\n if is_in_docker:\n candidates = (DOCKER_BRIDGE_IP, \"172.18.0.1\")\n for ip in candidates:\n if ping(ip):\n DOCKER_BRIDGE_IP = ip\n break\n\n# determine route to Docker host from container\ntry:\n DOCKER_HOST_FROM_CONTAINER = DOCKER_BRIDGE_IP\n if not is_in_docker and not is_in_linux:\n # If we're running outside docker, and would like the Lambda containers to be able\n # to access services running on the local machine, set DOCKER_HOST_FROM_CONTAINER accordingly\n if LOCALSTACK_HOSTNAME == LOCALHOST:\n DOCKER_HOST_FROM_CONTAINER = \"host.docker.internal\"\n # update LOCALSTACK_HOSTNAME if host.docker.internal is available\n if is_in_docker:\n DOCKER_HOST_FROM_CONTAINER = socket.gethostbyname(\"host.docker.internal\")\n if LOCALSTACK_HOSTNAME == DOCKER_BRIDGE_IP:\n LOCALSTACK_HOSTNAME = DOCKER_HOST_FROM_CONTAINER\nexcept socket.error:\n pass\n\n# make sure we default to LAMBDA_REMOTE_DOCKER=true if running in Docker\nif is_in_docker and not os.environ.get(\"LAMBDA_REMOTE_DOCKER\", \"\").strip():\n LAMBDA_REMOTE_DOCKER = True\n\n# local config file path in home directory\nCONFIG_FILE_PATH = os.path.join(TMP_FOLDER, \".localstack\")\nif not is_in_docker:\n CONFIG_FILE_PATH = os.path.join(expanduser(\"~\"), \".localstack\")\n\n# set variables no_proxy, i.e., run internal service calls directly\nno_proxy = \",\".join(set((LOCALSTACK_HOSTNAME, LOCALHOST, LOCALHOST_IP, \"[::1]\")))\nif os.environ.get(\"no_proxy\"):\n os.environ[\"no_proxy\"] += \",\" + no_proxy\nelif os.environ.get(\"NO_PROXY\"):\n os.environ[\"NO_PROXY\"] += \",\" + no_proxy\nelse:\n os.environ[\"no_proxy\"] = no_proxy\n\n# additional CLI commands, can be set by plugins\nCLI_COMMANDS = {}\n\n# set of valid regions\nVALID_PARTITIONS = set(Session().get_available_partitions())\nVALID_REGIONS = set()\nfor partition in VALID_PARTITIONS:\n for region in Session().get_available_regions(\"sns\", partition):\n VALID_REGIONS.add(region)\n\n\ndef parse_service_ports():\n \"\"\"Parses the environment variable $SERVICES with a comma-separated list of services\n and (optional) ports they should run on: 'service1:port1,service2,service3:port3'\"\"\"\n service_ports = os.environ.get(\"SERVICES\", \"\").strip()\n if not service_ports:\n return DEFAULT_SERVICE_PORTS\n result = {}\n for service_port in re.split(r\"\\s*,\\s*\", service_ports):\n parts = re.split(r\"[:=]\", service_port)\n service = parts[0]\n key_upper = service.upper().replace(\"-\", \"_\")\n port_env_name = \"%s_PORT\" % key_upper\n # (1) set default port number\n port_number = DEFAULT_SERVICE_PORTS.get(service)\n # (2) set port number from <SERVICE>_PORT environment, if present\n if os.environ.get(port_env_name):\n port_number = os.environ.get(port_env_name)\n # (3) set port number from <service>:<port> portion in $SERVICES, if present\n if len(parts) > 1:\n port_number = int(parts[-1])\n # (4) try to parse as int, fall back to 0 (invalid port)\n try:\n port_number = int(port_number)\n except Exception:\n port_number = 0\n result[service] = port_number\n return result\n\n\n# TODO: we need to investigate the performance impact of this\ndef populate_configs(service_ports=None):\n global SERVICE_PORTS, CONFIG_ENV_VARS\n\n SERVICE_PORTS = service_ports or parse_service_ports()\n globs = globals()\n protocol = get_protocol()\n\n # define service ports and URLs as environment variables\n for key, value in six.iteritems(DEFAULT_SERVICE_PORTS):\n key_upper = key.upper().replace(\"-\", \"_\")\n\n # define PORT_* variables with actual service ports as per configuration\n port_var_name = \"PORT_%s\" % key_upper\n port_number = service_port(key)\n globs[port_var_name] = port_number\n url = \"%s://%s:%s\" % (protocol, LOCALSTACK_HOSTNAME, port_number)\n # define TEST_*_URL variables with mock service endpoints\n url_key = \"TEST_%s_URL\" % key_upper\n # allow overwriting TEST_*_URL from user-defined environment variables\n existing = os.environ.get(url_key)\n url = existing or url\n # set global variable\n globs[url_key] = url\n # expose HOST_*_URL variables as environment variables\n os.environ[url_key] = url\n\n # expose LOCALSTACK_HOSTNAME as env. variable\n os.environ[\"LOCALSTACK_HOSTNAME\"] = LOCALSTACK_HOSTNAME\n\n # create variable aliases prefixed with LOCALSTACK_ (except LOCALSTACK_HOSTNAME)\n CONFIG_ENV_VARS += [\n \"LOCALSTACK_\" + v for v in CONFIG_ENV_VARS if not v.startswith(\"LOCALSTACK_\")\n ]\n CONFIG_ENV_VARS = list(set(CONFIG_ENV_VARS))\n\n\ndef service_port(service_key):\n if FORWARD_EDGE_INMEM:\n if service_key == \"elasticsearch\":\n # TODO Elasticsearch domains are a special case - we do not want to route them through\n # the edge service, as that would require too many route mappings. In the future, we\n # should integrate them with the port range for external services (4510-4530)\n return SERVICE_PORTS.get(service_key, 0)\n return get_edge_port_http()\n return SERVICE_PORTS.get(service_key, 0)\n\n\ndef get_protocol():\n return \"https\" if USE_SSL else \"http\"\n\n\ndef external_service_url(service_key, host=None):\n host = host or HOSTNAME_EXTERNAL\n return \"%s://%s:%s\" % (get_protocol(), host, service_port(service_key))\n\n\ndef get_edge_port_http():\n return EDGE_PORT_HTTP or EDGE_PORT\n\n\ndef get_edge_url(localstack_hostname=None, protocol=None):\n port = get_edge_port_http()\n protocol = protocol or get_protocol()\n localstack_hostname = localstack_hostname or LOCALSTACK_HOSTNAME\n return \"%s://%s:%s\" % (protocol, localstack_hostname, port)\n\n\n# initialize config values\npopulate_configs()\n\n# set log levels\nif DEBUG:\n logging.getLogger(\"\").setLevel(logging.DEBUG)\n logging.getLogger(\"localstack\").setLevel(logging.DEBUG)\n\n# whether to bundle multiple APIs into a single process, where possible\nBUNDLE_API_PROCESSES = True\n\n\ndef load_config_file(config_file=None):\n from localstack.utils.common import get_or_create_file, to_str\n\n config_file = config_file or CONFIG_FILE_PATH\n content = get_or_create_file(config_file)\n try:\n configs = json.loads(to_str(content) or \"{}\")\n except Exception as e:\n print(\"Unable to load local config file %s as JSON: %s\" % (config_file, e))\n return {}\n return configs\n\n\nif LS_LOG in TRACE_LOG_LEVELS:\n load_end_time = time.time()\n LOG = logging.getLogger(__name__)\n LOG.debug(\n \"Initializing the configuration took %s ms\" % int((load_end_time - load_start_time) * 1000)\n )\n", "path": "localstack/config.py" } ]
[ { "content": "import json\nimport logging\nimport os\nimport platform\nimport re\nimport socket\nimport subprocess\nimport tempfile\nimport time\nfrom os.path import expanduser\n\nimport six\nfrom boto3 import Session\n\nfrom localstack.constants import (\n AWS_REGION_US_EAST_1,\n DEFAULT_BUCKET_MARKER_LOCAL,\n DEFAULT_DEVELOP_PORT,\n DEFAULT_LAMBDA_CONTAINER_REGISTRY,\n DEFAULT_PORT_EDGE,\n DEFAULT_SERVICE_PORTS,\n FALSE_STRINGS,\n LOCALHOST,\n LOCALHOST_IP,\n LOG_LEVELS,\n TRACE_LOG_LEVELS,\n TRUE_STRINGS,\n)\n\n# keep track of start time, for performance debugging\nload_start_time = time.time()\n\n\ndef eval_log_type(env_var_name):\n \"\"\"get the log type from environment variable\"\"\"\n ls_log = os.environ.get(env_var_name, \"\").lower().strip()\n return ls_log if ls_log in LOG_LEVELS else False\n\n\ndef is_env_true(env_var_name):\n \"\"\"Whether the given environment variable has a truthy value.\"\"\"\n return os.environ.get(env_var_name, \"\").lower().strip() in TRUE_STRINGS\n\n\ndef is_env_not_false(env_var_name):\n \"\"\"Whether the given environment variable is empty or has a truthy value.\"\"\"\n return os.environ.get(env_var_name, \"\").lower().strip() not in FALSE_STRINGS\n\n\n# java options to Lambda\nLAMBDA_JAVA_OPTS = os.environ.get(\"LAMBDA_JAVA_OPTS\", \"\").strip()\n\n# limit in which to kinesalite will start throwing exceptions\nKINESIS_SHARD_LIMIT = os.environ.get(\"KINESIS_SHARD_LIMIT\", \"\").strip() or \"100\"\n\n# delay in kinesalite response when making changes to streams\nKINESIS_LATENCY = os.environ.get(\"KINESIS_LATENCY\", \"\").strip() or \"500\"\n\n# Kinesis provider - either \"kinesis-mock\" or \"kinesalite\"\nKINESIS_PROVIDER = os.environ.get(\"KINESIS_PROVIDER\") or \"kinesis-mock\"\n\n# default AWS region\nif \"DEFAULT_REGION\" not in os.environ:\n os.environ[\"DEFAULT_REGION\"] = os.environ.get(\"AWS_DEFAULT_REGION\") or AWS_REGION_US_EAST_1\nDEFAULT_REGION = os.environ[\"DEFAULT_REGION\"]\n\n# Whether or not to handle lambda event sources as synchronous invocations\nSYNCHRONOUS_SNS_EVENTS = is_env_true(\"SYNCHRONOUS_SNS_EVENTS\")\nSYNCHRONOUS_SQS_EVENTS = is_env_true(\"SYNCHRONOUS_SQS_EVENTS\")\nSYNCHRONOUS_API_GATEWAY_EVENTS = is_env_not_false(\"SYNCHRONOUS_API_GATEWAY_EVENTS\")\nSYNCHRONOUS_KINESIS_EVENTS = is_env_not_false(\"SYNCHRONOUS_KINESIS_EVENTS\")\nSYNCHRONOUS_DYNAMODB_EVENTS = is_env_not_false(\"SYNCHRONOUS_DYNAMODB_EVENTS\")\n\n# randomly inject faults to Kinesis\nKINESIS_ERROR_PROBABILITY = float(os.environ.get(\"KINESIS_ERROR_PROBABILITY\", \"\").strip() or 0.0)\n\n# randomly inject faults to DynamoDB\nDYNAMODB_ERROR_PROBABILITY = float(os.environ.get(\"DYNAMODB_ERROR_PROBABILITY\", \"\").strip() or 0.0)\nDYNAMODB_READ_ERROR_PROBABILITY = float(\n os.environ.get(\"DYNAMODB_READ_ERROR_PROBABILITY\", \"\").strip() or 0.0\n)\nDYNAMODB_WRITE_ERROR_PROBABILITY = float(\n os.environ.get(\"DYNAMODB_WRITE_ERROR_PROBABILITY\", \"\").strip() or 0.0\n)\n\n# JAVA EE heap size for dynamodb\nDYNAMODB_HEAP_SIZE = os.environ.get(\"DYNAMODB_HEAP_SIZE\", \"\").strip() or \"256m\"\n\n# expose services on a specific host externally\nHOSTNAME_EXTERNAL = os.environ.get(\"HOSTNAME_EXTERNAL\", \"\").strip() or LOCALHOST\n\n# expose SQS on a specific port externally\nSQS_PORT_EXTERNAL = int(os.environ.get(\"SQS_PORT_EXTERNAL\") or 0)\n\n# name of the host under which the LocalStack services are available\nLOCALSTACK_HOSTNAME = os.environ.get(\"LOCALSTACK_HOSTNAME\", \"\").strip() or LOCALHOST\n\n# host under which the LocalStack services are available from Lambda Docker containers\nHOSTNAME_FROM_LAMBDA = os.environ.get(\"HOSTNAME_FROM_LAMBDA\", \"\").strip()\n\n# whether to remotely copy the lambda code or locally mount a volume\nLAMBDA_REMOTE_DOCKER = is_env_true(\"LAMBDA_REMOTE_DOCKER\")\n\n# Marker name to indicate that a bucket represents the local file system. This is used for testing\n# Serverless applications where we mount the Lambda code directly into the container from the host OS.\nBUCKET_MARKER_LOCAL = (\n os.environ.get(\"BUCKET_MARKER_LOCAL\", \"\").strip() or DEFAULT_BUCKET_MARKER_LOCAL\n)\n\n# network that the docker lambda container will be joining\nLAMBDA_DOCKER_NETWORK = os.environ.get(\"LAMBDA_DOCKER_NETWORK\", \"\").strip()\n\n# custom DNS server that the docker lambda container will use\nLAMBDA_DOCKER_DNS = os.environ.get(\"LAMBDA_DOCKER_DNS\", \"\").strip()\n\n# additional flags passed to Lambda Docker run/create commands\nLAMBDA_DOCKER_FLAGS = os.environ.get(\"LAMBDA_DOCKER_FLAGS\", \"\").strip()\n\n# default container registry for lambda execution images\nLAMBDA_CONTAINER_REGISTRY = (\n os.environ.get(\"LAMBDA_CONTAINER_REGISTRY\", \"\").strip() or DEFAULT_LAMBDA_CONTAINER_REGISTRY\n)\n\n# whether to remove containers after Lambdas finished executing\nLAMBDA_REMOVE_CONTAINERS = (\n os.environ.get(\"LAMBDA_REMOVE_CONTAINERS\", \"\").lower().strip() not in FALSE_STRINGS\n)\n\n# directory for persisting data\nDATA_DIR = os.environ.get(\"DATA_DIR\", \"\").strip()\n\n# folder for temporary files and data\nTMP_FOLDER = os.path.join(tempfile.gettempdir(), \"localstack\")\n\n# create folders\nfor folder in [DATA_DIR, TMP_FOLDER]:\n if folder and not os.path.exists(folder):\n try:\n os.makedirs(folder)\n except Exception:\n # this can happen due to a race condition when starting\n # multiple processes in parallel. Should be safe to ignore\n pass\n\n# fix for Mac OS, to be able to mount /var/folders in Docker\nif TMP_FOLDER.startswith(\"/var/folders/\") and os.path.exists(\"/private%s\" % TMP_FOLDER):\n TMP_FOLDER = \"/private%s\" % TMP_FOLDER\n\n# temporary folder of the host (required when running in Docker). Fall back to local tmp folder if not set\nHOST_TMP_FOLDER = os.environ.get(\"HOST_TMP_FOLDER\", TMP_FOLDER)\n\n# whether to enable verbose debug logging\nLS_LOG = eval_log_type(\"LS_LOG\")\nDEBUG = is_env_true(\"DEBUG\") or LS_LOG in TRACE_LOG_LEVELS\n\n# whether to enable debugpy\nDEVELOP = is_env_true(\"DEVELOP\")\n\n# PORT FOR DEBUGGER\nDEVELOP_PORT = int(os.environ.get(\"DEVELOP_PORT\", \"\").strip() or DEFAULT_DEVELOP_PORT)\n\n# whether to make debugpy wait for a debbuger client\nWAIT_FOR_DEBUGGER = is_env_true(\"WAIT_FOR_DEBUGGER\")\n\n# whether to use SSL encryption for the services\n# TODO: this is deprecated and should be removed (edge port supports HTTP/HTTPS multiplexing)\nUSE_SSL = is_env_true(\"USE_SSL\")\n\n# whether to use the legacy single-region mode, defined via DEFAULT_REGION\nUSE_SINGLE_REGION = is_env_true(\"USE_SINGLE_REGION\")\n\n# whether to run in TF compatibility mode for TF integration tests\n# (e.g., returning verbatim ports for ELB resources, rather than edge port 4566, etc.)\nTF_COMPAT_MODE = is_env_true(\"TF_COMPAT_MODE\")\n\n# default encoding used to convert strings to byte arrays (mainly for Python 3 compatibility)\nDEFAULT_ENCODING = \"utf-8\"\n\n# path to local Docker UNIX domain socket\nDOCKER_SOCK = os.environ.get(\"DOCKER_SOCK\", \"\").strip() or \"/var/run/docker.sock\"\n\n# additional flags to pass to \"docker run\" when starting the stack in Docker\nDOCKER_FLAGS = os.environ.get(\"DOCKER_FLAGS\", \"\").strip()\n\n# command used to run Docker containers (e.g., set to \"sudo docker\" to run as sudo)\nDOCKER_CMD = os.environ.get(\"DOCKER_CMD\", \"\").strip() or \"docker\"\n\n# use the command line docker client instead of the new sdk version, might get removed in the future\nLEGACY_DOCKER_CLIENT = is_env_true(\"LEGACY_DOCKER_CLIENT\")\n\n# whether to forward edge requests in-memory (instead of via proxy servers listening on backend ports)\n# TODO: this will likely become the default and may get removed in the future\nFORWARD_EDGE_INMEM = True\n# Default bind address for the edge service\nEDGE_BIND_HOST = os.environ.get(\"EDGE_BIND_HOST\", \"\").strip() or \"127.0.0.1\"\n# port number for the edge service, the main entry point for all API invocations\nEDGE_PORT = int(os.environ.get(\"EDGE_PORT\") or 0) or DEFAULT_PORT_EDGE\n# fallback port for non-SSL HTTP edge service (in case HTTPS edge service cannot be used)\nEDGE_PORT_HTTP = int(os.environ.get(\"EDGE_PORT_HTTP\") or 0)\n# optional target URL to forward all edge requests to\nEDGE_FORWARD_URL = os.environ.get(\"EDGE_FORWARD_URL\", \"\").strip()\n\n# IP of the docker bridge used to enable access between containers\nDOCKER_BRIDGE_IP = os.environ.get(\"DOCKER_BRIDGE_IP\", \"\").strip()\n\n# whether to enable API-based updates of configuration variables at runtime\nENABLE_CONFIG_UPDATES = is_env_true(\"ENABLE_CONFIG_UPDATES\")\n\n# CORS settings\nDISABLE_CORS_CHECKS = is_env_true(\"DISABLE_CORS_CHECKS\")\nDISABLE_CUSTOM_CORS_S3 = is_env_true(\"DISABLE_CUSTOM_CORS_S3\")\nDISABLE_CUSTOM_CORS_APIGATEWAY = is_env_true(\"DISABLE_CUSTOM_CORS_APIGATEWAY\")\nEXTRA_CORS_ALLOWED_HEADERS = os.environ.get(\"EXTRA_CORS_ALLOWED_HEADERS\", \"\").strip()\nEXTRA_CORS_EXPOSE_HEADERS = os.environ.get(\"EXTRA_CORS_EXPOSE_HEADERS\", \"\").strip()\nEXTRA_CORS_ALLOWED_ORIGINS = os.environ.get(\"EXTRA_CORS_ALLOWED_ORIGINS\", \"\").strip()\n\n# whether to disable publishing events to the API\nDISABLE_EVENTS = is_env_true(\"DISABLE_EVENTS\")\nDEBUG_ANALYTICS = is_env_true(\"DEBUG_ANALYTICS\")\n\n# whether to skip downloading additional infrastructure components (e.g., custom Elasticsearch versions)\nSKIP_INFRA_DOWNLOADS = os.environ.get(\"SKIP_INFRA_DOWNLOADS\", \"\").strip()\n\n# Adding Stepfunctions default port\nLOCAL_PORT_STEPFUNCTIONS = int(os.environ.get(\"LOCAL_PORT_STEPFUNCTIONS\") or 8083)\n# Stepfunctions lambda endpoint override\nSTEPFUNCTIONS_LAMBDA_ENDPOINT = os.environ.get(\"STEPFUNCTIONS_LAMBDA_ENDPOINT\", \"\").strip()\n\n# path prefix for windows volume mounting\nWINDOWS_DOCKER_MOUNT_PREFIX = os.environ.get(\"WINDOWS_DOCKER_MOUNT_PREFIX\", \"/host_mnt\")\n\n# name of the main Docker container\nMAIN_CONTAINER_NAME = os.environ.get(\"MAIN_CONTAINER_NAME\", \"\").strip() or \"localstack_main\"\n\n# the latest commit id of the repository when the docker image was created\nLOCALSTACK_BUILD_GIT_HASH = os.environ.get(\"LOCALSTACK_BUILD_GIT_HASH\", \"\").strip() or None\n\n# the date on which the docker image was created\nLOCALSTACK_BUILD_DATE = os.environ.get(\"LOCALSTACK_BUILD_DATE\", \"\").strip() or None\n\n# whether to skip S3 presign URL signature validation (TODO: currently enabled, until all issues are resolved)\nS3_SKIP_SIGNATURE_VALIDATION = is_env_not_false(\"S3_SKIP_SIGNATURE_VALIDATION\")\n\n# whether to skip waiting for the infrastructure to shut down, or exit immediately\nFORCE_SHUTDOWN = is_env_not_false(\"FORCE_SHUTDOWN\")\n\n# whether the in_docker check should always return true\nOVERRIDE_IN_DOCKER = is_env_true(\"OVERRIDE_IN_DOCKER\")\n\n# whether to return mocked success responses for still unimplemented API methods\nMOCK_UNIMPLEMENTED = is_env_true(\"MOCK_UNIMPLEMENTED\")\n\n\ndef has_docker():\n try:\n with open(os.devnull, \"w\") as devnull:\n subprocess.check_output(\"docker ps\", stderr=devnull, shell=True)\n return True\n except Exception:\n return False\n\n\ndef is_linux():\n return platform.system() == \"Linux\"\n\n\n# whether to use Lambda functions in a Docker container\nLAMBDA_EXECUTOR = os.environ.get(\"LAMBDA_EXECUTOR\", \"\").strip()\nif not LAMBDA_EXECUTOR:\n LAMBDA_EXECUTOR = \"docker\"\n if not has_docker():\n LAMBDA_EXECUTOR = \"local\"\n\n# Fallback URL to use when a non-existing Lambda is invoked. If this matches\n# `dynamodb://<table_name>`, then the invocation is recorded in the corresponding\n# DynamoDB table. If this matches `http(s)://...`, then the Lambda invocation is\n# forwarded as a POST request to that URL.\nLAMBDA_FALLBACK_URL = os.environ.get(\"LAMBDA_FALLBACK_URL\", \"\").strip()\n# Forward URL used to forward any Lambda invocations to an external\n# endpoint (can use useful for advanced test setups)\nLAMBDA_FORWARD_URL = os.environ.get(\"LAMBDA_FORWARD_URL\", \"\").strip()\n# Time in seconds to wait at max while extracting Lambda code.\n# By default it is 25 seconds for limiting the execution time\n# to avoid client/network timeout issues\nLAMBDA_CODE_EXTRACT_TIME = int(os.environ.get(\"LAMBDA_CODE_EXTRACT_TIME\") or 25)\n\n# A comma-delimited string of stream names and its corresponding shard count to\n# initialize during startup.\n# For example: \"my-first-stream:1,my-other-stream:2,my-last-stream:1\"\nKINESIS_INITIALIZE_STREAMS = os.environ.get(\"KINESIS_INITIALIZE_STREAMS\", \"\").strip()\n\n# list of environment variable names used for configuration.\n# Make sure to keep this in sync with the above!\n# Note: do *not* include DATA_DIR in this list, as it is treated separately\nCONFIG_ENV_VARS = [\n \"SERVICES\",\n \"HOSTNAME\",\n \"HOSTNAME_EXTERNAL\",\n \"LOCALSTACK_HOSTNAME\",\n \"LAMBDA_FALLBACK_URL\",\n \"LAMBDA_EXECUTOR\",\n \"LAMBDA_REMOTE_DOCKER\",\n \"LAMBDA_DOCKER_NETWORK\",\n \"LAMBDA_REMOVE_CONTAINERS\",\n \"USE_SSL\",\n \"USE_SINGLE_REGION\",\n \"DEBUG\",\n \"KINESIS_ERROR_PROBABILITY\",\n \"DYNAMODB_ERROR_PROBABILITY\",\n \"DYNAMODB_READ_ERROR_PROBABILITY\",\n \"DYNAMODB_WRITE_ERROR_PROBABILITY\",\n \"DOCKER_BRIDGE_IP\",\n \"DEFAULT_REGION\",\n \"LAMBDA_JAVA_OPTS\",\n \"LOCALSTACK_API_KEY\",\n \"LAMBDA_CONTAINER_REGISTRY\",\n \"TEST_AWS_ACCOUNT_ID\",\n \"DISABLE_EVENTS\",\n \"EDGE_PORT\",\n \"LS_LOG\",\n \"EDGE_PORT_HTTP\",\n \"EDGE_FORWARD_URL\",\n \"SKIP_INFRA_DOWNLOADS\",\n \"STEPFUNCTIONS_LAMBDA_ENDPOINT\",\n \"WINDOWS_DOCKER_MOUNT_PREFIX\",\n \"HOSTNAME_FROM_LAMBDA\",\n \"LOG_LICENSE_ISSUES\",\n \"SYNCHRONOUS_API_GATEWAY_EVENTS\",\n \"SYNCHRONOUS_KINESIS_EVENTS\",\n \"BUCKET_MARKER_LOCAL\",\n \"SYNCHRONOUS_SNS_EVENTS\",\n \"SYNCHRONOUS_SQS_EVENTS\",\n \"SYNCHRONOUS_DYNAMODB_EVENTS\",\n \"DYNAMODB_HEAP_SIZE\",\n \"MAIN_CONTAINER_NAME\",\n \"LAMBDA_DOCKER_DNS\",\n \"PERSISTENCE_SINGLE_FILE\",\n \"S3_SKIP_SIGNATURE_VALIDATION\",\n \"DEVELOP\",\n \"DEVELOP_PORT\",\n \"WAIT_FOR_DEBUGGER\",\n \"KINESIS_INITIALIZE_STREAMS\",\n \"TF_COMPAT_MODE\",\n \"LAMBDA_DOCKER_FLAGS\",\n \"LAMBDA_FORWARD_URL\",\n \"LAMBDA_CODE_EXTRACT_TIME\",\n \"THUNDRA_APIKEY\",\n \"THUNDRA_AGENT_JAVA_VERSION\",\n \"THUNDRA_AGENT_NODE_VERSION\",\n \"THUNDRA_AGENT_PYTHON_VERSION\",\n \"DISABLE_CORS_CHECKS\",\n \"DISABLE_CUSTOM_CORS_S3\",\n \"DISABLE_CUSTOM_CORS_APIGATEWAY\",\n \"EXTRA_CORS_ALLOWED_HEADERS\",\n \"EXTRA_CORS_EXPOSE_HEADERS\",\n \"EXTRA_CORS_ALLOWED_ORIGINS\",\n \"ENABLE_CONFIG_UPDATES\",\n]\n\nfor key, value in six.iteritems(DEFAULT_SERVICE_PORTS):\n clean_key = key.upper().replace(\"-\", \"_\")\n CONFIG_ENV_VARS += [\n clean_key + \"_BACKEND\",\n clean_key + \"_PORT\",\n clean_key + \"_PORT_EXTERNAL\",\n ]\n\n\ndef ping(host):\n \"\"\"Returns True if host responds to a ping request\"\"\"\n is_windows = platform.system().lower() == \"windows\"\n ping_opts = \"-n 1\" if is_windows else \"-c 1\"\n args = \"ping %s %s\" % (ping_opts, host)\n return (\n subprocess.call(args, shell=not is_windows, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n == 0\n )\n\n\ndef in_docker():\n \"\"\"\n Returns True if running in a docker container, else False\n Ref. https://docs.docker.com/config/containers/runmetrics/#control-groups\n \"\"\"\n if OVERRIDE_IN_DOCKER:\n return True\n if os.path.exists(\"/.dockerenv\"):\n return True\n if not os.path.exists(\"/proc/1/cgroup\"):\n return False\n try:\n if any(\n [\n os.path.exists(\"/sys/fs/cgroup/memory/docker/\"),\n any(\n [\n \"docker-\" in file_names\n for file_names in os.listdir(\"/sys/fs/cgroup/memory/system.slice\")\n ]\n ),\n os.path.exists(\"/sys/fs/cgroup/docker/\"),\n any(\n [\n \"docker-\" in file_names\n for file_names in os.listdir(\"/sys/fs/cgroup/system.slice/\")\n ]\n ),\n ]\n ):\n return False\n except Exception:\n pass\n with open(\"/proc/1/cgroup\", \"rt\") as ifh:\n os_hostname = open(\"/etc/hostname\", \"rt\").read().strip()\n content = ifh.read()\n if os_hostname in content or \"docker\" in content:\n return True\n return False\n\n\nis_in_docker = in_docker()\nis_in_linux = is_linux()\n\n# determine IP of Docker bridge\nif not DOCKER_BRIDGE_IP:\n DOCKER_BRIDGE_IP = \"172.17.0.1\"\n if is_in_docker:\n candidates = (DOCKER_BRIDGE_IP, \"172.18.0.1\")\n for ip in candidates:\n if ping(ip):\n DOCKER_BRIDGE_IP = ip\n break\n\n# determine route to Docker host from container\ntry:\n DOCKER_HOST_FROM_CONTAINER = DOCKER_BRIDGE_IP\n if not is_in_docker and not is_in_linux:\n # If we're running outside docker, and would like the Lambda containers to be able\n # to access services running on the local machine, set DOCKER_HOST_FROM_CONTAINER accordingly\n if LOCALSTACK_HOSTNAME == LOCALHOST:\n DOCKER_HOST_FROM_CONTAINER = \"host.docker.internal\"\n # update LOCALSTACK_HOSTNAME if host.docker.internal is available\n if is_in_docker:\n DOCKER_HOST_FROM_CONTAINER = socket.gethostbyname(\"host.docker.internal\")\n if LOCALSTACK_HOSTNAME == DOCKER_BRIDGE_IP:\n LOCALSTACK_HOSTNAME = DOCKER_HOST_FROM_CONTAINER\nexcept socket.error:\n pass\n\n# make sure we default to LAMBDA_REMOTE_DOCKER=true if running in Docker\nif is_in_docker and not os.environ.get(\"LAMBDA_REMOTE_DOCKER\", \"\").strip():\n LAMBDA_REMOTE_DOCKER = True\n\n# local config file path in home directory\nCONFIG_FILE_PATH = os.path.join(TMP_FOLDER, \".localstack\")\nif not is_in_docker:\n CONFIG_FILE_PATH = os.path.join(expanduser(\"~\"), \".localstack\")\n\n# set variables no_proxy, i.e., run internal service calls directly\nno_proxy = \",\".join(set((LOCALSTACK_HOSTNAME, LOCALHOST, LOCALHOST_IP, \"[::1]\")))\nif os.environ.get(\"no_proxy\"):\n os.environ[\"no_proxy\"] += \",\" + no_proxy\nelif os.environ.get(\"NO_PROXY\"):\n os.environ[\"NO_PROXY\"] += \",\" + no_proxy\nelse:\n os.environ[\"no_proxy\"] = no_proxy\n\n# additional CLI commands, can be set by plugins\nCLI_COMMANDS = {}\n\n# set of valid regions\nVALID_PARTITIONS = set(Session().get_available_partitions())\nVALID_REGIONS = set()\nfor partition in VALID_PARTITIONS:\n for region in Session().get_available_regions(\"sns\", partition):\n VALID_REGIONS.add(region)\n\n\ndef parse_service_ports():\n \"\"\"Parses the environment variable $SERVICES with a comma-separated list of services\n and (optional) ports they should run on: 'service1:port1,service2,service3:port3'\"\"\"\n service_ports = os.environ.get(\"SERVICES\", \"\").strip()\n if not service_ports:\n return DEFAULT_SERVICE_PORTS\n result = {}\n for service_port in re.split(r\"\\s*,\\s*\", service_ports):\n parts = re.split(r\"[:=]\", service_port)\n service = parts[0]\n key_upper = service.upper().replace(\"-\", \"_\")\n port_env_name = \"%s_PORT\" % key_upper\n # (1) set default port number\n port_number = DEFAULT_SERVICE_PORTS.get(service)\n # (2) set port number from <SERVICE>_PORT environment, if present\n if os.environ.get(port_env_name):\n port_number = os.environ.get(port_env_name)\n # (3) set port number from <service>:<port> portion in $SERVICES, if present\n if len(parts) > 1:\n port_number = int(parts[-1])\n # (4) try to parse as int, fall back to 0 (invalid port)\n try:\n port_number = int(port_number)\n except Exception:\n port_number = 0\n result[service] = port_number\n return result\n\n\n# TODO: we need to investigate the performance impact of this\ndef populate_configs(service_ports=None):\n global SERVICE_PORTS, CONFIG_ENV_VARS\n\n SERVICE_PORTS = service_ports or parse_service_ports()\n globs = globals()\n protocol = get_protocol()\n\n # define service ports and URLs as environment variables\n for key, value in six.iteritems(DEFAULT_SERVICE_PORTS):\n key_upper = key.upper().replace(\"-\", \"_\")\n\n # define PORT_* variables with actual service ports as per configuration\n port_var_name = \"PORT_%s\" % key_upper\n port_number = service_port(key)\n globs[port_var_name] = port_number\n url = \"%s://%s:%s\" % (protocol, LOCALSTACK_HOSTNAME, port_number)\n # define TEST_*_URL variables with mock service endpoints\n url_key = \"TEST_%s_URL\" % key_upper\n # allow overwriting TEST_*_URL from user-defined environment variables\n existing = os.environ.get(url_key)\n url = existing or url\n # set global variable\n globs[url_key] = url\n # expose HOST_*_URL variables as environment variables\n os.environ[url_key] = url\n\n # expose LOCALSTACK_HOSTNAME as env. variable\n os.environ[\"LOCALSTACK_HOSTNAME\"] = LOCALSTACK_HOSTNAME\n\n # create variable aliases prefixed with LOCALSTACK_ (except LOCALSTACK_HOSTNAME)\n CONFIG_ENV_VARS += [\n \"LOCALSTACK_\" + v for v in CONFIG_ENV_VARS if not v.startswith(\"LOCALSTACK_\")\n ]\n CONFIG_ENV_VARS = list(set(CONFIG_ENV_VARS))\n\n\ndef service_port(service_key):\n if FORWARD_EDGE_INMEM:\n if service_key == \"elasticsearch\":\n # TODO Elasticsearch domains are a special case - we do not want to route them through\n # the edge service, as that would require too many route mappings. In the future, we\n # should integrate them with the port range for external services (4510-4530)\n return SERVICE_PORTS.get(service_key, 0)\n return get_edge_port_http()\n return SERVICE_PORTS.get(service_key, 0)\n\n\ndef get_protocol():\n return \"https\" if USE_SSL else \"http\"\n\n\ndef external_service_url(service_key, host=None):\n host = host or HOSTNAME_EXTERNAL\n return \"%s://%s:%s\" % (get_protocol(), host, service_port(service_key))\n\n\ndef get_edge_port_http():\n return EDGE_PORT_HTTP or EDGE_PORT\n\n\ndef get_edge_url(localstack_hostname=None, protocol=None):\n port = get_edge_port_http()\n protocol = protocol or get_protocol()\n localstack_hostname = localstack_hostname or LOCALSTACK_HOSTNAME\n return \"%s://%s:%s\" % (protocol, localstack_hostname, port)\n\n\n# initialize config values\npopulate_configs()\n\n# set log levels\nif DEBUG:\n logging.getLogger(\"\").setLevel(logging.DEBUG)\n logging.getLogger(\"localstack\").setLevel(logging.DEBUG)\n\n# whether to bundle multiple APIs into a single process, where possible\nBUNDLE_API_PROCESSES = True\n\n\ndef load_config_file(config_file=None):\n from localstack.utils.common import get_or_create_file, to_str\n\n config_file = config_file or CONFIG_FILE_PATH\n content = get_or_create_file(config_file)\n try:\n configs = json.loads(to_str(content) or \"{}\")\n except Exception as e:\n print(\"Unable to load local config file %s as JSON: %s\" % (config_file, e))\n return {}\n return configs\n\n\nif LS_LOG in TRACE_LOG_LEVELS:\n load_end_time = time.time()\n LOG = logging.getLogger(__name__)\n LOG.debug(\n \"Initializing the configuration took %s ms\" % int((load_end_time - load_start_time) * 1000)\n )\n", "path": "localstack/config.py" } ]
diff --git a/localstack/config.py b/localstack/config.py index 53ac92b18af18..3908046a04ad7 100644 --- a/localstack/config.py +++ b/localstack/config.py @@ -354,6 +354,7 @@ def is_linux(): "EXTRA_CORS_ALLOWED_HEADERS", "EXTRA_CORS_EXPOSE_HEADERS", "EXTRA_CORS_ALLOWED_ORIGINS", + "ENABLE_CONFIG_UPDATES", ] for key, value in six.iteritems(DEFAULT_SERVICE_PORTS):
Lightning-Universe__lightning-flash-666
ImageEmbedder default behavior is not a flattened output ## 🐛 Bug I discovered this issue while testing PR #655. If you run the [Image Embedding README example code](https://github.com/PyTorchLightning/lightning-flash#example-1-image-embedding), it returns a 3D tensor. My understanding from the use of embeddings in general, and how they are used in [Fifty One](https://voxel51.com/docs/fiftyone/tutorials/image_embeddings.html) is they expect the embeddings to be 1D (for each embedding). The reason it returns a 3D tensor is because it depends on the backbone used. The default there is `resnet101`, which returns a `2048x7x7` shape tensor. Others like inception return a flat 1D tensor, i.e. length-X. ### To Reproduce Steps to reproduce the behavior: Run the [README example](https://github.com/PyTorchLightning/lightning-flash#example-1-image-embedding), but remove the `embedding_dim` parameter. See below for example. Note: as-is, this will error on `print(embeddings.shape)`, regardless of configuration, since that is a list. But the question here is around the logic for the ImageEmbedder. #### Code sample ```python from flash.core.data.utils import download_data from flash.image import ImageEmbedder # 1. Download the data download_data("https://pl-flash-data.s3.amazonaws.com/hymenoptera_data.zip", "data/") # 2. Create an ImageEmbedder with resnet50 trained on imagenet. embedder = ImageEmbedder(backbone="resnet50") # 3. Generate an embedding from an image path. embeddings = embedder.predict("data/hymenoptera_data/predict/153783656_85f9c3ac70.jpg") # 4. Print embeddings shape print(embeddings.shape) ``` ### Expected behavior Expect to see a 100352x1 shape tensor as the output, instead of 2048x7x7. ### Environment - PyTorch Version (e.g., 1.0): 1.9 - OS (e.g., Linux): Linux - How you installed PyTorch (`conda`, `pip`, source): pip - Build command you used (if compiling from source): N/A - Python version: 3.8.6 - CUDA/cuDNN version: N/A - GPU models and configuration: N/A - Any other relevant information: N/A ### Additional context I believe the question is around what the logic should be here: https://github.com/PyTorchLightning/lightning-flash/blob/075de3a46d74d9fc0e769401063fede1f12d0518/flash/image/embedding/model.py#L85-L92 If `embedding_dim` is None, then the head is `nn.Identity()`. **If we desire a flat 1D embedding, then the question is: should `nn.Identity()` change to `nn.Flatten()`?** It could be argued that the user should be left to flatten after on their own, but per the contributing guidelines, I thought this would align with "[Force User Decisions To Best Practices](https://github.com/PyTorchLightning/lightning-flash/blob/ddd942d3dfe3884a97a855446410166c3c9f16d9/.github/CONTRIBUTING.md#force-user-decisions-to-best-practices)" Let me know your thoughts. If that makes sense, then I can update the code, run some tests, and update docs in a PR.
[ { "content": "# Copyright The PyTorch Lightning team.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nfrom typing import Any, Callable, Mapping, Optional, Sequence, Tuple, Type, Union\n\nimport torch\nfrom pytorch_lightning.utilities import rank_zero_warn\nfrom torch import nn\nfrom torch.nn import functional as F\nfrom torchmetrics import Accuracy, Metric\n\nfrom flash.core.data.data_source import DefaultDataKeys\nfrom flash.core.model import Task\nfrom flash.core.registry import FlashRegistry\nfrom flash.core.utilities.imports import _IMAGE_AVAILABLE\nfrom flash.core.utilities.isinstance import _isinstance\nfrom flash.image.classification.data import ImageClassificationPreprocess\n\nif _IMAGE_AVAILABLE:\n from flash.image.classification.backbones import IMAGE_CLASSIFIER_BACKBONES\nelse:\n IMAGE_CLASSIFIER_BACKBONES = FlashRegistry(\"backbones\")\n\n\nclass ImageEmbedder(Task):\n \"\"\"The ``ImageEmbedder`` is a :class:`~flash.Task` for obtaining feature vectors (embeddings) from images. For\n more details, see :ref:`image_embedder`.\n\n Args:\n embedding_dim: Dimension of the embedded vector. ``None`` uses the default from the backbone.\n backbone: A model to use to extract image features, defaults to ``\"swav-imagenet\"``.\n pretrained: Use a pretrained backbone, defaults to ``True``.\n loss_fn: Loss function for training and finetuning, defaults to :func:`torch.nn.functional.cross_entropy`\n optimizer: Optimizer to use for training and finetuning, defaults to :class:`torch.optim.SGD`.\n metrics: Metrics to compute for training and evaluation. Can either be an metric from the `torchmetrics`\n package, a custom metric inherenting from `torchmetrics.Metric`, a callable function or a list/dict\n containing a combination of the aforementioned. In all cases, each metric needs to have the signature\n `metric(preds,target)` and return a single scalar tensor. Defaults to :class:`torchmetrics.Accuracy`.\n learning_rate: Learning rate to use for training, defaults to ``1e-3``.\n pooling_fn: Function used to pool image to generate embeddings, defaults to :func:`torch.max`.\n \"\"\"\n\n backbones: FlashRegistry = IMAGE_CLASSIFIER_BACKBONES\n\n required_extras: str = \"image\"\n\n def __init__(\n self,\n embedding_dim: Optional[int] = None,\n backbone: str = \"resnet101\",\n pretrained: bool = True,\n loss_fn: Callable = F.cross_entropy,\n optimizer: Type[torch.optim.Optimizer] = torch.optim.SGD,\n metrics: Union[Metric, Callable, Mapping, Sequence, None] = (Accuracy()),\n learning_rate: float = 1e-3,\n pooling_fn: Callable = torch.max,\n ):\n super().__init__(\n model=None,\n loss_fn=loss_fn,\n optimizer=optimizer,\n metrics=metrics,\n learning_rate=learning_rate,\n preprocess=ImageClassificationPreprocess(),\n )\n\n self.save_hyperparameters()\n self.backbone_name = backbone\n self.embedding_dim = embedding_dim\n assert pooling_fn in [torch.mean, torch.max]\n self.pooling_fn = pooling_fn\n\n self.backbone, num_features = self.backbones.get(backbone)(pretrained=pretrained)\n\n if embedding_dim is None:\n self.head = nn.Identity()\n else:\n self.head = nn.Sequential(\n nn.Flatten(),\n nn.Linear(num_features, embedding_dim),\n )\n rank_zero_warn(\"Adding linear layer on top of backbone. Remember to finetune first before using!\")\n\n def apply_pool(self, x):\n x = self.pooling_fn(x, dim=-1)\n if _isinstance(x, Tuple[torch.Tensor, torch.Tensor]):\n x = x[0]\n x = self.pooling_fn(x, dim=-1)\n if _isinstance(x, Tuple[torch.Tensor, torch.Tensor]):\n x = x[0]\n return x\n\n def forward(self, x) -> torch.Tensor:\n x = self.backbone(x)\n\n # bolts ssl models return lists\n if isinstance(x, tuple):\n x = x[-1]\n\n if x.dim() == 4 and self.embedding_dim:\n x = self.apply_pool(x)\n\n x = self.head(x)\n return x\n\n def training_step(self, batch: Any, batch_idx: int) -> Any:\n batch = (batch[DefaultDataKeys.INPUT], batch[DefaultDataKeys.TARGET])\n return super().training_step(batch, batch_idx)\n\n def validation_step(self, batch: Any, batch_idx: int) -> Any:\n batch = (batch[DefaultDataKeys.INPUT], batch[DefaultDataKeys.TARGET])\n return super().validation_step(batch, batch_idx)\n\n def test_step(self, batch: Any, batch_idx: int) -> Any:\n batch = (batch[DefaultDataKeys.INPUT], batch[DefaultDataKeys.TARGET])\n return super().test_step(batch, batch_idx)\n\n def predict_step(self, batch: Any, batch_idx: int, dataloader_idx: int = 0) -> Any:\n batch = batch[DefaultDataKeys.INPUT]\n return super().predict_step(batch, batch_idx, dataloader_idx=dataloader_idx)\n", "path": "flash/image/embedding/model.py" } ]
[ { "content": "# Copyright The PyTorch Lightning team.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nfrom typing import Any, Callable, Mapping, Optional, Sequence, Tuple, Type, Union\n\nimport torch\nfrom pytorch_lightning.utilities import rank_zero_warn\nfrom torch import nn\nfrom torch.nn import functional as F\nfrom torchmetrics import Accuracy, Metric\n\nfrom flash.core.data.data_source import DefaultDataKeys\nfrom flash.core.model import Task\nfrom flash.core.registry import FlashRegistry\nfrom flash.core.utilities.imports import _IMAGE_AVAILABLE\nfrom flash.core.utilities.isinstance import _isinstance\nfrom flash.image.classification.data import ImageClassificationPreprocess\n\nif _IMAGE_AVAILABLE:\n from flash.image.classification.backbones import IMAGE_CLASSIFIER_BACKBONES\nelse:\n IMAGE_CLASSIFIER_BACKBONES = FlashRegistry(\"backbones\")\n\n\nclass ImageEmbedder(Task):\n \"\"\"The ``ImageEmbedder`` is a :class:`~flash.Task` for obtaining feature vectors (embeddings) from images. For\n more details, see :ref:`image_embedder`.\n\n Args:\n embedding_dim: Dimension of the embedded vector. ``None`` uses the default from the backbone.\n backbone: A model to use to extract image features, defaults to ``\"swav-imagenet\"``.\n pretrained: Use a pretrained backbone, defaults to ``True``.\n loss_fn: Loss function for training and finetuning, defaults to :func:`torch.nn.functional.cross_entropy`\n optimizer: Optimizer to use for training and finetuning, defaults to :class:`torch.optim.SGD`.\n metrics: Metrics to compute for training and evaluation. Can either be an metric from the `torchmetrics`\n package, a custom metric inherenting from `torchmetrics.Metric`, a callable function or a list/dict\n containing a combination of the aforementioned. In all cases, each metric needs to have the signature\n `metric(preds,target)` and return a single scalar tensor. Defaults to :class:`torchmetrics.Accuracy`.\n learning_rate: Learning rate to use for training, defaults to ``1e-3``.\n pooling_fn: Function used to pool image to generate embeddings, defaults to :func:`torch.max`.\n \"\"\"\n\n backbones: FlashRegistry = IMAGE_CLASSIFIER_BACKBONES\n\n required_extras: str = \"image\"\n\n def __init__(\n self,\n embedding_dim: Optional[int] = None,\n backbone: str = \"resnet101\",\n pretrained: bool = True,\n loss_fn: Callable = F.cross_entropy,\n optimizer: Type[torch.optim.Optimizer] = torch.optim.SGD,\n metrics: Union[Metric, Callable, Mapping, Sequence, None] = (Accuracy()),\n learning_rate: float = 1e-3,\n pooling_fn: Callable = torch.max,\n ):\n super().__init__(\n model=None,\n loss_fn=loss_fn,\n optimizer=optimizer,\n metrics=metrics,\n learning_rate=learning_rate,\n preprocess=ImageClassificationPreprocess(),\n )\n\n self.save_hyperparameters()\n self.backbone_name = backbone\n self.embedding_dim = embedding_dim\n assert pooling_fn in [torch.mean, torch.max]\n self.pooling_fn = pooling_fn\n\n self.backbone, num_features = self.backbones.get(backbone)(pretrained=pretrained)\n\n if embedding_dim is None:\n self.head = nn.Identity()\n else:\n self.head = nn.Sequential(\n nn.Flatten(),\n nn.Linear(num_features, embedding_dim),\n )\n rank_zero_warn(\"Adding linear layer on top of backbone. Remember to finetune first before using!\")\n\n def apply_pool(self, x):\n x = self.pooling_fn(x, dim=-1)\n if _isinstance(x, Tuple[torch.Tensor, torch.Tensor]):\n x = x[0]\n x = self.pooling_fn(x, dim=-1)\n if _isinstance(x, Tuple[torch.Tensor, torch.Tensor]):\n x = x[0]\n return x\n\n def forward(self, x) -> torch.Tensor:\n x = self.backbone(x)\n\n # bolts ssl models return lists\n if isinstance(x, tuple):\n x = x[-1]\n\n if x.dim() == 4 and not self.embedding_dim:\n x = self.apply_pool(x)\n\n x = self.head(x)\n return x\n\n def training_step(self, batch: Any, batch_idx: int) -> Any:\n batch = (batch[DefaultDataKeys.INPUT], batch[DefaultDataKeys.TARGET])\n return super().training_step(batch, batch_idx)\n\n def validation_step(self, batch: Any, batch_idx: int) -> Any:\n batch = (batch[DefaultDataKeys.INPUT], batch[DefaultDataKeys.TARGET])\n return super().validation_step(batch, batch_idx)\n\n def test_step(self, batch: Any, batch_idx: int) -> Any:\n batch = (batch[DefaultDataKeys.INPUT], batch[DefaultDataKeys.TARGET])\n return super().test_step(batch, batch_idx)\n\n def predict_step(self, batch: Any, batch_idx: int, dataloader_idx: int = 0) -> Any:\n batch = batch[DefaultDataKeys.INPUT]\n return super().predict_step(batch, batch_idx, dataloader_idx=dataloader_idx)\n", "path": "flash/image/embedding/model.py" } ]
diff --git a/flash/image/embedding/model.py b/flash/image/embedding/model.py index f5e2c0cca9..a8cab9b90a 100644 --- a/flash/image/embedding/model.py +++ b/flash/image/embedding/model.py @@ -107,7 +107,7 @@ def forward(self, x) -> torch.Tensor: if isinstance(x, tuple): x = x[-1] - if x.dim() == 4 and self.embedding_dim: + if x.dim() == 4 and not self.embedding_dim: x = self.apply_pool(x) x = self.head(x)
buildbot__buildbot-3653
Default value is set wrong when triggerring a forcescheduler by REST API Assume that we have a forcescheduler like below: ```python ForceBuild_Scheduler = ForceScheduler( name="ForceBuild_Scheduler", buttonName=u"Force, label=u"Force", builderNames=["BuildEntry_Builder"], username=util.UserNameParameter(hide=True), reason=util.StringParameter(name="reason", default="force build", hide=True), codebases=[util.CodebaseParameter(codebase='', hide=True)], properties=[util.BooleanParameter(name="DefaultToBeTrue", label=u"", default=True)) ``` And trigger this scheduler by sending a post with body like below which doesn't contain value of `DefaultToBeTrue`. ```json { "jsonrpc": "2.0", "method":"force", "id":843, "params":{ } } ``` And then you can find that `self.getProperty('DefaultToBeTrue')` whose default value is set to `True` returns `False` in your buildstep. Default value is set wrong when triggerring a forcescheduler by REST API Assume that we have a forcescheduler like below: ```python ForceBuild_Scheduler = ForceScheduler( name="ForceBuild_Scheduler", buttonName=u"Force, label=u"Force", builderNames=["BuildEntry_Builder"], username=util.UserNameParameter(hide=True), reason=util.StringParameter(name="reason", default="force build", hide=True), codebases=[util.CodebaseParameter(codebase='', hide=True)], properties=[util.BooleanParameter(name="DefaultToBeTrue", label=u"", default=True)) ``` And trigger this scheduler by sending a post with body like below which doesn't contain value of `DefaultToBeTrue`. ```json { "jsonrpc": "2.0", "method":"force", "id":843, "params":{ } } ``` And then you can find that `self.getProperty('DefaultToBeTrue')` whose default value is set to `True` returns `False` in your buildstep.
[ { "content": "# This file is part of Buildbot. Buildbot is free software: you can\n# redistribute it and/or modify it under the terms of the GNU General Public\n# License as published by the Free Software Foundation, version 2.\n#\n# This program is distributed in the hope that it will be useful, but WITHOUT\n# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS\n# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more\n# details.\n#\n# You should have received a copy of the GNU General Public License along with\n# this program; if not, write to the Free Software Foundation, Inc., 51\n# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.\n#\n# Copyright Buildbot Team Members\n\nfrom __future__ import absolute_import\nfrom __future__ import print_function\nfrom future.utils import iteritems\nfrom future.utils import itervalues\nfrom future.utils import string_types\n\nimport re\nimport traceback\n\nfrom twisted.internet import defer\nfrom twisted.python.reflect import accumulateClassList\n\nfrom buildbot import config\nfrom buildbot.process.properties import Properties\nfrom buildbot.reporters.mail import VALID_EMAIL_ADDR\nfrom buildbot.schedulers import base\nfrom buildbot.util import identifiers\nfrom buildbot.worker_transition import deprecatedWorkerModuleAttribute\n\n\nclass ValidationError(ValueError):\n pass\n\n\nclass CollectedValidationError(ValueError):\n\n def __init__(self, errors):\n self.errors = errors\n ValueError.__init__(\n self, \"\\n\".join([k + \":\" + v for k, v in iteritems(errors)]))\n\n\nclass ValidationErrorCollector(object):\n\n def __init__(self):\n self.errors = {}\n\n @defer.inlineCallbacks\n def collectValidationErrors(self, name, fn, *args, **kwargs):\n res = None\n try:\n res = yield defer.maybeDeferred(fn, *args, **kwargs)\n except CollectedValidationError as e:\n for error_name, e in iteritems(e.errors):\n self.errors[error_name] = e\n except ValueError as e:\n self.errors[name] = str(e)\n defer.returnValue(res)\n\n def maybeRaiseCollectedErrors(self):\n errors = self.errors\n if errors:\n raise CollectedValidationError(errors)\n\n\nDefaultField = object() # sentinel object to signal default behavior\n\n\nclass BaseParameter(object):\n\n \"\"\"\n BaseParameter provides a base implementation for property customization\n \"\"\"\n spec_attributes = [\"name\", \"fullName\", \"label\", \"tablabel\", \"type\", \"default\", \"required\",\n \"multiple\", \"regex\", \"hide\"]\n name = \"\"\n parentName = None\n label = \"\"\n tablabel = \"\"\n type = \"\"\n default = \"\"\n required = False\n multiple = False\n regex = None\n debug = True\n hide = False\n\n @property\n def fullName(self):\n \"\"\"A full name, intended to uniquely identify a parameter\"\"\"\n # join with '_' if both are set (cannot put '.', because it is used as\n # **kwargs)\n if self.parentName and self.name:\n return self.parentName + '_' + self.name\n # otherwise just use the one that is set\n # (this allows empty name for \"anonymous nests\")\n return self.name or self.parentName\n\n def setParent(self, parent):\n self.parentName = parent.fullName if parent else None\n\n def __init__(self, name, label=None, tablabel=None, regex=None, **kw):\n \"\"\"\n @param name: the name of the field, used during posting values\n back to the scheduler. This is not necessarily a UI value,\n and there may be restrictions on the characters allowed for\n this value. For example, HTML would require this field to\n avoid spaces and other punctuation ('-', '.', and '_' allowed)\n @type name: unicode\n\n @param label: (optional) the name of the field, used for UI display.\n @type label: unicode or None (to use 'name')\n\n @param regex: (optional) regex to validate the value with. Not used by\n all subclasses\n @type regex: unicode or regex\n \"\"\"\n\n if name in [\"owner\", \"builderNames\", \"builderid\"]:\n config.error(\n \"%s cannot be used as a parameter name, because it is reserved\" % (name,))\n self.name = name\n self.label = name if label is None else label\n self.tablabel = self.label if tablabel is None else tablabel\n if regex:\n self.regex = re.compile(regex)\n if 'value' in kw:\n config.error(\"Use default='%s' instead of value=... to give a \"\n \"default Parameter value\" % kw['value'])\n # all other properties are generically passed via **kw\n self.__dict__.update(kw)\n\n def getFromKwargs(self, kwargs):\n \"\"\"Simple customization point for child classes that do not need the other\n parameters supplied to updateFromKwargs. Return the value for the property\n named 'self.name'.\n\n The default implementation converts from a list of items, validates using\n the optional regex field and calls 'parse_from_args' for the final conversion.\n \"\"\"\n args = kwargs.get(self.fullName, [])\n\n # delete white space for args\n for arg in args:\n if not arg.strip():\n args.remove(arg)\n\n if not args:\n if self.required:\n raise ValidationError(\n \"'%s' needs to be specified\" % (self.label))\n if self.multiple:\n args = self.default\n else:\n args = [self.default]\n\n if self.regex:\n for arg in args:\n if not self.regex.match(arg):\n raise ValidationError(\"%s:'%s' does not match pattern '%s'\"\n % (self.label, arg, self.regex.pattern))\n\n try:\n arg = self.parse_from_args(args)\n except Exception as e:\n # an exception will just display an alert in the web UI\n # also log the exception\n if self.debug:\n traceback.print_exc()\n raise e\n if arg is None:\n raise ValidationError(\"need %s: no default provided by config\"\n % (self.fullName,))\n return arg\n\n def updateFromKwargs(self, properties, kwargs, collector, **unused):\n \"\"\"Primary entry point to turn 'kwargs' into 'properties'\"\"\"\n properties[self.name] = self.getFromKwargs(kwargs)\n\n def parse_from_args(self, l):\n \"\"\"Secondary customization point, called from getFromKwargs to turn\n a validated value into a single property value\"\"\"\n if self.multiple:\n return [self.parse_from_arg(arg) for arg in l]\n return self.parse_from_arg(l[0])\n\n def parse_from_arg(self, s):\n return s\n\n def getSpec(self):\n spec_attributes = []\n accumulateClassList(self.__class__, 'spec_attributes', spec_attributes)\n ret = {}\n for i in spec_attributes:\n ret[i] = getattr(self, i)\n return ret\n\n\nclass FixedParameter(BaseParameter):\n\n \"\"\"A fixed parameter that cannot be modified by the user.\"\"\"\n type = \"fixed\"\n hide = True\n default = \"\"\n\n def parse_from_args(self, l):\n return self.default\n\n\nclass StringParameter(BaseParameter):\n\n \"\"\"A simple string parameter\"\"\"\n spec_attributes = [\"size\"]\n type = \"text\"\n size = 10\n\n def parse_from_arg(self, s):\n return s\n\n\nclass TextParameter(StringParameter):\n\n \"\"\"A generic string parameter that may span multiple lines\"\"\"\n spec_attributes = [\"cols\", \"rows\"]\n type = \"textarea\"\n cols = 80\n rows = 20\n\n def value_to_text(self, value):\n return str(value)\n\n\nclass IntParameter(StringParameter):\n\n \"\"\"An integer parameter\"\"\"\n type = \"int\"\n default = 0\n parse_from_arg = int # will throw an exception if parse fail\n\n\nclass BooleanParameter(BaseParameter):\n\n \"\"\"A boolean parameter\"\"\"\n type = \"bool\"\n\n def getFromKwargs(self, kwargs):\n return kwargs.get(self.fullName, None) == [True]\n\n\nclass UserNameParameter(StringParameter):\n\n \"\"\"A username parameter to supply the 'owner' of a build\"\"\"\n spec_attributes = [\"need_email\"]\n type = \"username\"\n default = \"\"\n size = 30\n need_email = True\n\n def __init__(self, name=\"username\", label=\"Your name:\", **kw):\n BaseParameter.__init__(self, name, label, **kw)\n\n def parse_from_arg(self, s):\n if not s and not self.required:\n return s\n if self.need_email:\n res = VALID_EMAIL_ADDR.search(s)\n if res is None:\n raise ValidationError(\"%s: please fill in email address in the \"\n \"form 'User <[email protected]>'\" % (self.name,))\n return s\n\n\nclass ChoiceStringParameter(BaseParameter):\n\n \"\"\"A list of strings, allowing the selection of one of the predefined values.\n The 'strict' parameter controls whether values outside the predefined list\n of choices are allowed\"\"\"\n spec_attributes = [\"choices\", \"strict\"]\n type = \"list\"\n choices = []\n strict = True\n\n def parse_from_arg(self, s):\n if self.strict and s not in self.choices:\n raise ValidationError(\n \"'%s' does not belong to list of available choices '%s'\" % (s, self.choices))\n return s\n\n def getChoices(self, master, scheduler, buildername):\n return self.choices\n\n\nclass InheritBuildParameter(ChoiceStringParameter):\n\n \"\"\"A parameter that takes its values from another build\"\"\"\n type = ChoiceStringParameter.type\n name = \"inherit\"\n compatible_builds = None\n\n def getChoices(self, master, scheduler, buildername):\n return self.compatible_builds(master.status, buildername)\n\n def getFromKwargs(self, kwargs):\n raise ValidationError(\n \"InheritBuildParameter can only be used by properties\")\n\n def updateFromKwargs(self, master, properties, changes, kwargs, **unused):\n arg = kwargs.get(self.fullName, [\"\"])[0]\n split_arg = arg.split(\" \")[0].split(\"/\")\n if len(split_arg) != 2:\n raise ValidationError(\"bad build: %s\" % (arg))\n builder, num = split_arg\n builder_status = master.status.getBuilder(builder)\n if not builder_status:\n raise ValidationError(\"unknown builder: %s in %s\" % (builder, arg))\n b = builder_status.getBuild(int(num))\n if not b:\n raise ValidationError(\"unknown build: %d in %s\" % (num, arg))\n props = {self.name: (arg.split(\" \")[0])}\n for name, value, source in b.getProperties().asList():\n if source == \"Force Build Form\":\n if name == \"owner\":\n name = \"orig_owner\"\n props[name] = value\n properties.update(props)\n changes.extend(b.changes)\n\n\nclass WorkerChoiceParameter(ChoiceStringParameter):\n\n \"\"\"A parameter that lets the worker name be explicitly chosen.\n\n This parameter works in conjunction with 'buildbot.process.builder.enforceChosenWorker',\n which should be added as the 'canStartBuild' parameter to the Builder.\n\n The \"anySentinel\" parameter represents the sentinel value to specify that\n there is no worker preference.\n \"\"\"\n anySentinel = '-any-'\n label = 'Worker'\n required = False\n strict = False\n\n def __init__(self, name='workername', **kwargs):\n ChoiceStringParameter.__init__(self, name, **kwargs)\n\n def updateFromKwargs(self, kwargs, **unused):\n workername = self.getFromKwargs(kwargs)\n if workername == self.anySentinel:\n # no preference, so don't set a parameter at all\n return\n ChoiceStringParameter.updateFromKwargs(self, kwargs=kwargs, **unused)\n\n def getChoices(self, master, scheduler, buildername):\n if buildername is None:\n # this is the \"Force All Builds\" page\n workernames = master.status.getWorkerNames()\n else:\n builderStatus = master.status.getBuilder(buildername)\n workernames = [worker.getName()\n for worker in builderStatus.getWorkers()]\n workernames.sort()\n workernames.insert(0, self.anySentinel)\n return workernames\n\n\ndeprecatedWorkerModuleAttribute(locals(), WorkerChoiceParameter,\n compat_name=\"BuildslaveChoiceParameter\")\n\n\nclass NestedParameter(BaseParameter):\n\n \"\"\"A 'parent' parameter for a set of related parameters. This provides a\n logical grouping for the child parameters.\n\n Typically, the 'fullName' of the child parameters mix in the parent's\n 'fullName'. This allows for a field to appear multiple times in a form\n (for example, two codebases each have a 'branch' field).\n\n If the 'name' of the parent is the empty string, then the parent's name\n does not mix in with the child 'fullName'. This is useful when a field\n will not appear multiple time in a scheduler but the logical grouping is\n helpful.\n\n The result of a NestedParameter is typically a dictionary, with the key/value\n being the name/value of the children.\n \"\"\"\n spec_attributes = [\n \"layout\", \"columns\"] # field is recursive, and thus managed in custom getSpec\n type = 'nested'\n layout = 'vertical'\n fields = None\n columns = None\n\n def __init__(self, name, fields, **kwargs):\n BaseParameter.__init__(self, fields=fields, name=name, **kwargs)\n # reasonable defaults for the number of columns\n if self.columns is None:\n num_visible_fields = len(\n [field for field in fields if not field.hide])\n if num_visible_fields >= 4:\n self.columns = 2\n else:\n self.columns = 1\n if self.columns > 4:\n config.error(\n \"UI only support up to 4 columns in nested parameters\")\n\n # fix up the child nodes with the parent (use None for now):\n self.setParent(None)\n\n def setParent(self, parent):\n BaseParameter.setParent(self, parent)\n for field in self.fields: # pylint: disable=not-an-iterable\n field.setParent(self)\n\n @defer.inlineCallbacks\n def collectChildProperties(self, kwargs, properties, collector, **kw):\n \"\"\"Collapse the child values into a dictionary. This is intended to be\n called by child classes to fix up the fullName->name conversions.\"\"\"\n\n childProperties = {}\n for field in self.fields: # pylint: disable=not-an-iterable\n yield collector.collectValidationErrors(field.fullName,\n field.updateFromKwargs,\n kwargs=kwargs,\n properties=childProperties,\n collector=collector,\n **kw)\n kwargs[self.fullName] = childProperties\n\n @defer.inlineCallbacks\n def updateFromKwargs(self, kwargs, properties, collector, **kw):\n \"\"\"By default, the child values will be collapsed into a dictionary. If\n the parent is anonymous, this dictionary is the top-level properties.\"\"\"\n yield self.collectChildProperties(kwargs=kwargs, properties=properties,\n collector=collector, **kw)\n # default behavior is to set a property\n # -- use setdefault+update in order to collapse 'anonymous' nested\n # parameters correctly\n if self.name:\n d = properties.setdefault(self.name, {})\n else:\n # if there's no name, collapse this nest all the way\n d = properties\n d.update(kwargs[self.fullName])\n\n def getSpec(self):\n ret = BaseParameter.getSpec(self)\n # pylint: disable=not-an-iterable\n ret['fields'] = sorted([field.getSpec() for field in self.fields],\n key=lambda x: x['name'])\n return ret\n\n\nParameterGroup = NestedParameter\n\n\nclass AnyPropertyParameter(NestedParameter):\n\n \"\"\"A generic property parameter, where both the name and value of the property\n must be given.\"\"\"\n type = NestedParameter.type\n\n def __init__(self, name, **kw):\n fields = [\n StringParameter(name='name', label=\"Name:\"),\n StringParameter(name='value', label=\"Value:\"),\n ]\n NestedParameter.__init__(self, name, label='', fields=fields, **kw)\n\n def getFromKwargs(self, kwargs):\n raise ValidationError(\n \"AnyPropertyParameter can only be used by properties\")\n\n @defer.inlineCallbacks\n def updateFromKwargs(self, master, properties, kwargs, collector, **kw):\n yield self.collectChildProperties(master=master,\n properties=properties,\n kwargs=kwargs,\n collector=collector,\n **kw)\n\n pname = kwargs[self.fullName].get(\"name\", \"\")\n pvalue = kwargs[self.fullName].get(\"value\", \"\")\n if not pname:\n return\n\n validation = master.config.validation\n pname_validate = validation['property_name']\n pval_validate = validation['property_value']\n\n if not pname_validate.match(pname) \\\n or not pval_validate.match(pvalue):\n raise ValidationError(\n \"bad property name='%s', value='%s'\" % (pname, pvalue))\n properties[pname] = pvalue\n\n\nclass CodebaseParameter(NestedParameter):\n\n \"\"\"A parameter whose result is a codebase specification instead of a property\"\"\"\n type = NestedParameter.type\n codebase = ''\n\n def __init__(self,\n codebase,\n name=None,\n label=None,\n\n branch=DefaultField,\n revision=DefaultField,\n repository=DefaultField,\n project=DefaultField,\n\n **kwargs):\n \"\"\"\n A set of properties that will be used to generate a codebase dictionary.\n\n The branch/revision/repository/project should each be a parameter that\n will map to the corresponding value in the sourcestamp. Use None to disable\n the field.\n\n @param codebase: name of the codebase; used as key for the sourcestamp set\n @type codebase: unicode\n\n @param name: optional override for the name-currying for the subfields\n @type codebase: unicode\n\n @param label: optional override for the label for this set of parameters\n @type codebase: unicode\n \"\"\"\n\n name = name or codebase\n if label is None and codebase:\n label = \"Codebase: \" + codebase\n\n fields_dict = dict(branch=branch, revision=revision,\n repository=repository, project=project)\n for k, v in iteritems(fields_dict):\n if v is DefaultField:\n v = StringParameter(name=k, label=k.capitalize() + \":\")\n elif isinstance(v, string_types):\n v = FixedParameter(name=k, default=v)\n fields_dict[k] = v\n\n fields = [val for val in fields_dict.values() if val]\n\n NestedParameter.__init__(self, name=name, label=label,\n codebase=codebase,\n fields=fields, **kwargs)\n\n def createSourcestamp(self, properties, kwargs):\n # default, just return the things we put together\n return kwargs.get(self.fullName, {})\n\n @defer.inlineCallbacks\n def updateFromKwargs(self, sourcestamps, kwargs, properties, collector, **kw):\n yield self.collectChildProperties(sourcestamps=sourcestamps,\n properties=properties,\n kwargs=kwargs,\n collector=collector,\n **kw)\n\n # convert the \"property\" to a sourcestamp\n ss = self.createSourcestamp(properties, kwargs)\n if ss is not None:\n sourcestamps[self.codebase] = ss\n\n\ndef oneCodebase(**kw):\n return [CodebaseParameter('', **kw)]\n\n\nclass ForceScheduler(base.BaseScheduler):\n\n \"\"\"\n ForceScheduler implements the backend for a UI to allow customization of\n builds. For example, a web form be populated to trigger a build.\n \"\"\"\n compare_attrs = base.BaseScheduler.compare_attrs + \\\n ('builderNames',\n 'reason', 'username',\n 'forcedProperties')\n\n def __init__(self, name, builderNames,\n username=UserNameParameter(),\n reason=StringParameter(\n name=\"reason\", default=\"force build\", size=20),\n reasonString=\"A build was forced by '%(owner)s': %(reason)s\",\n buttonName=None,\n codebases=None,\n label=None,\n properties=None):\n \"\"\"\n Initialize a ForceScheduler.\n\n The UI will provide a set of fields to the user; these fields are\n driven by a corresponding child class of BaseParameter.\n\n Use NestedParameter to provide logical groupings for parameters.\n\n The branch/revision/repository/project fields are deprecated and\n provided only for backwards compatibility. Using a Codebase(name='')\n will give the equivalent behavior.\n\n @param name: name of this scheduler (used as a key for state)\n @type name: unicode\n\n @param builderNames: list of builders this scheduler may start\n @type builderNames: list of unicode\n\n @param username: the \"owner\" for a build (may not be shown depending\n on the Auth configuration for the master)\n @type username: BaseParameter\n\n @param reason: the \"reason\" for a build\n @type reason: BaseParameter\n\n @param codebases: the codebases for a build\n @type codebases: list of string's or CodebaseParameter's;\n None will generate a default, but [] will\n remove all codebases\n\n @param properties: extra properties to configure the build\n @type properties: list of BaseParameter's\n \"\"\"\n\n if not self.checkIfType(name, str):\n config.error(\"ForceScheduler name must be a unicode string: %r\" %\n name)\n\n if not name:\n config.error(\"ForceScheduler name must not be empty: %r\" %\n name)\n\n if not identifiers.ident_re.match(name):\n config.error(\"ForceScheduler name must be an identifier: %r\" %\n name)\n\n if not self.checkIfListOfType(builderNames, string_types):\n config.error(\"ForceScheduler '%s': builderNames must be a list of strings: %r\" %\n (name, builderNames))\n\n if self.checkIfType(reason, BaseParameter):\n self.reason = reason\n else:\n config.error(\"ForceScheduler '%s': reason must be a StringParameter: %r\" %\n (name, reason))\n\n if properties is None:\n properties = []\n if not self.checkIfListOfType(properties, BaseParameter):\n config.error(\"ForceScheduler '%s': properties must be a list of BaseParameters: %r\" %\n (name, properties))\n\n if self.checkIfType(username, BaseParameter):\n self.username = username\n else:\n config.error(\"ForceScheduler '%s': username must be a StringParameter: %r\" %\n (name, username))\n\n self.forcedProperties = []\n self.label = name if label is None else label\n\n # Use the default single codebase form if none are provided\n if codebases is None:\n codebases = [CodebaseParameter(codebase='')]\n elif not codebases:\n config.error(\"ForceScheduler '%s': 'codebases' cannot be empty;\"\n \" use [CodebaseParameter(codebase='', hide=True)] if needed: %r \" % (\n name, codebases))\n elif not isinstance(codebases, list):\n config.error(\"ForceScheduler '%s': 'codebases' should be a list of strings or CodebaseParameter,\"\n \" not %s\" % (\n name, type(codebases)))\n\n codebase_dict = {}\n for codebase in codebases:\n if isinstance(codebase, string_types):\n codebase = CodebaseParameter(codebase=codebase)\n elif not isinstance(codebase, CodebaseParameter):\n config.error(\"ForceScheduler '%s': 'codebases' must be a list of strings\"\n \" or CodebaseParameter objects: %r\" % (\n name, codebases))\n\n self.forcedProperties.append(codebase)\n codebase_dict[codebase.codebase] = dict(\n branch='', repository='', revision='')\n\n base.BaseScheduler.__init__(self,\n name=name,\n builderNames=builderNames,\n properties={},\n codebases=codebase_dict)\n\n if properties:\n self.forcedProperties.extend(properties)\n\n # this is used to simplify the template\n self.all_fields = [NestedParameter(name='', fields=[username, reason])]\n self.all_fields.extend(self.forcedProperties)\n\n self.reasonString = reasonString\n self.buttonName = buttonName or name\n\n def checkIfType(self, obj, chkType):\n return isinstance(obj, chkType)\n\n def checkIfListOfType(self, obj, chkType):\n isListOfType = True\n\n if self.checkIfType(obj, list):\n for item in obj:\n if not self.checkIfType(item, chkType):\n isListOfType = False\n break\n else:\n isListOfType = False\n\n return isListOfType\n\n @defer.inlineCallbacks\n def gatherPropertiesAndChanges(self, collector, **kwargs):\n properties = {}\n changeids = []\n sourcestamps = {}\n\n for param in self.forcedProperties:\n yield collector.collectValidationErrors(param.fullName,\n param.updateFromKwargs,\n master=self.master,\n properties=properties,\n changes=changeids,\n sourcestamps=sourcestamps,\n collector=collector,\n kwargs=kwargs)\n changeids = [type(a) == int and a or a.number for a in changeids]\n\n real_properties = Properties()\n for pname, pvalue in iteritems(properties):\n real_properties.setProperty(pname, pvalue, \"Force Build Form\")\n\n defer.returnValue((real_properties, changeids, sourcestamps))\n\n @defer.inlineCallbacks\n def computeBuilderNames(self, builderNames=None, builderid=None):\n if builderNames is None:\n if builderid is not None:\n builder = yield self.master.data.get(('builders', str(builderid)))\n builderNames = [builder['name']]\n else:\n builderNames = self.builderNames\n else:\n builderNames = sorted(\n set(builderNames).intersection(self.builderNames))\n defer.returnValue(builderNames)\n\n @defer.inlineCallbacks\n def force(self, owner, builderNames=None, builderid=None, **kwargs):\n \"\"\"\n We check the parameters, and launch the build, if everything is correct\n \"\"\"\n builderNames = yield self.computeBuilderNames(builderNames, builderid)\n if not builderNames:\n raise KeyError(\"builderNames not specified or not supported\")\n\n # Currently the validation code expects all kwargs to be lists\n # I don't want to refactor that now so much sure we comply...\n kwargs = dict((k, [v]) if not isinstance(v, list) else (k, v)\n for k, v in iteritems(kwargs))\n\n # probably need to clean that out later as the IProperty is already a\n # validation mechanism\n collector = ValidationErrorCollector()\n reason = yield collector.collectValidationErrors(self.reason.fullName,\n self.reason.getFromKwargs, kwargs)\n if owner is None or owner == \"anonymous\":\n owner = yield collector.collectValidationErrors(self.username.fullName,\n self.username.getFromKwargs, kwargs)\n\n properties, changeids, sourcestamps = yield self.gatherPropertiesAndChanges(\n collector, **kwargs)\n\n collector.maybeRaiseCollectedErrors()\n\n properties.setProperty(\"reason\", reason, \"Force Build Form\")\n properties.setProperty(\"owner\", owner, \"Force Build Form\")\n\n r = self.reasonString % {'owner': owner, 'reason': reason}\n\n # turn sourcestamps into a list\n for cb, ss in iteritems(sourcestamps):\n ss['codebase'] = cb\n sourcestamps = list(itervalues(sourcestamps))\n\n # everything is validated, we can create our source stamp, and\n # buildrequest\n res = yield self.addBuildsetForSourceStampsWithDefaults(\n reason=r,\n sourcestamps=sourcestamps,\n properties=properties,\n builderNames=builderNames,\n )\n\n defer.returnValue(res)\n", "path": "master/buildbot/schedulers/forcesched.py" } ]
[ { "content": "# This file is part of Buildbot. Buildbot is free software: you can\n# redistribute it and/or modify it under the terms of the GNU General Public\n# License as published by the Free Software Foundation, version 2.\n#\n# This program is distributed in the hope that it will be useful, but WITHOUT\n# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS\n# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more\n# details.\n#\n# You should have received a copy of the GNU General Public License along with\n# this program; if not, write to the Free Software Foundation, Inc., 51\n# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.\n#\n# Copyright Buildbot Team Members\n\nfrom __future__ import absolute_import\nfrom __future__ import print_function\nfrom future.utils import iteritems\nfrom future.utils import itervalues\nfrom future.utils import string_types\n\nimport re\nimport traceback\n\nfrom twisted.internet import defer\nfrom twisted.python.reflect import accumulateClassList\n\nfrom buildbot import config\nfrom buildbot.process.properties import Properties\nfrom buildbot.reporters.mail import VALID_EMAIL_ADDR\nfrom buildbot.schedulers import base\nfrom buildbot.util import identifiers\nfrom buildbot.worker_transition import deprecatedWorkerModuleAttribute\n\n\nclass ValidationError(ValueError):\n pass\n\n\nclass CollectedValidationError(ValueError):\n\n def __init__(self, errors):\n self.errors = errors\n ValueError.__init__(\n self, \"\\n\".join([k + \":\" + v for k, v in iteritems(errors)]))\n\n\nclass ValidationErrorCollector(object):\n\n def __init__(self):\n self.errors = {}\n\n @defer.inlineCallbacks\n def collectValidationErrors(self, name, fn, *args, **kwargs):\n res = None\n try:\n res = yield defer.maybeDeferred(fn, *args, **kwargs)\n except CollectedValidationError as e:\n for error_name, e in iteritems(e.errors):\n self.errors[error_name] = e\n except ValueError as e:\n self.errors[name] = str(e)\n defer.returnValue(res)\n\n def maybeRaiseCollectedErrors(self):\n errors = self.errors\n if errors:\n raise CollectedValidationError(errors)\n\n\nDefaultField = object() # sentinel object to signal default behavior\n\n\nclass BaseParameter(object):\n\n \"\"\"\n BaseParameter provides a base implementation for property customization\n \"\"\"\n spec_attributes = [\"name\", \"fullName\", \"label\", \"tablabel\", \"type\", \"default\", \"required\",\n \"multiple\", \"regex\", \"hide\"]\n name = \"\"\n parentName = None\n label = \"\"\n tablabel = \"\"\n type = \"\"\n default = \"\"\n required = False\n multiple = False\n regex = None\n debug = True\n hide = False\n\n @property\n def fullName(self):\n \"\"\"A full name, intended to uniquely identify a parameter\"\"\"\n # join with '_' if both are set (cannot put '.', because it is used as\n # **kwargs)\n if self.parentName and self.name:\n return self.parentName + '_' + self.name\n # otherwise just use the one that is set\n # (this allows empty name for \"anonymous nests\")\n return self.name or self.parentName\n\n def setParent(self, parent):\n self.parentName = parent.fullName if parent else None\n\n def __init__(self, name, label=None, tablabel=None, regex=None, **kw):\n \"\"\"\n @param name: the name of the field, used during posting values\n back to the scheduler. This is not necessarily a UI value,\n and there may be restrictions on the characters allowed for\n this value. For example, HTML would require this field to\n avoid spaces and other punctuation ('-', '.', and '_' allowed)\n @type name: unicode\n\n @param label: (optional) the name of the field, used for UI display.\n @type label: unicode or None (to use 'name')\n\n @param regex: (optional) regex to validate the value with. Not used by\n all subclasses\n @type regex: unicode or regex\n \"\"\"\n\n if name in [\"owner\", \"builderNames\", \"builderid\"]:\n config.error(\n \"%s cannot be used as a parameter name, because it is reserved\" % (name,))\n self.name = name\n self.label = name if label is None else label\n self.tablabel = self.label if tablabel is None else tablabel\n if regex:\n self.regex = re.compile(regex)\n if 'value' in kw:\n config.error(\"Use default='%s' instead of value=... to give a \"\n \"default Parameter value\" % kw['value'])\n # all other properties are generically passed via **kw\n self.__dict__.update(kw)\n\n def getFromKwargs(self, kwargs):\n \"\"\"Simple customization point for child classes that do not need the other\n parameters supplied to updateFromKwargs. Return the value for the property\n named 'self.name'.\n\n The default implementation converts from a list of items, validates using\n the optional regex field and calls 'parse_from_args' for the final conversion.\n \"\"\"\n args = kwargs.get(self.fullName, [])\n\n # delete white space for args\n for arg in args:\n if not arg.strip():\n args.remove(arg)\n\n if not args:\n if self.required:\n raise ValidationError(\n \"'%s' needs to be specified\" % (self.label))\n if self.multiple:\n args = self.default\n else:\n args = [self.default]\n\n if self.regex:\n for arg in args:\n if not self.regex.match(arg):\n raise ValidationError(\"%s:'%s' does not match pattern '%s'\"\n % (self.label, arg, self.regex.pattern))\n\n try:\n arg = self.parse_from_args(args)\n except Exception as e:\n # an exception will just display an alert in the web UI\n # also log the exception\n if self.debug:\n traceback.print_exc()\n raise e\n if arg is None:\n raise ValidationError(\"need %s: no default provided by config\"\n % (self.fullName,))\n return arg\n\n def updateFromKwargs(self, properties, kwargs, collector, **unused):\n \"\"\"Primary entry point to turn 'kwargs' into 'properties'\"\"\"\n properties[self.name] = self.getFromKwargs(kwargs)\n\n def parse_from_args(self, l):\n \"\"\"Secondary customization point, called from getFromKwargs to turn\n a validated value into a single property value\"\"\"\n if self.multiple:\n return [self.parse_from_arg(arg) for arg in l]\n return self.parse_from_arg(l[0])\n\n def parse_from_arg(self, s):\n return s\n\n def getSpec(self):\n spec_attributes = []\n accumulateClassList(self.__class__, 'spec_attributes', spec_attributes)\n ret = {}\n for i in spec_attributes:\n ret[i] = getattr(self, i)\n return ret\n\n\nclass FixedParameter(BaseParameter):\n\n \"\"\"A fixed parameter that cannot be modified by the user.\"\"\"\n type = \"fixed\"\n hide = True\n default = \"\"\n\n def parse_from_args(self, l):\n return self.default\n\n\nclass StringParameter(BaseParameter):\n\n \"\"\"A simple string parameter\"\"\"\n spec_attributes = [\"size\"]\n type = \"text\"\n size = 10\n\n def parse_from_arg(self, s):\n return s\n\n\nclass TextParameter(StringParameter):\n\n \"\"\"A generic string parameter that may span multiple lines\"\"\"\n spec_attributes = [\"cols\", \"rows\"]\n type = \"textarea\"\n cols = 80\n rows = 20\n\n def value_to_text(self, value):\n return str(value)\n\n\nclass IntParameter(StringParameter):\n\n \"\"\"An integer parameter\"\"\"\n type = \"int\"\n default = 0\n parse_from_arg = int # will throw an exception if parse fail\n\n\nclass BooleanParameter(BaseParameter):\n\n \"\"\"A boolean parameter\"\"\"\n type = \"bool\"\n\n def getFromKwargs(self, kwargs):\n return kwargs.get(self.fullName, [self.default]) == [True]\n\n\nclass UserNameParameter(StringParameter):\n\n \"\"\"A username parameter to supply the 'owner' of a build\"\"\"\n spec_attributes = [\"need_email\"]\n type = \"username\"\n default = \"\"\n size = 30\n need_email = True\n\n def __init__(self, name=\"username\", label=\"Your name:\", **kw):\n BaseParameter.__init__(self, name, label, **kw)\n\n def parse_from_arg(self, s):\n if not s and not self.required:\n return s\n if self.need_email:\n res = VALID_EMAIL_ADDR.search(s)\n if res is None:\n raise ValidationError(\"%s: please fill in email address in the \"\n \"form 'User <[email protected]>'\" % (self.name,))\n return s\n\n\nclass ChoiceStringParameter(BaseParameter):\n\n \"\"\"A list of strings, allowing the selection of one of the predefined values.\n The 'strict' parameter controls whether values outside the predefined list\n of choices are allowed\"\"\"\n spec_attributes = [\"choices\", \"strict\"]\n type = \"list\"\n choices = []\n strict = True\n\n def parse_from_arg(self, s):\n if self.strict and s not in self.choices:\n raise ValidationError(\n \"'%s' does not belong to list of available choices '%s'\" % (s, self.choices))\n return s\n\n def getChoices(self, master, scheduler, buildername):\n return self.choices\n\n\nclass InheritBuildParameter(ChoiceStringParameter):\n\n \"\"\"A parameter that takes its values from another build\"\"\"\n type = ChoiceStringParameter.type\n name = \"inherit\"\n compatible_builds = None\n\n def getChoices(self, master, scheduler, buildername):\n return self.compatible_builds(master.status, buildername)\n\n def getFromKwargs(self, kwargs):\n raise ValidationError(\n \"InheritBuildParameter can only be used by properties\")\n\n def updateFromKwargs(self, master, properties, changes, kwargs, **unused):\n arg = kwargs.get(self.fullName, [\"\"])[0]\n split_arg = arg.split(\" \")[0].split(\"/\")\n if len(split_arg) != 2:\n raise ValidationError(\"bad build: %s\" % (arg))\n builder, num = split_arg\n builder_status = master.status.getBuilder(builder)\n if not builder_status:\n raise ValidationError(\"unknown builder: %s in %s\" % (builder, arg))\n b = builder_status.getBuild(int(num))\n if not b:\n raise ValidationError(\"unknown build: %d in %s\" % (num, arg))\n props = {self.name: (arg.split(\" \")[0])}\n for name, value, source in b.getProperties().asList():\n if source == \"Force Build Form\":\n if name == \"owner\":\n name = \"orig_owner\"\n props[name] = value\n properties.update(props)\n changes.extend(b.changes)\n\n\nclass WorkerChoiceParameter(ChoiceStringParameter):\n\n \"\"\"A parameter that lets the worker name be explicitly chosen.\n\n This parameter works in conjunction with 'buildbot.process.builder.enforceChosenWorker',\n which should be added as the 'canStartBuild' parameter to the Builder.\n\n The \"anySentinel\" parameter represents the sentinel value to specify that\n there is no worker preference.\n \"\"\"\n anySentinel = '-any-'\n label = 'Worker'\n required = False\n strict = False\n\n def __init__(self, name='workername', **kwargs):\n ChoiceStringParameter.__init__(self, name, **kwargs)\n\n def updateFromKwargs(self, kwargs, **unused):\n workername = self.getFromKwargs(kwargs)\n if workername == self.anySentinel:\n # no preference, so don't set a parameter at all\n return\n ChoiceStringParameter.updateFromKwargs(self, kwargs=kwargs, **unused)\n\n def getChoices(self, master, scheduler, buildername):\n if buildername is None:\n # this is the \"Force All Builds\" page\n workernames = master.status.getWorkerNames()\n else:\n builderStatus = master.status.getBuilder(buildername)\n workernames = [worker.getName()\n for worker in builderStatus.getWorkers()]\n workernames.sort()\n workernames.insert(0, self.anySentinel)\n return workernames\n\n\ndeprecatedWorkerModuleAttribute(locals(), WorkerChoiceParameter,\n compat_name=\"BuildslaveChoiceParameter\")\n\n\nclass NestedParameter(BaseParameter):\n\n \"\"\"A 'parent' parameter for a set of related parameters. This provides a\n logical grouping for the child parameters.\n\n Typically, the 'fullName' of the child parameters mix in the parent's\n 'fullName'. This allows for a field to appear multiple times in a form\n (for example, two codebases each have a 'branch' field).\n\n If the 'name' of the parent is the empty string, then the parent's name\n does not mix in with the child 'fullName'. This is useful when a field\n will not appear multiple time in a scheduler but the logical grouping is\n helpful.\n\n The result of a NestedParameter is typically a dictionary, with the key/value\n being the name/value of the children.\n \"\"\"\n spec_attributes = [\n \"layout\", \"columns\"] # field is recursive, and thus managed in custom getSpec\n type = 'nested'\n layout = 'vertical'\n fields = None\n columns = None\n\n def __init__(self, name, fields, **kwargs):\n BaseParameter.__init__(self, fields=fields, name=name, **kwargs)\n # reasonable defaults for the number of columns\n if self.columns is None:\n num_visible_fields = len(\n [field for field in fields if not field.hide])\n if num_visible_fields >= 4:\n self.columns = 2\n else:\n self.columns = 1\n if self.columns > 4:\n config.error(\n \"UI only support up to 4 columns in nested parameters\")\n\n # fix up the child nodes with the parent (use None for now):\n self.setParent(None)\n\n def setParent(self, parent):\n BaseParameter.setParent(self, parent)\n for field in self.fields: # pylint: disable=not-an-iterable\n field.setParent(self)\n\n @defer.inlineCallbacks\n def collectChildProperties(self, kwargs, properties, collector, **kw):\n \"\"\"Collapse the child values into a dictionary. This is intended to be\n called by child classes to fix up the fullName->name conversions.\"\"\"\n\n childProperties = {}\n for field in self.fields: # pylint: disable=not-an-iterable\n yield collector.collectValidationErrors(field.fullName,\n field.updateFromKwargs,\n kwargs=kwargs,\n properties=childProperties,\n collector=collector,\n **kw)\n kwargs[self.fullName] = childProperties\n\n @defer.inlineCallbacks\n def updateFromKwargs(self, kwargs, properties, collector, **kw):\n \"\"\"By default, the child values will be collapsed into a dictionary. If\n the parent is anonymous, this dictionary is the top-level properties.\"\"\"\n yield self.collectChildProperties(kwargs=kwargs, properties=properties,\n collector=collector, **kw)\n # default behavior is to set a property\n # -- use setdefault+update in order to collapse 'anonymous' nested\n # parameters correctly\n if self.name:\n d = properties.setdefault(self.name, {})\n else:\n # if there's no name, collapse this nest all the way\n d = properties\n d.update(kwargs[self.fullName])\n\n def getSpec(self):\n ret = BaseParameter.getSpec(self)\n # pylint: disable=not-an-iterable\n ret['fields'] = sorted([field.getSpec() for field in self.fields],\n key=lambda x: x['name'])\n return ret\n\n\nParameterGroup = NestedParameter\n\n\nclass AnyPropertyParameter(NestedParameter):\n\n \"\"\"A generic property parameter, where both the name and value of the property\n must be given.\"\"\"\n type = NestedParameter.type\n\n def __init__(self, name, **kw):\n fields = [\n StringParameter(name='name', label=\"Name:\"),\n StringParameter(name='value', label=\"Value:\"),\n ]\n NestedParameter.__init__(self, name, label='', fields=fields, **kw)\n\n def getFromKwargs(self, kwargs):\n raise ValidationError(\n \"AnyPropertyParameter can only be used by properties\")\n\n @defer.inlineCallbacks\n def updateFromKwargs(self, master, properties, kwargs, collector, **kw):\n yield self.collectChildProperties(master=master,\n properties=properties,\n kwargs=kwargs,\n collector=collector,\n **kw)\n\n pname = kwargs[self.fullName].get(\"name\", \"\")\n pvalue = kwargs[self.fullName].get(\"value\", \"\")\n if not pname:\n return\n\n validation = master.config.validation\n pname_validate = validation['property_name']\n pval_validate = validation['property_value']\n\n if not pname_validate.match(pname) \\\n or not pval_validate.match(pvalue):\n raise ValidationError(\n \"bad property name='%s', value='%s'\" % (pname, pvalue))\n properties[pname] = pvalue\n\n\nclass CodebaseParameter(NestedParameter):\n\n \"\"\"A parameter whose result is a codebase specification instead of a property\"\"\"\n type = NestedParameter.type\n codebase = ''\n\n def __init__(self,\n codebase,\n name=None,\n label=None,\n\n branch=DefaultField,\n revision=DefaultField,\n repository=DefaultField,\n project=DefaultField,\n\n **kwargs):\n \"\"\"\n A set of properties that will be used to generate a codebase dictionary.\n\n The branch/revision/repository/project should each be a parameter that\n will map to the corresponding value in the sourcestamp. Use None to disable\n the field.\n\n @param codebase: name of the codebase; used as key for the sourcestamp set\n @type codebase: unicode\n\n @param name: optional override for the name-currying for the subfields\n @type codebase: unicode\n\n @param label: optional override for the label for this set of parameters\n @type codebase: unicode\n \"\"\"\n\n name = name or codebase\n if label is None and codebase:\n label = \"Codebase: \" + codebase\n\n fields_dict = dict(branch=branch, revision=revision,\n repository=repository, project=project)\n for k, v in iteritems(fields_dict):\n if v is DefaultField:\n v = StringParameter(name=k, label=k.capitalize() + \":\")\n elif isinstance(v, string_types):\n v = FixedParameter(name=k, default=v)\n fields_dict[k] = v\n\n fields = [val for val in fields_dict.values() if val]\n\n NestedParameter.__init__(self, name=name, label=label,\n codebase=codebase,\n fields=fields, **kwargs)\n\n def createSourcestamp(self, properties, kwargs):\n # default, just return the things we put together\n return kwargs.get(self.fullName, {})\n\n @defer.inlineCallbacks\n def updateFromKwargs(self, sourcestamps, kwargs, properties, collector, **kw):\n yield self.collectChildProperties(sourcestamps=sourcestamps,\n properties=properties,\n kwargs=kwargs,\n collector=collector,\n **kw)\n\n # convert the \"property\" to a sourcestamp\n ss = self.createSourcestamp(properties, kwargs)\n if ss is not None:\n sourcestamps[self.codebase] = ss\n\n\ndef oneCodebase(**kw):\n return [CodebaseParameter('', **kw)]\n\n\nclass ForceScheduler(base.BaseScheduler):\n\n \"\"\"\n ForceScheduler implements the backend for a UI to allow customization of\n builds. For example, a web form be populated to trigger a build.\n \"\"\"\n compare_attrs = base.BaseScheduler.compare_attrs + \\\n ('builderNames',\n 'reason', 'username',\n 'forcedProperties')\n\n def __init__(self, name, builderNames,\n username=UserNameParameter(),\n reason=StringParameter(\n name=\"reason\", default=\"force build\", size=20),\n reasonString=\"A build was forced by '%(owner)s': %(reason)s\",\n buttonName=None,\n codebases=None,\n label=None,\n properties=None):\n \"\"\"\n Initialize a ForceScheduler.\n\n The UI will provide a set of fields to the user; these fields are\n driven by a corresponding child class of BaseParameter.\n\n Use NestedParameter to provide logical groupings for parameters.\n\n The branch/revision/repository/project fields are deprecated and\n provided only for backwards compatibility. Using a Codebase(name='')\n will give the equivalent behavior.\n\n @param name: name of this scheduler (used as a key for state)\n @type name: unicode\n\n @param builderNames: list of builders this scheduler may start\n @type builderNames: list of unicode\n\n @param username: the \"owner\" for a build (may not be shown depending\n on the Auth configuration for the master)\n @type username: BaseParameter\n\n @param reason: the \"reason\" for a build\n @type reason: BaseParameter\n\n @param codebases: the codebases for a build\n @type codebases: list of string's or CodebaseParameter's;\n None will generate a default, but [] will\n remove all codebases\n\n @param properties: extra properties to configure the build\n @type properties: list of BaseParameter's\n \"\"\"\n\n if not self.checkIfType(name, str):\n config.error(\"ForceScheduler name must be a unicode string: %r\" %\n name)\n\n if not name:\n config.error(\"ForceScheduler name must not be empty: %r\" %\n name)\n\n if not identifiers.ident_re.match(name):\n config.error(\"ForceScheduler name must be an identifier: %r\" %\n name)\n\n if not self.checkIfListOfType(builderNames, string_types):\n config.error(\"ForceScheduler '%s': builderNames must be a list of strings: %r\" %\n (name, builderNames))\n\n if self.checkIfType(reason, BaseParameter):\n self.reason = reason\n else:\n config.error(\"ForceScheduler '%s': reason must be a StringParameter: %r\" %\n (name, reason))\n\n if properties is None:\n properties = []\n if not self.checkIfListOfType(properties, BaseParameter):\n config.error(\"ForceScheduler '%s': properties must be a list of BaseParameters: %r\" %\n (name, properties))\n\n if self.checkIfType(username, BaseParameter):\n self.username = username\n else:\n config.error(\"ForceScheduler '%s': username must be a StringParameter: %r\" %\n (name, username))\n\n self.forcedProperties = []\n self.label = name if label is None else label\n\n # Use the default single codebase form if none are provided\n if codebases is None:\n codebases = [CodebaseParameter(codebase='')]\n elif not codebases:\n config.error(\"ForceScheduler '%s': 'codebases' cannot be empty;\"\n \" use [CodebaseParameter(codebase='', hide=True)] if needed: %r \" % (\n name, codebases))\n elif not isinstance(codebases, list):\n config.error(\"ForceScheduler '%s': 'codebases' should be a list of strings or CodebaseParameter,\"\n \" not %s\" % (\n name, type(codebases)))\n\n codebase_dict = {}\n for codebase in codebases:\n if isinstance(codebase, string_types):\n codebase = CodebaseParameter(codebase=codebase)\n elif not isinstance(codebase, CodebaseParameter):\n config.error(\"ForceScheduler '%s': 'codebases' must be a list of strings\"\n \" or CodebaseParameter objects: %r\" % (\n name, codebases))\n\n self.forcedProperties.append(codebase)\n codebase_dict[codebase.codebase] = dict(\n branch='', repository='', revision='')\n\n base.BaseScheduler.__init__(self,\n name=name,\n builderNames=builderNames,\n properties={},\n codebases=codebase_dict)\n\n if properties:\n self.forcedProperties.extend(properties)\n\n # this is used to simplify the template\n self.all_fields = [NestedParameter(name='', fields=[username, reason])]\n self.all_fields.extend(self.forcedProperties)\n\n self.reasonString = reasonString\n self.buttonName = buttonName or name\n\n def checkIfType(self, obj, chkType):\n return isinstance(obj, chkType)\n\n def checkIfListOfType(self, obj, chkType):\n isListOfType = True\n\n if self.checkIfType(obj, list):\n for item in obj:\n if not self.checkIfType(item, chkType):\n isListOfType = False\n break\n else:\n isListOfType = False\n\n return isListOfType\n\n @defer.inlineCallbacks\n def gatherPropertiesAndChanges(self, collector, **kwargs):\n properties = {}\n changeids = []\n sourcestamps = {}\n\n for param in self.forcedProperties:\n yield collector.collectValidationErrors(param.fullName,\n param.updateFromKwargs,\n master=self.master,\n properties=properties,\n changes=changeids,\n sourcestamps=sourcestamps,\n collector=collector,\n kwargs=kwargs)\n changeids = [type(a) == int and a or a.number for a in changeids]\n\n real_properties = Properties()\n for pname, pvalue in iteritems(properties):\n real_properties.setProperty(pname, pvalue, \"Force Build Form\")\n\n defer.returnValue((real_properties, changeids, sourcestamps))\n\n @defer.inlineCallbacks\n def computeBuilderNames(self, builderNames=None, builderid=None):\n if builderNames is None:\n if builderid is not None:\n builder = yield self.master.data.get(('builders', str(builderid)))\n builderNames = [builder['name']]\n else:\n builderNames = self.builderNames\n else:\n builderNames = sorted(\n set(builderNames).intersection(self.builderNames))\n defer.returnValue(builderNames)\n\n @defer.inlineCallbacks\n def force(self, owner, builderNames=None, builderid=None, **kwargs):\n \"\"\"\n We check the parameters, and launch the build, if everything is correct\n \"\"\"\n builderNames = yield self.computeBuilderNames(builderNames, builderid)\n if not builderNames:\n raise KeyError(\"builderNames not specified or not supported\")\n\n # Currently the validation code expects all kwargs to be lists\n # I don't want to refactor that now so much sure we comply...\n kwargs = dict((k, [v]) if not isinstance(v, list) else (k, v)\n for k, v in iteritems(kwargs))\n\n # probably need to clean that out later as the IProperty is already a\n # validation mechanism\n collector = ValidationErrorCollector()\n reason = yield collector.collectValidationErrors(self.reason.fullName,\n self.reason.getFromKwargs, kwargs)\n if owner is None or owner == \"anonymous\":\n owner = yield collector.collectValidationErrors(self.username.fullName,\n self.username.getFromKwargs, kwargs)\n\n properties, changeids, sourcestamps = yield self.gatherPropertiesAndChanges(\n collector, **kwargs)\n\n collector.maybeRaiseCollectedErrors()\n\n properties.setProperty(\"reason\", reason, \"Force Build Form\")\n properties.setProperty(\"owner\", owner, \"Force Build Form\")\n\n r = self.reasonString % {'owner': owner, 'reason': reason}\n\n # turn sourcestamps into a list\n for cb, ss in iteritems(sourcestamps):\n ss['codebase'] = cb\n sourcestamps = list(itervalues(sourcestamps))\n\n # everything is validated, we can create our source stamp, and\n # buildrequest\n res = yield self.addBuildsetForSourceStampsWithDefaults(\n reason=r,\n sourcestamps=sourcestamps,\n properties=properties,\n builderNames=builderNames,\n )\n\n defer.returnValue(res)\n", "path": "master/buildbot/schedulers/forcesched.py" } ]
diff --git a/master/buildbot/schedulers/forcesched.py b/master/buildbot/schedulers/forcesched.py index 0fbc29b9b057..76d5b5ac52c6 100644 --- a/master/buildbot/schedulers/forcesched.py +++ b/master/buildbot/schedulers/forcesched.py @@ -249,7 +249,7 @@ class BooleanParameter(BaseParameter): type = "bool" def getFromKwargs(self, kwargs): - return kwargs.get(self.fullName, None) == [True] + return kwargs.get(self.fullName, [self.default]) == [True] class UserNameParameter(StringParameter):
internetarchive__openlibrary-8944
No `Cache-Control` headers on IA CDN requests ### Problem These two JS load on every page: https://openlibrary.org/cdn/archive.org/donate.js and. https://openlibrary.org/cdn/archive.org/analytics.js However, they are not cached like the rest of the JS. This causes a lot (2 seconds) delay when on a very slow connection (only a few 100ms on a decent connection). Their headers: ``` Content-Encoding: gzip Content-Type: text/javascript Date: Wed, 20 Mar 2024 11:48:52 GMT Referrer-Policy: no-referrer-when-downgrade Server: nginx/1.18.0 (Ubuntu) Vary: Accept-Encoding X-Ol-Stats: "" ``` Headers for all other requests: ``` Cache-Control: max-age=315360000 Content-Encoding: gzip Content-Type: application/javascript Date: Wed, 20 Mar 2024 11:03:17 GMT Etag: W/"65e78c3f-69215" Expires: Thu, 31 Dec 2037 23:55:55 GMT Last-Modified: Tue, 05 Mar 2024 21:18:55 GMT Referrer-Policy: no-referrer-when-downgrade Server: nginx/1.18.0 (Ubuntu) ``` #### Evidence / Screenshot <img width="1511" alt="image" src="https://github.com/internetarchive/openlibrary/assets/921217/79644e7c-160c-44f4-ac3d-1c795665b1f0"> #### Relevant URL(s) ### Reproducing the bug _No response_ ### Context _No response_ ### Notes from this Issue's Lead #### Proposal & constraints <!-- What is the proposed solution / implementation? Is there a precedent of this approach succeeding elsewhere? --> Add cache control headers. Probably should also add `defer` or `async` to their script tags so they don't block loading when they're not cached but this could be a different issue. https://javascript.info/script-async-defer #### Related files <!-- Files related to this issue; this is super useful for new contributors who might want to help! If you're not sure, leave this blank; a maintainer will add them. --> https://github.com/internetarchive/openlibrary/blob/45ed08186221233c04f1859e3b8a703152b01539/openlibrary/plugins/openlibrary/code.py#L425-L430 https://github.com/internetarchive/openlibrary/blob/45ed08186221233c04f1859e3b8a703152b01539/openlibrary/templates/site/footer.html#L41-L42 https://github.com/internetarchive/openlibrary/blob/45ed08186221233c04f1859e3b8a703152b01539/openlibrary/plugins/upstream/utils.py#L1468-L1469 #### Stakeholders <!-- @ tag stakeholders of this bug --> PS: I don't have bandwidth to take this right now.
[ { "content": "\"\"\"\nOpen Library Plugin.\n\"\"\"\n\nfrom urllib.parse import parse_qs, urlparse, urlencode, urlunparse\nimport requests\nimport web\nimport json\nimport os\nimport socket\nimport random\nimport datetime\nimport logging\nfrom time import time\nimport math\nfrom pathlib import Path\nimport infogami\n\n# make sure infogami.config.features is set\nif not hasattr(infogami.config, 'features'):\n infogami.config.features = [] # type: ignore[attr-defined]\n\nfrom infogami.utils.app import metapage\nfrom infogami.utils import delegate\nfrom openlibrary.utils import dateutil\nfrom infogami.utils.view import (\n render,\n render_template,\n public,\n safeint,\n add_flash_message,\n)\nfrom infogami.infobase import client\nfrom infogami.core.db import ValidationException\n\nfrom openlibrary.core import cache\nfrom openlibrary.core.vendors import create_edition_from_amazon_metadata\nfrom openlibrary.utils.isbn import isbn_13_to_isbn_10, isbn_10_to_isbn_13\nfrom openlibrary.core.models import Edition\nfrom openlibrary.core.lending import get_availability\nimport openlibrary.core.stats\nfrom openlibrary.plugins.openlibrary.home import format_work_data\nfrom openlibrary.plugins.openlibrary.stats import increment_error_count\nfrom openlibrary.plugins.openlibrary import processors\n\ndelegate.app.add_processor(processors.ReadableUrlProcessor())\ndelegate.app.add_processor(processors.ProfileProcessor())\ndelegate.app.add_processor(processors.CORSProcessor(cors_prefixes={'/api/'}))\n\ntry:\n from infogami.plugins.api import code as api\nexcept:\n api = None # type: ignore[assignment]\n\n# http header extension for OL API\ninfogami.config.http_ext_header_uri = 'http://openlibrary.org/dev/docs/api' # type: ignore[attr-defined]\n\n# setup special connection with caching support\nfrom openlibrary.plugins.openlibrary import connection\n\nclient._connection_types['ol'] = connection.OLConnection # type: ignore[assignment]\ninfogami.config.infobase_parameters = {'type': 'ol'}\n\n# set up infobase schema. required when running in standalone mode.\nfrom openlibrary.core import schema\n\nschema.register_schema()\n\nfrom openlibrary.core import models\n\nmodels.register_models()\nmodels.register_types()\n\nimport openlibrary.core.lists.model as list_models\n\nlist_models.register_models()\n\n# Remove movefiles install hook. openlibrary manages its own files.\ninfogami._install_hooks = [\n h for h in infogami._install_hooks if h.__name__ != 'movefiles'\n]\n\nfrom openlibrary.plugins.openlibrary import lists, bulk_tag\n\nlists.setup()\nbulk_tag.setup()\n\nlogger = logging.getLogger('openlibrary')\n\n\nclass hooks(client.hook):\n def before_new_version(self, page):\n user = web.ctx.site.get_user()\n account = user and user.get_account()\n if account and account.is_blocked():\n raise ValidationException(\n 'Your account has been suspended. You are not allowed to make any edits.'\n )\n\n if page.key.startswith('/a/') or page.key.startswith('/authors/'):\n if page.type.key == '/type/author':\n return\n\n books = web.ctx.site.things({'type': '/type/edition', 'authors': page.key})\n books = books or web.ctx.site.things(\n {'type': '/type/work', 'authors': {'author': {'key': page.key}}}\n )\n if page.type.key == '/type/delete' and books:\n raise ValidationException(\n 'This Author page cannot be deleted as %d record(s) still reference this id. Please remove or reassign before trying again. Referenced by: %s'\n % (len(books), books)\n )\n elif page.type.key != '/type/author' and books:\n raise ValidationException(\n 'Changing type of author pages is not allowed.'\n )\n\n\[email protected]\ndef sampledump():\n \"\"\"Creates a dump of objects from OL database for creating a sample database.\"\"\"\n\n def expand_keys(keys):\n def f(k):\n if isinstance(k, dict):\n return web.ctx.site.things(k)\n elif k.endswith('*'):\n return web.ctx.site.things({'key~': k})\n else:\n return [k]\n\n result = []\n for k in keys:\n d = f(k)\n result += d\n return result\n\n def get_references(data, result=None):\n if result is None:\n result = []\n\n if isinstance(data, dict):\n if 'key' in data:\n result.append(data['key'])\n else:\n get_references(data.values(), result)\n elif isinstance(data, list):\n for v in data:\n get_references(v, result)\n return result\n\n visiting = {}\n visited = set()\n\n def visit(key):\n if key in visited or key.startswith('/type/'):\n return\n elif key in visiting:\n # This is a case of circular-dependency. Add a stub object to break it.\n print(json.dumps({'key': key, 'type': visiting[key]['type']}))\n visited.add(key)\n return\n\n thing = web.ctx.site.get(key)\n if not thing:\n return\n\n d = thing.dict()\n d.pop('permission', None)\n d.pop('child_permission', None)\n d.pop('table_of_contents', None)\n\n visiting[key] = d\n for ref in get_references(d.values()):\n visit(ref)\n visited.add(key)\n\n print(json.dumps(d))\n\n keys = [\n '/scan_record',\n '/scanning_center',\n {'type': '/type/scan_record', 'limit': 10},\n ]\n keys = expand_keys(keys) + ['/b/OL%dM' % i for i in range(1, 100)]\n visited = set()\n\n for k in keys:\n visit(k)\n\n\[email protected]\ndef sampleload(filename='sampledump.txt.gz'):\n if filename.endswith('.gz'):\n import gzip\n\n f = gzip.open(filename)\n else:\n f = open(filename)\n\n queries = [json.loads(line) for line in f]\n print(web.ctx.site.save_many(queries))\n\n\nclass routes(delegate.page):\n path = '/developers/routes'\n\n def GET(self):\n class ModulesToStr(json.JSONEncoder):\n def default(self, obj):\n if isinstance(obj, metapage):\n return obj.__module__ + '.' + obj.__name__\n return super().default(obj)\n\n from openlibrary import code\n\n return '<pre>%s</pre>' % json.dumps(\n code.delegate.pages,\n sort_keys=True,\n cls=ModulesToStr,\n indent=4,\n separators=(',', ': '),\n )\n\n\nclass team(delegate.page):\n path = '/about/team'\n\n def GET(self):\n return render_template(\"about/index.html\")\n\n\nclass addbook(delegate.page):\n path = '/addbook'\n\n def GET(self):\n d = {'type': web.ctx.site.get('/type/edition')}\n\n i = web.input()\n author = i.get('author') and web.ctx.site.get(i.author)\n if author:\n d['authors'] = [author]\n\n page = web.ctx.site.new(\"\", d)\n return render.edit(page, self.path, 'Add Book')\n\n def POST(self):\n from infogami.core.code import edit\n\n key = web.ctx.site.new_key('/type/edition')\n web.ctx.path = key\n return edit().POST(key)\n\n\nclass widget(delegate.page):\n path = r'(/works/OL\\d+W|/books/OL\\d+M)/widget'\n\n def GET(self, key: str): # type: ignore[override]\n olid = key.split('/')[-1]\n item = web.ctx.site.get(key)\n is_work = key.startswith('/works/')\n item['olid'] = olid\n item['availability'] = get_availability(\n 'openlibrary_work' if is_work else 'openlibrary_edition',\n [olid],\n ).get(olid)\n item['authors'] = [\n web.storage(key=a.key, name=a.name or None) for a in item.get_authors()\n ]\n return delegate.RawText(\n render_template('widget', format_work_data(item) if is_work else item),\n content_type='text/html',\n )\n\n\nclass addauthor(delegate.page):\n path = '/addauthor'\n\n def POST(self):\n i = web.input('name')\n if len(i.name) < 2:\n return web.badrequest()\n key = web.ctx.site.new_key('/type/author')\n web.ctx.path = key\n web.ctx.site.save(\n {'key': key, 'name': i.name, 'type': {'key': '/type/author'}},\n comment='New Author',\n )\n raise web.HTTPError('200 OK', {}, key)\n\n\nclass clonebook(delegate.page):\n def GET(self):\n from infogami.core.code import edit\n\n i = web.input('key')\n page = web.ctx.site.get(i.key)\n if page is None:\n raise web.seeother(i.key)\n else:\n d = page._getdata()\n for k in ['isbn_10', 'isbn_13', 'lccn', 'oclc']:\n d.pop(k, None)\n return render.edit(page, '/addbook', 'Clone Book')\n\n\nclass search(delegate.page):\n path = '/suggest/search'\n\n def GET(self):\n i = web.input(prefix='')\n if len(i.prefix) > 2:\n q = {\n 'type': '/type/author',\n 'name~': i.prefix + '*',\n 'sort': 'name',\n 'limit': 5,\n }\n things = web.ctx.site.things(q)\n things = [web.ctx.site.get(key) for key in things]\n result = [\n {\n 'type': [{'id': t.key, 'name': t.key}],\n 'name': web.safestr(t.name),\n 'guid': t.key,\n 'id': t.key,\n 'article': {'id': t.key},\n }\n for t in things\n ]\n else:\n result = []\n callback = i.pop('callback', None)\n d = {\n 'status': '200 OK',\n 'query': dict(i, escape='html'),\n 'code': '/api/status/ok',\n 'result': result,\n }\n\n if callback:\n data = f'{callback}({json.dumps(d)})'\n else:\n data = json.dumps(d)\n raise web.HTTPError('200 OK', {}, data)\n\n\nclass blurb(delegate.page):\n path = '/suggest/blurb/(.*)'\n\n def GET(self, path):\n i = web.input()\n author = web.ctx.site.get('/' + path)\n body = ''\n if author.birth_date or author.death_date:\n body = f'{author.birth_date} - {author.death_date}'\n else:\n body = '%s' % author.date\n\n body += '<br/>'\n if author.bio:\n body += web.safestr(author.bio)\n\n result = {'body': body, 'media_type': 'text/html', 'text_encoding': 'utf-8'}\n d = {'status': '200 OK', 'code': '/api/status/ok', 'result': result}\n if callback := i.pop('callback', None):\n data = f'{callback}({json.dumps(d)})'\n else:\n data = json.dumps(d)\n\n raise web.HTTPError('200 OK', {}, data)\n\n\nclass thumbnail(delegate.page):\n path = '/suggest/thumbnail'\n\n\n@public\ndef get_property_type(type, name):\n for p in type.properties:\n if p.name == name:\n return p.expected_type\n return web.ctx.site.get('/type/string')\n\n\ndef save(filename, text):\n root = os.path.dirname(__file__)\n path = root + filename\n dir = os.path.dirname(path)\n if not os.path.exists(dir):\n os.makedirs(dir)\n f = open(path, 'w')\n f.write(text)\n f.close()\n\n\ndef change_ext(filename, ext):\n filename, _ = os.path.splitext(filename)\n if ext:\n filename = filename + ext\n return filename\n\n\ndef get_pages(type, processor):\n pages = web.ctx.site.things({'type': type})\n for p in pages:\n processor(web.ctx.site.get(p))\n\n\nclass robotstxt(delegate.page):\n path = '/robots.txt'\n\n def GET(self):\n web.header('Content-Type', 'text/plain')\n is_dev = 'dev' in infogami.config.features or web.ctx.host != 'openlibrary.org'\n robots_file = 'norobots.txt' if is_dev else 'robots.txt'\n return web.ok(open(f'static/{robots_file}').read())\n\n\[email protected]\ndef fetch_ia_js(filename: str) -> str:\n return requests.get(f'https://archive.org/includes/{filename}').text\n\n\nclass ia_js_cdn(delegate.page):\n path = r'/cdn/archive.org/(donate\\.js|analytics\\.js)'\n\n def GET(self, filename):\n web.header('Content-Type', 'text/javascript')\n return web.ok(fetch_ia_js(filename))\n\n\nclass serviceworker(delegate.page):\n path = '/sw.js'\n\n def GET(self):\n web.header('Content-Type', 'text/javascript')\n return web.ok(open('static/build/sw.js').read())\n\n\nclass assetlinks(delegate.page):\n path = '/.well-known/assetlinks'\n\n def GET(self):\n web.header('Content-Type', 'application/json')\n return web.ok(open('static/.well-known/assetlinks.json').read())\n\n\nclass opensearchxml(delegate.page):\n path = '/opensearch.xml'\n\n def GET(self):\n web.header('Content-Type', 'text/plain')\n return web.ok(open('static/opensearch.xml').read())\n\n\nclass health(delegate.page):\n path = '/health'\n\n def GET(self):\n web.header('Content-Type', 'text/plain')\n return web.ok('OK')\n\n\ndef remove_high_priority(query: str) -> str:\n \"\"\"\n Remove `high_priority=true` and `high_priority=false` from query parameters,\n as the API expects to pass URL parameters through to another query, and\n these may interfere with that query.\n\n >>> remove_high_priority('high_priority=true&v=1')\n 'v=1'\n \"\"\"\n query_params = parse_qs(query)\n query_params.pop(\"high_priority\", None)\n new_query = urlencode(query_params, doseq=True)\n return new_query\n\n\nclass isbn_lookup(delegate.page):\n path = r'/(?:isbn|ISBN)/([0-9xX-]+)'\n\n def GET(self, isbn):\n input = web.input(high_priority=False)\n\n high_priority = input.get(\"high_priority\") == \"true\"\n if \"high_priority\" in web.ctx.env.get('QUERY_STRING'):\n web.ctx.env['QUERY_STRING'] = remove_high_priority(\n web.ctx.env.get('QUERY_STRING')\n )\n\n # Preserve the url type (e.g. `.json`) and query params\n ext = ''\n if web.ctx.encoding and web.ctx.path.endswith('.' + web.ctx.encoding):\n ext = '.' + web.ctx.encoding\n if web.ctx.env.get('QUERY_STRING'):\n ext += '?' + web.ctx.env['QUERY_STRING']\n\n try:\n if ed := Edition.from_isbn(isbn=isbn, high_priority=high_priority):\n return web.found(ed.key + ext)\n except Exception as e:\n logger.error(e)\n return repr(e)\n\n web.ctx.status = '404 Not Found'\n return render.notfound(web.ctx.path, create=False)\n\n\nclass bookpage(delegate.page):\n \"\"\"\n Load an edition bookpage by identifier: isbn, oclc, lccn, or ia (ocaid).\n otherwise, return a 404.\n \"\"\"\n\n path = r'/(oclc|lccn|ia|OCLC|LCCN|IA)/([^/]*)(/.*)?'\n\n def GET(self, key, value, suffix=''):\n key = key.lower()\n\n if key == 'oclc':\n key = 'oclc_numbers'\n elif key == 'ia':\n key = 'ocaid'\n\n if key != 'ocaid': # example: MN41558ucmf_6\n value = value.replace('_', ' ')\n\n if web.ctx.encoding and web.ctx.path.endswith('.' + web.ctx.encoding):\n ext = '.' + web.ctx.encoding\n else:\n ext = ''\n\n if web.ctx.env.get('QUERY_STRING'):\n ext += '?' + web.ctx.env['QUERY_STRING']\n\n q = {'type': '/type/edition', key: value}\n\n result = web.ctx.site.things(q)\n\n if result:\n return web.found(result[0] + ext)\n elif key == 'ocaid':\n # Try a range of ocaid alternatives:\n ocaid_alternatives = [\n {'type': '/type/edition', 'source_records': 'ia:' + value},\n {'type': '/type/volume', 'ia_id': value},\n ]\n for q in ocaid_alternatives:\n result = web.ctx.site.things(q)\n if result:\n return web.found(result[0] + ext)\n\n # Perform import, if possible\n from openlibrary.plugins.importapi.code import ia_importapi, BookImportError\n from openlibrary import accounts\n\n with accounts.RunAs('ImportBot'):\n try:\n ia_importapi.ia_import(value, require_marc=True)\n except BookImportError:\n logger.exception('Unable to import ia record')\n\n # Go the the record created, or to the dummy ia-wrapper record\n return web.found('/books/ia:' + value + ext)\n\n web.ctx.status = '404 Not Found'\n return render.notfound(web.ctx.path, create=False)\n\n\ndelegate.media_types['application/rdf+xml'] = 'rdf'\n\n\nclass rdf(delegate.mode):\n name = 'view'\n encoding = 'rdf'\n\n def GET(self, key):\n page = web.ctx.site.get(key)\n if not page:\n raise web.notfound('')\n else:\n from infogami.utils import template\n\n try:\n result = template.typetemplate('rdf')(page)\n except:\n raise web.notfound('')\n else:\n return delegate.RawText(\n result, content_type='application/rdf+xml; charset=utf-8'\n )\n\n\ndelegate.media_types[' application/atom+xml;profile=opds'] = 'opds'\n\n\nclass opds(delegate.mode):\n name = 'view'\n encoding = 'opds'\n\n def GET(self, key):\n page = web.ctx.site.get(key)\n if not page:\n raise web.notfound('')\n else:\n from openlibrary.plugins.openlibrary import opds\n\n try:\n result = opds.OPDSEntry(page).to_string()\n except:\n raise web.notfound('')\n else:\n return delegate.RawText(\n result, content_type=' application/atom+xml;profile=opds'\n )\n\n\ndelegate.media_types['application/marcxml+xml'] = 'marcxml'\n\n\nclass marcxml(delegate.mode):\n name = 'view'\n encoding = 'marcxml'\n\n def GET(self, key):\n page = web.ctx.site.get(key)\n if page is None or page.type.key != '/type/edition':\n raise web.notfound('')\n else:\n from infogami.utils import template\n\n try:\n result = template.typetemplate('marcxml')(page)\n except:\n raise web.notfound('')\n else:\n return delegate.RawText(\n result, content_type='application/marcxml+xml; charset=utf-8'\n )\n\n\ndelegate.media_types['text/x-yaml'] = 'yml'\n\n\nclass _yaml(delegate.mode):\n name = 'view'\n encoding = 'yml'\n\n def GET(self, key):\n d = self.get_data(key)\n\n if web.input(text='false').text.lower() == 'true':\n web.header('Content-Type', 'text/plain; charset=utf-8')\n else:\n web.header('Content-Type', 'text/x-yaml; charset=utf-8')\n\n raise web.ok(self.dump(d))\n\n def get_data(self, key):\n i = web.input(v=None)\n v = safeint(i.v, None)\n data = {'key': key, 'revision': v}\n try:\n d = api.request('/get', data=data)\n except client.ClientException as e:\n if e.json:\n msg = self.dump(json.loads(e.json))\n else:\n msg = str(e)\n raise web.HTTPError(e.status, data=msg)\n\n return json.loads(d)\n\n def dump(self, d):\n import yaml\n\n return yaml.safe_dump(d, indent=4, allow_unicode=True, default_flow_style=False)\n\n def load(self, data):\n import yaml\n\n return yaml.safe_load(data)\n\n\nclass _yaml_edit(_yaml):\n name = 'edit'\n encoding = 'yml'\n\n def is_admin(self):\n u = delegate.context.user\n return u and (u.is_admin() or u.is_super_librarian())\n\n def GET(self, key):\n # only allow admin users to edit yaml\n if not self.is_admin():\n return render.permission_denied(key, 'Permission Denied')\n\n try:\n d = self.get_data(key)\n except web.HTTPError as e:\n if web.ctx.status.lower() == '404 not found':\n d = {'key': key}\n else:\n raise\n return render.edit_yaml(key, self.dump(d))\n\n def POST(self, key):\n # only allow admin users to edit yaml\n if not self.is_admin():\n return render.permission_denied(key, 'Permission Denied')\n\n i = web.input(body='', _comment=None)\n\n if '_save' in i:\n d = self.load(i.body)\n p = web.ctx.site.new(key, d)\n try:\n p._save(i._comment)\n except (client.ClientException, ValidationException) as e:\n add_flash_message('error', str(e))\n return render.edit_yaml(key, i.body)\n raise web.seeother(key + '.yml')\n elif '_preview' in i:\n add_flash_message('Preview not supported')\n return render.edit_yaml(key, i.body)\n else:\n add_flash_message('unknown action')\n return render.edit_yaml(key, i.body)\n\n\ndef _get_user_root():\n user_root = infogami.config.get('infobase', {}).get('user_root', '/user')\n return web.rstrips(user_root, '/')\n\n\ndef _get_bots():\n bots = web.ctx.site.store.values(type='account', name='bot', value='true')\n user_root = _get_user_root()\n return [user_root + '/' + account['username'] for account in bots]\n\n\ndef _get_members_of_group(group_key):\n \"\"\"Returns keys of all members of the group identifier by group_key.\"\"\"\n usergroup = web.ctx.site.get(group_key) or {}\n return [m.key for m in usergroup.get('members', [])]\n\n\ndef can_write():\n \"\"\"\n Any user with bot flag set can write.\n For backward-compatability, all admin users and people in api usergroup are also allowed to write.\n \"\"\"\n user_key = delegate.context.user and delegate.context.user.key\n bots = (\n _get_members_of_group('/usergroup/api')\n + _get_members_of_group('/usergroup/admin')\n + _get_bots()\n )\n return user_key in bots\n\n\n# overwrite the implementation of can_write in the infogami API plugin with this one.\napi.can_write = can_write\n\n\nclass Forbidden(web.HTTPError):\n def __init__(self, msg=''):\n web.HTTPError.__init__(self, '403 Forbidden', {}, msg)\n\n\nclass BadRequest(web.HTTPError):\n def __init__(self, msg=''):\n web.HTTPError.__init__(self, '400 Bad Request', {}, msg)\n\n\nclass new:\n \"\"\"API to create new author/edition/work/publisher/series.\"\"\"\n\n def prepare_query(self, query):\n \"\"\"\n Add key to query and returns the key.\n If query is a list multiple queries are returned.\n \"\"\"\n if isinstance(query, list):\n return [self.prepare_query(q) for q in query]\n else:\n type = query['type']\n if isinstance(type, dict):\n type = type['key']\n query['key'] = web.ctx.site.new_key(type)\n return query['key']\n\n def verify_types(self, query):\n if isinstance(query, list):\n for q in query:\n self.verify_types(q)\n else:\n if 'type' not in query:\n raise BadRequest('Missing type')\n type = query['type']\n if isinstance(type, dict):\n if 'key' not in type:\n raise BadRequest('Bad Type: ' + json.dumps(type))\n type = type['key']\n\n if type not in [\n '/type/author',\n '/type/edition',\n '/type/work',\n '/type/series',\n '/type/publisher',\n ]:\n raise BadRequest('Bad Type: ' + json.dumps(type))\n\n def POST(self):\n if not can_write():\n raise Forbidden('Permission Denied.')\n\n try:\n query = json.loads(web.data())\n h = api.get_custom_headers()\n comment = h.get('comment')\n action = h.get('action')\n except Exception as e:\n raise BadRequest(str(e))\n\n self.verify_types(query)\n keys = self.prepare_query(query)\n\n try:\n if not isinstance(query, list):\n query = [query]\n web.ctx.site.save_many(query, comment=comment, action=action)\n except client.ClientException as e:\n raise BadRequest(str(e))\n\n # graphite/statsd tracking of bot edits\n user = delegate.context.user and delegate.context.user.key\n if user.lower().endswith('bot'):\n botname = user.replace('/people/', '', 1)\n botname = botname.replace('.', '-')\n key = 'ol.edits.bots.' + botname\n openlibrary.core.stats.increment(key)\n return json.dumps(keys)\n\n\napi and api.add_hook('new', new)\n\n\n@public\ndef changequery(query=None, **kw):\n if query is None:\n query = web.input(_method='get', _unicode=False)\n for k, v in kw.items():\n if v is None:\n query.pop(k, None)\n else:\n query[k] = v\n\n query = {\n k: [web.safestr(s) for s in v] if isinstance(v, list) else web.safestr(v)\n for k, v in query.items()\n }\n out = web.ctx.get('readable_path', web.ctx.path)\n if query:\n out += '?' + urllib.parse.urlencode(query, doseq=True)\n return out\n\n\n# Hack to limit recent changes offset.\n# Large offsets are blowing up the database.\n\nfrom infogami.core.db import get_recent_changes as _get_recentchanges\n\nimport urllib\n\n\n@public\ndef get_recent_changes(*a, **kw):\n if 'offset' in kw and kw['offset'] > 5000:\n return []\n else:\n return _get_recentchanges(*a, **kw)\n\n\n@public\ndef most_recent_change():\n if 'cache_most_recent' in infogami.config.features:\n v = web.ctx.site._request('/most_recent')\n v.thing = web.ctx.site.get(v.key)\n v.author = v.author and web.ctx.site.get(v.author)\n v.created = client.parse_datetime(v.created)\n return v\n else:\n return get_recent_changes(limit=1)[0]\n\n\n@public\ndef get_cover_id(key):\n try:\n _, cat, oln = key.split('/')\n return requests.get(\n f\"https://covers.openlibrary.org/{cat}/query?olid={oln}&limit=1\"\n ).json()[0]\n except (IndexError, json.decoder.JSONDecodeError, TypeError, ValueError):\n return None\n\n\nlocal_ip = None\n\n\nclass invalidate(delegate.page):\n path = '/system/invalidate'\n\n def POST(self):\n global local_ip\n if local_ip is None:\n local_ip = socket.gethostbyname(socket.gethostname())\n\n if (\n web.ctx.ip != '127.0.0.1'\n and web.ctx.ip.rsplit('.', 1)[0] != local_ip.rsplit('.', 1)[0]\n ):\n raise Forbidden('Allowed only in the local network.')\n\n data = json.loads(web.data())\n if not isinstance(data, list):\n data = [data]\n for d in data:\n thing = client.Thing(web.ctx.site, d['key'], client.storify(d))\n client._run_hooks('on_new_version', thing)\n return delegate.RawText('ok')\n\n\ndef save_error():\n t = datetime.datetime.utcnow()\n name = '%04d-%02d-%02d/%02d%02d%02d%06d' % (\n t.year,\n t.month,\n t.day,\n t.hour,\n t.minute,\n t.second,\n t.microsecond,\n )\n\n path = infogami.config.get('errorlog', 'errors') + '/' + name + '.html'\n dir = os.path.dirname(path)\n if not os.path.exists(dir):\n os.makedirs(dir)\n\n error = web.safestr(web.djangoerror())\n f = open(path, 'w')\n f.write(error)\n f.close()\n\n print('error saved to', path, file=web.debug)\n return name\n\n\ndef internalerror():\n i = web.input(_method='GET', debug='false')\n name = save_error()\n\n # TODO: move this stats stuff to plugins\\openlibrary\\stats.py\n # Can't have sub-metrics, so can't add more info\n openlibrary.core.stats.increment('ol.internal-errors')\n increment_error_count('ol.internal-errors-segmented')\n\n # TODO: move this to plugins\\openlibrary\\sentry.py\n from openlibrary.plugins.openlibrary.sentry import sentry\n\n if sentry.enabled:\n sentry.capture_exception_webpy()\n\n if i.debug.lower() == 'true':\n raise web.debugerror()\n else:\n msg = render.site(render.internalerror(name))\n raise web.internalerror(web.safestr(msg))\n\n\ndelegate.app.internalerror = internalerror\ndelegate.add_exception_hook(save_error)\n\n\nclass memory(delegate.page):\n path = '/debug/memory'\n\n def GET(self):\n import guppy\n\n h = guppy.hpy()\n return delegate.RawText(str(h.heap()))\n\n\ndef _get_relatedcarousels_component(workid):\n if 'env' not in web.ctx:\n delegate.fakeload()\n work = web.ctx.site.get('/works/%s' % workid) or {}\n component = render_template('books/RelatedWorksCarousel', work)\n return {0: str(component)}\n\n\ndef get_cached_relatedcarousels_component(*args, **kwargs):\n memoized_get_component_metadata = cache.memcache_memoize(\n _get_relatedcarousels_component,\n \"book.bookspage.component.relatedcarousels\",\n timeout=dateutil.HALF_DAY_SECS,\n )\n return (\n memoized_get_component_metadata(*args, **kwargs)\n or memoized_get_component_metadata.update(*args, **kwargs)[0]\n )\n\n\nclass Partials(delegate.page):\n path = '/partials'\n encoding = 'json'\n\n def GET(self):\n # `data` is meant to be a dict with two keys: `args` and `kwargs`.\n # `data['args']` is meant to be a list of a template's positional arguments, in order.\n # `data['kwargs']` is meant to be a dict containing a template's keyword arguments.\n i = web.input(workid=None, _component=None, data=None)\n component = i.pop(\"_component\")\n partial = {}\n if component == \"RelatedWorkCarousel\":\n partial = _get_relatedcarousels_component(i.workid)\n elif component == \"AffiliateLinks\":\n data = json.loads(i.data)\n args = data.get('args', [])\n # XXX : Throw error if args length is less than 2\n macro = web.template.Template.globals['macros'].AffiliateLinks(\n args[0], args[1]\n )\n partial = {\"partials\": str(macro)}\n\n return delegate.RawText(json.dumps(partial))\n\n\ndef is_bot():\n r\"\"\"Generated on ol-www1 within /var/log/nginx with:\n\n cat access.log | grep -oh \"; \\w*[bB]ot\" | sort --unique | awk '{print tolower($2)}'\n cat access.log | grep -oh \"; \\w*[sS]pider\" | sort --unique | awk '{print tolower($2)}'\n\n Manually removed singleton `bot` (to avoid overly complex grep regex)\n \"\"\"\n user_agent_bots = [\n 'sputnikbot',\n 'dotbot',\n 'semrushbot',\n 'googlebot',\n 'yandexbot',\n 'monsidobot',\n 'kazbtbot',\n 'seznambot',\n 'dubbotbot',\n '360spider',\n 'redditbot',\n 'yandexmobilebot',\n 'linkdexbot',\n 'musobot',\n 'mojeekbot',\n 'focuseekbot',\n 'behloolbot',\n 'startmebot',\n 'yandexaccessibilitybot',\n 'uptimerobot',\n 'femtosearchbot',\n 'pinterestbot',\n 'toutiaospider',\n 'yoozbot',\n 'parsijoobot',\n 'equellaurlbot',\n 'donkeybot',\n 'paperlibot',\n 'nsrbot',\n 'discordbot',\n 'ahrefsbot',\n '`googlebot',\n 'coccocbot',\n 'buzzbot',\n 'laserlikebot',\n 'baiduspider',\n 'bingbot',\n 'mj12bot',\n 'yoozbotadsbot',\n ]\n if not web.ctx.env.get('HTTP_USER_AGENT'):\n return True\n user_agent = web.ctx.env['HTTP_USER_AGENT'].lower()\n return any(bot in user_agent for bot in user_agent_bots)\n\n\ndef setup_template_globals():\n # must be imported here, otherwise silently messes up infogami's import execution\n # order, resulting in random errors like the the /account/login.json endpoint\n # defined in accounts.py being ignored, and using the infogami endpoint instead.\n from openlibrary.book_providers import (\n get_best_edition,\n get_book_provider,\n get_book_provider_by_name,\n get_cover_url,\n )\n\n web.template.Template.globals.update(\n {\n 'next': next,\n 'sorted': sorted,\n 'zip': zip,\n 'tuple': tuple,\n 'hash': hash,\n 'urlquote': web.urlquote,\n 'isbn_13_to_isbn_10': isbn_13_to_isbn_10,\n 'isbn_10_to_isbn_13': isbn_10_to_isbn_13,\n 'NEWLINE': '\\n',\n 'random': random.Random(),\n 'choose_random_from': random.choice,\n 'get_lang': lambda: web.ctx.lang,\n 'ceil': math.ceil,\n 'get_best_edition': get_best_edition,\n 'get_book_provider': get_book_provider,\n 'get_book_provider_by_name': get_book_provider_by_name,\n 'get_cover_url': get_cover_url,\n # bad use of globals\n 'is_bot': is_bot,\n 'time': time,\n 'input': web.input,\n 'dumps': json.dumps,\n }\n )\n\n\ndef setup_context_defaults():\n from infogami.utils import context\n\n context.defaults.update({'features': [], 'user': None, 'MAX_VISIBLE_BOOKS': 5})\n\n\ndef setup():\n from openlibrary.plugins.openlibrary import (\n sentry,\n home,\n borrow_home,\n stats,\n support,\n events,\n design,\n status,\n authors,\n swagger,\n )\n\n sentry.setup()\n home.setup()\n design.setup()\n borrow_home.setup()\n stats.setup()\n support.setup()\n events.setup()\n status.setup()\n authors.setup()\n swagger.setup()\n\n from openlibrary.plugins.openlibrary import api\n\n delegate.app.add_processor(web.unloadhook(stats.stats_hook))\n\n if infogami.config.get('dev_instance') is True:\n from openlibrary.plugins.openlibrary import dev_instance\n\n dev_instance.setup()\n\n setup_context_defaults()\n setup_template_globals()\n\n\nsetup()\n", "path": "openlibrary/plugins/openlibrary/code.py" } ]
[ { "content": "\"\"\"\nOpen Library Plugin.\n\"\"\"\n\nfrom urllib.parse import parse_qs, urlparse, urlencode, urlunparse\nimport requests\nimport web\nimport json\nimport os\nimport socket\nimport random\nimport datetime\nimport logging\nfrom time import time\nimport math\nfrom pathlib import Path\nimport infogami\n\n# make sure infogami.config.features is set\nif not hasattr(infogami.config, 'features'):\n infogami.config.features = [] # type: ignore[attr-defined]\n\nfrom infogami.utils.app import metapage\nfrom infogami.utils import delegate\nfrom openlibrary.utils import dateutil\nfrom infogami.utils.view import (\n render,\n render_template,\n public,\n safeint,\n add_flash_message,\n)\nfrom infogami.infobase import client\nfrom infogami.core.db import ValidationException\n\nfrom openlibrary.core import cache\nfrom openlibrary.core.vendors import create_edition_from_amazon_metadata\nfrom openlibrary.utils.isbn import isbn_13_to_isbn_10, isbn_10_to_isbn_13\nfrom openlibrary.core.models import Edition\nfrom openlibrary.core.lending import get_availability\nimport openlibrary.core.stats\nfrom openlibrary.plugins.openlibrary.home import format_work_data\nfrom openlibrary.plugins.openlibrary.stats import increment_error_count\nfrom openlibrary.plugins.openlibrary import processors\n\ndelegate.app.add_processor(processors.ReadableUrlProcessor())\ndelegate.app.add_processor(processors.ProfileProcessor())\ndelegate.app.add_processor(processors.CORSProcessor(cors_prefixes={'/api/'}))\n\ntry:\n from infogami.plugins.api import code as api\nexcept:\n api = None # type: ignore[assignment]\n\n# http header extension for OL API\ninfogami.config.http_ext_header_uri = 'http://openlibrary.org/dev/docs/api' # type: ignore[attr-defined]\n\n# setup special connection with caching support\nfrom openlibrary.plugins.openlibrary import connection\n\nclient._connection_types['ol'] = connection.OLConnection # type: ignore[assignment]\ninfogami.config.infobase_parameters = {'type': 'ol'}\n\n# set up infobase schema. required when running in standalone mode.\nfrom openlibrary.core import schema\n\nschema.register_schema()\n\nfrom openlibrary.core import models\n\nmodels.register_models()\nmodels.register_types()\n\nimport openlibrary.core.lists.model as list_models\n\nlist_models.register_models()\n\n# Remove movefiles install hook. openlibrary manages its own files.\ninfogami._install_hooks = [\n h for h in infogami._install_hooks if h.__name__ != 'movefiles'\n]\n\nfrom openlibrary.plugins.openlibrary import lists, bulk_tag\n\nlists.setup()\nbulk_tag.setup()\n\nlogger = logging.getLogger('openlibrary')\n\n\nclass hooks(client.hook):\n def before_new_version(self, page):\n user = web.ctx.site.get_user()\n account = user and user.get_account()\n if account and account.is_blocked():\n raise ValidationException(\n 'Your account has been suspended. You are not allowed to make any edits.'\n )\n\n if page.key.startswith('/a/') or page.key.startswith('/authors/'):\n if page.type.key == '/type/author':\n return\n\n books = web.ctx.site.things({'type': '/type/edition', 'authors': page.key})\n books = books or web.ctx.site.things(\n {'type': '/type/work', 'authors': {'author': {'key': page.key}}}\n )\n if page.type.key == '/type/delete' and books:\n raise ValidationException(\n 'This Author page cannot be deleted as %d record(s) still reference this id. Please remove or reassign before trying again. Referenced by: %s'\n % (len(books), books)\n )\n elif page.type.key != '/type/author' and books:\n raise ValidationException(\n 'Changing type of author pages is not allowed.'\n )\n\n\[email protected]\ndef sampledump():\n \"\"\"Creates a dump of objects from OL database for creating a sample database.\"\"\"\n\n def expand_keys(keys):\n def f(k):\n if isinstance(k, dict):\n return web.ctx.site.things(k)\n elif k.endswith('*'):\n return web.ctx.site.things({'key~': k})\n else:\n return [k]\n\n result = []\n for k in keys:\n d = f(k)\n result += d\n return result\n\n def get_references(data, result=None):\n if result is None:\n result = []\n\n if isinstance(data, dict):\n if 'key' in data:\n result.append(data['key'])\n else:\n get_references(data.values(), result)\n elif isinstance(data, list):\n for v in data:\n get_references(v, result)\n return result\n\n visiting = {}\n visited = set()\n\n def visit(key):\n if key in visited or key.startswith('/type/'):\n return\n elif key in visiting:\n # This is a case of circular-dependency. Add a stub object to break it.\n print(json.dumps({'key': key, 'type': visiting[key]['type']}))\n visited.add(key)\n return\n\n thing = web.ctx.site.get(key)\n if not thing:\n return\n\n d = thing.dict()\n d.pop('permission', None)\n d.pop('child_permission', None)\n d.pop('table_of_contents', None)\n\n visiting[key] = d\n for ref in get_references(d.values()):\n visit(ref)\n visited.add(key)\n\n print(json.dumps(d))\n\n keys = [\n '/scan_record',\n '/scanning_center',\n {'type': '/type/scan_record', 'limit': 10},\n ]\n keys = expand_keys(keys) + ['/b/OL%dM' % i for i in range(1, 100)]\n visited = set()\n\n for k in keys:\n visit(k)\n\n\[email protected]\ndef sampleload(filename='sampledump.txt.gz'):\n if filename.endswith('.gz'):\n import gzip\n\n f = gzip.open(filename)\n else:\n f = open(filename)\n\n queries = [json.loads(line) for line in f]\n print(web.ctx.site.save_many(queries))\n\n\nclass routes(delegate.page):\n path = '/developers/routes'\n\n def GET(self):\n class ModulesToStr(json.JSONEncoder):\n def default(self, obj):\n if isinstance(obj, metapage):\n return obj.__module__ + '.' + obj.__name__\n return super().default(obj)\n\n from openlibrary import code\n\n return '<pre>%s</pre>' % json.dumps(\n code.delegate.pages,\n sort_keys=True,\n cls=ModulesToStr,\n indent=4,\n separators=(',', ': '),\n )\n\n\nclass team(delegate.page):\n path = '/about/team'\n\n def GET(self):\n return render_template(\"about/index.html\")\n\n\nclass addbook(delegate.page):\n path = '/addbook'\n\n def GET(self):\n d = {'type': web.ctx.site.get('/type/edition')}\n\n i = web.input()\n author = i.get('author') and web.ctx.site.get(i.author)\n if author:\n d['authors'] = [author]\n\n page = web.ctx.site.new(\"\", d)\n return render.edit(page, self.path, 'Add Book')\n\n def POST(self):\n from infogami.core.code import edit\n\n key = web.ctx.site.new_key('/type/edition')\n web.ctx.path = key\n return edit().POST(key)\n\n\nclass widget(delegate.page):\n path = r'(/works/OL\\d+W|/books/OL\\d+M)/widget'\n\n def GET(self, key: str): # type: ignore[override]\n olid = key.split('/')[-1]\n item = web.ctx.site.get(key)\n is_work = key.startswith('/works/')\n item['olid'] = olid\n item['availability'] = get_availability(\n 'openlibrary_work' if is_work else 'openlibrary_edition',\n [olid],\n ).get(olid)\n item['authors'] = [\n web.storage(key=a.key, name=a.name or None) for a in item.get_authors()\n ]\n return delegate.RawText(\n render_template('widget', format_work_data(item) if is_work else item),\n content_type='text/html',\n )\n\n\nclass addauthor(delegate.page):\n path = '/addauthor'\n\n def POST(self):\n i = web.input('name')\n if len(i.name) < 2:\n return web.badrequest()\n key = web.ctx.site.new_key('/type/author')\n web.ctx.path = key\n web.ctx.site.save(\n {'key': key, 'name': i.name, 'type': {'key': '/type/author'}},\n comment='New Author',\n )\n raise web.HTTPError('200 OK', {}, key)\n\n\nclass clonebook(delegate.page):\n def GET(self):\n from infogami.core.code import edit\n\n i = web.input('key')\n page = web.ctx.site.get(i.key)\n if page is None:\n raise web.seeother(i.key)\n else:\n d = page._getdata()\n for k in ['isbn_10', 'isbn_13', 'lccn', 'oclc']:\n d.pop(k, None)\n return render.edit(page, '/addbook', 'Clone Book')\n\n\nclass search(delegate.page):\n path = '/suggest/search'\n\n def GET(self):\n i = web.input(prefix='')\n if len(i.prefix) > 2:\n q = {\n 'type': '/type/author',\n 'name~': i.prefix + '*',\n 'sort': 'name',\n 'limit': 5,\n }\n things = web.ctx.site.things(q)\n things = [web.ctx.site.get(key) for key in things]\n result = [\n {\n 'type': [{'id': t.key, 'name': t.key}],\n 'name': web.safestr(t.name),\n 'guid': t.key,\n 'id': t.key,\n 'article': {'id': t.key},\n }\n for t in things\n ]\n else:\n result = []\n callback = i.pop('callback', None)\n d = {\n 'status': '200 OK',\n 'query': dict(i, escape='html'),\n 'code': '/api/status/ok',\n 'result': result,\n }\n\n if callback:\n data = f'{callback}({json.dumps(d)})'\n else:\n data = json.dumps(d)\n raise web.HTTPError('200 OK', {}, data)\n\n\nclass blurb(delegate.page):\n path = '/suggest/blurb/(.*)'\n\n def GET(self, path):\n i = web.input()\n author = web.ctx.site.get('/' + path)\n body = ''\n if author.birth_date or author.death_date:\n body = f'{author.birth_date} - {author.death_date}'\n else:\n body = '%s' % author.date\n\n body += '<br/>'\n if author.bio:\n body += web.safestr(author.bio)\n\n result = {'body': body, 'media_type': 'text/html', 'text_encoding': 'utf-8'}\n d = {'status': '200 OK', 'code': '/api/status/ok', 'result': result}\n if callback := i.pop('callback', None):\n data = f'{callback}({json.dumps(d)})'\n else:\n data = json.dumps(d)\n\n raise web.HTTPError('200 OK', {}, data)\n\n\nclass thumbnail(delegate.page):\n path = '/suggest/thumbnail'\n\n\n@public\ndef get_property_type(type, name):\n for p in type.properties:\n if p.name == name:\n return p.expected_type\n return web.ctx.site.get('/type/string')\n\n\ndef save(filename, text):\n root = os.path.dirname(__file__)\n path = root + filename\n dir = os.path.dirname(path)\n if not os.path.exists(dir):\n os.makedirs(dir)\n f = open(path, 'w')\n f.write(text)\n f.close()\n\n\ndef change_ext(filename, ext):\n filename, _ = os.path.splitext(filename)\n if ext:\n filename = filename + ext\n return filename\n\n\ndef get_pages(type, processor):\n pages = web.ctx.site.things({'type': type})\n for p in pages:\n processor(web.ctx.site.get(p))\n\n\nclass robotstxt(delegate.page):\n path = '/robots.txt'\n\n def GET(self):\n web.header('Content-Type', 'text/plain')\n is_dev = 'dev' in infogami.config.features or web.ctx.host != 'openlibrary.org'\n robots_file = 'norobots.txt' if is_dev else 'robots.txt'\n return web.ok(open(f'static/{robots_file}').read())\n\n\[email protected]\ndef fetch_ia_js(filename: str) -> str:\n return requests.get(f'https://archive.org/includes/{filename}').text\n\n\nclass ia_js_cdn(delegate.page):\n path = r'/cdn/archive.org/(donate\\.js|analytics\\.js)'\n\n def GET(self, filename):\n web.header('Content-Type', 'text/javascript')\n web.header(\"Cache-Control\", \"max-age=%d\" % (24 * 3600))\n return web.ok(fetch_ia_js(filename))\n\n\nclass serviceworker(delegate.page):\n path = '/sw.js'\n\n def GET(self):\n web.header('Content-Type', 'text/javascript')\n return web.ok(open('static/build/sw.js').read())\n\n\nclass assetlinks(delegate.page):\n path = '/.well-known/assetlinks'\n\n def GET(self):\n web.header('Content-Type', 'application/json')\n return web.ok(open('static/.well-known/assetlinks.json').read())\n\n\nclass opensearchxml(delegate.page):\n path = '/opensearch.xml'\n\n def GET(self):\n web.header('Content-Type', 'text/plain')\n return web.ok(open('static/opensearch.xml').read())\n\n\nclass health(delegate.page):\n path = '/health'\n\n def GET(self):\n web.header('Content-Type', 'text/plain')\n return web.ok('OK')\n\n\ndef remove_high_priority(query: str) -> str:\n \"\"\"\n Remove `high_priority=true` and `high_priority=false` from query parameters,\n as the API expects to pass URL parameters through to another query, and\n these may interfere with that query.\n\n >>> remove_high_priority('high_priority=true&v=1')\n 'v=1'\n \"\"\"\n query_params = parse_qs(query)\n query_params.pop(\"high_priority\", None)\n new_query = urlencode(query_params, doseq=True)\n return new_query\n\n\nclass isbn_lookup(delegate.page):\n path = r'/(?:isbn|ISBN)/([0-9xX-]+)'\n\n def GET(self, isbn):\n input = web.input(high_priority=False)\n\n high_priority = input.get(\"high_priority\") == \"true\"\n if \"high_priority\" in web.ctx.env.get('QUERY_STRING'):\n web.ctx.env['QUERY_STRING'] = remove_high_priority(\n web.ctx.env.get('QUERY_STRING')\n )\n\n # Preserve the url type (e.g. `.json`) and query params\n ext = ''\n if web.ctx.encoding and web.ctx.path.endswith('.' + web.ctx.encoding):\n ext = '.' + web.ctx.encoding\n if web.ctx.env.get('QUERY_STRING'):\n ext += '?' + web.ctx.env['QUERY_STRING']\n\n try:\n if ed := Edition.from_isbn(isbn=isbn, high_priority=high_priority):\n return web.found(ed.key + ext)\n except Exception as e:\n logger.error(e)\n return repr(e)\n\n web.ctx.status = '404 Not Found'\n return render.notfound(web.ctx.path, create=False)\n\n\nclass bookpage(delegate.page):\n \"\"\"\n Load an edition bookpage by identifier: isbn, oclc, lccn, or ia (ocaid).\n otherwise, return a 404.\n \"\"\"\n\n path = r'/(oclc|lccn|ia|OCLC|LCCN|IA)/([^/]*)(/.*)?'\n\n def GET(self, key, value, suffix=''):\n key = key.lower()\n\n if key == 'oclc':\n key = 'oclc_numbers'\n elif key == 'ia':\n key = 'ocaid'\n\n if key != 'ocaid': # example: MN41558ucmf_6\n value = value.replace('_', ' ')\n\n if web.ctx.encoding and web.ctx.path.endswith('.' + web.ctx.encoding):\n ext = '.' + web.ctx.encoding\n else:\n ext = ''\n\n if web.ctx.env.get('QUERY_STRING'):\n ext += '?' + web.ctx.env['QUERY_STRING']\n\n q = {'type': '/type/edition', key: value}\n\n result = web.ctx.site.things(q)\n\n if result:\n return web.found(result[0] + ext)\n elif key == 'ocaid':\n # Try a range of ocaid alternatives:\n ocaid_alternatives = [\n {'type': '/type/edition', 'source_records': 'ia:' + value},\n {'type': '/type/volume', 'ia_id': value},\n ]\n for q in ocaid_alternatives:\n result = web.ctx.site.things(q)\n if result:\n return web.found(result[0] + ext)\n\n # Perform import, if possible\n from openlibrary.plugins.importapi.code import ia_importapi, BookImportError\n from openlibrary import accounts\n\n with accounts.RunAs('ImportBot'):\n try:\n ia_importapi.ia_import(value, require_marc=True)\n except BookImportError:\n logger.exception('Unable to import ia record')\n\n # Go the the record created, or to the dummy ia-wrapper record\n return web.found('/books/ia:' + value + ext)\n\n web.ctx.status = '404 Not Found'\n return render.notfound(web.ctx.path, create=False)\n\n\ndelegate.media_types['application/rdf+xml'] = 'rdf'\n\n\nclass rdf(delegate.mode):\n name = 'view'\n encoding = 'rdf'\n\n def GET(self, key):\n page = web.ctx.site.get(key)\n if not page:\n raise web.notfound('')\n else:\n from infogami.utils import template\n\n try:\n result = template.typetemplate('rdf')(page)\n except:\n raise web.notfound('')\n else:\n return delegate.RawText(\n result, content_type='application/rdf+xml; charset=utf-8'\n )\n\n\ndelegate.media_types[' application/atom+xml;profile=opds'] = 'opds'\n\n\nclass opds(delegate.mode):\n name = 'view'\n encoding = 'opds'\n\n def GET(self, key):\n page = web.ctx.site.get(key)\n if not page:\n raise web.notfound('')\n else:\n from openlibrary.plugins.openlibrary import opds\n\n try:\n result = opds.OPDSEntry(page).to_string()\n except:\n raise web.notfound('')\n else:\n return delegate.RawText(\n result, content_type=' application/atom+xml;profile=opds'\n )\n\n\ndelegate.media_types['application/marcxml+xml'] = 'marcxml'\n\n\nclass marcxml(delegate.mode):\n name = 'view'\n encoding = 'marcxml'\n\n def GET(self, key):\n page = web.ctx.site.get(key)\n if page is None or page.type.key != '/type/edition':\n raise web.notfound('')\n else:\n from infogami.utils import template\n\n try:\n result = template.typetemplate('marcxml')(page)\n except:\n raise web.notfound('')\n else:\n return delegate.RawText(\n result, content_type='application/marcxml+xml; charset=utf-8'\n )\n\n\ndelegate.media_types['text/x-yaml'] = 'yml'\n\n\nclass _yaml(delegate.mode):\n name = 'view'\n encoding = 'yml'\n\n def GET(self, key):\n d = self.get_data(key)\n\n if web.input(text='false').text.lower() == 'true':\n web.header('Content-Type', 'text/plain; charset=utf-8')\n else:\n web.header('Content-Type', 'text/x-yaml; charset=utf-8')\n\n raise web.ok(self.dump(d))\n\n def get_data(self, key):\n i = web.input(v=None)\n v = safeint(i.v, None)\n data = {'key': key, 'revision': v}\n try:\n d = api.request('/get', data=data)\n except client.ClientException as e:\n if e.json:\n msg = self.dump(json.loads(e.json))\n else:\n msg = str(e)\n raise web.HTTPError(e.status, data=msg)\n\n return json.loads(d)\n\n def dump(self, d):\n import yaml\n\n return yaml.safe_dump(d, indent=4, allow_unicode=True, default_flow_style=False)\n\n def load(self, data):\n import yaml\n\n return yaml.safe_load(data)\n\n\nclass _yaml_edit(_yaml):\n name = 'edit'\n encoding = 'yml'\n\n def is_admin(self):\n u = delegate.context.user\n return u and (u.is_admin() or u.is_super_librarian())\n\n def GET(self, key):\n # only allow admin users to edit yaml\n if not self.is_admin():\n return render.permission_denied(key, 'Permission Denied')\n\n try:\n d = self.get_data(key)\n except web.HTTPError as e:\n if web.ctx.status.lower() == '404 not found':\n d = {'key': key}\n else:\n raise\n return render.edit_yaml(key, self.dump(d))\n\n def POST(self, key):\n # only allow admin users to edit yaml\n if not self.is_admin():\n return render.permission_denied(key, 'Permission Denied')\n\n i = web.input(body='', _comment=None)\n\n if '_save' in i:\n d = self.load(i.body)\n p = web.ctx.site.new(key, d)\n try:\n p._save(i._comment)\n except (client.ClientException, ValidationException) as e:\n add_flash_message('error', str(e))\n return render.edit_yaml(key, i.body)\n raise web.seeother(key + '.yml')\n elif '_preview' in i:\n add_flash_message('Preview not supported')\n return render.edit_yaml(key, i.body)\n else:\n add_flash_message('unknown action')\n return render.edit_yaml(key, i.body)\n\n\ndef _get_user_root():\n user_root = infogami.config.get('infobase', {}).get('user_root', '/user')\n return web.rstrips(user_root, '/')\n\n\ndef _get_bots():\n bots = web.ctx.site.store.values(type='account', name='bot', value='true')\n user_root = _get_user_root()\n return [user_root + '/' + account['username'] for account in bots]\n\n\ndef _get_members_of_group(group_key):\n \"\"\"Returns keys of all members of the group identifier by group_key.\"\"\"\n usergroup = web.ctx.site.get(group_key) or {}\n return [m.key for m in usergroup.get('members', [])]\n\n\ndef can_write():\n \"\"\"\n Any user with bot flag set can write.\n For backward-compatability, all admin users and people in api usergroup are also allowed to write.\n \"\"\"\n user_key = delegate.context.user and delegate.context.user.key\n bots = (\n _get_members_of_group('/usergroup/api')\n + _get_members_of_group('/usergroup/admin')\n + _get_bots()\n )\n return user_key in bots\n\n\n# overwrite the implementation of can_write in the infogami API plugin with this one.\napi.can_write = can_write\n\n\nclass Forbidden(web.HTTPError):\n def __init__(self, msg=''):\n web.HTTPError.__init__(self, '403 Forbidden', {}, msg)\n\n\nclass BadRequest(web.HTTPError):\n def __init__(self, msg=''):\n web.HTTPError.__init__(self, '400 Bad Request', {}, msg)\n\n\nclass new:\n \"\"\"API to create new author/edition/work/publisher/series.\"\"\"\n\n def prepare_query(self, query):\n \"\"\"\n Add key to query and returns the key.\n If query is a list multiple queries are returned.\n \"\"\"\n if isinstance(query, list):\n return [self.prepare_query(q) for q in query]\n else:\n type = query['type']\n if isinstance(type, dict):\n type = type['key']\n query['key'] = web.ctx.site.new_key(type)\n return query['key']\n\n def verify_types(self, query):\n if isinstance(query, list):\n for q in query:\n self.verify_types(q)\n else:\n if 'type' not in query:\n raise BadRequest('Missing type')\n type = query['type']\n if isinstance(type, dict):\n if 'key' not in type:\n raise BadRequest('Bad Type: ' + json.dumps(type))\n type = type['key']\n\n if type not in [\n '/type/author',\n '/type/edition',\n '/type/work',\n '/type/series',\n '/type/publisher',\n ]:\n raise BadRequest('Bad Type: ' + json.dumps(type))\n\n def POST(self):\n if not can_write():\n raise Forbidden('Permission Denied.')\n\n try:\n query = json.loads(web.data())\n h = api.get_custom_headers()\n comment = h.get('comment')\n action = h.get('action')\n except Exception as e:\n raise BadRequest(str(e))\n\n self.verify_types(query)\n keys = self.prepare_query(query)\n\n try:\n if not isinstance(query, list):\n query = [query]\n web.ctx.site.save_many(query, comment=comment, action=action)\n except client.ClientException as e:\n raise BadRequest(str(e))\n\n # graphite/statsd tracking of bot edits\n user = delegate.context.user and delegate.context.user.key\n if user.lower().endswith('bot'):\n botname = user.replace('/people/', '', 1)\n botname = botname.replace('.', '-')\n key = 'ol.edits.bots.' + botname\n openlibrary.core.stats.increment(key)\n return json.dumps(keys)\n\n\napi and api.add_hook('new', new)\n\n\n@public\ndef changequery(query=None, **kw):\n if query is None:\n query = web.input(_method='get', _unicode=False)\n for k, v in kw.items():\n if v is None:\n query.pop(k, None)\n else:\n query[k] = v\n\n query = {\n k: [web.safestr(s) for s in v] if isinstance(v, list) else web.safestr(v)\n for k, v in query.items()\n }\n out = web.ctx.get('readable_path', web.ctx.path)\n if query:\n out += '?' + urllib.parse.urlencode(query, doseq=True)\n return out\n\n\n# Hack to limit recent changes offset.\n# Large offsets are blowing up the database.\n\nfrom infogami.core.db import get_recent_changes as _get_recentchanges\n\nimport urllib\n\n\n@public\ndef get_recent_changes(*a, **kw):\n if 'offset' in kw and kw['offset'] > 5000:\n return []\n else:\n return _get_recentchanges(*a, **kw)\n\n\n@public\ndef most_recent_change():\n if 'cache_most_recent' in infogami.config.features:\n v = web.ctx.site._request('/most_recent')\n v.thing = web.ctx.site.get(v.key)\n v.author = v.author and web.ctx.site.get(v.author)\n v.created = client.parse_datetime(v.created)\n return v\n else:\n return get_recent_changes(limit=1)[0]\n\n\n@public\ndef get_cover_id(key):\n try:\n _, cat, oln = key.split('/')\n return requests.get(\n f\"https://covers.openlibrary.org/{cat}/query?olid={oln}&limit=1\"\n ).json()[0]\n except (IndexError, json.decoder.JSONDecodeError, TypeError, ValueError):\n return None\n\n\nlocal_ip = None\n\n\nclass invalidate(delegate.page):\n path = '/system/invalidate'\n\n def POST(self):\n global local_ip\n if local_ip is None:\n local_ip = socket.gethostbyname(socket.gethostname())\n\n if (\n web.ctx.ip != '127.0.0.1'\n and web.ctx.ip.rsplit('.', 1)[0] != local_ip.rsplit('.', 1)[0]\n ):\n raise Forbidden('Allowed only in the local network.')\n\n data = json.loads(web.data())\n if not isinstance(data, list):\n data = [data]\n for d in data:\n thing = client.Thing(web.ctx.site, d['key'], client.storify(d))\n client._run_hooks('on_new_version', thing)\n return delegate.RawText('ok')\n\n\ndef save_error():\n t = datetime.datetime.utcnow()\n name = '%04d-%02d-%02d/%02d%02d%02d%06d' % (\n t.year,\n t.month,\n t.day,\n t.hour,\n t.minute,\n t.second,\n t.microsecond,\n )\n\n path = infogami.config.get('errorlog', 'errors') + '/' + name + '.html'\n dir = os.path.dirname(path)\n if not os.path.exists(dir):\n os.makedirs(dir)\n\n error = web.safestr(web.djangoerror())\n f = open(path, 'w')\n f.write(error)\n f.close()\n\n print('error saved to', path, file=web.debug)\n return name\n\n\ndef internalerror():\n i = web.input(_method='GET', debug='false')\n name = save_error()\n\n # TODO: move this stats stuff to plugins\\openlibrary\\stats.py\n # Can't have sub-metrics, so can't add more info\n openlibrary.core.stats.increment('ol.internal-errors')\n increment_error_count('ol.internal-errors-segmented')\n\n # TODO: move this to plugins\\openlibrary\\sentry.py\n from openlibrary.plugins.openlibrary.sentry import sentry\n\n if sentry.enabled:\n sentry.capture_exception_webpy()\n\n if i.debug.lower() == 'true':\n raise web.debugerror()\n else:\n msg = render.site(render.internalerror(name))\n raise web.internalerror(web.safestr(msg))\n\n\ndelegate.app.internalerror = internalerror\ndelegate.add_exception_hook(save_error)\n\n\nclass memory(delegate.page):\n path = '/debug/memory'\n\n def GET(self):\n import guppy\n\n h = guppy.hpy()\n return delegate.RawText(str(h.heap()))\n\n\ndef _get_relatedcarousels_component(workid):\n if 'env' not in web.ctx:\n delegate.fakeload()\n work = web.ctx.site.get('/works/%s' % workid) or {}\n component = render_template('books/RelatedWorksCarousel', work)\n return {0: str(component)}\n\n\ndef get_cached_relatedcarousels_component(*args, **kwargs):\n memoized_get_component_metadata = cache.memcache_memoize(\n _get_relatedcarousels_component,\n \"book.bookspage.component.relatedcarousels\",\n timeout=dateutil.HALF_DAY_SECS,\n )\n return (\n memoized_get_component_metadata(*args, **kwargs)\n or memoized_get_component_metadata.update(*args, **kwargs)[0]\n )\n\n\nclass Partials(delegate.page):\n path = '/partials'\n encoding = 'json'\n\n def GET(self):\n # `data` is meant to be a dict with two keys: `args` and `kwargs`.\n # `data['args']` is meant to be a list of a template's positional arguments, in order.\n # `data['kwargs']` is meant to be a dict containing a template's keyword arguments.\n i = web.input(workid=None, _component=None, data=None)\n component = i.pop(\"_component\")\n partial = {}\n if component == \"RelatedWorkCarousel\":\n partial = _get_relatedcarousels_component(i.workid)\n elif component == \"AffiliateLinks\":\n data = json.loads(i.data)\n args = data.get('args', [])\n # XXX : Throw error if args length is less than 2\n macro = web.template.Template.globals['macros'].AffiliateLinks(\n args[0], args[1]\n )\n partial = {\"partials\": str(macro)}\n\n return delegate.RawText(json.dumps(partial))\n\n\ndef is_bot():\n r\"\"\"Generated on ol-www1 within /var/log/nginx with:\n\n cat access.log | grep -oh \"; \\w*[bB]ot\" | sort --unique | awk '{print tolower($2)}'\n cat access.log | grep -oh \"; \\w*[sS]pider\" | sort --unique | awk '{print tolower($2)}'\n\n Manually removed singleton `bot` (to avoid overly complex grep regex)\n \"\"\"\n user_agent_bots = [\n 'sputnikbot',\n 'dotbot',\n 'semrushbot',\n 'googlebot',\n 'yandexbot',\n 'monsidobot',\n 'kazbtbot',\n 'seznambot',\n 'dubbotbot',\n '360spider',\n 'redditbot',\n 'yandexmobilebot',\n 'linkdexbot',\n 'musobot',\n 'mojeekbot',\n 'focuseekbot',\n 'behloolbot',\n 'startmebot',\n 'yandexaccessibilitybot',\n 'uptimerobot',\n 'femtosearchbot',\n 'pinterestbot',\n 'toutiaospider',\n 'yoozbot',\n 'parsijoobot',\n 'equellaurlbot',\n 'donkeybot',\n 'paperlibot',\n 'nsrbot',\n 'discordbot',\n 'ahrefsbot',\n '`googlebot',\n 'coccocbot',\n 'buzzbot',\n 'laserlikebot',\n 'baiduspider',\n 'bingbot',\n 'mj12bot',\n 'yoozbotadsbot',\n ]\n if not web.ctx.env.get('HTTP_USER_AGENT'):\n return True\n user_agent = web.ctx.env['HTTP_USER_AGENT'].lower()\n return any(bot in user_agent for bot in user_agent_bots)\n\n\ndef setup_template_globals():\n # must be imported here, otherwise silently messes up infogami's import execution\n # order, resulting in random errors like the the /account/login.json endpoint\n # defined in accounts.py being ignored, and using the infogami endpoint instead.\n from openlibrary.book_providers import (\n get_best_edition,\n get_book_provider,\n get_book_provider_by_name,\n get_cover_url,\n )\n\n web.template.Template.globals.update(\n {\n 'next': next,\n 'sorted': sorted,\n 'zip': zip,\n 'tuple': tuple,\n 'hash': hash,\n 'urlquote': web.urlquote,\n 'isbn_13_to_isbn_10': isbn_13_to_isbn_10,\n 'isbn_10_to_isbn_13': isbn_10_to_isbn_13,\n 'NEWLINE': '\\n',\n 'random': random.Random(),\n 'choose_random_from': random.choice,\n 'get_lang': lambda: web.ctx.lang,\n 'ceil': math.ceil,\n 'get_best_edition': get_best_edition,\n 'get_book_provider': get_book_provider,\n 'get_book_provider_by_name': get_book_provider_by_name,\n 'get_cover_url': get_cover_url,\n # bad use of globals\n 'is_bot': is_bot,\n 'time': time,\n 'input': web.input,\n 'dumps': json.dumps,\n }\n )\n\n\ndef setup_context_defaults():\n from infogami.utils import context\n\n context.defaults.update({'features': [], 'user': None, 'MAX_VISIBLE_BOOKS': 5})\n\n\ndef setup():\n from openlibrary.plugins.openlibrary import (\n sentry,\n home,\n borrow_home,\n stats,\n support,\n events,\n design,\n status,\n authors,\n swagger,\n )\n\n sentry.setup()\n home.setup()\n design.setup()\n borrow_home.setup()\n stats.setup()\n support.setup()\n events.setup()\n status.setup()\n authors.setup()\n swagger.setup()\n\n from openlibrary.plugins.openlibrary import api\n\n delegate.app.add_processor(web.unloadhook(stats.stats_hook))\n\n if infogami.config.get('dev_instance') is True:\n from openlibrary.plugins.openlibrary import dev_instance\n\n dev_instance.setup()\n\n setup_context_defaults()\n setup_template_globals()\n\n\nsetup()\n", "path": "openlibrary/plugins/openlibrary/code.py" } ]
diff --git a/openlibrary/plugins/openlibrary/code.py b/openlibrary/plugins/openlibrary/code.py index 0e6fe909afa..3ab9762dcdd 100644 --- a/openlibrary/plugins/openlibrary/code.py +++ b/openlibrary/plugins/openlibrary/code.py @@ -427,6 +427,7 @@ class ia_js_cdn(delegate.page): def GET(self, filename): web.header('Content-Type', 'text/javascript') + web.header("Cache-Control", "max-age=%d" % (24 * 3600)) return web.ok(fetch_ia_js(filename))
litestar-org__litestar-1773
StaticFilesConfig and virtual directories I'm trying to write a ``FileSystemProtocol`` to load files from the package data using [importlib_resources](https://importlib-resources.readthedocs.io/en/latest/using.html#). But because ``directories`` is defined as ``DirectoryPath``, pydantic checks if the given directories exist in the local filesystem. This is not generally true, especially in any kind of virtual filesystem (e.g. a zipped package). I think this condition should be relaxed to support virtual filesystems. https://github.com/starlite-api/starlite/blob/9bb6dcd57c10a591377cf8e3a537e9292566d5b9/starlite/config/static_files.py#L32
[ { "content": "from __future__ import annotations\n\nfrom litestar.exceptions import ImproperlyConfiguredException\n\n__all__ = (\"DTOException\", \"UnsupportedType\")\n\n\nclass DTOException(ImproperlyConfiguredException):\n \"\"\"Base exception for DTO errors.\"\"\"\n\n\nclass UnsupportedType(DTOException):\n \"\"\"Raised when a type is not supported by Litestar.\"\"\"\n", "path": "litestar/dto/exceptions.py" } ]
[ { "content": null, "path": "litestar/dto/exceptions.py" } ]
diff --git a/docs/reference/dto/exceptions.rst b/docs/reference/dto/exceptions.rst deleted file mode 100644 index c809fb9510..0000000000 --- a/docs/reference/dto/exceptions.rst +++ /dev/null @@ -1,5 +0,0 @@ -exceptions -========== - -.. automodule:: litestar.dto.exceptions - :members: diff --git a/docs/reference/dto/index.rst b/docs/reference/dto/index.rst index b793fa9f5d..4e80207dc8 100644 --- a/docs/reference/dto/index.rst +++ b/docs/reference/dto/index.rst @@ -5,5 +5,4 @@ dto :titlesonly: interface - exceptions factory/index diff --git a/litestar/dto/exceptions.py b/litestar/dto/exceptions.py deleted file mode 100644 index 6d8ae2a5ce..0000000000 --- a/litestar/dto/exceptions.py +++ /dev/null @@ -1,13 +0,0 @@ -from __future__ import annotations - -from litestar.exceptions import ImproperlyConfiguredException - -__all__ = ("DTOException", "UnsupportedType") - - -class DTOException(ImproperlyConfiguredException): - """Base exception for DTO errors.""" - - -class UnsupportedType(DTOException): - """Raised when a type is not supported by Litestar."""
SeldonIO__MLServer-1064
decode_args with tuple return value I'm confused about how to use `decode_args()` when the model returns a tuple of, let's say, a numpy array. If I have an inference function with the following signature ```python import numpy as np from mlserver.codecs.decorator import decode_args def predict(input: np.ndarray) -> tuple[np.ndarray]: # simple identity model return (input, ) decode_args(predict) ``` I receive a ``` CodecNotFound: Codec not found for output field with type tuple[numpy.ndarray] ``` error message. I think the issue lies in the following line that is used to unwrap the tuple signature and convert it to a list of types. https://github.com/SeldonIO/MLServer/blob/9d7045cc3c50a14201733865ca0c3e8480ebd942/mlserver/codecs/decorator.py#L37 However the line checks if the type hint is a tuple instance, which is not true in the above case: `isinstance(tuple[np.ndarray], tuple) == False)` To make `decode_args` work with a tuple return signature, I have to change the function signature to ```python def predict(input: np.ndarray) -> tuple([np.ndarray]): ``` which is an actual tuple instance and not a type hint. This looks like a bug to me.
[ { "content": "from functools import wraps, partial\nfrom typing import (\n Any,\n Callable,\n Coroutine,\n Dict,\n List,\n Optional,\n Union,\n Type,\n Tuple,\n get_origin,\n get_args,\n get_type_hints,\n TYPE_CHECKING,\n)\n\n\nfrom ..types import InferenceRequest, InferenceResponse, ResponseOutput\n\nfrom .base import RequestCodec, InputCodec, find_input_codec, find_request_codec\nfrom .errors import InputsNotFound, OutputNotFound, CodecNotFound\nfrom .utils import Codec\n\nif TYPE_CHECKING:\n from ..model import MLModel\n\nPredictFunc = Callable[\n [\"MLModel\", InferenceRequest], Coroutine[Any, Any, InferenceResponse]\n]\n\n\ndef _as_list(a: Optional[Union[Any, Tuple[Any]]]) -> List[Any]:\n if a is None:\n return []\n\n if isinstance(a, tuple):\n # Split into components\n return list(a)\n\n # Otherwise, assume it's a single element\n return [a]\n\n\ndef _is_codec_type(c: Codec, t: Type) -> bool:\n if issubclass(c, t): # type: ignore\n return True\n\n if isinstance(c, t):\n return True\n\n return False\n\n\n_is_input_codec = partial(_is_codec_type, t=InputCodec)\n_is_request_codec = partial(_is_codec_type, t=RequestCodec)\n\n\ndef _is_optional(t: Type) -> bool:\n origin = get_origin(t)\n if origin == Optional:\n return True\n\n if origin == Union:\n # Cover case where Optional[a] is reported as Union[a, None]\n args = get_args(t)\n if len(args) == 2 and type(None) in args:\n return True\n\n return False\n\n\ndef _unwrap_optional(t: Type) -> Type:\n args = get_args(t)\n for arg in args:\n if not isinstance(arg, type(None)):\n return arg\n\n return t\n\n\nclass SignatureCodec(RequestCodec):\n \"\"\"\n Internal codec that knows how to map type hints to codecs.\n \"\"\"\n\n # TODO: Should this receive the whole class as argument?\n def __init__(self, predict: Callable):\n self._predict = predict\n self._input_codecs, self._output_codecs = self._get_codecs(predict)\n\n def _get_codecs(self, pred: Callable) -> Tuple[Dict[str, Codec], List[Codec]]:\n self._input_hints = self._get_type_hints(pred)\n self._output_hints = _as_list(self._input_hints.pop(\"return\", None))\n\n input_codecs = {}\n for name, type_hint in self._input_hints.items():\n codec = self._find_codec(name=name, type_hint=type_hint, is_input=True)\n # TODO: Consider metadata as well! (needs to be done at runtime)\n input_codecs[name] = codec\n\n output_codecs = []\n for type_hint in self._output_hints:\n # Try either as an input or as a request codec\n codec = self._find_codec(name=None, type_hint=type_hint, is_input=False)\n output_codecs.append(codec)\n\n return input_codecs, output_codecs\n\n def _get_type_hints(self, pred: Callable) -> Dict[str, Type]:\n type_hints = get_type_hints(pred)\n # For us, `typing.Optional` is just syntactic sugar, so let's ensure we\n # unwrap it\n for name, hint in type_hints.items():\n if _is_optional(hint):\n type_hints[name] = _unwrap_optional(hint)\n\n return type_hints\n\n def _find_codec(\n self, name: Optional[str], type_hint: Type, is_input: bool = False\n ) -> Codec:\n codec = find_input_codec(type_hint=type_hint)\n if codec is not None:\n return codec\n\n codec = find_request_codec(type_hint=type_hint) # type: ignore\n if codec is not None:\n return codec\n\n raise CodecNotFound(name=name, payload_type=str(type_hint), is_input=is_input)\n\n def decode_request( # type: ignore\n self, request: InferenceRequest\n ) -> Dict[str, Any]:\n inputs = {}\n extra_request_inputs = []\n for request_input in request.inputs:\n input_name = request_input.name\n if input_name not in self._input_codecs:\n # Aggregate extra request inputs to check later, as they could\n # be part of aggregated request codecs (e.g. like dataframes)\n extra_request_inputs.append(request_input)\n continue\n\n # Ensure matching codec is an input codec\n codec = self._input_codecs[input_name]\n if not _is_input_codec(codec):\n raise CodecNotFound(name=input_name, is_input=True)\n\n inputs[input_name] = codec.decode_input(request_input) # type: ignore\n\n if extra_request_inputs:\n request_codec = self._get_request_codec()\n if not request_codec:\n # If there are no request codecs that can aggregate all\n # remaining inputs, raise an error\n raise InputsNotFound(extra_request_inputs, self._input_codecs)\n\n # We create a fake request built from the extra request inputs\n name, codec = request_codec\n extra_inputs = InferenceRequest(inputs=extra_request_inputs)\n inputs[name] = codec.decode_request(extra_inputs)\n\n return inputs\n\n def _get_request_codec(self) -> Optional[Tuple[str, RequestCodec]]:\n for name, codec in self._input_codecs.items():\n if _is_request_codec(codec):\n return name, codec # type: ignore\n\n return None\n\n def encode_response( # type: ignore\n self, model_name: str, payload: Any, model_version: Optional[str] = None\n ) -> InferenceResponse:\n payloads = _as_list(payload)\n outputs = []\n for idx, payload in enumerate(payloads):\n outputs += self._encode_outputs(idx, payload)\n\n return InferenceResponse(\n model_name=model_name, model_version=model_version, outputs=outputs\n )\n\n def _encode_outputs(self, idx: int, payload: Any) -> List[ResponseOutput]:\n output_type = type(payload)\n if idx >= len(self._output_codecs):\n raise OutputNotFound(idx, output_type, self._output_hints)\n\n # TODO: Fallback to encode_by_payload?\n codec = self._output_codecs[idx]\n if not codec.can_encode(payload):\n raise OutputNotFound(idx, output_type, self._output_hints)\n\n if _is_input_codec(codec):\n # TODO: Check model metadata for output names\n output_name = f\"output-{idx}\"\n response_output = codec.encode_output( # type: ignore\n name=output_name, payload=payload\n )\n return [response_output]\n\n if _is_request_codec(codec):\n # NOTE: We will ignore `model_name` and only grab the outputs\n response = codec.encode_response( # type: ignore\n model_name=\"\", payload=payload\n )\n return response.outputs\n\n return []\n\n\ndef decode_args(predict: Callable) -> PredictFunc:\n codec = SignatureCodec(predict)\n\n @wraps(predict)\n async def _f(self: \"MLModel\", request: InferenceRequest) -> InferenceResponse:\n inputs = codec.decode_request(request=request)\n\n outputs = await predict(self, **inputs)\n\n return codec.encode_response(\n model_name=self.name, payload=outputs, model_version=self.version\n )\n\n return _f\n", "path": "mlserver/codecs/decorator.py" } ]
[ { "content": "from functools import wraps, partial\nfrom typing import (\n Any,\n Callable,\n Coroutine,\n Dict,\n List,\n Optional,\n Union,\n Type,\n Tuple,\n get_origin,\n get_args,\n get_type_hints,\n TYPE_CHECKING,\n)\n\n\nfrom ..types import InferenceRequest, InferenceResponse, ResponseOutput\n\nfrom .base import RequestCodec, InputCodec, find_input_codec, find_request_codec\nfrom .errors import InputsNotFound, OutputNotFound, CodecNotFound\nfrom .utils import Codec\n\nif TYPE_CHECKING:\n from ..model import MLModel\n\nPredictFunc = Callable[\n [\"MLModel\", InferenceRequest], Coroutine[Any, Any, InferenceResponse]\n]\n\n\ndef _as_list(a: Optional[Union[Any, Tuple[Any]]]) -> List[Any]:\n if a is None:\n return []\n\n if isinstance(a, tuple):\n # Split into components\n return list(a)\n\n if get_origin(a) is tuple:\n # Convert type arguments into list\n return list(get_args(a))\n\n # Otherwise, assume it's a single element\n return [a]\n\n\ndef _is_codec_type(c: Codec, t: Type) -> bool:\n if issubclass(c, t): # type: ignore\n return True\n\n if isinstance(c, t):\n return True\n\n return False\n\n\n_is_input_codec = partial(_is_codec_type, t=InputCodec)\n_is_request_codec = partial(_is_codec_type, t=RequestCodec)\n\n\ndef _is_optional(t: Type) -> bool:\n origin = get_origin(t)\n if origin == Optional:\n return True\n\n if origin == Union:\n # Cover case where Optional[a] is reported as Union[a, None]\n args = get_args(t)\n if len(args) == 2 and type(None) in args:\n return True\n\n return False\n\n\ndef _unwrap_optional(t: Type) -> Type:\n args = get_args(t)\n for arg in args:\n if not isinstance(arg, type(None)):\n return arg\n\n return t\n\n\nclass SignatureCodec(RequestCodec):\n \"\"\"\n Internal codec that knows how to map type hints to codecs.\n \"\"\"\n\n # TODO: Should this receive the whole class as argument?\n def __init__(self, predict: Callable):\n self._predict = predict\n self._input_codecs, self._output_codecs = self._get_codecs(predict)\n\n def _get_codecs(self, pred: Callable) -> Tuple[Dict[str, Codec], List[Codec]]:\n self._input_hints = self._get_type_hints(pred)\n self._output_hints = _as_list(self._input_hints.pop(\"return\", None))\n\n input_codecs = {}\n for name, type_hint in self._input_hints.items():\n codec = self._find_codec(name=name, type_hint=type_hint, is_input=True)\n # TODO: Consider metadata as well! (needs to be done at runtime)\n input_codecs[name] = codec\n\n output_codecs = []\n for type_hint in self._output_hints:\n # Try either as an input or as a request codec\n codec = self._find_codec(name=None, type_hint=type_hint, is_input=False)\n output_codecs.append(codec)\n\n return input_codecs, output_codecs\n\n def _get_type_hints(self, pred: Callable) -> Dict[str, Type]:\n type_hints = get_type_hints(pred)\n # For us, `typing.Optional` is just syntactic sugar, so let's ensure we\n # unwrap it\n for name, hint in type_hints.items():\n if _is_optional(hint):\n type_hints[name] = _unwrap_optional(hint)\n\n return type_hints\n\n def _find_codec(\n self, name: Optional[str], type_hint: Type, is_input: bool = False\n ) -> Codec:\n codec = find_input_codec(type_hint=type_hint)\n if codec is not None:\n return codec\n\n codec = find_request_codec(type_hint=type_hint) # type: ignore\n if codec is not None:\n return codec\n\n raise CodecNotFound(name=name, payload_type=str(type_hint), is_input=is_input)\n\n def decode_request( # type: ignore\n self, request: InferenceRequest\n ) -> Dict[str, Any]:\n inputs = {}\n extra_request_inputs = []\n for request_input in request.inputs:\n input_name = request_input.name\n if input_name not in self._input_codecs:\n # Aggregate extra request inputs to check later, as they could\n # be part of aggregated request codecs (e.g. like dataframes)\n extra_request_inputs.append(request_input)\n continue\n\n # Ensure matching codec is an input codec\n codec = self._input_codecs[input_name]\n if not _is_input_codec(codec):\n raise CodecNotFound(name=input_name, is_input=True)\n\n inputs[input_name] = codec.decode_input(request_input) # type: ignore\n\n if extra_request_inputs:\n request_codec = self._get_request_codec()\n if not request_codec:\n # If there are no request codecs that can aggregate all\n # remaining inputs, raise an error\n raise InputsNotFound(extra_request_inputs, self._input_codecs)\n\n # We create a fake request built from the extra request inputs\n name, codec = request_codec\n extra_inputs = InferenceRequest(inputs=extra_request_inputs)\n inputs[name] = codec.decode_request(extra_inputs)\n\n return inputs\n\n def _get_request_codec(self) -> Optional[Tuple[str, RequestCodec]]:\n for name, codec in self._input_codecs.items():\n if _is_request_codec(codec):\n return name, codec # type: ignore\n\n return None\n\n def encode_response( # type: ignore\n self, model_name: str, payload: Any, model_version: Optional[str] = None\n ) -> InferenceResponse:\n payloads = _as_list(payload)\n outputs = []\n for idx, payload in enumerate(payloads):\n outputs += self._encode_outputs(idx, payload)\n\n return InferenceResponse(\n model_name=model_name, model_version=model_version, outputs=outputs\n )\n\n def _encode_outputs(self, idx: int, payload: Any) -> List[ResponseOutput]:\n output_type = type(payload)\n if idx >= len(self._output_codecs):\n raise OutputNotFound(idx, output_type, self._output_hints)\n\n # TODO: Fallback to encode_by_payload?\n codec = self._output_codecs[idx]\n if not codec.can_encode(payload):\n raise OutputNotFound(idx, output_type, self._output_hints)\n\n if _is_input_codec(codec):\n # TODO: Check model metadata for output names\n output_name = f\"output-{idx}\"\n response_output = codec.encode_output( # type: ignore\n name=output_name, payload=payload\n )\n return [response_output]\n\n if _is_request_codec(codec):\n # NOTE: We will ignore `model_name` and only grab the outputs\n response = codec.encode_response( # type: ignore\n model_name=\"\", payload=payload\n )\n return response.outputs\n\n return []\n\n\ndef decode_args(predict: Callable) -> PredictFunc:\n codec = SignatureCodec(predict)\n\n @wraps(predict)\n async def _f(self: \"MLModel\", request: InferenceRequest) -> InferenceResponse:\n inputs = codec.decode_request(request=request)\n\n outputs = await predict(self, **inputs)\n\n return codec.encode_response(\n model_name=self.name, payload=outputs, model_version=self.version\n )\n\n return _f\n", "path": "mlserver/codecs/decorator.py" } ]
diff --git a/mlserver/codecs/decorator.py b/mlserver/codecs/decorator.py index 89367fcbf..19d2e81e0 100644 --- a/mlserver/codecs/decorator.py +++ b/mlserver/codecs/decorator.py @@ -38,6 +38,10 @@ def _as_list(a: Optional[Union[Any, Tuple[Any]]]) -> List[Any]: # Split into components return list(a) + if get_origin(a) is tuple: + # Convert type arguments into list + return list(get_args(a)) + # Otherwise, assume it's a single element return [a] diff --git a/tests/codecs/test_decorator.py b/tests/codecs/test_decorator.py index 6e949062a..b809cb081 100644 --- a/tests/codecs/test_decorator.py +++ b/tests/codecs/test_decorator.py @@ -2,7 +2,7 @@ import numpy as np import pandas as pd -from typing import Any, Callable, Dict, Optional, List +from typing import Any, Callable, Dict, Optional, List, Tuple from mlserver.types import ( InferenceRequest, @@ -13,7 +13,7 @@ ) from mlserver.codecs.base import InputCodec from mlserver.codecs.utils import Codec -from mlserver.codecs.decorator import SignatureCodec +from mlserver.codecs.decorator import SignatureCodec, _as_list from mlserver.codecs.errors import InputsNotFound, OutputNotFound from mlserver.codecs.numpy import NumpyCodec, NumpyRequestCodec from mlserver.codecs.string import StringCodec @@ -287,3 +287,13 @@ async def test_decode_args( res = NumpyRequestCodec.decode_response(inference_response) assert res == output_value + + +def test_as_list_typing_tuple(): + signature_list = _as_list(Tuple[np.ndarray, np.ndarray]) + assert signature_list == [np.ndarray, np.ndarray] + + +def test_as_list_native_tuple(): + signature_list = _as_list((np.ndarray, np.ndarray)) + assert signature_list == [np.ndarray, np.ndarray]
pypa__virtualenv-2107
site.getsitepackages() doesn't respect --system-site-packages on python2 **Issue** site.getsitepackages() doesn't respect --system-site-packages being set on python2. System site-package paths are never included. I came across this while working on #2105. In contrast to #2105 this is not specific to debian, which is why I am creating a separate issue for it. ``` python -m virtualenv --system-site-packages -ppython2 testenv . testenv/bin/activate python -c "import site; print(site.getsitepackages())" ['/home/adrian/Documents/dev/uni/bachelor/mypy/testenv/lib/python2.7/site-packages', '/home/adrian/Documents/dev/uni/bachelor/mypy/testenv/lib/site-python'] ``` **Environment** Provide at least: - OS: Tested on both Ubuntu 18.04.3 LTS and Arch Linux - ``pip list`` of the host python where ``virtualenv`` is installed: Same as #2105 **Output of the virtual environment creation** Make sure to run the creation with `-vvv --with-traceback`: Same as #2105
[ { "content": "# -*- coding: utf-8 -*-\n\"\"\"\nA simple shim module to fix up things on Python 2 only.\n\nNote: until we setup correctly the paths we can only import built-ins.\n\"\"\"\nimport sys\n\n\ndef main():\n \"\"\"Patch what needed, and invoke the original site.py\"\"\"\n config = read_pyvenv()\n sys.real_prefix = sys.base_prefix = config[\"base-prefix\"]\n sys.base_exec_prefix = config[\"base-exec-prefix\"]\n sys.base_executable = config[\"base-executable\"]\n global_site_package_enabled = config.get(\"include-system-site-packages\", False) == \"true\"\n rewrite_standard_library_sys_path()\n disable_user_site_package()\n load_host_site()\n if global_site_package_enabled:\n add_global_site_package()\n\n\ndef load_host_site():\n \"\"\"trigger reload of site.py - now it will use the standard library instance that will take care of init\"\"\"\n # we have a duality here, we generate the platform and pure library path based on what distutils.install specifies\n # because this is what pip will be using; the host site.py though may contain it's own pattern for where the\n # platform and pure library paths should exist\n\n # notably on Ubuntu there's a patch for getsitepackages to point to\n # - prefix + local/lib/pythonx.y/dist-packages\n # - prefix + lib/pythonx.y/dist-packages\n # while distutils.install.cmd still points both of these to\n # - prefix + lib/python2.7/site-packages\n\n # to facilitate when the two match, or not we first reload the site.py, now triggering the import of host site.py,\n # as this will ensure that initialization code within host site.py runs\n\n here = __file__ # the distutils.install patterns will be injected relative to this site.py, save it here\n\n # ___RELOAD_CODE___\n\n # and then if the distutils site packages are not on the sys.path we add them via add_site_dir; note we must add\n # them by invoking add_site_dir to trigger the processing of pth files\n import os\n\n site_packages = r\"\"\"\n ___EXPECTED_SITE_PACKAGES___\n \"\"\"\n import json\n\n add_site_dir = sys.modules[\"site\"].addsitedir\n for path in json.loads(site_packages):\n full_path = os.path.abspath(os.path.join(here, path.encode(\"utf-8\")))\n add_site_dir(full_path)\n\n\nsep = \"\\\\\" if sys.platform == \"win32\" else \"/\" # no os module here yet - poor mans version\n\n\ndef read_pyvenv():\n \"\"\"read pyvenv.cfg\"\"\"\n config_file = \"{}{}pyvenv.cfg\".format(sys.prefix, sep)\n with open(config_file) as file_handler:\n lines = file_handler.readlines()\n config = {}\n for line in lines:\n try:\n split_at = line.index(\"=\")\n except ValueError:\n continue # ignore bad/empty lines\n else:\n config[line[:split_at].strip()] = line[split_at + 1 :].strip()\n return config\n\n\ndef rewrite_standard_library_sys_path():\n \"\"\"Once this site file is loaded the standard library paths have already been set, fix them up\"\"\"\n exe, prefix, exec_prefix = get_exe_prefixes(base=False)\n base_exe, base_prefix, base_exec = get_exe_prefixes(base=True)\n exe_dir = exe[: exe.rfind(sep)]\n for at, path in enumerate(sys.path):\n path = abs_path(path) # replace old sys prefix path starts with new\n skip_rewrite = path == exe_dir # don't fix the current executable location, notably on Windows this gets added\n skip_rewrite = skip_rewrite # ___SKIP_REWRITE____\n if not skip_rewrite:\n sys.path[at] = map_path(path, base_exe, exe_dir, exec_prefix, base_prefix, prefix, base_exec)\n\n # the rewrite above may have changed elements from PYTHONPATH, revert these if on\n if sys.flags.ignore_environment:\n return\n import os\n\n python_paths = []\n if \"PYTHONPATH\" in os.environ and os.environ[\"PYTHONPATH\"]:\n for path in os.environ[\"PYTHONPATH\"].split(os.pathsep):\n if path not in python_paths:\n python_paths.append(path)\n sys.path[: len(python_paths)] = python_paths\n\n\ndef get_exe_prefixes(base=False):\n return tuple(abs_path(getattr(sys, (\"base_\" if base else \"\") + i)) for i in (\"executable\", \"prefix\", \"exec_prefix\"))\n\n\ndef abs_path(value):\n values, keep = value.split(sep), []\n at = len(values) - 1\n while at >= 0:\n if values[at] == \"..\":\n at -= 1\n else:\n keep.append(values[at])\n at -= 1\n return sep.join(keep[::-1])\n\n\ndef map_path(path, base_executable, exe_dir, exec_prefix, base_prefix, prefix, base_exec_prefix):\n if path_starts_with(path, exe_dir):\n # content inside the exe folder needs to remap to original executables folder\n orig_exe_folder = base_executable[: base_executable.rfind(sep)]\n return \"{}{}\".format(orig_exe_folder, path[len(exe_dir) :])\n elif path_starts_with(path, prefix):\n return \"{}{}\".format(base_prefix, path[len(prefix) :])\n elif path_starts_with(path, exec_prefix):\n return \"{}{}\".format(base_exec_prefix, path[len(exec_prefix) :])\n return path\n\n\ndef path_starts_with(directory, value):\n return directory.startswith(value if value[-1] == sep else value + sep)\n\n\ndef disable_user_site_package():\n \"\"\"Flip the switch on enable user site package\"\"\"\n # sys.flags is a c-extension type, so we cannot monkeypatch it, replace it with a python class to flip it\n sys.original_flags = sys.flags\n\n class Flags(object):\n def __init__(self):\n self.__dict__ = {key: getattr(sys.flags, key) for key in dir(sys.flags) if not key.startswith(\"_\")}\n\n sys.flags = Flags()\n sys.flags.no_user_site = 1\n\n\ndef add_global_site_package():\n \"\"\"add the global site package\"\"\"\n import site\n\n # add user site package\n sys.flags = sys.original_flags # restore original\n site.ENABLE_USER_SITE = None # reset user site check\n # add the global site package to the path - use new prefix and delegate to site.py\n orig_prefixes = None\n try:\n orig_prefixes = site.PREFIXES\n site.PREFIXES = [sys.base_prefix, sys.base_exec_prefix]\n site.main()\n finally:\n site.PREFIXES = orig_prefixes\n\n\nmain()\n", "path": "src/virtualenv/create/via_global_ref/builtin/python2/site.py" } ]
[ { "content": "# -*- coding: utf-8 -*-\n\"\"\"\nA simple shim module to fix up things on Python 2 only.\n\nNote: until we setup correctly the paths we can only import built-ins.\n\"\"\"\nimport sys\n\n\ndef main():\n \"\"\"Patch what needed, and invoke the original site.py\"\"\"\n config = read_pyvenv()\n sys.real_prefix = sys.base_prefix = config[\"base-prefix\"]\n sys.base_exec_prefix = config[\"base-exec-prefix\"]\n sys.base_executable = config[\"base-executable\"]\n global_site_package_enabled = config.get(\"include-system-site-packages\", False) == \"true\"\n rewrite_standard_library_sys_path()\n disable_user_site_package()\n load_host_site()\n if global_site_package_enabled:\n add_global_site_package()\n\n\ndef load_host_site():\n \"\"\"trigger reload of site.py - now it will use the standard library instance that will take care of init\"\"\"\n # we have a duality here, we generate the platform and pure library path based on what distutils.install specifies\n # because this is what pip will be using; the host site.py though may contain it's own pattern for where the\n # platform and pure library paths should exist\n\n # notably on Ubuntu there's a patch for getsitepackages to point to\n # - prefix + local/lib/pythonx.y/dist-packages\n # - prefix + lib/pythonx.y/dist-packages\n # while distutils.install.cmd still points both of these to\n # - prefix + lib/python2.7/site-packages\n\n # to facilitate when the two match, or not we first reload the site.py, now triggering the import of host site.py,\n # as this will ensure that initialization code within host site.py runs\n\n here = __file__ # the distutils.install patterns will be injected relative to this site.py, save it here\n\n # ___RELOAD_CODE___\n\n # and then if the distutils site packages are not on the sys.path we add them via add_site_dir; note we must add\n # them by invoking add_site_dir to trigger the processing of pth files\n import os\n\n site_packages = r\"\"\"\n ___EXPECTED_SITE_PACKAGES___\n \"\"\"\n import json\n\n add_site_dir = sys.modules[\"site\"].addsitedir\n for path in json.loads(site_packages):\n full_path = os.path.abspath(os.path.join(here, path.encode(\"utf-8\")))\n add_site_dir(full_path)\n\n\nsep = \"\\\\\" if sys.platform == \"win32\" else \"/\" # no os module here yet - poor mans version\n\n\ndef read_pyvenv():\n \"\"\"read pyvenv.cfg\"\"\"\n config_file = \"{}{}pyvenv.cfg\".format(sys.prefix, sep)\n with open(config_file) as file_handler:\n lines = file_handler.readlines()\n config = {}\n for line in lines:\n try:\n split_at = line.index(\"=\")\n except ValueError:\n continue # ignore bad/empty lines\n else:\n config[line[:split_at].strip()] = line[split_at + 1 :].strip()\n return config\n\n\ndef rewrite_standard_library_sys_path():\n \"\"\"Once this site file is loaded the standard library paths have already been set, fix them up\"\"\"\n exe, prefix, exec_prefix = get_exe_prefixes(base=False)\n base_exe, base_prefix, base_exec = get_exe_prefixes(base=True)\n exe_dir = exe[: exe.rfind(sep)]\n for at, path in enumerate(sys.path):\n path = abs_path(path) # replace old sys prefix path starts with new\n skip_rewrite = path == exe_dir # don't fix the current executable location, notably on Windows this gets added\n skip_rewrite = skip_rewrite # ___SKIP_REWRITE____\n if not skip_rewrite:\n sys.path[at] = map_path(path, base_exe, exe_dir, exec_prefix, base_prefix, prefix, base_exec)\n\n # the rewrite above may have changed elements from PYTHONPATH, revert these if on\n if sys.flags.ignore_environment:\n return\n import os\n\n python_paths = []\n if \"PYTHONPATH\" in os.environ and os.environ[\"PYTHONPATH\"]:\n for path in os.environ[\"PYTHONPATH\"].split(os.pathsep):\n if path not in python_paths:\n python_paths.append(path)\n sys.path[: len(python_paths)] = python_paths\n\n\ndef get_exe_prefixes(base=False):\n return tuple(abs_path(getattr(sys, (\"base_\" if base else \"\") + i)) for i in (\"executable\", \"prefix\", \"exec_prefix\"))\n\n\ndef abs_path(value):\n values, keep = value.split(sep), []\n at = len(values) - 1\n while at >= 0:\n if values[at] == \"..\":\n at -= 1\n else:\n keep.append(values[at])\n at -= 1\n return sep.join(keep[::-1])\n\n\ndef map_path(path, base_executable, exe_dir, exec_prefix, base_prefix, prefix, base_exec_prefix):\n if path_starts_with(path, exe_dir):\n # content inside the exe folder needs to remap to original executables folder\n orig_exe_folder = base_executable[: base_executable.rfind(sep)]\n return \"{}{}\".format(orig_exe_folder, path[len(exe_dir) :])\n elif path_starts_with(path, prefix):\n return \"{}{}\".format(base_prefix, path[len(prefix) :])\n elif path_starts_with(path, exec_prefix):\n return \"{}{}\".format(base_exec_prefix, path[len(exec_prefix) :])\n return path\n\n\ndef path_starts_with(directory, value):\n return directory.startswith(value if value[-1] == sep else value + sep)\n\n\ndef disable_user_site_package():\n \"\"\"Flip the switch on enable user site package\"\"\"\n # sys.flags is a c-extension type, so we cannot monkeypatch it, replace it with a python class to flip it\n sys.original_flags = sys.flags\n\n class Flags(object):\n def __init__(self):\n self.__dict__ = {key: getattr(sys.flags, key) for key in dir(sys.flags) if not key.startswith(\"_\")}\n\n sys.flags = Flags()\n sys.flags.no_user_site = 1\n\n\ndef add_global_site_package():\n \"\"\"add the global site package\"\"\"\n import site\n\n # add user site package\n sys.flags = sys.original_flags # restore original\n site.ENABLE_USER_SITE = None # reset user site check\n # add the global site package to the path - use new prefix and delegate to site.py\n orig_prefixes = None\n try:\n orig_prefixes = site.PREFIXES\n site.PREFIXES = [sys.base_prefix, sys.base_exec_prefix]\n site.main()\n finally:\n site.PREFIXES = orig_prefixes + site.PREFIXES\n\n\nmain()\n", "path": "src/virtualenv/create/via_global_ref/builtin/python2/site.py" } ]
diff --git a/docs/changelog/2106.bugfix.rst b/docs/changelog/2106.bugfix.rst new file mode 100644 index 000000000..ec68bb584 --- /dev/null +++ b/docs/changelog/2106.bugfix.rst @@ -0,0 +1 @@ +Fix ``site.getsitepackages()`` ignoring ``--system-site-packages`` on python2 - by :user:`freundTech`. diff --git a/src/virtualenv/create/via_global_ref/builtin/python2/site.py b/src/virtualenv/create/via_global_ref/builtin/python2/site.py index 85eee842a..d2ee84fdb 100644 --- a/src/virtualenv/create/via_global_ref/builtin/python2/site.py +++ b/src/virtualenv/create/via_global_ref/builtin/python2/site.py @@ -158,7 +158,7 @@ def add_global_site_package(): site.PREFIXES = [sys.base_prefix, sys.base_exec_prefix] site.main() finally: - site.PREFIXES = orig_prefixes + site.PREFIXES = orig_prefixes + site.PREFIXES main() diff --git a/tests/unit/create/test_creator.py b/tests/unit/create/test_creator.py index 35f3f2493..69c7c96b3 100644 --- a/tests/unit/create/test_creator.py +++ b/tests/unit/create/test_creator.py @@ -1,5 +1,6 @@ from __future__ import absolute_import, unicode_literals +import ast import difflib import gc import json @@ -621,3 +622,34 @@ def test_pth_in_site_vs_PYTHONPATH(tmp_path): env=env, ) assert out == "ok\n" + + +def test_getsitepackages_system_site(tmp_path): + import site + + old_prefixes = site.PREFIXES + site.PREFIXES = [sys.base_prefix, sys.base_exec_prefix] + system_site_packages = site.getsitepackages() + site.PREFIXES = old_prefixes + + # Test without --system-site-packages + session = cli_run([ensure_text(str(tmp_path))]) + out = subprocess.check_output( + [str(session.creator.exe), "-c", r"import site; print(site.getsitepackages())"], + universal_newlines=True, + ) + site_packages = ast.literal_eval(out) + + for system_site_package in system_site_packages: + assert system_site_package not in site_packages + + # Test with --system-site-packages + session = cli_run([ensure_text(str(tmp_path)), "--system-site-packages"]) + out = subprocess.check_output( + [str(session.creator.exe), "-c", r"import site; print(site.getsitepackages())"], + universal_newlines=True, + ) + site_packages = ast.literal_eval(out) + + for system_site_package in system_site_packages: + assert system_site_package in site_packages
zulip__zulip-11317
Improve formatting for "arguments" sections with long examples. The line-wrapping for this endpoint's API documentation looks really ugly: ![image](https://user-images.githubusercontent.com/2746074/47042583-7e303200-d140-11e8-9b4f-d6fc1325dcba.png) We should either remove the maximum width on "description", or figure out a way to use more than one line for a given endpoint that doesn't look so bad (e.g. having the example be on the next line after the rest of the endpoint description).
[ { "content": "import re\nimport os\nimport ujson\n\nfrom django.utils.html import escape as escape_html\nfrom markdown.extensions import Extension\nfrom markdown.preprocessors import Preprocessor\nfrom zerver.lib.openapi import get_openapi_parameters\nfrom typing import Any, Dict, Optional, List\nimport markdown\n\nREGEXP = re.compile(r'\\{generate_api_arguments_table\\|\\s*(.+?)\\s*\\|\\s*(.+)\\s*\\}')\n\n\nclass MarkdownArgumentsTableGenerator(Extension):\n def __init__(self, configs: Optional[Dict[str, Any]]=None) -> None:\n if configs is None:\n configs = {}\n self.config = {\n 'base_path': ['.', 'Default location from which to evaluate relative paths for the JSON files.'],\n }\n for key, value in configs.items():\n self.setConfig(key, value)\n\n def extendMarkdown(self, md: markdown.Markdown, md_globals: Dict[str, Any]) -> None:\n md.preprocessors.add(\n 'generate_api_arguments', APIArgumentsTablePreprocessor(md, self.getConfigs()), '_begin'\n )\n\n\nclass APIArgumentsTablePreprocessor(Preprocessor):\n def __init__(self, md: markdown.Markdown, config: Dict[str, Any]) -> None:\n super(APIArgumentsTablePreprocessor, self).__init__(md)\n self.base_path = config['base_path']\n\n def run(self, lines: List[str]) -> List[str]:\n done = False\n while not done:\n for line in lines:\n loc = lines.index(line)\n match = REGEXP.search(line)\n\n if not match:\n continue\n\n filename = match.group(1)\n doc_name = match.group(2)\n filename = os.path.expanduser(filename)\n\n is_openapi_format = filename.endswith('.yaml')\n\n if not os.path.isabs(filename):\n parent_dir = self.base_path\n filename = os.path.normpath(os.path.join(parent_dir, filename))\n\n if is_openapi_format:\n endpoint, method = doc_name.rsplit(':', 1)\n arguments = [] # type: List[Dict[str, Any]]\n\n try:\n arguments = get_openapi_parameters(endpoint, method)\n except KeyError as e:\n # Don't raise an exception if the \"parameters\"\n # field is missing; we assume that's because the\n # endpoint doesn't accept any parameters\n if e.args != ('parameters',):\n raise e\n else:\n with open(filename, 'r') as fp:\n json_obj = ujson.load(fp)\n arguments = json_obj[doc_name]\n\n if arguments:\n text = self.render_table(arguments)\n else:\n text = ['This endpoint does not consume any arguments.']\n # The line that contains the directive to include the macro\n # may be preceded or followed by text or tags, in that case\n # we need to make sure that any preceding or following text\n # stays the same.\n line_split = REGEXP.split(line, maxsplit=0)\n preceding = line_split[0]\n following = line_split[-1]\n text = [preceding] + text + [following]\n lines = lines[:loc] + text + lines[loc+1:]\n break\n else:\n done = True\n return lines\n\n def render_table(self, arguments: List[Dict[str, Any]]) -> List[str]:\n table = []\n beginning = \"\"\"\n<table class=\"table\">\n <thead>\n <tr>\n <th>Argument</th>\n <th>Example</th>\n <th>Required</th>\n <th>Description</th>\n </tr>\n </thead>\n<tbody>\n\"\"\"\n tr = \"\"\"\n<tr>\n <td><code>{argument}</code></td>\n <td><code>{example}</code></td>\n <td>{required}</td>\n <td>{description}</td>\n</tr>\n\"\"\"\n\n table.append(beginning)\n\n md_engine = markdown.Markdown(extensions=[])\n\n for argument in arguments:\n description = argument['description']\n\n oneof = ['`' + item + '`'\n for item in argument.get('schema', {}).get('enum', [])]\n if oneof:\n description += '\\nMust be one of: {}.'.format(', '.join(oneof))\n\n default = argument.get('schema', {}).get('default')\n if default is not None:\n description += '\\nDefaults to `{}`.'.format(ujson.dumps(default))\n\n # TODO: Swagger allows indicating where the argument goes\n # (path, querystring, form data...). A column in the table should\n # be added for this.\n table.append(tr.format(\n argument=argument.get('argument') or argument.get('name'),\n # Show this as JSON to avoid changing the quoting style, which\n # may cause problems with JSON encoding.\n example=escape_html(ujson.dumps(argument['example'])),\n required='Yes' if argument.get('required') else 'No',\n description=md_engine.convert(description),\n ))\n\n table.append(\"</tbody>\")\n table.append(\"</table>\")\n\n return table\n\ndef makeExtension(*args: Any, **kwargs: str) -> MarkdownArgumentsTableGenerator:\n return MarkdownArgumentsTableGenerator(kwargs)\n", "path": "zerver/lib/bugdown/api_arguments_table_generator.py" } ]
[ { "content": "import re\nimport os\nimport ujson\n\nfrom django.utils.html import escape as escape_html\nfrom markdown.extensions import Extension\nfrom markdown.preprocessors import Preprocessor\nfrom zerver.lib.openapi import get_openapi_parameters\nfrom typing import Any, Dict, Optional, List\nimport markdown\n\nREGEXP = re.compile(r'\\{generate_api_arguments_table\\|\\s*(.+?)\\s*\\|\\s*(.+)\\s*\\}')\n\n\nclass MarkdownArgumentsTableGenerator(Extension):\n def __init__(self, configs: Optional[Dict[str, Any]]=None) -> None:\n if configs is None:\n configs = {}\n self.config = {\n 'base_path': ['.', 'Default location from which to evaluate relative paths for the JSON files.'],\n }\n for key, value in configs.items():\n self.setConfig(key, value)\n\n def extendMarkdown(self, md: markdown.Markdown, md_globals: Dict[str, Any]) -> None:\n md.preprocessors.add(\n 'generate_api_arguments', APIArgumentsTablePreprocessor(md, self.getConfigs()), '_begin'\n )\n\n\nclass APIArgumentsTablePreprocessor(Preprocessor):\n def __init__(self, md: markdown.Markdown, config: Dict[str, Any]) -> None:\n super(APIArgumentsTablePreprocessor, self).__init__(md)\n self.base_path = config['base_path']\n\n def run(self, lines: List[str]) -> List[str]:\n done = False\n while not done:\n for line in lines:\n loc = lines.index(line)\n match = REGEXP.search(line)\n\n if not match:\n continue\n\n filename = match.group(1)\n doc_name = match.group(2)\n filename = os.path.expanduser(filename)\n\n is_openapi_format = filename.endswith('.yaml')\n\n if not os.path.isabs(filename):\n parent_dir = self.base_path\n filename = os.path.normpath(os.path.join(parent_dir, filename))\n\n if is_openapi_format:\n endpoint, method = doc_name.rsplit(':', 1)\n arguments = [] # type: List[Dict[str, Any]]\n\n try:\n arguments = get_openapi_parameters(endpoint, method)\n except KeyError as e:\n # Don't raise an exception if the \"parameters\"\n # field is missing; we assume that's because the\n # endpoint doesn't accept any parameters\n if e.args != ('parameters',):\n raise e\n else:\n with open(filename, 'r') as fp:\n json_obj = ujson.load(fp)\n arguments = json_obj[doc_name]\n\n if arguments:\n text = self.render_table(arguments)\n else:\n text = ['This endpoint does not consume any arguments.']\n # The line that contains the directive to include the macro\n # may be preceded or followed by text or tags, in that case\n # we need to make sure that any preceding or following text\n # stays the same.\n line_split = REGEXP.split(line, maxsplit=0)\n preceding = line_split[0]\n following = line_split[-1]\n text = [preceding] + text + [following]\n lines = lines[:loc] + text + lines[loc+1:]\n break\n else:\n done = True\n return lines\n\n def render_table(self, arguments: List[Dict[str, Any]]) -> List[str]:\n table = []\n beginning = \"\"\"\n<table class=\"table\">\n <thead>\n <tr>\n <th>Argument</th>\n <th>Example</th>\n <th>Required</th>\n <th>Description</th>\n </tr>\n </thead>\n<tbody>\n\"\"\"\n tr = \"\"\"\n<tr>\n <td><code>{argument}</code></td>\n <td class=\"json-api-example\"><code>{example}</code></td>\n <td>{required}</td>\n <td>{description}</td>\n</tr>\n\"\"\"\n\n table.append(beginning)\n\n md_engine = markdown.Markdown(extensions=[])\n\n for argument in arguments:\n description = argument['description']\n\n oneof = ['`' + item + '`'\n for item in argument.get('schema', {}).get('enum', [])]\n if oneof:\n description += '\\nMust be one of: {}.'.format(', '.join(oneof))\n\n default = argument.get('schema', {}).get('default')\n if default is not None:\n description += '\\nDefaults to `{}`.'.format(ujson.dumps(default))\n\n # TODO: Swagger allows indicating where the argument goes\n # (path, querystring, form data...). A column in the table should\n # be added for this.\n table.append(tr.format(\n argument=argument.get('argument') or argument.get('name'),\n # Show this as JSON to avoid changing the quoting style, which\n # may cause problems with JSON encoding.\n example=escape_html(ujson.dumps(argument['example'])),\n required='Yes' if argument.get('required') else 'No',\n description=md_engine.convert(description),\n ))\n\n table.append(\"</tbody>\")\n table.append(\"</table>\")\n\n return table\n\ndef makeExtension(*args: Any, **kwargs: str) -> MarkdownArgumentsTableGenerator:\n return MarkdownArgumentsTableGenerator(kwargs)\n", "path": "zerver/lib/bugdown/api_arguments_table_generator.py" } ]
diff --git a/static/third/bootstrap/css/bootstrap.css b/static/third/bootstrap/css/bootstrap.css index ef4330f7db3fb..23de9383f0f20 100644 --- a/static/third/bootstrap/css/bootstrap.css +++ b/static/third/bootstrap/css/bootstrap.css @@ -2008,6 +2008,16 @@ table { vertical-align: bottom; } +.table .json-api-example { + width: fit-content; + max-width: 300px; + word-wrap: break-word; +} + +.table .json-api-example code { + white-space: pre-wrap; +} + .table caption + thead tr:first-child th, .table caption + thead tr:first-child td, .table colgroup + thead tr:first-child th, diff --git a/zerver/lib/bugdown/api_arguments_table_generator.py b/zerver/lib/bugdown/api_arguments_table_generator.py index 885f8081d1426..d55c332133bc3 100644 --- a/zerver/lib/bugdown/api_arguments_table_generator.py +++ b/zerver/lib/bugdown/api_arguments_table_generator.py @@ -105,7 +105,7 @@ def render_table(self, arguments: List[Dict[str, Any]]) -> List[str]: tr = """ <tr> <td><code>{argument}</code></td> - <td><code>{example}</code></td> + <td class="json-api-example"><code>{example}</code></td> <td>{required}</td> <td>{description}</td> </tr>
voxel51__fiftyone-3905
[BUG] App is stuck on "Pixelating.." screen when loading COCO custom dataset. ### Describe the problem When loading my custom COCO dataset, I cannot launch the app on my Windows PC. The dataset is successfully created, but when I launch the app either in a script or in a jupyter notebook, it remain on the "Pixelating..." screen, even for a sample of 5 images. ### Code to reproduce issue Provide a reproducible test case that is the bare minimum necessary to generate the problem. Please avoid sharing code that relies on your local data or datasets. Include a short video or screenshot if the bug is in the App. ``` import os import fiftyone as fo import fiftyone.zoo as foz # The directory containing the source images data_path = r"...\data" # The path to the COCO labels JSON file labels_path = r"...\val.json" # Import the dataset dataset = fo.Dataset.from_dir( dataset_type=fo.types.COCODetectionDataset, data_path=data_path, labels_path=labels_path, label_types=["detections"], max_samples=1 ) session = fo.launch_app(dataset) ``` ### System information - **OS Platform and Distribution**: Windows 10 - **Python version** (`python --version`): 3.8.16 - **FiftyOne version** (`fiftyone --version`): v0.23.0 - **FiftyOne installed from** (pip or source): pip ### Other info/logs ![Screenshot 2023-12-07 at 2 32 04 PM](https://github.com/voxel51/fiftyone/assets/53239931/50a7c963-0215-44a0-8875-2de03ad8d1d3) ### Willingness to contribute The FiftyOne Community encourages bug fix contributions. Would you or another member of your organization be willing to contribute a fix for this bug to the FiftyOne codebase? - [ ] Yes. I can contribute a fix for this bug independently - [x] Yes. I would be willing to contribute a fix for this bug with guidance from the FiftyOne community - [ ] No. I cannot contribute a bug fix at this time
[ { "content": "\"\"\"\nFiftyOne Server queries.\n\n| Copyright 2017-2023, Voxel51, Inc.\n| `voxel51.com <https://voxel51.com/>`_\n|\n\"\"\"\nfrom dataclasses import asdict\nfrom datetime import date, datetime\nfrom enum import Enum\nimport logging\nimport os\nimport typing as t\n\nimport eta.core.serial as etas\nimport eta.core.utils as etau\nimport strawberry as gql\nfrom bson import ObjectId, json_util\n\nimport fiftyone as fo\nimport fiftyone.brain as fob # pylint: disable=import-error,no-name-in-module\nimport fiftyone.constants as foc\nimport fiftyone.core.context as focx\nimport fiftyone.core.dataset as fod\nimport fiftyone.core.media as fom\nfrom fiftyone.core.odm import SavedViewDocument\nimport fiftyone.core.stages as fosg\nfrom fiftyone.core.state import SampleField, serialize_fields\nimport fiftyone.core.uid as fou\nfrom fiftyone.core.utils import run_sync_task\nimport fiftyone.core.view as fov\n\nimport fiftyone.server.aggregate as fosa\nfrom fiftyone.server.aggregations import aggregate_resolver\nfrom fiftyone.server.color import ColorBy, ColorScheme\nfrom fiftyone.server.data import Info\nfrom fiftyone.server.dataloader import get_dataloader_resolver\nfrom fiftyone.server.indexes import Index, from_dict as indexes_from_dict\nfrom fiftyone.server.lightning import lightning_resolver\nfrom fiftyone.server.metadata import MediaType\nfrom fiftyone.server.paginator import Connection, get_paginator_resolver\nfrom fiftyone.server.samples import (\n SampleFilter,\n SampleItem,\n paginate_samples,\n)\nfrom fiftyone.server.scalars import BSON, BSONArray, JSON\nfrom fiftyone.server.stage_definitions import stage_definitions\nfrom fiftyone.server.utils import from_dict\n\n\nID = gql.scalar(\n t.NewType(\"ID\", str),\n serialize=lambda v: str(v),\n parse_value=lambda v: ObjectId(v),\n)\nDATASET_FILTER = [{\"sample_collection_name\": {\"$regex\": \"^samples\\\\.\"}}]\nDATASET_FILTER_STAGE = [{\"$match\": DATASET_FILTER[0]}]\n\n\[email protected]\nclass Group:\n name: str\n media_type: MediaType\n\n\[email protected]\nclass Target:\n target: str\n value: str\n\n\[email protected]\nclass NamedTargets:\n name: str\n targets: t.List[Target]\n\n\[email protected]\nclass RunConfig:\n cls: str\n\n\[email protected]\nclass Run:\n key: str\n version: t.Optional[str]\n timestamp: t.Optional[datetime]\n config: t.Optional[RunConfig]\n view_stages: t.Optional[t.List[str]]\n\n\[email protected]\nclass BrainRunType(Enum):\n similarity = \"similarity\"\n visualization = \"visualization\"\n\n\[email protected]\nclass BrainRunConfig(RunConfig):\n embeddings_field: t.Optional[str]\n method: t.Optional[str]\n patches_field: t.Optional[str]\n supports_prompts: t.Optional[bool]\n\n @gql.field\n def type(self) -> t.Optional[BrainRunType]:\n try:\n if issubclass(fob.SimilarityConfig, etau.get_class(self.cls)):\n return BrainRunType.similarity\n\n if issubclass(fob.VisualizationConfig, etau.get_class(self.cls)):\n return BrainRunType.visualization\n except:\n pass\n\n return None\n\n @gql.field\n def max_k(self) -> t.Optional[int]:\n config = self._create_config()\n return getattr(config, \"max_k\", None)\n\n @gql.field\n def supports_least_similarity(self) -> t.Optional[bool]:\n config = self._create_config()\n return getattr(config, \"supports_least_similarity\", None)\n\n def _create_config(self):\n try:\n cls = etau.get_class(self.cls)\n return cls(\n embeddings_field=self.embeddings_field,\n patches_field=self.patches_field,\n )\n except:\n return None\n\n\[email protected]\nclass BrainRun(Run):\n config: t.Optional[BrainRunConfig]\n\n\[email protected]\nclass EvaluationRunConfig(RunConfig):\n gt_field: t.Optional[str]\n pred_field: t.Optional[str]\n method: t.Optional[str]\n\n\[email protected]\nclass EvaluationRun(Run):\n config: t.Optional[EvaluationRunConfig]\n\n\[email protected]\nclass SavedView:\n id: t.Optional[str]\n dataset_id: t.Optional[str]\n name: t.Optional[str]\n description: t.Optional[str]\n color: t.Optional[str]\n slug: t.Optional[str]\n view_stages: t.Optional[t.List[str]]\n created_at: t.Optional[datetime]\n last_modified_at: t.Optional[datetime]\n last_loaded_at: t.Optional[datetime]\n\n @gql.field\n def view_name(self) -> t.Optional[str]:\n if isinstance(self, ObjectId):\n return None\n return self.name\n\n @gql.field\n def stage_dicts(self) -> t.Optional[BSONArray]:\n return [json_util.loads(x) for x in self.view_stages]\n\n @classmethod\n def from_doc(cls, doc: SavedViewDocument):\n stage_dicts = [json_util.loads(x) for x in doc.view_stages]\n data = doc.to_dict()\n data[\"id\"] = str(data.pop(\"_id\"))\n data[\"dataset_id\"] = str(data.pop(\"_dataset_id\"))\n saved_view = from_dict(data_class=cls, data=data)\n saved_view.stage_dicts = stage_dicts\n return saved_view\n\n\[email protected]\nclass SidebarGroup:\n name: str\n paths: t.Optional[t.List[str]]\n expanded: t.Optional[bool] = None\n\n\[email protected]\nclass KeypointSkeleton:\n labels: t.Optional[t.List[str]]\n edges: t.List[t.List[int]]\n\n\[email protected]\nclass NamedKeypointSkeleton(KeypointSkeleton):\n name: str\n\n\[email protected]\nclass SidebarMode(Enum):\n all = \"all\"\n best = \"best\"\n fast = \"fast\"\n\n\[email protected]\nclass DatasetAppConfig:\n color_scheme: t.Optional[ColorScheme]\n media_fields: t.Optional[t.List[str]]\n plugins: t.Optional[JSON]\n sidebar_groups: t.Optional[t.List[SidebarGroup]]\n sidebar_mode: t.Optional[SidebarMode]\n spaces: t.Optional[JSON]\n\n grid_media_field: str = \"filepath\"\n modal_media_field: str = \"filepath\"\n\n\[email protected]\nclass Dataset:\n id: gql.ID\n dataset_id: gql.ID\n name: str\n created_at: t.Optional[date]\n last_loaded_at: t.Optional[datetime]\n persistent: bool\n group_media_types: t.Optional[t.List[Group]]\n group_field: t.Optional[str]\n default_group_slice: t.Optional[str]\n media_type: t.Optional[MediaType]\n parent_media_type: t.Optional[MediaType]\n mask_targets: t.List[NamedTargets]\n default_mask_targets: t.Optional[t.List[Target]]\n sample_fields: t.List[SampleField]\n frame_fields: t.Optional[t.List[SampleField]]\n brain_methods: t.Optional[t.List[BrainRun]]\n evaluations: t.Optional[t.List[EvaluationRun]]\n saved_view_slug: t.Optional[str]\n saved_views: t.Optional[t.List[SavedView]]\n version: t.Optional[str]\n view_cls: t.Optional[str]\n view_name: t.Optional[str]\n default_skeleton: t.Optional[KeypointSkeleton]\n skeletons: t.List[NamedKeypointSkeleton]\n app_config: t.Optional[DatasetAppConfig]\n info: t.Optional[JSON]\n\n estimated_frame_count: t.Optional[int]\n estimated_sample_count: t.Optional[int]\n frame_indexes: t.Optional[t.List[Index]]\n sample_indexes: t.Optional[t.List[Index]]\n\n frame_collection_name: gql.Private[t.Optional[str]]\n sample_collection_name: gql.Private[t.Optional[str]]\n\n @gql.field\n def stages(\n self, slug: t.Optional[str] = None, view: t.Optional[BSONArray] = None\n ) -> t.Optional[BSONArray]:\n if slug:\n for view in self.saved_views:\n if view.slug == slug:\n return view.stage_dicts()\n\n return view or []\n\n @gql.field\n async def estimated_sample_count(self, info: Info = None) -> int:\n return await info.context.db[\n self.sample_collection_name\n ].estimated_document_count()\n\n @gql.field\n async def estimated_frame_count(\n self, info: Info = None\n ) -> t.Optional[int]:\n if self.frame_collection_name:\n return await info.context.db[\n self.frame_collection_name\n ].estimated_document_count()\n\n @staticmethod\n def modifier(doc: dict) -> dict:\n doc[\"id\"] = doc.pop(\"_id\")\n doc[\"dataset_id\"] = doc[\"id\"]\n doc[\"default_mask_targets\"] = _convert_targets(\n doc.get(\"default_mask_targets\", {})\n )\n doc[\"mask_targets\"] = [\n NamedTargets(name=name, targets=_convert_targets(targets))\n for name, targets in doc.get(\"mask_targets\", {}).items()\n ]\n flat = _flatten_fields([], doc.get(\"sample_fields\", []))\n doc[\"sample_fields\"] = flat\n\n doc[\"frame_fields\"] = _flatten_fields([], doc.get(\"frame_fields\", []))\n doc[\"brain_methods\"] = list(doc.get(\"brain_methods\", {}).values())\n doc[\"evaluations\"] = list(doc.get(\"evaluations\", {}).values())\n doc[\"saved_views\"] = doc.get(\"saved_views\", [])\n doc[\"skeletons\"] = list(\n dict(name=name, **data)\n for name, data in doc.get(\"skeletons\", {}).items()\n )\n doc[\"group_media_types\"] = [\n Group(name=name, media_type=media_type)\n for name, media_type in doc.get(\"group_media_types\", {}).items()\n ]\n doc[\"default_skeletons\"] = doc.get(\"default_skeletons\", None)\n\n return doc\n\n @classmethod\n async def resolver(\n cls,\n name: str,\n info: Info = None,\n saved_view_slug: t.Optional[str] = gql.UNSET,\n view: t.Optional[BSONArray] = None,\n ) -> t.Optional[\"Dataset\"]:\n return await serialize_dataset(\n dataset_name=name,\n serialized_view=view,\n saved_view_slug=saved_view_slug,\n dicts=False,\n )\n\n\ndataset_dataloader = get_dataloader_resolver(\n Dataset, \"datasets\", \"name\", DATASET_FILTER\n)\n\n\[email protected]\nclass Theme(Enum):\n browser = \"browser\"\n dark = \"dark\"\n light = \"light\"\n\n\[email protected]\nclass AppConfig:\n color_by: ColorBy\n color_pool: t.List[str]\n colorscale: str\n grid_zoom: int\n lightning_threshold: t.Optional[int]\n loop_videos: bool\n multicolor_keypoints: bool\n notebook_height: int\n plugins: t.Optional[JSON]\n show_confidence: bool\n show_index: bool\n show_label: bool\n show_skeletons: bool\n show_tooltip: bool\n sidebar_mode: SidebarMode\n theme: Theme\n timezone: t.Optional[str]\n use_frame_number: bool\n spaces: t.Optional[JSON]\n\n\[email protected]\nclass SchemaResult:\n field_schema: t.List[SampleField]\n frame_field_schema: t.List[SampleField]\n\n\[email protected]\nclass Query(fosa.AggregateQuery):\n aggregations = gql.field(resolver=aggregate_resolver)\n lightning = gql.field(resolver=lightning_resolver)\n\n @gql.field\n def colorscale(self) -> t.Optional[t.List[t.List[int]]]:\n if fo.app_config.colorscale:\n return fo.app_config.get_colormap()\n\n return None\n\n @gql.field\n def config(self) -> AppConfig:\n d = fo.app_config.serialize()\n d[\"timezone\"] = fo.config.timezone\n return from_dict(AppConfig, d)\n\n @gql.field\n def context(self) -> str:\n return focx._get_context()\n\n @gql.field\n def dev(self) -> bool:\n return foc.DEV_INSTALL or foc.RC_INSTALL\n\n @gql.field\n def do_not_track(self) -> bool:\n return fo.config.do_not_track\n\n dataset: Dataset = gql.field(resolver=Dataset.resolver)\n datasets: Connection[Dataset, str] = gql.field(\n resolver=get_paginator_resolver(\n Dataset, \"created_at\", DATASET_FILTER_STAGE, \"datasets\"\n )\n )\n\n @gql.field\n async def samples(\n self,\n dataset: str,\n view: BSONArray,\n first: t.Optional[int] = 20,\n after: t.Optional[str] = None,\n filter: t.Optional[SampleFilter] = None,\n filters: t.Optional[BSON] = None,\n extended_stages: t.Optional[BSON] = None,\n pagination_data: t.Optional[bool] = True,\n ) -> Connection[SampleItem, str]:\n return await paginate_samples(\n dataset,\n view,\n filters,\n first,\n after,\n sample_filter=filter,\n extended_stages=extended_stages,\n pagination_data=pagination_data,\n )\n\n @gql.field\n async def sample(\n self,\n dataset: str,\n view: BSONArray,\n filter: SampleFilter,\n filters: t.Optional[JSON] = None,\n ) -> t.Optional[SampleItem]:\n samples = await paginate_samples(\n dataset,\n view,\n filters,\n 1,\n sample_filter=filter,\n pagination_data=False,\n )\n if samples.edges:\n return samples.edges[0].node\n\n return None\n\n stage_definitions = gql.field(stage_definitions)\n\n @gql.field\n def teams_submission(self) -> bool:\n isfile = os.path.isfile(foc.TEAMS_PATH)\n if isfile:\n submitted = etas.load_json(foc.TEAMS_PATH)[\"submitted\"]\n else:\n submitted = False\n\n return submitted\n\n @gql.field\n def uid(self) -> str:\n return fou.get_user_id()\n\n @gql.field\n def version(self) -> str:\n return foc.VERSION\n\n @gql.field\n def saved_views(self, dataset_name: str) -> t.Optional[t.List[SavedView]]:\n try:\n ds = fod.load_dataset(dataset_name)\n return [\n SavedView.from_doc(view_doc)\n for view_doc in ds._doc.saved_views\n ]\n except:\n return None\n\n @gql.field\n def schema_for_view_stages(\n self,\n dataset_name: str,\n view_stages: BSONArray,\n ) -> SchemaResult:\n try:\n ds = fod.load_dataset(dataset_name)\n if view_stages:\n view = fov.DatasetView._build(ds, view_stages or [])\n\n if ds.media_type == fom.VIDEO:\n frame_schema = serialize_fields(\n view.get_frame_field_schema(flat=True)\n )\n field_schema = serialize_fields(\n view.get_field_schema(flat=True)\n )\n return SchemaResult(\n field_schema=field_schema,\n frame_field_schema=frame_schema,\n )\n\n return SchemaResult(\n field_schema=serialize_fields(\n view.get_field_schema(flat=True)\n ),\n frame_field_schema=[],\n )\n if ds.media_type == fom.VIDEO:\n frames_field_schema = serialize_fields(\n ds.get_frame_field_schema(flat=True)\n )\n field_schema = serialize_fields(ds.get_field_schema(flat=True))\n return SchemaResult(\n field_schema=field_schema,\n frame_field_schema=frames_field_schema,\n )\n\n return SchemaResult(\n field_schema=serialize_fields(ds.get_field_schema(flat=True)),\n frame_field_schema=[],\n )\n except Exception as e:\n return SchemaResult(\n field_schema=[],\n frame_field_schema=[],\n )\n\n\ndef _flatten_fields(\n path: t.List[str], fields: t.List[t.Dict]\n) -> t.List[t.Dict]:\n result = []\n for field in fields:\n key = field.pop(\"name\", None)\n if key is None:\n # Issues with concurrency can cause this to happen.\n # Until it's fixed, just ignore these fields to avoid throwing hard\n # errors when loading in the app.\n logging.debug(\"Skipping field with no name: %s\", field)\n continue\n field_path = path + [key]\n field[\"path\"] = \".\".join(field_path)\n result.append(field)\n\n fields = field.pop(\"fields\", None)\n if fields:\n result = result + _flatten_fields(field_path, fields)\n\n return result\n\n\ndef _convert_targets(targets: t.Dict[str, str]) -> t.List[Target]:\n return [Target(target=k, value=v) for k, v in targets.items()]\n\n\nasync def serialize_dataset(\n dataset_name: str,\n serialized_view: BSONArray,\n saved_view_slug: t.Optional[str] = None,\n dicts=True,\n) -> Dataset:\n def run():\n if not fod.dataset_exists(dataset_name):\n return None\n\n dataset = fod.load_dataset(dataset_name)\n dataset.reload()\n view_name = None\n try:\n doc = dataset._get_saved_view_doc(saved_view_slug, slug=True)\n view = dataset.load_saved_view(doc.name)\n view_name = view.name\n if serialized_view:\n for stage in serialized_view:\n view = view.add_stage(fosg.ViewStage._from_dict(stage))\n except:\n view = fov.DatasetView._build(dataset, serialized_view or [])\n\n doc = dataset._doc.to_dict(no_dereference=True)\n Dataset.modifier(doc)\n data = from_dict(Dataset, doc)\n data.view_cls = None\n data.view_name = view_name\n data.saved_view_slug = saved_view_slug\n\n collection = dataset.view()\n if view is not None:\n # unique id for for the relay global store\n #\n # until a schema is with respect to a view and not a dataset this\n # is required\n data.id = ObjectId()\n if view._dataset != dataset:\n d = view._dataset._serialize()\n data.media_type = d[\"media_type\"]\n data.view_cls = etau.get_class_name(view)\n\n data.parent_media_type = view._parent_media_type\n data.media_type = view.media_type\n\n collection = view\n\n data.sample_fields = serialize_fields(\n collection.get_field_schema(flat=True)\n )\n\n data.frame_fields = serialize_fields(\n collection.get_frame_field_schema(flat=True)\n )\n\n if dicts:\n saved_views = []\n for view in data.saved_views:\n view_dict = asdict(view)\n view_dict[\"view_name\"] = view.view_name()\n view_dict[\"stage_dicts\"] = view.stage_dicts()\n saved_views.append(view_dict)\n\n data.saved_views = saved_views\n\n for brain_method in data.brain_methods:\n try:\n type = brain_method.config.type().value\n except:\n type = None\n\n try:\n max_k = brain_method.config.max_k()\n except:\n max_k = None\n\n try:\n supports_least_similarity = (\n brain_method.config.supports_least_similarity()\n )\n except:\n supports_least_similarity = None\n\n setattr(brain_method.config, \"type\", type)\n setattr(brain_method.config, \"max_k\", max_k)\n setattr(\n brain_method.config,\n \"supports_least_similarity\",\n supports_least_similarity,\n )\n\n _assign_estimated_counts(data, dataset)\n _assign_lightning_info(data, dataset)\n\n return data\n\n return await run_sync_task(run)\n\n\ndef _assign_estimated_counts(dataset: Dataset, fo_dataset: fo.Dataset):\n setattr(\n dataset,\n \"estimated_sample_count\",\n fo_dataset._sample_collection.estimated_document_count(),\n )\n setattr(\n dataset,\n \"estimated_frame_count\",\n fo_dataset._frame_collection.estimated_document_count()\n if fo_dataset._frame_collection_name\n else None,\n )\n\n\ndef _assign_lightning_info(dataset: Dataset, fo_dataset: fo.Dataset):\n dataset.sample_indexes, dataset.frame_indexes = indexes_from_dict(\n fo_dataset.get_index_information()\n )\n", "path": "fiftyone/server/query.py" } ]
[ { "content": "\"\"\"\nFiftyOne Server queries.\n\n| Copyright 2017-2023, Voxel51, Inc.\n| `voxel51.com <https://voxel51.com/>`_\n|\n\"\"\"\nfrom dataclasses import asdict\nfrom datetime import date, datetime\nfrom enum import Enum\nimport logging\nimport os\nimport typing as t\n\nimport eta.core.serial as etas\nimport eta.core.utils as etau\nimport strawberry as gql\nfrom bson import ObjectId, json_util\n\nimport fiftyone as fo\nimport fiftyone.brain as fob # pylint: disable=import-error,no-name-in-module\nimport fiftyone.constants as foc\nimport fiftyone.core.context as focx\nimport fiftyone.core.dataset as fod\nimport fiftyone.core.media as fom\nfrom fiftyone.core.odm import SavedViewDocument\nimport fiftyone.core.stages as fosg\nfrom fiftyone.core.state import SampleField, serialize_fields\nimport fiftyone.core.uid as fou\nfrom fiftyone.core.utils import run_sync_task\nimport fiftyone.core.view as fov\n\nimport fiftyone.server.aggregate as fosa\nfrom fiftyone.server.aggregations import aggregate_resolver\nfrom fiftyone.server.color import ColorBy, ColorScheme\nfrom fiftyone.server.data import Info\nfrom fiftyone.server.dataloader import get_dataloader_resolver\nfrom fiftyone.server.indexes import Index, from_dict as indexes_from_dict\nfrom fiftyone.server.lightning import lightning_resolver\nfrom fiftyone.server.metadata import MediaType\nfrom fiftyone.server.paginator import Connection, get_paginator_resolver\nfrom fiftyone.server.samples import (\n SampleFilter,\n SampleItem,\n paginate_samples,\n)\nfrom fiftyone.server.scalars import BSON, BSONArray, JSON\nfrom fiftyone.server.stage_definitions import stage_definitions\nfrom fiftyone.server.utils import from_dict\n\n\nID = gql.scalar(\n t.NewType(\"ID\", str),\n serialize=lambda v: str(v),\n parse_value=lambda v: ObjectId(v),\n)\nDATASET_FILTER = [{\"sample_collection_name\": {\"$regex\": \"^samples\\\\.\"}}]\nDATASET_FILTER_STAGE = [{\"$match\": DATASET_FILTER[0]}]\n\n\[email protected]\nclass Group:\n name: str\n media_type: MediaType\n\n\[email protected]\nclass Target:\n target: str\n value: str\n\n\[email protected]\nclass NamedTargets:\n name: str\n targets: t.List[Target]\n\n\[email protected]\nclass RunConfig:\n cls: str\n\n\[email protected]\nclass Run:\n key: str\n version: t.Optional[str]\n timestamp: t.Optional[datetime]\n config: t.Optional[RunConfig]\n view_stages: t.Optional[t.List[str]]\n\n\[email protected]\nclass BrainRunType(Enum):\n similarity = \"similarity\"\n visualization = \"visualization\"\n\n\[email protected]\nclass BrainRunConfig(RunConfig):\n embeddings_field: t.Optional[str]\n method: t.Optional[str]\n patches_field: t.Optional[str]\n supports_prompts: t.Optional[bool]\n\n @gql.field\n def type(self) -> t.Optional[BrainRunType]:\n try:\n if issubclass(fob.SimilarityConfig, etau.get_class(self.cls)):\n return BrainRunType.similarity\n\n if issubclass(fob.VisualizationConfig, etau.get_class(self.cls)):\n return BrainRunType.visualization\n except:\n pass\n\n return None\n\n @gql.field\n def max_k(self) -> t.Optional[int]:\n config = self._create_config()\n return getattr(config, \"max_k\", None)\n\n @gql.field\n def supports_least_similarity(self) -> t.Optional[bool]:\n config = self._create_config()\n return getattr(config, \"supports_least_similarity\", None)\n\n def _create_config(self):\n try:\n cls = etau.get_class(self.cls)\n return cls(\n embeddings_field=self.embeddings_field,\n patches_field=self.patches_field,\n )\n except:\n return None\n\n\[email protected]\nclass BrainRun(Run):\n config: t.Optional[BrainRunConfig]\n\n\[email protected]\nclass EvaluationRunConfig(RunConfig):\n gt_field: t.Optional[str]\n pred_field: t.Optional[str]\n method: t.Optional[str]\n\n\[email protected]\nclass EvaluationRun(Run):\n config: t.Optional[EvaluationRunConfig]\n\n\[email protected]\nclass SavedView:\n id: t.Optional[str]\n dataset_id: t.Optional[str]\n name: t.Optional[str]\n description: t.Optional[str]\n color: t.Optional[str]\n slug: t.Optional[str]\n view_stages: t.Optional[t.List[str]]\n created_at: t.Optional[datetime]\n last_modified_at: t.Optional[datetime]\n last_loaded_at: t.Optional[datetime]\n\n @gql.field\n def view_name(self) -> t.Optional[str]:\n if isinstance(self, ObjectId):\n return None\n return self.name\n\n @gql.field\n def stage_dicts(self) -> t.Optional[BSONArray]:\n return [json_util.loads(x) for x in self.view_stages]\n\n @classmethod\n def from_doc(cls, doc: SavedViewDocument):\n stage_dicts = [json_util.loads(x) for x in doc.view_stages]\n data = doc.to_dict()\n data[\"id\"] = str(data.pop(\"_id\"))\n data[\"dataset_id\"] = str(data.pop(\"_dataset_id\"))\n saved_view = from_dict(data_class=cls, data=data)\n saved_view.stage_dicts = stage_dicts\n return saved_view\n\n\[email protected]\nclass SidebarGroup:\n name: str\n paths: t.Optional[t.List[str]]\n expanded: t.Optional[bool] = None\n\n\[email protected]\nclass KeypointSkeleton:\n labels: t.Optional[t.List[str]]\n edges: t.List[t.List[int]]\n\n\[email protected]\nclass NamedKeypointSkeleton(KeypointSkeleton):\n name: str\n\n\[email protected]\nclass SidebarMode(Enum):\n all = \"all\"\n best = \"best\"\n fast = \"fast\"\n\n\[email protected]\nclass DatasetAppConfig:\n color_scheme: t.Optional[ColorScheme]\n media_fields: t.Optional[t.List[str]]\n plugins: t.Optional[JSON]\n sidebar_groups: t.Optional[t.List[SidebarGroup]]\n sidebar_mode: t.Optional[SidebarMode]\n spaces: t.Optional[JSON]\n\n grid_media_field: str = \"filepath\"\n modal_media_field: str = \"filepath\"\n\n\[email protected]\nclass Dataset:\n id: gql.ID\n dataset_id: gql.ID\n name: str\n created_at: t.Optional[date]\n last_loaded_at: t.Optional[datetime]\n persistent: bool\n group_media_types: t.Optional[t.List[Group]]\n group_field: t.Optional[str]\n default_group_slice: t.Optional[str]\n media_type: t.Optional[MediaType]\n parent_media_type: t.Optional[MediaType]\n mask_targets: t.List[NamedTargets]\n default_mask_targets: t.Optional[t.List[Target]]\n sample_fields: t.List[SampleField]\n frame_fields: t.Optional[t.List[SampleField]]\n brain_methods: t.Optional[t.List[BrainRun]]\n evaluations: t.Optional[t.List[EvaluationRun]]\n saved_view_slug: t.Optional[str]\n saved_views: t.Optional[t.List[SavedView]]\n version: t.Optional[str]\n view_cls: t.Optional[str]\n view_name: t.Optional[str]\n default_skeleton: t.Optional[KeypointSkeleton]\n skeletons: t.List[NamedKeypointSkeleton]\n app_config: t.Optional[DatasetAppConfig]\n info: t.Optional[JSON]\n\n estimated_frame_count: t.Optional[int]\n estimated_sample_count: t.Optional[int]\n frame_indexes: t.Optional[t.List[Index]]\n sample_indexes: t.Optional[t.List[Index]]\n\n frame_collection_name: gql.Private[t.Optional[str]]\n sample_collection_name: gql.Private[t.Optional[str]]\n\n @gql.field\n def stages(\n self, slug: t.Optional[str] = None, view: t.Optional[BSONArray] = None\n ) -> t.Optional[BSONArray]:\n if slug:\n for view in self.saved_views:\n if view.slug == slug:\n return view.stage_dicts()\n\n return view or []\n\n @gql.field\n async def estimated_sample_count(self, info: Info = None) -> int:\n return await info.context.db[\n self.sample_collection_name\n ].estimated_document_count()\n\n @gql.field\n async def estimated_frame_count(\n self, info: Info = None\n ) -> t.Optional[int]:\n if self.frame_collection_name:\n return await info.context.db[\n self.frame_collection_name\n ].estimated_document_count()\n\n @staticmethod\n def modifier(doc: dict) -> dict:\n doc[\"id\"] = doc.pop(\"_id\")\n doc[\"dataset_id\"] = doc[\"id\"]\n doc[\"default_mask_targets\"] = _convert_targets(\n doc.get(\"default_mask_targets\", {})\n )\n doc[\"mask_targets\"] = [\n NamedTargets(name=name, targets=_convert_targets(targets))\n for name, targets in doc.get(\"mask_targets\", {}).items()\n ]\n flat = _flatten_fields([], doc.get(\"sample_fields\", []))\n doc[\"sample_fields\"] = flat\n\n doc[\"frame_fields\"] = _flatten_fields([], doc.get(\"frame_fields\", []))\n doc[\"brain_methods\"] = list(doc.get(\"brain_methods\", {}).values())\n doc[\"evaluations\"] = list(doc.get(\"evaluations\", {}).values())\n doc[\"saved_views\"] = doc.get(\"saved_views\", [])\n doc[\"skeletons\"] = list(\n dict(name=name, **data)\n for name, data in doc.get(\"skeletons\", {}).items()\n )\n doc[\"group_media_types\"] = [\n Group(name=name, media_type=media_type)\n for name, media_type in doc.get(\"group_media_types\", {}).items()\n ]\n doc[\"default_skeletons\"] = doc.get(\"default_skeletons\", None)\n\n # gql private fields must always be present\n doc.setdefault(\"frame_collection_name\", None)\n\n return doc\n\n @classmethod\n async def resolver(\n cls,\n name: str,\n info: Info = None,\n saved_view_slug: t.Optional[str] = gql.UNSET,\n view: t.Optional[BSONArray] = None,\n ) -> t.Optional[\"Dataset\"]:\n return await serialize_dataset(\n dataset_name=name,\n serialized_view=view,\n saved_view_slug=saved_view_slug,\n dicts=False,\n )\n\n\ndataset_dataloader = get_dataloader_resolver(\n Dataset, \"datasets\", \"name\", DATASET_FILTER\n)\n\n\[email protected]\nclass Theme(Enum):\n browser = \"browser\"\n dark = \"dark\"\n light = \"light\"\n\n\[email protected]\nclass AppConfig:\n color_by: ColorBy\n color_pool: t.List[str]\n colorscale: str\n grid_zoom: int\n lightning_threshold: t.Optional[int]\n loop_videos: bool\n multicolor_keypoints: bool\n notebook_height: int\n plugins: t.Optional[JSON]\n show_confidence: bool\n show_index: bool\n show_label: bool\n show_skeletons: bool\n show_tooltip: bool\n sidebar_mode: SidebarMode\n theme: Theme\n timezone: t.Optional[str]\n use_frame_number: bool\n spaces: t.Optional[JSON]\n\n\[email protected]\nclass SchemaResult:\n field_schema: t.List[SampleField]\n frame_field_schema: t.List[SampleField]\n\n\[email protected]\nclass Query(fosa.AggregateQuery):\n aggregations = gql.field(resolver=aggregate_resolver)\n lightning = gql.field(resolver=lightning_resolver)\n\n @gql.field\n def colorscale(self) -> t.Optional[t.List[t.List[int]]]:\n if fo.app_config.colorscale:\n return fo.app_config.get_colormap()\n\n return None\n\n @gql.field\n def config(self) -> AppConfig:\n d = fo.app_config.serialize()\n d[\"timezone\"] = fo.config.timezone\n return from_dict(AppConfig, d)\n\n @gql.field\n def context(self) -> str:\n return focx._get_context()\n\n @gql.field\n def dev(self) -> bool:\n return foc.DEV_INSTALL or foc.RC_INSTALL\n\n @gql.field\n def do_not_track(self) -> bool:\n return fo.config.do_not_track\n\n dataset: Dataset = gql.field(resolver=Dataset.resolver)\n datasets: Connection[Dataset, str] = gql.field(\n resolver=get_paginator_resolver(\n Dataset, \"created_at\", DATASET_FILTER_STAGE, \"datasets\"\n )\n )\n\n @gql.field\n async def samples(\n self,\n dataset: str,\n view: BSONArray,\n first: t.Optional[int] = 20,\n after: t.Optional[str] = None,\n filter: t.Optional[SampleFilter] = None,\n filters: t.Optional[BSON] = None,\n extended_stages: t.Optional[BSON] = None,\n pagination_data: t.Optional[bool] = True,\n ) -> Connection[SampleItem, str]:\n return await paginate_samples(\n dataset,\n view,\n filters,\n first,\n after,\n sample_filter=filter,\n extended_stages=extended_stages,\n pagination_data=pagination_data,\n )\n\n @gql.field\n async def sample(\n self,\n dataset: str,\n view: BSONArray,\n filter: SampleFilter,\n filters: t.Optional[JSON] = None,\n ) -> t.Optional[SampleItem]:\n samples = await paginate_samples(\n dataset,\n view,\n filters,\n 1,\n sample_filter=filter,\n pagination_data=False,\n )\n if samples.edges:\n return samples.edges[0].node\n\n return None\n\n stage_definitions = gql.field(stage_definitions)\n\n @gql.field\n def teams_submission(self) -> bool:\n isfile = os.path.isfile(foc.TEAMS_PATH)\n if isfile:\n submitted = etas.load_json(foc.TEAMS_PATH)[\"submitted\"]\n else:\n submitted = False\n\n return submitted\n\n @gql.field\n def uid(self) -> str:\n return fou.get_user_id()\n\n @gql.field\n def version(self) -> str:\n return foc.VERSION\n\n @gql.field\n def saved_views(self, dataset_name: str) -> t.Optional[t.List[SavedView]]:\n try:\n ds = fod.load_dataset(dataset_name)\n return [\n SavedView.from_doc(view_doc)\n for view_doc in ds._doc.saved_views\n ]\n except:\n return None\n\n @gql.field\n def schema_for_view_stages(\n self,\n dataset_name: str,\n view_stages: BSONArray,\n ) -> SchemaResult:\n try:\n ds = fod.load_dataset(dataset_name)\n if view_stages:\n view = fov.DatasetView._build(ds, view_stages or [])\n\n if ds.media_type == fom.VIDEO:\n frame_schema = serialize_fields(\n view.get_frame_field_schema(flat=True)\n )\n field_schema = serialize_fields(\n view.get_field_schema(flat=True)\n )\n return SchemaResult(\n field_schema=field_schema,\n frame_field_schema=frame_schema,\n )\n\n return SchemaResult(\n field_schema=serialize_fields(\n view.get_field_schema(flat=True)\n ),\n frame_field_schema=[],\n )\n if ds.media_type == fom.VIDEO:\n frames_field_schema = serialize_fields(\n ds.get_frame_field_schema(flat=True)\n )\n field_schema = serialize_fields(ds.get_field_schema(flat=True))\n return SchemaResult(\n field_schema=field_schema,\n frame_field_schema=frames_field_schema,\n )\n\n return SchemaResult(\n field_schema=serialize_fields(ds.get_field_schema(flat=True)),\n frame_field_schema=[],\n )\n except Exception as e:\n return SchemaResult(\n field_schema=[],\n frame_field_schema=[],\n )\n\n\ndef _flatten_fields(\n path: t.List[str], fields: t.List[t.Dict]\n) -> t.List[t.Dict]:\n result = []\n for field in fields:\n key = field.pop(\"name\", None)\n if key is None:\n # Issues with concurrency can cause this to happen.\n # Until it's fixed, just ignore these fields to avoid throwing hard\n # errors when loading in the app.\n logging.debug(\"Skipping field with no name: %s\", field)\n continue\n field_path = path + [key]\n field[\"path\"] = \".\".join(field_path)\n result.append(field)\n\n fields = field.pop(\"fields\", None)\n if fields:\n result = result + _flatten_fields(field_path, fields)\n\n return result\n\n\ndef _convert_targets(targets: t.Dict[str, str]) -> t.List[Target]:\n return [Target(target=k, value=v) for k, v in targets.items()]\n\n\nasync def serialize_dataset(\n dataset_name: str,\n serialized_view: BSONArray,\n saved_view_slug: t.Optional[str] = None,\n dicts=True,\n) -> Dataset:\n def run():\n if not fod.dataset_exists(dataset_name):\n return None\n\n dataset = fod.load_dataset(dataset_name)\n dataset.reload()\n view_name = None\n try:\n doc = dataset._get_saved_view_doc(saved_view_slug, slug=True)\n view = dataset.load_saved_view(doc.name)\n view_name = view.name\n if serialized_view:\n for stage in serialized_view:\n view = view.add_stage(fosg.ViewStage._from_dict(stage))\n except:\n view = fov.DatasetView._build(dataset, serialized_view or [])\n\n doc = dataset._doc.to_dict(no_dereference=True)\n Dataset.modifier(doc)\n data = from_dict(Dataset, doc)\n data.view_cls = None\n data.view_name = view_name\n data.saved_view_slug = saved_view_slug\n\n collection = dataset.view()\n if view is not None:\n # unique id for for the relay global store\n #\n # until a schema is with respect to a view and not a dataset this\n # is required\n data.id = ObjectId()\n if view._dataset != dataset:\n d = view._dataset._serialize()\n data.media_type = d[\"media_type\"]\n data.view_cls = etau.get_class_name(view)\n\n data.parent_media_type = view._parent_media_type\n data.media_type = view.media_type\n\n collection = view\n\n data.sample_fields = serialize_fields(\n collection.get_field_schema(flat=True)\n )\n\n data.frame_fields = serialize_fields(\n collection.get_frame_field_schema(flat=True)\n )\n\n if dicts:\n saved_views = []\n for view in data.saved_views:\n view_dict = asdict(view)\n view_dict[\"view_name\"] = view.view_name()\n view_dict[\"stage_dicts\"] = view.stage_dicts()\n saved_views.append(view_dict)\n\n data.saved_views = saved_views\n\n for brain_method in data.brain_methods:\n try:\n type = brain_method.config.type().value\n except:\n type = None\n\n try:\n max_k = brain_method.config.max_k()\n except:\n max_k = None\n\n try:\n supports_least_similarity = (\n brain_method.config.supports_least_similarity()\n )\n except:\n supports_least_similarity = None\n\n setattr(brain_method.config, \"type\", type)\n setattr(brain_method.config, \"max_k\", max_k)\n setattr(\n brain_method.config,\n \"supports_least_similarity\",\n supports_least_similarity,\n )\n\n _assign_estimated_counts(data, dataset)\n _assign_lightning_info(data, dataset)\n\n return data\n\n return await run_sync_task(run)\n\n\ndef _assign_estimated_counts(dataset: Dataset, fo_dataset: fo.Dataset):\n setattr(\n dataset,\n \"estimated_sample_count\",\n fo_dataset._sample_collection.estimated_document_count(),\n )\n setattr(\n dataset,\n \"estimated_frame_count\",\n fo_dataset._frame_collection.estimated_document_count()\n if fo_dataset._frame_collection_name\n else None,\n )\n\n\ndef _assign_lightning_info(dataset: Dataset, fo_dataset: fo.Dataset):\n dataset.sample_indexes, dataset.frame_indexes = indexes_from_dict(\n fo_dataset.get_index_information()\n )\n", "path": "fiftyone/server/query.py" } ]
diff --git a/fiftyone/server/query.py b/fiftyone/server/query.py index 2cda36e6422..03d5e30fd3c 100644 --- a/fiftyone/server/query.py +++ b/fiftyone/server/query.py @@ -317,6 +317,9 @@ def modifier(doc: dict) -> dict: ] doc["default_skeletons"] = doc.get("default_skeletons", None) + # gql private fields must always be present + doc.setdefault("frame_collection_name", None) + return doc @classmethod diff --git a/tests/unittests/server_tests.py b/tests/unittests/server_tests.py index e7b8dac3c9e..7f41262cd48 100644 --- a/tests/unittests/server_tests.py +++ b/tests/unittests/server_tests.py @@ -13,8 +13,9 @@ import fiftyone.core.labels as fol import fiftyone.core.odm as foo import fiftyone.core.sample as fos -import fiftyone.server.view as fosv +from fiftyone.server.query import Dataset from fiftyone.server.samples import paginate_samples +import fiftyone.server.view as fosv from decorators import drop_datasets from utils.groups import make_disjoint_groups_dataset @@ -894,3 +895,10 @@ async def test_disjoint_groups(self): ) self.assertEqual(len(second_samples.edges), 1) self.assertEqual(second_samples.edges[0].node.id, second._id) + + +class ServerDocTests(unittest.TestCase): + def test_dataset_doc(self): + doc = Dataset.modifier({"_id": "id"}) + self.assertIn("frame_collection_name", doc) + self.assertEqual(doc["frame_collection_name"], None)
projectmesa__mesa-112
DataCollector bug Found a minor bug in DataCollector, where some variables are not initialized in the instance, and become class variables instead. Fixing.
[ { "content": "'''\nMesa Data Collection Module\n=====================================================\n\nDataCollector is meant to provide a simple, standard way to collect data\ngenerated by a Mesa model. It collects three types of data: model-level data,\nagent-level data, and tables.\n\nA DataCollector is instantiated with two dictionaries of reporter names and\nassociated functions for each, one for model-level data and one for\nagent-level data; a third dictionary provides table names and columns.\n\nWhen the collect() method is called, each model-level function is called, with\nthe model as the argument, and the results associated with the relevant\nvariable. Then the agent-level functions are called on each\nagent in the model scheduler.\n\nAdditionally, other objects can write directly to tables by passing in an\nappropriate dictionary object for a table row.\n\nThe DataCollector then stores the data it collects in dictionaries:\n * model_vars maps each reporter to a list of its values\n * agent_vars maps each reporter to a list of lists, where each nested list\n stores (agent_id, value) pairs.\n * tables maps each table to a dictionary, with each column as a key with a\n list as its value.\n\nFinally, DataCollector can create a pandas DataFrame from each collection.\n\nThe default DataCollector here makes several assumptions:\n * The model has a schedule object called 'schedule'\n * The schedule has an agent list called agents\n * For collecting agent-level variables, agents must have a unique_id\n'''\nfrom collections import defaultdict\nimport pandas as pd\n\n\nclass DataCollector(object):\n '''\n Class for collecting data generated by a Mesa model.\n\n A DataCollector is instantiated with dictionaries of names of model- and\n agent-level variables to collect, associated with functions which actually\n collect them. When the collect(...) method is called, it executes these\n functions one by one and stores the results.\n '''\n model_reporters = {}\n agent_reporters = {}\n\n model_vars = {}\n agent_vars = {}\n tables = {}\n\n model = None\n\n def __init__(self, model_reporters={}, agent_reporters={}, tables={}):\n '''\n Instantiate a DataCollector with lists of model and agent reporters.\n\n Both model_reporters and agent_reporters accept a dictionary mapping a\n variable name to a method used to collect it.\n For example, if there was only one model-level reporter for number of\n agents, it might look like:\n {\"agent_count\": lambda m: m.schedule.get_agent_count() }\n If there was only one agent-level reporter (e.g. the agent's energy),\n it might look like this:\n {\"energy\": lambda a: a.energy}\n\n The tables arg accepts a dictionary mapping names of tables to lists of\n columns. For example, if we want to allow agents to write their age\n when they are destroyed (to keep track of lifespans), it might look\n like:\n {\"Lifespan\": [\"unique_id\", \"age\"]}\n\n Args:\n model_reporters: Dictionary of reporter names and functions.\n agent_reporters: Dictionary of reporter names and functions.\n '''\n\n self.model_reporters = {}\n self.agent_reporters = {}\n self.tables = {}\n\n for name, func in model_reporters.items():\n self._new_model_reporter(name, func)\n\n for name, func in agent_reporters.items():\n self._new_agent_reporter(name, func)\n\n for name, columns in tables.items():\n self._new_table(name, columns)\n\n def _new_model_reporter(self, reporter_name, reporter_function):\n '''\n Add a new model-level reporter to collect.\n Args:\n reporter_name: Name of the model-level variable to collect.\n reporter_function: Function object that returns the variable when\n given a model instance.\n '''\n\n self.model_reporters[reporter_name] = reporter_function\n self.model_vars[reporter_name] = []\n\n def _new_agent_reporter(self, reporter_name, reporter_function):\n '''\n Add a new agent-level reporter to collect.\n Args:\n reporter_name: Name of the agent-level variable to collect.\n reporter_function: Function object that returns the variable when\n given an agent object.\n '''\n self.agent_reporters[reporter_name] = reporter_function\n self.agent_vars[reporter_name] = []\n\n def _new_table(self, table_name, table_columns):\n '''\n Add a new table that objects can write to.\n Args:\n table_name: Name of the new table.\n table_columns: List of columns to add to the table.\n '''\n new_table = {column: [] for column in table_columns}\n self.tables[table_name] = new_table\n\n def collect(self, model):\n '''\n Collect all the data for the given model object.\n '''\n if self.model_reporters:\n for var, reporter in self.model_reporters.items():\n self.model_vars[var].append(reporter(model))\n\n if self.agent_reporters:\n for var, reporter in self.agent_reporters.items():\n agent_records = []\n for agent in model.schedule.agents:\n agent_records.append((agent.unique_id, reporter(agent)))\n self.agent_vars[var].append(agent_records)\n\n def add_table_row(self, table_name, row, ignore_missing=False):\n '''\n Add a row dictionary to a specific table.\n\n Args:\n table_name: Name of the table to append a row to.\n row: A dictionary of the form {column_name: value...}\n ignore_missing: If True, fill any missing columns with Nones;\n if False, throw an error if any columns are missing\n '''\n if table_name not in self.tables:\n raise Exception(\"Table does not exist.\")\n\n for column in self.tables[table_name]:\n if column in row:\n self.tables[table_name][column].append(row[column])\n elif ignore_missing:\n self.tables[table_name][column].append(None)\n else:\n raise Exception(\"Could not insert row with missing column\")\n\n def get_model_vars_dataframe(self):\n '''\n Create a pandas DataFrame from the model variables.\n The DataFrame has one column for each model variable, and the index is\n (implicitly) the model tick.\n '''\n return pd.DataFrame(self.model_vars)\n\n def get_agent_vars_dataframe(self):\n '''\n Create a pandas DataFrame from the agent variables.\n The DataFrame has one column for each variable, with two additional\n columns for tick and agent_id.\n '''\n\n data = defaultdict(dict)\n for var, records in self.agent_vars.items():\n for step, entries in enumerate(records):\n for entry in entries:\n agent_id = entry[0]\n val = entry[1]\n data[(step, agent_id)][var] = val\n df = pd.DataFrame.from_dict(data, orient=\"index\")\n df.index.names = [\"Step\", \"AgentID\"]\n return df\n\n def get_table_dataframe(self, table_name):\n '''\n Create a pandas DataFrame from a particular table.\n\n Args:\n table_name: The name of the table to convert.\n '''\n if table_name not in self.tables:\n raise Exception(\"No such table.\")\n return pd.DataFrame(self.tables[table_name])\n", "path": "mesa/datacollection.py" } ]
[ { "content": "'''\nMesa Data Collection Module\n=====================================================\n\nDataCollector is meant to provide a simple, standard way to collect data\ngenerated by a Mesa model. It collects three types of data: model-level data,\nagent-level data, and tables.\n\nA DataCollector is instantiated with two dictionaries of reporter names and\nassociated functions for each, one for model-level data and one for\nagent-level data; a third dictionary provides table names and columns.\n\nWhen the collect() method is called, each model-level function is called, with\nthe model as the argument, and the results associated with the relevant\nvariable. Then the agent-level functions are called on each\nagent in the model scheduler.\n\nAdditionally, other objects can write directly to tables by passing in an\nappropriate dictionary object for a table row.\n\nThe DataCollector then stores the data it collects in dictionaries:\n * model_vars maps each reporter to a list of its values\n * agent_vars maps each reporter to a list of lists, where each nested list\n stores (agent_id, value) pairs.\n * tables maps each table to a dictionary, with each column as a key with a\n list as its value.\n\nFinally, DataCollector can create a pandas DataFrame from each collection.\n\nThe default DataCollector here makes several assumptions:\n * The model has a schedule object called 'schedule'\n * The schedule has an agent list called agents\n * For collecting agent-level variables, agents must have a unique_id\n'''\nfrom collections import defaultdict\nimport pandas as pd\n\n\nclass DataCollector(object):\n '''\n Class for collecting data generated by a Mesa model.\n\n A DataCollector is instantiated with dictionaries of names of model- and\n agent-level variables to collect, associated with functions which actually\n collect them. When the collect(...) method is called, it executes these\n functions one by one and stores the results.\n '''\n model_reporters = {}\n agent_reporters = {}\n\n model_vars = {}\n agent_vars = {}\n tables = {}\n\n model = None\n\n def __init__(self, model_reporters={}, agent_reporters={}, tables={}):\n '''\n Instantiate a DataCollector with lists of model and agent reporters.\n\n Both model_reporters and agent_reporters accept a dictionary mapping a\n variable name to a method used to collect it.\n For example, if there was only one model-level reporter for number of\n agents, it might look like:\n {\"agent_count\": lambda m: m.schedule.get_agent_count() }\n If there was only one agent-level reporter (e.g. the agent's energy),\n it might look like this:\n {\"energy\": lambda a: a.energy}\n\n The tables arg accepts a dictionary mapping names of tables to lists of\n columns. For example, if we want to allow agents to write their age\n when they are destroyed (to keep track of lifespans), it might look\n like:\n {\"Lifespan\": [\"unique_id\", \"age\"]}\n\n Args:\n model_reporters: Dictionary of reporter names and functions.\n agent_reporters: Dictionary of reporter names and functions.\n '''\n\n self.model_reporters = {}\n self.agent_reporters = {}\n\n self.model_vars = {}\n self.agent_vars = {}\n self.tables = {}\n\n for name, func in model_reporters.items():\n self._new_model_reporter(name, func)\n\n for name, func in agent_reporters.items():\n self._new_agent_reporter(name, func)\n\n for name, columns in tables.items():\n self._new_table(name, columns)\n\n def _new_model_reporter(self, reporter_name, reporter_function):\n '''\n Add a new model-level reporter to collect.\n Args:\n reporter_name: Name of the model-level variable to collect.\n reporter_function: Function object that returns the variable when\n given a model instance.\n '''\n\n self.model_reporters[reporter_name] = reporter_function\n self.model_vars[reporter_name] = []\n\n def _new_agent_reporter(self, reporter_name, reporter_function):\n '''\n Add a new agent-level reporter to collect.\n Args:\n reporter_name: Name of the agent-level variable to collect.\n reporter_function: Function object that returns the variable when\n given an agent object.\n '''\n self.agent_reporters[reporter_name] = reporter_function\n self.agent_vars[reporter_name] = []\n\n def _new_table(self, table_name, table_columns):\n '''\n Add a new table that objects can write to.\n Args:\n table_name: Name of the new table.\n table_columns: List of columns to add to the table.\n '''\n new_table = {column: [] for column in table_columns}\n self.tables[table_name] = new_table\n\n def collect(self, model):\n '''\n Collect all the data for the given model object.\n '''\n if self.model_reporters:\n for var, reporter in self.model_reporters.items():\n self.model_vars[var].append(reporter(model))\n\n if self.agent_reporters:\n for var, reporter in self.agent_reporters.items():\n agent_records = []\n for agent in model.schedule.agents:\n agent_records.append((agent.unique_id, reporter(agent)))\n self.agent_vars[var].append(agent_records)\n\n def add_table_row(self, table_name, row, ignore_missing=False):\n '''\n Add a row dictionary to a specific table.\n\n Args:\n table_name: Name of the table to append a row to.\n row: A dictionary of the form {column_name: value...}\n ignore_missing: If True, fill any missing columns with Nones;\n if False, throw an error if any columns are missing\n '''\n if table_name not in self.tables:\n raise Exception(\"Table does not exist.\")\n\n for column in self.tables[table_name]:\n if column in row:\n self.tables[table_name][column].append(row[column])\n elif ignore_missing:\n self.tables[table_name][column].append(None)\n else:\n raise Exception(\"Could not insert row with missing column\")\n\n def get_model_vars_dataframe(self):\n '''\n Create a pandas DataFrame from the model variables.\n The DataFrame has one column for each model variable, and the index is\n (implicitly) the model tick.\n '''\n return pd.DataFrame(self.model_vars)\n\n def get_agent_vars_dataframe(self):\n '''\n Create a pandas DataFrame from the agent variables.\n The DataFrame has one column for each variable, with two additional\n columns for tick and agent_id.\n '''\n\n data = defaultdict(dict)\n for var, records in self.agent_vars.items():\n for step, entries in enumerate(records):\n for entry in entries:\n agent_id = entry[0]\n val = entry[1]\n data[(step, agent_id)][var] = val\n df = pd.DataFrame.from_dict(data, orient=\"index\")\n df.index.names = [\"Step\", \"AgentID\"]\n return df\n\n def get_table_dataframe(self, table_name):\n '''\n Create a pandas DataFrame from a particular table.\n\n Args:\n table_name: The name of the table to convert.\n '''\n if table_name not in self.tables:\n raise Exception(\"No such table.\")\n return pd.DataFrame(self.tables[table_name])\n", "path": "mesa/datacollection.py" } ]
diff --git a/mesa/datacollection.py b/mesa/datacollection.py index 0a4a929091b..f3270381373 100644 --- a/mesa/datacollection.py +++ b/mesa/datacollection.py @@ -80,6 +80,9 @@ def __init__(self, model_reporters={}, agent_reporters={}, tables={}): self.model_reporters = {} self.agent_reporters = {} + + self.model_vars = {} + self.agent_vars = {} self.tables = {} for name, func in model_reporters.items():
plotly__dash-796
[BUG] Auto-generated docstrings contain JS boolean values instead of Python boolean values Prompted by https://github.com/plotly/dash-bio/pull/379#discussion_r297840872 While `true` and `false` are not capitalized in JavaScript, they are capitalized in Python. The Python components' docstrings should reflect this, since other JS types are "translated" into Python types (e.g., `PropTypes.shape` -> `dict`).
[ { "content": "from collections import OrderedDict\nimport copy\nimport os\n\nfrom dash.development.base_component import _explicitize_args\nfrom dash.exceptions import NonExistentEventException\nfrom ._all_keywords import python_keywords\nfrom .base_component import Component\n\n\n# pylint: disable=unused-argument\ndef generate_class_string(typename, props, description, namespace):\n \"\"\"\n Dynamically generate class strings to have nicely formatted docstrings,\n keyword arguments, and repr\n\n Inspired by http://jameso.be/2013/08/06/namedtuple.html\n\n Parameters\n ----------\n typename\n props\n description\n namespace\n\n Returns\n -------\n string\n\n \"\"\"\n # TODO _prop_names, _type, _namespace, and available_properties\n # can be modified by a Dash JS developer via setattr\n # TODO - Tab out the repr for the repr of these components to make it\n # look more like a hierarchical tree\n # TODO - Include \"description\" \"defaultValue\" in the repr and docstring\n #\n # TODO - Handle \"required\"\n #\n # TODO - How to handle user-given `null` values? I want to include\n # an expanded docstring like Dropdown(value=None, id=None)\n # but by templating in those None values, I have no way of knowing\n # whether a property is None because the user explicitly wanted\n # it to be `null` or whether that was just the default value.\n # The solution might be to deal with default values better although\n # not all component authors will supply those.\n c = '''class {typename}(Component):\n \"\"\"{docstring}\"\"\"\n @_explicitize_args\n def __init__(self, {default_argtext}):\n self._prop_names = {list_of_valid_keys}\n self._type = '{typename}'\n self._namespace = '{namespace}'\n self._valid_wildcard_attributes =\\\n {list_of_valid_wildcard_attr_prefixes}\n self.available_properties = {list_of_valid_keys}\n self.available_wildcard_properties =\\\n {list_of_valid_wildcard_attr_prefixes}\n\n _explicit_args = kwargs.pop('_explicit_args')\n _locals = locals()\n _locals.update(kwargs) # For wildcard attrs\n args = {{k: _locals[k] for k in _explicit_args if k != 'children'}}\n\n for k in {required_props}:\n if k not in args:\n raise TypeError(\n 'Required argument `' + k + '` was not specified.')\n super({typename}, self).__init__({argtext})\n'''\n\n filtered_props = reorder_props(filter_props(props))\n wildcard_prefixes = repr(parse_wildcards(props))\n list_of_valid_keys = repr(list(map(str, filtered_props.keys())))\n docstring = create_docstring(\n component_name=typename,\n props=filtered_props,\n description=description).replace('\\r\\n', '\\n')\n\n prohibit_events(props)\n\n # pylint: disable=unused-variable\n prop_keys = list(props.keys())\n if 'children' in props:\n prop_keys.remove('children')\n default_argtext = \"children=None, \"\n argtext = 'children=children, **args'\n else:\n default_argtext = \"\"\n argtext = '**args'\n default_argtext += \", \".join(\n [('{:s}=Component.REQUIRED'.format(p)\n if props[p]['required'] else\n '{:s}=Component.UNDEFINED'.format(p))\n for p in prop_keys\n if not p.endswith(\"-*\") and\n p not in python_keywords and\n p != 'setProps'] + [\"**kwargs\"]\n )\n required_args = required_props(props)\n return c.format(\n typename=typename,\n namespace=namespace,\n filtered_props=filtered_props,\n list_of_valid_wildcard_attr_prefixes=wildcard_prefixes,\n list_of_valid_keys=list_of_valid_keys,\n docstring=docstring,\n default_argtext=default_argtext,\n argtext=argtext,\n required_props=required_args\n )\n\n\ndef generate_class_file(typename, props, description, namespace):\n \"\"\"\n Generate a python class file (.py) given a class string\n\n Parameters\n ----------\n typename\n props\n description\n namespace\n\n Returns\n -------\n\n \"\"\"\n import_string =\\\n \"# AUTO GENERATED FILE - DO NOT EDIT\\n\\n\" + \\\n \"from dash.development.base_component import \" + \\\n \"Component, _explicitize_args\\n\\n\\n\"\n class_string = generate_class_string(\n typename,\n props,\n description,\n namespace\n )\n file_name = \"{:s}.py\".format(typename)\n\n file_path = os.path.join(namespace, file_name)\n with open(file_path, 'w') as f:\n f.write(import_string)\n f.write(class_string)\n\n print('Generated {}'.format(file_name))\n\n\ndef generate_imports(project_shortname, components):\n with open(os.path.join(project_shortname, '_imports_.py'), 'w') as f:\n imports_string = '{}\\n\\n{}'.format(\n '\\n'.join(\n 'from .{0} import {0}'.format(x) for x in components),\n '__all__ = [\\n{}\\n]'.format(\n ',\\n'.join(' \"{}\"'.format(x) for x in components))\n )\n\n f.write(imports_string)\n\n\ndef generate_classes_files(project_shortname, metadata, *component_generators):\n components = []\n for component_path, component_data in metadata.items():\n component_name = component_path.split('/')[-1].split('.')[0]\n components.append(component_name)\n\n for generator in component_generators:\n generator(\n component_name,\n component_data['props'],\n component_data['description'],\n project_shortname\n )\n\n return components\n\n\ndef generate_class(typename, props, description, namespace):\n \"\"\"\n Generate a python class object given a class string\n\n Parameters\n ----------\n typename\n props\n description\n namespace\n\n Returns\n -------\n\n \"\"\"\n string = generate_class_string(typename, props, description, namespace)\n scope = {'Component': Component, '_explicitize_args': _explicitize_args}\n # pylint: disable=exec-used\n exec(string, scope)\n result = scope[typename]\n return result\n\n\ndef required_props(props):\n \"\"\"\n Pull names of required props from the props object\n\n Parameters\n ----------\n props: dict\n\n Returns\n -------\n list\n List of prop names (str) that are required for the Component\n \"\"\"\n return [prop_name for prop_name, prop in list(props.items())\n if prop['required']]\n\n\ndef create_docstring(component_name, props, description):\n \"\"\"\n Create the Dash component docstring\n\n Parameters\n ----------\n component_name: str\n Component name\n props: dict\n Dictionary with {propName: propMetadata} structure\n description: str\n Component description\n\n Returns\n -------\n str\n Dash component docstring\n \"\"\"\n # Ensure props are ordered with children first\n props = reorder_props(props=props)\n\n return (\n \"\"\"A{n} {name} component.\\n{description}\n\nKeyword arguments:\\n{args}\"\"\"\n ).format(\n n='n' if component_name[0].lower() in ['a', 'e', 'i', 'o', 'u']\n else '',\n name=component_name,\n description=description,\n args='\\n'.join(\n create_prop_docstring(\n prop_name=p,\n type_object=prop['type'] if 'type' in prop\n else prop['flowType'],\n required=prop['required'],\n description=prop['description'],\n default=prop.get('defaultValue'),\n indent_num=0,\n is_flow_type='flowType' in prop and 'type' not in prop)\n for p, prop in list(filter_props(props).items())))\n\n\ndef prohibit_events(props):\n \"\"\"\n Events have been removed. Raise an error if we see dashEvents or fireEvents\n\n Parameters\n ----------\n props: dict\n Dictionary with {propName: propMetadata} structure\n\n Raises\n -------\n ?\n \"\"\"\n if 'dashEvents' in props or 'fireEvents' in props:\n raise NonExistentEventException(\n 'Events are no longer supported by dash. Use properties instead, '\n 'eg `n_clicks` instead of a `click` event.')\n\n\ndef parse_wildcards(props):\n \"\"\"\n Pull out the wildcard attributes from the Component props\n\n Parameters\n ----------\n props: dict\n Dictionary with {propName: propMetadata} structure\n\n Returns\n -------\n list\n List of Dash valid wildcard prefixes\n \"\"\"\n list_of_valid_wildcard_attr_prefixes = []\n for wildcard_attr in [\"data-*\", \"aria-*\"]:\n if wildcard_attr in props:\n list_of_valid_wildcard_attr_prefixes.append(wildcard_attr[:-1])\n return list_of_valid_wildcard_attr_prefixes\n\n\ndef reorder_props(props):\n \"\"\"\n If \"children\" is in props, then move it to the\n front to respect dash convention\n\n Parameters\n ----------\n props: dict\n Dictionary with {propName: propMetadata} structure\n\n Returns\n -------\n dict\n Dictionary with {propName: propMetadata} structure\n \"\"\"\n if 'children' in props:\n # Constructing an OrderedDict with duplicate keys, you get the order\n # from the first one but the value from the last.\n # Doing this to avoid mutating props, which can cause confusion.\n props = OrderedDict([('children', '')] + list(props.items()))\n\n return props\n\n\ndef filter_props(props):\n \"\"\"\n Filter props from the Component arguments to exclude:\n - Those without a \"type\" or a \"flowType\" field\n - Those with arg.type.name in {'func', 'symbol', 'instanceOf'}\n\n Parameters\n ----------\n props: dict\n Dictionary with {propName: propMetadata} structure\n\n Returns\n -------\n dict\n Filtered dictionary with {propName: propMetadata} structure\n\n Examples\n --------\n ```python\n prop_args = {\n 'prop1': {\n 'type': {'name': 'bool'},\n 'required': False,\n 'description': 'A description',\n 'flowType': {},\n 'defaultValue': {'value': 'false', 'computed': False},\n },\n 'prop2': {'description': 'A prop without a type'},\n 'prop3': {\n 'type': {'name': 'func'},\n 'description': 'A function prop',\n },\n }\n # filtered_prop_args is now\n # {\n # 'prop1': {\n # 'type': {'name': 'bool'},\n # 'required': False,\n # 'description': 'A description',\n # 'flowType': {},\n # 'defaultValue': {'value': 'false', 'computed': False},\n # },\n # }\n filtered_prop_args = filter_props(prop_args)\n ```\n \"\"\"\n filtered_props = copy.deepcopy(props)\n\n for arg_name, arg in list(filtered_props.items()):\n if 'type' not in arg and 'flowType' not in arg:\n filtered_props.pop(arg_name)\n continue\n\n # Filter out functions and instances --\n # these cannot be passed from Python\n if 'type' in arg: # These come from PropTypes\n arg_type = arg['type']['name']\n if arg_type in {'func', 'symbol', 'instanceOf'}:\n filtered_props.pop(arg_name)\n elif 'flowType' in arg: # These come from Flow & handled differently\n arg_type_name = arg['flowType']['name']\n if arg_type_name == 'signature':\n # This does the same as the PropTypes filter above, but \"func\"\n # is under \"type\" if \"name\" is \"signature\" vs just in \"name\"\n if 'type' not in arg['flowType'] \\\n or arg['flowType']['type'] != 'object':\n filtered_props.pop(arg_name)\n else:\n raise ValueError\n\n return filtered_props\n\n\n# pylint: disable=too-many-arguments\ndef create_prop_docstring(prop_name, type_object, required, description,\n default, indent_num, is_flow_type=False):\n \"\"\"\n Create the Dash component prop docstring\n\n Parameters\n ----------\n prop_name: str\n Name of the Dash component prop\n type_object: dict\n react-docgen-generated prop type dictionary\n required: bool\n Component is required?\n description: str\n Dash component description\n default: dict\n Either None if a default value is not defined, or\n dict containing the key 'value' that defines a\n default value for the prop\n indent_num: int\n Number of indents to use for the context block\n (creates 2 spaces for every indent)\n is_flow_type: bool\n Does the prop use Flow types? Otherwise, uses PropTypes\n\n Returns\n -------\n str\n Dash component prop docstring\n \"\"\"\n py_type_name = js_to_py_type(\n type_object=type_object,\n is_flow_type=is_flow_type,\n indent_num=indent_num + 1)\n indent_spacing = ' ' * indent_num\n\n if default is None:\n default = ''\n else:\n default = default['value']\n\n is_required = 'optional'\n if required:\n is_required = 'required'\n elif default and default not in ['null', '{}', '[]']:\n is_required = 'default {}'.format(\n default.replace('\\n', '\\n' + indent_spacing)\n )\n\n if '\\n' in py_type_name:\n return '{indent_spacing}- {name} (dict; {is_required}): ' \\\n '{description}{period}' \\\n '{name} has the following type: {type}'.format(\n indent_spacing=indent_spacing,\n name=prop_name,\n type=py_type_name,\n description=description.strip().strip('.'),\n period='. ' if description else '',\n is_required=is_required)\n return '{indent_spacing}- {name} ({type}' \\\n '{is_required}){description}'.format(\n indent_spacing=indent_spacing,\n name=prop_name,\n type='{}; '.format(py_type_name) if py_type_name else '',\n description=(\n ': {}'.format(description) if description != '' else ''\n ),\n is_required=is_required)\n\n\ndef map_js_to_py_types_prop_types(type_object):\n \"\"\"Mapping from the PropTypes js type object to the Python type\"\"\"\n\n def shape_or_exact():\n return 'dict containing keys {}.\\n{}'.format(\n ', '.join(\n \"'{}'\".format(t) for t in list(type_object['value'].keys())\n ),\n 'Those keys have the following types:\\n{}'.format(\n '\\n'.join(\n create_prop_docstring(\n prop_name=prop_name,\n type_object=prop,\n required=prop['required'],\n description=prop.get('description', ''),\n default=prop.get('defaultValue'),\n indent_num=1\n ) for prop_name, prop in\n list(type_object['value'].items())))\n )\n\n return dict(\n array=lambda: 'list',\n bool=lambda: 'boolean',\n number=lambda: 'number',\n string=lambda: 'string',\n object=lambda: 'dict',\n any=lambda: 'boolean | number | string | dict | list',\n element=lambda: 'dash component',\n node=lambda: 'a list of or a singular dash '\n 'component, string or number',\n\n # React's PropTypes.oneOf\n enum=lambda: 'a value equal to: {}'.format(\n ', '.join(\n '{}'.format(str(t['value']))\n for t in type_object['value'])),\n\n # React's PropTypes.oneOfType\n union=lambda: '{}'.format(\n ' | '.join(\n '{}'.format(js_to_py_type(subType))\n for subType in type_object['value']\n if js_to_py_type(subType) != '')),\n\n # React's PropTypes.arrayOf\n arrayOf=lambda: (\n \"list\" + ((\" of {}\").format(\n js_to_py_type(type_object[\"value\"]) + 's'\n if js_to_py_type(type_object[\"value\"]).split(' ')[0] != 'dict'\n else js_to_py_type(type_object[\"value\"]).replace(\n 'dict', 'dicts', 1\n )\n )\n if js_to_py_type(type_object[\"value\"]) != \"\"\n else \"\")\n ),\n\n # React's PropTypes.objectOf\n objectOf=lambda: (\n 'dict with strings as keys and values of type {}'\n ).format(\n js_to_py_type(type_object['value'])),\n\n # React's PropTypes.shape\n shape=shape_or_exact,\n # React's PropTypes.exact\n exact=shape_or_exact\n )\n\n\ndef map_js_to_py_types_flow_types(type_object):\n \"\"\"Mapping from the Flow js types to the Python type\"\"\"\n\n return dict(\n array=lambda: 'list',\n boolean=lambda: 'boolean',\n number=lambda: 'number',\n string=lambda: 'string',\n Object=lambda: 'dict',\n any=lambda: 'bool | number | str | dict | list',\n Element=lambda: 'dash component',\n Node=lambda: 'a list of or a singular dash '\n 'component, string or number',\n\n # React's PropTypes.oneOfType\n union=lambda: '{}'.format(\n ' | '.join(\n '{}'.format(js_to_py_type(subType))\n for subType in type_object['elements']\n if js_to_py_type(subType) != '')),\n\n # Flow's Array type\n Array=lambda: 'list{}'.format(\n ' of {}s'.format(\n js_to_py_type(type_object['elements'][0]))\n if js_to_py_type(type_object['elements'][0]) != ''\n else ''),\n\n # React's PropTypes.shape\n signature=lambda indent_num: 'dict containing keys {}.\\n{}'.format(\n ', '.join(\"'{}'\".format(d['key'])\n for d in type_object['signature']['properties']),\n '{}Those keys have the following types:\\n{}'.format(\n ' ' * indent_num,\n '\\n'.join(\n create_prop_docstring(\n prop_name=prop['key'],\n type_object=prop['value'],\n required=prop['value']['required'],\n description=prop['value'].get('description', ''),\n default=prop.get('defaultValue'),\n indent_num=indent_num,\n is_flow_type=True)\n for prop in type_object['signature']['properties']))),\n )\n\n\ndef js_to_py_type(type_object, is_flow_type=False, indent_num=0):\n \"\"\"\n Convert JS types to Python types for the component definition\n\n Parameters\n ----------\n type_object: dict\n react-docgen-generated prop type dictionary\n is_flow_type: bool\n Does the prop use Flow types? Otherwise, uses PropTypes\n indent_num: int\n Number of indents to use for the docstring for the prop\n\n Returns\n -------\n str\n Python type string\n \"\"\"\n js_type_name = type_object['name']\n js_to_py_types = map_js_to_py_types_flow_types(type_object=type_object) \\\n if is_flow_type \\\n else map_js_to_py_types_prop_types(type_object=type_object)\n\n if 'computed' in type_object and type_object['computed'] \\\n or type_object.get('type', '') == 'function':\n return ''\n if js_type_name in js_to_py_types:\n if js_type_name == 'signature': # This is a Flow object w/ signature\n return js_to_py_types[js_type_name](indent_num)\n # All other types\n return js_to_py_types[js_type_name]()\n return ''\n", "path": "dash/development/_py_components_generation.py" } ]
[ { "content": "from collections import OrderedDict\nimport copy\nimport os\n\nfrom dash.development.base_component import _explicitize_args\nfrom dash.exceptions import NonExistentEventException\nfrom ._all_keywords import python_keywords\nfrom .base_component import Component\n\n\n# pylint: disable=unused-argument\ndef generate_class_string(typename, props, description, namespace):\n \"\"\"\n Dynamically generate class strings to have nicely formatted docstrings,\n keyword arguments, and repr\n\n Inspired by http://jameso.be/2013/08/06/namedtuple.html\n\n Parameters\n ----------\n typename\n props\n description\n namespace\n\n Returns\n -------\n string\n\n \"\"\"\n # TODO _prop_names, _type, _namespace, and available_properties\n # can be modified by a Dash JS developer via setattr\n # TODO - Tab out the repr for the repr of these components to make it\n # look more like a hierarchical tree\n # TODO - Include \"description\" \"defaultValue\" in the repr and docstring\n #\n # TODO - Handle \"required\"\n #\n # TODO - How to handle user-given `null` values? I want to include\n # an expanded docstring like Dropdown(value=None, id=None)\n # but by templating in those None values, I have no way of knowing\n # whether a property is None because the user explicitly wanted\n # it to be `null` or whether that was just the default value.\n # The solution might be to deal with default values better although\n # not all component authors will supply those.\n c = '''class {typename}(Component):\n \"\"\"{docstring}\"\"\"\n @_explicitize_args\n def __init__(self, {default_argtext}):\n self._prop_names = {list_of_valid_keys}\n self._type = '{typename}'\n self._namespace = '{namespace}'\n self._valid_wildcard_attributes =\\\n {list_of_valid_wildcard_attr_prefixes}\n self.available_properties = {list_of_valid_keys}\n self.available_wildcard_properties =\\\n {list_of_valid_wildcard_attr_prefixes}\n\n _explicit_args = kwargs.pop('_explicit_args')\n _locals = locals()\n _locals.update(kwargs) # For wildcard attrs\n args = {{k: _locals[k] for k in _explicit_args if k != 'children'}}\n\n for k in {required_props}:\n if k not in args:\n raise TypeError(\n 'Required argument `' + k + '` was not specified.')\n super({typename}, self).__init__({argtext})\n'''\n\n filtered_props = reorder_props(filter_props(props))\n wildcard_prefixes = repr(parse_wildcards(props))\n list_of_valid_keys = repr(list(map(str, filtered_props.keys())))\n docstring = create_docstring(\n component_name=typename,\n props=filtered_props,\n description=description).replace('\\r\\n', '\\n')\n\n prohibit_events(props)\n\n # pylint: disable=unused-variable\n prop_keys = list(props.keys())\n if 'children' in props:\n prop_keys.remove('children')\n default_argtext = \"children=None, \"\n argtext = 'children=children, **args'\n else:\n default_argtext = \"\"\n argtext = '**args'\n default_argtext += \", \".join(\n [('{:s}=Component.REQUIRED'.format(p)\n if props[p]['required'] else\n '{:s}=Component.UNDEFINED'.format(p))\n for p in prop_keys\n if not p.endswith(\"-*\") and\n p not in python_keywords and\n p != 'setProps'] + [\"**kwargs\"]\n )\n required_args = required_props(props)\n return c.format(\n typename=typename,\n namespace=namespace,\n filtered_props=filtered_props,\n list_of_valid_wildcard_attr_prefixes=wildcard_prefixes,\n list_of_valid_keys=list_of_valid_keys,\n docstring=docstring,\n default_argtext=default_argtext,\n argtext=argtext,\n required_props=required_args\n )\n\n\ndef generate_class_file(typename, props, description, namespace):\n \"\"\"\n Generate a python class file (.py) given a class string\n\n Parameters\n ----------\n typename\n props\n description\n namespace\n\n Returns\n -------\n\n \"\"\"\n import_string =\\\n \"# AUTO GENERATED FILE - DO NOT EDIT\\n\\n\" + \\\n \"from dash.development.base_component import \" + \\\n \"Component, _explicitize_args\\n\\n\\n\"\n class_string = generate_class_string(\n typename,\n props,\n description,\n namespace\n )\n file_name = \"{:s}.py\".format(typename)\n\n file_path = os.path.join(namespace, file_name)\n with open(file_path, 'w') as f:\n f.write(import_string)\n f.write(class_string)\n\n print('Generated {}'.format(file_name))\n\n\ndef generate_imports(project_shortname, components):\n with open(os.path.join(project_shortname, '_imports_.py'), 'w') as f:\n imports_string = '{}\\n\\n{}'.format(\n '\\n'.join(\n 'from .{0} import {0}'.format(x) for x in components),\n '__all__ = [\\n{}\\n]'.format(\n ',\\n'.join(' \"{}\"'.format(x) for x in components))\n )\n\n f.write(imports_string)\n\n\ndef generate_classes_files(project_shortname, metadata, *component_generators):\n components = []\n for component_path, component_data in metadata.items():\n component_name = component_path.split('/')[-1].split('.')[0]\n components.append(component_name)\n\n for generator in component_generators:\n generator(\n component_name,\n component_data['props'],\n component_data['description'],\n project_shortname\n )\n\n return components\n\n\ndef generate_class(typename, props, description, namespace):\n \"\"\"\n Generate a python class object given a class string\n\n Parameters\n ----------\n typename\n props\n description\n namespace\n\n Returns\n -------\n\n \"\"\"\n string = generate_class_string(typename, props, description, namespace)\n scope = {'Component': Component, '_explicitize_args': _explicitize_args}\n # pylint: disable=exec-used\n exec(string, scope)\n result = scope[typename]\n return result\n\n\ndef required_props(props):\n \"\"\"\n Pull names of required props from the props object\n\n Parameters\n ----------\n props: dict\n\n Returns\n -------\n list\n List of prop names (str) that are required for the Component\n \"\"\"\n return [prop_name for prop_name, prop in list(props.items())\n if prop['required']]\n\n\ndef create_docstring(component_name, props, description):\n \"\"\"\n Create the Dash component docstring\n\n Parameters\n ----------\n component_name: str\n Component name\n props: dict\n Dictionary with {propName: propMetadata} structure\n description: str\n Component description\n\n Returns\n -------\n str\n Dash component docstring\n \"\"\"\n # Ensure props are ordered with children first\n props = reorder_props(props=props)\n\n return (\n \"\"\"A{n} {name} component.\\n{description}\n\nKeyword arguments:\\n{args}\"\"\"\n ).format(\n n='n' if component_name[0].lower() in ['a', 'e', 'i', 'o', 'u']\n else '',\n name=component_name,\n description=description,\n args='\\n'.join(\n create_prop_docstring(\n prop_name=p,\n type_object=prop['type'] if 'type' in prop\n else prop['flowType'],\n required=prop['required'],\n description=prop['description'],\n default=prop.get('defaultValue'),\n indent_num=0,\n is_flow_type='flowType' in prop and 'type' not in prop)\n for p, prop in list(filter_props(props).items())))\n\n\ndef prohibit_events(props):\n \"\"\"\n Events have been removed. Raise an error if we see dashEvents or fireEvents\n\n Parameters\n ----------\n props: dict\n Dictionary with {propName: propMetadata} structure\n\n Raises\n -------\n ?\n \"\"\"\n if 'dashEvents' in props or 'fireEvents' in props:\n raise NonExistentEventException(\n 'Events are no longer supported by dash. Use properties instead, '\n 'eg `n_clicks` instead of a `click` event.')\n\n\ndef parse_wildcards(props):\n \"\"\"\n Pull out the wildcard attributes from the Component props\n\n Parameters\n ----------\n props: dict\n Dictionary with {propName: propMetadata} structure\n\n Returns\n -------\n list\n List of Dash valid wildcard prefixes\n \"\"\"\n list_of_valid_wildcard_attr_prefixes = []\n for wildcard_attr in [\"data-*\", \"aria-*\"]:\n if wildcard_attr in props:\n list_of_valid_wildcard_attr_prefixes.append(wildcard_attr[:-1])\n return list_of_valid_wildcard_attr_prefixes\n\n\ndef reorder_props(props):\n \"\"\"\n If \"children\" is in props, then move it to the\n front to respect dash convention\n\n Parameters\n ----------\n props: dict\n Dictionary with {propName: propMetadata} structure\n\n Returns\n -------\n dict\n Dictionary with {propName: propMetadata} structure\n \"\"\"\n if 'children' in props:\n # Constructing an OrderedDict with duplicate keys, you get the order\n # from the first one but the value from the last.\n # Doing this to avoid mutating props, which can cause confusion.\n props = OrderedDict([('children', '')] + list(props.items()))\n\n return props\n\n\ndef filter_props(props):\n \"\"\"\n Filter props from the Component arguments to exclude:\n - Those without a \"type\" or a \"flowType\" field\n - Those with arg.type.name in {'func', 'symbol', 'instanceOf'}\n\n Parameters\n ----------\n props: dict\n Dictionary with {propName: propMetadata} structure\n\n Returns\n -------\n dict\n Filtered dictionary with {propName: propMetadata} structure\n\n Examples\n --------\n ```python\n prop_args = {\n 'prop1': {\n 'type': {'name': 'bool'},\n 'required': False,\n 'description': 'A description',\n 'flowType': {},\n 'defaultValue': {'value': 'false', 'computed': False},\n },\n 'prop2': {'description': 'A prop without a type'},\n 'prop3': {\n 'type': {'name': 'func'},\n 'description': 'A function prop',\n },\n }\n # filtered_prop_args is now\n # {\n # 'prop1': {\n # 'type': {'name': 'bool'},\n # 'required': False,\n # 'description': 'A description',\n # 'flowType': {},\n # 'defaultValue': {'value': 'false', 'computed': False},\n # },\n # }\n filtered_prop_args = filter_props(prop_args)\n ```\n \"\"\"\n filtered_props = copy.deepcopy(props)\n\n for arg_name, arg in list(filtered_props.items()):\n if 'type' not in arg and 'flowType' not in arg:\n filtered_props.pop(arg_name)\n continue\n\n # Filter out functions and instances --\n # these cannot be passed from Python\n if 'type' in arg: # These come from PropTypes\n arg_type = arg['type']['name']\n if arg_type in {'func', 'symbol', 'instanceOf'}:\n filtered_props.pop(arg_name)\n elif 'flowType' in arg: # These come from Flow & handled differently\n arg_type_name = arg['flowType']['name']\n if arg_type_name == 'signature':\n # This does the same as the PropTypes filter above, but \"func\"\n # is under \"type\" if \"name\" is \"signature\" vs just in \"name\"\n if 'type' not in arg['flowType'] \\\n or arg['flowType']['type'] != 'object':\n filtered_props.pop(arg_name)\n else:\n raise ValueError\n\n return filtered_props\n\n\n# pylint: disable=too-many-arguments\ndef create_prop_docstring(prop_name, type_object, required, description,\n default, indent_num, is_flow_type=False):\n \"\"\"\n Create the Dash component prop docstring\n\n Parameters\n ----------\n prop_name: str\n Name of the Dash component prop\n type_object: dict\n react-docgen-generated prop type dictionary\n required: bool\n Component is required?\n description: str\n Dash component description\n default: dict\n Either None if a default value is not defined, or\n dict containing the key 'value' that defines a\n default value for the prop\n indent_num: int\n Number of indents to use for the context block\n (creates 2 spaces for every indent)\n is_flow_type: bool\n Does the prop use Flow types? Otherwise, uses PropTypes\n\n Returns\n -------\n str\n Dash component prop docstring\n \"\"\"\n py_type_name = js_to_py_type(\n type_object=type_object,\n is_flow_type=is_flow_type,\n indent_num=indent_num + 1)\n indent_spacing = ' ' * indent_num\n\n if default is None:\n default = ''\n else:\n default = default['value']\n\n if default in ['true', 'false']:\n default = default.title()\n\n is_required = 'optional'\n if required:\n is_required = 'required'\n elif default and default not in ['null', '{}', '[]']:\n is_required = 'default {}'.format(\n default.replace('\\n', '\\n' + indent_spacing)\n )\n\n if '\\n' in py_type_name:\n return '{indent_spacing}- {name} (dict; {is_required}): ' \\\n '{description}{period}' \\\n '{name} has the following type: {type}'.format(\n indent_spacing=indent_spacing,\n name=prop_name,\n type=py_type_name,\n description=description.strip().strip('.'),\n period='. ' if description else '',\n is_required=is_required)\n return '{indent_spacing}- {name} ({type}' \\\n '{is_required}){description}'.format(\n indent_spacing=indent_spacing,\n name=prop_name,\n type='{}; '.format(py_type_name) if py_type_name else '',\n description=(\n ': {}'.format(description) if description != '' else ''\n ),\n is_required=is_required)\n\n\ndef map_js_to_py_types_prop_types(type_object):\n \"\"\"Mapping from the PropTypes js type object to the Python type\"\"\"\n\n def shape_or_exact():\n return 'dict containing keys {}.\\n{}'.format(\n ', '.join(\n \"'{}'\".format(t) for t in list(type_object['value'].keys())\n ),\n 'Those keys have the following types:\\n{}'.format(\n '\\n'.join(\n create_prop_docstring(\n prop_name=prop_name,\n type_object=prop,\n required=prop['required'],\n description=prop.get('description', ''),\n default=prop.get('defaultValue'),\n indent_num=1\n ) for prop_name, prop in\n list(type_object['value'].items())))\n )\n\n return dict(\n array=lambda: 'list',\n bool=lambda: 'boolean',\n number=lambda: 'number',\n string=lambda: 'string',\n object=lambda: 'dict',\n any=lambda: 'boolean | number | string | dict | list',\n element=lambda: 'dash component',\n node=lambda: 'a list of or a singular dash '\n 'component, string or number',\n\n # React's PropTypes.oneOf\n enum=lambda: 'a value equal to: {}'.format(\n ', '.join(\n '{}'.format(str(t['value']))\n for t in type_object['value'])),\n\n # React's PropTypes.oneOfType\n union=lambda: '{}'.format(\n ' | '.join(\n '{}'.format(js_to_py_type(subType))\n for subType in type_object['value']\n if js_to_py_type(subType) != '')),\n\n # React's PropTypes.arrayOf\n arrayOf=lambda: (\n \"list\" + ((\" of {}\").format(\n js_to_py_type(type_object[\"value\"]) + 's'\n if js_to_py_type(type_object[\"value\"]).split(' ')[0] != 'dict'\n else js_to_py_type(type_object[\"value\"]).replace(\n 'dict', 'dicts', 1\n )\n )\n if js_to_py_type(type_object[\"value\"]) != \"\"\n else \"\")\n ),\n\n # React's PropTypes.objectOf\n objectOf=lambda: (\n 'dict with strings as keys and values of type {}'\n ).format(\n js_to_py_type(type_object['value'])),\n\n # React's PropTypes.shape\n shape=shape_or_exact,\n # React's PropTypes.exact\n exact=shape_or_exact\n )\n\n\ndef map_js_to_py_types_flow_types(type_object):\n \"\"\"Mapping from the Flow js types to the Python type\"\"\"\n\n return dict(\n array=lambda: 'list',\n boolean=lambda: 'boolean',\n number=lambda: 'number',\n string=lambda: 'string',\n Object=lambda: 'dict',\n any=lambda: 'bool | number | str | dict | list',\n Element=lambda: 'dash component',\n Node=lambda: 'a list of or a singular dash '\n 'component, string or number',\n\n # React's PropTypes.oneOfType\n union=lambda: '{}'.format(\n ' | '.join(\n '{}'.format(js_to_py_type(subType))\n for subType in type_object['elements']\n if js_to_py_type(subType) != '')),\n\n # Flow's Array type\n Array=lambda: 'list{}'.format(\n ' of {}s'.format(\n js_to_py_type(type_object['elements'][0]))\n if js_to_py_type(type_object['elements'][0]) != ''\n else ''),\n\n # React's PropTypes.shape\n signature=lambda indent_num: 'dict containing keys {}.\\n{}'.format(\n ', '.join(\"'{}'\".format(d['key'])\n for d in type_object['signature']['properties']),\n '{}Those keys have the following types:\\n{}'.format(\n ' ' * indent_num,\n '\\n'.join(\n create_prop_docstring(\n prop_name=prop['key'],\n type_object=prop['value'],\n required=prop['value']['required'],\n description=prop['value'].get('description', ''),\n default=prop.get('defaultValue'),\n indent_num=indent_num,\n is_flow_type=True)\n for prop in type_object['signature']['properties']))),\n )\n\n\ndef js_to_py_type(type_object, is_flow_type=False, indent_num=0):\n \"\"\"\n Convert JS types to Python types for the component definition\n\n Parameters\n ----------\n type_object: dict\n react-docgen-generated prop type dictionary\n is_flow_type: bool\n Does the prop use Flow types? Otherwise, uses PropTypes\n indent_num: int\n Number of indents to use for the docstring for the prop\n\n Returns\n -------\n str\n Python type string\n \"\"\"\n js_type_name = type_object['name']\n js_to_py_types = map_js_to_py_types_flow_types(type_object=type_object) \\\n if is_flow_type \\\n else map_js_to_py_types_prop_types(type_object=type_object)\n\n if 'computed' in type_object and type_object['computed'] \\\n or type_object.get('type', '') == 'function':\n return ''\n if js_type_name in js_to_py_types:\n if js_type_name == 'signature': # This is a Flow object w/ signature\n return js_to_py_types[js_type_name](indent_num)\n # All other types\n return js_to_py_types[js_type_name]()\n return ''\n", "path": "dash/development/_py_components_generation.py" } ]
diff --git a/dash/development/_py_components_generation.py b/dash/development/_py_components_generation.py index 3fcd96c89f..61bfd9b228 100644 --- a/dash/development/_py_components_generation.py +++ b/dash/development/_py_components_generation.py @@ -436,6 +436,9 @@ def create_prop_docstring(prop_name, type_object, required, description, else: default = default['value'] + if default in ['true', 'false']: + default = default.title() + is_required = 'optional' if required: is_required = 'required' diff --git a/tests/unit/development/test_base_component.py b/tests/unit/development/test_base_component.py index c39fe9a766..c34c655de8 100644 --- a/tests/unit/development/test_base_component.py +++ b/tests/unit/development/test_base_component.py @@ -1041,7 +1041,7 @@ def assert_flow_docstring(assertEqual, docstring): "Keyword arguments:", "- requiredString (string; required): A required string", "- optionalString (string; default ''): A string that isn't required.", - "- optionalBoolean (boolean; default false): A boolean test", + "- optionalBoolean (boolean; default False): A boolean test", "- optionalNode (a list of or a singular dash component, string or number; optional): " "A node test",
liqd__a4-product-261
HTTP Header I'll propose to set the following HTTP header * `HttpOnly` * `X-XSS-Protection` * `X-Content-Type-Options: nosniff` * ~HSTS~ set via nginx See [OWASP headers project](https://www.owasp.org/index.php/OWASP_Secure_Headers_Project) for details
[ { "content": "\"\"\"Django settings for Beteiligung.in.\"\"\"\n\n# Build paths inside the project like this: os.path.join(BASE_DIR, ...)\nimport os\n\nCONFIG_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\nPROJECT_DIR = os.path.dirname(CONFIG_DIR)\nBASE_DIR = os.path.dirname(PROJECT_DIR)\n\n# Application definition\n\nINSTALLED_APPS = (\n\n # Watch out this needs to be included first\n 'liqd_product.apps.django_overwrites.apps.Config',\n\n 'django.contrib.sites',\n 'django.contrib.admin',\n 'django.contrib.auth',\n 'django.contrib.contenttypes',\n 'django.contrib.sessions',\n 'django.contrib.messages',\n 'django.contrib.staticfiles',\n 'django.contrib.humanize',\n\n 'widget_tweaks',\n 'rest_framework',\n 'allauth',\n 'allauth.account',\n 'rules.apps.AutodiscoverRulesConfig',\n 'easy_thumbnails',\n 'ckeditor',\n 'ckeditor_uploader',\n 'capture_tag',\n 'background_task',\n\n # Wagtail cms components\n 'wagtail.contrib.settings',\n 'wagtail.wagtailforms',\n 'wagtail.wagtailredirects',\n 'wagtail.wagtailembeds',\n 'wagtail.wagtailsites',\n 'wagtail.wagtailusers',\n 'wagtail.wagtailsnippets',\n 'wagtail.wagtaildocs',\n 'wagtail.wagtailimages',\n 'wagtail.wagtailsearch',\n 'wagtail.wagtailadmin',\n 'wagtail.wagtailcore',\n 'wagtail.contrib.wagtailstyleguide',\n 'modelcluster',\n 'taggit',\n 'liqd_product.apps.cms.pages.apps.Config',\n 'liqd_product.apps.cms.settings.apps.Config',\n\n # General adhocracy 4 components\n 'adhocracy4.actions.apps.ActionsConfig',\n 'adhocracy4.categories.apps.CategoriesConfig',\n 'adhocracy4.ckeditor.apps.CKEditorConfig',\n 'adhocracy4.comments.apps.CommentsConfig',\n 'adhocracy4.filters.apps.FiltersConfig',\n 'adhocracy4.follows.apps.FollowsConfig',\n 'adhocracy4.forms.apps.FormsConfig',\n 'adhocracy4.images.apps.ImagesConfig',\n 'adhocracy4.maps.apps.MapsConfig',\n 'adhocracy4.modules.apps.ModulesConfig',\n 'adhocracy4.organisations.apps.OrganisationsConfig',\n 'adhocracy4.phases.apps.PhasesConfig',\n 'adhocracy4.projects.apps.ProjectsConfig',\n 'adhocracy4.ratings.apps.RatingsConfig',\n 'adhocracy4.reports.apps.ReportsConfig',\n 'adhocracy4.rules.apps.RulesConfig',\n\n # General components that define models or helpers\n 'liqd_product.apps.contrib.apps.Config',\n 'liqd_product.apps.organisations.apps.Config',\n 'liqd_product.apps.partners.apps.Config',\n 'liqd_product.apps.users.apps.Config',\n 'meinberlin.apps.actions.apps.Config',\n 'meinberlin.apps.contrib.apps.Config',\n 'meinberlin.apps.maps.apps.Config',\n 'meinberlin.apps.moderatorfeedback.apps.Config',\n 'meinberlin.apps.notifications.apps.Config',\n\n # General apps containing views\n 'liqd_product.apps.account.apps.Config',\n 'meinberlin.apps.dashboard2.apps.Config',\n 'meinberlin.apps.embed.apps.Config',\n 'meinberlin.apps.exports.apps.Config',\n 'meinberlin.apps.offlineevents.apps.Config',\n 'meinberlin.apps.projects.apps.Config',\n\n # Apps defining phases\n 'meinberlin.apps.documents.apps.Config',\n 'meinberlin.apps.ideas.apps.Config',\n 'meinberlin.apps.mapideas.apps.Config',\n 'meinberlin.apps.polls.apps.Config',\n)\n\nMIDDLEWARE = (\n 'django.contrib.sessions.middleware.SessionMiddleware',\n 'django.middleware.common.CommonMiddleware',\n 'django.middleware.csrf.CsrfViewMiddleware',\n 'django.contrib.auth.middleware.AuthenticationMiddleware',\n 'django.contrib.auth.middleware.SessionAuthenticationMiddleware',\n 'django.contrib.messages.middleware.MessageMiddleware',\n 'django.middleware.clickjacking.XFrameOptionsMiddleware',\n 'django.middleware.security.SecurityMiddleware',\n 'django.middleware.locale.LocaleMiddleware',\n\n 'liqd_product.apps.partners.middleware.PartnerMiddleware',\n 'meinberlin.apps.embed.middleware.AjaxPathMiddleware',\n 'wagtail.wagtailcore.middleware.SiteMiddleware',\n 'wagtail.wagtailredirects.middleware.RedirectMiddleware',\n)\n\nSITE_ID = 1\n\nROOT_URLCONF = 'liqd_product.config.urls'\n\nLOCALE_PATHS = [os.path.join(BASE_DIR, 'locale')]\n\nTEMPLATES = [\n {\n 'BACKEND': 'django.template.backends.django.DjangoTemplates',\n 'DIRS': [\n os.path.join(PROJECT_DIR, 'templates'),\n ],\n 'APP_DIRS': True,\n 'OPTIONS': {\n 'context_processors': [\n 'django.template.context_processors.debug',\n 'django.template.context_processors.request',\n 'django.contrib.auth.context_processors.auth',\n 'django.contrib.messages.context_processors.messages',\n 'wagtail.contrib.settings.context_processors.settings'\n ],\n },\n },\n]\n\nWSGI_APPLICATION = 'liqd_product.config.wsgi.application'\n\nREVERSE_METHOD = 'liqd_product.apps.partners.urlresolvers.reverse'\n\n\n# Database\n# https://docs.djangoproject.com/en/1.8/ref/settings/#databases\n\nDATABASES = {\n 'default': {\n 'ENGINE': 'django.db.backends.sqlite3',\n 'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),\n 'TEST': {\n 'NAME': os.path.join(BASE_DIR, 'test_db.sqlite3'),\n }\n }\n}\n\n\n# Internationalization\n# https://docs.djangoproject.com/en/1.8/topics/i18n/\n\nLANGUAGE_CODE = 'en-us'\n\nTIME_ZONE = 'Europe/Berlin'\n\nUSE_I18N = True\n\nUSE_L10N = True\n\nUSE_TZ = True\n\n\n# Static files (CSS, JavaScript, Images)\n# https://docs.djangoproject.com/en/1.8/howto/static-files/\n\nSTATICFILES_DIRS = [\n os.path.join(PROJECT_DIR, 'static'),\n]\n\nSTATIC_ROOT = os.path.join(BASE_DIR, 'static')\nSTATIC_URL = '/static/'\n\nMEDIA_ROOT = os.path.join(BASE_DIR, 'media')\nMEDIA_URL = '/media/'\n\nIMAGE_ALIASES = {\n '*': {\n 'max_size': 5*10**6,\n 'fileformats': ('image/png', 'image/jpeg', 'image/gif')\n },\n 'heroimage': {'min_resolution': (1500, 500)},\n 'tileimage': {'min_resolution': (500, 300)},\n 'logo': {'min_resolution': (200, 200), 'aspect_ratio': (1, 1)},\n 'avatar': {'min_resolution': (200, 200)},\n 'idea_image': {'min_resolution': (800, 200)},\n}\n\nTHUMBNAIL_ALIASES = {\n '': {\n 'heroimage': {'size': (1500, 500), 'crop': 'smart'},\n 'heroimage_preview': {'size': (880, 220), 'crop': 'smart'},\n 'project_thumbnail': {'size': (520, 330), 'crop': 'smart'},\n 'idea_image': {'size': (800, 0), 'crop': 'scale'},\n 'idea_thumbnail': {'size': (240, 240), 'crop': 'smart'},\n 'avatar': {'size': (200, 200), 'crop': 'smart'}\n }\n}\n\nALLOWED_UPLOAD_IMAGES = ('png', 'jpeg', 'gif')\n\n\n# Authentication\n\nAUTH_USER_MODEL = 'liqd_product_users.User'\n\nAUTHENTICATION_BACKENDS = (\n 'rules.permissions.ObjectPermissionBackend',\n 'django.contrib.auth.backends.ModelBackend',\n 'allauth.account.auth_backends.AuthenticationBackend',\n)\n\nACCOUNT_ADAPTER = 'liqd_product.apps.users.adapters.AccountAdapter'\nACCOUNT_AUTHENTICATION_METHOD = 'username_email'\nACCOUNT_EMAIL_CONFIRMATION_EXPIRE_DAYS = 3\nACCOUNT_EMAIL_REQUIRED = True\nACCOUNT_EMAIL_VERIFICATION = 'mandatory'\nACCOUNT_USERNAME_REQUIRED = True\nACCOUNT_LOGIN_ATTEMPTS_LIMIT = 10\nACCOUNT_LOGIN_ATTEMPTS_TIMEOUT = 300 # seconds\nACCOUNT_LOGIN_ON_EMAIL_CONFIRMATION = True\nACCOUNT_LOGIN_ON_PASSWORD_RESET = True\nACCOUNT_SIGNUP_FORM_CLASS = 'liqd_product.apps.users.forms.TermsSignupForm'\nSOCIALACCOUNT_EMAIL_VERIFICATION = 'none'\n\nLOGIN_URL = 'account_login'\nLOGIN_REDIRECT_URL = '/'\n\nEMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'\n\n\n# CKEditor\n\nCKEDITOR_UPLOAD_PATH = \"uploads/\"\nCKEDITOR_RESTRICT_BY_USER = 'username'\nCKEDITOR_ALLOW_NONIMAGE_FILES = True\n\nCKEDITOR_CONFIGS = {\n 'default': {\n 'width': '100%',\n 'toolbar': 'Custom',\n 'toolbar_Custom': [\n ['Bold', 'Italic', 'Underline'],\n ['NumberedList', 'BulletedList'],\n ['Link', 'Unlink']\n ]\n },\n 'image-editor': {\n 'width': '100%',\n 'toolbar': 'Custom',\n 'toolbar_Custom': [\n ['Bold', 'Italic', 'Underline'],\n ['Image'],\n ['NumberedList', 'BulletedList'],\n ['Link', 'Unlink']\n ]\n },\n 'collapsible-image-editor': {\n 'width': '100%',\n 'toolbar': 'Custom',\n 'toolbar_Custom': [\n ['Bold', 'Italic', 'Underline'],\n ['Image'],\n ['NumberedList', 'BulletedList'],\n ['Link', 'Unlink'],\n ['CollapsibleItem']\n ]\n }\n}\n\nBLEACH_LIST = {\n 'default' : {\n 'tags': ['p','strong','em','u','ol','li','ul','a'],\n 'attributes': {\n 'a': ['href', 'rel'],\n },\n },\n 'image-editor': {\n 'tags': ['p','strong','em','u','ol','li','ul','a','img'],\n 'attributes': {\n 'a': ['href', 'rel'],\n 'img': ['src', 'alt', 'style']\n },\n 'styles': [\n 'float',\n 'margin',\n 'padding',\n 'width',\n 'height',\n 'margin-bottom',\n 'margin-top',\n 'margin-left',\n 'margin-right',\n ],\n },\n 'collapsible-image-editor': {\n 'tags': ['p', 'strong', 'em', 'u', 'ol', 'li', 'ul', 'a', 'img',\n 'div'],\n 'attributes': {\n 'a': ['href', 'rel'],\n 'img': ['src', 'alt', 'style'],\n 'div': ['class']\n },\n 'styles': [\n 'float',\n 'margin',\n 'padding',\n 'width',\n 'height',\n 'margin-bottom',\n 'margin-top',\n 'margin-left',\n 'margin-right',\n ],\n }\n}\n\n# Wagtail\nWAGTAIL_SITE_NAME = 'Beteiligung.in'\n\n# adhocracy4\n\nA4_ORGANISATIONS_MODEL = 'liqd_product_organisations.Organisation'\n\nA4_RATEABLES = (\n ('a4comments', 'comment'),\n ('meinberlin_ideas', 'idea'),\n ('meinberlin_mapideas', 'mapidea'),\n)\n\nA4_COMMENTABLES = (\n ('a4comments', 'comment'),\n ('meinberlin_ideas', 'idea'),\n ('meinberlin_documents', 'chapter'),\n ('meinberlin_documents', 'paragraph'),\n ('meinberlin_mapideas', 'mapidea'),\n ('meinberlin_polls', 'poll'),\n)\n\nA4_REPORTABLES = (\n ('a4comments', 'comment'),\n ('meinberlin_ideas', 'idea'),\n ('meinberlin_mapideas', 'mapidea'),\n)\n\nA4_ACTIONABLES = (\n ('a4comments', 'comment'),\n ('meinberlin_ideas', 'idea'),\n ('meinberlin_mapideas', 'mapidea'),\n)\n\nA4_AUTO_FOLLOWABLES = (\n ('a4comments', 'comment'),\n ('meinberlin_ideas', 'idea'),\n ('meinberlin_mapideas', 'mapidea'),\n)\n\nA4_CATEGORIZABLE = (\n ('meinberlin_ideas', 'idea'),\n ('meinberlin_mapideas', 'mapidea'),\n)\n\n\nA4_MAP_BASEURL = 'https://{s}.tile.openstreetmap.org/'\nA4_MAP_ATTRIBUTION = '&copy; <a href=\"http://openstreetmap.org/copyright\">OpenStreetMap</a> contributors'\nA4_MAP_BOUNDING_BOX = ([[52.3517, 13.8229], [52.6839, 12.9543]])\n\nA4_DASHBOARD = {\n 'PROJECT_DASHBOARD_CLASS': 'meinberlin.apps.dashboard2.ProjectDashboard',\n 'BLUEPRINTS': 'liqd_product.apps.dashboard.blueprints.blueprints'\n}\n\nCONTACT_EMAIL = '[email protected]'\n\n# The default language is used for emails and strings\n# that are stored translated to the database.\nDEFAULT_LANGUAGE = 'de'\n", "path": "liqd_product/config/settings/base.py" } ]
[ { "content": "\"\"\"Django settings for Beteiligung.in.\"\"\"\n\n# Build paths inside the project like this: os.path.join(BASE_DIR, ...)\nimport os\n\nCONFIG_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\nPROJECT_DIR = os.path.dirname(CONFIG_DIR)\nBASE_DIR = os.path.dirname(PROJECT_DIR)\n\n# Application definition\n\nINSTALLED_APPS = (\n\n # Watch out this needs to be included first\n 'liqd_product.apps.django_overwrites.apps.Config',\n\n 'django.contrib.sites',\n 'django.contrib.admin',\n 'django.contrib.auth',\n 'django.contrib.contenttypes',\n 'django.contrib.sessions',\n 'django.contrib.messages',\n 'django.contrib.staticfiles',\n 'django.contrib.humanize',\n\n 'widget_tweaks',\n 'rest_framework',\n 'allauth',\n 'allauth.account',\n 'rules.apps.AutodiscoverRulesConfig',\n 'easy_thumbnails',\n 'ckeditor',\n 'ckeditor_uploader',\n 'capture_tag',\n 'background_task',\n\n # Wagtail cms components\n 'wagtail.contrib.settings',\n 'wagtail.wagtailforms',\n 'wagtail.wagtailredirects',\n 'wagtail.wagtailembeds',\n 'wagtail.wagtailsites',\n 'wagtail.wagtailusers',\n 'wagtail.wagtailsnippets',\n 'wagtail.wagtaildocs',\n 'wagtail.wagtailimages',\n 'wagtail.wagtailsearch',\n 'wagtail.wagtailadmin',\n 'wagtail.wagtailcore',\n 'wagtail.contrib.wagtailstyleguide',\n 'modelcluster',\n 'taggit',\n 'liqd_product.apps.cms.pages.apps.Config',\n 'liqd_product.apps.cms.settings.apps.Config',\n\n # General adhocracy 4 components\n 'adhocracy4.actions.apps.ActionsConfig',\n 'adhocracy4.categories.apps.CategoriesConfig',\n 'adhocracy4.ckeditor.apps.CKEditorConfig',\n 'adhocracy4.comments.apps.CommentsConfig',\n 'adhocracy4.filters.apps.FiltersConfig',\n 'adhocracy4.follows.apps.FollowsConfig',\n 'adhocracy4.forms.apps.FormsConfig',\n 'adhocracy4.images.apps.ImagesConfig',\n 'adhocracy4.maps.apps.MapsConfig',\n 'adhocracy4.modules.apps.ModulesConfig',\n 'adhocracy4.organisations.apps.OrganisationsConfig',\n 'adhocracy4.phases.apps.PhasesConfig',\n 'adhocracy4.projects.apps.ProjectsConfig',\n 'adhocracy4.ratings.apps.RatingsConfig',\n 'adhocracy4.reports.apps.ReportsConfig',\n 'adhocracy4.rules.apps.RulesConfig',\n\n # General components that define models or helpers\n 'liqd_product.apps.contrib.apps.Config',\n 'liqd_product.apps.organisations.apps.Config',\n 'liqd_product.apps.partners.apps.Config',\n 'liqd_product.apps.users.apps.Config',\n 'meinberlin.apps.actions.apps.Config',\n 'meinberlin.apps.contrib.apps.Config',\n 'meinberlin.apps.maps.apps.Config',\n 'meinberlin.apps.moderatorfeedback.apps.Config',\n 'meinberlin.apps.notifications.apps.Config',\n\n # General apps containing views\n 'liqd_product.apps.account.apps.Config',\n 'meinberlin.apps.dashboard2.apps.Config',\n 'meinberlin.apps.embed.apps.Config',\n 'meinberlin.apps.exports.apps.Config',\n 'meinberlin.apps.offlineevents.apps.Config',\n 'meinberlin.apps.projects.apps.Config',\n\n # Apps defining phases\n 'meinberlin.apps.documents.apps.Config',\n 'meinberlin.apps.ideas.apps.Config',\n 'meinberlin.apps.mapideas.apps.Config',\n 'meinberlin.apps.polls.apps.Config',\n)\n\nMIDDLEWARE = (\n 'django.contrib.sessions.middleware.SessionMiddleware',\n 'django.middleware.common.CommonMiddleware',\n 'django.middleware.csrf.CsrfViewMiddleware',\n 'django.contrib.auth.middleware.AuthenticationMiddleware',\n 'django.contrib.auth.middleware.SessionAuthenticationMiddleware',\n 'django.contrib.messages.middleware.MessageMiddleware',\n 'django.middleware.clickjacking.XFrameOptionsMiddleware',\n 'django.middleware.security.SecurityMiddleware',\n 'django.middleware.locale.LocaleMiddleware',\n\n 'liqd_product.apps.partners.middleware.PartnerMiddleware',\n 'meinberlin.apps.embed.middleware.AjaxPathMiddleware',\n 'wagtail.wagtailcore.middleware.SiteMiddleware',\n 'wagtail.wagtailredirects.middleware.RedirectMiddleware',\n)\n\nSITE_ID = 1\n\nROOT_URLCONF = 'liqd_product.config.urls'\n\nLOCALE_PATHS = [os.path.join(BASE_DIR, 'locale')]\n\nTEMPLATES = [\n {\n 'BACKEND': 'django.template.backends.django.DjangoTemplates',\n 'DIRS': [\n os.path.join(PROJECT_DIR, 'templates'),\n ],\n 'APP_DIRS': True,\n 'OPTIONS': {\n 'context_processors': [\n 'django.template.context_processors.debug',\n 'django.template.context_processors.request',\n 'django.contrib.auth.context_processors.auth',\n 'django.contrib.messages.context_processors.messages',\n 'wagtail.contrib.settings.context_processors.settings'\n ],\n },\n },\n]\n\nWSGI_APPLICATION = 'liqd_product.config.wsgi.application'\n\nREVERSE_METHOD = 'liqd_product.apps.partners.urlresolvers.reverse'\n\n\n# Database\n# https://docs.djangoproject.com/en/1.8/ref/settings/#databases\n\nDATABASES = {\n 'default': {\n 'ENGINE': 'django.db.backends.sqlite3',\n 'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),\n 'TEST': {\n 'NAME': os.path.join(BASE_DIR, 'test_db.sqlite3'),\n }\n }\n}\n\n\n# Internationalization\n# https://docs.djangoproject.com/en/1.8/topics/i18n/\n\nLANGUAGE_CODE = 'en-us'\n\nTIME_ZONE = 'Europe/Berlin'\n\nUSE_I18N = True\n\nUSE_L10N = True\n\nUSE_TZ = True\n\n\n# Static files (CSS, JavaScript, Images)\n# https://docs.djangoproject.com/en/1.8/howto/static-files/\n\nSTATICFILES_DIRS = [\n os.path.join(PROJECT_DIR, 'static'),\n]\n\nSTATIC_ROOT = os.path.join(BASE_DIR, 'static')\nSTATIC_URL = '/static/'\n\nMEDIA_ROOT = os.path.join(BASE_DIR, 'media')\nMEDIA_URL = '/media/'\n\nIMAGE_ALIASES = {\n '*': {\n 'max_size': 5*10**6,\n 'fileformats': ('image/png', 'image/jpeg', 'image/gif')\n },\n 'heroimage': {'min_resolution': (1500, 500)},\n 'tileimage': {'min_resolution': (500, 300)},\n 'logo': {'min_resolution': (200, 200), 'aspect_ratio': (1, 1)},\n 'avatar': {'min_resolution': (200, 200)},\n 'idea_image': {'min_resolution': (800, 200)},\n}\n\nTHUMBNAIL_ALIASES = {\n '': {\n 'heroimage': {'size': (1500, 500), 'crop': 'smart'},\n 'heroimage_preview': {'size': (880, 220), 'crop': 'smart'},\n 'project_thumbnail': {'size': (520, 330), 'crop': 'smart'},\n 'idea_image': {'size': (800, 0), 'crop': 'scale'},\n 'idea_thumbnail': {'size': (240, 240), 'crop': 'smart'},\n 'avatar': {'size': (200, 200), 'crop': 'smart'}\n }\n}\n\nALLOWED_UPLOAD_IMAGES = ('png', 'jpeg', 'gif')\n\n\n# Authentication\n\nAUTH_USER_MODEL = 'liqd_product_users.User'\n\nAUTHENTICATION_BACKENDS = (\n 'rules.permissions.ObjectPermissionBackend',\n 'django.contrib.auth.backends.ModelBackend',\n 'allauth.account.auth_backends.AuthenticationBackend',\n)\n\nACCOUNT_ADAPTER = 'liqd_product.apps.users.adapters.AccountAdapter'\nACCOUNT_AUTHENTICATION_METHOD = 'username_email'\nACCOUNT_EMAIL_CONFIRMATION_EXPIRE_DAYS = 3\nACCOUNT_EMAIL_REQUIRED = True\nACCOUNT_EMAIL_VERIFICATION = 'mandatory'\nACCOUNT_USERNAME_REQUIRED = True\nACCOUNT_LOGIN_ATTEMPTS_LIMIT = 10\nACCOUNT_LOGIN_ATTEMPTS_TIMEOUT = 300 # seconds\nACCOUNT_LOGIN_ON_EMAIL_CONFIRMATION = True\nACCOUNT_LOGIN_ON_PASSWORD_RESET = True\nACCOUNT_SIGNUP_FORM_CLASS = 'liqd_product.apps.users.forms.TermsSignupForm'\nSOCIALACCOUNT_EMAIL_VERIFICATION = 'none'\n\nLOGIN_URL = 'account_login'\nLOGIN_REDIRECT_URL = '/'\n\nEMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'\n\n\n# CKEditor\n\nCKEDITOR_UPLOAD_PATH = \"uploads/\"\nCKEDITOR_RESTRICT_BY_USER = 'username'\nCKEDITOR_ALLOW_NONIMAGE_FILES = True\n\nCKEDITOR_CONFIGS = {\n 'default': {\n 'width': '100%',\n 'toolbar': 'Custom',\n 'toolbar_Custom': [\n ['Bold', 'Italic', 'Underline'],\n ['NumberedList', 'BulletedList'],\n ['Link', 'Unlink']\n ]\n },\n 'image-editor': {\n 'width': '100%',\n 'toolbar': 'Custom',\n 'toolbar_Custom': [\n ['Bold', 'Italic', 'Underline'],\n ['Image'],\n ['NumberedList', 'BulletedList'],\n ['Link', 'Unlink']\n ]\n },\n 'collapsible-image-editor': {\n 'width': '100%',\n 'toolbar': 'Custom',\n 'toolbar_Custom': [\n ['Bold', 'Italic', 'Underline'],\n ['Image'],\n ['NumberedList', 'BulletedList'],\n ['Link', 'Unlink'],\n ['CollapsibleItem']\n ]\n }\n}\n\nBLEACH_LIST = {\n 'default' : {\n 'tags': ['p','strong','em','u','ol','li','ul','a'],\n 'attributes': {\n 'a': ['href', 'rel'],\n },\n },\n 'image-editor': {\n 'tags': ['p','strong','em','u','ol','li','ul','a','img'],\n 'attributes': {\n 'a': ['href', 'rel'],\n 'img': ['src', 'alt', 'style']\n },\n 'styles': [\n 'float',\n 'margin',\n 'padding',\n 'width',\n 'height',\n 'margin-bottom',\n 'margin-top',\n 'margin-left',\n 'margin-right',\n ],\n },\n 'collapsible-image-editor': {\n 'tags': ['p', 'strong', 'em', 'u', 'ol', 'li', 'ul', 'a', 'img',\n 'div'],\n 'attributes': {\n 'a': ['href', 'rel'],\n 'img': ['src', 'alt', 'style'],\n 'div': ['class']\n },\n 'styles': [\n 'float',\n 'margin',\n 'padding',\n 'width',\n 'height',\n 'margin-bottom',\n 'margin-top',\n 'margin-left',\n 'margin-right',\n ],\n }\n}\n\n# Wagtail\nWAGTAIL_SITE_NAME = 'Beteiligung.in'\n\n# adhocracy4\n\nA4_ORGANISATIONS_MODEL = 'liqd_product_organisations.Organisation'\n\nA4_RATEABLES = (\n ('a4comments', 'comment'),\n ('meinberlin_ideas', 'idea'),\n ('meinberlin_mapideas', 'mapidea'),\n)\n\nA4_COMMENTABLES = (\n ('a4comments', 'comment'),\n ('meinberlin_ideas', 'idea'),\n ('meinberlin_documents', 'chapter'),\n ('meinberlin_documents', 'paragraph'),\n ('meinberlin_mapideas', 'mapidea'),\n ('meinberlin_polls', 'poll'),\n)\n\nA4_REPORTABLES = (\n ('a4comments', 'comment'),\n ('meinberlin_ideas', 'idea'),\n ('meinberlin_mapideas', 'mapidea'),\n)\n\nA4_ACTIONABLES = (\n ('a4comments', 'comment'),\n ('meinberlin_ideas', 'idea'),\n ('meinberlin_mapideas', 'mapidea'),\n)\n\nA4_AUTO_FOLLOWABLES = (\n ('a4comments', 'comment'),\n ('meinberlin_ideas', 'idea'),\n ('meinberlin_mapideas', 'mapidea'),\n)\n\nA4_CATEGORIZABLE = (\n ('meinberlin_ideas', 'idea'),\n ('meinberlin_mapideas', 'mapidea'),\n)\n\n\nA4_MAP_BASEURL = 'https://{s}.tile.openstreetmap.org/'\nA4_MAP_ATTRIBUTION = '&copy; <a href=\"http://openstreetmap.org/copyright\">OpenStreetMap</a> contributors'\nA4_MAP_BOUNDING_BOX = ([[52.3517, 13.8229], [52.6839, 12.9543]])\n\nA4_DASHBOARD = {\n 'PROJECT_DASHBOARD_CLASS': 'meinberlin.apps.dashboard2.ProjectDashboard',\n 'BLUEPRINTS': 'liqd_product.apps.dashboard.blueprints.blueprints'\n}\n\nCONTACT_EMAIL = '[email protected]'\n\n# The default language is used for emails and strings\n# that are stored translated to the database.\nDEFAULT_LANGUAGE = 'de'\n\nSECURE_BROWSER_XSS_FILTER = True\nSESSION_COOKIE_HTTPONLY = True\nSECURE_CONTENT_TYPE_NOSNIFF = True\n", "path": "liqd_product/config/settings/base.py" } ]
diff --git a/liqd_product/config/settings/base.py b/liqd_product/config/settings/base.py index 0fff0e15f..6f384ea79 100644 --- a/liqd_product/config/settings/base.py +++ b/liqd_product/config/settings/base.py @@ -386,3 +386,7 @@ # The default language is used for emails and strings # that are stored translated to the database. DEFAULT_LANGUAGE = 'de' + +SECURE_BROWSER_XSS_FILTER = True +SESSION_COOKIE_HTTPONLY = True +SECURE_CONTENT_TYPE_NOSNIFF = True
kivy__kivy-7520
kivy.uix.Video._on_eos might be called after unoad. **Software Versions** * Python: 3.7 * OS: linux * Kivy: 2.0.0 * Kivy installation method: pip **Describe the bug** When using ffpyplayer based video implementation, it's possible that ``eos`` gets set from frame fetching thread after the video has been unload, which results in an ``AttributeError``, since ``self._video`` gets set to ``None`` in ``kivy.uix.Video.unload``. **Proposed fix** Add additional check whether ``self._video`` is set in ``_do_eos`` (https://github.com/kivy/kivy/blob/master/kivy/uix/video.py#L260) ```python def _on_eos(self, *largs): if not self._video or self._video.eos != 'loop': self.state = 'stop' self.eos = True ``` Any objections? Otherwise i'd create a PR for this.
[ { "content": "'''\nVideo\n=====\n\nThe :class:`Video` widget is used to display video files and streams.\nDepending on your Video core provider, platform, and plugins, you will\nbe able to play different formats. For example, the pygame video\nprovider only supports MPEG1 on Linux and OSX. GStreamer is more\nversatile, and can read many video containers and codecs such as MKV,\nOGV, AVI, MOV, FLV (if the correct gstreamer plugins are installed). Our\n:class:`~kivy.core.video.VideoBase` implementation is used under the\nhood.\n\nVideo loading is asynchronous - many properties are not available until\nthe video is loaded (when the texture is created)::\n\n def on_position_change(instance, value):\n print('The position in the video is', value)\n\n def on_duration_change(instance, value):\n print('The duration of the video is', value)\n\n video = Video(source='PandaSneezes.avi')\n video.bind(\n position=on_position_change,\n duration=on_duration_change\n )\n\nOne can define a preview image which gets displayed until the video is\nstarted/loaded by passing ``preview`` to the constructor::\n\n video = Video(\n source='PandaSneezes.avi',\n preview='PandaSneezes_preview.png'\n )\n\nOne can display the placeholder image when the video stops by reacting on eos::\n\n def on_eos_change(self, inst, val):\n if val and self.preview:\n self.set_texture_from_resource(self.preview)\n\n video.bind(eos=on_eos_change)\n'''\n\n__all__ = ('Video', )\n\nfrom kivy.clock import Clock\nfrom kivy.uix.image import Image\nfrom kivy.core.video import Video as CoreVideo\nfrom kivy.resources import resource_find\nfrom kivy.properties import (BooleanProperty, NumericProperty, ObjectProperty,\n OptionProperty, StringProperty)\n\n\nclass Video(Image):\n '''Video class. See module documentation for more information.\n '''\n\n preview = StringProperty(None, allownone=True)\n '''Filename / source of a preview image displayed before video starts.\n\n :attr:`preview` is a :class:`~kivy.properties.StringProperty` and\n defaults to None.\n\n If set, it gets displayed until the video is loaded/started.\n\n .. versionadded:: 2.1.0\n '''\n\n state = OptionProperty('stop', options=('play', 'pause', 'stop'))\n '''String, indicates whether to play, pause, or stop the video::\n\n # start playing the video at creation\n video = Video(source='movie.mkv', state='play')\n\n # create the video, and start later\n video = Video(source='movie.mkv')\n # and later\n video.state = 'play'\n\n :attr:`state` is an :class:`~kivy.properties.OptionProperty` and defaults\n to 'stop'.\n '''\n\n play = BooleanProperty(False, deprecated=True)\n '''\n .. deprecated:: 1.4.0\n Use :attr:`state` instead.\n\n Boolean, indicates whether the video is playing or not.\n You can start/stop the video by setting this property::\n\n # start playing the video at creation\n video = Video(source='movie.mkv', play=True)\n\n # create the video, and start later\n video = Video(source='movie.mkv')\n # and later\n video.play = True\n\n :attr:`play` is a :class:`~kivy.properties.BooleanProperty` and defaults to\n False.\n\n .. deprecated:: 1.4.0\n Use :attr:`state` instead.\n '''\n\n eos = BooleanProperty(False)\n '''Boolean, indicates whether the video has finished playing or not\n (reached the end of the stream).\n\n :attr:`eos` is a :class:`~kivy.properties.BooleanProperty` and defaults to\n False.\n '''\n\n loaded = BooleanProperty(False)\n '''Boolean, indicates whether the video is loaded and ready for playback\n or not.\n\n .. versionadded:: 1.6.0\n\n :attr:`loaded` is a :class:`~kivy.properties.BooleanProperty` and defaults\n to False.\n '''\n\n position = NumericProperty(-1)\n '''Position of the video between 0 and :attr:`duration`. The position\n defaults to -1 and is set to a real position when the video is loaded.\n\n :attr:`position` is a :class:`~kivy.properties.NumericProperty` and\n defaults to -1.\n '''\n\n duration = NumericProperty(-1)\n '''Duration of the video. The duration defaults to -1, and is set to a real\n duration when the video is loaded.\n\n :attr:`duration` is a :class:`~kivy.properties.NumericProperty` and\n defaults to -1.\n '''\n\n volume = NumericProperty(1.)\n '''Volume of the video, in the range 0-1. 1 means full volume, 0\n means mute.\n\n :attr:`volume` is a :class:`~kivy.properties.NumericProperty` and defaults\n to 1.\n '''\n\n options = ObjectProperty({})\n '''Options to pass at Video core object creation.\n\n .. versionadded:: 1.0.4\n\n :attr:`options` is an :class:`kivy.properties.ObjectProperty` and defaults\n to {}.\n '''\n\n _video_load_event = None\n\n def __init__(self, **kwargs):\n self._video = None\n super(Video, self).__init__(**kwargs)\n self.fbind('source', self._trigger_video_load)\n\n if \"eos\" in kwargs:\n self.options[\"eos\"] = kwargs[\"eos\"]\n if self.source:\n self._trigger_video_load()\n\n def texture_update(self, *largs):\n if self.preview:\n self.set_texture_from_resource(self.preview)\n else:\n self.set_texture_from_resource(self.source)\n\n def seek(self, percent, precise=True):\n '''Change the position to a percentage (strictly, a proportion)\n of duration.\n\n :Parameters:\n `percent`: float or int\n Position to seek as a proportion of the total duration,\n must be between 0-1.\n `precise`: bool, defaults to True\n Precise seeking is slower, but seeks to exact requested\n percent.\n\n .. warning::\n Calling seek() before the video is loaded has no effect.\n\n .. versionadded:: 1.2.0\n\n .. versionchanged:: 1.10.1\n The `precise` keyword argument has been added.\n '''\n if self._video is None:\n raise Exception('Video not loaded.')\n self._video.seek(percent, precise=precise)\n\n def _trigger_video_load(self, *largs):\n ev = self._video_load_event\n if ev is None:\n ev = self._video_load_event = Clock.schedule_once(\n self._do_video_load, -1)\n ev()\n\n def _do_video_load(self, *largs):\n if CoreVideo is None:\n return\n self.unload()\n if not self.source:\n self._video = None\n self.texture = None\n else:\n filename = self.source\n # Check if filename is not url\n if '://' not in filename:\n filename = resource_find(filename)\n self._video = CoreVideo(filename=filename, **self.options)\n self._video.volume = self.volume\n self._video.bind(on_load=self._on_load,\n on_frame=self._on_video_frame,\n on_eos=self._on_eos)\n if self.state == 'play' or self.play:\n self._video.play()\n self.duration = 1.\n self.position = 0.\n\n def on_play(self, instance, value):\n value = 'play' if value else 'stop'\n return self.on_state(instance, value)\n\n def on_state(self, instance, value):\n if not self._video:\n return\n if value == 'play':\n if self.eos:\n self._video.stop()\n self._video.position = 0.\n self.eos = False\n self._video.play()\n elif value == 'pause':\n self._video.pause()\n else:\n self._video.stop()\n self._video.position = 0\n\n def _on_video_frame(self, *largs):\n video = self._video\n if not video:\n return\n self.duration = video.duration\n self.position = video.position\n self.texture = video.texture\n self.canvas.ask_update()\n\n def _on_eos(self, *largs):\n if self._video.eos != 'loop':\n self.state = 'stop'\n self.eos = True\n\n def _on_load(self, *largs):\n self.loaded = True\n self._on_video_frame(largs)\n\n def on_volume(self, instance, value):\n if self._video:\n self._video.volume = value\n\n def unload(self):\n '''Unload the video. The playback will be stopped.\n\n .. versionadded:: 1.8.0\n '''\n if self._video:\n self._video.stop()\n self._video.unload()\n self._video = None\n self.loaded = False\n\n\nif __name__ == '__main__':\n from kivy.app import App\n import sys\n\n if len(sys.argv) != 2:\n print(\"usage: %s file\" % sys.argv[0])\n sys.exit(1)\n\n class VideoApp(App):\n def build(self):\n self.v = Video(source=sys.argv[1], state='play')\n self.v.bind(state=self.replay)\n return self.v\n\n def replay(self, *args):\n if self.v.state == 'stop':\n self.v.state = 'play'\n\n VideoApp().run()\n", "path": "kivy/uix/video.py" } ]
[ { "content": "'''\nVideo\n=====\n\nThe :class:`Video` widget is used to display video files and streams.\nDepending on your Video core provider, platform, and plugins, you will\nbe able to play different formats. For example, the pygame video\nprovider only supports MPEG1 on Linux and OSX. GStreamer is more\nversatile, and can read many video containers and codecs such as MKV,\nOGV, AVI, MOV, FLV (if the correct gstreamer plugins are installed). Our\n:class:`~kivy.core.video.VideoBase` implementation is used under the\nhood.\n\nVideo loading is asynchronous - many properties are not available until\nthe video is loaded (when the texture is created)::\n\n def on_position_change(instance, value):\n print('The position in the video is', value)\n\n def on_duration_change(instance, value):\n print('The duration of the video is', value)\n\n video = Video(source='PandaSneezes.avi')\n video.bind(\n position=on_position_change,\n duration=on_duration_change\n )\n\nOne can define a preview image which gets displayed until the video is\nstarted/loaded by passing ``preview`` to the constructor::\n\n video = Video(\n source='PandaSneezes.avi',\n preview='PandaSneezes_preview.png'\n )\n\nOne can display the placeholder image when the video stops by reacting on eos::\n\n def on_eos_change(self, inst, val):\n if val and self.preview:\n self.set_texture_from_resource(self.preview)\n\n video.bind(eos=on_eos_change)\n'''\n\n__all__ = ('Video', )\n\nfrom kivy.clock import Clock\nfrom kivy.uix.image import Image\nfrom kivy.core.video import Video as CoreVideo\nfrom kivy.resources import resource_find\nfrom kivy.properties import (BooleanProperty, NumericProperty, ObjectProperty,\n OptionProperty, StringProperty)\n\n\nclass Video(Image):\n '''Video class. See module documentation for more information.\n '''\n\n preview = StringProperty(None, allownone=True)\n '''Filename / source of a preview image displayed before video starts.\n\n :attr:`preview` is a :class:`~kivy.properties.StringProperty` and\n defaults to None.\n\n If set, it gets displayed until the video is loaded/started.\n\n .. versionadded:: 2.1.0\n '''\n\n state = OptionProperty('stop', options=('play', 'pause', 'stop'))\n '''String, indicates whether to play, pause, or stop the video::\n\n # start playing the video at creation\n video = Video(source='movie.mkv', state='play')\n\n # create the video, and start later\n video = Video(source='movie.mkv')\n # and later\n video.state = 'play'\n\n :attr:`state` is an :class:`~kivy.properties.OptionProperty` and defaults\n to 'stop'.\n '''\n\n play = BooleanProperty(False, deprecated=True)\n '''\n .. deprecated:: 1.4.0\n Use :attr:`state` instead.\n\n Boolean, indicates whether the video is playing or not.\n You can start/stop the video by setting this property::\n\n # start playing the video at creation\n video = Video(source='movie.mkv', play=True)\n\n # create the video, and start later\n video = Video(source='movie.mkv')\n # and later\n video.play = True\n\n :attr:`play` is a :class:`~kivy.properties.BooleanProperty` and defaults to\n False.\n\n .. deprecated:: 1.4.0\n Use :attr:`state` instead.\n '''\n\n eos = BooleanProperty(False)\n '''Boolean, indicates whether the video has finished playing or not\n (reached the end of the stream).\n\n :attr:`eos` is a :class:`~kivy.properties.BooleanProperty` and defaults to\n False.\n '''\n\n loaded = BooleanProperty(False)\n '''Boolean, indicates whether the video is loaded and ready for playback\n or not.\n\n .. versionadded:: 1.6.0\n\n :attr:`loaded` is a :class:`~kivy.properties.BooleanProperty` and defaults\n to False.\n '''\n\n position = NumericProperty(-1)\n '''Position of the video between 0 and :attr:`duration`. The position\n defaults to -1 and is set to a real position when the video is loaded.\n\n :attr:`position` is a :class:`~kivy.properties.NumericProperty` and\n defaults to -1.\n '''\n\n duration = NumericProperty(-1)\n '''Duration of the video. The duration defaults to -1, and is set to a real\n duration when the video is loaded.\n\n :attr:`duration` is a :class:`~kivy.properties.NumericProperty` and\n defaults to -1.\n '''\n\n volume = NumericProperty(1.)\n '''Volume of the video, in the range 0-1. 1 means full volume, 0\n means mute.\n\n :attr:`volume` is a :class:`~kivy.properties.NumericProperty` and defaults\n to 1.\n '''\n\n options = ObjectProperty({})\n '''Options to pass at Video core object creation.\n\n .. versionadded:: 1.0.4\n\n :attr:`options` is an :class:`kivy.properties.ObjectProperty` and defaults\n to {}.\n '''\n\n _video_load_event = None\n\n def __init__(self, **kwargs):\n self._video = None\n super(Video, self).__init__(**kwargs)\n self.fbind('source', self._trigger_video_load)\n\n if \"eos\" in kwargs:\n self.options[\"eos\"] = kwargs[\"eos\"]\n if self.source:\n self._trigger_video_load()\n\n def texture_update(self, *largs):\n if self.preview:\n self.set_texture_from_resource(self.preview)\n else:\n self.set_texture_from_resource(self.source)\n\n def seek(self, percent, precise=True):\n '''Change the position to a percentage (strictly, a proportion)\n of duration.\n\n :Parameters:\n `percent`: float or int\n Position to seek as a proportion of the total duration,\n must be between 0-1.\n `precise`: bool, defaults to True\n Precise seeking is slower, but seeks to exact requested\n percent.\n\n .. warning::\n Calling seek() before the video is loaded has no effect.\n\n .. versionadded:: 1.2.0\n\n .. versionchanged:: 1.10.1\n The `precise` keyword argument has been added.\n '''\n if self._video is None:\n raise Exception('Video not loaded.')\n self._video.seek(percent, precise=precise)\n\n def _trigger_video_load(self, *largs):\n ev = self._video_load_event\n if ev is None:\n ev = self._video_load_event = Clock.schedule_once(\n self._do_video_load, -1)\n ev()\n\n def _do_video_load(self, *largs):\n if CoreVideo is None:\n return\n self.unload()\n if not self.source:\n self._video = None\n self.texture = None\n else:\n filename = self.source\n # Check if filename is not url\n if '://' not in filename:\n filename = resource_find(filename)\n self._video = CoreVideo(filename=filename, **self.options)\n self._video.volume = self.volume\n self._video.bind(on_load=self._on_load,\n on_frame=self._on_video_frame,\n on_eos=self._on_eos)\n if self.state == 'play' or self.play:\n self._video.play()\n self.duration = 1.\n self.position = 0.\n\n def on_play(self, instance, value):\n value = 'play' if value else 'stop'\n return self.on_state(instance, value)\n\n def on_state(self, instance, value):\n if not self._video:\n return\n if value == 'play':\n if self.eos:\n self._video.stop()\n self._video.position = 0.\n self.eos = False\n self._video.play()\n elif value == 'pause':\n self._video.pause()\n else:\n self._video.stop()\n self._video.position = 0\n\n def _on_video_frame(self, *largs):\n video = self._video\n if not video:\n return\n self.duration = video.duration\n self.position = video.position\n self.texture = video.texture\n self.canvas.ask_update()\n\n def _on_eos(self, *largs):\n if not self._video or self._video.eos != 'loop':\n self.state = 'stop'\n self.eos = True\n\n def _on_load(self, *largs):\n self.loaded = True\n self._on_video_frame(largs)\n\n def on_volume(self, instance, value):\n if self._video:\n self._video.volume = value\n\n def unload(self):\n '''Unload the video. The playback will be stopped.\n\n .. versionadded:: 1.8.0\n '''\n if self._video:\n self._video.stop()\n self._video.unload()\n self._video = None\n self.loaded = False\n\n\nif __name__ == '__main__':\n from kivy.app import App\n import sys\n\n if len(sys.argv) != 2:\n print(\"usage: %s file\" % sys.argv[0])\n sys.exit(1)\n\n class VideoApp(App):\n def build(self):\n self.v = Video(source=sys.argv[1], state='play')\n self.v.bind(state=self.replay)\n return self.v\n\n def replay(self, *args):\n if self.v.state == 'stop':\n self.v.state = 'play'\n\n VideoApp().run()\n", "path": "kivy/uix/video.py" } ]
diff --git a/kivy/uix/video.py b/kivy/uix/video.py index acdc6328f9..935dec70db 100644 --- a/kivy/uix/video.py +++ b/kivy/uix/video.py @@ -257,7 +257,7 @@ def _on_video_frame(self, *largs): self.canvas.ask_update() def _on_eos(self, *largs): - if self._video.eos != 'loop': + if not self._video or self._video.eos != 'loop': self.state = 'stop' self.eos = True
nilearn__nilearn-3337
Spelling Error <!--Describe your proposed enhancement in detail.--> I think the authors meant to describe ADHD but have written ADHD as AHDH. It is just a simple spelling or typographic error. <!--List any pages that would be impacted by the enhancement.--> ### Affected pages 1. https://nilearn.github.io/dev/auto_examples/04_glm_first_level/plot_adhd_dmn.html#sphx-glr-auto-examples-04-glm-first-level-plot-adhd-dmn-py 2. https://nilearn.github.io/dev/glm/first_level_model.html#fitting-a-first-level-model
[ { "content": "\"\"\"Default Mode Network extraction of AHDH dataset\n===============================================\n\nThis example shows a full step-by-step workflow of fitting a GLM to data\nextracted from a seed on the Posterior Cingulate Cortex and saving the results.\n\nMore specifically:\n\n1. A sequence of fMRI volumes are loaded.\n2. A design matrix with the Posterior Cingulate Cortex seed is defined.\n3. A GLM is applied to the dataset (effect/covariance, then contrast estimation).\n4. The Default Mode Network is displayed.\n\n.. include:: ../../../examples/masker_note.rst\n\n\"\"\"\nimport numpy as np\n\nfrom nilearn import datasets, plotting\nfrom nilearn.maskers import NiftiSpheresMasker\n\nfrom nilearn.glm.first_level import FirstLevelModel\nfrom nilearn.glm.first_level import make_first_level_design_matrix\n\n#########################################################################\n# Prepare data and analysis parameters\n# -------------------------------------\n# Prepare the data.\nadhd_dataset = datasets.fetch_adhd(n_subjects=1)\n\n# Prepare timing\nt_r = 2.\nslice_time_ref = 0.\nn_scans = 176\n\n# Prepare seed\npcc_coords = (0, -53, 26)\n\n#########################################################################\n# Estimate contrasts\n# ------------------\n# Specify the contrasts.\nseed_masker = NiftiSpheresMasker([pcc_coords], radius=10, detrend=True,\n standardize=True, low_pass=0.1,\n high_pass=0.01, t_r=2.,\n memory='nilearn_cache',\n memory_level=1, verbose=0)\nseed_time_series = seed_masker.fit_transform(adhd_dataset.func[0])\nframetimes = np.linspace(0, (n_scans - 1) * t_r, n_scans)\ndesign_matrix = make_first_level_design_matrix(frametimes, hrf_model='spm',\n add_regs=seed_time_series,\n add_reg_names=[\"pcc_seed\"])\ndmn_contrast = np.array([1] + [0] * (design_matrix.shape[1] - 1))\ncontrasts = {'seed_based_glm': dmn_contrast}\n\n#########################################################################\n# Perform first level analysis\n# ----------------------------\n# Setup and fit GLM.\nfirst_level_model = FirstLevelModel(t_r=t_r, slice_time_ref=slice_time_ref)\nfirst_level_model = first_level_model.fit(run_imgs=adhd_dataset.func[0],\n design_matrices=design_matrix)\n\n#########################################################################\n# Estimate the contrast.\nprint('Contrast seed_based_glm computed.')\nz_map = first_level_model.compute_contrast(contrasts['seed_based_glm'],\n output_type='z_score')\n\n# Saving snapshots of the contrasts\nfilename = 'dmn_z_map.png'\ndisplay = plotting.plot_stat_map(z_map, threshold=3.0, title='Seed based GLM',\n cut_coords=pcc_coords)\ndisplay.add_markers(marker_coords=[pcc_coords], marker_color='g',\n marker_size=300)\ndisplay.savefig(filename)\nprint(\"Save z-map in '{0}'.\".format(filename))\n\n###########################################################################\n# Generating a report\n# -------------------\n# It can be useful to quickly generate a\n# portable, ready-to-view report with most of the pertinent information.\n# This is easy to do if you have a fitted model and the list of contrasts,\n# which we do here.\n\nfrom nilearn.reporting import make_glm_report\n\nreport = make_glm_report(first_level_model,\n contrasts=contrasts,\n title='ADHD DMN Report',\n cluster_threshold=15,\n min_distance=8.,\n plot_type='glass',\n )\n\n#########################################################################\n# We have several ways to access the report:\n\n# report # This report can be viewed in a notebook\n# report.save_as_html('report.html')\n# report.open_in_browser()\n", "path": "examples/04_glm_first_level/plot_adhd_dmn.py" } ]
[ { "content": "\"\"\"Default Mode Network extraction of ADHD dataset\n===============================================\n\nThis example shows a full step-by-step workflow of fitting a GLM to data\nextracted from a seed on the Posterior Cingulate Cortex and saving the results.\n\nMore specifically:\n\n1. A sequence of fMRI volumes are loaded.\n2. A design matrix with the Posterior Cingulate Cortex seed is defined.\n3. A GLM is applied to the dataset (effect/covariance, then contrast estimation).\n4. The Default Mode Network is displayed.\n\n.. include:: ../../../examples/masker_note.rst\n\n\"\"\"\nimport numpy as np\n\nfrom nilearn import datasets, plotting\nfrom nilearn.maskers import NiftiSpheresMasker\n\nfrom nilearn.glm.first_level import FirstLevelModel\nfrom nilearn.glm.first_level import make_first_level_design_matrix\n\n#########################################################################\n# Prepare data and analysis parameters\n# -------------------------------------\n# Prepare the data.\nadhd_dataset = datasets.fetch_adhd(n_subjects=1)\n\n# Prepare timing\nt_r = 2.\nslice_time_ref = 0.\nn_scans = 176\n\n# Prepare seed\npcc_coords = (0, -53, 26)\n\n#########################################################################\n# Estimate contrasts\n# ------------------\n# Specify the contrasts.\nseed_masker = NiftiSpheresMasker([pcc_coords], radius=10, detrend=True,\n standardize=True, low_pass=0.1,\n high_pass=0.01, t_r=2.,\n memory='nilearn_cache',\n memory_level=1, verbose=0)\nseed_time_series = seed_masker.fit_transform(adhd_dataset.func[0])\nframetimes = np.linspace(0, (n_scans - 1) * t_r, n_scans)\ndesign_matrix = make_first_level_design_matrix(frametimes, hrf_model='spm',\n add_regs=seed_time_series,\n add_reg_names=[\"pcc_seed\"])\ndmn_contrast = np.array([1] + [0] * (design_matrix.shape[1] - 1))\ncontrasts = {'seed_based_glm': dmn_contrast}\n\n#########################################################################\n# Perform first level analysis\n# ----------------------------\n# Setup and fit GLM.\nfirst_level_model = FirstLevelModel(t_r=t_r, slice_time_ref=slice_time_ref)\nfirst_level_model = first_level_model.fit(run_imgs=adhd_dataset.func[0],\n design_matrices=design_matrix)\n\n#########################################################################\n# Estimate the contrast.\nprint('Contrast seed_based_glm computed.')\nz_map = first_level_model.compute_contrast(contrasts['seed_based_glm'],\n output_type='z_score')\n\n# Saving snapshots of the contrasts\nfilename = 'dmn_z_map.png'\ndisplay = plotting.plot_stat_map(z_map, threshold=3.0, title='Seed based GLM',\n cut_coords=pcc_coords)\ndisplay.add_markers(marker_coords=[pcc_coords], marker_color='g',\n marker_size=300)\ndisplay.savefig(filename)\nprint(\"Save z-map in '{0}'.\".format(filename))\n\n###########################################################################\n# Generating a report\n# -------------------\n# It can be useful to quickly generate a\n# portable, ready-to-view report with most of the pertinent information.\n# This is easy to do if you have a fitted model and the list of contrasts,\n# which we do here.\n\nfrom nilearn.reporting import make_glm_report\n\nreport = make_glm_report(first_level_model,\n contrasts=contrasts,\n title='ADHD DMN Report',\n cluster_threshold=15,\n min_distance=8.,\n plot_type='glass',\n )\n\n#########################################################################\n# We have several ways to access the report:\n\n# report # This report can be viewed in a notebook\n# report.save_as_html('report.html')\n# report.open_in_browser()\n", "path": "examples/04_glm_first_level/plot_adhd_dmn.py" } ]
diff --git a/doc/changes/latest.rst b/doc/changes/latest.rst index 1d92f16d2b..5a8f083142 100644 --- a/doc/changes/latest.rst +++ b/doc/changes/latest.rst @@ -29,6 +29,7 @@ Fixes Now if band-pass elements are equal :func:`~nilearn.signal.butterworth` returns an unfiltered signal with a warning (:gh:`3293` by `Yasmin Mzayek`_). - The parameter ``alpha`` is now correctly passed to :func:`~plotting.plot_glass_brain` in :func:`~plotting.plot_connectome` (:gh:`3306` by `Koen Helwegen`_). - Fix plotting of background image in :func:`~nilearn.plotting.view_img` when the background is not the MNI template (:gh:`3312` by `Jerome Dockes`_). +- Fix the typographic error on the page :ref:`sphx_glr_auto_examples_04_glm_first_level_plot_adhd_dmn.py` (:gh:`3337` by `Sachin Patalasingh`_). Enhancements ------------ diff --git a/doc/changes/names.rst b/doc/changes/names.rst index a25ccc4f71..9158fb44f2 100644 --- a/doc/changes/names.rst +++ b/doc/changes/names.rst @@ -190,6 +190,8 @@ .. _Ryan Hammonds: https://github.com/ryanhammonds +.. _Sachin Patalasingh: https://github.com/sachin-bsai + .. _Salma Bougacha: https://github.com/salma1601 .. _Sami Jawhar: https://github.com/sjawhar diff --git a/examples/04_glm_first_level/plot_adhd_dmn.py b/examples/04_glm_first_level/plot_adhd_dmn.py index 708d11bccf..8a233dd99e 100644 --- a/examples/04_glm_first_level/plot_adhd_dmn.py +++ b/examples/04_glm_first_level/plot_adhd_dmn.py @@ -1,4 +1,4 @@ -"""Default Mode Network extraction of AHDH dataset +"""Default Mode Network extraction of ADHD dataset =============================================== This example shows a full step-by-step workflow of fitting a GLM to data
ivy-llc__ivy-18341
leaky_relu Paddle Frontend
[ { "content": "# local\nimport ivy\nfrom ivy.func_wrapper import with_supported_dtypes\nfrom ivy.functional.frontends.paddle.func_wrapper import to_ivy_arrays_and_back\nfrom ivy.functional.frontends.paddle.tensor.math import tanh as paddle_tanh\n\n\n@with_supported_dtypes({\"2.5.0 and below\": (\"float32\", \"float64\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef selu(\n x,\n /,\n *,\n alpha=1.6732632423543772848170429916717,\n scale=1.0507009873554804934193349852946,\n name=None,\n):\n if scale <= 1.0:\n raise ValueError(f\"The scale must be greater than 1.0. Received: {scale}.\")\n\n if alpha < 0:\n raise ValueError(f\"The alpha must be no less than zero. Received: {alpha}.\")\n\n ret = ivy.where(x > 0, x, alpha * ivy.expm1(x))\n arr = scale * ret\n return ivy.astype(arr, x.dtype)\n\n\ntanh = paddle_tanh\n\n\n@with_supported_dtypes({\"2.5.0 and below\": (\"float32\", \"float64\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef hardshrink(x, threshold=0.5, name=None):\n mask = ivy.logical_or(ivy.greater(x, threshold), ivy.less(x, -threshold))\n return ivy.where(mask, x, 0.0)\n\n\n@with_supported_dtypes({\"2.5.0 and below\": (\"float32\", \"float64\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef hardswish(x, name=None):\n relu6_val = ivy.relu6(ivy.add(x, 3))\n ret = ivy.multiply(x, ivy.divide(relu6_val, 6))\n return ret\n\n\n@with_supported_dtypes({\"2.5.0 and below\": (\"float32\", \"float64\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef hardtanh(\n x,\n /,\n *,\n min=-1.0,\n max=1.0,\n name=None,\n):\n less = ivy.where(ivy.less(x, min), min, x)\n ret = ivy.where(ivy.greater(x, max), max, less).astype(x.dtype)\n return ret\n\n\n@with_supported_dtypes({\"2.5.0 and below\": (\"float32\", \"float64\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef gelu(x, approximate=False, name=None):\n return ivy.gelu(x, approximate=approximate)\n\n\n@with_supported_dtypes({\"2.5.0 and below\": (\"float32\", \"float64\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef hardsigmoid(x, slope=0.1666667, offset=0.5, name=None):\n ret = ivy.minimum(ivy.maximum(ivy.add(ivy.multiply(x, slope), offset), 0), 1)\n return ret\n\n\n@with_supported_dtypes({\"2.5.0 and below\": (\"float32\", \"float64\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef relu6(x, name=None):\n return ivy.relu6(x)\n\n\n@with_supported_dtypes({\"2.5.0 and below\": (\"float32\", \"float64\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef softshrink(\n x,\n /,\n *,\n threshold=0.5,\n name=None,\n):\n low = ivy.where(ivy.less(x, -threshold), ivy.add(x, threshold), 0)\n up = ivy.where(ivy.greater(x, threshold), ivy.subtract(x, threshold), 0)\n add = ivy.add(low, up)\n return ivy.astype(add, x.dtype)\n\n\n@with_supported_dtypes({\"2.5.0 and below\": (\"float32\", \"float64\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef softsign(\n x,\n /,\n *,\n name=None,\n):\n return ivy.divide(x, ivy.add(1, ivy.abs(x)))\n\n\n@with_supported_dtypes({\"2.5.0 and below\": (\"float32\", \"float64\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef log_softmax(x, axis=-1, dtype=None, name=None):\n x = ivy.astype(x, dtype) if dtype else x\n ret = ivy.log_softmax(x, axis=axis)\n ret = ivy.astype(ret, dtype) if dtype else ret\n return ret\n\n\n@with_supported_dtypes({\"2.5.0 and below\": (\"float32\", \"float64\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef prelu(x, weight, data_format=\"NCHW\", name=None):\n return ivy.add(ivy.maximum(0, x), ivy.multiply(weight, ivy.minimum(0, x)))\n\n\n@with_supported_dtypes({\"2.5.0 and below\": (\"float32\", \"float64\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef celu(\n x,\n /,\n *,\n alpha=1.0,\n name=None,\n):\n prod = alpha * (ivy.exp(x / alpha) - 1)\n ret = ivy.maximum(0, x) + ivy.minimum(0, prod)\n return ret\n\n\n@with_supported_dtypes({\"2.5.0 and below\": (\"float32\", \"float64\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef rrelu(\n x,\n /,\n *,\n lower=0.125,\n upper=0.3333333333333333,\n training=False,\n name=None,\n):\n if lower < 0 or lower > 1:\n raise ValueError(\n \"The lower value must be no less than zero or greater than one. Received:\"\n f\" {lower}.\"\n )\n\n if upper < lower:\n raise ValueError(\n \"The upper value must be greater than lower value. Received: lower\"\n f\" {lower}, upper {upper}.\"\n )\n\n if upper > 1:\n raise ValueError(\n f\"The upper value must be no greater than one. Received: {upper}.\"\n )\n\n is_test = not training\n if is_test:\n add = lower + upper\n ret = add * x * 0.5\n out = ivy.where(x >= 0, x, ret)\n return out.astype(x.dtype)\n # else:\n # ToDo implement a correctly after fixing ivy.random_uniform\n # a = ivy.random_normal(low=lower, high=upper)\n # ret = ivy.where(x >= 0, x, ivy.multiply(a, x))\n # return ret.astype(x.dtype)\n\n\n@with_supported_dtypes({\"2.5.0 and below\": (\"float32\", \"float64\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef tanhshrink(\n x,\n /,\n *,\n name=None,\n):\n return ivy.subtract(x, ivy.tanh(x))\n\n\n@with_supported_dtypes({\"2.5.0 and below\": (\"float32\", \"float64\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef relu_(x, name=None):\n ret = ivy.relu(x)\n ivy.inplace_update(x, ret)\n return x\n\n\n@with_supported_dtypes({\"2.5.0 and below\": (\"float32\", \"float64\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef elu(\n x,\n /,\n *,\n alpha=1.0,\n name=None,\n):\n return ivy.elu(x, alpha=alpha)\n\n\n@with_supported_dtypes({\"2.5.0 and below\": (\"float32\", \"float64\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef mish(x, name=None):\n return ivy.mish(x)\n", "path": "ivy/functional/frontends/paddle/nn/functional/activation.py" } ]
[ { "content": "# local\nimport ivy\nfrom ivy.func_wrapper import with_supported_dtypes\nfrom ivy.functional.frontends.paddle.func_wrapper import to_ivy_arrays_and_back\nfrom ivy.functional.frontends.paddle.tensor.math import tanh as paddle_tanh\n\n\n@with_supported_dtypes({\"2.5.0 and below\": (\"float32\", \"float64\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef selu(\n x,\n /,\n *,\n alpha=1.6732632423543772848170429916717,\n scale=1.0507009873554804934193349852946,\n name=None,\n):\n if scale <= 1.0:\n raise ValueError(f\"The scale must be greater than 1.0. Received: {scale}.\")\n\n if alpha < 0:\n raise ValueError(f\"The alpha must be no less than zero. Received: {alpha}.\")\n\n ret = ivy.where(x > 0, x, alpha * ivy.expm1(x))\n arr = scale * ret\n return ivy.astype(arr, x.dtype)\n\n\ntanh = paddle_tanh\n\n\n@with_supported_dtypes({\"2.5.0 and below\": (\"float32\", \"float64\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef hardshrink(x, threshold=0.5, name=None):\n mask = ivy.logical_or(ivy.greater(x, threshold), ivy.less(x, -threshold))\n return ivy.where(mask, x, 0.0)\n\n\n@with_supported_dtypes({\"2.5.0 and below\": (\"float32\", \"float64\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef hardswish(x, name=None):\n relu6_val = ivy.relu6(ivy.add(x, 3))\n ret = ivy.multiply(x, ivy.divide(relu6_val, 6))\n return ret\n\n\n@with_supported_dtypes({\"2.5.0 and below\": (\"float32\", \"float64\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef hardtanh(\n x,\n /,\n *,\n min=-1.0,\n max=1.0,\n name=None,\n):\n less = ivy.where(ivy.less(x, min), min, x)\n ret = ivy.where(ivy.greater(x, max), max, less).astype(x.dtype)\n return ret\n\n\n@with_supported_dtypes({\"2.5.0 and below\": (\"float32\", \"float64\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef gelu(x, approximate=False, name=None):\n return ivy.gelu(x, approximate=approximate)\n\n\n@with_supported_dtypes({\"2.5.0 and below\": (\"float32\", \"float64\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef hardsigmoid(x, slope=0.1666667, offset=0.5, name=None):\n ret = ivy.minimum(ivy.maximum(ivy.add(ivy.multiply(x, slope), offset), 0), 1)\n return ret\n\n\n@with_supported_dtypes({\"2.5.0 and below\": (\"float32\", \"float64\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef relu6(x, name=None):\n return ivy.relu6(x)\n\n\n@with_supported_dtypes({\"2.5.0 and below\": (\"float32\", \"float64\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef softshrink(\n x,\n /,\n *,\n threshold=0.5,\n name=None,\n):\n low = ivy.where(ivy.less(x, -threshold), ivy.add(x, threshold), 0)\n up = ivy.where(ivy.greater(x, threshold), ivy.subtract(x, threshold), 0)\n add = ivy.add(low, up)\n return ivy.astype(add, x.dtype)\n\n\n@with_supported_dtypes({\"2.5.0 and below\": (\"float32\", \"float64\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef softsign(\n x,\n /,\n *,\n name=None,\n):\n return ivy.divide(x, ivy.add(1, ivy.abs(x)))\n\n\n@with_supported_dtypes({\"2.5.0 and below\": (\"float32\", \"float64\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef log_softmax(x, axis=-1, dtype=None, name=None):\n x = ivy.astype(x, dtype) if dtype else x\n ret = ivy.log_softmax(x, axis=axis)\n ret = ivy.astype(ret, dtype) if dtype else ret\n return ret\n\n\n@with_supported_dtypes({\"2.5.0 and below\": (\"float32\", \"float64\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef prelu(x, weight, data_format=\"NCHW\", name=None):\n return ivy.add(ivy.maximum(0, x), ivy.multiply(weight, ivy.minimum(0, x)))\n\n\n@with_supported_dtypes({\"2.5.0 and below\": (\"float32\", \"float64\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef celu(\n x,\n /,\n *,\n alpha=1.0,\n name=None,\n):\n prod = alpha * (ivy.exp(x / alpha) - 1)\n ret = ivy.maximum(0, x) + ivy.minimum(0, prod)\n return ret\n\n\n@with_supported_dtypes({\"2.5.0 and below\": (\"float32\", \"float64\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef rrelu(\n x,\n /,\n *,\n lower=0.125,\n upper=0.3333333333333333,\n training=False,\n name=None,\n):\n if lower < 0 or lower > 1:\n raise ValueError(\n \"The lower value must be no less than zero or greater than one. Received:\"\n f\" {lower}.\"\n )\n\n if upper < lower:\n raise ValueError(\n \"The upper value must be greater than lower value. Received: lower\"\n f\" {lower}, upper {upper}.\"\n )\n\n if upper > 1:\n raise ValueError(\n f\"The upper value must be no greater than one. Received: {upper}.\"\n )\n\n is_test = not training\n if is_test:\n add = lower + upper\n ret = add * x * 0.5\n out = ivy.where(x >= 0, x, ret)\n return out.astype(x.dtype)\n # else:\n # ToDo implement a correctly after fixing ivy.random_uniform\n # a = ivy.random_normal(low=lower, high=upper)\n # ret = ivy.where(x >= 0, x, ivy.multiply(a, x))\n # return ret.astype(x.dtype)\n\n\n@with_supported_dtypes({\"2.5.0 and below\": (\"float32\", \"float64\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef tanhshrink(\n x,\n /,\n *,\n name=None,\n):\n return ivy.subtract(x, ivy.tanh(x))\n\n\n@with_supported_dtypes({\"2.5.0 and below\": (\"float32\", \"float64\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef relu_(x, name=None):\n ret = ivy.relu(x)\n ivy.inplace_update(x, ret)\n return x\n\n\n@with_supported_dtypes({\"2.5.0 and below\": (\"float32\", \"float64\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef elu(\n x,\n /,\n *,\n alpha=1.0,\n name=None,\n):\n return ivy.elu(x, alpha=alpha)\n\n\n@with_supported_dtypes({\"2.5.0 and below\": (\"float32\", \"float64\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef mish(x, name=None):\n return ivy.mish(x)\n\n@with_supported_dtypes({\"2.4.2 and below\": (\"float32\", \"float64\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef leaky_relu(x, negative_slope=0.01, name=None):\n return ivy.leaky_relu(x)\n", "path": "ivy/functional/frontends/paddle/nn/functional/activation.py" } ]
diff --git a/ivy/functional/frontends/paddle/nn/functional/activation.py b/ivy/functional/frontends/paddle/nn/functional/activation.py index 6826012fb0b93..6dc559e93a758 100644 --- a/ivy/functional/frontends/paddle/nn/functional/activation.py +++ b/ivy/functional/frontends/paddle/nn/functional/activation.py @@ -209,3 +209,8 @@ def elu( @to_ivy_arrays_and_back def mish(x, name=None): return ivy.mish(x) + +@with_supported_dtypes({"2.4.2 and below": ("float32", "float64")}, "paddle") +@to_ivy_arrays_and_back +def leaky_relu(x, negative_slope=0.01, name=None): + return ivy.leaky_relu(x) diff --git a/ivy_tests/test_ivy/test_frontends/test_paddle/test_nn/test_functional/test_paddle_activation.py b/ivy_tests/test_ivy/test_frontends/test_paddle/test_nn/test_functional/test_paddle_activation.py index 7b55199f1e5b6..d6b1030ee98e8 100644 --- a/ivy_tests/test_ivy/test_frontends/test_paddle/test_nn/test_functional/test_paddle_activation.py +++ b/ivy_tests/test_ivy/test_frontends/test_paddle/test_nn/test_functional/test_paddle_activation.py @@ -514,3 +514,29 @@ def test_paddle_mish( on_device=on_device, x=x[0], ) + + +@handle_frontend_test( + fn_tree="paddle.nn.functional.leaky_relu", + dtype_and_x=helpers.dtype_and_values( + available_dtypes=helpers.get_dtypes("float"), + ), +) +def test_paddle_leaky_relu( + *, + dtype_and_x, + on_device, + fn_tree, + frontend, + test_flags, +): + input_dtype, x = dtype_and_x + helpers.test_frontend_function( + input_dtypes=input_dtype, + frontend=frontend, + test_flags=test_flags, + fn_tree=fn_tree, + on_device=on_device, + negative_slope=0.01, + x=x[0], + )
saleor__saleor-1540
TypeError when configuring MAX_CART_LINE_QUANTITY via environment variable ### What I'm trying to achieve Add an item to a cart ### Steps to reproduce the problem 1. Configure MAX_CART_LINE_QUANTITY via environment variable 2. Attempt to add an item to the cart ### What I expected to happen A new item is added to the cart ### What happened instead/how it failed TypeError, returns status 500, item is not added to cart ```Traceback (most recent call last): File "/home/henry/PythonVirtualenvs/saleor/lib/python3.5/site-packages/django/core/handlers/exception.py", line 41, in inner response = get_response(request) File "/home/henry/PythonVirtualenvs/saleor/lib/python3.5/site-packages/django/core/handlers/base.py", line 187, in _get_response response = self.process_exception_by_middleware(e, request) File "/home/henry/PythonVirtualenvs/saleor/lib/python3.5/site-packages/django/core/handlers/base.py", line 185, in _get_response response = wrapped_callback(request, *callback_args, **callback_kwargs) File "/home/henry/Code/saleor/saleor/product/views.py", line 94, in product_add_to_cart if form.is_valid(): File "/home/henry/PythonVirtualenvs/saleor/lib/python3.5/site-packages/django/forms/forms.py", line 183, in is_valid return self.is_bound and not self.errors File "/home/henry/PythonVirtualenvs/saleor/lib/python3.5/site-packages/django/forms/forms.py", line 175, in errors self.full_clean() File "/home/henry/PythonVirtualenvs/saleor/lib/python3.5/site-packages/django/forms/forms.py", line 384, in full_clean self._clean_fields() File "/home/henry/PythonVirtualenvs/saleor/lib/python3.5/site-packages/django/forms/forms.py", line 402, in _clean_fields value = field.clean(value) File "/home/henry/PythonVirtualenvs/saleor/lib/python3.5/site-packages/django/forms/fields.py", line 162, in clean self.run_validators(value) File "/home/henry/PythonVirtualenvs/saleor/lib/python3.5/site-packages/django/forms/fields.py", line 145, in run_validators v(value) File "/home/henry/PythonVirtualenvs/saleor/lib/python3.5/site-packages/django/core/validators.py", line 325, in __call__ if self.compare(cleaned, self.limit_value): File "/home/henry/PythonVirtuale nvs/saleor/lib/python3.5/site-packages/django/core/validators.py", line 349, in compare return a > b TypeError: unorderable types: int() > str() ERROR django.server "POST /products/product-4/add/ HTTP/1.1" 500 24428 [PID:32324:Thread-17] ``` python version: 3.5.4 django version: 1.11.5 most recent upstream commit: 0ec214a3deb606a951a268f7f83e659857be4dd7 Here's how I worked around: [integer_cart_line_quantity.patch.txt](https://github.com/mirumee/saleor/files/1587457/integer_cart_line_quantity.patch.txt)
[ { "content": "import ast\nimport os.path\n\nimport dj_database_url\nimport dj_email_url\nfrom django.contrib.messages import constants as messages\nimport django_cache_url\n\n\ndef get_list(text):\n return [item.strip() for item in text.split(',')]\n\n\nDEBUG = ast.literal_eval(os.environ.get('DEBUG', 'True'))\n\nSITE_ID = 1\n\nPROJECT_ROOT = os.path.normpath(os.path.join(os.path.dirname(__file__), '..'))\n\nROOT_URLCONF = 'saleor.urls'\n\nWSGI_APPLICATION = 'saleor.wsgi.application'\n\nADMINS = (\n # ('Your Name', '[email protected]'),\n)\nMANAGERS = ADMINS\n\nINTERNAL_IPS = get_list(os.environ.get('INTERNAL_IPS', '127.0.0.1'))\n\nCACHES = {'default': django_cache_url.config()}\n\nif os.environ.get('REDIS_URL'):\n CACHES['default'] = {\n 'BACKEND': 'django_redis.cache.RedisCache',\n 'LOCATION': os.environ.get('REDIS_URL')}\n\nDATABASES = {\n 'default': dj_database_url.config(\n default='postgres://saleor:saleor@localhost:5432/saleor',\n conn_max_age=600)}\n\n\nTIME_ZONE = 'America/Chicago'\nLANGUAGE_CODE = 'en-us'\nLOCALE_PATHS = [os.path.join(PROJECT_ROOT, 'locale')]\nUSE_I18N = True\nUSE_L10N = True\nUSE_TZ = True\n\nFORM_RENDERER = 'django.forms.renderers.TemplatesSetting'\n\nEMAIL_URL = os.environ.get('EMAIL_URL')\nSENDGRID_USERNAME = os.environ.get('SENDGRID_USERNAME')\nSENDGRID_PASSWORD = os.environ.get('SENDGRID_PASSWORD')\nif not EMAIL_URL and SENDGRID_USERNAME and SENDGRID_PASSWORD:\n EMAIL_URL = 'smtp://%s:%[email protected]:587/?tls=True' % (\n SENDGRID_USERNAME, SENDGRID_PASSWORD)\nemail_config = dj_email_url.parse(EMAIL_URL or 'console://')\n\nEMAIL_FILE_PATH = email_config['EMAIL_FILE_PATH']\nEMAIL_HOST_USER = email_config['EMAIL_HOST_USER']\nEMAIL_HOST_PASSWORD = email_config['EMAIL_HOST_PASSWORD']\nEMAIL_HOST = email_config['EMAIL_HOST']\nEMAIL_PORT = email_config['EMAIL_PORT']\nEMAIL_BACKEND = email_config['EMAIL_BACKEND']\nEMAIL_USE_TLS = email_config['EMAIL_USE_TLS']\nEMAIL_USE_SSL = email_config['EMAIL_USE_SSL']\n\nDEFAULT_FROM_EMAIL = os.environ.get('DEFAULT_FROM_EMAIL')\nORDER_FROM_EMAIL = os.getenv('ORDER_FROM_EMAIL', DEFAULT_FROM_EMAIL)\n\nMEDIA_ROOT = os.path.join(PROJECT_ROOT, 'media')\nMEDIA_URL = '/media/'\n\nSTATIC_ROOT = os.path.join(PROJECT_ROOT, 'static')\nSTATIC_URL = '/static/'\nSTATICFILES_DIRS = [\n ('assets', os.path.join(PROJECT_ROOT, 'saleor', 'static', 'assets')),\n ('images', os.path.join(PROJECT_ROOT, 'saleor', 'static', 'images')),\n ('dashboard', os.path.join(PROJECT_ROOT, 'saleor', 'static', 'dashboard'))\n]\nSTATICFILES_FINDERS = [\n 'django.contrib.staticfiles.finders.FileSystemFinder',\n 'django.contrib.staticfiles.finders.AppDirectoriesFinder'\n]\n\ncontext_processors = [\n 'django.contrib.auth.context_processors.auth',\n 'django.template.context_processors.debug',\n 'django.template.context_processors.i18n',\n 'django.template.context_processors.media',\n 'django.template.context_processors.static',\n 'django.template.context_processors.tz',\n 'django.contrib.messages.context_processors.messages',\n 'django.template.context_processors.request',\n 'saleor.core.context_processors.default_currency',\n 'saleor.core.context_processors.categories',\n 'saleor.cart.context_processors.cart_counter',\n 'saleor.core.context_processors.search_enabled',\n 'saleor.site.context_processors.site',\n 'saleor.core.context_processors.webpage_schema',\n 'social_django.context_processors.backends',\n 'social_django.context_processors.login_redirect',\n]\n\nloaders = [\n 'django.template.loaders.filesystem.Loader',\n 'django.template.loaders.app_directories.Loader']\n\nif not DEBUG:\n loaders = [('django.template.loaders.cached.Loader', loaders)]\n\nTEMPLATES = [{\n 'BACKEND': 'django.template.backends.django.DjangoTemplates',\n 'DIRS': [os.path.join(PROJECT_ROOT, 'templates')],\n 'OPTIONS': {\n 'debug': DEBUG,\n 'context_processors': context_processors,\n 'loaders': loaders,\n 'string_if_invalid': '<< MISSING VARIABLE \"%s\" >>' if DEBUG else ''}}]\n\n# Make this unique, and don't share it with anybody.\nSECRET_KEY = os.environ.get('SECRET_KEY')\n\nMIDDLEWARE = [\n 'django.contrib.sessions.middleware.SessionMiddleware',\n 'django.middleware.common.CommonMiddleware',\n 'django.middleware.csrf.CsrfViewMiddleware',\n 'django.contrib.auth.middleware.AuthenticationMiddleware',\n 'django.contrib.messages.middleware.MessageMiddleware',\n 'django.middleware.locale.LocaleMiddleware',\n 'django_babel.middleware.LocaleMiddleware',\n 'saleor.core.middleware.DiscountMiddleware',\n 'saleor.core.middleware.GoogleAnalytics',\n 'saleor.core.middleware.CountryMiddleware',\n 'saleor.core.middleware.CurrencyMiddleware',\n 'saleor.core.middleware.ClearSiteCacheMiddleware',\n 'social_django.middleware.SocialAuthExceptionMiddleware',\n 'impersonate.middleware.ImpersonateMiddleware'\n]\n\nINSTALLED_APPS = [\n # External apps that need to go before django's\n 'storages',\n\n # Django modules\n 'django.contrib.contenttypes',\n 'django.contrib.sessions',\n 'django.contrib.messages',\n 'django.contrib.sitemaps',\n 'django.contrib.sites',\n 'django.contrib.staticfiles',\n 'django.contrib.auth',\n 'django.contrib.postgres',\n 'django.forms',\n\n # Local apps\n 'saleor.userprofile',\n 'saleor.discount',\n 'saleor.product',\n 'saleor.cart',\n 'saleor.checkout',\n 'saleor.core',\n 'saleor.graphql',\n 'saleor.order',\n 'saleor.dashboard',\n 'saleor.shipping',\n 'saleor.search',\n 'saleor.site',\n 'saleor.data_feeds',\n\n # External apps\n 'versatileimagefield',\n 'django_babel',\n 'bootstrap3',\n 'django_prices',\n 'django_prices_openexchangerates',\n 'graphene_django',\n 'mptt',\n 'payments',\n 'webpack_loader',\n 'social_django',\n 'django_countries',\n 'django_filters',\n 'django_celery_results',\n 'impersonate',\n 'phonenumber_field',\n]\n\nLOGGING = {\n 'version': 1,\n 'disable_existing_loggers': False,\n 'root': {\n 'level': 'INFO',\n 'handlers': ['console']\n },\n 'formatters': {\n 'verbose': {\n 'format': (\n '%(levelname)s %(name)s %(message)s'\n ' [PID:%(process)d:%(threadName)s]')\n },\n 'simple': {\n 'format': '%(levelname)s %(message)s'\n }\n },\n 'filters': {\n 'require_debug_false': {\n '()': 'django.utils.log.RequireDebugFalse'\n }\n },\n 'handlers': {\n 'mail_admins': {\n 'level': 'ERROR',\n 'filters': ['require_debug_false'],\n 'class': 'django.utils.log.AdminEmailHandler'\n },\n 'console': {\n 'level': 'DEBUG',\n 'class': 'logging.StreamHandler',\n 'formatter': 'verbose'\n }\n },\n 'loggers': {\n 'django': {\n 'handlers': ['console', 'mail_admins'],\n 'level': 'INFO',\n 'propagate': True\n },\n 'django.server': {\n 'handlers': ['console'],\n 'level': 'INFO',\n 'propagate': True\n },\n 'saleor': {\n 'handlers': ['console'],\n 'level': 'DEBUG',\n 'propagate': True\n }\n }\n}\n\nAUTH_USER_MODEL = 'userprofile.User'\n\nLOGIN_URL = '/account/login/'\n\nDEFAULT_COUNTRY = 'US'\nDEFAULT_CURRENCY = 'USD'\nAVAILABLE_CURRENCIES = [DEFAULT_CURRENCY]\n\nOPENEXCHANGERATES_API_KEY = os.environ.get('OPENEXCHANGERATES_API_KEY')\n\nACCOUNT_ACTIVATION_DAYS = 3\n\nLOGIN_REDIRECT_URL = 'home'\n\nGOOGLE_ANALYTICS_TRACKING_ID = os.environ.get('GOOGLE_ANALYTICS_TRACKING_ID')\n\n\ndef get_host():\n from django.contrib.sites.models import Site\n return Site.objects.get_current().domain\n\n\nPAYMENT_HOST = get_host\n\nPAYMENT_MODEL = 'order.Payment'\n\nPAYMENT_VARIANTS = {\n 'default': ('payments.dummy.DummyProvider', {})}\n\nSESSION_SERIALIZER = 'django.contrib.sessions.serializers.JSONSerializer'\nSESSION_ENGINE = 'django.contrib.sessions.backends.cached_db'\n\nCHECKOUT_PAYMENT_CHOICES = [\n ('default', 'Dummy provider')]\n\nMESSAGE_TAGS = {\n messages.ERROR: 'danger'}\n\nLOW_STOCK_THRESHOLD = 10\nMAX_CART_LINE_QUANTITY = os.environ.get('MAX_CART_LINE_QUANTITY', 50)\n\nPAGINATE_BY = 16\nDASHBOARD_PAGINATE_BY = 30\nDASHBOARD_SEARCH_LIMIT = 5\n\nBOOTSTRAP3 = {\n 'set_placeholder': False,\n 'set_required': False,\n 'success_css_class': '',\n 'form_renderers': {\n 'default': 'saleor.core.utils.form_renderer.FormRenderer',\n },\n}\n\nTEST_RUNNER = ''\n\nALLOWED_HOSTS = get_list(os.environ.get('ALLOWED_HOSTS', 'localhost'))\n\nSECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')\n\n# Amazon S3 configuration\nAWS_ACCESS_KEY_ID = os.environ.get('AWS_ACCESS_KEY_ID')\nAWS_SECRET_ACCESS_KEY = os.environ.get('AWS_SECRET_ACCESS_KEY')\nAWS_STORAGE_BUCKET_NAME = os.environ.get('AWS_STORAGE_BUCKET_NAME')\nAWS_MEDIA_BUCKET_NAME = os.environ.get('AWS_MEDIA_BUCKET_NAME')\nAWS_QUERYSTRING_AUTH = ast.literal_eval(\n os.environ.get('AWS_QUERYSTRING_AUTH', 'False'))\n\nif AWS_STORAGE_BUCKET_NAME:\n STATICFILES_STORAGE = 'storages.backends.s3boto3.S3Boto3Storage'\n\nif AWS_MEDIA_BUCKET_NAME:\n DEFAULT_FILE_STORAGE = 'saleor.core.storages.S3MediaStorage'\n THUMBNAIL_DEFAULT_STORAGE = DEFAULT_FILE_STORAGE\n\nMESSAGE_STORAGE = 'django.contrib.messages.storage.session.SessionStorage'\n\nVERSATILEIMAGEFIELD_RENDITION_KEY_SETS = {\n 'defaults': [\n ('product_gallery', 'crop__540x540'),\n ('product_gallery_2x', 'crop__1080x1080'),\n ('product_small', 'crop__60x60'),\n ('product_small_2x', 'crop__120x120'),\n ('product_list', 'crop__255x255'),\n ('product_list_2x', 'crop__510x510')]}\n\nVERSATILEIMAGEFIELD_SETTINGS = {\n # Images should be pre-generated on Production environment\n 'create_images_on_demand': ast.literal_eval(\n os.environ.get('CREATE_IMAGES_ON_DEMAND', 'True')),\n}\n\nPLACEHOLDER_IMAGES = {\n 60: 'images/placeholder60x60.png',\n 120: 'images/placeholder120x120.png',\n 255: 'images/placeholder255x255.png',\n 540: 'images/placeholder540x540.png',\n 1080: 'images/placeholder1080x1080.png'\n}\n\nDEFAULT_PLACEHOLDER = 'images/placeholder255x255.png'\n\nWEBPACK_LOADER = {\n 'DEFAULT': {\n 'CACHE': not DEBUG,\n 'BUNDLE_DIR_NAME': 'assets/',\n 'STATS_FILE': os.path.join(PROJECT_ROOT, 'webpack-bundle.json'),\n 'POLL_INTERVAL': 0.1,\n 'IGNORE': [\n r'.+\\.hot-update\\.js',\n r'.+\\.map']}}\n\n\nLOGOUT_ON_PASSWORD_CHANGE = False\n\n# SEARCH CONFIGURATION\nDB_SEARCH_ENABLED = True\n\n# support deployment-dependant elastic enviroment variable\nES_URL = (os.environ.get('ELASTICSEARCH_URL') or\n os.environ.get('SEARCHBOX_URL') or os.environ.get('BONSAI_URL'))\n\nENABLE_SEARCH = bool(ES_URL) or DB_SEARCH_ENABLED # global search disabling\n\nSEARCH_BACKEND = 'saleor.search.backends.postgresql'\n\nif ES_URL:\n SEARCH_BACKEND = 'saleor.search.backends.elasticsearch'\n INSTALLED_APPS.append('django_elasticsearch_dsl')\n ELASTICSEARCH_DSL = {\n 'default': {\n 'hosts': ES_URL\n },\n }\n\n\nGRAPHENE = {\n 'MIDDLEWARE': [\n 'graphene_django.debug.DjangoDebugMiddleware'\n ],\n 'SCHEMA': 'saleor.graphql.api.schema',\n 'SCHEMA_OUTPUT': os.path.join(\n PROJECT_ROOT, 'saleor', 'static', 'schema.json')\n}\n\nAUTHENTICATION_BACKENDS = [\n 'saleor.registration.backends.facebook.CustomFacebookOAuth2',\n 'saleor.registration.backends.google.CustomGoogleOAuth2',\n 'django.contrib.auth.backends.ModelBackend',\n]\n\nSOCIAL_AUTH_PIPELINE = [\n 'social_core.pipeline.social_auth.social_details',\n 'social_core.pipeline.social_auth.social_uid',\n 'social_core.pipeline.social_auth.auth_allowed',\n 'social_core.pipeline.social_auth.social_user',\n 'social_core.pipeline.social_auth.associate_by_email',\n 'social_core.pipeline.user.create_user',\n 'social_core.pipeline.social_auth.associate_user',\n 'social_core.pipeline.social_auth.load_extra_data',\n 'social_core.pipeline.user.user_details',\n]\n\nSOCIAL_AUTH_USERNAME_IS_FULL_EMAIL = True\nSOCIAL_AUTH_USER_MODEL = AUTH_USER_MODEL\nSOCIAL_AUTH_FACEBOOK_SCOPE = ['email']\nSOCIAL_AUTH_FACEBOOK_PROFILE_EXTRA_PARAMS = {\n 'fields': 'id, email'}\n\n# CELERY SETTINGS\nCELERY_BROKER_URL = os.environ.get('REDIS_BROKER_URL') or ''\nCELERY_TASK_ALWAYS_EAGER = False if CELERY_BROKER_URL else True\nCELERY_ACCEPT_CONTENT = ['json']\nCELERY_TASK_SERIALIZER = 'json'\nCELERY_RESULT_SERIALIZER = 'json'\nCELERY_RESULT_BACKEND = 'django-db'\n\n# Impersonate module settings\nIMPERSONATE_URI_EXCLUSIONS = [r'^dashboard/']\nIMPERSONATE_CUSTOM_USER_QUERYSET = \\\n 'saleor.userprofile.impersonate.get_impersonatable_users'\nIMPERSONATE_USE_HTTP_REFERER = True\nIMPERSONATE_CUSTOM_ALLOW = 'saleor.userprofile.impersonate.can_impersonate'\n", "path": "saleor/settings.py" } ]
[ { "content": "import ast\nimport os.path\n\nimport dj_database_url\nimport dj_email_url\nfrom django.contrib.messages import constants as messages\nimport django_cache_url\n\n\ndef get_list(text):\n return [item.strip() for item in text.split(',')]\n\n\nDEBUG = ast.literal_eval(os.environ.get('DEBUG', 'True'))\n\nSITE_ID = 1\n\nPROJECT_ROOT = os.path.normpath(os.path.join(os.path.dirname(__file__), '..'))\n\nROOT_URLCONF = 'saleor.urls'\n\nWSGI_APPLICATION = 'saleor.wsgi.application'\n\nADMINS = (\n # ('Your Name', '[email protected]'),\n)\nMANAGERS = ADMINS\n\nINTERNAL_IPS = get_list(os.environ.get('INTERNAL_IPS', '127.0.0.1'))\n\nCACHES = {'default': django_cache_url.config()}\n\nif os.environ.get('REDIS_URL'):\n CACHES['default'] = {\n 'BACKEND': 'django_redis.cache.RedisCache',\n 'LOCATION': os.environ.get('REDIS_URL')}\n\nDATABASES = {\n 'default': dj_database_url.config(\n default='postgres://saleor:saleor@localhost:5432/saleor',\n conn_max_age=600)}\n\n\nTIME_ZONE = 'America/Chicago'\nLANGUAGE_CODE = 'en-us'\nLOCALE_PATHS = [os.path.join(PROJECT_ROOT, 'locale')]\nUSE_I18N = True\nUSE_L10N = True\nUSE_TZ = True\n\nFORM_RENDERER = 'django.forms.renderers.TemplatesSetting'\n\nEMAIL_URL = os.environ.get('EMAIL_URL')\nSENDGRID_USERNAME = os.environ.get('SENDGRID_USERNAME')\nSENDGRID_PASSWORD = os.environ.get('SENDGRID_PASSWORD')\nif not EMAIL_URL and SENDGRID_USERNAME and SENDGRID_PASSWORD:\n EMAIL_URL = 'smtp://%s:%[email protected]:587/?tls=True' % (\n SENDGRID_USERNAME, SENDGRID_PASSWORD)\nemail_config = dj_email_url.parse(EMAIL_URL or 'console://')\n\nEMAIL_FILE_PATH = email_config['EMAIL_FILE_PATH']\nEMAIL_HOST_USER = email_config['EMAIL_HOST_USER']\nEMAIL_HOST_PASSWORD = email_config['EMAIL_HOST_PASSWORD']\nEMAIL_HOST = email_config['EMAIL_HOST']\nEMAIL_PORT = email_config['EMAIL_PORT']\nEMAIL_BACKEND = email_config['EMAIL_BACKEND']\nEMAIL_USE_TLS = email_config['EMAIL_USE_TLS']\nEMAIL_USE_SSL = email_config['EMAIL_USE_SSL']\n\nDEFAULT_FROM_EMAIL = os.environ.get('DEFAULT_FROM_EMAIL')\nORDER_FROM_EMAIL = os.getenv('ORDER_FROM_EMAIL', DEFAULT_FROM_EMAIL)\n\nMEDIA_ROOT = os.path.join(PROJECT_ROOT, 'media')\nMEDIA_URL = '/media/'\n\nSTATIC_ROOT = os.path.join(PROJECT_ROOT, 'static')\nSTATIC_URL = '/static/'\nSTATICFILES_DIRS = [\n ('assets', os.path.join(PROJECT_ROOT, 'saleor', 'static', 'assets')),\n ('images', os.path.join(PROJECT_ROOT, 'saleor', 'static', 'images')),\n ('dashboard', os.path.join(PROJECT_ROOT, 'saleor', 'static', 'dashboard'))\n]\nSTATICFILES_FINDERS = [\n 'django.contrib.staticfiles.finders.FileSystemFinder',\n 'django.contrib.staticfiles.finders.AppDirectoriesFinder'\n]\n\ncontext_processors = [\n 'django.contrib.auth.context_processors.auth',\n 'django.template.context_processors.debug',\n 'django.template.context_processors.i18n',\n 'django.template.context_processors.media',\n 'django.template.context_processors.static',\n 'django.template.context_processors.tz',\n 'django.contrib.messages.context_processors.messages',\n 'django.template.context_processors.request',\n 'saleor.core.context_processors.default_currency',\n 'saleor.core.context_processors.categories',\n 'saleor.cart.context_processors.cart_counter',\n 'saleor.core.context_processors.search_enabled',\n 'saleor.site.context_processors.site',\n 'saleor.core.context_processors.webpage_schema',\n 'social_django.context_processors.backends',\n 'social_django.context_processors.login_redirect',\n]\n\nloaders = [\n 'django.template.loaders.filesystem.Loader',\n 'django.template.loaders.app_directories.Loader']\n\nif not DEBUG:\n loaders = [('django.template.loaders.cached.Loader', loaders)]\n\nTEMPLATES = [{\n 'BACKEND': 'django.template.backends.django.DjangoTemplates',\n 'DIRS': [os.path.join(PROJECT_ROOT, 'templates')],\n 'OPTIONS': {\n 'debug': DEBUG,\n 'context_processors': context_processors,\n 'loaders': loaders,\n 'string_if_invalid': '<< MISSING VARIABLE \"%s\" >>' if DEBUG else ''}}]\n\n# Make this unique, and don't share it with anybody.\nSECRET_KEY = os.environ.get('SECRET_KEY')\n\nMIDDLEWARE = [\n 'django.contrib.sessions.middleware.SessionMiddleware',\n 'django.middleware.common.CommonMiddleware',\n 'django.middleware.csrf.CsrfViewMiddleware',\n 'django.contrib.auth.middleware.AuthenticationMiddleware',\n 'django.contrib.messages.middleware.MessageMiddleware',\n 'django.middleware.locale.LocaleMiddleware',\n 'django_babel.middleware.LocaleMiddleware',\n 'saleor.core.middleware.DiscountMiddleware',\n 'saleor.core.middleware.GoogleAnalytics',\n 'saleor.core.middleware.CountryMiddleware',\n 'saleor.core.middleware.CurrencyMiddleware',\n 'saleor.core.middleware.ClearSiteCacheMiddleware',\n 'social_django.middleware.SocialAuthExceptionMiddleware',\n 'impersonate.middleware.ImpersonateMiddleware'\n]\n\nINSTALLED_APPS = [\n # External apps that need to go before django's\n 'storages',\n\n # Django modules\n 'django.contrib.contenttypes',\n 'django.contrib.sessions',\n 'django.contrib.messages',\n 'django.contrib.sitemaps',\n 'django.contrib.sites',\n 'django.contrib.staticfiles',\n 'django.contrib.auth',\n 'django.contrib.postgres',\n 'django.forms',\n\n # Local apps\n 'saleor.userprofile',\n 'saleor.discount',\n 'saleor.product',\n 'saleor.cart',\n 'saleor.checkout',\n 'saleor.core',\n 'saleor.graphql',\n 'saleor.order',\n 'saleor.dashboard',\n 'saleor.shipping',\n 'saleor.search',\n 'saleor.site',\n 'saleor.data_feeds',\n\n # External apps\n 'versatileimagefield',\n 'django_babel',\n 'bootstrap3',\n 'django_prices',\n 'django_prices_openexchangerates',\n 'graphene_django',\n 'mptt',\n 'payments',\n 'webpack_loader',\n 'social_django',\n 'django_countries',\n 'django_filters',\n 'django_celery_results',\n 'impersonate',\n 'phonenumber_field',\n]\n\nLOGGING = {\n 'version': 1,\n 'disable_existing_loggers': False,\n 'root': {\n 'level': 'INFO',\n 'handlers': ['console']\n },\n 'formatters': {\n 'verbose': {\n 'format': (\n '%(levelname)s %(name)s %(message)s'\n ' [PID:%(process)d:%(threadName)s]')\n },\n 'simple': {\n 'format': '%(levelname)s %(message)s'\n }\n },\n 'filters': {\n 'require_debug_false': {\n '()': 'django.utils.log.RequireDebugFalse'\n }\n },\n 'handlers': {\n 'mail_admins': {\n 'level': 'ERROR',\n 'filters': ['require_debug_false'],\n 'class': 'django.utils.log.AdminEmailHandler'\n },\n 'console': {\n 'level': 'DEBUG',\n 'class': 'logging.StreamHandler',\n 'formatter': 'verbose'\n }\n },\n 'loggers': {\n 'django': {\n 'handlers': ['console', 'mail_admins'],\n 'level': 'INFO',\n 'propagate': True\n },\n 'django.server': {\n 'handlers': ['console'],\n 'level': 'INFO',\n 'propagate': True\n },\n 'saleor': {\n 'handlers': ['console'],\n 'level': 'DEBUG',\n 'propagate': True\n }\n }\n}\n\nAUTH_USER_MODEL = 'userprofile.User'\n\nLOGIN_URL = '/account/login/'\n\nDEFAULT_COUNTRY = 'US'\nDEFAULT_CURRENCY = 'USD'\nAVAILABLE_CURRENCIES = [DEFAULT_CURRENCY]\n\nOPENEXCHANGERATES_API_KEY = os.environ.get('OPENEXCHANGERATES_API_KEY')\n\nACCOUNT_ACTIVATION_DAYS = 3\n\nLOGIN_REDIRECT_URL = 'home'\n\nGOOGLE_ANALYTICS_TRACKING_ID = os.environ.get('GOOGLE_ANALYTICS_TRACKING_ID')\n\n\ndef get_host():\n from django.contrib.sites.models import Site\n return Site.objects.get_current().domain\n\n\nPAYMENT_HOST = get_host\n\nPAYMENT_MODEL = 'order.Payment'\n\nPAYMENT_VARIANTS = {\n 'default': ('payments.dummy.DummyProvider', {})}\n\nSESSION_SERIALIZER = 'django.contrib.sessions.serializers.JSONSerializer'\nSESSION_ENGINE = 'django.contrib.sessions.backends.cached_db'\n\nCHECKOUT_PAYMENT_CHOICES = [\n ('default', 'Dummy provider')]\n\nMESSAGE_TAGS = {\n messages.ERROR: 'danger'}\n\nLOW_STOCK_THRESHOLD = 10\nMAX_CART_LINE_QUANTITY = int(os.environ.get('MAX_CART_LINE_QUANTITY', 50))\n\nPAGINATE_BY = 16\nDASHBOARD_PAGINATE_BY = 30\nDASHBOARD_SEARCH_LIMIT = 5\n\nBOOTSTRAP3 = {\n 'set_placeholder': False,\n 'set_required': False,\n 'success_css_class': '',\n 'form_renderers': {\n 'default': 'saleor.core.utils.form_renderer.FormRenderer',\n },\n}\n\nTEST_RUNNER = ''\n\nALLOWED_HOSTS = get_list(os.environ.get('ALLOWED_HOSTS', 'localhost'))\n\nSECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')\n\n# Amazon S3 configuration\nAWS_ACCESS_KEY_ID = os.environ.get('AWS_ACCESS_KEY_ID')\nAWS_SECRET_ACCESS_KEY = os.environ.get('AWS_SECRET_ACCESS_KEY')\nAWS_STORAGE_BUCKET_NAME = os.environ.get('AWS_STORAGE_BUCKET_NAME')\nAWS_MEDIA_BUCKET_NAME = os.environ.get('AWS_MEDIA_BUCKET_NAME')\nAWS_QUERYSTRING_AUTH = ast.literal_eval(\n os.environ.get('AWS_QUERYSTRING_AUTH', 'False'))\n\nif AWS_STORAGE_BUCKET_NAME:\n STATICFILES_STORAGE = 'storages.backends.s3boto3.S3Boto3Storage'\n\nif AWS_MEDIA_BUCKET_NAME:\n DEFAULT_FILE_STORAGE = 'saleor.core.storages.S3MediaStorage'\n THUMBNAIL_DEFAULT_STORAGE = DEFAULT_FILE_STORAGE\n\nMESSAGE_STORAGE = 'django.contrib.messages.storage.session.SessionStorage'\n\nVERSATILEIMAGEFIELD_RENDITION_KEY_SETS = {\n 'defaults': [\n ('product_gallery', 'crop__540x540'),\n ('product_gallery_2x', 'crop__1080x1080'),\n ('product_small', 'crop__60x60'),\n ('product_small_2x', 'crop__120x120'),\n ('product_list', 'crop__255x255'),\n ('product_list_2x', 'crop__510x510')]}\n\nVERSATILEIMAGEFIELD_SETTINGS = {\n # Images should be pre-generated on Production environment\n 'create_images_on_demand': ast.literal_eval(\n os.environ.get('CREATE_IMAGES_ON_DEMAND', 'True')),\n}\n\nPLACEHOLDER_IMAGES = {\n 60: 'images/placeholder60x60.png',\n 120: 'images/placeholder120x120.png',\n 255: 'images/placeholder255x255.png',\n 540: 'images/placeholder540x540.png',\n 1080: 'images/placeholder1080x1080.png'\n}\n\nDEFAULT_PLACEHOLDER = 'images/placeholder255x255.png'\n\nWEBPACK_LOADER = {\n 'DEFAULT': {\n 'CACHE': not DEBUG,\n 'BUNDLE_DIR_NAME': 'assets/',\n 'STATS_FILE': os.path.join(PROJECT_ROOT, 'webpack-bundle.json'),\n 'POLL_INTERVAL': 0.1,\n 'IGNORE': [\n r'.+\\.hot-update\\.js',\n r'.+\\.map']}}\n\n\nLOGOUT_ON_PASSWORD_CHANGE = False\n\n# SEARCH CONFIGURATION\nDB_SEARCH_ENABLED = True\n\n# support deployment-dependant elastic enviroment variable\nES_URL = (os.environ.get('ELASTICSEARCH_URL') or\n os.environ.get('SEARCHBOX_URL') or os.environ.get('BONSAI_URL'))\n\nENABLE_SEARCH = bool(ES_URL) or DB_SEARCH_ENABLED # global search disabling\n\nSEARCH_BACKEND = 'saleor.search.backends.postgresql'\n\nif ES_URL:\n SEARCH_BACKEND = 'saleor.search.backends.elasticsearch'\n INSTALLED_APPS.append('django_elasticsearch_dsl')\n ELASTICSEARCH_DSL = {\n 'default': {\n 'hosts': ES_URL\n },\n }\n\n\nGRAPHENE = {\n 'MIDDLEWARE': [\n 'graphene_django.debug.DjangoDebugMiddleware'\n ],\n 'SCHEMA': 'saleor.graphql.api.schema',\n 'SCHEMA_OUTPUT': os.path.join(\n PROJECT_ROOT, 'saleor', 'static', 'schema.json')\n}\n\nAUTHENTICATION_BACKENDS = [\n 'saleor.registration.backends.facebook.CustomFacebookOAuth2',\n 'saleor.registration.backends.google.CustomGoogleOAuth2',\n 'django.contrib.auth.backends.ModelBackend',\n]\n\nSOCIAL_AUTH_PIPELINE = [\n 'social_core.pipeline.social_auth.social_details',\n 'social_core.pipeline.social_auth.social_uid',\n 'social_core.pipeline.social_auth.auth_allowed',\n 'social_core.pipeline.social_auth.social_user',\n 'social_core.pipeline.social_auth.associate_by_email',\n 'social_core.pipeline.user.create_user',\n 'social_core.pipeline.social_auth.associate_user',\n 'social_core.pipeline.social_auth.load_extra_data',\n 'social_core.pipeline.user.user_details',\n]\n\nSOCIAL_AUTH_USERNAME_IS_FULL_EMAIL = True\nSOCIAL_AUTH_USER_MODEL = AUTH_USER_MODEL\nSOCIAL_AUTH_FACEBOOK_SCOPE = ['email']\nSOCIAL_AUTH_FACEBOOK_PROFILE_EXTRA_PARAMS = {\n 'fields': 'id, email'}\n\n# CELERY SETTINGS\nCELERY_BROKER_URL = os.environ.get('REDIS_BROKER_URL') or ''\nCELERY_TASK_ALWAYS_EAGER = False if CELERY_BROKER_URL else True\nCELERY_ACCEPT_CONTENT = ['json']\nCELERY_TASK_SERIALIZER = 'json'\nCELERY_RESULT_SERIALIZER = 'json'\nCELERY_RESULT_BACKEND = 'django-db'\n\n# Impersonate module settings\nIMPERSONATE_URI_EXCLUSIONS = [r'^dashboard/']\nIMPERSONATE_CUSTOM_USER_QUERYSET = \\\n 'saleor.userprofile.impersonate.get_impersonatable_users'\nIMPERSONATE_USE_HTTP_REFERER = True\nIMPERSONATE_CUSTOM_ALLOW = 'saleor.userprofile.impersonate.can_impersonate'\n", "path": "saleor/settings.py" } ]
diff --git a/saleor/settings.py b/saleor/settings.py index 8f8bd3fdbc2..2f103c03fc3 100644 --- a/saleor/settings.py +++ b/saleor/settings.py @@ -280,7 +280,7 @@ def get_host(): messages.ERROR: 'danger'} LOW_STOCK_THRESHOLD = 10 -MAX_CART_LINE_QUANTITY = os.environ.get('MAX_CART_LINE_QUANTITY', 50) +MAX_CART_LINE_QUANTITY = int(os.environ.get('MAX_CART_LINE_QUANTITY', 50)) PAGINATE_BY = 16 DASHBOARD_PAGINATE_BY = 30
kivy__kivy-4403
Mouse position wrongly processed on high-dpi screens? Hi, we experience a strange issue where the mouse pos seems to be wrongly processed (or I misunderstood something). When executing the same code ([see gist](https://gist.github.com/johanneshk/a043333546494e0ae5957ac4c08542b7)), on a Macbook with dpi=192 the mouse pos for the top two squares (click on them) is twice what you get on a 'normal' screen with dpi=96. Which is why we assume that something's wrong with the dpi. The behavior on the 96dpi screen is what we expect. The top squares are arranged in a GridLayout, the bottom squares in a RelativeLayout. Interestingly the positions for the squares in the RelativeLayout seem to be correct. Tow screenshots that demonstrate this behavior: [correct](http://i.imgur.com/jGCMR5T.png) [wrong](http://imgur.com/WSULgp1) Any ideas?
[ { "content": "# found a way to include it more easily.\n'''\nSDL2 Window\n===========\n\nWindowing provider directly based on our own wrapped version of SDL.\n\nTODO:\n - fix keys\n - support scrolling\n - clean code\n - manage correctly all sdl events\n\n'''\n\n__all__ = ('WindowSDL2', )\n\nfrom os.path import join\nfrom kivy import kivy_data_dir\nfrom kivy.logger import Logger\nfrom kivy.base import EventLoop, ExceptionManager, stopTouchApp\nfrom kivy.clock import Clock\nfrom kivy.config import Config\nfrom kivy.core.window import WindowBase\nfrom kivy.core.window._window_sdl2 import _WindowSDL2Storage\nfrom kivy.input.provider import MotionEventProvider\nfrom kivy.input.motionevent import MotionEvent\nfrom kivy.resources import resource_find\nfrom kivy.utils import platform, deprecated\nfrom kivy.compat import unichr\nfrom collections import deque\n\nKMOD_LCTRL = 64\nKMOD_RCTRL = 128\nKMOD_RSHIFT = 2\nKMOD_LSHIFT = 1\nKMOD_RALT = 512\nKMOD_LALT = 256\nKMOD_LMETA = 1024\nKMOD_RMETA = 2048\n\nSDLK_SHIFTL = 1073742049\nSDLK_SHIFTR = 1073742053\nSDLK_LCTRL = 1073742048\nSDLK_RCTRL = 1073742052\nSDLK_LALT = 1073742050\nSDLK_RALT = 1073742054\nSDLK_LEFT = 1073741904\nSDLK_RIGHT = 1073741903\nSDLK_UP = 1073741906\nSDLK_DOWN = 1073741905\nSDLK_HOME = 1073741898\nSDLK_END = 1073741901\nSDLK_PAGEUP = 1073741899\nSDLK_PAGEDOWN = 1073741902\nSDLK_SUPER = 1073742051\nSDLK_CAPS = 1073741881\nSDLK_INSERT = 1073741897\nSDLK_KEYPADNUM = 1073741907\nSDLK_KP_DEVIDE = 1073741908\nSDLK_KP_MULTIPLY = 1073741909\nSDLK_KP_MINUS = 1073741910\nSDLK_KP_PLUS = 1073741911\nSDLK_KP_ENTER = 1073741912\nSDLK_KP_1 = 1073741913\nSDLK_KP_2 = 1073741914\nSDLK_KP_3 = 1073741915\nSDLK_KP_4 = 1073741916\nSDLK_KP_5 = 1073741917\nSDLK_KP_6 = 1073741918\nSDLK_KP_7 = 1073741919\nSDLK_KP_8 = 1073741920\nSDLK_KP_9 = 1073741921\nSDLK_KP_0 = 1073741922\nSDLK_KP_DOT = 1073741923\nSDLK_F1 = 1073741882\nSDLK_F2 = 1073741883\nSDLK_F3 = 1073741884\nSDLK_F4 = 1073741885\nSDLK_F5 = 1073741886\nSDLK_F6 = 1073741887\nSDLK_F7 = 1073741888\nSDLK_F8 = 1073741889\nSDLK_F9 = 1073741890\nSDLK_F10 = 1073741891\nSDLK_F11 = 1073741892\nSDLK_F12 = 1073741893\nSDLK_F13 = 1073741894\nSDLK_F14 = 1073741895\nSDLK_F15 = 1073741896\n\n\nclass SDL2MotionEvent(MotionEvent):\n def depack(self, args):\n self.is_touch = True\n self.profile = ('pos', )\n self.sx, self.sy = args\n win = EventLoop.window\n super(SDL2MotionEvent, self).depack(args)\n\n\nclass SDL2MotionEventProvider(MotionEventProvider):\n win = None\n q = deque()\n touchmap = {}\n\n def update(self, dispatch_fn):\n touchmap = self.touchmap\n while True:\n try:\n value = self.q.pop()\n except IndexError:\n return\n\n action, fid, x, y = value\n y = 1 - y\n if fid not in touchmap:\n touchmap[fid] = me = SDL2MotionEvent('sdl', fid, (x, y))\n else:\n me = touchmap[fid]\n me.move((x, y))\n if action == 'fingerdown':\n dispatch_fn('begin', me)\n elif action == 'fingerup':\n me.update_time_end()\n dispatch_fn('end', me)\n del touchmap[fid]\n else:\n dispatch_fn('update', me)\n\n\nclass WindowSDL(WindowBase):\n\n def __init__(self, **kwargs):\n self._pause_loop = False\n self._win = _WindowSDL2Storage()\n super(WindowSDL, self).__init__()\n self._mouse_x = self._mouse_y = -1\n self._meta_keys = (KMOD_LCTRL, KMOD_RCTRL, KMOD_RSHIFT,\n KMOD_LSHIFT, KMOD_RALT, KMOD_LALT, KMOD_LMETA,\n KMOD_RMETA)\n self.command_keys = {\n 27: 'escape',\n 9: 'tab',\n 8: 'backspace',\n 13: 'enter',\n 127: 'del',\n 271: 'enter',\n 273: 'up',\n 274: 'down',\n 275: 'right',\n 276: 'left',\n 278: 'home',\n 279: 'end',\n 280: 'pgup',\n 281: 'pgdown'}\n self._mouse_buttons_down = set()\n self.key_map = {SDLK_LEFT: 276, SDLK_RIGHT: 275, SDLK_UP: 273,\n SDLK_DOWN: 274, SDLK_HOME: 278, SDLK_END: 279,\n SDLK_PAGEDOWN: 281, SDLK_PAGEUP: 280, SDLK_SHIFTR: 303,\n SDLK_SHIFTL: 304, SDLK_SUPER: 309, SDLK_LCTRL: 305,\n SDLK_RCTRL: 306, SDLK_LALT: 308, SDLK_RALT: 307,\n SDLK_CAPS: 301, SDLK_INSERT: 277, SDLK_F1: 282,\n SDLK_F2: 283, SDLK_F3: 284, SDLK_F4: 285, SDLK_F5: 286,\n SDLK_F6: 287, SDLK_F7: 288, SDLK_F8: 289, SDLK_F9: 290,\n SDLK_F10: 291, SDLK_F11: 292, SDLK_F12: 293,\n SDLK_F13: 294, SDLK_F14: 295, SDLK_F15: 296,\n SDLK_KEYPADNUM: 300, SDLK_KP_DEVIDE: 267,\n SDLK_KP_MULTIPLY: 268, SDLK_KP_MINUS: 269,\n SDLK_KP_PLUS: 270, SDLK_KP_ENTER: 271,\n SDLK_KP_DOT: 266, SDLK_KP_0: 256, SDLK_KP_1: 257,\n SDLK_KP_2: 258, SDLK_KP_3: 259, SDLK_KP_4: 260,\n SDLK_KP_5: 261, SDLK_KP_6: 262, SDLK_KP_7: 263,\n SDLK_KP_8: 264, SDLK_KP_9: 265}\n if platform == 'ios':\n # XXX ios keyboard suck, when backspace is hit, the delete\n # keycode is sent. fix it.\n self.key_map[127] = 8\n elif platform == 'android':\n # map android back button to escape\n self.key_map[1073742094] = 27\n\n self.bind(minimum_width=self._set_minimum_size,\n minimum_height=self._set_minimum_size)\n\n def _set_minimum_size(self, *args):\n minimum_width = self.minimum_width\n minimum_height = self.minimum_height\n if minimum_width and minimum_height:\n self._win.set_minimum_size(minimum_width, minimum_height)\n elif minimum_width or minimum_height:\n Logger.warning(\n 'Both Window.minimum_width and Window.minimum_height must be '\n 'bigger than 0 for the size restriction to take effect.')\n\n def _event_filter(self, action):\n from kivy.app import App\n if action == 'app_terminating':\n EventLoop.quit = True\n self.close()\n\n elif action == 'app_lowmemory':\n self.dispatch('on_memorywarning')\n\n elif action == 'app_willenterbackground':\n from kivy.base import stopTouchApp\n app = App.get_running_app()\n if not app:\n Logger.info('WindowSDL: No running App found, exit.')\n stopTouchApp()\n return 0\n\n if not app.dispatch('on_pause'):\n Logger.info('WindowSDL: App doesn\\'t support pause mode, stop.')\n stopTouchApp()\n return 0\n\n self._pause_loop = True\n\n elif action == 'app_didenterforeground':\n # on iOS, the did enter foreground is launched at the start\n # of the application. in our case, we want it only when the app\n # is resumed\n if self._pause_loop:\n self._pause_loop = False\n app = App.get_running_app()\n app.dispatch('on_resume')\n\n return 0\n\n def create_window(self, *largs):\n if self._fake_fullscreen:\n if not self.borderless:\n self.fullscreen = self._fake_fullscreen = False\n elif not self.fullscreen or self.fullscreen == 'auto':\n self.borderless = self._fake_fullscreen = False\n if self.fullscreen == 'fake':\n self.borderless = self._fake_fullscreen = True\n Logger.warning(\"The 'fake' fullscreen option has been \"\n \"deprecated, use Window.borderless or the \"\n \"borderless Config option instead.\")\n\n if not self.initialized:\n\n if self.position == 'auto':\n pos = None, None\n elif self.position == 'custom':\n pos = self.left, self.top\n\n # ensure we have an event filter\n self._win.set_event_filter(self._event_filter)\n\n # setup window\n w, h = self.system_size\n resizable = Config.getboolean('graphics', 'resizable')\n state = (Config.get('graphics', 'window_state')\n if self._is_desktop else None)\n self.system_size = _size = self._win.setup_window(\n pos[0], pos[1], w, h, self.borderless,\n self.fullscreen, resizable, state)\n\n # calculate density\n sz = self._win._get_gl_size()[0]\n self._density = density = sz / _size[0]\n if self._is_desktop and self.size[0] != _size[0]:\n self.dpi = density * 96.\n\n # never stay with a None pos, application using w.center\n # will be fired.\n self._pos = (0, 0)\n self._set_minimum_size()\n\n if state == 'hidden':\n self._focus = False\n else:\n w, h = self.system_size\n self._win.resize_window(w, h)\n self._win.set_border_state(self.borderless)\n self._win.set_fullscreen_mode(self.fullscreen)\n\n super(WindowSDL, self).create_window()\n # set mouse visibility\n self._set_cursor_state(self.show_cursor)\n\n if self.initialized:\n return\n\n # auto add input provider\n Logger.info('Window: auto add sdl2 input provider')\n from kivy.base import EventLoop\n SDL2MotionEventProvider.win = self\n EventLoop.add_input_provider(SDL2MotionEventProvider('sdl', ''))\n\n # set window icon before calling set_mode\n try:\n filename_icon = self.icon or Config.get('kivy', 'window_icon')\n if filename_icon == '':\n logo_size = 32\n if platform == 'macosx':\n logo_size = 512\n elif platform == 'win':\n logo_size = 64\n filename_icon = 'kivy-icon-{}.png'.format(logo_size)\n filename_icon = resource_find(\n join(kivy_data_dir, 'logo', filename_icon))\n self.set_icon(filename_icon)\n except:\n Logger.exception('Window: cannot set icon')\n\n def close(self):\n self._win.teardown_window()\n self.dispatch('on_close')\n\n def maximize(self):\n if self._is_desktop:\n self._win.maximize_window()\n else:\n Logger.warning('Window: maximize() is used only on desktop OSes.')\n\n def minimize(self):\n if self._is_desktop:\n self._win.minimize_window()\n else:\n Logger.warning('Window: minimize() is used only on desktop OSes.')\n\n def restore(self):\n if self._is_desktop:\n self._win.restore_window()\n else:\n Logger.warning('Window: restore() is used only on desktop OSes.')\n\n def hide(self):\n if self._is_desktop:\n self._win.hide_window()\n else:\n Logger.warning('Window: hide() is used only on desktop OSes.')\n\n def show(self):\n if self._is_desktop:\n self._win.show_window()\n else:\n Logger.warning('Window: show() is used only on desktop OSes.')\n\n def raise_window(self):\n if self._is_desktop:\n self._win.raise_window()\n else:\n Logger.warning('Window: show() is used only on desktop OSes.')\n\n @deprecated\n def toggle_fullscreen(self):\n if self.fullscreen in (True, 'auto'):\n self.fullscreen = False\n else:\n self.fullscreen = 'auto'\n\n def set_title(self, title):\n self._win.set_window_title(title)\n\n def set_icon(self, filename):\n self._win.set_window_icon(str(filename))\n\n def screenshot(self, *largs, **kwargs):\n filename = super(WindowSDL, self).screenshot(*largs, **kwargs)\n if filename is None:\n return\n\n from kivy.graphics.opengl import glReadPixels, GL_RGB, GL_UNSIGNED_BYTE\n width, height = self.size\n data = glReadPixels(0, 0, width, height, GL_RGB, GL_UNSIGNED_BYTE)\n self._win.save_bytes_in_png(filename, data, width, height)\n Logger.debug('Window: Screenshot saved at <%s>' % filename)\n return filename\n\n def flip(self):\n self._win.flip()\n super(WindowSDL, self).flip()\n\n def _set_cursor_state(self, value):\n self._win._set_cursor_state(value)\n\n def _fix_mouse_pos(self, x, y):\n y -= 1\n self.mouse_pos = x, self.system_size[1] - y\n return x, y\n\n def _mainloop(self):\n EventLoop.idle()\n\n # for android/iOS, we don't want to have any event nor executing our\n # main loop while the pause is going on. This loop wait any event (not\n # handled by the event filter), and remove them from the queue.\n # Nothing happen during the pause on iOS, except gyroscope value sended\n # over joystick. So it's safe.\n while self._pause_loop:\n self._win.wait_event()\n if not self._pause_loop:\n break\n self._win.poll()\n\n while True:\n event = self._win.poll()\n if event is False:\n break\n if event is None:\n continue\n\n action, args = event[0], event[1:]\n if action == 'quit':\n if self.dispatch('on_request_close'):\n continue\n EventLoop.quit = True\n self.close()\n break\n\n elif action in ('fingermotion', 'fingerdown', 'fingerup'):\n # for finger, pass the raw event to SDL motion event provider\n # XXX this is problematic. On OSX, it generates touches with 0,\n # 0 coordinates, at the same times as mouse. But it works.\n # We have a conflict of using either the mouse or the finger.\n # Right now, we have no mechanism that we could use to know\n # which is the preferred one for the application.\n if platform in ('ios', 'android'):\n SDL2MotionEventProvider.q.appendleft(event)\n pass\n\n elif action == 'mousemotion':\n x, y = args\n x, y = self._fix_mouse_pos(x, y)\n self._mouse_x = x\n self._mouse_y = y\n # don't dispatch motion if no button are pressed\n if len(self._mouse_buttons_down) == 0:\n continue\n self._mouse_meta = self.modifiers\n self.dispatch('on_mouse_move', x, y, self.modifiers)\n\n elif action in ('mousebuttondown', 'mousebuttonup'):\n x, y, button = args\n x, y = self._fix_mouse_pos(x, y)\n btn = 'left'\n if button == 3:\n btn = 'right'\n elif button == 2:\n btn = 'middle'\n eventname = 'on_mouse_down'\n self._mouse_buttons_down.add(button)\n if action == 'mousebuttonup':\n eventname = 'on_mouse_up'\n self._mouse_buttons_down.remove(button)\n self._mouse_x = x\n self._mouse_y = y\n self.dispatch(eventname, x, y, btn, self.modifiers)\n elif action.startswith('mousewheel'):\n self._update_modifiers()\n x, y, button = args\n btn = 'scrolldown'\n if action.endswith('up'):\n btn = 'scrollup'\n elif action.endswith('right'):\n btn = 'scrollright'\n elif action.endswith('left'):\n btn = 'scrollleft'\n\n self._mouse_meta = self.modifiers\n self._mouse_btn = btn\n #times = x if y == 0 else y\n #times = min(abs(times), 100)\n #for k in range(times):\n self._mouse_down = True\n self.dispatch('on_mouse_down',\n self._mouse_x, self._mouse_y, btn, self.modifiers)\n self._mouse_down = False\n self.dispatch('on_mouse_up',\n self._mouse_x, self._mouse_y, btn, self.modifiers)\n\n elif action == 'dropfile':\n dropfile = args\n self.dispatch('on_dropfile', dropfile[0])\n # video resize\n elif action == 'windowresized':\n self._size = self._win.window_size\n # don't use trigger here, we want to delay the resize event\n cb = self._do_resize\n Clock.unschedule(cb)\n Clock.schedule_once(cb, .1)\n\n elif action == 'windowresized':\n self.canvas.ask_update()\n\n elif action == 'windowrestored':\n self.dispatch('on_restore')\n self.canvas.ask_update()\n\n elif action == 'windowexposed':\n self.canvas.ask_update()\n\n elif action == 'windowminimized':\n self.dispatch('on_minimize')\n if Config.getboolean('kivy', 'pause_on_minimize'):\n self.do_pause()\n\n elif action == 'windowmaximized':\n self.dispatch('on_maximize')\n\n elif action == 'windowhidden':\n self.dispatch('on_hide')\n\n elif action == 'windowshown':\n self.dispatch('on_show')\n\n elif action == 'windowfocusgained':\n self._focus = True\n\n elif action == 'windowfocuslost':\n self._focus = False\n\n elif action == 'windowenter':\n self.dispatch('on_cursor_enter')\n\n elif action == 'windowleave':\n self.dispatch('on_cursor_leave')\n\n elif action == 'joyaxismotion':\n stickid, axisid, value = args\n self.dispatch('on_joy_axis', stickid, axisid, value)\n elif action == 'joyhatmotion':\n stickid, hatid, value = args\n self.dispatch('on_joy_hat', stickid, hatid, value)\n elif action == 'joyballmotion':\n stickid, ballid, xrel, yrel = args\n self.dispatch('on_joy_ball', stickid, ballid, xrel, yrel)\n elif action == 'joybuttondown':\n stickid, buttonid = args\n self.dispatch('on_joy_button_down', stickid, buttonid)\n elif action == 'joybuttonup':\n stickid, buttonid = args\n self.dispatch('on_joy_button_up', stickid, buttonid)\n\n elif action in ('keydown', 'keyup'):\n mod, key, scancode, kstr = args\n\n try:\n key = self.key_map[key]\n except KeyError:\n pass\n\n if action == 'keydown':\n self._update_modifiers(mod, key)\n else:\n self._update_modifiers(mod) # ignore the key, it\n # has been released\n\n # if mod in self._meta_keys:\n if (key not in self._modifiers and\n key not in self.command_keys.keys()):\n try:\n kstr = unichr(key)\n except ValueError:\n pass\n #if 'shift' in self._modifiers and key\\\n # not in self.command_keys.keys():\n # return\n\n if action == 'keyup':\n self.dispatch('on_key_up', key, scancode)\n continue\n\n # don't dispatch more key if down event is accepted\n if self.dispatch('on_key_down', key,\n scancode, kstr,\n self.modifiers):\n continue\n self.dispatch('on_keyboard', key,\n scancode, kstr,\n self.modifiers)\n\n elif action == 'textinput':\n text = args[0]\n self.dispatch('on_textinput', text)\n\n # unhandled event !\n else:\n Logger.trace('WindowSDL: Unhandled event %s' % str(event))\n\n def _do_resize(self, dt):\n Logger.debug('Window: Resize window to %s' % str(self.size))\n self._win.resize_window(*self._size)\n self.dispatch('on_resize', *self.size)\n\n def do_pause(self):\n # should go to app pause mode (desktop style)\n from kivy.app import App\n from kivy.base import stopTouchApp\n app = App.get_running_app()\n if not app:\n Logger.info('WindowSDL: No running App found, exit.')\n stopTouchApp()\n return\n\n if not app.dispatch('on_pause'):\n Logger.info('WindowSDL: App doesn\\'t support pause mode, stop.')\n stopTouchApp()\n return\n\n # XXX FIXME wait for sdl resume\n while True:\n event = self._win.poll()\n if event is False:\n continue\n if event is None:\n continue\n\n action, args = event[0], event[1:]\n if action == 'quit':\n EventLoop.quit = True\n self.close()\n break\n elif action == 'app_willenterforeground':\n break\n elif action == 'windowrestored':\n break\n\n app.dispatch('on_resume')\n\n def mainloop(self):\n # don't known why, but pygame required a resize event\n # for opengl, before mainloop... window reinit ?\n #self.dispatch('on_resize', *self.size)\n\n while not EventLoop.quit and EventLoop.status == 'started':\n try:\n self._mainloop()\n except BaseException as inst:\n # use exception manager first\n r = ExceptionManager.handle_exception(inst)\n if r == ExceptionManager.RAISE:\n stopTouchApp()\n raise\n else:\n pass\n\n #\n # Pygame wrapper\n #\n def _update_modifiers(self, mods=None, key=None):\n # Available mod, from dir(pygame)\n # 'KMOD_ALT', 'KMOD_CAPS', 'KMOD_CTRL', 'KMOD_LALT',\n # 'KMOD_LCTRL', 'KMOD_LMETA', 'KMOD_LSHIFT', 'KMOD_META',\n # 'KMOD_MODE', 'KMOD_NONE'\n if mods is None and key is None:\n return\n modifiers = set()\n\n if mods is not None:\n if mods & (KMOD_RSHIFT | KMOD_LSHIFT):\n modifiers.add('shift')\n if mods & (KMOD_RALT | KMOD_LALT):\n modifiers.add('alt')\n if mods & (KMOD_RCTRL | KMOD_LCTRL):\n modifiers.add('ctrl')\n if mods & (KMOD_RMETA | KMOD_LMETA):\n modifiers.add('meta')\n\n if key is not None:\n if key in (KMOD_RSHIFT, KMOD_LSHIFT):\n modifiers.add('shift')\n if key in (KMOD_RALT, KMOD_LALT):\n modifiers.add('alt')\n if key in (KMOD_RCTRL, KMOD_LCTRL):\n modifiers.add('ctrl')\n if key in (KMOD_RMETA, KMOD_LMETA):\n modifiers.add('meta')\n\n self._modifiers = list(modifiers)\n return\n\n def request_keyboard(self, callback, target, input_type='text'):\n self._sdl_keyboard = super(WindowSDL, self).\\\n request_keyboard(callback, target, input_type)\n self._win.show_keyboard()\n Clock.schedule_interval(self._check_keyboard_shown, 1 / 5.)\n return self._sdl_keyboard\n\n def release_keyboard(self, *largs):\n super(WindowSDL, self).release_keyboard(*largs)\n self._win.hide_keyboard()\n self._sdl_keyboard = None\n return True\n\n def _check_keyboard_shown(self, dt):\n if self._sdl_keyboard is None:\n return False\n if not self._win.is_keyboard_shown():\n self._sdl_keyboard.release()\n\n def map_key(self, original_key, new_key):\n self.key_map[original_key] = new_key\n\n def unmap_key(self, key):\n if key in self.key_map:\n del self.key_map[key]\n", "path": "kivy/core/window/window_sdl2.py" } ]
[ { "content": "# found a way to include it more easily.\n'''\nSDL2 Window\n===========\n\nWindowing provider directly based on our own wrapped version of SDL.\n\nTODO:\n - fix keys\n - support scrolling\n - clean code\n - manage correctly all sdl events\n\n'''\n\n__all__ = ('WindowSDL2', )\n\nfrom os.path import join\nfrom kivy import kivy_data_dir\nfrom kivy.logger import Logger\nfrom kivy.base import EventLoop, ExceptionManager, stopTouchApp\nfrom kivy.clock import Clock\nfrom kivy.config import Config\nfrom kivy.core.window import WindowBase\nfrom kivy.core.window._window_sdl2 import _WindowSDL2Storage\nfrom kivy.input.provider import MotionEventProvider\nfrom kivy.input.motionevent import MotionEvent\nfrom kivy.resources import resource_find\nfrom kivy.utils import platform, deprecated\nfrom kivy.compat import unichr\nfrom collections import deque\n\nKMOD_LCTRL = 64\nKMOD_RCTRL = 128\nKMOD_RSHIFT = 2\nKMOD_LSHIFT = 1\nKMOD_RALT = 512\nKMOD_LALT = 256\nKMOD_LMETA = 1024\nKMOD_RMETA = 2048\n\nSDLK_SHIFTL = 1073742049\nSDLK_SHIFTR = 1073742053\nSDLK_LCTRL = 1073742048\nSDLK_RCTRL = 1073742052\nSDLK_LALT = 1073742050\nSDLK_RALT = 1073742054\nSDLK_LEFT = 1073741904\nSDLK_RIGHT = 1073741903\nSDLK_UP = 1073741906\nSDLK_DOWN = 1073741905\nSDLK_HOME = 1073741898\nSDLK_END = 1073741901\nSDLK_PAGEUP = 1073741899\nSDLK_PAGEDOWN = 1073741902\nSDLK_SUPER = 1073742051\nSDLK_CAPS = 1073741881\nSDLK_INSERT = 1073741897\nSDLK_KEYPADNUM = 1073741907\nSDLK_KP_DEVIDE = 1073741908\nSDLK_KP_MULTIPLY = 1073741909\nSDLK_KP_MINUS = 1073741910\nSDLK_KP_PLUS = 1073741911\nSDLK_KP_ENTER = 1073741912\nSDLK_KP_1 = 1073741913\nSDLK_KP_2 = 1073741914\nSDLK_KP_3 = 1073741915\nSDLK_KP_4 = 1073741916\nSDLK_KP_5 = 1073741917\nSDLK_KP_6 = 1073741918\nSDLK_KP_7 = 1073741919\nSDLK_KP_8 = 1073741920\nSDLK_KP_9 = 1073741921\nSDLK_KP_0 = 1073741922\nSDLK_KP_DOT = 1073741923\nSDLK_F1 = 1073741882\nSDLK_F2 = 1073741883\nSDLK_F3 = 1073741884\nSDLK_F4 = 1073741885\nSDLK_F5 = 1073741886\nSDLK_F6 = 1073741887\nSDLK_F7 = 1073741888\nSDLK_F8 = 1073741889\nSDLK_F9 = 1073741890\nSDLK_F10 = 1073741891\nSDLK_F11 = 1073741892\nSDLK_F12 = 1073741893\nSDLK_F13 = 1073741894\nSDLK_F14 = 1073741895\nSDLK_F15 = 1073741896\n\n\nclass SDL2MotionEvent(MotionEvent):\n def depack(self, args):\n self.is_touch = True\n self.profile = ('pos', )\n self.sx, self.sy = args\n win = EventLoop.window\n super(SDL2MotionEvent, self).depack(args)\n\n\nclass SDL2MotionEventProvider(MotionEventProvider):\n win = None\n q = deque()\n touchmap = {}\n\n def update(self, dispatch_fn):\n touchmap = self.touchmap\n while True:\n try:\n value = self.q.pop()\n except IndexError:\n return\n\n action, fid, x, y = value\n y = 1 - y\n if fid not in touchmap:\n touchmap[fid] = me = SDL2MotionEvent('sdl', fid, (x, y))\n else:\n me = touchmap[fid]\n me.move((x, y))\n if action == 'fingerdown':\n dispatch_fn('begin', me)\n elif action == 'fingerup':\n me.update_time_end()\n dispatch_fn('end', me)\n del touchmap[fid]\n else:\n dispatch_fn('update', me)\n\n\nclass WindowSDL(WindowBase):\n\n def __init__(self, **kwargs):\n self._pause_loop = False\n self._win = _WindowSDL2Storage()\n super(WindowSDL, self).__init__()\n self._mouse_x = self._mouse_y = -1\n self._meta_keys = (KMOD_LCTRL, KMOD_RCTRL, KMOD_RSHIFT,\n KMOD_LSHIFT, KMOD_RALT, KMOD_LALT, KMOD_LMETA,\n KMOD_RMETA)\n self.command_keys = {\n 27: 'escape',\n 9: 'tab',\n 8: 'backspace',\n 13: 'enter',\n 127: 'del',\n 271: 'enter',\n 273: 'up',\n 274: 'down',\n 275: 'right',\n 276: 'left',\n 278: 'home',\n 279: 'end',\n 280: 'pgup',\n 281: 'pgdown'}\n self._mouse_buttons_down = set()\n self.key_map = {SDLK_LEFT: 276, SDLK_RIGHT: 275, SDLK_UP: 273,\n SDLK_DOWN: 274, SDLK_HOME: 278, SDLK_END: 279,\n SDLK_PAGEDOWN: 281, SDLK_PAGEUP: 280, SDLK_SHIFTR: 303,\n SDLK_SHIFTL: 304, SDLK_SUPER: 309, SDLK_LCTRL: 305,\n SDLK_RCTRL: 306, SDLK_LALT: 308, SDLK_RALT: 307,\n SDLK_CAPS: 301, SDLK_INSERT: 277, SDLK_F1: 282,\n SDLK_F2: 283, SDLK_F3: 284, SDLK_F4: 285, SDLK_F5: 286,\n SDLK_F6: 287, SDLK_F7: 288, SDLK_F8: 289, SDLK_F9: 290,\n SDLK_F10: 291, SDLK_F11: 292, SDLK_F12: 293,\n SDLK_F13: 294, SDLK_F14: 295, SDLK_F15: 296,\n SDLK_KEYPADNUM: 300, SDLK_KP_DEVIDE: 267,\n SDLK_KP_MULTIPLY: 268, SDLK_KP_MINUS: 269,\n SDLK_KP_PLUS: 270, SDLK_KP_ENTER: 271,\n SDLK_KP_DOT: 266, SDLK_KP_0: 256, SDLK_KP_1: 257,\n SDLK_KP_2: 258, SDLK_KP_3: 259, SDLK_KP_4: 260,\n SDLK_KP_5: 261, SDLK_KP_6: 262, SDLK_KP_7: 263,\n SDLK_KP_8: 264, SDLK_KP_9: 265}\n if platform == 'ios':\n # XXX ios keyboard suck, when backspace is hit, the delete\n # keycode is sent. fix it.\n self.key_map[127] = 8\n elif platform == 'android':\n # map android back button to escape\n self.key_map[1073742094] = 27\n\n self.bind(minimum_width=self._set_minimum_size,\n minimum_height=self._set_minimum_size)\n\n def _set_minimum_size(self, *args):\n minimum_width = self.minimum_width\n minimum_height = self.minimum_height\n if minimum_width and minimum_height:\n self._win.set_minimum_size(minimum_width, minimum_height)\n elif minimum_width or minimum_height:\n Logger.warning(\n 'Both Window.minimum_width and Window.minimum_height must be '\n 'bigger than 0 for the size restriction to take effect.')\n\n def _event_filter(self, action):\n from kivy.app import App\n if action == 'app_terminating':\n EventLoop.quit = True\n self.close()\n\n elif action == 'app_lowmemory':\n self.dispatch('on_memorywarning')\n\n elif action == 'app_willenterbackground':\n from kivy.base import stopTouchApp\n app = App.get_running_app()\n if not app:\n Logger.info('WindowSDL: No running App found, exit.')\n stopTouchApp()\n return 0\n\n if not app.dispatch('on_pause'):\n Logger.info('WindowSDL: App doesn\\'t support pause mode, stop.')\n stopTouchApp()\n return 0\n\n self._pause_loop = True\n\n elif action == 'app_didenterforeground':\n # on iOS, the did enter foreground is launched at the start\n # of the application. in our case, we want it only when the app\n # is resumed\n if self._pause_loop:\n self._pause_loop = False\n app = App.get_running_app()\n app.dispatch('on_resume')\n\n return 0\n\n def create_window(self, *largs):\n if self._fake_fullscreen:\n if not self.borderless:\n self.fullscreen = self._fake_fullscreen = False\n elif not self.fullscreen or self.fullscreen == 'auto':\n self.borderless = self._fake_fullscreen = False\n if self.fullscreen == 'fake':\n self.borderless = self._fake_fullscreen = True\n Logger.warning(\"The 'fake' fullscreen option has been \"\n \"deprecated, use Window.borderless or the \"\n \"borderless Config option instead.\")\n\n if not self.initialized:\n\n if self.position == 'auto':\n pos = None, None\n elif self.position == 'custom':\n pos = self.left, self.top\n\n # ensure we have an event filter\n self._win.set_event_filter(self._event_filter)\n\n # setup window\n w, h = self.system_size\n resizable = Config.getboolean('graphics', 'resizable')\n state = (Config.get('graphics', 'window_state')\n if self._is_desktop else None)\n self.system_size = _size = self._win.setup_window(\n pos[0], pos[1], w, h, self.borderless,\n self.fullscreen, resizable, state)\n\n # calculate density\n sz = self._win._get_gl_size()[0]\n self._density = density = sz / _size[0]\n if self._is_desktop and self.size[0] != _size[0]:\n self.dpi = density * 96.\n\n # never stay with a None pos, application using w.center\n # will be fired.\n self._pos = (0, 0)\n self._set_minimum_size()\n\n if state == 'hidden':\n self._focus = False\n else:\n w, h = self.system_size\n self._win.resize_window(w, h)\n self._win.set_border_state(self.borderless)\n self._win.set_fullscreen_mode(self.fullscreen)\n\n super(WindowSDL, self).create_window()\n # set mouse visibility\n self._set_cursor_state(self.show_cursor)\n\n if self.initialized:\n return\n\n # auto add input provider\n Logger.info('Window: auto add sdl2 input provider')\n from kivy.base import EventLoop\n SDL2MotionEventProvider.win = self\n EventLoop.add_input_provider(SDL2MotionEventProvider('sdl', ''))\n\n # set window icon before calling set_mode\n try:\n filename_icon = self.icon or Config.get('kivy', 'window_icon')\n if filename_icon == '':\n logo_size = 32\n if platform == 'macosx':\n logo_size = 512\n elif platform == 'win':\n logo_size = 64\n filename_icon = 'kivy-icon-{}.png'.format(logo_size)\n filename_icon = resource_find(\n join(kivy_data_dir, 'logo', filename_icon))\n self.set_icon(filename_icon)\n except:\n Logger.exception('Window: cannot set icon')\n\n def close(self):\n self._win.teardown_window()\n self.dispatch('on_close')\n\n def maximize(self):\n if self._is_desktop:\n self._win.maximize_window()\n else:\n Logger.warning('Window: maximize() is used only on desktop OSes.')\n\n def minimize(self):\n if self._is_desktop:\n self._win.minimize_window()\n else:\n Logger.warning('Window: minimize() is used only on desktop OSes.')\n\n def restore(self):\n if self._is_desktop:\n self._win.restore_window()\n else:\n Logger.warning('Window: restore() is used only on desktop OSes.')\n\n def hide(self):\n if self._is_desktop:\n self._win.hide_window()\n else:\n Logger.warning('Window: hide() is used only on desktop OSes.')\n\n def show(self):\n if self._is_desktop:\n self._win.show_window()\n else:\n Logger.warning('Window: show() is used only on desktop OSes.')\n\n def raise_window(self):\n if self._is_desktop:\n self._win.raise_window()\n else:\n Logger.warning('Window: show() is used only on desktop OSes.')\n\n @deprecated\n def toggle_fullscreen(self):\n if self.fullscreen in (True, 'auto'):\n self.fullscreen = False\n else:\n self.fullscreen = 'auto'\n\n def set_title(self, title):\n self._win.set_window_title(title)\n\n def set_icon(self, filename):\n self._win.set_window_icon(str(filename))\n\n def screenshot(self, *largs, **kwargs):\n filename = super(WindowSDL, self).screenshot(*largs, **kwargs)\n if filename is None:\n return\n\n from kivy.graphics.opengl import glReadPixels, GL_RGB, GL_UNSIGNED_BYTE\n width, height = self.size\n data = glReadPixels(0, 0, width, height, GL_RGB, GL_UNSIGNED_BYTE)\n self._win.save_bytes_in_png(filename, data, width, height)\n Logger.debug('Window: Screenshot saved at <%s>' % filename)\n return filename\n\n def flip(self):\n self._win.flip()\n super(WindowSDL, self).flip()\n\n def _set_cursor_state(self, value):\n self._win._set_cursor_state(value)\n\n def _fix_mouse_pos(self, x, y):\n y -= 1\n self.mouse_pos = x * self._density, (self.system_size[1] - y) * self._density\n return x, y\n\n def _mainloop(self):\n EventLoop.idle()\n\n # for android/iOS, we don't want to have any event nor executing our\n # main loop while the pause is going on. This loop wait any event (not\n # handled by the event filter), and remove them from the queue.\n # Nothing happen during the pause on iOS, except gyroscope value sended\n # over joystick. So it's safe.\n while self._pause_loop:\n self._win.wait_event()\n if not self._pause_loop:\n break\n self._win.poll()\n\n while True:\n event = self._win.poll()\n if event is False:\n break\n if event is None:\n continue\n\n action, args = event[0], event[1:]\n if action == 'quit':\n if self.dispatch('on_request_close'):\n continue\n EventLoop.quit = True\n self.close()\n break\n\n elif action in ('fingermotion', 'fingerdown', 'fingerup'):\n # for finger, pass the raw event to SDL motion event provider\n # XXX this is problematic. On OSX, it generates touches with 0,\n # 0 coordinates, at the same times as mouse. But it works.\n # We have a conflict of using either the mouse or the finger.\n # Right now, we have no mechanism that we could use to know\n # which is the preferred one for the application.\n if platform in ('ios', 'android'):\n SDL2MotionEventProvider.q.appendleft(event)\n pass\n\n elif action == 'mousemotion':\n x, y = args\n x, y = self._fix_mouse_pos(x, y)\n self._mouse_x = x\n self._mouse_y = y\n # don't dispatch motion if no button are pressed\n if len(self._mouse_buttons_down) == 0:\n continue\n self._mouse_meta = self.modifiers\n self.dispatch('on_mouse_move', x, y, self.modifiers)\n\n elif action in ('mousebuttondown', 'mousebuttonup'):\n x, y, button = args\n x, y = self._fix_mouse_pos(x, y)\n btn = 'left'\n if button == 3:\n btn = 'right'\n elif button == 2:\n btn = 'middle'\n eventname = 'on_mouse_down'\n self._mouse_buttons_down.add(button)\n if action == 'mousebuttonup':\n eventname = 'on_mouse_up'\n self._mouse_buttons_down.remove(button)\n self._mouse_x = x\n self._mouse_y = y\n self.dispatch(eventname, x, y, btn, self.modifiers)\n elif action.startswith('mousewheel'):\n self._update_modifiers()\n x, y, button = args\n btn = 'scrolldown'\n if action.endswith('up'):\n btn = 'scrollup'\n elif action.endswith('right'):\n btn = 'scrollright'\n elif action.endswith('left'):\n btn = 'scrollleft'\n\n self._mouse_meta = self.modifiers\n self._mouse_btn = btn\n #times = x if y == 0 else y\n #times = min(abs(times), 100)\n #for k in range(times):\n self._mouse_down = True\n self.dispatch('on_mouse_down',\n self._mouse_x, self._mouse_y, btn, self.modifiers)\n self._mouse_down = False\n self.dispatch('on_mouse_up',\n self._mouse_x, self._mouse_y, btn, self.modifiers)\n\n elif action == 'dropfile':\n dropfile = args\n self.dispatch('on_dropfile', dropfile[0])\n # video resize\n elif action == 'windowresized':\n self._size = self._win.window_size\n # don't use trigger here, we want to delay the resize event\n cb = self._do_resize\n Clock.unschedule(cb)\n Clock.schedule_once(cb, .1)\n\n elif action == 'windowresized':\n self.canvas.ask_update()\n\n elif action == 'windowrestored':\n self.dispatch('on_restore')\n self.canvas.ask_update()\n\n elif action == 'windowexposed':\n self.canvas.ask_update()\n\n elif action == 'windowminimized':\n self.dispatch('on_minimize')\n if Config.getboolean('kivy', 'pause_on_minimize'):\n self.do_pause()\n\n elif action == 'windowmaximized':\n self.dispatch('on_maximize')\n\n elif action == 'windowhidden':\n self.dispatch('on_hide')\n\n elif action == 'windowshown':\n self.dispatch('on_show')\n\n elif action == 'windowfocusgained':\n self._focus = True\n\n elif action == 'windowfocuslost':\n self._focus = False\n\n elif action == 'windowenter':\n self.dispatch('on_cursor_enter')\n\n elif action == 'windowleave':\n self.dispatch('on_cursor_leave')\n\n elif action == 'joyaxismotion':\n stickid, axisid, value = args\n self.dispatch('on_joy_axis', stickid, axisid, value)\n elif action == 'joyhatmotion':\n stickid, hatid, value = args\n self.dispatch('on_joy_hat', stickid, hatid, value)\n elif action == 'joyballmotion':\n stickid, ballid, xrel, yrel = args\n self.dispatch('on_joy_ball', stickid, ballid, xrel, yrel)\n elif action == 'joybuttondown':\n stickid, buttonid = args\n self.dispatch('on_joy_button_down', stickid, buttonid)\n elif action == 'joybuttonup':\n stickid, buttonid = args\n self.dispatch('on_joy_button_up', stickid, buttonid)\n\n elif action in ('keydown', 'keyup'):\n mod, key, scancode, kstr = args\n\n try:\n key = self.key_map[key]\n except KeyError:\n pass\n\n if action == 'keydown':\n self._update_modifiers(mod, key)\n else:\n self._update_modifiers(mod) # ignore the key, it\n # has been released\n\n # if mod in self._meta_keys:\n if (key not in self._modifiers and\n key not in self.command_keys.keys()):\n try:\n kstr = unichr(key)\n except ValueError:\n pass\n #if 'shift' in self._modifiers and key\\\n # not in self.command_keys.keys():\n # return\n\n if action == 'keyup':\n self.dispatch('on_key_up', key, scancode)\n continue\n\n # don't dispatch more key if down event is accepted\n if self.dispatch('on_key_down', key,\n scancode, kstr,\n self.modifiers):\n continue\n self.dispatch('on_keyboard', key,\n scancode, kstr,\n self.modifiers)\n\n elif action == 'textinput':\n text = args[0]\n self.dispatch('on_textinput', text)\n\n # unhandled event !\n else:\n Logger.trace('WindowSDL: Unhandled event %s' % str(event))\n\n def _do_resize(self, dt):\n Logger.debug('Window: Resize window to %s' % str(self.size))\n self._win.resize_window(*self._size)\n self.dispatch('on_resize', *self.size)\n\n def do_pause(self):\n # should go to app pause mode (desktop style)\n from kivy.app import App\n from kivy.base import stopTouchApp\n app = App.get_running_app()\n if not app:\n Logger.info('WindowSDL: No running App found, exit.')\n stopTouchApp()\n return\n\n if not app.dispatch('on_pause'):\n Logger.info('WindowSDL: App doesn\\'t support pause mode, stop.')\n stopTouchApp()\n return\n\n # XXX FIXME wait for sdl resume\n while True:\n event = self._win.poll()\n if event is False:\n continue\n if event is None:\n continue\n\n action, args = event[0], event[1:]\n if action == 'quit':\n EventLoop.quit = True\n self.close()\n break\n elif action == 'app_willenterforeground':\n break\n elif action == 'windowrestored':\n break\n\n app.dispatch('on_resume')\n\n def mainloop(self):\n # don't known why, but pygame required a resize event\n # for opengl, before mainloop... window reinit ?\n #self.dispatch('on_resize', *self.size)\n\n while not EventLoop.quit and EventLoop.status == 'started':\n try:\n self._mainloop()\n except BaseException as inst:\n # use exception manager first\n r = ExceptionManager.handle_exception(inst)\n if r == ExceptionManager.RAISE:\n stopTouchApp()\n raise\n else:\n pass\n\n #\n # Pygame wrapper\n #\n def _update_modifiers(self, mods=None, key=None):\n # Available mod, from dir(pygame)\n # 'KMOD_ALT', 'KMOD_CAPS', 'KMOD_CTRL', 'KMOD_LALT',\n # 'KMOD_LCTRL', 'KMOD_LMETA', 'KMOD_LSHIFT', 'KMOD_META',\n # 'KMOD_MODE', 'KMOD_NONE'\n if mods is None and key is None:\n return\n modifiers = set()\n\n if mods is not None:\n if mods & (KMOD_RSHIFT | KMOD_LSHIFT):\n modifiers.add('shift')\n if mods & (KMOD_RALT | KMOD_LALT):\n modifiers.add('alt')\n if mods & (KMOD_RCTRL | KMOD_LCTRL):\n modifiers.add('ctrl')\n if mods & (KMOD_RMETA | KMOD_LMETA):\n modifiers.add('meta')\n\n if key is not None:\n if key in (KMOD_RSHIFT, KMOD_LSHIFT):\n modifiers.add('shift')\n if key in (KMOD_RALT, KMOD_LALT):\n modifiers.add('alt')\n if key in (KMOD_RCTRL, KMOD_LCTRL):\n modifiers.add('ctrl')\n if key in (KMOD_RMETA, KMOD_LMETA):\n modifiers.add('meta')\n\n self._modifiers = list(modifiers)\n return\n\n def request_keyboard(self, callback, target, input_type='text'):\n self._sdl_keyboard = super(WindowSDL, self).\\\n request_keyboard(callback, target, input_type)\n self._win.show_keyboard()\n Clock.schedule_interval(self._check_keyboard_shown, 1 / 5.)\n return self._sdl_keyboard\n\n def release_keyboard(self, *largs):\n super(WindowSDL, self).release_keyboard(*largs)\n self._win.hide_keyboard()\n self._sdl_keyboard = None\n return True\n\n def _check_keyboard_shown(self, dt):\n if self._sdl_keyboard is None:\n return False\n if not self._win.is_keyboard_shown():\n self._sdl_keyboard.release()\n\n def map_key(self, original_key, new_key):\n self.key_map[original_key] = new_key\n\n def unmap_key(self, key):\n if key in self.key_map:\n del self.key_map[key]\n", "path": "kivy/core/window/window_sdl2.py" } ]
diff --git a/kivy/core/window/window_sdl2.py b/kivy/core/window/window_sdl2.py index 9809b115b5..a3fc207de4 100644 --- a/kivy/core/window/window_sdl2.py +++ b/kivy/core/window/window_sdl2.py @@ -381,7 +381,7 @@ def _set_cursor_state(self, value): def _fix_mouse_pos(self, x, y): y -= 1 - self.mouse_pos = x, self.system_size[1] - y + self.mouse_pos = x * self._density, (self.system_size[1] - y) * self._density return x, y def _mainloop(self):
SciTools__cartopy-1999
Colouring countries ### Description Cartopy stop working when you try to color some countries, like Austria (AUT), Albania among others in Europe as far as I could see. My maps heve been working fine until couple last update on april. #### Code to reproduce ```python import matplotlib.pyplot as plt import cartopy import cartopy.io.shapereader as shpreader import cartopy.crs as ccrs import matplotlib.pyplot as plt import cartopy.crs as ccrs import cartopy.feature as cfeature def area(ax, iso, clr) : shp = shpreader.natural_earth(resolution='10m',category='cultural',name='admin_0_countries') reader = shpreader.Reader(shp) countries = reader.records() for n in countries : if n.attributes['ADM0_A3'] == iso: ax.add_geometries(n.geometry, ccrs.PlateCarree(), facecolor=clr, alpha = 1.00, linewidth =0.15, edgecolor = "black", label=n.attributes['ADM0_A3']) return ax iso3 = ['USA','CAN','RUS','GBR','ISL','FRA','ITA','AUT'] # works fine with this codes #iso3 = ['USA','CAN','RUS','GBR','ISL','FRA','ITA','CHN','AUT'] # error message - due to "AUT" - Austria def main(): ax = plt.axes(projection=ccrs.Miller()) states_provinces = cfeature.NaturalEarthFeature( category='cultural', name='admin_1_states_provinces_lines', scale='50m', facecolor='none') ax.add_feature(cfeature.LAND) ax.add_feature(cfeature.COASTLINE) ax.add_feature(states_provinces, edgecolor='gray') for n in iso3 : area(ax,n,"red") plt.show() if __name__ == '__main__': main() ``` #### Traceback ```python-traceback File "C:\ProgramData\Anaconda3\lib\site-packages\cartopy\mpl\geoaxes.py", line 588, in add_geometries feature = cartopy.feature.ShapelyFeature(geoms, crs, **kwargs) File "C:\ProgramData\Anaconda3\lib\site-packages\cartopy\feature\__init__.py", line 231, in __init__ self._geoms = tuple(geometries) TypeError: 'Polygon' object is not iterable ``` <details> <summary>Full environment definition</summary> <!-- fill in the following information as appropriate --> ### Operating system Windows 10 ### Cartopy version 0.17.0 ### conda list 4.64.14 ### pip list 19.1 </details>
[ { "content": "# Copyright Cartopy Contributors\n#\n# This file is part of Cartopy and is released under the LGPL license.\n# See COPYING and COPYING.LESSER in the root of the repository for full\n# licensing details.\n\n\"\"\"\nThis module defines :class:`Feature` instances, for use with\nax.add_feature().\n\n\"\"\"\n\nfrom abc import ABCMeta, abstractmethod\n\nimport numpy as np\nimport shapely.geometry as sgeom\n\nimport cartopy.io.shapereader as shapereader\nimport cartopy.crs\n\n\nCOLORS = {'land': np.array((240, 240, 220)) / 256.,\n 'land_alt1': np.array((220, 220, 220)) / 256.,\n 'water': np.array((152, 183, 226)) / 256.}\n\"\"\"\nA dictionary of colors useful for drawing Features.\n\nThe named keys in this dictionary represent the \"type\" of\nfeature being plotted.\n\n\"\"\"\n\n_NATURAL_EARTH_GEOM_CACHE = {}\n\"\"\"\nCaches a mapping between (name, category, scale) and a tuple of the\nresulting geometries.\n\nProvides a significant performance benefit (when combined with object id\ncaching in GeoAxes.add_geometries) when producing multiple maps of the\nsame projection.\n\n\"\"\"\n\n\nclass Feature(metaclass=ABCMeta):\n \"\"\"\n Represents a collection of points, lines and polygons with convenience\n methods for common drawing and filtering operations.\n\n Parameters\n ----------\n crs\n The coordinate reference system of this Feature\n\n\n Other Parameters\n ----------------\n **kwargs\n Keyword arguments to be used when drawing this feature.\n\n\n .. seealso::\n\n To add features to the current Matplotlib axes, see\n :func:`GeoAxes <cartopy.mpl.geoaxes.GeoAxes.add_feature>`.\n\n \"\"\"\n\n def __init__(self, crs, **kwargs):\n self._crs = crs\n self._kwargs = dict(kwargs)\n\n @property\n def crs(self):\n \"\"\"The cartopy CRS for the geometries in this feature.\"\"\"\n return self._crs\n\n @property\n def kwargs(self):\n \"\"\"\n The read-only dictionary of keyword arguments that are used when\n creating the Matplotlib artists for this feature.\n\n \"\"\"\n return dict(self._kwargs)\n\n @abstractmethod\n def geometries(self):\n \"\"\"\n Return an iterator of (shapely) geometries for this feature.\n\n \"\"\"\n pass\n\n def intersecting_geometries(self, extent):\n \"\"\"\n Return an iterator of shapely geometries that intersect with\n the given extent. The extent is assumed to be in the CRS of\n the feature. If extent is None, the method returns all\n geometries for this dataset.\n\n \"\"\"\n if extent is not None:\n extent_geom = sgeom.box(extent[0], extent[2],\n extent[1], extent[3])\n return (geom for geom in self.geometries() if\n geom is not None and extent_geom.intersects(geom))\n else:\n return self.geometries()\n\n\nclass Scaler:\n \"\"\"\n General object for handling the scale of the geometries used in a Feature.\n \"\"\"\n def __init__(self, scale):\n self._scale = scale\n\n @property\n def scale(self):\n return self._scale\n\n def scale_from_extent(self, extent):\n \"\"\"\n Given an extent, update the scale.\n\n Parameters\n ----------\n extent\n The boundaries of the plotted area of a projection. The\n coordinate system of the extent should be constant, and at the\n same scale as the scales argument in the constructor.\n\n \"\"\"\n # Note: Implementation does nothing. For subclasses to specialise.\n return self._scale\n\n\nclass AdaptiveScaler(Scaler):\n \"\"\"\n Automatically select scale of geometries based on extent of plotted axes.\n \"\"\"\n def __init__(self, default_scale, limits):\n \"\"\"\n Parameters\n ----------\n default_scale\n Coarsest scale used as default when plot is at maximum extent.\n\n limits\n Scale-extent pairs at which scale of geometries change. Must be a\n tuple of tuples ordered from coarsest to finest scales. Limit\n values are the upper bounds for their corresponding scale.\n\n Example\n -------\n\n >>> s = AdaptiveScaler('coarse',\n ... (('intermediate', 30), ('fine', 10)))\n >>> s.scale_from_extent([-180, 180, -90, 90])\n 'coarse'\n >>> s.scale_from_extent([-5, 6, 45, 56])\n 'intermediate'\n >>> s.scale_from_extent([-5, 5, 45, 56])\n 'fine'\n\n \"\"\"\n super().__init__(default_scale)\n self._default_scale = default_scale\n # Upper limit on extent in degrees.\n self._limits = limits\n\n def scale_from_extent(self, extent):\n scale = self._default_scale\n\n if extent is not None:\n width = abs(extent[1] - extent[0])\n height = abs(extent[3] - extent[2])\n min_extent = min(width, height)\n\n if min_extent != 0:\n for scale_candidate, upper_bound in self._limits:\n if min_extent <= upper_bound:\n # It is a valid scale, so track it.\n scale = scale_candidate\n else:\n # This scale is not valid and we can stop looking.\n # We use the last (valid) scale that we saw.\n break\n\n self._scale = scale\n return self._scale\n\n\nclass ShapelyFeature(Feature):\n \"\"\"\n A class capable of drawing a collection of\n shapely geometries.\n\n \"\"\"\n def __init__(self, geometries, crs, **kwargs):\n \"\"\"\n Parameters\n ----------\n geometries\n A collection of shapely geometries.\n crs\n The cartopy CRS in which the provided geometries are defined.\n\n Other Parameters\n ----------------\n **kwargs\n Keyword arguments to be used when drawing this feature.\n\n \"\"\"\n super().__init__(crs, **kwargs)\n self._geoms = tuple(geometries)\n\n def geometries(self):\n return iter(self._geoms)\n\n\nclass NaturalEarthFeature(Feature):\n \"\"\"\n A simple interface to Natural Earth shapefiles.\n\n See https://www.naturalearthdata.com/\n\n \"\"\"\n def __init__(self, category, name, scale, **kwargs):\n \"\"\"\n Parameters\n ----------\n category\n The category of the dataset, i.e. either 'cultural' or 'physical'.\n name\n The name of the dataset, e.g. 'admin_0_boundary_lines_land'.\n scale\n The dataset scale, i.e. one of '10m', '50m', or '110m',\n or Scaler object. Dataset scales correspond to 1:10,000,000,\n 1:50,000,000, and 1:110,000,000 respectively.\n\n Other Parameters\n ----------------\n **kwargs\n Keyword arguments to be used when drawing this feature.\n\n \"\"\"\n super().__init__(cartopy.crs.PlateCarree(), **kwargs)\n self.category = category\n self.name = name\n\n # Cast the given scale to a (constant) Scaler if a string is passed.\n if isinstance(scale, str):\n scale = Scaler(scale)\n\n self.scaler = scale\n # Make sure this is a valid resolution\n self._validate_scale()\n\n @property\n def scale(self):\n return self.scaler.scale\n\n def _validate_scale(self):\n if self.scale not in ('110m', '50m', '10m'):\n raise ValueError(\n f'{self.scale!r} is not a valid Natural Earth scale. '\n 'Valid scales are \"110m\", \"50m\", and \"10m\".'\n )\n\n def geometries(self):\n \"\"\"\n Returns an iterator of (shapely) geometries for this feature.\n\n \"\"\"\n key = (self.name, self.category, self.scale)\n if key not in _NATURAL_EARTH_GEOM_CACHE:\n path = shapereader.natural_earth(resolution=self.scale,\n category=self.category,\n name=self.name)\n geometries = tuple(shapereader.Reader(path).geometries())\n _NATURAL_EARTH_GEOM_CACHE[key] = geometries\n else:\n geometries = _NATURAL_EARTH_GEOM_CACHE[key]\n\n return iter(geometries)\n\n def intersecting_geometries(self, extent):\n \"\"\"\n Returns an iterator of shapely geometries that intersect with\n the given extent.\n The extent is assumed to be in the CRS of the feature.\n If extent is None, the method returns all geometries for this dataset.\n \"\"\"\n self.scaler.scale_from_extent(extent)\n return super().intersecting_geometries(extent)\n\n def with_scale(self, new_scale):\n \"\"\"\n Return a copy of the feature with a new scale.\n\n Parameters\n ----------\n new_scale\n The new dataset scale, i.e. one of '10m', '50m', or '110m'.\n Corresponding to 1:10,000,000, 1:50,000,000, and 1:110,000,000\n respectively.\n\n \"\"\"\n return NaturalEarthFeature(self.category, self.name, new_scale,\n **self.kwargs)\n\n\nclass GSHHSFeature(Feature):\n \"\"\"\n An interface to the GSHHS dataset.\n\n See https://www.ngdc.noaa.gov/mgg/shorelines/gshhs.html\n\n Parameters\n ----------\n scale\n The dataset scale. One of 'auto', 'coarse', 'low', 'intermediate',\n 'high, or 'full' (default is 'auto').\n levels\n A list of integers 1-6 corresponding to the desired GSHHS feature\n levels to draw (default is [1] which corresponds to coastlines).\n\n Other Parameters\n ----------------\n **kwargs\n Keyword arguments to be used when drawing the feature. Defaults\n are edgecolor='black' and facecolor='none'.\n\n \"\"\"\n\n _geometries_cache = {}\n \"\"\"\n A mapping from scale and level to GSHHS shapely geometry::\n\n {(scale, level): geom}\n\n This provides a performance boost when plotting in interactive mode or\n instantiating multiple GSHHS artists, by reducing repeated file IO.\n\n \"\"\"\n def __init__(self, scale='auto', levels=None, **kwargs):\n super().__init__(cartopy.crs.PlateCarree(), **kwargs)\n\n if scale not in ('auto', 'a', 'coarse', 'c', 'low', 'l',\n 'intermediate', 'i', 'high', 'h', 'full', 'f'):\n raise ValueError(f\"Unknown GSHHS scale {scale!r}.\")\n self._scale = scale\n\n if levels is None:\n levels = [1]\n self._levels = set(levels)\n unknown_levels = self._levels.difference([1, 2, 3, 4])\n if unknown_levels:\n raise ValueError(f\"Unknown GSHHS levels {unknown_levels!r}.\")\n\n # Default kwargs\n self._kwargs.setdefault('edgecolor', 'black')\n self._kwargs.setdefault('facecolor', 'none')\n\n def _scale_from_extent(self, extent):\n \"\"\"\n Return the appropriate scale (e.g. 'i') for the given extent\n expressed in PlateCarree CRS.\n\n \"\"\"\n # Default to coarse scale\n scale = 'c'\n\n if extent is not None:\n # Upper limit on extent in degrees.\n scale_limits = (('c', 20.0),\n ('l', 10.0),\n ('i', 2.0),\n ('h', 0.5),\n ('f', 0.1))\n\n width = abs(extent[1] - extent[0])\n height = abs(extent[3] - extent[2])\n min_extent = min(width, height)\n if min_extent != 0:\n for scale, limit in scale_limits:\n if min_extent > limit:\n break\n\n return scale\n\n def geometries(self):\n return self.intersecting_geometries(extent=None)\n\n def intersecting_geometries(self, extent):\n if self._scale == 'auto':\n scale = self._scale_from_extent(extent)\n else:\n scale = self._scale[0]\n\n if extent is not None:\n extent_geom = sgeom.box(extent[0], extent[2],\n extent[1], extent[3])\n for level in self._levels:\n geoms = GSHHSFeature._geometries_cache.get((scale, level))\n if geoms is None:\n # Load GSHHS geometries from appropriate shape file.\n # TODO selective load based on bbox of each geom in file.\n path = shapereader.gshhs(scale, level)\n geoms = tuple(shapereader.Reader(path).geometries())\n GSHHSFeature._geometries_cache[(scale, level)] = geoms\n for geom in geoms:\n if extent is None or extent_geom.intersects(geom):\n yield geom\n\n\nclass WFSFeature(Feature):\n \"\"\"\n A class capable of drawing a collection of geometries\n obtained from an OGC Web Feature Service (WFS).\n\n This feature requires additional dependencies. If installed via pip,\n try ``pip install cartopy[ows]``.\n \"\"\"\n def __init__(self, wfs, features, **kwargs):\n \"\"\"\n Parameters\n ----------\n wfs: string or :class:`owslib.wfs.WebFeatureService` instance\n The WebFeatureService instance, or URL of a WFS service, from which\n to retrieve the geometries.\n features: string or list of strings\n The typename(s) of features available from the web service that\n will be retrieved. Somewhat analogous to layers in WMS/WMTS.\n\n Other Parameters\n ----------------\n **kwargs\n Keyword arguments to be used when drawing this feature.\n\n \"\"\"\n try:\n from cartopy.io.ogc_clients import WFSGeometrySource\n except ImportError as e:\n raise ImportError(\n 'WFSFeature requires additional dependencies. If installed '\n 'via pip, try `pip install cartopy[ows]`.\\n') from e\n\n self.source = WFSGeometrySource(wfs, features)\n crs = self.source.default_projection()\n super().__init__(crs, **kwargs)\n # Default kwargs\n self._kwargs.setdefault('edgecolor', 'black')\n self._kwargs.setdefault('facecolor', 'none')\n\n def geometries(self):\n min_x, min_y, max_x, max_y = self.crs.boundary.bounds\n geoms = self.source.fetch_geometries(self.crs,\n extent=(min_x, max_x,\n min_y, max_y))\n return iter(geoms)\n\n def intersecting_geometries(self, extent):\n geoms = self.source.fetch_geometries(self.crs, extent)\n return iter(geoms)\n\n\nauto_scaler = AdaptiveScaler('110m', (('50m', 50), ('10m', 15)))\n\"\"\"AdaptiveScaler for NaturalEarthFeature. Default scale is '110m'.\n'110m' is used above 50 degrees, '50m' for 50-15 degrees and '10m' below 15\ndegrees.\"\"\"\n\n\nBORDERS = NaturalEarthFeature(\n 'cultural', 'admin_0_boundary_lines_land',\n auto_scaler, edgecolor='black', facecolor='never')\n\"\"\"Automatically scaled country boundaries.\"\"\"\n\n\nSTATES = NaturalEarthFeature(\n 'cultural', 'admin_1_states_provinces_lakes',\n auto_scaler, edgecolor='black', facecolor='none')\n\"\"\"Automatically scaled state and province boundaries.\"\"\"\n\n\nCOASTLINE = NaturalEarthFeature(\n 'physical', 'coastline', auto_scaler,\n edgecolor='black', facecolor='never')\n\"\"\"Automatically scaled coastline, including major islands.\"\"\"\n\n\nLAKES = NaturalEarthFeature(\n 'physical', 'lakes', auto_scaler,\n edgecolor='none', facecolor=COLORS['water'])\n\"\"\"Automatically scaled natural and artificial lakes.\"\"\"\n\n\nLAND = NaturalEarthFeature(\n 'physical', 'land', auto_scaler,\n edgecolor='none', facecolor=COLORS['land'], zorder=-1)\n\"\"\"Automatically scaled land polygons, including major islands.\"\"\"\n\n\nOCEAN = NaturalEarthFeature(\n 'physical', 'ocean', auto_scaler,\n edgecolor='none', facecolor=COLORS['water'], zorder=-1)\n\"\"\"Automatically scaled ocean polygons.\"\"\"\n\n\nRIVERS = NaturalEarthFeature(\n 'physical', 'rivers_lake_centerlines', auto_scaler,\n edgecolor=COLORS['water'], facecolor='never')\n\"\"\"Automatically scaled single-line drainages, including lake centerlines.\"\"\"\n", "path": "lib/cartopy/feature/__init__.py" } ]
[ { "content": "# Copyright Cartopy Contributors\n#\n# This file is part of Cartopy and is released under the LGPL license.\n# See COPYING and COPYING.LESSER in the root of the repository for full\n# licensing details.\n\n\"\"\"\nThis module defines :class:`Feature` instances, for use with\nax.add_feature().\n\n\"\"\"\n\nfrom abc import ABCMeta, abstractmethod\n\nimport numpy as np\nimport shapely.geometry as sgeom\n\nimport cartopy.io.shapereader as shapereader\nimport cartopy.crs\n\n\nCOLORS = {'land': np.array((240, 240, 220)) / 256.,\n 'land_alt1': np.array((220, 220, 220)) / 256.,\n 'water': np.array((152, 183, 226)) / 256.}\n\"\"\"\nA dictionary of colors useful for drawing Features.\n\nThe named keys in this dictionary represent the \"type\" of\nfeature being plotted.\n\n\"\"\"\n\n_NATURAL_EARTH_GEOM_CACHE = {}\n\"\"\"\nCaches a mapping between (name, category, scale) and a tuple of the\nresulting geometries.\n\nProvides a significant performance benefit (when combined with object id\ncaching in GeoAxes.add_geometries) when producing multiple maps of the\nsame projection.\n\n\"\"\"\n\n\nclass Feature(metaclass=ABCMeta):\n \"\"\"\n Represents a collection of points, lines and polygons with convenience\n methods for common drawing and filtering operations.\n\n Parameters\n ----------\n crs\n The coordinate reference system of this Feature\n\n\n Other Parameters\n ----------------\n **kwargs\n Keyword arguments to be used when drawing this feature.\n\n\n .. seealso::\n\n To add features to the current Matplotlib axes, see\n :func:`GeoAxes <cartopy.mpl.geoaxes.GeoAxes.add_feature>`.\n\n \"\"\"\n\n def __init__(self, crs, **kwargs):\n self._crs = crs\n self._kwargs = dict(kwargs)\n\n @property\n def crs(self):\n \"\"\"The cartopy CRS for the geometries in this feature.\"\"\"\n return self._crs\n\n @property\n def kwargs(self):\n \"\"\"\n The read-only dictionary of keyword arguments that are used when\n creating the Matplotlib artists for this feature.\n\n \"\"\"\n return dict(self._kwargs)\n\n @abstractmethod\n def geometries(self):\n \"\"\"\n Return an iterator of (shapely) geometries for this feature.\n\n \"\"\"\n pass\n\n def intersecting_geometries(self, extent):\n \"\"\"\n Return an iterator of shapely geometries that intersect with\n the given extent. The extent is assumed to be in the CRS of\n the feature. If extent is None, the method returns all\n geometries for this dataset.\n\n \"\"\"\n if extent is not None:\n extent_geom = sgeom.box(extent[0], extent[2],\n extent[1], extent[3])\n return (geom for geom in self.geometries() if\n geom is not None and extent_geom.intersects(geom))\n else:\n return self.geometries()\n\n\nclass Scaler:\n \"\"\"\n General object for handling the scale of the geometries used in a Feature.\n \"\"\"\n def __init__(self, scale):\n self._scale = scale\n\n @property\n def scale(self):\n return self._scale\n\n def scale_from_extent(self, extent):\n \"\"\"\n Given an extent, update the scale.\n\n Parameters\n ----------\n extent\n The boundaries of the plotted area of a projection. The\n coordinate system of the extent should be constant, and at the\n same scale as the scales argument in the constructor.\n\n \"\"\"\n # Note: Implementation does nothing. For subclasses to specialise.\n return self._scale\n\n\nclass AdaptiveScaler(Scaler):\n \"\"\"\n Automatically select scale of geometries based on extent of plotted axes.\n \"\"\"\n def __init__(self, default_scale, limits):\n \"\"\"\n Parameters\n ----------\n default_scale\n Coarsest scale used as default when plot is at maximum extent.\n\n limits\n Scale-extent pairs at which scale of geometries change. Must be a\n tuple of tuples ordered from coarsest to finest scales. Limit\n values are the upper bounds for their corresponding scale.\n\n Example\n -------\n\n >>> s = AdaptiveScaler('coarse',\n ... (('intermediate', 30), ('fine', 10)))\n >>> s.scale_from_extent([-180, 180, -90, 90])\n 'coarse'\n >>> s.scale_from_extent([-5, 6, 45, 56])\n 'intermediate'\n >>> s.scale_from_extent([-5, 5, 45, 56])\n 'fine'\n\n \"\"\"\n super().__init__(default_scale)\n self._default_scale = default_scale\n # Upper limit on extent in degrees.\n self._limits = limits\n\n def scale_from_extent(self, extent):\n scale = self._default_scale\n\n if extent is not None:\n width = abs(extent[1] - extent[0])\n height = abs(extent[3] - extent[2])\n min_extent = min(width, height)\n\n if min_extent != 0:\n for scale_candidate, upper_bound in self._limits:\n if min_extent <= upper_bound:\n # It is a valid scale, so track it.\n scale = scale_candidate\n else:\n # This scale is not valid and we can stop looking.\n # We use the last (valid) scale that we saw.\n break\n\n self._scale = scale\n return self._scale\n\n\nclass ShapelyFeature(Feature):\n \"\"\"\n A class capable of drawing a collection of\n shapely geometries.\n\n \"\"\"\n def __init__(self, geometries, crs, **kwargs):\n \"\"\"\n Parameters\n ----------\n geometries\n A collection of shapely geometries.\n crs\n The cartopy CRS in which the provided geometries are defined.\n\n Other Parameters\n ----------------\n **kwargs\n Keyword arguments to be used when drawing this feature.\n\n \"\"\"\n super().__init__(crs, **kwargs)\n if isinstance(geometries, sgeom.base.BaseGeometry):\n geometries = [geometries]\n self._geoms = tuple(geometries)\n\n def geometries(self):\n return iter(self._geoms)\n\n\nclass NaturalEarthFeature(Feature):\n \"\"\"\n A simple interface to Natural Earth shapefiles.\n\n See https://www.naturalearthdata.com/\n\n \"\"\"\n def __init__(self, category, name, scale, **kwargs):\n \"\"\"\n Parameters\n ----------\n category\n The category of the dataset, i.e. either 'cultural' or 'physical'.\n name\n The name of the dataset, e.g. 'admin_0_boundary_lines_land'.\n scale\n The dataset scale, i.e. one of '10m', '50m', or '110m',\n or Scaler object. Dataset scales correspond to 1:10,000,000,\n 1:50,000,000, and 1:110,000,000 respectively.\n\n Other Parameters\n ----------------\n **kwargs\n Keyword arguments to be used when drawing this feature.\n\n \"\"\"\n super().__init__(cartopy.crs.PlateCarree(), **kwargs)\n self.category = category\n self.name = name\n\n # Cast the given scale to a (constant) Scaler if a string is passed.\n if isinstance(scale, str):\n scale = Scaler(scale)\n\n self.scaler = scale\n # Make sure this is a valid resolution\n self._validate_scale()\n\n @property\n def scale(self):\n return self.scaler.scale\n\n def _validate_scale(self):\n if self.scale not in ('110m', '50m', '10m'):\n raise ValueError(\n f'{self.scale!r} is not a valid Natural Earth scale. '\n 'Valid scales are \"110m\", \"50m\", and \"10m\".'\n )\n\n def geometries(self):\n \"\"\"\n Returns an iterator of (shapely) geometries for this feature.\n\n \"\"\"\n key = (self.name, self.category, self.scale)\n if key not in _NATURAL_EARTH_GEOM_CACHE:\n path = shapereader.natural_earth(resolution=self.scale,\n category=self.category,\n name=self.name)\n geometries = tuple(shapereader.Reader(path).geometries())\n _NATURAL_EARTH_GEOM_CACHE[key] = geometries\n else:\n geometries = _NATURAL_EARTH_GEOM_CACHE[key]\n\n return iter(geometries)\n\n def intersecting_geometries(self, extent):\n \"\"\"\n Returns an iterator of shapely geometries that intersect with\n the given extent.\n The extent is assumed to be in the CRS of the feature.\n If extent is None, the method returns all geometries for this dataset.\n \"\"\"\n self.scaler.scale_from_extent(extent)\n return super().intersecting_geometries(extent)\n\n def with_scale(self, new_scale):\n \"\"\"\n Return a copy of the feature with a new scale.\n\n Parameters\n ----------\n new_scale\n The new dataset scale, i.e. one of '10m', '50m', or '110m'.\n Corresponding to 1:10,000,000, 1:50,000,000, and 1:110,000,000\n respectively.\n\n \"\"\"\n return NaturalEarthFeature(self.category, self.name, new_scale,\n **self.kwargs)\n\n\nclass GSHHSFeature(Feature):\n \"\"\"\n An interface to the GSHHS dataset.\n\n See https://www.ngdc.noaa.gov/mgg/shorelines/gshhs.html\n\n Parameters\n ----------\n scale\n The dataset scale. One of 'auto', 'coarse', 'low', 'intermediate',\n 'high, or 'full' (default is 'auto').\n levels\n A list of integers 1-6 corresponding to the desired GSHHS feature\n levels to draw (default is [1] which corresponds to coastlines).\n\n Other Parameters\n ----------------\n **kwargs\n Keyword arguments to be used when drawing the feature. Defaults\n are edgecolor='black' and facecolor='none'.\n\n \"\"\"\n\n _geometries_cache = {}\n \"\"\"\n A mapping from scale and level to GSHHS shapely geometry::\n\n {(scale, level): geom}\n\n This provides a performance boost when plotting in interactive mode or\n instantiating multiple GSHHS artists, by reducing repeated file IO.\n\n \"\"\"\n def __init__(self, scale='auto', levels=None, **kwargs):\n super().__init__(cartopy.crs.PlateCarree(), **kwargs)\n\n if scale not in ('auto', 'a', 'coarse', 'c', 'low', 'l',\n 'intermediate', 'i', 'high', 'h', 'full', 'f'):\n raise ValueError(f\"Unknown GSHHS scale {scale!r}.\")\n self._scale = scale\n\n if levels is None:\n levels = [1]\n self._levels = set(levels)\n unknown_levels = self._levels.difference([1, 2, 3, 4])\n if unknown_levels:\n raise ValueError(f\"Unknown GSHHS levels {unknown_levels!r}.\")\n\n # Default kwargs\n self._kwargs.setdefault('edgecolor', 'black')\n self._kwargs.setdefault('facecolor', 'none')\n\n def _scale_from_extent(self, extent):\n \"\"\"\n Return the appropriate scale (e.g. 'i') for the given extent\n expressed in PlateCarree CRS.\n\n \"\"\"\n # Default to coarse scale\n scale = 'c'\n\n if extent is not None:\n # Upper limit on extent in degrees.\n scale_limits = (('c', 20.0),\n ('l', 10.0),\n ('i', 2.0),\n ('h', 0.5),\n ('f', 0.1))\n\n width = abs(extent[1] - extent[0])\n height = abs(extent[3] - extent[2])\n min_extent = min(width, height)\n if min_extent != 0:\n for scale, limit in scale_limits:\n if min_extent > limit:\n break\n\n return scale\n\n def geometries(self):\n return self.intersecting_geometries(extent=None)\n\n def intersecting_geometries(self, extent):\n if self._scale == 'auto':\n scale = self._scale_from_extent(extent)\n else:\n scale = self._scale[0]\n\n if extent is not None:\n extent_geom = sgeom.box(extent[0], extent[2],\n extent[1], extent[3])\n for level in self._levels:\n geoms = GSHHSFeature._geometries_cache.get((scale, level))\n if geoms is None:\n # Load GSHHS geometries from appropriate shape file.\n # TODO selective load based on bbox of each geom in file.\n path = shapereader.gshhs(scale, level)\n geoms = tuple(shapereader.Reader(path).geometries())\n GSHHSFeature._geometries_cache[(scale, level)] = geoms\n for geom in geoms:\n if extent is None or extent_geom.intersects(geom):\n yield geom\n\n\nclass WFSFeature(Feature):\n \"\"\"\n A class capable of drawing a collection of geometries\n obtained from an OGC Web Feature Service (WFS).\n\n This feature requires additional dependencies. If installed via pip,\n try ``pip install cartopy[ows]``.\n \"\"\"\n def __init__(self, wfs, features, **kwargs):\n \"\"\"\n Parameters\n ----------\n wfs: string or :class:`owslib.wfs.WebFeatureService` instance\n The WebFeatureService instance, or URL of a WFS service, from which\n to retrieve the geometries.\n features: string or list of strings\n The typename(s) of features available from the web service that\n will be retrieved. Somewhat analogous to layers in WMS/WMTS.\n\n Other Parameters\n ----------------\n **kwargs\n Keyword arguments to be used when drawing this feature.\n\n \"\"\"\n try:\n from cartopy.io.ogc_clients import WFSGeometrySource\n except ImportError as e:\n raise ImportError(\n 'WFSFeature requires additional dependencies. If installed '\n 'via pip, try `pip install cartopy[ows]`.\\n') from e\n\n self.source = WFSGeometrySource(wfs, features)\n crs = self.source.default_projection()\n super().__init__(crs, **kwargs)\n # Default kwargs\n self._kwargs.setdefault('edgecolor', 'black')\n self._kwargs.setdefault('facecolor', 'none')\n\n def geometries(self):\n min_x, min_y, max_x, max_y = self.crs.boundary.bounds\n geoms = self.source.fetch_geometries(self.crs,\n extent=(min_x, max_x,\n min_y, max_y))\n return iter(geoms)\n\n def intersecting_geometries(self, extent):\n geoms = self.source.fetch_geometries(self.crs, extent)\n return iter(geoms)\n\n\nauto_scaler = AdaptiveScaler('110m', (('50m', 50), ('10m', 15)))\n\"\"\"AdaptiveScaler for NaturalEarthFeature. Default scale is '110m'.\n'110m' is used above 50 degrees, '50m' for 50-15 degrees and '10m' below 15\ndegrees.\"\"\"\n\n\nBORDERS = NaturalEarthFeature(\n 'cultural', 'admin_0_boundary_lines_land',\n auto_scaler, edgecolor='black', facecolor='never')\n\"\"\"Automatically scaled country boundaries.\"\"\"\n\n\nSTATES = NaturalEarthFeature(\n 'cultural', 'admin_1_states_provinces_lakes',\n auto_scaler, edgecolor='black', facecolor='none')\n\"\"\"Automatically scaled state and province boundaries.\"\"\"\n\n\nCOASTLINE = NaturalEarthFeature(\n 'physical', 'coastline', auto_scaler,\n edgecolor='black', facecolor='never')\n\"\"\"Automatically scaled coastline, including major islands.\"\"\"\n\n\nLAKES = NaturalEarthFeature(\n 'physical', 'lakes', auto_scaler,\n edgecolor='none', facecolor=COLORS['water'])\n\"\"\"Automatically scaled natural and artificial lakes.\"\"\"\n\n\nLAND = NaturalEarthFeature(\n 'physical', 'land', auto_scaler,\n edgecolor='none', facecolor=COLORS['land'], zorder=-1)\n\"\"\"Automatically scaled land polygons, including major islands.\"\"\"\n\n\nOCEAN = NaturalEarthFeature(\n 'physical', 'ocean', auto_scaler,\n edgecolor='none', facecolor=COLORS['water'], zorder=-1)\n\"\"\"Automatically scaled ocean polygons.\"\"\"\n\n\nRIVERS = NaturalEarthFeature(\n 'physical', 'rivers_lake_centerlines', auto_scaler,\n edgecolor=COLORS['water'], facecolor='never')\n\"\"\"Automatically scaled single-line drainages, including lake centerlines.\"\"\"\n", "path": "lib/cartopy/feature/__init__.py" } ]
diff --git a/lib/cartopy/feature/__init__.py b/lib/cartopy/feature/__init__.py index e7685522e..86d4b7670 100644 --- a/lib/cartopy/feature/__init__.py +++ b/lib/cartopy/feature/__init__.py @@ -214,6 +214,8 @@ def __init__(self, geometries, crs, **kwargs): """ super().__init__(crs, **kwargs) + if isinstance(geometries, sgeom.base.BaseGeometry): + geometries = [geometries] self._geoms = tuple(geometries) def geometries(self): diff --git a/lib/cartopy/tests/mpl/test_axes.py b/lib/cartopy/tests/mpl/test_axes.py index a431ce989..904274ffe 100644 --- a/lib/cartopy/tests/mpl/test_axes.py +++ b/lib/cartopy/tests/mpl/test_axes.py @@ -13,6 +13,7 @@ import pytest import cartopy.crs as ccrs +import cartopy.feature as cfeature from cartopy.mpl.geoaxes import InterProjectionTransform, GeoAxes @@ -108,6 +109,14 @@ def test_styler_kwarg(self, ShapelyFeature, add_feature_method): add_feature_method.assert_called_once_with( ShapelyFeature(), styler=mock.sentinel.styler) + @pytest.mark.natural_earth + def test_single_geometry(self): + # A single geometry is acceptable + proj = ccrs.PlateCarree() + ax = GeoAxes(plt.figure(), [0, 0, 1, 1], + map_projection=proj) + ax.add_geometries(next(cfeature.COASTLINE.geometries()), crs=proj) + @cleanup def test_geoaxes_subplot():
castorini__pyserini-667
Switch to jnius_config.add_classpath Currently, pyserini replaces any previously registered jars on the classpath in its setup code. Is there any reason to not use add_classpath() instead of set_classpath()? Here is the pyjnius relevant code: ```python def set_classpath(*path): """ Sets the classpath for the JVM to use. Replaces any existing classpath, overriding the CLASSPATH environment variable. """ check_vm_running() global classpath classpath = list(path) def add_classpath(*path): """ Appends items to the classpath for the JVM to use. Replaces any existing classpath, overriding the CLASSPATH environment variable. """ check_vm_running() global classpath if classpath is None: classpath = list(path) else: classpath.extend(path) ```
[ { "content": "#\n# Pyserini: Reproducible IR research with sparse and dense representations\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\n\"\"\"\nModule for adding Anserini jar to classpath for pyjnius usage\n\"\"\"\n\nimport glob\nimport os\n\nimport jnius_config\n\n\ndef configure_classpath(anserini_root=\".\"):\n \"\"\"\n Parameters\n ----------\n anserini_root : str\n (Optional) path to root anserini directory.\n\n \"\"\"\n paths = glob.glob(os.path.join(anserini_root, 'anserini-*-fatjar.jar'))\n if not paths:\n raise Exception('No matching jar file found in {}'.format(os.path.abspath(anserini_root)))\n\n latest = max(paths, key=os.path.getctime)\n jnius_config.set_classpath(latest)\n", "path": "pyserini/setup.py" } ]
[ { "content": "#\n# Pyserini: Reproducible IR research with sparse and dense representations\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\n\"\"\"\nModule for adding Anserini jar to classpath for pyjnius usage\n\"\"\"\n\nimport glob\nimport os\n\nimport jnius_config\n\n\ndef configure_classpath(anserini_root=\".\"):\n \"\"\"\n Parameters\n ----------\n anserini_root : str\n (Optional) path to root anserini directory.\n\n \"\"\"\n paths = glob.glob(os.path.join(anserini_root, 'anserini-*-fatjar.jar'))\n if not paths:\n raise Exception('No matching jar file found in {}'.format(os.path.abspath(anserini_root)))\n\n latest = max(paths, key=os.path.getctime)\n jnius_config.add_classpath(latest)\n", "path": "pyserini/setup.py" } ]
diff --git a/pyserini/setup.py b/pyserini/setup.py index ec07b22a5..1cc1560c2 100644 --- a/pyserini/setup.py +++ b/pyserini/setup.py @@ -37,4 +37,4 @@ def configure_classpath(anserini_root="."): raise Exception('No matching jar file found in {}'.format(os.path.abspath(anserini_root))) latest = max(paths, key=os.path.getctime) - jnius_config.set_classpath(latest) + jnius_config.add_classpath(latest)
ivy-llc__ivy-27943
Fix Ivy Failing Test: jax - elementwise.asinh
[ { "content": "# global\nfrom typing import Union, Optional\n\nimport jax\nimport jax.numpy as jnp\n\n# local\nimport ivy\nfrom ivy import (\n default_float_dtype,\n is_float_dtype,\n)\nfrom ivy import promote_types_of_inputs\nfrom ivy.functional.backends.jax import JaxArray\nfrom ivy.func_wrapper import with_unsupported_dtypes\nfrom . import backend_version\n\n\ndef abs(\n x: Union[float, JaxArray],\n /,\n *,\n out: Optional[JaxArray] = None,\n) -> JaxArray:\n if (hasattr(x, \"dtype\") and \"bool\" in str(x.dtype)) or isinstance(x, bool):\n return x\n # jnp.where is used for consistent gradients\n return jnp.where(x != 0, jnp.absolute(x), 0)\n\n\ndef acos(x: JaxArray, /, *, out: Optional[JaxArray] = None) -> JaxArray:\n return jnp.arccos(x)\n\n\ndef acosh(x: JaxArray, /, *, out: Optional[JaxArray] = None) -> JaxArray:\n return jnp.arccosh(x)\n\n\ndef add(\n x1: Union[float, JaxArray],\n x2: Union[float, JaxArray],\n /,\n *,\n alpha: Union[int, float] = 1,\n out: Optional[JaxArray] = None,\n) -> JaxArray:\n x1, x2 = ivy.promote_types_of_inputs(x1, x2)\n if alpha not in (1, None):\n with ivy.ArrayMode(False):\n x2 = multiply(x2, alpha)\n return jnp.add(x1, x2)\n\n\ndef asin(x: JaxArray, /, *, out: Optional[JaxArray] = None) -> JaxArray:\n return jnp.arcsin(x)\n\n\ndef asinh(x: JaxArray, /, *, out: Optional[JaxArray] = None) -> JaxArray:\n return jnp.arcsinh(x)\n\n\ndef atan(x: JaxArray, /, *, out: Optional[JaxArray] = None) -> JaxArray:\n return jnp.arctan(x)\n\n\ndef atan2(x1: JaxArray, x2: JaxArray, /, *, out: Optional[JaxArray] = None) -> JaxArray:\n x1, x2 = ivy.promote_types_of_inputs(x1, x2)\n return jnp.arctan2(x1, x2)\n\n\n@with_unsupported_dtypes({\"0.4.23 and below\": (\"complex\",)}, backend_version)\ndef atanh(x: JaxArray, /, *, out: Optional[JaxArray] = None) -> JaxArray:\n return jnp.arctanh(x)\n\n\n@with_unsupported_dtypes({\"0.4.23 and below\": (\"complex\",)}, backend_version)\ndef bitwise_and(\n x1: Union[int, JaxArray],\n x2: Union[int, JaxArray],\n /,\n *,\n out: Optional[JaxArray] = None,\n) -> JaxArray:\n x1, x2 = ivy.promote_types_of_inputs(x1, x2, array_api_promotion=True)\n return jnp.bitwise_and(x1, x2)\n\n\n@with_unsupported_dtypes({\"0.4.23 and below\": (\"complex\",)}, backend_version)\ndef bitwise_invert(\n x: Union[int, JaxArray], /, *, out: Optional[JaxArray] = None\n) -> JaxArray:\n return jnp.bitwise_not(x)\n\n\n@with_unsupported_dtypes({\"0.4.23 and below\": (\"complex\",)}, backend_version)\ndef bitwise_left_shift(\n x1: Union[int, JaxArray],\n x2: Union[int, JaxArray],\n /,\n *,\n out: Optional[JaxArray] = None,\n) -> JaxArray:\n x1, x2 = ivy.promote_types_of_inputs(x1, x2, array_api_promotion=True)\n return jnp.left_shift(x1, x2)\n\n\n@with_unsupported_dtypes({\"0.4.23 and below\": (\"complex\",)}, backend_version)\ndef bitwise_or(\n x1: Union[int, JaxArray],\n x2: Union[int, JaxArray],\n /,\n *,\n out: Optional[JaxArray] = None,\n) -> JaxArray:\n x1, x2 = ivy.promote_types_of_inputs(x1, x2, array_api_promotion=True)\n return jnp.bitwise_or(x1, x2)\n\n\n@with_unsupported_dtypes({\"0.4.23 and below\": (\"complex\",)}, backend_version)\ndef bitwise_right_shift(\n x1: Union[int, JaxArray],\n x2: Union[int, JaxArray],\n /,\n *,\n out: Optional[JaxArray] = None,\n) -> JaxArray:\n x1, x2 = ivy.promote_types_of_inputs(x1, x2, array_api_promotion=True)\n return jnp.right_shift(x1, x2)\n\n\n@with_unsupported_dtypes({\"0.4.23 and below\": (\"complex\",)}, backend_version)\ndef bitwise_xor(\n x1: Union[int, JaxArray],\n x2: Union[int, JaxArray],\n /,\n *,\n out: Optional[JaxArray] = None,\n) -> JaxArray:\n x1, x2 = ivy.promote_types_of_inputs(x1, x2, array_api_promotion=True)\n return jnp.bitwise_xor(x1, x2)\n\n\n@with_unsupported_dtypes({\"0.4.23 and below\": (\"complex\",)}, backend_version)\ndef ceil(x: JaxArray, /, *, out: Optional[JaxArray] = None) -> JaxArray:\n if \"int\" in str(x.dtype):\n return x\n else:\n return jnp.ceil(x)\n\n\ndef cos(x: JaxArray, /, *, out: Optional[JaxArray] = None) -> JaxArray:\n return jnp.cos(x)\n\n\n@with_unsupported_dtypes({\"0.4.23 and below\": (\"float16\",)}, backend_version)\ndef cosh(x: JaxArray, /, *, out: Optional[JaxArray] = None) -> JaxArray:\n return jnp.cosh(x)\n\n\ndef divide(\n x1: Union[float, JaxArray],\n x2: Union[float, JaxArray],\n /,\n *,\n out: Optional[JaxArray] = None,\n) -> JaxArray:\n x1, x2 = ivy.promote_types_of_inputs(x1, x2)\n ret = jax.numpy.divide(x1, x2)\n if ivy.is_float_dtype(x1.dtype) or ivy.is_complex_dtype(x1.dtype):\n ret = jnp.asarray(ret, dtype=x1.dtype)\n else:\n ret = jnp.asarray(ret, dtype=ivy.default_float_dtype(as_native=True))\n return ret\n\n\ndef equal(\n x1: Union[float, JaxArray],\n x2: Union[float, JaxArray],\n /,\n *,\n out: Optional[JaxArray] = None,\n) -> JaxArray:\n x1, x2 = ivy.promote_types_of_inputs(x1, x2)\n return jnp.equal(x1, x2)\n\n\ndef exp(x: JaxArray, /, *, out: Optional[JaxArray] = None) -> JaxArray:\n return jnp.exp(x)\n\n\ndef expm1(x: JaxArray, /, *, out: Optional[JaxArray] = None) -> JaxArray:\n return jnp.expm1(x)\n\n\n@with_unsupported_dtypes({\"0.4.23 and below\": (\"complex\",)}, backend_version)\ndef floor(x: JaxArray, /, *, out: Optional[JaxArray] = None) -> JaxArray:\n if \"int\" in str(x.dtype):\n return x\n else:\n return jnp.floor(x)\n\n\n@with_unsupported_dtypes({\"0.4.23 and below\": (\"complex\",)}, backend_version)\ndef floor_divide(\n x1: Union[float, JaxArray],\n x2: Union[float, JaxArray],\n /,\n *,\n out: Optional[JaxArray] = None,\n) -> JaxArray:\n x1, x2 = ivy.promote_types_of_inputs(x1, x2)\n return jnp.floor(jnp.divide(x1, x2)).astype(x1.dtype)\n\n\ndef fmin(\n x1: JaxArray,\n x2: JaxArray,\n /,\n *,\n out: Optional[JaxArray] = None,\n) -> JaxArray:\n return jnp.fmin(x1, x2)\n\n\ndef greater(\n x1: Union[float, JaxArray],\n x2: Union[float, JaxArray],\n /,\n *,\n out: Optional[JaxArray] = None,\n) -> JaxArray:\n x1, x2 = ivy.promote_types_of_inputs(x1, x2)\n return jnp.greater(x1, x2)\n\n\ndef greater_equal(\n x1: Union[float, JaxArray],\n x2: Union[float, JaxArray],\n /,\n *,\n out: Optional[JaxArray] = None,\n) -> JaxArray:\n x1, x2 = ivy.promote_types_of_inputs(x1, x2)\n return jnp.greater_equal(x1, x2)\n\n\ndef isfinite(x: JaxArray, /, *, out: Optional[JaxArray] = None) -> JaxArray:\n return jnp.isfinite(x)\n\n\n@with_unsupported_dtypes({\"0.4.23 and below\": (\"complex\",)}, backend_version)\ndef isinf(\n x: JaxArray,\n /,\n *,\n detect_positive: bool = True,\n detect_negative: bool = True,\n out: Optional[JaxArray] = None,\n) -> JaxArray:\n if detect_positive and detect_negative:\n return jnp.isinf(x)\n elif detect_positive:\n return jnp.isposinf(x)\n elif detect_negative:\n return jnp.isneginf(x)\n return jnp.full_like(x, False, dtype=jnp.bool_)\n\n\ndef isnan(x: JaxArray, /, *, out: Optional[JaxArray] = None) -> JaxArray:\n return jnp.isnan(x)\n\n\ndef lcm(x1: JaxArray, x2: JaxArray, /, *, out: Optional[JaxArray] = None) -> JaxArray:\n x1, x2 = promote_types_of_inputs(x1, x2)\n return jnp.lcm(x1, x2)\n\n\ndef less(\n x1: Union[float, JaxArray],\n x2: Union[float, JaxArray],\n /,\n *,\n out: Optional[JaxArray] = None,\n) -> JaxArray:\n x1, x2 = ivy.promote_types_of_inputs(x1, x2)\n return jnp.less(x1, x2)\n\n\ndef less_equal(\n x1: Union[float, JaxArray],\n x2: Union[float, JaxArray],\n /,\n *,\n out: Optional[JaxArray] = None,\n) -> JaxArray:\n x1, x2 = ivy.promote_types_of_inputs(x1, x2)\n return jnp.less_equal(x1, x2)\n\n\ndef log(x: JaxArray, /, *, out: Optional[JaxArray] = None) -> JaxArray:\n return jnp.log(x)\n\n\ndef log10(x: JaxArray, /, *, out: Optional[JaxArray] = None) -> JaxArray:\n return jnp.log10(x)\n\n\ndef log1p(x: JaxArray, /, *, out: Optional[JaxArray] = None) -> JaxArray:\n return jnp.log1p(x)\n\n\ndef log2(x: JaxArray, /, *, out: Optional[JaxArray] = None) -> JaxArray:\n return jnp.log2(x)\n\n\ndef logaddexp(\n x1: JaxArray, x2: JaxArray, /, *, out: Optional[JaxArray] = None\n) -> JaxArray:\n return jnp.logaddexp(x1, x2)\n\n\ndef logaddexp2(\n x1: Union[JaxArray, float, list, tuple],\n x2: Union[JaxArray, float, list, tuple],\n /,\n *,\n out: Optional[JaxArray] = None,\n) -> JaxArray:\n x1, x2 = promote_types_of_inputs(x1, x2)\n if not is_float_dtype(x1):\n x1 = x1.astype(default_float_dtype(as_native=True))\n x2 = x2.astype(default_float_dtype(as_native=True))\n return jnp.logaddexp2(x1, x2)\n\n\ndef logical_and(\n x1: JaxArray, x2: JaxArray, /, *, out: Optional[JaxArray] = None\n) -> JaxArray:\n return jnp.logical_and(x1, x2)\n\n\ndef logical_not(x: JaxArray, /, *, out: Optional[JaxArray] = None) -> JaxArray:\n return jnp.logical_not(x)\n\n\ndef logical_or(\n x1: JaxArray, x2: JaxArray, /, *, out: Optional[JaxArray] = None\n) -> JaxArray:\n return jnp.logical_or(x1, x2)\n\n\ndef logical_xor(\n x1: JaxArray, x2: JaxArray, /, *, out: Optional[JaxArray] = None\n) -> JaxArray:\n return jnp.logical_xor(x1, x2)\n\n\ndef multiply(\n x1: Union[float, JaxArray],\n x2: Union[float, JaxArray],\n /,\n *,\n out: Optional[JaxArray] = None,\n) -> JaxArray:\n x1, x2 = ivy.promote_types_of_inputs(x1, x2)\n return jnp.multiply(x1, x2)\n\n\ndef nan_to_num(\n x: JaxArray,\n /,\n *,\n copy: bool = True,\n nan: Union[float, int] = 0.0,\n posinf: Optional[Union[float, int]] = None,\n neginf: Optional[Union[float, int]] = None,\n out: Optional[JaxArray] = None,\n) -> JaxArray:\n return jnp.nan_to_num(x, copy=copy, nan=nan, posinf=posinf, neginf=neginf)\n\n\ndef negative(\n x: Union[float, JaxArray], /, *, out: Optional[JaxArray] = None\n) -> JaxArray:\n return jnp.negative(x)\n\n\ndef not_equal(\n x1: Union[float, JaxArray],\n x2: Union[float, JaxArray],\n /,\n *,\n out: Optional[JaxArray] = None,\n) -> JaxArray:\n x1, x2 = ivy.promote_types_of_inputs(x1, x2)\n return jnp.not_equal(x1, x2)\n\n\ndef positive(\n x: Union[float, JaxArray], /, *, out: Optional[JaxArray] = None\n) -> JaxArray:\n return jnp.positive(x)\n\n\ndef pow(\n x1: JaxArray,\n x2: Union[int, float, JaxArray],\n /,\n *,\n out: Optional[JaxArray] = None,\n) -> JaxArray:\n x1, x2 = ivy.promote_types_of_inputs(x1, x2)\n if (\n ivy.any(x1 == 0)\n and ivy.is_int_dtype(x1)\n and ivy.any(x2 < 0)\n and all(dtype not in str(x1.dtype) for dtype in [\"int16\", \"int8\"])\n ):\n if ivy.is_int_dtype(x1):\n fill_value = jnp.iinfo(x1.dtype).min\n else:\n fill_value = jnp.finfo(x1.dtype).min\n ret = jnp.float_power(x1, x2)\n return jnp.where(jnp.bitwise_and(x1 == 0, x2 < 0), fill_value, ret).astype(\n x1.dtype\n )\n if ivy.is_int_dtype(x1) and ivy.any(x2 < 0):\n return jnp.float_power(x1, x2).astype(x1.dtype)\n return jnp.power(x1, x2)\n\n\n@with_unsupported_dtypes({\"0.4.23 and below\": (\"complex\",)}, backend_version)\ndef remainder(\n x1: Union[float, JaxArray],\n x2: Union[float, JaxArray],\n /,\n *,\n modulus: bool = True,\n out: Optional[JaxArray] = None,\n) -> JaxArray:\n x1, x2 = ivy.promote_types_of_inputs(x1, x2)\n if not modulus:\n res = x1 / x2\n res_floored = jnp.where(res >= 0, jnp.floor(res), jnp.ceil(res))\n diff = res - res_floored\n diff, x2 = ivy.promote_types_of_inputs(diff, x2)\n return jnp.round(diff * x2).astype(x1.dtype)\n return jnp.remainder(x1, x2)\n\n\ndef round(\n x: JaxArray, /, *, decimals: int = 0, out: Optional[JaxArray] = None\n) -> JaxArray:\n if \"int\" in str(x.dtype):\n ret = jnp.copy(x)\n else:\n ret = jnp.round(x, decimals=decimals)\n if ivy.exists(out):\n return ivy.inplace_update(out, ret)\n return ret\n\n\ndef _abs_variant_sign(x):\n return jnp.where(x != 0, x / jnp.abs(x), 0)\n\n\ndef sign(\n x: JaxArray, /, *, np_variant: Optional[bool] = True, out: Optional[JaxArray] = None\n) -> JaxArray:\n if \"complex\" in str(x.dtype):\n return jnp.sign(x) if np_variant else _abs_variant_sign(x)\n return jnp.where(x == -0.0, 0.0, jnp.sign(x)).astype(x.dtype)\n\n\ndef sin(x: JaxArray, /, *, out: Optional[JaxArray] = None) -> JaxArray:\n return jnp.sin(x)\n\n\ndef sinh(x: JaxArray, /, *, out: Optional[JaxArray] = None) -> JaxArray:\n return jnp.sinh(x)\n\n\ndef sqrt(x: JaxArray, /, *, out: Optional[JaxArray] = None) -> JaxArray:\n return jnp.sqrt(x)\n\n\ndef square(x: JaxArray, /, *, out: Optional[JaxArray] = None) -> JaxArray:\n return jnp.square(x)\n\n\ndef subtract(\n x1: Union[float, JaxArray],\n x2: Union[float, JaxArray],\n /,\n *,\n alpha: Optional[Union[int, float]] = None,\n out: Optional[JaxArray] = None,\n) -> JaxArray:\n x1, x2 = ivy.promote_types_of_inputs(x1, x2)\n if alpha not in (1, None):\n ivy.set_array_mode(False)\n x2 = multiply(x2, alpha)\n ivy.unset_array_mode()\n return jnp.subtract(x1, x2)\n\n\ndef trapz(\n y: JaxArray,\n /,\n *,\n x: Optional[JaxArray] = None,\n dx: float = 1.0,\n axis: int = -1,\n out: Optional[JaxArray] = None,\n) -> JaxArray:\n return jnp.trapz(y, x=x, dx=dx, axis=axis)\n\n\n@with_unsupported_dtypes(\n {\"0.4.23 and below\": (\"complex\", \"float16\", \"bfloat16\")}, backend_version\n)\ndef tan(x: JaxArray, /, *, out: Optional[JaxArray] = None) -> JaxArray:\n return jnp.tan(x)\n\n\ndef tanh(\n x: JaxArray, /, *, complex_mode=\"jax\", out: Optional[JaxArray] = None\n) -> JaxArray:\n return jnp.tanh(x)\n\n\n@with_unsupported_dtypes({\"0.4.23 and below\": (\"complex\",)}, backend_version)\ndef trunc(x: JaxArray, /, *, out: Optional[JaxArray] = None) -> JaxArray:\n if \"int\" in str(x.dtype):\n return x\n else:\n return jnp.trunc(x)\n\n\ndef exp2(\n x: Union[JaxArray, float, list, tuple],\n /,\n *,\n out: Optional[JaxArray] = None,\n) -> JaxArray:\n return jnp.power(2, x)\n\n\ndef imag(\n val: JaxArray,\n /,\n *,\n out: Optional[JaxArray] = None,\n) -> JaxArray:\n return jnp.imag(val)\n\n\ndef angle(\n z: JaxArray,\n /,\n *,\n deg: bool = False,\n out: Optional[JaxArray] = None,\n) -> JaxArray:\n return jnp.angle(z, deg=deg)\n\n\n# Extra #\n# ------#\n\n\n@with_unsupported_dtypes({\"0.4.23 and below\": (\"complex\",)}, backend_version)\ndef erf(x: JaxArray, /, *, out: Optional[JaxArray] = None) -> JaxArray:\n return jax.scipy.special.erf(x)\n\n\ndef maximum(\n x1: Union[float, JaxArray],\n x2: Union[float, JaxArray],\n /,\n *,\n use_where: bool = True,\n out: Optional[JaxArray] = None,\n) -> JaxArray:\n x1, x2 = ivy.promote_types_of_inputs(x1, x2)\n if use_where:\n return jnp.where(x1 >= x2, x1, x2)\n return jnp.maximum(x1, x2)\n\n\ndef minimum(\n x1: Union[float, JaxArray],\n x2: Union[float, JaxArray],\n /,\n *,\n use_where: bool = True,\n out: Optional[JaxArray] = None,\n) -> JaxArray:\n x1, x2 = ivy.promote_types_of_inputs(x1, x2)\n if use_where:\n return jnp.where(x1 <= x2, x1, x2)\n return jnp.minimum(x1, x2)\n\n\ndef reciprocal(\n x: Union[float, JaxArray], /, *, out: Optional[JaxArray] = None\n) -> JaxArray:\n return jnp.reciprocal(x)\n\n\ndef deg2rad(x: JaxArray, /, *, out: Optional[JaxArray] = None) -> JaxArray:\n return jnp.deg2rad(x)\n\n\ndef rad2deg(x: JaxArray, /, *, out: Optional[JaxArray] = None) -> JaxArray:\n return jnp.rad2deg(x)\n\n\ndef isreal(x: JaxArray, /, *, out: Optional[JaxArray] = None) -> JaxArray:\n return jnp.isreal(x)\n\n\n@with_unsupported_dtypes({\"0.4.23 and below\": (\"complex\",)}, backend_version)\ndef fmod(\n x1: JaxArray,\n x2: JaxArray,\n /,\n *,\n out: Optional[JaxArray] = None,\n) -> JaxArray:\n x1, x2 = promote_types_of_inputs(x1, x2)\n return jnp.fmod(x1, x2)\n\n\ndef gcd(\n x1: Union[JaxArray, float, list, tuple],\n x2: Union[JaxArray, float, list, tuple],\n /,\n *,\n out: Optional[JaxArray] = None,\n) -> JaxArray:\n x1, x2 = promote_types_of_inputs(x1, x2)\n return jnp.gcd(x1, x2)\n\n\ndef real(x: JaxArray, /, *, out: Optional[JaxArray] = None) -> JaxArray:\n return jnp.real(x)\n", "path": "ivy/functional/backends/jax/elementwise.py" } ]
[ { "content": "# global\nfrom typing import Union, Optional\n\nimport jax\nimport jax.numpy as jnp\n\n# local\nimport ivy\nfrom ivy import (\n default_float_dtype,\n is_float_dtype,\n)\nfrom ivy import promote_types_of_inputs\nfrom ivy.functional.backends.jax import JaxArray\nfrom ivy.func_wrapper import with_unsupported_dtypes\nfrom . import backend_version\n\n\ndef abs(\n x: Union[float, JaxArray],\n /,\n *,\n out: Optional[JaxArray] = None,\n) -> JaxArray:\n if (hasattr(x, \"dtype\") and \"bool\" in str(x.dtype)) or isinstance(x, bool):\n return x\n # jnp.where is used for consistent gradients\n return jnp.where(x != 0, jnp.absolute(x), 0)\n\n\ndef acos(x: JaxArray, /, *, out: Optional[JaxArray] = None) -> JaxArray:\n return jnp.arccos(x)\n\n\ndef acosh(x: JaxArray, /, *, out: Optional[JaxArray] = None) -> JaxArray:\n return jnp.arccosh(x)\n\n\ndef add(\n x1: Union[float, JaxArray],\n x2: Union[float, JaxArray],\n /,\n *,\n alpha: Union[int, float] = 1,\n out: Optional[JaxArray] = None,\n) -> JaxArray:\n x1, x2 = ivy.promote_types_of_inputs(x1, x2)\n if alpha not in (1, None):\n with ivy.ArrayMode(False):\n x2 = multiply(x2, alpha)\n return jnp.add(x1, x2)\n\n\ndef asin(x: JaxArray, /, *, out: Optional[JaxArray] = None) -> JaxArray:\n return jnp.arcsin(x)\n\n\n@with_unsupported_dtypes({\"0.4.23 and below\": (\"complex\",)}, backend_version)\ndef asinh(x: JaxArray, /, *, out: Optional[JaxArray] = None) -> JaxArray:\n return jnp.arcsinh(x)\n\n\ndef atan(x: JaxArray, /, *, out: Optional[JaxArray] = None) -> JaxArray:\n return jnp.arctan(x)\n\n\ndef atan2(x1: JaxArray, x2: JaxArray, /, *, out: Optional[JaxArray] = None) -> JaxArray:\n x1, x2 = ivy.promote_types_of_inputs(x1, x2)\n return jnp.arctan2(x1, x2)\n\n\n@with_unsupported_dtypes({\"0.4.23 and below\": (\"complex\",)}, backend_version)\ndef atanh(x: JaxArray, /, *, out: Optional[JaxArray] = None) -> JaxArray:\n return jnp.arctanh(x)\n\n\n@with_unsupported_dtypes({\"0.4.23 and below\": (\"complex\",)}, backend_version)\ndef bitwise_and(\n x1: Union[int, JaxArray],\n x2: Union[int, JaxArray],\n /,\n *,\n out: Optional[JaxArray] = None,\n) -> JaxArray:\n x1, x2 = ivy.promote_types_of_inputs(x1, x2, array_api_promotion=True)\n return jnp.bitwise_and(x1, x2)\n\n\n@with_unsupported_dtypes({\"0.4.23 and below\": (\"complex\",)}, backend_version)\ndef bitwise_invert(\n x: Union[int, JaxArray], /, *, out: Optional[JaxArray] = None\n) -> JaxArray:\n return jnp.bitwise_not(x)\n\n\n@with_unsupported_dtypes({\"0.4.23 and below\": (\"complex\",)}, backend_version)\ndef bitwise_left_shift(\n x1: Union[int, JaxArray],\n x2: Union[int, JaxArray],\n /,\n *,\n out: Optional[JaxArray] = None,\n) -> JaxArray:\n x1, x2 = ivy.promote_types_of_inputs(x1, x2, array_api_promotion=True)\n return jnp.left_shift(x1, x2)\n\n\n@with_unsupported_dtypes({\"0.4.23 and below\": (\"complex\",)}, backend_version)\ndef bitwise_or(\n x1: Union[int, JaxArray],\n x2: Union[int, JaxArray],\n /,\n *,\n out: Optional[JaxArray] = None,\n) -> JaxArray:\n x1, x2 = ivy.promote_types_of_inputs(x1, x2, array_api_promotion=True)\n return jnp.bitwise_or(x1, x2)\n\n\n@with_unsupported_dtypes({\"0.4.23 and below\": (\"complex\",)}, backend_version)\ndef bitwise_right_shift(\n x1: Union[int, JaxArray],\n x2: Union[int, JaxArray],\n /,\n *,\n out: Optional[JaxArray] = None,\n) -> JaxArray:\n x1, x2 = ivy.promote_types_of_inputs(x1, x2, array_api_promotion=True)\n return jnp.right_shift(x1, x2)\n\n\n@with_unsupported_dtypes({\"0.4.23 and below\": (\"complex\",)}, backend_version)\ndef bitwise_xor(\n x1: Union[int, JaxArray],\n x2: Union[int, JaxArray],\n /,\n *,\n out: Optional[JaxArray] = None,\n) -> JaxArray:\n x1, x2 = ivy.promote_types_of_inputs(x1, x2, array_api_promotion=True)\n return jnp.bitwise_xor(x1, x2)\n\n\n@with_unsupported_dtypes({\"0.4.23 and below\": (\"complex\",)}, backend_version)\ndef ceil(x: JaxArray, /, *, out: Optional[JaxArray] = None) -> JaxArray:\n if \"int\" in str(x.dtype):\n return x\n else:\n return jnp.ceil(x)\n\n\ndef cos(x: JaxArray, /, *, out: Optional[JaxArray] = None) -> JaxArray:\n return jnp.cos(x)\n\n\n@with_unsupported_dtypes({\"0.4.23 and below\": (\"float16\",)}, backend_version)\ndef cosh(x: JaxArray, /, *, out: Optional[JaxArray] = None) -> JaxArray:\n return jnp.cosh(x)\n\n\ndef divide(\n x1: Union[float, JaxArray],\n x2: Union[float, JaxArray],\n /,\n *,\n out: Optional[JaxArray] = None,\n) -> JaxArray:\n x1, x2 = ivy.promote_types_of_inputs(x1, x2)\n ret = jax.numpy.divide(x1, x2)\n if ivy.is_float_dtype(x1.dtype) or ivy.is_complex_dtype(x1.dtype):\n ret = jnp.asarray(ret, dtype=x1.dtype)\n else:\n ret = jnp.asarray(ret, dtype=ivy.default_float_dtype(as_native=True))\n return ret\n\n\ndef equal(\n x1: Union[float, JaxArray],\n x2: Union[float, JaxArray],\n /,\n *,\n out: Optional[JaxArray] = None,\n) -> JaxArray:\n x1, x2 = ivy.promote_types_of_inputs(x1, x2)\n return jnp.equal(x1, x2)\n\n\ndef exp(x: JaxArray, /, *, out: Optional[JaxArray] = None) -> JaxArray:\n return jnp.exp(x)\n\n\ndef expm1(x: JaxArray, /, *, out: Optional[JaxArray] = None) -> JaxArray:\n return jnp.expm1(x)\n\n\n@with_unsupported_dtypes({\"0.4.23 and below\": (\"complex\",)}, backend_version)\ndef floor(x: JaxArray, /, *, out: Optional[JaxArray] = None) -> JaxArray:\n if \"int\" in str(x.dtype):\n return x\n else:\n return jnp.floor(x)\n\n\n@with_unsupported_dtypes({\"0.4.23 and below\": (\"complex\",)}, backend_version)\ndef floor_divide(\n x1: Union[float, JaxArray],\n x2: Union[float, JaxArray],\n /,\n *,\n out: Optional[JaxArray] = None,\n) -> JaxArray:\n x1, x2 = ivy.promote_types_of_inputs(x1, x2)\n return jnp.floor(jnp.divide(x1, x2)).astype(x1.dtype)\n\n\ndef fmin(\n x1: JaxArray,\n x2: JaxArray,\n /,\n *,\n out: Optional[JaxArray] = None,\n) -> JaxArray:\n return jnp.fmin(x1, x2)\n\n\ndef greater(\n x1: Union[float, JaxArray],\n x2: Union[float, JaxArray],\n /,\n *,\n out: Optional[JaxArray] = None,\n) -> JaxArray:\n x1, x2 = ivy.promote_types_of_inputs(x1, x2)\n return jnp.greater(x1, x2)\n\n\ndef greater_equal(\n x1: Union[float, JaxArray],\n x2: Union[float, JaxArray],\n /,\n *,\n out: Optional[JaxArray] = None,\n) -> JaxArray:\n x1, x2 = ivy.promote_types_of_inputs(x1, x2)\n return jnp.greater_equal(x1, x2)\n\n\ndef isfinite(x: JaxArray, /, *, out: Optional[JaxArray] = None) -> JaxArray:\n return jnp.isfinite(x)\n\n\n@with_unsupported_dtypes({\"0.4.23 and below\": (\"complex\",)}, backend_version)\ndef isinf(\n x: JaxArray,\n /,\n *,\n detect_positive: bool = True,\n detect_negative: bool = True,\n out: Optional[JaxArray] = None,\n) -> JaxArray:\n if detect_positive and detect_negative:\n return jnp.isinf(x)\n elif detect_positive:\n return jnp.isposinf(x)\n elif detect_negative:\n return jnp.isneginf(x)\n return jnp.full_like(x, False, dtype=jnp.bool_)\n\n\ndef isnan(x: JaxArray, /, *, out: Optional[JaxArray] = None) -> JaxArray:\n return jnp.isnan(x)\n\n\ndef lcm(x1: JaxArray, x2: JaxArray, /, *, out: Optional[JaxArray] = None) -> JaxArray:\n x1, x2 = promote_types_of_inputs(x1, x2)\n return jnp.lcm(x1, x2)\n\n\ndef less(\n x1: Union[float, JaxArray],\n x2: Union[float, JaxArray],\n /,\n *,\n out: Optional[JaxArray] = None,\n) -> JaxArray:\n x1, x2 = ivy.promote_types_of_inputs(x1, x2)\n return jnp.less(x1, x2)\n\n\ndef less_equal(\n x1: Union[float, JaxArray],\n x2: Union[float, JaxArray],\n /,\n *,\n out: Optional[JaxArray] = None,\n) -> JaxArray:\n x1, x2 = ivy.promote_types_of_inputs(x1, x2)\n return jnp.less_equal(x1, x2)\n\n\ndef log(x: JaxArray, /, *, out: Optional[JaxArray] = None) -> JaxArray:\n return jnp.log(x)\n\n\ndef log10(x: JaxArray, /, *, out: Optional[JaxArray] = None) -> JaxArray:\n return jnp.log10(x)\n\n\ndef log1p(x: JaxArray, /, *, out: Optional[JaxArray] = None) -> JaxArray:\n return jnp.log1p(x)\n\n\ndef log2(x: JaxArray, /, *, out: Optional[JaxArray] = None) -> JaxArray:\n return jnp.log2(x)\n\n\ndef logaddexp(\n x1: JaxArray, x2: JaxArray, /, *, out: Optional[JaxArray] = None\n) -> JaxArray:\n return jnp.logaddexp(x1, x2)\n\n\ndef logaddexp2(\n x1: Union[JaxArray, float, list, tuple],\n x2: Union[JaxArray, float, list, tuple],\n /,\n *,\n out: Optional[JaxArray] = None,\n) -> JaxArray:\n x1, x2 = promote_types_of_inputs(x1, x2)\n if not is_float_dtype(x1):\n x1 = x1.astype(default_float_dtype(as_native=True))\n x2 = x2.astype(default_float_dtype(as_native=True))\n return jnp.logaddexp2(x1, x2)\n\n\ndef logical_and(\n x1: JaxArray, x2: JaxArray, /, *, out: Optional[JaxArray] = None\n) -> JaxArray:\n return jnp.logical_and(x1, x2)\n\n\ndef logical_not(x: JaxArray, /, *, out: Optional[JaxArray] = None) -> JaxArray:\n return jnp.logical_not(x)\n\n\ndef logical_or(\n x1: JaxArray, x2: JaxArray, /, *, out: Optional[JaxArray] = None\n) -> JaxArray:\n return jnp.logical_or(x1, x2)\n\n\ndef logical_xor(\n x1: JaxArray, x2: JaxArray, /, *, out: Optional[JaxArray] = None\n) -> JaxArray:\n return jnp.logical_xor(x1, x2)\n\n\ndef multiply(\n x1: Union[float, JaxArray],\n x2: Union[float, JaxArray],\n /,\n *,\n out: Optional[JaxArray] = None,\n) -> JaxArray:\n x1, x2 = ivy.promote_types_of_inputs(x1, x2)\n return jnp.multiply(x1, x2)\n\n\ndef nan_to_num(\n x: JaxArray,\n /,\n *,\n copy: bool = True,\n nan: Union[float, int] = 0.0,\n posinf: Optional[Union[float, int]] = None,\n neginf: Optional[Union[float, int]] = None,\n out: Optional[JaxArray] = None,\n) -> JaxArray:\n return jnp.nan_to_num(x, copy=copy, nan=nan, posinf=posinf, neginf=neginf)\n\n\ndef negative(\n x: Union[float, JaxArray], /, *, out: Optional[JaxArray] = None\n) -> JaxArray:\n return jnp.negative(x)\n\n\ndef not_equal(\n x1: Union[float, JaxArray],\n x2: Union[float, JaxArray],\n /,\n *,\n out: Optional[JaxArray] = None,\n) -> JaxArray:\n x1, x2 = ivy.promote_types_of_inputs(x1, x2)\n return jnp.not_equal(x1, x2)\n\n\ndef positive(\n x: Union[float, JaxArray], /, *, out: Optional[JaxArray] = None\n) -> JaxArray:\n return jnp.positive(x)\n\n\ndef pow(\n x1: JaxArray,\n x2: Union[int, float, JaxArray],\n /,\n *,\n out: Optional[JaxArray] = None,\n) -> JaxArray:\n x1, x2 = ivy.promote_types_of_inputs(x1, x2)\n if (\n ivy.any(x1 == 0)\n and ivy.is_int_dtype(x1)\n and ivy.any(x2 < 0)\n and all(dtype not in str(x1.dtype) for dtype in [\"int16\", \"int8\"])\n ):\n if ivy.is_int_dtype(x1):\n fill_value = jnp.iinfo(x1.dtype).min\n else:\n fill_value = jnp.finfo(x1.dtype).min\n ret = jnp.float_power(x1, x2)\n return jnp.where(jnp.bitwise_and(x1 == 0, x2 < 0), fill_value, ret).astype(\n x1.dtype\n )\n if ivy.is_int_dtype(x1) and ivy.any(x2 < 0):\n return jnp.float_power(x1, x2).astype(x1.dtype)\n return jnp.power(x1, x2)\n\n\n@with_unsupported_dtypes({\"0.4.23 and below\": (\"complex\",)}, backend_version)\ndef remainder(\n x1: Union[float, JaxArray],\n x2: Union[float, JaxArray],\n /,\n *,\n modulus: bool = True,\n out: Optional[JaxArray] = None,\n) -> JaxArray:\n x1, x2 = ivy.promote_types_of_inputs(x1, x2)\n if not modulus:\n res = x1 / x2\n res_floored = jnp.where(res >= 0, jnp.floor(res), jnp.ceil(res))\n diff = res - res_floored\n diff, x2 = ivy.promote_types_of_inputs(diff, x2)\n return jnp.round(diff * x2).astype(x1.dtype)\n return jnp.remainder(x1, x2)\n\n\ndef round(\n x: JaxArray, /, *, decimals: int = 0, out: Optional[JaxArray] = None\n) -> JaxArray:\n if \"int\" in str(x.dtype):\n ret = jnp.copy(x)\n else:\n ret = jnp.round(x, decimals=decimals)\n if ivy.exists(out):\n return ivy.inplace_update(out, ret)\n return ret\n\n\ndef _abs_variant_sign(x):\n return jnp.where(x != 0, x / jnp.abs(x), 0)\n\n\ndef sign(\n x: JaxArray, /, *, np_variant: Optional[bool] = True, out: Optional[JaxArray] = None\n) -> JaxArray:\n if \"complex\" in str(x.dtype):\n return jnp.sign(x) if np_variant else _abs_variant_sign(x)\n return jnp.where(x == -0.0, 0.0, jnp.sign(x)).astype(x.dtype)\n\n\ndef sin(x: JaxArray, /, *, out: Optional[JaxArray] = None) -> JaxArray:\n return jnp.sin(x)\n\n\ndef sinh(x: JaxArray, /, *, out: Optional[JaxArray] = None) -> JaxArray:\n return jnp.sinh(x)\n\n\ndef sqrt(x: JaxArray, /, *, out: Optional[JaxArray] = None) -> JaxArray:\n return jnp.sqrt(x)\n\n\ndef square(x: JaxArray, /, *, out: Optional[JaxArray] = None) -> JaxArray:\n return jnp.square(x)\n\n\ndef subtract(\n x1: Union[float, JaxArray],\n x2: Union[float, JaxArray],\n /,\n *,\n alpha: Optional[Union[int, float]] = None,\n out: Optional[JaxArray] = None,\n) -> JaxArray:\n x1, x2 = ivy.promote_types_of_inputs(x1, x2)\n if alpha not in (1, None):\n ivy.set_array_mode(False)\n x2 = multiply(x2, alpha)\n ivy.unset_array_mode()\n return jnp.subtract(x1, x2)\n\n\ndef trapz(\n y: JaxArray,\n /,\n *,\n x: Optional[JaxArray] = None,\n dx: float = 1.0,\n axis: int = -1,\n out: Optional[JaxArray] = None,\n) -> JaxArray:\n return jnp.trapz(y, x=x, dx=dx, axis=axis)\n\n\n@with_unsupported_dtypes(\n {\"0.4.23 and below\": (\"complex\", \"float16\", \"bfloat16\")}, backend_version\n)\ndef tan(x: JaxArray, /, *, out: Optional[JaxArray] = None) -> JaxArray:\n return jnp.tan(x)\n\n\ndef tanh(\n x: JaxArray, /, *, complex_mode=\"jax\", out: Optional[JaxArray] = None\n) -> JaxArray:\n return jnp.tanh(x)\n\n\n@with_unsupported_dtypes({\"0.4.23 and below\": (\"complex\",)}, backend_version)\ndef trunc(x: JaxArray, /, *, out: Optional[JaxArray] = None) -> JaxArray:\n if \"int\" in str(x.dtype):\n return x\n else:\n return jnp.trunc(x)\n\n\ndef exp2(\n x: Union[JaxArray, float, list, tuple],\n /,\n *,\n out: Optional[JaxArray] = None,\n) -> JaxArray:\n return jnp.power(2, x)\n\n\ndef imag(\n val: JaxArray,\n /,\n *,\n out: Optional[JaxArray] = None,\n) -> JaxArray:\n return jnp.imag(val)\n\n\ndef angle(\n z: JaxArray,\n /,\n *,\n deg: bool = False,\n out: Optional[JaxArray] = None,\n) -> JaxArray:\n return jnp.angle(z, deg=deg)\n\n\n# Extra #\n# ------#\n\n\n@with_unsupported_dtypes({\"0.4.23 and below\": (\"complex\",)}, backend_version)\ndef erf(x: JaxArray, /, *, out: Optional[JaxArray] = None) -> JaxArray:\n return jax.scipy.special.erf(x)\n\n\ndef maximum(\n x1: Union[float, JaxArray],\n x2: Union[float, JaxArray],\n /,\n *,\n use_where: bool = True,\n out: Optional[JaxArray] = None,\n) -> JaxArray:\n x1, x2 = ivy.promote_types_of_inputs(x1, x2)\n if use_where:\n return jnp.where(x1 >= x2, x1, x2)\n return jnp.maximum(x1, x2)\n\n\ndef minimum(\n x1: Union[float, JaxArray],\n x2: Union[float, JaxArray],\n /,\n *,\n use_where: bool = True,\n out: Optional[JaxArray] = None,\n) -> JaxArray:\n x1, x2 = ivy.promote_types_of_inputs(x1, x2)\n if use_where:\n return jnp.where(x1 <= x2, x1, x2)\n return jnp.minimum(x1, x2)\n\n\ndef reciprocal(\n x: Union[float, JaxArray], /, *, out: Optional[JaxArray] = None\n) -> JaxArray:\n return jnp.reciprocal(x)\n\n\ndef deg2rad(x: JaxArray, /, *, out: Optional[JaxArray] = None) -> JaxArray:\n return jnp.deg2rad(x)\n\n\ndef rad2deg(x: JaxArray, /, *, out: Optional[JaxArray] = None) -> JaxArray:\n return jnp.rad2deg(x)\n\n\ndef isreal(x: JaxArray, /, *, out: Optional[JaxArray] = None) -> JaxArray:\n return jnp.isreal(x)\n\n\n@with_unsupported_dtypes({\"0.4.23 and below\": (\"complex\",)}, backend_version)\ndef fmod(\n x1: JaxArray,\n x2: JaxArray,\n /,\n *,\n out: Optional[JaxArray] = None,\n) -> JaxArray:\n x1, x2 = promote_types_of_inputs(x1, x2)\n return jnp.fmod(x1, x2)\n\n\ndef gcd(\n x1: Union[JaxArray, float, list, tuple],\n x2: Union[JaxArray, float, list, tuple],\n /,\n *,\n out: Optional[JaxArray] = None,\n) -> JaxArray:\n x1, x2 = promote_types_of_inputs(x1, x2)\n return jnp.gcd(x1, x2)\n\n\ndef real(x: JaxArray, /, *, out: Optional[JaxArray] = None) -> JaxArray:\n return jnp.real(x)\n", "path": "ivy/functional/backends/jax/elementwise.py" } ]
diff --git a/ivy/functional/backends/jax/elementwise.py b/ivy/functional/backends/jax/elementwise.py index 42a5fd51077c5..056996b8cc710 100644 --- a/ivy/functional/backends/jax/elementwise.py +++ b/ivy/functional/backends/jax/elementwise.py @@ -55,6 +55,7 @@ def asin(x: JaxArray, /, *, out: Optional[JaxArray] = None) -> JaxArray: return jnp.arcsin(x) +@with_unsupported_dtypes({"0.4.23 and below": ("complex",)}, backend_version) def asinh(x: JaxArray, /, *, out: Optional[JaxArray] = None) -> JaxArray: return jnp.arcsinh(x)
ethereum__web3.py-2568
Can not parse tuple[][] ABI type * Version: 5.29.2 * Python: 3.7.3 * OS: osx * `pip freeze` output ``` aiohttp==3.8.1 aiosignal==1.2.0 alabaster==0.7.12 anaconda-client==1.7.2 anaconda-navigator==1.9.7 anaconda-project==0.8.3 anticaptchaofficial==1.0.34 apns2==0.7.2 appnope==0.1.0 appscript==1.0.1 asn1crypto==0.24.0 astroid==2.2.5 astropy==3.2.1 async-generator==1.10 async-timeout==4.0.2 asynctest==0.13.0 atomicwrites==1.3.0 attrs==21.2.0 Babel==2.7.0 backcall==0.1.0 backports.functools-lru-cache==1.5 backports.os==0.1.1 backports.shutil-get-terminal-size==1.0.0 backports.tempfile==1.0 backports.weakref==1.0.post1 base58==2.1.1 beautifulsoup4==4.7.1 bitarray==1.2.2 bkcharts==0.2 bleach==3.1.0 blocknative-sdk==0.2.5 bokeh==1.2.0 boto==2.49.0 Bottleneck==1.2.1 certifi==2019.6.16 cffi==1.12.3 chardet==3.0.4 charset-normalizer==2.0.11 Click==7.0 cloudpickle==1.2.1 clyent==1.2.2 colorama==0.4.1 conda==4.7.10 conda-build==3.18.8 conda-package-handling==1.3.11 conda-verify==3.4.2 contextlib2==0.5.5 cryptography==2.7 cycler==0.10.0 Cython==0.29.12 cytoolz==0.11.2 dask==2.1.0 decorator==4.4.0 defusedxml==0.6.0 distributed==2.1.0 dnspython==2.2.0 docutils==0.14 entrypoints==0.3 et-xmlfile==1.0.1 eth-abi==2.1.1 eth-account==0.5.8 eth-hash==0.3.2 eth-keyfile==0.5.1 eth-keys==0.3.4 eth-rlp==0.3.0 eth-typing==2.3.0 eth-utils==1.10.0 fake-useragent==0.1.11 fastcache==1.1.0 filelock==3.0.12 Flask==1.1.1 frozenlist==1.3.0 future==0.17.1 gevent==1.4.0 glob2==0.7 gmpy2==2.0.8 greenlet==0.4.15 h11==0.12.0 h2==2.6.2 h5py==2.9.0 heapdict==1.0.0 hexbytes==0.2.2 hpack==3.0.0 html5lib==1.0.1 hyper==0.7.0 hyperframe==3.2.0 idna==3.2 imageio==2.5.0 imagesize==1.1.0 importlib-metadata==0.17 importlib-resources==5.8.0 ipfshttpclient==0.8.0a2 ipykernel==5.1.1 ipython==7.6.1 ipython-genutils==0.2.0 ipywidgets==7.5.0 isort==4.3.21 itsdangerous==1.1.0 jdcal==1.4.1 jedi==0.13.3 Jinja2==2.10.1 joblib==0.13.2 json5==0.8.4 jsonschema==4.6.0 jupyter==1.0.0 jupyter-client==5.3.1 jupyter-console==6.0.0 jupyter-core==4.5.0 jupyterlab==1.0.2 jupyterlab-server==1.0.0 keyring==18.0.0 kiwisolver==1.1.0 lazy-object-proxy==1.4.1 libarchive-c==2.8 lief==0.9.0 llvmlite==0.29.0 locket==0.2.0 lru-dict==1.1.7 lxml==4.3.4 MarkupSafe==1.1.1 matplotlib==3.1.0 mccabe==0.6.1 mistune==0.8.4 mkl-fft==1.0.12 mkl-random==1.0.2 mkl-service==2.0.2 mock==3.0.5 more-itertools==7.0.0 mpmath==1.1.0 msgpack==0.6.1 multiaddr==0.0.9 multidict==6.0.2 multipledispatch==0.6.0 navigator-updater==0.2.1 nbconvert==5.5.0 nbformat==4.4.0 netaddr==0.8.0 networkx==2.3 nltk==3.4.4 nose==1.3.7 notebook==6.0.0 numba==0.44.1 numexpr==2.6.9 numpy==1.16.4 numpydoc==0.9.1 olefile==0.46 openpyxl==2.6.2 outcome==1.1.0 packaging==19.0 pandas==0.24.2 pandocfilters==1.4.2 parsimonious==0.8.1 parso==0.5.0 partd==1.0.0 path.py==12.0.1 pathlib2==2.3.4 patsy==0.5.1 pep8==1.7.1 pexpect==4.7.0 phxsocket==0.1.2 pickleshare==0.7.5 Pillow==6.1.0 pkginfo==1.5.0.1 pluggy==0.12.0 ply==3.11 prometheus-client==0.7.1 prompt-toolkit==2.0.9 protobuf==3.19.4 psutil==5.6.3 ptyprocess==0.6.0 py==1.8.0 pycodestyle==2.5.0 pycosat==0.6.3 pycparser==2.19 pycrypto==2.6.1 pycryptodome==3.14.1 pycurl==7.43.0.3 pyflakes==2.1.1 Pygments==2.4.2 PyJWT==1.7.1 pylint==2.3.1 pymongo==4.0.1 pyodbc==4.0.26 pyOpenSSL==19.0.0 pyparsing==2.4.0 pyrsistent==0.14.11 PySocks==1.7.0 pytest==5.0.1 pytest-arraydiff==0.3 pytest-astropy==0.5.0 pytest-doctestplus==0.3.0 pytest-openfiles==0.3.2 pytest-remotedata==0.3.1 python-dateutil==2.8.0 pytz==2019.1 PyWavelets==1.0.3 PyYAML==5.1.1 pyzmq==18.0.0 QtAwesome==0.5.7 qtconsole==4.5.1 QtPy==1.8.0 rel==0.4.7 requests==2.27.1 rlp==2.0.1 rope==0.14.0 ruamel-yaml==0.15.46 scikit-image==0.15.0 scikit-learn==0.21.2 scipy==1.3.0 seaborn==0.9.0 Send2Trash==1.5.0 simplegeneric==0.8.1 singledispatch==3.4.0.3 six==1.12.0 sniffio==1.2.0 snowballstemmer==1.9.0 sortedcollections==1.1.2 sortedcontainers==2.4.0 soupsieve==1.8 Sphinx==2.1.2 sphinxcontrib-applehelp==1.0.1 sphinxcontrib-devhelp==1.0.1 sphinxcontrib-htmlhelp==1.0.2 sphinxcontrib-jsmath==1.0.1 sphinxcontrib-qthelp==1.0.2 sphinxcontrib-serializinghtml==1.1.3 sphinxcontrib-websupport==1.1.2 spyder==3.3.6 spyder-kernels==0.5.1 SQLAlchemy==1.3.5 statsmodels==0.10.0 style==1.1.0 sympy==1.4 tables==3.5.2 tblib==1.4.0 terminado==0.8.2 testpath==0.4.2 toolz==0.10.0 tornado==6.0.3 tqdm==4.32.1 traitlets==4.3.2 trio==0.19.0 trio-websocket==0.9.2 typing-extensions==4.0.1 unicodecsv==0.14.1 update==0.0.1 urllib3==1.24.2 varint==1.0.2 wcwidth==0.1.7 web3==5.29.2 webencodings==0.5.1 websocket-client==1.3.2 websockets==9.1 Werkzeug==0.15.4 widgetsnbextension==3.5.0 wrapt==1.11.2 wsproto==1.0.0 wurlitzer==1.0.2 xlrd==1.2.0 XlsxWriter==1.1.8 xlwings==0.15.8 xlwt==1.3.0 yarl==1.7.2 zict==1.0.0 zipp==3.8.0 ``` ### What was wrong? The tuple regex match is something like this (in abi.py) ``` TUPLE_TYPE_STR_RE = re.compile(r'^(tuple)(\[([1-9][0-9]*)?\])?$') ``` where it can only match 'tuple[]' or 'tuple[12]' something like this. But the Seaport contract of opensea using 'tuple[][]' structure type for `fulfillAvailableOrders` and `fulfillAvailableAdvancedOrders`, which causes the regex match failed and can not correct encoding the data. The seaport contract is https://etherscan.io/address/0x00000000006c3852cbEf3e08E8dF289169EdE581 Please help to handle this case.
[ { "content": "import binascii\nfrom collections import (\n abc,\n namedtuple,\n)\nimport copy\nimport itertools\nimport re\nfrom typing import (\n Any,\n Callable,\n Collection,\n Dict,\n Iterable,\n List,\n Mapping,\n Optional,\n Sequence,\n Tuple,\n Type,\n Union,\n cast,\n)\nimport warnings\n\nfrom eth_abi import (\n codec,\n decoding,\n encoding,\n)\nfrom eth_abi.base import (\n parse_type_str,\n)\nfrom eth_abi.exceptions import (\n ValueOutOfBounds,\n)\nfrom eth_abi.grammar import (\n ABIType,\n BasicType,\n TupleType,\n parse,\n)\nfrom eth_abi.registry import (\n ABIRegistry,\n BaseEquals,\n registry as default_registry,\n)\nfrom eth_typing import (\n HexStr,\n TypeStr,\n)\nfrom eth_utils import (\n combomethod,\n decode_hex,\n is_bytes,\n is_list_like,\n is_text,\n to_text,\n to_tuple,\n)\nfrom eth_utils.abi import (\n collapse_if_tuple,\n)\nfrom eth_utils.toolz import (\n curry,\n partial,\n pipe,\n)\n\nfrom web3._utils.ens import (\n is_ens_name,\n)\nfrom web3._utils.formatters import (\n recursive_map,\n)\nfrom web3.exceptions import (\n FallbackNotFound,\n)\nfrom web3.types import (\n ABI,\n ABIEvent,\n ABIEventParams,\n ABIFunction,\n ABIFunctionParams,\n)\n\n\ndef filter_by_type(_type: str, contract_abi: ABI) -> List[Union[ABIFunction, ABIEvent]]:\n return [abi for abi in contract_abi if abi['type'] == _type]\n\n\ndef filter_by_name(name: str, contract_abi: ABI) -> List[Union[ABIFunction, ABIEvent]]:\n return [\n abi\n for abi\n in contract_abi\n if (\n abi['type'] not in ('fallback', 'constructor', 'receive')\n and abi['name'] == name\n )\n ]\n\n\ndef get_abi_input_types(abi: ABIFunction) -> List[str]:\n if 'inputs' not in abi and (abi['type'] == 'fallback' or abi['type'] == 'receive'):\n return []\n else:\n return [collapse_if_tuple(cast(Dict[str, Any], arg)) for arg in abi['inputs']]\n\n\ndef get_abi_output_types(abi: ABIFunction) -> List[str]:\n if abi['type'] == 'fallback':\n return []\n else:\n return [collapse_if_tuple(cast(Dict[str, Any], arg)) for arg in abi['outputs']]\n\n\ndef get_abi_input_names(abi: Union[ABIFunction, ABIEvent]) -> List[str]:\n if 'inputs' not in abi and abi['type'] == 'fallback':\n return []\n else:\n return [arg['name'] for arg in abi['inputs']]\n\n\ndef get_receive_func_abi(contract_abi: ABI) -> ABIFunction:\n receive_abis = filter_by_type('receive', contract_abi)\n if receive_abis:\n return cast(ABIFunction, receive_abis[0])\n else:\n raise FallbackNotFound(\"No receive function was found in the contract ABI.\")\n\n\ndef get_fallback_func_abi(contract_abi: ABI) -> ABIFunction:\n fallback_abis = filter_by_type('fallback', contract_abi)\n if fallback_abis:\n return cast(ABIFunction, fallback_abis[0])\n else:\n raise FallbackNotFound(\"No fallback function was found in the contract ABI.\")\n\n\ndef fallback_func_abi_exists(contract_abi: ABI) -> List[Union[ABIFunction, ABIEvent]]:\n return filter_by_type('fallback', contract_abi)\n\n\ndef receive_func_abi_exists(contract_abi: ABI) -> List[Union[ABIFunction, ABIEvent]]:\n return filter_by_type('receive', contract_abi)\n\n\ndef get_indexed_event_inputs(event_abi: ABIEvent) -> List[ABIEventParams]:\n return [arg for arg in event_abi['inputs'] if arg['indexed'] is True]\n\n\ndef exclude_indexed_event_inputs(event_abi: ABIEvent) -> List[ABIEventParams]:\n return [arg for arg in event_abi['inputs'] if arg['indexed'] is False]\n\n\ndef get_normalized_abi_arg_type(abi_arg: ABIEventParams) -> str:\n \"\"\"\n Return the normalized type for the abi argument provided. In order to account for tuple argument\n types, this abstraction makes use of `collapse_if_tuple()` to collapse the appropriate component\n types within a tuple type, if present.\n \"\"\"\n return collapse_if_tuple(dict(abi_arg))\n\n\ndef filter_by_argument_count(\n num_arguments: int, contract_abi: ABI\n) -> List[Union[ABIFunction, ABIEvent]]:\n return [\n abi\n for abi\n in contract_abi\n if len(abi['inputs']) == num_arguments\n ]\n\n\ndef filter_by_argument_name(\n argument_names: Collection[str], contract_abi: ABI\n) -> List[Union[ABIFunction, ABIEvent]]:\n return [\n abi\n for abi in contract_abi\n if set(argument_names).intersection(\n get_abi_input_names(abi)\n ) == set(argument_names)\n ]\n\n\nclass AddressEncoder(encoding.AddressEncoder):\n @classmethod\n def validate_value(cls, value: Any) -> None:\n if is_ens_name(value):\n return\n\n super().validate_value(value)\n\n\nclass AcceptsHexStrEncoder(encoding.BaseEncoder):\n subencoder_cls: Type[encoding.BaseEncoder] = None\n is_strict: bool = None\n\n def __init__(self, subencoder: encoding.BaseEncoder) -> None:\n self.subencoder = subencoder\n\n # type ignored b/c conflict w/ defined BaseEncoder.is_dynamic = False\n @property\n def is_dynamic(self) -> bool: # type: ignore\n return self.subencoder.is_dynamic\n\n @classmethod\n def from_type_str(cls, abi_type: TypeStr, registry: ABIRegistry) -> \"AcceptsHexStrEncoder\":\n subencoder_cls = cls.get_subencoder_class()\n # cast b/c expects BaseCoder but `from_type_string` restricted to BaseEncoder subclasses\n subencoder = cast(encoding.BaseEncoder, subencoder_cls.from_type_str(abi_type, registry))\n return cls(subencoder)\n\n @classmethod\n def get_subencoder_class(cls) -> Type[encoding.BaseEncoder]:\n if cls.subencoder_cls is None:\n raise AttributeError(f'No subencoder class is set. {cls.__name__}')\n return cls.subencoder_cls\n\n # type ignored b/c combomethod makes signature conflict w/ defined BaseEncoder.validate_value()\n @combomethod\n def validate_value(self, value: Any) -> None: # type: ignore\n normalized_value = self.validate_and_normalize(value)\n return self.subencoder.validate_value(normalized_value)\n\n def encode(self, value: Any) -> bytes:\n normalized_value = self.validate_and_normalize(value)\n return self.subencoder.encode(normalized_value)\n\n def validate_and_normalize(self, value: Any) -> HexStr:\n raw_value = value\n if is_text(value):\n try:\n value = decode_hex(value)\n except binascii.Error:\n self.invalidate_value(\n value,\n msg=f'{value} is an invalid hex string',\n )\n else:\n if raw_value[:2] != '0x':\n if self.is_strict:\n self.invalidate_value(\n raw_value,\n msg='hex string must be prefixed with 0x'\n )\n elif raw_value[:2] != '0x':\n warnings.warn(\n 'in v6 it will be invalid to pass a hex string without the \"0x\" prefix',\n category=DeprecationWarning\n )\n return value\n\n\nclass BytesEncoder(AcceptsHexStrEncoder):\n subencoder_cls = encoding.BytesEncoder\n is_strict = False\n\n\nclass ByteStringEncoder(AcceptsHexStrEncoder):\n subencoder_cls = encoding.ByteStringEncoder\n is_strict = False\n\n\nclass StrictByteStringEncoder(AcceptsHexStrEncoder):\n subencoder_cls = encoding.ByteStringEncoder\n is_strict = True\n\n\nclass ExactLengthBytesEncoder(encoding.BaseEncoder):\n # TODO: move this to eth-abi once the api is stabilized\n is_big_endian = False\n value_bit_size = None\n data_byte_size = None\n\n def validate(self) -> None:\n super().validate()\n\n if self.value_bit_size is None:\n raise ValueError(\"`value_bit_size` may not be none\")\n if self.data_byte_size is None:\n raise ValueError(\"`data_byte_size` may not be none\")\n if self.encode_fn is None:\n raise ValueError(\"`encode_fn` may not be none\")\n if self.is_big_endian is None:\n raise ValueError(\"`is_big_endian` may not be none\")\n\n if self.value_bit_size % 8 != 0:\n raise ValueError(\n \"Invalid value bit size: {0}. Must be a multiple of 8\".format(\n self.value_bit_size,\n )\n )\n\n if self.value_bit_size > self.data_byte_size * 8:\n raise ValueError(\"Value byte size exceeds data size\")\n\n def encode(self, value: Any) -> bytes:\n normalized_value = self.validate_value(value)\n return self.encode_fn(normalized_value)\n\n # type ignored b/c conflict with defined BaseEncoder.validate_value() -> None\n def validate_value(self, value: Any) -> bytes: # type: ignore\n if not is_bytes(value) and not is_text(value):\n self.invalidate_value(value)\n\n raw_value = value\n if is_text(value):\n try:\n value = decode_hex(value)\n except binascii.Error:\n self.invalidate_value(\n value,\n msg=f'{value} is not a valid hex string',\n )\n else:\n if raw_value[:2] != '0x':\n self.invalidate_value(\n raw_value,\n msg='hex string must be prefixed with 0x'\n )\n\n byte_size = self.value_bit_size // 8\n if len(value) > byte_size:\n self.invalidate_value(\n value,\n exc=ValueOutOfBounds,\n msg=\"exceeds total byte size for bytes{} encoding\".format(byte_size),\n )\n elif len(value) < byte_size:\n self.invalidate_value(\n value,\n exc=ValueOutOfBounds,\n msg=\"less than total byte size for bytes{} encoding\".format(byte_size),\n )\n return value\n\n @staticmethod\n def encode_fn(value: Any) -> bytes:\n return value\n\n @parse_type_str('bytes')\n def from_type_str(cls, abi_type: BasicType, registry: ABIRegistry) -> bytes:\n # type ignored b/c kwargs are set in superclass init\n # Unexpected keyword argument \"value_bit_size\" for \"__call__\" of \"BaseEncoder\"\n return cls( # type: ignore\n value_bit_size=abi_type.sub * 8,\n data_byte_size=abi_type.sub,\n )\n\n\nclass BytesDecoder(decoding.FixedByteSizeDecoder):\n # FixedByteSizeDecoder.is_big_endian is defined as None\n is_big_endian = False # type: ignore\n\n # FixedByteSizeDecoder.decoder_fn is defined as None\n @staticmethod\n def decoder_fn(data: bytes) -> bytes: # type: ignore\n return data\n\n @parse_type_str('bytes')\n def from_type_str(cls, abi_type: BasicType, registry: ABIRegistry) -> bytes:\n # type ignored b/c kwargs are set in superclass init\n # Unexpected keyword argument \"value_bit_size\" for \"__call__\" of \"BaseDecoder\"\n return cls( # type: ignore\n value_bit_size=abi_type.sub * 8,\n data_byte_size=abi_type.sub,\n )\n\n\nclass TextStringEncoder(encoding.TextStringEncoder):\n @classmethod\n def validate_value(cls, value: Any) -> None:\n if is_bytes(value):\n try:\n value = to_text(value)\n except UnicodeDecodeError:\n cls.invalidate_value(\n value,\n msg='not decodable as unicode string',\n )\n\n super().validate_value(value)\n\n\ndef filter_by_encodability(\n abi_codec: codec.ABIEncoder, args: Sequence[Any], kwargs: Dict[str, Any], contract_abi: ABI\n) -> List[ABIFunction]:\n return [\n cast(ABIFunction, function_abi)\n for function_abi\n in contract_abi\n if check_if_arguments_can_be_encoded(\n cast(ABIFunction, function_abi), abi_codec, args, kwargs\n )\n ]\n\n\ndef check_if_arguments_can_be_encoded(\n function_abi: ABIFunction,\n abi_codec: codec.ABIEncoder,\n args: Sequence[Any],\n kwargs: Dict[str, Any],\n) -> bool:\n try:\n arguments = merge_args_and_kwargs(function_abi, args, kwargs)\n except TypeError:\n return False\n\n if len(function_abi.get('inputs', [])) != len(arguments):\n return False\n\n try:\n types, aligned_args = get_aligned_abi_inputs(function_abi, arguments)\n except TypeError:\n return False\n\n return all(\n abi_codec.is_encodable(_type, arg)\n for _type, arg in zip(types, aligned_args)\n )\n\n\ndef merge_args_and_kwargs(\n function_abi: ABIFunction, args: Sequence[Any], kwargs: Dict[str, Any]\n) -> Tuple[Any, ...]:\n \"\"\"\n Takes a list of positional args (``args``) and a dict of keyword args\n (``kwargs``) defining values to be passed to a call to the contract function\n described by ``function_abi``. Checks to ensure that the correct number of\n args were given, no duplicate args were given, and no unknown args were\n given. Returns a list of argument values aligned to the order of inputs\n defined in ``function_abi``.\n \"\"\"\n # Ensure the function is being applied to the correct number of args\n if len(args) + len(kwargs) != len(function_abi.get('inputs', [])):\n raise TypeError(\n \"Incorrect argument count. Expected '{0}'. Got '{1}'\".format(\n len(function_abi['inputs']),\n len(args) + len(kwargs),\n )\n )\n\n # If no keyword args were given, we don't need to align them\n if not kwargs:\n return cast(Tuple[Any, ...], args)\n\n kwarg_names = set(kwargs.keys())\n sorted_arg_names = tuple(arg_abi['name'] for arg_abi in function_abi['inputs'])\n args_as_kwargs = dict(zip(sorted_arg_names, args))\n\n # Check for duplicate args\n duplicate_args = kwarg_names.intersection(args_as_kwargs.keys())\n if duplicate_args:\n raise TypeError(\n \"{fn_name}() got multiple values for argument(s) '{dups}'\".format(\n fn_name=function_abi['name'],\n dups=', '.join(duplicate_args),\n )\n )\n\n # Check for unknown args\n unknown_args = kwarg_names.difference(sorted_arg_names)\n if unknown_args:\n if function_abi.get('name'):\n raise TypeError(\n \"{fn_name}() got unexpected keyword argument(s) '{dups}'\".format(\n fn_name=function_abi.get('name'),\n dups=', '.join(unknown_args),\n )\n )\n raise TypeError(\n \"Type: '{_type}' got unexpected keyword argument(s) '{dups}'\".format(\n _type=function_abi.get('type'),\n dups=', '.join(unknown_args),\n )\n )\n\n # Sort args according to their position in the ABI and unzip them from their\n # names\n sorted_args = tuple(zip(\n *sorted(\n itertools.chain(kwargs.items(), args_as_kwargs.items()),\n key=lambda kv: sorted_arg_names.index(kv[0]),\n )\n ))\n\n if sorted_args:\n return sorted_args[1]\n else:\n return tuple()\n\n\nTUPLE_TYPE_STR_RE = re.compile(r'^(tuple)(\\[([1-9][0-9]*)?\\])?$')\n\n\ndef get_tuple_type_str_parts(s: str) -> Optional[Tuple[str, Optional[str]]]:\n \"\"\"\n Takes a JSON ABI type string. For tuple type strings, returns the separated\n prefix and array dimension parts. For all other strings, returns ``None``.\n \"\"\"\n match = TUPLE_TYPE_STR_RE.match(s)\n\n if match is not None:\n tuple_prefix = match.group(1)\n tuple_dims = match.group(2)\n\n return tuple_prefix, tuple_dims\n\n return None\n\n\ndef _align_abi_input(arg_abi: ABIFunctionParams, arg: Any) -> Tuple[Any, ...]:\n \"\"\"\n Aligns the values of any mapping at any level of nesting in ``arg``\n according to the layout of the corresponding abi spec.\n \"\"\"\n tuple_parts = get_tuple_type_str_parts(arg_abi['type'])\n\n if tuple_parts is None:\n # Arg is non-tuple. Just return value.\n return arg\n\n tuple_prefix, tuple_dims = tuple_parts\n if tuple_dims is None:\n # Arg is non-list tuple. Each sub arg in `arg` will be aligned\n # according to its corresponding abi.\n sub_abis = arg_abi['components']\n else:\n # Arg is list tuple. A non-list version of its abi will be used to\n # align each element in `arg`.\n new_abi = copy.copy(arg_abi)\n new_abi['type'] = tuple_prefix\n\n sub_abis = itertools.repeat(new_abi) # type: ignore\n\n if isinstance(arg, abc.Mapping):\n # Arg is mapping. Align values according to abi order.\n aligned_arg = tuple(arg[abi['name']] for abi in sub_abis)\n else:\n aligned_arg = arg\n\n if not is_list_like(aligned_arg):\n raise TypeError(\n 'Expected non-string sequence for \"{}\" component type: got {}'.format(\n arg_abi['type'],\n aligned_arg,\n ),\n )\n\n # convert NamedTuple to regular tuple\n typing = tuple if isinstance(aligned_arg, tuple) else type(aligned_arg)\n\n return typing(\n _align_abi_input(sub_abi, sub_arg)\n for sub_abi, sub_arg in zip(sub_abis, aligned_arg)\n )\n\n\ndef get_aligned_abi_inputs(\n abi: ABIFunction, args: Union[Tuple[Any, ...], Mapping[Any, Any]]\n) -> Tuple[Tuple[Any, ...], Tuple[Any, ...]]:\n \"\"\"\n Takes a function ABI (``abi``) and a sequence or mapping of args (``args``).\n Returns a list of type strings for the function's inputs and a list of\n arguments which have been aligned to the layout of those types. The args\n contained in ``args`` may contain nested mappings or sequences corresponding\n to tuple-encoded values in ``abi``.\n \"\"\"\n input_abis = abi.get('inputs', [])\n\n if isinstance(args, abc.Mapping):\n # `args` is mapping. Align values according to abi order.\n args = tuple(args[abi['name']] for abi in input_abis)\n\n return (\n # typed dict cannot be used w/ a normal Dict\n # https://github.com/python/mypy/issues/4976\n tuple(collapse_if_tuple(abi) for abi in input_abis), # type: ignore\n type(args)(\n _align_abi_input(abi, arg)\n for abi, arg in zip(input_abis, args)\n ),\n )\n\n\ndef get_constructor_abi(contract_abi: ABI) -> ABIFunction:\n candidates = [\n abi for abi in contract_abi if abi['type'] == 'constructor'\n ]\n if len(candidates) == 1:\n return candidates[0]\n elif len(candidates) == 0:\n return None\n elif len(candidates) > 1:\n raise ValueError(\"Found multiple constructors.\")\n return None\n\n\nDYNAMIC_TYPES = ['bytes', 'string']\n\nINT_SIZES = range(8, 257, 8)\nBYTES_SIZES = range(1, 33)\nUINT_TYPES = ['uint{0}'.format(i) for i in INT_SIZES]\nINT_TYPES = ['int{0}'.format(i) for i in INT_SIZES]\nBYTES_TYPES = ['bytes{0}'.format(i) for i in BYTES_SIZES] + ['bytes32.byte']\n\nSTATIC_TYPES = list(itertools.chain(\n ['address', 'bool'],\n UINT_TYPES,\n INT_TYPES,\n BYTES_TYPES,\n))\n\nBASE_TYPE_REGEX = '|'.join((\n _type + '(?![a-z0-9])'\n for _type\n in itertools.chain(STATIC_TYPES, DYNAMIC_TYPES)\n))\n\nSUB_TYPE_REGEX = (\n r'\\['\n '[0-9]*'\n r'\\]'\n)\n\nTYPE_REGEX = (\n '^'\n '(?:{base_type})'\n '(?:(?:{sub_type})*)?'\n '$'\n).format(\n base_type=BASE_TYPE_REGEX,\n sub_type=SUB_TYPE_REGEX,\n)\n\n\ndef is_recognized_type(abi_type: TypeStr) -> bool:\n return bool(re.match(TYPE_REGEX, abi_type))\n\n\ndef is_bool_type(abi_type: TypeStr) -> bool:\n return abi_type == 'bool'\n\n\ndef is_uint_type(abi_type: TypeStr) -> bool:\n return abi_type in UINT_TYPES\n\n\ndef is_int_type(abi_type: TypeStr) -> bool:\n return abi_type in INT_TYPES\n\n\ndef is_address_type(abi_type: TypeStr) -> bool:\n return abi_type == 'address'\n\n\ndef is_bytes_type(abi_type: TypeStr) -> bool:\n return abi_type in BYTES_TYPES + ['bytes']\n\n\ndef is_string_type(abi_type: TypeStr) -> bool:\n return abi_type == 'string'\n\n\n@curry\ndef is_length(target_length: int, value: abc.Sized) -> bool:\n return len(value) == target_length\n\n\ndef size_of_type(abi_type: TypeStr) -> int:\n \"\"\"\n Returns size in bits of abi_type\n \"\"\"\n if 'string' in abi_type:\n return None\n if 'byte' in abi_type:\n return None\n if '[' in abi_type:\n return None\n if abi_type == 'bool':\n return 8\n if abi_type == 'address':\n return 160\n return int(re.sub(r\"\\D\", \"\", abi_type))\n\n\nEND_BRACKETS_OF_ARRAY_TYPE_REGEX = r\"\\[[^]]*\\]$\"\n\n\ndef sub_type_of_array_type(abi_type: TypeStr) -> str:\n if not is_array_type(abi_type):\n raise ValueError(\n \"Cannot parse subtype of nonarray abi-type: {0}\".format(abi_type)\n )\n\n return re.sub(END_BRACKETS_OF_ARRAY_TYPE_REGEX, '', abi_type, 1)\n\n\ndef length_of_array_type(abi_type: TypeStr) -> int:\n if not is_array_type(abi_type):\n raise ValueError(\n \"Cannot parse length of nonarray abi-type: {0}\".format(abi_type)\n )\n\n inner_brackets = re.search(END_BRACKETS_OF_ARRAY_TYPE_REGEX, abi_type).group(0).strip(\"[]\")\n if not inner_brackets:\n return None\n else:\n return int(inner_brackets)\n\n\nARRAY_REGEX = (\n \"^\"\n \"[a-zA-Z0-9_]+\"\n \"({sub_type})+\"\n \"$\"\n).format(sub_type=SUB_TYPE_REGEX)\n\n\ndef is_array_type(abi_type: TypeStr) -> bool:\n return bool(re.match(ARRAY_REGEX, abi_type))\n\n\nNAME_REGEX = (\n '[a-zA-Z_]'\n '[a-zA-Z0-9_]*'\n)\n\n\nENUM_REGEX = (\n '^'\n '{lib_name}'\n r'\\.'\n '{enum_name}'\n '$'\n).format(lib_name=NAME_REGEX, enum_name=NAME_REGEX)\n\n\ndef is_probably_enum(abi_type: TypeStr) -> bool:\n return bool(re.match(ENUM_REGEX, abi_type))\n\n\n@to_tuple\ndef normalize_event_input_types(\n abi_args: Collection[Union[ABIFunction, ABIEvent]]\n) -> Iterable[Union[ABIFunction, ABIEvent, Dict[TypeStr, Any]]]:\n for arg in abi_args:\n if is_recognized_type(arg['type']):\n yield arg\n elif is_probably_enum(arg['type']):\n yield {k: 'uint8' if k == 'type' else v for k, v in arg.items()}\n else:\n yield arg\n\n\ndef abi_to_signature(abi: Union[ABIFunction, ABIEvent]) -> str:\n function_signature = \"{fn_name}({fn_input_types})\".format(\n fn_name=abi['name'],\n fn_input_types=','.join([\n arg['type'] for arg in normalize_event_input_types(abi.get('inputs', []))\n ]),\n )\n return function_signature\n\n\n########################################################\n#\n# Conditionally modifying data, tagged with ABI Types\n#\n########################################################\n\n\n@curry\ndef map_abi_data(\n normalizers: Sequence[Callable[[TypeStr, Any], Tuple[TypeStr, Any]]],\n types: Sequence[TypeStr],\n data: Sequence[Any],\n) -> Any:\n \"\"\"\n This function will apply normalizers to your data, in the\n context of the relevant types. Each normalizer is in the format:\n\n def normalizer(datatype, data):\n # Conditionally modify data\n return (datatype, data)\n\n Where datatype is a valid ABI type string, like \"uint\".\n\n In case of an array, like \"bool[2]\", normalizer will receive `data`\n as an iterable of typed data, like `[(\"bool\", True), (\"bool\", False)]`.\n\n Internals\n ---\n\n This is accomplished by:\n\n 1. Decorating the data tree with types\n 2. Recursively mapping each of the normalizers to the data\n 3. Stripping the types back out of the tree\n \"\"\"\n pipeline = itertools.chain(\n [abi_data_tree(types)],\n map(data_tree_map, normalizers),\n [partial(recursive_map, strip_abi_type)],\n )\n\n return pipe(data, *pipeline)\n\n\n@curry\ndef abi_data_tree(types: Sequence[TypeStr], data: Sequence[Any]) -> List[Any]:\n \"\"\"\n Decorate the data tree with pairs of (type, data). The pair tuple is actually an\n ABITypedData, but can be accessed as a tuple.\n\n As an example:\n\n >>> abi_data_tree(types=[\"bool[2]\", \"uint\"], data=[[True, False], 0])\n [(\"bool[2]\", [(\"bool\", True), (\"bool\", False)]), (\"uint256\", 0)]\n \"\"\"\n return [\n abi_sub_tree(data_type, data_value)\n for data_type, data_value\n in zip(types, data)\n ]\n\n\n@curry\ndef data_tree_map(\n func: Callable[[TypeStr, Any], Tuple[TypeStr, Any]], data_tree: Any\n) -> \"ABITypedData\":\n \"\"\"\n Map func to every ABITypedData element in the tree. func will\n receive two args: abi_type, and data\n \"\"\"\n def map_to_typed_data(elements: Any) -> \"ABITypedData\":\n if isinstance(elements, ABITypedData) and elements.abi_type is not None:\n return ABITypedData(func(*elements))\n else:\n return elements\n return recursive_map(map_to_typed_data, data_tree)\n\n\nclass ABITypedData(namedtuple('ABITypedData', 'abi_type, data')):\n \"\"\"\n This class marks data as having a certain ABI-type.\n\n >>> a1 = ABITypedData(['address', addr1])\n >>> a2 = ABITypedData(['address', addr2])\n >>> addrs = ABITypedData(['address[]', [a1, a2]])\n\n You can access the fields using tuple() interface, or with\n attributes:\n\n >>> assert a1.abi_type == a1[0]\n >>> assert a1.data == a1[1]\n\n Unlike a typical `namedtuple`, you initialize with a single\n positional argument that is iterable, to match the init\n interface of all other relevant collections.\n \"\"\"\n def __new__(cls, iterable: Iterable[Any]) -> \"ABITypedData\":\n return super().__new__(cls, *iterable)\n\n\ndef abi_sub_tree(\n type_str_or_abi_type: Optional[Union[TypeStr, ABIType]], data_value: Any\n) -> ABITypedData:\n if type_str_or_abi_type is None:\n return ABITypedData([None, data_value])\n\n if isinstance(type_str_or_abi_type, TypeStr):\n abi_type = parse(type_str_or_abi_type)\n else:\n abi_type = type_str_or_abi_type\n\n # In the two special cases below, we rebuild the given data structures with\n # annotated items\n if abi_type.is_array:\n # If type is array, determine item type and annotate all\n # items in iterable with that type\n item_type_str = abi_type.item_type.to_type_str()\n value_to_annotate = [\n abi_sub_tree(item_type_str, item_value)\n for item_value in data_value\n ]\n elif isinstance(abi_type, TupleType):\n # Otherwise, if type is tuple, determine component types and annotate\n # tuple components in iterable respectively with those types\n value_to_annotate = type(data_value)(\n abi_sub_tree(comp_type.to_type_str(), comp_value)\n for comp_type, comp_value in zip(abi_type.components, data_value)\n )\n else:\n value_to_annotate = data_value\n\n return ABITypedData([\n abi_type.to_type_str(),\n value_to_annotate,\n ])\n\n\ndef strip_abi_type(elements: Any) -> Any:\n if isinstance(elements, ABITypedData):\n return elements.data\n else:\n return elements\n\n\ndef build_default_registry() -> ABIRegistry:\n # We make a copy here just to make sure that eth-abi's default registry is not\n # affected by our custom encoder subclasses\n registry = default_registry.copy()\n\n registry.unregister('address')\n registry.unregister('bytes<M>')\n registry.unregister('bytes')\n registry.unregister('string')\n\n registry.register(\n BaseEquals('address'),\n AddressEncoder, decoding.AddressDecoder,\n label='address',\n )\n registry.register(\n BaseEquals('bytes', with_sub=True),\n BytesEncoder, decoding.BytesDecoder,\n label='bytes<M>',\n )\n registry.register(\n BaseEquals('bytes', with_sub=False),\n ByteStringEncoder, decoding.ByteStringDecoder,\n label='bytes',\n )\n registry.register(\n BaseEquals('string'),\n TextStringEncoder, decoding.StringDecoder,\n label='string',\n )\n return registry\n\n\ndef build_strict_registry() -> ABIRegistry:\n registry = default_registry.copy()\n\n registry.unregister('address')\n registry.unregister('bytes<M>')\n registry.unregister('bytes')\n registry.unregister('string')\n\n registry.register(\n BaseEquals('address'),\n AddressEncoder, decoding.AddressDecoder,\n label='address',\n )\n registry.register(\n BaseEquals('bytes', with_sub=True),\n ExactLengthBytesEncoder, BytesDecoder,\n label='bytes<M>',\n )\n registry.register(\n BaseEquals('bytes', with_sub=False),\n StrictByteStringEncoder, decoding.ByteStringDecoder,\n label='bytes',\n )\n registry.register(\n BaseEquals('string'),\n TextStringEncoder, decoding.StringDecoder,\n label='string',\n )\n return registry\n", "path": "web3/_utils/abi.py" } ]
[ { "content": "import binascii\nfrom collections import (\n abc,\n namedtuple,\n)\nimport copy\nimport itertools\nimport re\nfrom typing import (\n Any,\n Callable,\n Collection,\n Dict,\n Iterable,\n List,\n Mapping,\n Optional,\n Sequence,\n Tuple,\n Type,\n Union,\n cast,\n)\nimport warnings\n\nfrom eth_abi import (\n codec,\n decoding,\n encoding,\n)\nfrom eth_abi.base import (\n parse_type_str,\n)\nfrom eth_abi.exceptions import (\n ValueOutOfBounds,\n)\nfrom eth_abi.grammar import (\n ABIType,\n BasicType,\n TupleType,\n parse,\n)\nfrom eth_abi.registry import (\n ABIRegistry,\n BaseEquals,\n registry as default_registry,\n)\nfrom eth_typing import (\n HexStr,\n TypeStr,\n)\nfrom eth_utils import (\n combomethod,\n decode_hex,\n is_bytes,\n is_list_like,\n is_text,\n to_text,\n to_tuple,\n)\nfrom eth_utils.abi import (\n collapse_if_tuple,\n)\nfrom eth_utils.toolz import (\n curry,\n partial,\n pipe,\n)\n\nfrom web3._utils.ens import (\n is_ens_name,\n)\nfrom web3._utils.formatters import (\n recursive_map,\n)\nfrom web3.exceptions import (\n FallbackNotFound,\n)\nfrom web3.types import (\n ABI,\n ABIEvent,\n ABIEventParams,\n ABIFunction,\n ABIFunctionParams,\n)\n\n\ndef filter_by_type(_type: str, contract_abi: ABI) -> List[Union[ABIFunction, ABIEvent]]:\n return [abi for abi in contract_abi if abi['type'] == _type]\n\n\ndef filter_by_name(name: str, contract_abi: ABI) -> List[Union[ABIFunction, ABIEvent]]:\n return [\n abi\n for abi\n in contract_abi\n if (\n abi['type'] not in ('fallback', 'constructor', 'receive')\n and abi['name'] == name\n )\n ]\n\n\ndef get_abi_input_types(abi: ABIFunction) -> List[str]:\n if 'inputs' not in abi and (abi['type'] == 'fallback' or abi['type'] == 'receive'):\n return []\n else:\n return [collapse_if_tuple(cast(Dict[str, Any], arg)) for arg in abi['inputs']]\n\n\ndef get_abi_output_types(abi: ABIFunction) -> List[str]:\n if abi['type'] == 'fallback':\n return []\n else:\n return [collapse_if_tuple(cast(Dict[str, Any], arg)) for arg in abi['outputs']]\n\n\ndef get_abi_input_names(abi: Union[ABIFunction, ABIEvent]) -> List[str]:\n if 'inputs' not in abi and abi['type'] == 'fallback':\n return []\n else:\n return [arg['name'] for arg in abi['inputs']]\n\n\ndef get_receive_func_abi(contract_abi: ABI) -> ABIFunction:\n receive_abis = filter_by_type('receive', contract_abi)\n if receive_abis:\n return cast(ABIFunction, receive_abis[0])\n else:\n raise FallbackNotFound(\"No receive function was found in the contract ABI.\")\n\n\ndef get_fallback_func_abi(contract_abi: ABI) -> ABIFunction:\n fallback_abis = filter_by_type('fallback', contract_abi)\n if fallback_abis:\n return cast(ABIFunction, fallback_abis[0])\n else:\n raise FallbackNotFound(\"No fallback function was found in the contract ABI.\")\n\n\ndef fallback_func_abi_exists(contract_abi: ABI) -> List[Union[ABIFunction, ABIEvent]]:\n return filter_by_type('fallback', contract_abi)\n\n\ndef receive_func_abi_exists(contract_abi: ABI) -> List[Union[ABIFunction, ABIEvent]]:\n return filter_by_type('receive', contract_abi)\n\n\ndef get_indexed_event_inputs(event_abi: ABIEvent) -> List[ABIEventParams]:\n return [arg for arg in event_abi['inputs'] if arg['indexed'] is True]\n\n\ndef exclude_indexed_event_inputs(event_abi: ABIEvent) -> List[ABIEventParams]:\n return [arg for arg in event_abi['inputs'] if arg['indexed'] is False]\n\n\ndef get_normalized_abi_arg_type(abi_arg: ABIEventParams) -> str:\n \"\"\"\n Return the normalized type for the abi argument provided. In order to account for tuple argument\n types, this abstraction makes use of `collapse_if_tuple()` to collapse the appropriate component\n types within a tuple type, if present.\n \"\"\"\n return collapse_if_tuple(dict(abi_arg))\n\n\ndef filter_by_argument_count(\n num_arguments: int, contract_abi: ABI\n) -> List[Union[ABIFunction, ABIEvent]]:\n return [\n abi\n for abi\n in contract_abi\n if len(abi['inputs']) == num_arguments\n ]\n\n\ndef filter_by_argument_name(\n argument_names: Collection[str], contract_abi: ABI\n) -> List[Union[ABIFunction, ABIEvent]]:\n return [\n abi\n for abi in contract_abi\n if set(argument_names).intersection(\n get_abi_input_names(abi)\n ) == set(argument_names)\n ]\n\n\nclass AddressEncoder(encoding.AddressEncoder):\n @classmethod\n def validate_value(cls, value: Any) -> None:\n if is_ens_name(value):\n return\n\n super().validate_value(value)\n\n\nclass AcceptsHexStrEncoder(encoding.BaseEncoder):\n subencoder_cls: Type[encoding.BaseEncoder] = None\n is_strict: bool = None\n\n def __init__(self, subencoder: encoding.BaseEncoder) -> None:\n self.subencoder = subencoder\n\n # type ignored b/c conflict w/ defined BaseEncoder.is_dynamic = False\n @property\n def is_dynamic(self) -> bool: # type: ignore\n return self.subencoder.is_dynamic\n\n @classmethod\n def from_type_str(cls, abi_type: TypeStr, registry: ABIRegistry) -> \"AcceptsHexStrEncoder\":\n subencoder_cls = cls.get_subencoder_class()\n # cast b/c expects BaseCoder but `from_type_string` restricted to BaseEncoder subclasses\n subencoder = cast(encoding.BaseEncoder, subencoder_cls.from_type_str(abi_type, registry))\n return cls(subencoder)\n\n @classmethod\n def get_subencoder_class(cls) -> Type[encoding.BaseEncoder]:\n if cls.subencoder_cls is None:\n raise AttributeError(f'No subencoder class is set. {cls.__name__}')\n return cls.subencoder_cls\n\n # type ignored b/c combomethod makes signature conflict w/ defined BaseEncoder.validate_value()\n @combomethod\n def validate_value(self, value: Any) -> None: # type: ignore\n normalized_value = self.validate_and_normalize(value)\n return self.subencoder.validate_value(normalized_value)\n\n def encode(self, value: Any) -> bytes:\n normalized_value = self.validate_and_normalize(value)\n return self.subencoder.encode(normalized_value)\n\n def validate_and_normalize(self, value: Any) -> HexStr:\n raw_value = value\n if is_text(value):\n try:\n value = decode_hex(value)\n except binascii.Error:\n self.invalidate_value(\n value,\n msg=f'{value} is an invalid hex string',\n )\n else:\n if raw_value[:2] != '0x':\n if self.is_strict:\n self.invalidate_value(\n raw_value,\n msg='hex string must be prefixed with 0x'\n )\n elif raw_value[:2] != '0x':\n warnings.warn(\n 'in v6 it will be invalid to pass a hex string without the \"0x\" prefix',\n category=DeprecationWarning\n )\n return value\n\n\nclass BytesEncoder(AcceptsHexStrEncoder):\n subencoder_cls = encoding.BytesEncoder\n is_strict = False\n\n\nclass ByteStringEncoder(AcceptsHexStrEncoder):\n subencoder_cls = encoding.ByteStringEncoder\n is_strict = False\n\n\nclass StrictByteStringEncoder(AcceptsHexStrEncoder):\n subencoder_cls = encoding.ByteStringEncoder\n is_strict = True\n\n\nclass ExactLengthBytesEncoder(encoding.BaseEncoder):\n # TODO: move this to eth-abi once the api is stabilized\n is_big_endian = False\n value_bit_size = None\n data_byte_size = None\n\n def validate(self) -> None:\n super().validate()\n\n if self.value_bit_size is None:\n raise ValueError(\"`value_bit_size` may not be none\")\n if self.data_byte_size is None:\n raise ValueError(\"`data_byte_size` may not be none\")\n if self.encode_fn is None:\n raise ValueError(\"`encode_fn` may not be none\")\n if self.is_big_endian is None:\n raise ValueError(\"`is_big_endian` may not be none\")\n\n if self.value_bit_size % 8 != 0:\n raise ValueError(\n \"Invalid value bit size: {0}. Must be a multiple of 8\".format(\n self.value_bit_size,\n )\n )\n\n if self.value_bit_size > self.data_byte_size * 8:\n raise ValueError(\"Value byte size exceeds data size\")\n\n def encode(self, value: Any) -> bytes:\n normalized_value = self.validate_value(value)\n return self.encode_fn(normalized_value)\n\n # type ignored b/c conflict with defined BaseEncoder.validate_value() -> None\n def validate_value(self, value: Any) -> bytes: # type: ignore\n if not is_bytes(value) and not is_text(value):\n self.invalidate_value(value)\n\n raw_value = value\n if is_text(value):\n try:\n value = decode_hex(value)\n except binascii.Error:\n self.invalidate_value(\n value,\n msg=f'{value} is not a valid hex string',\n )\n else:\n if raw_value[:2] != '0x':\n self.invalidate_value(\n raw_value,\n msg='hex string must be prefixed with 0x'\n )\n\n byte_size = self.value_bit_size // 8\n if len(value) > byte_size:\n self.invalidate_value(\n value,\n exc=ValueOutOfBounds,\n msg=\"exceeds total byte size for bytes{} encoding\".format(byte_size),\n )\n elif len(value) < byte_size:\n self.invalidate_value(\n value,\n exc=ValueOutOfBounds,\n msg=\"less than total byte size for bytes{} encoding\".format(byte_size),\n )\n return value\n\n @staticmethod\n def encode_fn(value: Any) -> bytes:\n return value\n\n @parse_type_str('bytes')\n def from_type_str(cls, abi_type: BasicType, registry: ABIRegistry) -> bytes:\n # type ignored b/c kwargs are set in superclass init\n # Unexpected keyword argument \"value_bit_size\" for \"__call__\" of \"BaseEncoder\"\n return cls( # type: ignore\n value_bit_size=abi_type.sub * 8,\n data_byte_size=abi_type.sub,\n )\n\n\nclass BytesDecoder(decoding.FixedByteSizeDecoder):\n # FixedByteSizeDecoder.is_big_endian is defined as None\n is_big_endian = False # type: ignore\n\n # FixedByteSizeDecoder.decoder_fn is defined as None\n @staticmethod\n def decoder_fn(data: bytes) -> bytes: # type: ignore\n return data\n\n @parse_type_str('bytes')\n def from_type_str(cls, abi_type: BasicType, registry: ABIRegistry) -> bytes:\n # type ignored b/c kwargs are set in superclass init\n # Unexpected keyword argument \"value_bit_size\" for \"__call__\" of \"BaseDecoder\"\n return cls( # type: ignore\n value_bit_size=abi_type.sub * 8,\n data_byte_size=abi_type.sub,\n )\n\n\nclass TextStringEncoder(encoding.TextStringEncoder):\n @classmethod\n def validate_value(cls, value: Any) -> None:\n if is_bytes(value):\n try:\n value = to_text(value)\n except UnicodeDecodeError:\n cls.invalidate_value(\n value,\n msg='not decodable as unicode string',\n )\n\n super().validate_value(value)\n\n\ndef filter_by_encodability(\n abi_codec: codec.ABIEncoder, args: Sequence[Any], kwargs: Dict[str, Any], contract_abi: ABI\n) -> List[ABIFunction]:\n return [\n cast(ABIFunction, function_abi)\n for function_abi\n in contract_abi\n if check_if_arguments_can_be_encoded(\n cast(ABIFunction, function_abi), abi_codec, args, kwargs\n )\n ]\n\n\ndef check_if_arguments_can_be_encoded(\n function_abi: ABIFunction,\n abi_codec: codec.ABIEncoder,\n args: Sequence[Any],\n kwargs: Dict[str, Any],\n) -> bool:\n try:\n arguments = merge_args_and_kwargs(function_abi, args, kwargs)\n except TypeError:\n return False\n\n if len(function_abi.get('inputs', [])) != len(arguments):\n return False\n\n try:\n types, aligned_args = get_aligned_abi_inputs(function_abi, arguments)\n except TypeError:\n return False\n\n return all(\n abi_codec.is_encodable(_type, arg)\n for _type, arg in zip(types, aligned_args)\n )\n\n\ndef merge_args_and_kwargs(\n function_abi: ABIFunction, args: Sequence[Any], kwargs: Dict[str, Any]\n) -> Tuple[Any, ...]:\n \"\"\"\n Takes a list of positional args (``args``) and a dict of keyword args\n (``kwargs``) defining values to be passed to a call to the contract function\n described by ``function_abi``. Checks to ensure that the correct number of\n args were given, no duplicate args were given, and no unknown args were\n given. Returns a list of argument values aligned to the order of inputs\n defined in ``function_abi``.\n \"\"\"\n # Ensure the function is being applied to the correct number of args\n if len(args) + len(kwargs) != len(function_abi.get('inputs', [])):\n raise TypeError(\n \"Incorrect argument count. Expected '{0}'. Got '{1}'\".format(\n len(function_abi['inputs']),\n len(args) + len(kwargs),\n )\n )\n\n # If no keyword args were given, we don't need to align them\n if not kwargs:\n return cast(Tuple[Any, ...], args)\n\n kwarg_names = set(kwargs.keys())\n sorted_arg_names = tuple(arg_abi['name'] for arg_abi in function_abi['inputs'])\n args_as_kwargs = dict(zip(sorted_arg_names, args))\n\n # Check for duplicate args\n duplicate_args = kwarg_names.intersection(args_as_kwargs.keys())\n if duplicate_args:\n raise TypeError(\n \"{fn_name}() got multiple values for argument(s) '{dups}'\".format(\n fn_name=function_abi['name'],\n dups=', '.join(duplicate_args),\n )\n )\n\n # Check for unknown args\n unknown_args = kwarg_names.difference(sorted_arg_names)\n if unknown_args:\n if function_abi.get('name'):\n raise TypeError(\n \"{fn_name}() got unexpected keyword argument(s) '{dups}'\".format(\n fn_name=function_abi.get('name'),\n dups=', '.join(unknown_args),\n )\n )\n raise TypeError(\n \"Type: '{_type}' got unexpected keyword argument(s) '{dups}'\".format(\n _type=function_abi.get('type'),\n dups=', '.join(unknown_args),\n )\n )\n\n # Sort args according to their position in the ABI and unzip them from their\n # names\n sorted_args = tuple(zip(\n *sorted(\n itertools.chain(kwargs.items(), args_as_kwargs.items()),\n key=lambda kv: sorted_arg_names.index(kv[0]),\n )\n ))\n\n if sorted_args:\n return sorted_args[1]\n else:\n return tuple()\n\n\nTUPLE_TYPE_STR_RE = re.compile(r\"^(tuple)((\\[([1-9]\\d*\\b)?])*)??$\")\n\n\ndef get_tuple_type_str_parts(s: str) -> Optional[Tuple[str, Optional[str]]]:\n \"\"\"\n Takes a JSON ABI type string. For tuple type strings, returns the separated\n prefix and array dimension parts. For all other strings, returns ``None``.\n \"\"\"\n match = TUPLE_TYPE_STR_RE.match(s)\n\n if match is not None:\n tuple_prefix = match.group(1)\n tuple_dims = match.group(2)\n\n return tuple_prefix, tuple_dims\n\n return None\n\n\ndef _align_abi_input(arg_abi: ABIFunctionParams, arg: Any) -> Tuple[Any, ...]:\n \"\"\"\n Aligns the values of any mapping at any level of nesting in ``arg``\n according to the layout of the corresponding abi spec.\n \"\"\"\n tuple_parts = get_tuple_type_str_parts(arg_abi['type'])\n\n if tuple_parts is None:\n # Arg is non-tuple. Just return value.\n return arg\n\n tuple_prefix, tuple_dims = tuple_parts\n if tuple_dims is None:\n # Arg is non-list tuple. Each sub arg in `arg` will be aligned\n # according to its corresponding abi.\n sub_abis = arg_abi['components']\n else:\n # Arg is list tuple. A non-list version of its abi will be used to\n # align each element in `arg`.\n new_abi = copy.copy(arg_abi)\n new_abi['type'] = tuple_prefix\n\n sub_abis = itertools.repeat(new_abi) # type: ignore\n\n if isinstance(arg, abc.Mapping):\n # Arg is mapping. Align values according to abi order.\n aligned_arg = tuple(arg[abi['name']] for abi in sub_abis)\n else:\n aligned_arg = arg\n\n if not is_list_like(aligned_arg):\n raise TypeError(\n 'Expected non-string sequence for \"{}\" component type: got {}'.format(\n arg_abi['type'],\n aligned_arg,\n ),\n )\n\n # convert NamedTuple to regular tuple\n typing = tuple if isinstance(aligned_arg, tuple) else type(aligned_arg)\n\n return typing(\n _align_abi_input(sub_abi, sub_arg)\n for sub_abi, sub_arg in zip(sub_abis, aligned_arg)\n )\n\n\ndef get_aligned_abi_inputs(\n abi: ABIFunction, args: Union[Tuple[Any, ...], Mapping[Any, Any]]\n) -> Tuple[Tuple[Any, ...], Tuple[Any, ...]]:\n \"\"\"\n Takes a function ABI (``abi``) and a sequence or mapping of args (``args``).\n Returns a list of type strings for the function's inputs and a list of\n arguments which have been aligned to the layout of those types. The args\n contained in ``args`` may contain nested mappings or sequences corresponding\n to tuple-encoded values in ``abi``.\n \"\"\"\n input_abis = abi.get('inputs', [])\n\n if isinstance(args, abc.Mapping):\n # `args` is mapping. Align values according to abi order.\n args = tuple(args[abi['name']] for abi in input_abis)\n\n return (\n # typed dict cannot be used w/ a normal Dict\n # https://github.com/python/mypy/issues/4976\n tuple(collapse_if_tuple(abi) for abi in input_abis), # type: ignore\n type(args)(\n _align_abi_input(abi, arg)\n for abi, arg in zip(input_abis, args)\n ),\n )\n\n\ndef get_constructor_abi(contract_abi: ABI) -> ABIFunction:\n candidates = [\n abi for abi in contract_abi if abi['type'] == 'constructor'\n ]\n if len(candidates) == 1:\n return candidates[0]\n elif len(candidates) == 0:\n return None\n elif len(candidates) > 1:\n raise ValueError(\"Found multiple constructors.\")\n return None\n\n\nDYNAMIC_TYPES = ['bytes', 'string']\n\nINT_SIZES = range(8, 257, 8)\nBYTES_SIZES = range(1, 33)\nUINT_TYPES = ['uint{0}'.format(i) for i in INT_SIZES]\nINT_TYPES = ['int{0}'.format(i) for i in INT_SIZES]\nBYTES_TYPES = ['bytes{0}'.format(i) for i in BYTES_SIZES] + ['bytes32.byte']\n\nSTATIC_TYPES = list(itertools.chain(\n ['address', 'bool'],\n UINT_TYPES,\n INT_TYPES,\n BYTES_TYPES,\n))\n\nBASE_TYPE_REGEX = '|'.join((\n _type + '(?![a-z0-9])'\n for _type\n in itertools.chain(STATIC_TYPES, DYNAMIC_TYPES)\n))\n\nSUB_TYPE_REGEX = (\n r'\\['\n '[0-9]*'\n r'\\]'\n)\n\nTYPE_REGEX = (\n '^'\n '(?:{base_type})'\n '(?:(?:{sub_type})*)?'\n '$'\n).format(\n base_type=BASE_TYPE_REGEX,\n sub_type=SUB_TYPE_REGEX,\n)\n\n\ndef is_recognized_type(abi_type: TypeStr) -> bool:\n return bool(re.match(TYPE_REGEX, abi_type))\n\n\ndef is_bool_type(abi_type: TypeStr) -> bool:\n return abi_type == 'bool'\n\n\ndef is_uint_type(abi_type: TypeStr) -> bool:\n return abi_type in UINT_TYPES\n\n\ndef is_int_type(abi_type: TypeStr) -> bool:\n return abi_type in INT_TYPES\n\n\ndef is_address_type(abi_type: TypeStr) -> bool:\n return abi_type == 'address'\n\n\ndef is_bytes_type(abi_type: TypeStr) -> bool:\n return abi_type in BYTES_TYPES + ['bytes']\n\n\ndef is_string_type(abi_type: TypeStr) -> bool:\n return abi_type == 'string'\n\n\n@curry\ndef is_length(target_length: int, value: abc.Sized) -> bool:\n return len(value) == target_length\n\n\ndef size_of_type(abi_type: TypeStr) -> int:\n \"\"\"\n Returns size in bits of abi_type\n \"\"\"\n if 'string' in abi_type:\n return None\n if 'byte' in abi_type:\n return None\n if '[' in abi_type:\n return None\n if abi_type == 'bool':\n return 8\n if abi_type == 'address':\n return 160\n return int(re.sub(r\"\\D\", \"\", abi_type))\n\n\nEND_BRACKETS_OF_ARRAY_TYPE_REGEX = r\"\\[[^]]*\\]$\"\n\n\ndef sub_type_of_array_type(abi_type: TypeStr) -> str:\n if not is_array_type(abi_type):\n raise ValueError(\n \"Cannot parse subtype of nonarray abi-type: {0}\".format(abi_type)\n )\n\n return re.sub(END_BRACKETS_OF_ARRAY_TYPE_REGEX, '', abi_type, 1)\n\n\ndef length_of_array_type(abi_type: TypeStr) -> int:\n if not is_array_type(abi_type):\n raise ValueError(\n \"Cannot parse length of nonarray abi-type: {0}\".format(abi_type)\n )\n\n inner_brackets = re.search(END_BRACKETS_OF_ARRAY_TYPE_REGEX, abi_type).group(0).strip(\"[]\")\n if not inner_brackets:\n return None\n else:\n return int(inner_brackets)\n\n\nARRAY_REGEX = (\n \"^\"\n \"[a-zA-Z0-9_]+\"\n \"({sub_type})+\"\n \"$\"\n).format(sub_type=SUB_TYPE_REGEX)\n\n\ndef is_array_type(abi_type: TypeStr) -> bool:\n return bool(re.match(ARRAY_REGEX, abi_type))\n\n\nNAME_REGEX = (\n '[a-zA-Z_]'\n '[a-zA-Z0-9_]*'\n)\n\n\nENUM_REGEX = (\n '^'\n '{lib_name}'\n r'\\.'\n '{enum_name}'\n '$'\n).format(lib_name=NAME_REGEX, enum_name=NAME_REGEX)\n\n\ndef is_probably_enum(abi_type: TypeStr) -> bool:\n return bool(re.match(ENUM_REGEX, abi_type))\n\n\n@to_tuple\ndef normalize_event_input_types(\n abi_args: Collection[Union[ABIFunction, ABIEvent]]\n) -> Iterable[Union[ABIFunction, ABIEvent, Dict[TypeStr, Any]]]:\n for arg in abi_args:\n if is_recognized_type(arg['type']):\n yield arg\n elif is_probably_enum(arg['type']):\n yield {k: 'uint8' if k == 'type' else v for k, v in arg.items()}\n else:\n yield arg\n\n\ndef abi_to_signature(abi: Union[ABIFunction, ABIEvent]) -> str:\n function_signature = \"{fn_name}({fn_input_types})\".format(\n fn_name=abi['name'],\n fn_input_types=','.join([\n arg['type'] for arg in normalize_event_input_types(abi.get('inputs', []))\n ]),\n )\n return function_signature\n\n\n########################################################\n#\n# Conditionally modifying data, tagged with ABI Types\n#\n########################################################\n\n\n@curry\ndef map_abi_data(\n normalizers: Sequence[Callable[[TypeStr, Any], Tuple[TypeStr, Any]]],\n types: Sequence[TypeStr],\n data: Sequence[Any],\n) -> Any:\n \"\"\"\n This function will apply normalizers to your data, in the\n context of the relevant types. Each normalizer is in the format:\n\n def normalizer(datatype, data):\n # Conditionally modify data\n return (datatype, data)\n\n Where datatype is a valid ABI type string, like \"uint\".\n\n In case of an array, like \"bool[2]\", normalizer will receive `data`\n as an iterable of typed data, like `[(\"bool\", True), (\"bool\", False)]`.\n\n Internals\n ---\n\n This is accomplished by:\n\n 1. Decorating the data tree with types\n 2. Recursively mapping each of the normalizers to the data\n 3. Stripping the types back out of the tree\n \"\"\"\n pipeline = itertools.chain(\n [abi_data_tree(types)],\n map(data_tree_map, normalizers),\n [partial(recursive_map, strip_abi_type)],\n )\n\n return pipe(data, *pipeline)\n\n\n@curry\ndef abi_data_tree(types: Sequence[TypeStr], data: Sequence[Any]) -> List[Any]:\n \"\"\"\n Decorate the data tree with pairs of (type, data). The pair tuple is actually an\n ABITypedData, but can be accessed as a tuple.\n\n As an example:\n\n >>> abi_data_tree(types=[\"bool[2]\", \"uint\"], data=[[True, False], 0])\n [(\"bool[2]\", [(\"bool\", True), (\"bool\", False)]), (\"uint256\", 0)]\n \"\"\"\n return [\n abi_sub_tree(data_type, data_value)\n for data_type, data_value\n in zip(types, data)\n ]\n\n\n@curry\ndef data_tree_map(\n func: Callable[[TypeStr, Any], Tuple[TypeStr, Any]], data_tree: Any\n) -> \"ABITypedData\":\n \"\"\"\n Map func to every ABITypedData element in the tree. func will\n receive two args: abi_type, and data\n \"\"\"\n def map_to_typed_data(elements: Any) -> \"ABITypedData\":\n if isinstance(elements, ABITypedData) and elements.abi_type is not None:\n return ABITypedData(func(*elements))\n else:\n return elements\n return recursive_map(map_to_typed_data, data_tree)\n\n\nclass ABITypedData(namedtuple('ABITypedData', 'abi_type, data')):\n \"\"\"\n This class marks data as having a certain ABI-type.\n\n >>> a1 = ABITypedData(['address', addr1])\n >>> a2 = ABITypedData(['address', addr2])\n >>> addrs = ABITypedData(['address[]', [a1, a2]])\n\n You can access the fields using tuple() interface, or with\n attributes:\n\n >>> assert a1.abi_type == a1[0]\n >>> assert a1.data == a1[1]\n\n Unlike a typical `namedtuple`, you initialize with a single\n positional argument that is iterable, to match the init\n interface of all other relevant collections.\n \"\"\"\n def __new__(cls, iterable: Iterable[Any]) -> \"ABITypedData\":\n return super().__new__(cls, *iterable)\n\n\ndef abi_sub_tree(\n type_str_or_abi_type: Optional[Union[TypeStr, ABIType]], data_value: Any\n) -> ABITypedData:\n if type_str_or_abi_type is None:\n return ABITypedData([None, data_value])\n\n if isinstance(type_str_or_abi_type, TypeStr):\n abi_type = parse(type_str_or_abi_type)\n else:\n abi_type = type_str_or_abi_type\n\n # In the two special cases below, we rebuild the given data structures with\n # annotated items\n if abi_type.is_array:\n # If type is array, determine item type and annotate all\n # items in iterable with that type\n item_type_str = abi_type.item_type.to_type_str()\n value_to_annotate = [\n abi_sub_tree(item_type_str, item_value)\n for item_value in data_value\n ]\n elif isinstance(abi_type, TupleType):\n # Otherwise, if type is tuple, determine component types and annotate\n # tuple components in iterable respectively with those types\n value_to_annotate = type(data_value)(\n abi_sub_tree(comp_type.to_type_str(), comp_value)\n for comp_type, comp_value in zip(abi_type.components, data_value)\n )\n else:\n value_to_annotate = data_value\n\n return ABITypedData([\n abi_type.to_type_str(),\n value_to_annotate,\n ])\n\n\ndef strip_abi_type(elements: Any) -> Any:\n if isinstance(elements, ABITypedData):\n return elements.data\n else:\n return elements\n\n\ndef build_default_registry() -> ABIRegistry:\n # We make a copy here just to make sure that eth-abi's default registry is not\n # affected by our custom encoder subclasses\n registry = default_registry.copy()\n\n registry.unregister('address')\n registry.unregister('bytes<M>')\n registry.unregister('bytes')\n registry.unregister('string')\n\n registry.register(\n BaseEquals('address'),\n AddressEncoder, decoding.AddressDecoder,\n label='address',\n )\n registry.register(\n BaseEquals('bytes', with_sub=True),\n BytesEncoder, decoding.BytesDecoder,\n label='bytes<M>',\n )\n registry.register(\n BaseEquals('bytes', with_sub=False),\n ByteStringEncoder, decoding.ByteStringDecoder,\n label='bytes',\n )\n registry.register(\n BaseEquals('string'),\n TextStringEncoder, decoding.StringDecoder,\n label='string',\n )\n return registry\n\n\ndef build_strict_registry() -> ABIRegistry:\n registry = default_registry.copy()\n\n registry.unregister('address')\n registry.unregister('bytes<M>')\n registry.unregister('bytes')\n registry.unregister('string')\n\n registry.register(\n BaseEquals('address'),\n AddressEncoder, decoding.AddressDecoder,\n label='address',\n )\n registry.register(\n BaseEquals('bytes', with_sub=True),\n ExactLengthBytesEncoder, BytesDecoder,\n label='bytes<M>',\n )\n registry.register(\n BaseEquals('bytes', with_sub=False),\n StrictByteStringEncoder, decoding.ByteStringDecoder,\n label='bytes',\n )\n registry.register(\n BaseEquals('string'),\n TextStringEncoder, decoding.StringDecoder,\n label='string',\n )\n return registry\n", "path": "web3/_utils/abi.py" } ]
diff --git a/newsfragments/2555.feature.rst b/newsfragments/2555.feature.rst new file mode 100644 index 0000000000..fae2820847 --- /dev/null +++ b/newsfragments/2555.feature.rst @@ -0,0 +1 @@ +support multi-dimensional arrays for ABI tuples types diff --git a/tests/core/utilities/test_abi.py b/tests/core/utilities/test_abi.py index b861ee83ab..5f8c2406c9 100644 --- a/tests/core/utilities/test_abi.py +++ b/tests/core/utilities/test_abi.py @@ -21,23 +21,33 @@ 'input, expected', ( # Well-formed tuple type strings - ('tuple', ('tuple', None)), - ('tuple[]', ('tuple', '[]')), - ('tuple[1]', ('tuple', '[1]')), - ('tuple[10]', ('tuple', '[10]')), - ('tuple[19]', ('tuple', '[19]')), - ('tuple[195]', ('tuple', '[195]')), - + ("tuple", ("tuple", None)), + ("tuple[]", ("tuple", "[]")), + ("tuple[1]", ("tuple", "[1]")), + ("tuple[10]", ("tuple", "[10]")), + ("tuple[19]", ("tuple", "[19]")), + ("tuple[195]", ("tuple", "[195]")), + ("tuple[][]", ("tuple", "[][]")), + ("tuple[1][1]", ("tuple", "[1][1]")), + ("tuple[1][]", ("tuple", "[1][]")), + ("tuple[][1]", ("tuple", "[][1]")), + ("tuple[][][]", ("tuple", "[][][]")), + ("tuple[1][][]", ("tuple", "[1][][]")), + ("tuple[][1][]", ("tuple", "[][1][]")), + ("tuple[][][1]", ("tuple", "[][][1]")), # Malformed tuple type strings - ('tuple[][]', None), - ('tuple[1][1]', None), - ('tuple[0]', None), - ('tuple[01]', None), - ('tupleasfasdf', None), - ('uint256', None), - ('bool', None), - ('', None), - ('tupletuple', None), + ("tupleasfasdf", None), + ("uint256", None), + ("bool", None), + ("", None), + ("tupletuple", None), + ("tuple[0]", None), + ("tuple[01]", None), + ("tuple[][0]", None), + ("tuple[][01]", None), + ("tuple[0][][]", None), + ("tuple[][0][]", None), + ("tuple[][][0]", None), ), ) def test_get_tuple_type_str_parts(input, expected): diff --git a/web3/_utils/abi.py b/web3/_utils/abi.py index 698f4151cd..d58c283902 100644 --- a/web3/_utils/abi.py +++ b/web3/_utils/abi.py @@ -494,7 +494,7 @@ def merge_args_and_kwargs( return tuple() -TUPLE_TYPE_STR_RE = re.compile(r'^(tuple)(\[([1-9][0-9]*)?\])?$') +TUPLE_TYPE_STR_RE = re.compile(r"^(tuple)((\[([1-9]\d*\b)?])*)??$") def get_tuple_type_str_parts(s: str) -> Optional[Tuple[str, Optional[str]]]:
translate__pootle-5024
Exception in terminology management view When visiting https://mozilla.locamotion.org/eu/firefox/terminology/ the following exception is thrown: `'SortedRelatedManager' object does not support indexing`
[ { "content": "# -*- coding: utf-8 -*-\n#\n# Copyright (C) Pootle contributors.\n#\n# This file is a part of the Pootle project. It is distributed under the GPL3\n# or later license. See the LICENSE file for a copy of the license and the\n# AUTHORS file for copyright and authorship information.\n\nfrom django.core.urlresolvers import reverse\nfrom django.shortcuts import render\n\nfrom pootle.core.decorators import get_path_obj, permission_required\nfrom pootle_app.views.admin import util\nfrom pootle_store.models import Store, Unit\n\nfrom .forms import term_unit_form_factory\n\n\ndef get_terminology_filename(translation_project):\n try:\n # See if a terminology store already exists\n return translation_project.stores.live().filter(\n name__startswith='pootle-terminology.',\n ).values_list('name', flat=True)[0]\n except IndexError:\n pass\n\n return (\n 'pootle-terminology.%s'\n % translation_project.project.filetypes[0].extension)\n\n\ndef manage_store(request, ctx, language, term_store):\n TermUnitForm = term_unit_form_factory(term_store)\n template_name = 'translation_projects/terminology/manage.html'\n return util.edit(request, template_name, Unit, ctx,\n None, None, queryset=term_store.units, can_delete=True,\n form=TermUnitForm)\n\n\n@get_path_obj\n@permission_required('administrate')\ndef manage(request, translation_project):\n ctx = {\n 'page': 'admin-terminology',\n\n 'browse_url': reverse('pootle-tp-browse', kwargs={\n 'language_code': translation_project.language.code,\n 'project_code': translation_project.project.code,\n }),\n 'translate_url': reverse('pootle-tp-translate', kwargs={\n 'language_code': translation_project.language.code,\n 'project_code': translation_project.project.code,\n }),\n\n 'translation_project': translation_project,\n 'language': translation_project.language,\n 'project': translation_project.project,\n 'source_language': translation_project.project.source_language,\n 'directory': translation_project.directory,\n }\n\n if translation_project.project.is_terminology:\n # Which file should we edit?\n stores = list(Store.objects.live().filter(\n translation_project=translation_project,\n ))\n if len(stores) == 1:\n # There is only one, and we're not going to offer file-level\n # activities, so let's just edit the one that is there.\n return manage_store(request, ctx, ctx['language'], stores[0])\n elif len(stores) > 1:\n for store in stores:\n path_length = len(translation_project.pootle_path)\n store.nice_name = store.pootle_path[path_length:]\n\n ctx['stores'] = stores\n return render(request,\n \"translation_projects/terminology/stores.html\", ctx)\n\n try:\n terminology_filename = get_terminology_filename(translation_project)\n term_store = Store.objects.get(\n pootle_path=translation_project.pootle_path + terminology_filename,\n )\n return manage_store(request, ctx, ctx['language'], term_store)\n except Store.DoesNotExist:\n return render(request, \"translation_projects/terminology/manage.html\",\n ctx)\n", "path": "pootle/apps/pootle_terminology/views.py" } ]
[ { "content": "# -*- coding: utf-8 -*-\n#\n# Copyright (C) Pootle contributors.\n#\n# This file is a part of the Pootle project. It is distributed under the GPL3\n# or later license. See the LICENSE file for a copy of the license and the\n# AUTHORS file for copyright and authorship information.\n\nfrom django.core.urlresolvers import reverse\nfrom django.shortcuts import render\n\nfrom pootle.core.decorators import get_path_obj, permission_required\nfrom pootle_app.views.admin import util\nfrom pootle_store.models import Store, Unit\n\nfrom .forms import term_unit_form_factory\n\n\ndef get_terminology_filename(translation_project):\n try:\n # See if a terminology store already exists\n return translation_project.stores.live().filter(\n name__startswith='pootle-terminology.',\n ).values_list('name', flat=True)[0]\n except IndexError:\n pass\n\n return (\n 'pootle-terminology.%s'\n % translation_project.project.filetypes.first().extension)\n\n\ndef manage_store(request, ctx, language, term_store):\n TermUnitForm = term_unit_form_factory(term_store)\n template_name = 'translation_projects/terminology/manage.html'\n return util.edit(request, template_name, Unit, ctx,\n None, None, queryset=term_store.units, can_delete=True,\n form=TermUnitForm)\n\n\n@get_path_obj\n@permission_required('administrate')\ndef manage(request, translation_project):\n ctx = {\n 'page': 'admin-terminology',\n\n 'browse_url': reverse('pootle-tp-browse', kwargs={\n 'language_code': translation_project.language.code,\n 'project_code': translation_project.project.code,\n }),\n 'translate_url': reverse('pootle-tp-translate', kwargs={\n 'language_code': translation_project.language.code,\n 'project_code': translation_project.project.code,\n }),\n\n 'translation_project': translation_project,\n 'language': translation_project.language,\n 'project': translation_project.project,\n 'source_language': translation_project.project.source_language,\n 'directory': translation_project.directory,\n }\n\n if translation_project.project.is_terminology:\n # Which file should we edit?\n stores = list(Store.objects.live().filter(\n translation_project=translation_project,\n ))\n if len(stores) == 1:\n # There is only one, and we're not going to offer file-level\n # activities, so let's just edit the one that is there.\n return manage_store(request, ctx, ctx['language'], stores[0])\n elif len(stores) > 1:\n for store in stores:\n path_length = len(translation_project.pootle_path)\n store.nice_name = store.pootle_path[path_length:]\n\n ctx['stores'] = stores\n return render(request,\n \"translation_projects/terminology/stores.html\", ctx)\n\n try:\n terminology_filename = get_terminology_filename(translation_project)\n term_store = Store.objects.get(\n pootle_path=translation_project.pootle_path + terminology_filename,\n )\n return manage_store(request, ctx, ctx['language'], term_store)\n except Store.DoesNotExist:\n return render(request, \"translation_projects/terminology/manage.html\",\n ctx)\n", "path": "pootle/apps/pootle_terminology/views.py" } ]
diff --git a/pootle/apps/pootle_terminology/views.py b/pootle/apps/pootle_terminology/views.py index 332559636bf..b65fde84e5e 100644 --- a/pootle/apps/pootle_terminology/views.py +++ b/pootle/apps/pootle_terminology/views.py @@ -27,7 +27,7 @@ def get_terminology_filename(translation_project): return ( 'pootle-terminology.%s' - % translation_project.project.filetypes[0].extension) + % translation_project.project.filetypes.first().extension) def manage_store(request, ctx, language, term_store):
liqd__a4-meinberlin-3150
#3289 Interactive Event **URL:** https://meinberlin-dev.liqd.net/projekte/module/interaktive-veranstaltung/ **device & browser:** *e.g. Firefox 80.0 (64-bit)* **Comment/Question:** ![Screenshot_2020-09-17-Lorem-Ipsum-—-meinBerlin(2)](https://user-images.githubusercontent.com/59610786/93490308-9d293700-f908-11ea-827f-a0988917bf37.jpg) 01 – The questions were supposed to be 20px, but its also fine for me like that. 02 – The Icons are different and fine, but this one seems a bit off, I have save the ones I did here in case you need them: Nextcloud/Projekte/meinBerlin/Material/CI/Bilder & Grafiken/icons/svg 03 – Shall we put a max number of characters here since the questions should be short? @CarolingerSeilchenspringer 04 – I did the spacing between the questions using 8px but if that was already the meinBerlin spacing guidelines, we leave like that. 05 – The category labels seem a bit far from the question, if possible, could we shorten the spacing here a bit so it gets closer from the question and "far" from the Like button? 06 – Are the moderators supposed to like the question? If yes, its not working for me. :/ ![Screenshot 2020-09-17 at 17 05 20](https://user-images.githubusercontent.com/59610786/93491015-7d464300-f909-11ea-9f17-fc4d7195597a.png) 07 – If possible can we use here the text style from the notes (14px)? ![Screenshot 2020-09-17 at 17 08 19](https://user-images.githubusercontent.com/59610786/93491078-90f1a980-f909-11ea-9b5a-22d06351d485.png) 08 – The statistics seem a bit off compared to the sketch on Zeplin. Same on Mobile. ![Screenshot 2020-09-17 at 17 03 12](https://user-images.githubusercontent.com/59610786/93491135-a1a21f80-f909-11ea-929d-e8bdb2140015.png) 09 – Selected categories seem a bit off after selecting them, they kind of stay there instead of just being selected. (Do you know what I mean? I can also explain to you better sharing the screen or something). ![Screenshot 2020-09-17 at 17 27 12](https://user-images.githubusercontent.com/59610786/93492752-715b8080-f90b-11ea-8a1f-b7d87e13416f.png) 10 – The Menu on Mobile is different than on Zeplin. ![Screenshot 2020-09-17 at 17 24 33](https://user-images.githubusercontent.com/59610786/93492800-86381400-f90b-11ea-993c-2616dc4993ab.png) 11 – Same here, the infobox is difficult to understand. Also the buttons are off, spacing inbetween them are different. ![Screenshot 2020-09-17 at 17 26 58](https://user-images.githubusercontent.com/59610786/93492878-994ae400-f90b-11ea-84a1-db9b760ba010.png) 12 – Small thing here but if possible, the success infobox should be aligned properly to the left. ![Screenshot 2020-09-17 at 17 25 59](https://user-images.githubusercontent.com/59610786/93492962-b2539500-f90b-11ea-9b06-bad2fa5eab15.png) 13 – Can we increase the spacing here a bit between the link and icon? ![Screenshot 2020-09-17 at 16 50 18](https://user-images.githubusercontent.com/59610786/93492603-4709c300-f90b-11ea-973a-8e99376faa24.png) 14 – The Blueprint is not updated, I left the .svg file on Taiga I just didn't manage to test the Initiator's view cause there is no project open to testing, could you edit the phase from one of yours and send me the link? Cause all the ones I created I'm the moderator as well. Thanks a lot! Let me know if you need anything :)
[ { "content": "from django.utils.translation import ugettext_lazy as _\n\nfrom adhocracy4.dashboard.blueprints import ProjectBlueprint\nfrom meinberlin.apps.budgeting import phases as budgeting_phases\nfrom meinberlin.apps.documents import phases as documents_phases\nfrom meinberlin.apps.ideas import phases as ideas_phases\nfrom meinberlin.apps.kiezkasse import phases as kiezkasse_phases\nfrom meinberlin.apps.livequestions import phases as livequestion_phases\nfrom meinberlin.apps.mapideas import phases as mapideas_phases\nfrom meinberlin.apps.maptopicprio import phases as maptopicprio_phases\nfrom meinberlin.apps.polls import phases as poll_phases\nfrom meinberlin.apps.topicprio import phases as topicprio_phases\n\nblueprints = [\n ('brainstorming',\n ProjectBlueprint(\n title=_('Brainstorming'),\n description=_(\n 'Collect first ideas for a specific topic and comment on them.'\n ),\n content=[\n ideas_phases.CollectPhase(),\n ],\n image='images/brainstorming.svg',\n settings_model=None,\n )),\n ('map-brainstorming',\n ProjectBlueprint(\n title=_('Spatial Brainstorming'),\n description=_(\n 'Collect location specific ideas for a topic and comment on them.'\n ),\n content=[\n mapideas_phases.CollectPhase(),\n ],\n image='images/map-brainstorming.svg',\n settings_model=('a4maps', 'AreaSettings'),\n )),\n ('map-idea-collection',\n ProjectBlueprint(\n title=_('Spatial Idea Collection'),\n description=_(\n 'Collect location specific ideas that can be rated and commented.'\n ),\n content=[\n mapideas_phases.CollectFeedbackPhase(),\n ],\n image='images/map-idea-collection.svg',\n settings_model=('a4maps', 'AreaSettings'),\n )),\n ('agenda-setting',\n ProjectBlueprint(\n title=_('Agenda Setting'),\n description=_(\n 'With Agenda-Setting it’s possible to identify topics and to '\n 'define mission statements. Anyone can submit topics that can be '\n 'commented and rated.'\n ),\n content=[\n ideas_phases.CollectFeedbackPhase(),\n ],\n image='images/agenda-setting.svg',\n settings_model=None,\n )),\n ('text-review',\n ProjectBlueprint(\n title=_('Text Review'),\n description=_(\n 'In the text-review it’s possible to structure draft texts '\n 'that can be commented.'\n ),\n content=[\n documents_phases.CommentPhase(),\n ],\n image='images/text-review.svg',\n settings_model=None,\n )),\n ('participatory-budgeting',\n ProjectBlueprint(\n title=_('Participatory budgeting'),\n description=_(\n 'With participatory-budgeting it’s possible to make proposals '\n 'with budget specifications and locate them. Anyone can comment '\n 'and rate on different proposals.'\n ),\n content=[\n budgeting_phases.RequestPhase()\n ],\n image='images/participatory-budgeting.svg',\n settings_model=('a4maps', 'AreaSettings'),\n )),\n ('poll',\n ProjectBlueprint(\n title=_('Poll'),\n description=_(\n 'Create a poll with multiple questions and possible answers. '\n 'Anyone can cast votes and comment on the poll.'\n ),\n content=[\n poll_phases.VotingPhase(),\n ],\n image='images/poll.svg',\n settings_model=None,\n )),\n ('topic-prioritization',\n ProjectBlueprint(\n title=_('Topic Priorization'),\n description=_(\n 'Comment and prioritize topics.'\n ),\n content=[\n topicprio_phases.PrioritizePhase(),\n ],\n image='images/priorization.svg',\n settings_model=None,\n )),\n ('map-topic-prioritization',\n ProjectBlueprint(\n title=_('Place Prioritization'),\n description=_(\n 'Comment and prioritize places located on a map.'\n ),\n content=[\n maptopicprio_phases.PrioritizePhase(),\n ],\n image='images/place-priotization.svg',\n settings_model=('a4maps', 'AreaSettings'),\n )),\n ('kiezkasse',\n ProjectBlueprint(\n title=_('Kiezkasse'),\n description=_(\n 'With kiezkasse it’s possible to make proposals '\n 'with budget specifications and locate them. '\n 'The proposals can be commented and rated.'\n ),\n content=[\n kiezkasse_phases.RequestFeedbackPhase(),\n ],\n image='images/kiezkasse.svg',\n settings_model=('a4maps', 'AreaSettings'),\n )),\n ('interactive-event',\n ProjectBlueprint(\n title=_('Interactive Event'),\n description=_(\n 'The participants of an event can ask their questions online. '\n 'Other participants can support the question. You as the '\n 'moderator can sort the questions by support or '\n 'characteristic.'\n ),\n content=[\n livequestion_phases.IssuePhase(),\n ],\n image='images/text-review.svg',\n settings_model=None,\n )),\n]\n", "path": "meinberlin/apps/dashboard/blueprints.py" } ]
[ { "content": "from django.utils.translation import ugettext_lazy as _\n\nfrom adhocracy4.dashboard.blueprints import ProjectBlueprint\nfrom meinberlin.apps.budgeting import phases as budgeting_phases\nfrom meinberlin.apps.documents import phases as documents_phases\nfrom meinberlin.apps.ideas import phases as ideas_phases\nfrom meinberlin.apps.kiezkasse import phases as kiezkasse_phases\nfrom meinberlin.apps.livequestions import phases as livequestion_phases\nfrom meinberlin.apps.mapideas import phases as mapideas_phases\nfrom meinberlin.apps.maptopicprio import phases as maptopicprio_phases\nfrom meinberlin.apps.polls import phases as poll_phases\nfrom meinberlin.apps.topicprio import phases as topicprio_phases\n\nblueprints = [\n ('brainstorming',\n ProjectBlueprint(\n title=_('Brainstorming'),\n description=_(\n 'Collect first ideas for a specific topic and comment on them.'\n ),\n content=[\n ideas_phases.CollectPhase(),\n ],\n image='images/brainstorming.svg',\n settings_model=None,\n )),\n ('map-brainstorming',\n ProjectBlueprint(\n title=_('Spatial Brainstorming'),\n description=_(\n 'Collect location specific ideas for a topic and comment on them.'\n ),\n content=[\n mapideas_phases.CollectPhase(),\n ],\n image='images/map-brainstorming.svg',\n settings_model=('a4maps', 'AreaSettings'),\n )),\n ('map-idea-collection',\n ProjectBlueprint(\n title=_('Spatial Idea Collection'),\n description=_(\n 'Collect location specific ideas that can be rated and commented.'\n ),\n content=[\n mapideas_phases.CollectFeedbackPhase(),\n ],\n image='images/map-idea-collection.svg',\n settings_model=('a4maps', 'AreaSettings'),\n )),\n ('agenda-setting',\n ProjectBlueprint(\n title=_('Agenda Setting'),\n description=_(\n 'With Agenda-Setting it’s possible to identify topics and to '\n 'define mission statements. Anyone can submit topics that can be '\n 'commented and rated.'\n ),\n content=[\n ideas_phases.CollectFeedbackPhase(),\n ],\n image='images/agenda-setting.svg',\n settings_model=None,\n )),\n ('text-review',\n ProjectBlueprint(\n title=_('Text Review'),\n description=_(\n 'In the text-review it’s possible to structure draft texts '\n 'that can be commented.'\n ),\n content=[\n documents_phases.CommentPhase(),\n ],\n image='images/text-review.svg',\n settings_model=None,\n )),\n ('participatory-budgeting',\n ProjectBlueprint(\n title=_('Participatory budgeting'),\n description=_(\n 'With participatory-budgeting it’s possible to make proposals '\n 'with budget specifications and locate them. Anyone can comment '\n 'and rate on different proposals.'\n ),\n content=[\n budgeting_phases.RequestPhase()\n ],\n image='images/participatory-budgeting.svg',\n settings_model=('a4maps', 'AreaSettings'),\n )),\n ('poll',\n ProjectBlueprint(\n title=_('Poll'),\n description=_(\n 'Create a poll with multiple questions and possible answers. '\n 'Anyone can cast votes and comment on the poll.'\n ),\n content=[\n poll_phases.VotingPhase(),\n ],\n image='images/poll.svg',\n settings_model=None,\n )),\n ('topic-prioritization',\n ProjectBlueprint(\n title=_('Topic Priorization'),\n description=_(\n 'Comment and prioritize topics.'\n ),\n content=[\n topicprio_phases.PrioritizePhase(),\n ],\n image='images/priorization.svg',\n settings_model=None,\n )),\n ('map-topic-prioritization',\n ProjectBlueprint(\n title=_('Place Prioritization'),\n description=_(\n 'Comment and prioritize places located on a map.'\n ),\n content=[\n maptopicprio_phases.PrioritizePhase(),\n ],\n image='images/place-priotization.svg',\n settings_model=('a4maps', 'AreaSettings'),\n )),\n ('kiezkasse',\n ProjectBlueprint(\n title=_('Kiezkasse'),\n description=_(\n 'With kiezkasse it’s possible to make proposals '\n 'with budget specifications and locate them. '\n 'The proposals can be commented and rated.'\n ),\n content=[\n kiezkasse_phases.RequestFeedbackPhase(),\n ],\n image='images/kiezkasse.svg',\n settings_model=('a4maps', 'AreaSettings'),\n )),\n ('interactive-event',\n ProjectBlueprint(\n title=_('Interactive Event'),\n description=_(\n 'The participants of an event can ask their questions online. '\n 'Other participants can support the question. You as the '\n 'moderator can sort the questions by support or '\n 'characteristic.'\n ),\n content=[\n livequestion_phases.IssuePhase(),\n ],\n image='images/interactive-event.svg',\n settings_model=None,\n )),\n]\n", "path": "meinberlin/apps/dashboard/blueprints.py" } ]
diff --git a/meinberlin/apps/dashboard/blueprints.py b/meinberlin/apps/dashboard/blueprints.py index 1dc37d5ea7..d43c17f282 100644 --- a/meinberlin/apps/dashboard/blueprints.py +++ b/meinberlin/apps/dashboard/blueprints.py @@ -152,7 +152,7 @@ content=[ livequestion_phases.IssuePhase(), ], - image='images/text-review.svg', + image='images/interactive-event.svg', settings_model=None, )), ] diff --git a/meinberlin/apps/livequestions/assets/Filters.jsx b/meinberlin/apps/livequestions/assets/Filters.jsx index 2ca14cbcfa..8ae1b9abaf 100644 --- a/meinberlin/apps/livequestions/assets/Filters.jsx +++ b/meinberlin/apps/livequestions/assets/Filters.jsx @@ -50,9 +50,9 @@ export default class Filter extends React.Component { checked={this.props.displayOnShortlist} onChange={this.props.toggleDisplayOnShortlist} // eslint-disable-line react/jsx-handler-names /> - <span className="checkbox-btn__text"> + <div className="checkbox-btn__text"> <i className="far fa-list-alt" aria-label={onlyShowMarkedText} /> - </span> + </div> </label> </div> <div className="checkbox-btn u-spacer-right"> diff --git a/meinberlin/apps/livequestions/assets/InfoBox.jsx b/meinberlin/apps/livequestions/assets/InfoBox.jsx index d7138ea414..9bcd189b75 100644 --- a/meinberlin/apps/livequestions/assets/InfoBox.jsx +++ b/meinberlin/apps/livequestions/assets/InfoBox.jsx @@ -35,29 +35,29 @@ export default class InfoBox extends React.Component { {this.props.isModerator && <div className="alert-dismissible"> <div className="u-align-right"> - <button type="button" className="u-muted" onClick={this.toggleInformation.bind(this)}> + <button type="button" className="btn btn--none u-muted" onClick={this.toggleInformation.bind(this)}> <span aria-label={ariaCloseInfo}>{btnHide} <i className="fa fa-times" /></span> </button> </div> - <div className="infobox u-inline-flex"> + <div className="infobox"> <div className="infobox__box"> <i className="far fa-list-alt" /> - <div>{textAddQuestion}</div> + <span className="infobox__text">{textAddQuestion}</span> </div> <div className="infobox__box"> - <span className="fa-stack fa-1x"> + <span className="fa-stack fa-1x infobox__icon"> <i className="fas fa-tv fa-stack-2x" aria-label="hidden"> </i> - <i className="fas fa-arrow-up fa-stack-1x" aria-label="hidden"> </i> + <i className="fas fa-check fa-stack-1x" aria-label="hidden"> </i> </span> - <div>{textDisplayQuestion}</div> + <span className="infobox__text">{textDisplayQuestion}</span> </div> <div className="infobox__box"> <i className="far fa-check-circle" /> - <div>{textMarkAnswered}</div> + <span className="infobox__text">{textMarkAnswered}</span> </div> <div className="infobox__box infobox__box--last"> <i className="far fa-eye" /> - <div>{textHideQuestion}</div> + <span className="infobox__text">{textHideQuestion}</span> </div> </div> </div>} diff --git a/meinberlin/apps/livequestions/assets/QuestionBox.jsx b/meinberlin/apps/livequestions/assets/QuestionBox.jsx index 635a6809c0..11a6eb6c7c 100644 --- a/meinberlin/apps/livequestions/assets/QuestionBox.jsx +++ b/meinberlin/apps/livequestions/assets/QuestionBox.jsx @@ -268,6 +268,17 @@ export default class QuestionBox extends React.Component { hasLikingPermission={this.props.hasLikingPermission} /> </div> + {this.props.hasAskQuestionsPermission && + <div className="l-center-6"> + <a + href={this.props.askQuestionUrl} + className="btn btn--primary btn--full btn--huge u-spacer-top" + id="question-create" + > + <i className="fa fa-plus question-list-button-icon" aria-hidden="true" /> + {django.gettext('Add Question')} + </a> + </div>} </div> </div> </div> diff --git a/meinberlin/apps/livequestions/assets/QuestionModerator.jsx b/meinberlin/apps/livequestions/assets/QuestionModerator.jsx index 73a77ceb1b..f97a3a07c7 100644 --- a/meinberlin/apps/livequestions/assets/QuestionModerator.jsx +++ b/meinberlin/apps/livequestions/assets/QuestionModerator.jsx @@ -105,7 +105,7 @@ export default class QuestionModerator extends React.Component { return ( <div className="list-item list-item--squashed"> <div> - <p className={this.props.is_hidden ? 'u-muted u-text-decoration-line-through' : ''}>{this.props.children}</p> + <p className={this.props.is_hidden ? 'u-muted u-text-decoration-line-through live_questions__question' : 'live_questions__question'}>{this.props.children}</p> </div> {this.props.category && <div> @@ -118,7 +118,7 @@ export default class QuestionModerator extends React.Component { <div> {this.props.displayIsOnShortlist && <button type="button" className="btn btn--none" onClick={this.toggleIsOnShortList.bind(this)}> - <i className={this.state.is_on_shortlist ? 'far fa-list-alt u-primary' : 'far fa-list-alt'} aria-label={this.state.is_on_shortlist ? addShortlistText : removeShortlistText} /> + <i className={this.state.is_on_shortlist ? 'far fa-list-alt u-primary' : 'far fa-list-alt u-muted'} aria-label={this.state.is_on_shortlist ? addShortlistText : removeShortlistText} /> </button>} {this.props.displayIsLive && <button type="button" className="btn btn--none" onClick={this.toggleIslive.bind(this)}> diff --git a/meinberlin/apps/livequestions/assets/QuestionUser.jsx b/meinberlin/apps/livequestions/assets/QuestionUser.jsx index 63efdff936..b9fafa8e01 100644 --- a/meinberlin/apps/livequestions/assets/QuestionUser.jsx +++ b/meinberlin/apps/livequestions/assets/QuestionUser.jsx @@ -58,7 +58,7 @@ export default class QuestionUser extends React.Component { <div> {this.props.is_on_shortlist && <i className="far fa-list-alt u-primary" aria-label={shortlistText} />} - <p> + <p className="live_questions__question"> {this.props.children} </p> </div> diff --git a/meinberlin/apps/livequestions/assets/StatisticsBox.jsx b/meinberlin/apps/livequestions/assets/StatisticsBox.jsx index b1bea53259..d1806d2f19 100644 --- a/meinberlin/apps/livequestions/assets/StatisticsBox.jsx +++ b/meinberlin/apps/livequestions/assets/StatisticsBox.jsx @@ -43,6 +43,7 @@ export default class StatisticsBox extends React.Component { render () { const questionAnsweredTag = django.gettext('Questions Answered') const categoriesAnsweredTag = django.gettext('Categories Answered') + return ( <div className="module-content"> <div className="l-wrapper"> @@ -58,8 +59,8 @@ export default class StatisticsBox extends React.Component { <div className="progress-bar" style={style} role="progressbar" aria-valuenow="25" aria-valuemin="0" aria-valuemax="100" - ><span>&nbsp;{category}&nbsp;</span>{countPerCategory}% - </div> + /> + <span className="progress-bar__stats">&nbsp;{category}&nbsp;{countPerCategory}%</span> </div> </div> ) diff --git a/meinberlin/apps/livequestions/templates/meinberlin_livequestions/question_present_list.html b/meinberlin/apps/livequestions/templates/meinberlin_livequestions/question_present_list.html index e0d811cb0f..5f313c5e86 100644 --- a/meinberlin/apps/livequestions/templates/meinberlin_livequestions/question_present_list.html +++ b/meinberlin/apps/livequestions/templates/meinberlin_livequestions/question_present_list.html @@ -16,7 +16,7 @@ </div> </div> <div class="l-wrapper live_questions_infographic u-spacer-bottom"> - <div class="block--infographic__body"> + <div class="block--infographic__body u-spacer-bottom"> <div class="block--infographic__icon-block"> <div class="block--infographic__icon"> <img src="{% static 'images/icon_liveq_infografik_phone.svg' %}" alt=""> @@ -54,7 +54,7 @@ </div> </div> </div> - <div class="l-center-6 u-align-center"> + <div class="l-center-8 u-align-center"> <span>{% trans "join in here" %}<a href="{{ project.get_absolute_url }}">{{ view.get_full_url }}</a></span> </div> </div> diff --git a/meinberlin/assets/images/blueprints/interactive-event.svg b/meinberlin/assets/images/blueprints/interactive-event.svg new file mode 100644 index 0000000000..3f7d3e4885 --- /dev/null +++ b/meinberlin/assets/images/blueprints/interactive-event.svg @@ -0,0 +1,25 @@ +<?xml version="1.0" encoding="utf-8"?> +<!-- Generator: Adobe Illustrator 24.3.0, SVG Export Plug-In . SVG Version: 6.00 Build 0) --> +<svg version="1.1" id="Layer_1" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" x="0px" y="0px" + viewBox="0 0 270 150" style="enable-background:new 0 0 270 150;" xml:space="preserve"> +<style type="text/css"> + .st0{fill-rule:evenodd;clip-rule:evenodd;fill:#253377;} + .st1{fill:none;stroke:#FFFFFF;stroke-width:3.45;stroke-linecap:round;stroke-linejoin:round;stroke-miterlimit:10;} + .st2{fill-rule:evenodd;clip-rule:evenodd;fill:#FFFFFF;} + .st3{fill:#253377;stroke:#FFFFFF;stroke-miterlimit:10;} +</style> +<desc>Created with Sketch.</desc> +<rect id="icon_textCommenting" class="st0" width="270" height="150"/> +<g> + <g> + <rect x="99.2" y="21.7" class="st1" width="54.8" height="106.6"/> + <rect x="99.2" y="38" class="st1" width="54.8" height="74"/> + <circle class="st2" cx="126.7" cy="120.2" r="2.2"/> + <path class="st1" d="M118.8,29.7h15.8"/> + </g> + <path class="st3" d="M182.3,49.6h-43.9c-4.6,0-8.4,3.9-8.4,8.7v26.9c0,4.8,3.8,8.7,8.4,8.7H154v11.9l11.4-11.9h16.8 + c4.6,0,8.4-3.9,8.4-8.7V58.3C190.8,53.5,186.9,49.6,182.3,49.6z"/> + <path class="st1" d="M182.3,49.6h-43.9c-4.6,0-8.4,3.9-8.4,8.7v26.9c0,4.8,3.8,8.7,8.4,8.7H154v11.9l11.4-11.9h16.8 + c4.6,0,8.4-3.9,8.4-8.7V58.3C190.8,53.5,186.9,49.6,182.3,49.6z"/> +</g> +</svg> diff --git a/meinberlin/assets/scss/components/_list_item.scss b/meinberlin/assets/scss/components/_list_item.scss index 2f124bbde1..5b9a459132 100644 --- a/meinberlin/assets/scss/components/_list_item.scss +++ b/meinberlin/assets/scss/components/_list_item.scss @@ -18,7 +18,7 @@ .list-item--squashed { border: 0; - margin-bottom: 3px; + margin-bottom: 8px; @media print { border: 1px solid $border-color; diff --git a/meinberlin/assets/scss/components/_live_questions.scss b/meinberlin/assets/scss/components/_live_questions.scss index eb930eca52..641e402da7 100644 --- a/meinberlin/assets/scss/components/_live_questions.scss +++ b/meinberlin/assets/scss/components/_live_questions.scss @@ -14,16 +14,17 @@ } .live_questions__filters--dropdown { - width: 100%; + min-width: 100%; } .live_questions__filters--btns { width: 100%; display: inline-flex; justify-content: flex-end; + margin: $spacer 0; @media screen and (min-width: $breakpoint-xs) { - margin-left: $spacer; + margin: 0 0 0 $spacer; } } @@ -37,23 +38,68 @@ justify-content: space-between; } +.live_questions__question { + font-size: $font-size-lg; + margin-bottom: 0.25*$spacer; +} + .infobox__box { background-color: $bg-tertiary; - padding: 0.5*$spacer; - margin-right: $spacer; - text-align: center; + padding: 0.5*$spacer $spacer; + font-size: $font-size-sm; + margin-bottom: $spacer; i { + position: absolute; color: $brand-primary; + line-height: 1.2rem; } } -.infobox__box--last { - margin-right: 0; +.infobox__text { + margin-left: 2*$spacer; +} + +.infobox__icon { + margin-left: -0.6*$spacer; + position: absolute; +} + +//infobox bigger +@media screen and (min-width: $breakpoint-xs) { + .infobox { + display: inline-flex; + } + + .infobox__box { + text-align: center; + padding: $spacer; + margin-right: $spacer; + } + + .infobox__box > i { + display: block; + position: relative; + } + + .infobox__text { + margin-left: 0; + } + + .infobox__icon { + display: block; + position: relative; + margin: auto; + } + + .infobox__box--last { + margin-right: 0; + } } .progress { margin-bottom: $spacer; + position: relative; } .progress-bar { @@ -61,11 +107,16 @@ height: 1.5rem; background-color: $brand-primary-tint; border-radius: 0.25rem; + position: absolute; +} + +.progress-bar__stats { + position: relative; + z-index: 1; } .checkbox-btn { display: inline-block; - cursor: pointer; } .checkbox-btn__input { @@ -77,7 +128,6 @@ clip: rect(0, 0, 0, 0); white-space: nowrap; border: 0; - cursor: pointer; } .checkbox-btn__label--primary { @@ -87,6 +137,10 @@ cursor: pointer; } + input[type="checkbox"]:focus + .checkbox-btn__text { + box-shadow: 0 0 0 4px #5595ff; // Faking browser focus ring + } + input[type="checkbox"]:checked + .checkbox-btn__text { @extend .btn--secondary; } @@ -136,6 +190,10 @@ color: $body-bg; text-decoration: underline; display: block; + + @media screen and (min-width: $breakpoint) { + display: inline-block; + } } } @@ -155,5 +213,6 @@ } .fa-stack-1x { - font-size: 0.7rem; + font-size: 0.6rem; + line-height: 0.85rem; } diff --git a/meinberlin/assets/scss/utility.scss b/meinberlin/assets/scss/utility.scss index c6df1a6dc5..dd64515951 100644 --- a/meinberlin/assets/scss/utility.scss +++ b/meinberlin/assets/scss/utility.scss @@ -92,6 +92,10 @@ color: $text-color-gray; } +.u-text-decoration-line-through { + text-decoration: line-through; +} + .u-whitespace-pre { white-space: pre-wrap; }
fossasia__open-event-server-7659
Preset roles deletion is allowed **Describe the bug** Currently the preset roles like "organizer, coorganizer etc" should not be deleted from the db. But right now it is possible to delete these entries. **To Reproduce** Steps to reproduce the behavior: 1. Hit the delete endpoint for role 2. Choose any of the ids pointing to any of the 7 preset roles 3. You can find deletion to be successful **Expected behavior** <!-- A clear and concise description of what you expected to happen. --> **Stacktrace** <!-- If applicable, add stacktrace to help explain your problem. --> **Additional details (please complete the following information):** - OS: [e.g. MacOS, Ubuntu, CentOS] - Python Version [e.g. `3.5`, `3.6`] - `HEAD` Commit hash [e.g. `4629c62`] **Additional context** <!-- Add any other context about the problem here. -->
[ { "content": "from flask_rest_jsonapi import ResourceDetail, ResourceList\n\nfrom app.api.bootstrap import api\nfrom app.api.helpers.db import safe_query_kwargs\nfrom app.api.helpers.errors import UnprocessableEntityError\nfrom app.api.schema.roles import RoleSchema\nfrom app.models import db\nfrom app.models.role import Role\nfrom app.models.role_invite import RoleInvite\nfrom app.models.users_events_role import UsersEventsRoles\n\n\nclass RoleList(ResourceList):\n \"\"\"\n List and create role\n \"\"\"\n\n decorators = (api.has_permission('is_admin', methods=\"POST\"),)\n schema = RoleSchema\n data_layer = {'session': db.session, 'model': Role}\n\n\nclass RoleDetail(ResourceDetail):\n \"\"\"\n Role detail by id\n \"\"\"\n\n def before_get_object(self, view_kwargs):\n \"\"\"\n before get method to get the resource id for fetching details\n :param view_kwargs:\n :return:\n \"\"\"\n if view_kwargs.get('role_invite_id') is not None:\n role_invite = safe_query_kwargs(RoleInvite, view_kwargs, 'role_invite_id')\n if role_invite.role_id is not None:\n view_kwargs['id'] = role_invite.role_id\n else:\n view_kwargs['id'] = None\n\n if view_kwargs.get('users_events_roles_id') is not None:\n users_events_role = safe_query_kwargs(\n UsersEventsRoles,\n view_kwargs,\n 'users_events_roles_id',\n )\n\n if users_events_role.role_id is not None:\n view_kwargs['id'] = users_events_role.role_id\n else:\n view_kwargs['id'] = None\n\n def before_update_object(self, role, data, view_kwargs):\n \"\"\"\n Method to edit object\n :param role:\n :param data:\n :param view_kwargs:\n :return:\n \"\"\"\n if data.get('name'):\n if data['name'] in [\n 'owner',\n 'organizer',\n 'coorganizer',\n 'registrar',\n 'moderator',\n 'attendee',\n 'track_organizer',\n ]:\n raise UnprocessableEntityError(\n {'data': 'name'}, \"The given name cannot be updated\"\n )\n\n def before_delete_object(self, obj, kwargs):\n \"\"\"\n method to check proper resource name before deleting\n :param obj:\n :param kwargs:\n :return:\n \"\"\"\n if obj.name in [\n 'owner',\n 'organizer',\n 'coorganizer',\n 'registrar',\n 'moderator',\n 'attendee',\n 'track_organizer',\n ]:\n raise UnprocessableEntityError(\n {'data': 'name'}, \"The resource with given name cannot be deleted\"\n )\n\n decorators = (api.has_permission('is_admin', methods=\"PATCH,DELETE\"),)\n schema = RoleSchema\n data_layer = {\n 'session': db.session,\n 'model': Role,\n 'methods': {'before_get_object': before_get_object},\n }\n", "path": "app/api/roles.py" } ]
[ { "content": "from flask_rest_jsonapi import ResourceDetail, ResourceList\n\nfrom app.api.bootstrap import api\nfrom app.api.helpers.db import safe_query_kwargs\nfrom app.api.helpers.errors import UnprocessableEntityError\nfrom app.api.schema.roles import RoleSchema\nfrom app.models import db\nfrom app.models.role import Role\nfrom app.models.role_invite import RoleInvite\nfrom app.models.users_events_role import UsersEventsRoles\n\n\nclass RoleList(ResourceList):\n \"\"\"\n List and create role\n \"\"\"\n\n decorators = (api.has_permission('is_admin', methods=\"POST\"),)\n schema = RoleSchema\n data_layer = {'session': db.session, 'model': Role}\n\n\nclass RoleDetail(ResourceDetail):\n \"\"\"\n Role detail by id\n \"\"\"\n\n def before_get_object(self, view_kwargs):\n \"\"\"\n before get method to get the resource id for fetching details\n :param view_kwargs:\n :return:\n \"\"\"\n if view_kwargs.get('role_invite_id') is not None:\n role_invite = safe_query_kwargs(RoleInvite, view_kwargs, 'role_invite_id')\n if role_invite.role_id is not None:\n view_kwargs['id'] = role_invite.role_id\n else:\n view_kwargs['id'] = None\n\n if view_kwargs.get('users_events_roles_id') is not None:\n users_events_role = safe_query_kwargs(\n UsersEventsRoles,\n view_kwargs,\n 'users_events_roles_id',\n )\n\n if users_events_role.role_id is not None:\n view_kwargs['id'] = users_events_role.role_id\n else:\n view_kwargs['id'] = None\n\n def before_update_object(self, role, data, view_kwargs):\n \"\"\"\n Method to edit object\n :param role:\n :param data:\n :param view_kwargs:\n :return:\n \"\"\"\n if data.get('name'):\n if data['name'] in [\n 'owner',\n 'organizer',\n 'coorganizer',\n 'registrar',\n 'moderator',\n 'attendee',\n 'track_organizer',\n ]:\n raise UnprocessableEntityError(\n {'data': 'name'}, \"The given name cannot be updated\"\n )\n\n def before_delete_object(self, obj, kwargs):\n \"\"\"\n method to check proper resource name before deleting\n :param obj:\n :param kwargs:\n :return:\n \"\"\"\n if obj.name in [\n 'owner',\n 'organizer',\n 'coorganizer',\n 'registrar',\n 'moderator',\n 'attendee',\n 'track_organizer',\n ]:\n raise UnprocessableEntityError(\n {'data': 'name'}, \"The resource with given name cannot be deleted\"\n )\n\n decorators = (api.has_permission('is_admin', methods=\"PATCH,DELETE\"),)\n schema = RoleSchema\n data_layer = {\n 'session': db.session,\n 'model': Role,\n 'methods': {\n 'before_get_object': before_get_object,\n 'before_delete_object': before_delete_object,\n },\n }\n", "path": "app/api/roles.py" } ]
diff --git a/app/api/roles.py b/app/api/roles.py index 71fe93c13c..e30b3ad2bf 100644 --- a/app/api/roles.py +++ b/app/api/roles.py @@ -97,5 +97,8 @@ def before_delete_object(self, obj, kwargs): data_layer = { 'session': db.session, 'model': Role, - 'methods': {'before_get_object': before_get_object}, + 'methods': { + 'before_get_object': before_get_object, + 'before_delete_object': before_delete_object, + }, } diff --git a/tests/hook_main.py b/tests/hook_main.py index 42d93e99cd..4fc51f3408 100644 --- a/tests/hook_main.py +++ b/tests/hook_main.py @@ -2524,7 +2524,7 @@ def role_delete(transaction): :return: """ with stash['app'].app_context(): - role = RoleFactory() + role = RoleFactory(name="example role") db.session.add(role) db.session.commit()
ibis-project__ibis-2249
BUG: Multiple aliases on the same column not behaving as expected ``` python column = table.some_column table.projection( [ column.name("alias1"), column.name("alias2"), column.name("alias3"), ] ) ``` I think the expected behavior would be a table expression with: ``` Selection[table] table: Table: ref_0 selections: alias1 = Column[float64*] 'some_column' from table ref_0 alias2 = Column[float64*] 'some_column' from table ref_0 alias3 = Column[float64*] 'some_column' from table ref_0 ``` However, the result I'm getting is: ``` Selection[table] table: Table: ref_0 selections: alias1 = Column[float64*] 'some_column' from table ref_0 alias1 = Column[float64*] 'some_column' from table ref_0 alias1 = Column[float64*] 'some_column' from table ref_0 ```
[ { "content": "import ibis.expr.operations as ops\nimport ibis.expr.types as ir\nimport ibis.util as util\n\n\nclass FormatMemo:\n # A little sanity hack to simplify the below\n\n def __init__(self):\n from collections import defaultdict\n\n self.formatted = {}\n self.aliases = {}\n self.ops = {}\n self.counts = defaultdict(int)\n self._repr_memo = {}\n self.subexprs = {}\n self.visit_memo = set()\n\n def __contains__(self, obj):\n return self._key(obj) in self.formatted\n\n def _key(self, expr):\n memo = self._repr_memo\n try:\n result = memo[expr]\n except KeyError:\n result = memo[expr] = self._format(expr)\n return result\n\n def _format(self, expr):\n return expr.op()._repr(memo=self)\n\n def observe(self, expr, formatter=None):\n if formatter is None:\n formatter = self._format\n key = self._key(expr)\n if key not in self.formatted:\n self.aliases[key] = 'ref_{:d}'.format(len(self.formatted))\n self.formatted[key] = formatter(expr)\n self.ops[key] = expr.op()\n\n self.counts[key] += 1\n\n def count(self, expr):\n return self.counts[self._key(expr)]\n\n def get_alias(self, expr):\n return self.aliases[self._key(expr)]\n\n def get_formatted(self, expr):\n return self.formatted[self._key(expr)]\n\n\nclass ExprFormatter:\n \"\"\"For creating a nice tree-like representation of an expression graph.\n\n Notes\n -----\n TODO: detect reused DAG nodes and do not display redundant information\n\n \"\"\"\n\n def __init__(\n self, expr, indent_size=2, base_level=0, memo=None, memoize=True\n ):\n self.expr = expr\n self.indent_size = indent_size\n self.base_level = base_level\n\n self.memoize = memoize\n\n # For tracking \"extracted\" objects, like tables, that we don't want to\n # print out more than once, and simply alias in the expression tree\n if memo is None:\n memo = FormatMemo()\n\n self.memo = memo\n\n def get_result(self):\n what = self.expr.op()\n\n if self.memoize:\n self._memoize_tables()\n\n if isinstance(what, ops.TableNode) and what.has_schema():\n # This should also catch aggregations\n if not self.memoize and self.expr in self.memo:\n text = 'Table: %s' % self.memo.get_alias(self.expr)\n elif isinstance(what, ops.PhysicalTable):\n text = self._format_table(self.expr)\n else:\n # Any other node type\n text = self._format_node(self.expr)\n elif isinstance(what, ops.TableColumn):\n text = self._format_column(self.expr)\n elif isinstance(what, ops.Literal):\n text = 'Literal[{}]\\n {}'.format(\n self._get_type_display(), str(what.value)\n )\n elif isinstance(what, ops.ScalarParameter):\n text = 'ScalarParameter[{}]'.format(self._get_type_display())\n elif isinstance(what, ops.Node):\n text = self._format_node(self.expr)\n\n if isinstance(self.expr, ir.ValueExpr) and self.expr._name is not None:\n text = '{} = {}'.format(self.expr.get_name(), text)\n\n if self.memoize:\n alias_to_text = [\n (\n self.memo.aliases[x],\n self.memo.formatted[x],\n self.memo.ops[x],\n )\n for x in self.memo.formatted\n ]\n alias_to_text.sort()\n\n # A hack to suppress printing out of a ref that is the result of\n # the top level expression\n refs = [\n x + '\\n' + y\n for x, y, op in alias_to_text\n if not op.equals(what)\n ]\n\n text = '\\n\\n'.join(refs + [text])\n\n return self._indent(text, self.base_level)\n\n def _memoize_tables(self):\n table_memo_ops = (ops.Aggregation, ops.Selection, ops.SelfReference)\n expr = self.expr\n if expr.op() in self.memo.visit_memo:\n return\n\n stack = [expr]\n seen = set()\n memo = self.memo\n\n while stack:\n e = stack.pop()\n op = e.op()\n\n if op not in seen:\n seen.add(op)\n\n if isinstance(op, ops.PhysicalTable):\n memo.observe(e, self._format_table)\n elif isinstance(op, ops.Node):\n stack.extend(\n arg\n for arg in reversed(op.args)\n if isinstance(arg, ir.Expr)\n )\n if isinstance(op, table_memo_ops):\n memo.observe(e, self._format_node)\n elif isinstance(op, ops.TableNode) and op.has_schema():\n memo.observe(e, self._format_table)\n memo.visit_memo.add(op)\n\n def _indent(self, text, indents=1):\n return util.indent(text, self.indent_size * indents)\n\n def _format_table(self, expr):\n table = expr.op()\n # format the schema\n rows = ['name: {}\\nschema:'.format(table.name)]\n rows.extend(\n map(' {} : {}'.format, table.schema.names, table.schema.types)\n )\n opname = type(table).__name__\n type_display = self._get_type_display(expr)\n opline = '{}[{}]'.format(opname, type_display)\n return '{}\\n{}'.format(opline, self._indent('\\n'.join(rows)))\n\n def _format_column(self, expr):\n # HACK: if column is pulled from a Filter of another table, this parent\n # will not be found in the memo\n col = expr.op()\n parent = col.parent()\n\n if parent not in self.memo:\n self.memo.observe(parent, formatter=self._format_node)\n\n table_formatted = self.memo.get_alias(parent)\n table_formatted = self._indent(table_formatted)\n\n type_display = self._get_type_display(self.expr)\n return \"Column[{0}] '{1}' from table\\n{2}\".format(\n type_display, col.name, table_formatted\n )\n\n def _format_node(self, expr):\n op = expr.op()\n formatted_args = []\n\n def visit(what, extra_indents=0):\n if isinstance(what, ir.Expr):\n result = self._format_subexpr(what)\n else:\n result = self._indent(str(what))\n\n if extra_indents > 0:\n result = util.indent(result, self.indent_size)\n\n formatted_args.append(result)\n\n arg_names = getattr(op, 'display_argnames', op.argnames)\n\n if not arg_names:\n for arg in op.flat_args():\n visit(arg)\n else:\n signature = op.signature\n arg_name_pairs = (\n (arg, name)\n for arg, name in zip(op.args, arg_names)\n if signature[name].show\n )\n for arg, name in arg_name_pairs:\n if name == 'arg' and isinstance(op, ops.ValueOp):\n # don't display first argument's name in repr\n name = None\n if name is not None:\n name = self._indent('{}:'.format(name))\n if util.is_iterable(arg):\n if name is not None and len(arg) > 0:\n formatted_args.append(name)\n indents = 1\n else:\n indents = 0\n for x in arg:\n visit(x, extra_indents=indents)\n else:\n if name is not None:\n formatted_args.append(name)\n indents = 1\n else:\n indents = 0\n visit(arg, extra_indents=indents)\n\n opname = type(op).__name__\n type_display = self._get_type_display(expr)\n opline = '{}[{}]'.format(opname, type_display)\n return '\\n'.join([opline] + formatted_args)\n\n def _format_subexpr(self, expr):\n subexprs = self.memo.subexprs\n key = expr.op()\n try:\n result = subexprs[key]\n except KeyError:\n formatter = ExprFormatter(expr, memo=self.memo, memoize=False)\n result = subexprs[key] = self._indent(formatter.get_result(), 1)\n return result\n\n def _get_type_display(self, expr=None):\n if expr is None:\n expr = self.expr\n return expr._type_display()\n", "path": "ibis/expr/format.py" } ]
[ { "content": "import ibis.expr.operations as ops\nimport ibis.expr.types as ir\nimport ibis.util as util\n\n\nclass FormatMemo:\n # A little sanity hack to simplify the below\n\n def __init__(self):\n from collections import defaultdict\n\n self.formatted = {}\n self.aliases = {}\n self.ops = {}\n self.counts = defaultdict(int)\n self._repr_memo = {}\n self.subexprs = {}\n self.visit_memo = set()\n\n def __contains__(self, obj):\n return self._key(obj) in self.formatted\n\n def _key(self, expr):\n memo = self._repr_memo\n try:\n result = memo[expr]\n except KeyError:\n result = memo[expr] = self._format(expr)\n return result\n\n def _format(self, expr):\n return expr.op()._repr(memo=self)\n\n def observe(self, expr, formatter=None):\n if formatter is None:\n formatter = self._format\n key = self._key(expr)\n if key not in self.formatted:\n self.aliases[key] = 'ref_{:d}'.format(len(self.formatted))\n self.formatted[key] = formatter(expr)\n self.ops[key] = expr.op()\n\n self.counts[key] += 1\n\n def count(self, expr):\n return self.counts[self._key(expr)]\n\n def get_alias(self, expr):\n return self.aliases[self._key(expr)]\n\n def get_formatted(self, expr):\n return self.formatted[self._key(expr)]\n\n\nclass ExprFormatter:\n \"\"\"For creating a nice tree-like representation of an expression graph.\n\n Notes\n -----\n TODO: detect reused DAG nodes and do not display redundant information\n\n \"\"\"\n\n def __init__(\n self, expr, indent_size=2, base_level=0, memo=None, memoize=True\n ):\n self.expr = expr\n self.indent_size = indent_size\n self.base_level = base_level\n\n self.memoize = memoize\n\n # For tracking \"extracted\" objects, like tables, that we don't want to\n # print out more than once, and simply alias in the expression tree\n if memo is None:\n memo = FormatMemo()\n\n self.memo = memo\n\n def get_result(self):\n what = self.expr.op()\n\n if self.memoize:\n self._memoize_tables()\n\n if isinstance(what, ops.TableNode) and what.has_schema():\n # This should also catch aggregations\n if not self.memoize and self.expr in self.memo:\n text = 'Table: %s' % self.memo.get_alias(self.expr)\n elif isinstance(what, ops.PhysicalTable):\n text = self._format_table(self.expr)\n else:\n # Any other node type\n text = self._format_node(self.expr)\n elif isinstance(what, ops.TableColumn):\n text = self._format_column(self.expr)\n elif isinstance(what, ops.Literal):\n text = 'Literal[{}]\\n {}'.format(\n self._get_type_display(), str(what.value)\n )\n elif isinstance(what, ops.ScalarParameter):\n text = 'ScalarParameter[{}]'.format(self._get_type_display())\n elif isinstance(what, ops.Node):\n text = self._format_node(self.expr)\n\n if isinstance(self.expr, ir.ValueExpr) and self.expr._name is not None:\n text = '{} = {}'.format(self.expr.get_name(), text)\n\n if self.memoize:\n alias_to_text = [\n (\n self.memo.aliases[x],\n self.memo.formatted[x],\n self.memo.ops[x],\n )\n for x in self.memo.formatted\n ]\n alias_to_text.sort()\n\n # A hack to suppress printing out of a ref that is the result of\n # the top level expression\n refs = [\n x + '\\n' + y\n for x, y, op in alias_to_text\n if not op.equals(what)\n ]\n\n text = '\\n\\n'.join(refs + [text])\n\n return self._indent(text, self.base_level)\n\n def _memoize_tables(self):\n table_memo_ops = (ops.Aggregation, ops.Selection, ops.SelfReference)\n expr = self.expr\n if expr.op() in self.memo.visit_memo:\n return\n\n stack = [expr]\n seen = set()\n memo = self.memo\n\n while stack:\n e = stack.pop()\n op = e.op()\n\n if op not in seen:\n seen.add(op)\n\n if isinstance(op, ops.PhysicalTable):\n memo.observe(e, self._format_table)\n elif isinstance(op, ops.Node):\n stack.extend(\n arg\n for arg in reversed(op.args)\n if isinstance(arg, ir.Expr)\n )\n if isinstance(op, table_memo_ops):\n memo.observe(e, self._format_node)\n elif isinstance(op, ops.TableNode) and op.has_schema():\n memo.observe(e, self._format_table)\n memo.visit_memo.add(op)\n\n def _indent(self, text, indents=1):\n return util.indent(text, self.indent_size * indents)\n\n def _format_table(self, expr):\n table = expr.op()\n # format the schema\n rows = ['name: {}\\nschema:'.format(table.name)]\n rows.extend(\n map(' {} : {}'.format, table.schema.names, table.schema.types)\n )\n opname = type(table).__name__\n type_display = self._get_type_display(expr)\n opline = '{}[{}]'.format(opname, type_display)\n return '{}\\n{}'.format(opline, self._indent('\\n'.join(rows)))\n\n def _format_column(self, expr):\n # HACK: if column is pulled from a Filter of another table, this parent\n # will not be found in the memo\n col = expr.op()\n parent = col.parent()\n\n if parent not in self.memo:\n self.memo.observe(parent, formatter=self._format_node)\n\n table_formatted = self.memo.get_alias(parent)\n table_formatted = self._indent(table_formatted)\n\n type_display = self._get_type_display(self.expr)\n return \"Column[{0}] '{1}' from table\\n{2}\".format(\n type_display, col.name, table_formatted\n )\n\n def _format_node(self, expr):\n op = expr.op()\n formatted_args = []\n\n def visit(what, extra_indents=0):\n if isinstance(what, ir.Expr):\n result = self._format_subexpr(what)\n else:\n result = self._indent(str(what))\n\n if extra_indents > 0:\n result = util.indent(result, self.indent_size)\n\n formatted_args.append(result)\n\n arg_names = getattr(op, 'display_argnames', op.argnames)\n\n if not arg_names:\n for arg in op.flat_args():\n visit(arg)\n else:\n signature = op.signature\n arg_name_pairs = (\n (arg, name)\n for arg, name in zip(op.args, arg_names)\n if signature[name].show\n )\n for arg, name in arg_name_pairs:\n if name == 'arg' and isinstance(op, ops.ValueOp):\n # don't display first argument's name in repr\n name = None\n if name is not None:\n name = self._indent('{}:'.format(name))\n if util.is_iterable(arg):\n if name is not None and len(arg) > 0:\n formatted_args.append(name)\n indents = 1\n else:\n indents = 0\n for x in arg:\n visit(x, extra_indents=indents)\n else:\n if name is not None:\n formatted_args.append(name)\n indents = 1\n else:\n indents = 0\n visit(arg, extra_indents=indents)\n\n opname = type(op).__name__\n type_display = self._get_type_display(expr)\n opline = '{}[{}]'.format(opname, type_display)\n return '\\n'.join([opline] + formatted_args)\n\n def _format_subexpr(self, expr):\n subexprs = self.memo.subexprs\n key = expr._key\n try:\n result = subexprs[key]\n except KeyError:\n formatter = ExprFormatter(expr, memo=self.memo, memoize=False)\n result = subexprs[key] = self._indent(formatter.get_result(), 1)\n return result\n\n def _get_type_display(self, expr=None):\n if expr is None:\n expr = self.expr\n return expr._type_display()\n", "path": "ibis/expr/format.py" } ]
diff --git a/docs/source/release/index.rst b/docs/source/release/index.rst index e0c1b0b87bad..92158b9b87ff 100644 --- a/docs/source/release/index.rst +++ b/docs/source/release/index.rst @@ -12,6 +12,7 @@ Release Notes These release notes are for versions of ibis **1.0 and later**. Release notes for pre-1.0 versions of ibis can be found at :doc:`release-pre-1.0` +* :bug:`2229` Fix same column with multiple aliases not showing properly in repr * :feature:`2233` Add ibis.pandas.trace module to log time and call stack information. * :feature:`2198` Validate that the output type of a UDF is a single element * :bug:`2223` Fix PySpark compiler error when elementwise UDF output_type is Decimal or Timestamp diff --git a/ibis/expr/format.py b/ibis/expr/format.py index 7db80fbd48fa..c2a17a98822c 100644 --- a/ibis/expr/format.py +++ b/ibis/expr/format.py @@ -248,7 +248,7 @@ def visit(what, extra_indents=0): def _format_subexpr(self, expr): subexprs = self.memo.subexprs - key = expr.op() + key = expr._key try: result = subexprs[key] except KeyError: diff --git a/ibis/expr/tests/test_format.py b/ibis/expr/tests/test_format.py index 6dd0bc589399..e7b5a8ef4642 100644 --- a/ibis/expr/tests/test_format.py +++ b/ibis/expr/tests/test_format.py @@ -246,3 +246,25 @@ def output_type(self): second_arg: 2.0""" assert result == expected + + +def test_same_column_multiple_aliases(): + table = ibis.table([('col', 'int64')], name='t') + expr = table[table.col.name('alias1'), table.col.name('alias2')] + result = repr(expr) + expected = """\ +ref_0 +UnboundTable[table] + name: t + schema: + col : int64 + +Selection[table] + table: + Table: ref_0 + selections: + alias1 = Column[int64*] 'col' from table + ref_0 + alias2 = Column[int64*] 'col' from table + ref_0""" + assert result == expected
pwr-Solaar__Solaar-1826
Release 1.1.7
[ { "content": "#!/usr/bin/env python3\n\nfrom glob import glob as _glob\n\ntry:\n from setuptools import setup\nexcept ImportError:\n from distutils.core import setup\n\n# from solaar import NAME, __version__\n__version__ = '1.1.7'\nNAME = 'Solaar'\n\n\ndef _data_files():\n from os.path import dirname as _dirname\n\n yield 'share/solaar/icons', _glob('share/solaar/icons/solaar*.svg')\n yield 'share/solaar/icons', _glob('share/solaar/icons/light_*.png')\n yield 'share/icons/hicolor/scalable/apps', ['share/solaar/icons/solaar.svg']\n\n for mo in _glob('share/locale/*/LC_MESSAGES/solaar.mo'):\n yield _dirname(mo), [mo]\n\n yield 'share/applications', ['share/applications/solaar.desktop']\n yield 'share/solaar/udev-rules.d', ['rules.d/42-logitech-unify-permissions.rules']\n yield 'share/metainfo', ['share/solaar/io.github.pwr_solaar.solaar.metainfo.xml']\n\n del _dirname\n\n\nsetup(\n name=NAME.lower(),\n version=__version__,\n description='Linux device manager for Logitech receivers, keyboards, mice, and tablets.',\n long_description='''\nSolaar is a Linux device manager for many Logitech peripherals that connect through\nUnifying and other receivers or via USB or Bluetooth.\nSolaar is able to pair/unpair devices with receivers and show and modify some of the\nmodifiable features of devices.\nFor instructions on installing Solaar see https://pwr-solaar.github.io/Solaar/installation'''.strip(),\n author='Daniel Pavel',\n license='GPLv2',\n url='http://pwr-solaar.github.io/Solaar/',\n classifiers=[\n 'Development Status :: 4 - Beta',\n 'Environment :: X11 Applications :: GTK',\n 'Environment :: Console',\n 'Intended Audience :: End Users/Desktop',\n 'License :: DFSG approved',\n 'License :: OSI Approved :: GNU General Public License v2 (GPLv2)',\n 'Natural Language :: English',\n 'Programming Language :: Python :: 3 :: Only',\n 'Operating System :: POSIX :: Linux',\n 'Topic :: Utilities',\n ],\n platforms=['linux'],\n\n # sudo apt install python-gi python3-gi \\\n # gir1.2-gtk-3.0 gir1.2-notify-0.7 gir1.2-ayatanaappindicator3-0.1\n # os_requires=['gi.repository.GObject (>= 2.0)', 'gi.repository.Gtk (>= 3.0)'],\n python_requires='>=3.7',\n install_requires=[\n 'evdev (>= 1.1.2)',\n 'pyudev (>= 0.13)',\n 'PyYAML (>= 3.12)',\n 'python-xlib (>= 0.27)',\n 'psutil (>= 5.4.3)',\n 'typing_extensions (>=4.0.0)',\n ],\n extras_require={\n 'report-descriptor': ['hid-parser'],\n 'desktop-notifications': ['Notify (>= 0.7)'],\n },\n package_dir={'': 'lib'},\n packages=['keysyms', 'hidapi', 'logitech_receiver', 'solaar', 'solaar.ui', 'solaar.cli'],\n data_files=list(_data_files()),\n scripts=_glob('bin/*'),\n)\n", "path": "setup.py" } ]
[ { "content": "#!/usr/bin/env python3\n\nfrom glob import glob as _glob\n\ntry:\n from setuptools import setup\nexcept ImportError:\n from distutils.core import setup\n\n# from solaar import NAME, __version__\n__version__ = '1.1.7'\nNAME = 'Solaar'\n\n\ndef _data_files():\n from os.path import dirname as _dirname\n\n yield 'share/solaar/icons', _glob('share/solaar/icons/solaar*.svg')\n yield 'share/solaar/icons', _glob('share/solaar/icons/light_*.png')\n yield 'share/icons/hicolor/scalable/apps', ['share/solaar/icons/solaar.svg']\n\n for mo in _glob('share/locale/*/LC_MESSAGES/solaar.mo'):\n yield _dirname(mo), [mo]\n\n yield 'share/applications', ['share/applications/solaar.desktop']\n yield 'share/solaar/udev-rules.d', ['rules.d/42-logitech-unify-permissions.rules']\n yield 'share/metainfo', ['share/solaar/io.github.pwr_solaar.solaar.metainfo.xml']\n\n del _dirname\n\n\nsetup(\n name=NAME.lower(),\n version=__version__,\n description='Linux device manager for Logitech receivers, keyboards, mice, and tablets.',\n long_description='''\nSolaar is a Linux device manager for many Logitech peripherals that connect through\nUnifying and other receivers or via USB or Bluetooth.\nSolaar is able to pair/unpair devices with receivers and show and modify some of the\nmodifiable features of devices.\nFor instructions on installing Solaar see https://pwr-solaar.github.io/Solaar/installation'''.strip(),\n author='Daniel Pavel',\n license='GPLv2',\n url='http://pwr-solaar.github.io/Solaar/',\n classifiers=[\n 'Development Status :: 4 - Beta',\n 'Environment :: X11 Applications :: GTK',\n 'Environment :: Console',\n 'Intended Audience :: End Users/Desktop',\n 'License :: DFSG approved',\n 'License :: OSI Approved :: GNU General Public License v2 (GPLv2)',\n 'Natural Language :: English',\n 'Programming Language :: Python :: 3 :: Only',\n 'Operating System :: POSIX :: Linux',\n 'Topic :: Utilities',\n ],\n platforms=['linux'],\n\n # sudo apt install python-gi python3-gi \\\n # gir1.2-gtk-3.0 gir1.2-notify-0.7 gir1.2-ayatanaappindicator3-0.1\n # os_requires=['gi.repository.GObject (>= 2.0)', 'gi.repository.Gtk (>= 3.0)'],\n python_requires='>=3.7',\n install_requires=[\n 'evdev (>= 1.1.2)',\n 'pyudev (>= 0.13)',\n 'PyYAML (>= 3.12)',\n 'python-xlib (>= 0.27)',\n 'psutil (>= 5.4.3)',\n ],\n extras_require={\n 'report-descriptor': ['hid-parser'],\n 'desktop-notifications': ['Notify (>= 0.7)'],\n },\n package_dir={'': 'lib'},\n packages=['keysyms', 'hidapi', 'logitech_receiver', 'solaar', 'solaar.ui', 'solaar.cli'],\n data_files=list(_data_files()),\n scripts=_glob('bin/*'),\n)\n", "path": "setup.py" } ]
diff --git a/setup.py b/setup.py index ae7045cba8..38982eebd4 100755 --- a/setup.py +++ b/setup.py @@ -66,7 +66,6 @@ def _data_files(): 'PyYAML (>= 3.12)', 'python-xlib (>= 0.27)', 'psutil (>= 5.4.3)', - 'typing_extensions (>=4.0.0)', ], extras_require={ 'report-descriptor': ['hid-parser'],
microsoft__Qcodes-87
PR #70 breaks parameter .get and .set functionality I cannot debug the issue properly because all the objects are `multiprocessing` objects. A minimal example showing the issue: ``` python %matplotlib nbagg import matplotlib.pyplot as plt import time import numpy as np import qcodes as qc from toymodel import AModel, MockGates, MockSource, MockMeter, AverageGetter, AverageAndRaw # now create this "experiment" model = AModel() gates = MockGates('gates', model=model) c0, c1, c2 = gates.chan0, gates.chan1, gates.chan2 print('fine so far...') print('error...') c2.get() print('no effect?') c2.set(0.5) ```
[ { "content": "# code for example notebook\n\nimport math\n\nfrom qcodes import MockInstrument, MockModel, Parameter, Loop, DataArray\nfrom qcodes.utils.validators import Numbers\n\n\nclass AModel(MockModel):\n def __init__(self):\n self._gates = [0.0, 0.0, 0.0]\n self._excitation = 0.1\n super().__init__()\n\n def _output(self):\n # my super exciting model!\n # make a nice pattern that looks sort of double-dotty\n # with the first two gates controlling the two dots,\n # and the third looking like Vsd\n delta_i = 10\n delta_j = 10\n di = (self._gates[0] + delta_i / 2) % delta_i - delta_i / 2\n dj = (self._gates[1] + delta_j / 2) % delta_j - delta_j / 2\n vsd = math.sqrt(self._gates[2]**2 + self._excitation**2)\n dij = math.sqrt(di**2 + dj**2) - vsd\n g = (vsd**2 + 1) * (1 / (dij**2 + 1) +\n 0.1 * (math.atan(-dij) + math.pi / 2))\n return g\n\n def fmt(self, value):\n return '{:.3f}'.format(value)\n\n def gates_set(self, parameter, value):\n if parameter[0] == 'c':\n self._gates[int(parameter[1:])] = float(value)\n elif parameter == 'rst' and value is None:\n self._gates = [0.0, 0.0, 0.0]\n else:\n raise ValueError\n\n def gates_get(self, parameter):\n if parameter[0] == 'c':\n return self.fmt(self.gates[int(parameter[1:])])\n else:\n raise ValueError\n\n def source_set(self, parameter, value):\n if parameter == 'ampl':\n self._excitation = float(value)\n else:\n raise ValueError\n\n def source_get(self, parameter):\n if parameter == 'ampl':\n return self.fmt(self._excitation)\n else:\n raise ValueError\n\n def meter_get(self, parameter):\n if parameter == 'ampl':\n return self.fmt(self._output() * self._excitation)\n else:\n raise ValueError\n\n\n# make our mock instruments\n# real instruments would subclass IPInstrument or VisaInstrument\n# or just the base Instrument instead of MockInstrument,\n# and be instantiated with an address rather than a model\nclass MockGates(MockInstrument):\n def __init__(self, name, model=None, **kwargs):\n super().__init__(name, model=model, **kwargs)\n\n for i in range(3):\n cmdbase = 'c{}'.format(i)\n self.add_parameter('chan{}'.format(i),\n label='Gate Channel {} (mV)'.format(i),\n get_cmd=cmdbase + '?',\n set_cmd=cmdbase + ':{:.4f}',\n get_parser=float,\n vals=Numbers(-100, 100))\n\n self.add_function('reset', call_cmd='rst')\n\n\nclass MockSource(MockInstrument):\n def __init__(self, name, model=None, **kwargs):\n super().__init__(name, model=model, **kwargs)\n\n # this parameter uses built-in sweeping to change slowly\n self.add_parameter('amplitude',\n label='Source Amplitude (\\u03bcV)',\n get_cmd='ampl?',\n set_cmd='ampl:{:.4f}',\n get_parser=float,\n vals=Numbers(0, 10),\n sweep_step=0.1,\n sweep_delay=0.05)\n\n\nclass MockMeter(MockInstrument):\n def __init__(self, name, model=None, **kwargs):\n super().__init__(name, model=model, **kwargs)\n\n self.add_parameter('amplitude',\n label='Current (nA)',\n get_cmd='ampl?',\n get_parser=float)\n\n\nclass AverageGetter(Parameter):\n def __init__(self, measured_param, sweep_values, delay):\n super().__init__(name='avg_' + measured_param.name)\n self.measured_param = measured_param\n self.sweep_values = sweep_values\n self.delay = delay\n if hasattr(measured_param, 'label'):\n self.label = 'Average: ' + measured_param.label\n\n def get(self):\n loop = Loop(self.sweep_values, self.delay).each(self.measured_param)\n data = loop.run_temp()\n return data.arrays[self.measured_param.name].mean()\n\n\nclass AverageAndRaw(Parameter):\n def __init__(self, measured_param, sweep_values, delay):\n name = measured_param.name\n super().__init__(names=(name, 'avg_' + name))\n self.measured_param = measured_param\n self.sweep_values = sweep_values\n self.delay = delay\n self.sizes = (len(sweep_values), None)\n set_array = DataArray(parameter=sweep_values.parameter,\n preset_data=sweep_values)\n self.setpoints = (set_array, None)\n if hasattr(measured_param, 'label'):\n self.labels = (measured_param.label,\n 'Average: ' + measured_param.label)\n\n def get(self):\n loop = Loop(self.sweep_values, self.delay).each(self.measured_param)\n data = loop.run_temp()\n array = data.arrays[self.measured_param.name]\n return (array, array.mean())\n", "path": "docs/examples/toymodel.py" } ]
[ { "content": "# code for example notebook\n\nimport math\n\nfrom qcodes import MockInstrument, MockModel, Parameter, Loop, DataArray\nfrom qcodes.utils.validators import Numbers\n\n\nclass AModel(MockModel):\n def __init__(self):\n self._gates = [0.0, 0.0, 0.0]\n self._excitation = 0.1\n super().__init__()\n\n def _output(self):\n # my super exciting model!\n # make a nice pattern that looks sort of double-dotty\n # with the first two gates controlling the two dots,\n # and the third looking like Vsd\n delta_i = 10\n delta_j = 10\n di = (self._gates[0] + delta_i / 2) % delta_i - delta_i / 2\n dj = (self._gates[1] + delta_j / 2) % delta_j - delta_j / 2\n vsd = math.sqrt(self._gates[2]**2 + self._excitation**2)\n dij = math.sqrt(di**2 + dj**2) - vsd\n g = (vsd**2 + 1) * (1 / (dij**2 + 1) +\n 0.1 * (math.atan(-dij) + math.pi / 2))\n return g\n\n def fmt(self, value):\n return '{:.3f}'.format(value)\n\n def gates_set(self, parameter, value):\n if parameter[0] == 'c':\n self._gates[int(parameter[1:])] = float(value)\n elif parameter == 'rst' and value is None:\n self._gates = [0.0, 0.0, 0.0]\n else:\n raise ValueError\n\n def gates_get(self, parameter):\n if parameter[0] == 'c':\n return self.fmt(self._gates[int(parameter[1:])])\n else:\n raise ValueError\n\n def source_set(self, parameter, value):\n if parameter == 'ampl':\n self._excitation = float(value)\n else:\n raise ValueError\n\n def source_get(self, parameter):\n if parameter == 'ampl':\n return self.fmt(self._excitation)\n else:\n raise ValueError\n\n def meter_get(self, parameter):\n if parameter == 'ampl':\n return self.fmt(self._output() * self._excitation)\n else:\n raise ValueError\n\n\n# make our mock instruments\n# real instruments would subclass IPInstrument or VisaInstrument\n# or just the base Instrument instead of MockInstrument,\n# and be instantiated with an address rather than a model\nclass MockGates(MockInstrument):\n def __init__(self, name, model=None, **kwargs):\n super().__init__(name, model=model, **kwargs)\n\n for i in range(3):\n cmdbase = 'c{}'.format(i)\n self.add_parameter('chan{}'.format(i),\n label='Gate Channel {} (mV)'.format(i),\n get_cmd=cmdbase + '?',\n set_cmd=cmdbase + ':{:.4f}',\n get_parser=float,\n vals=Numbers(-100, 100))\n\n self.add_function('reset', call_cmd='rst')\n\n\nclass MockSource(MockInstrument):\n def __init__(self, name, model=None, **kwargs):\n super().__init__(name, model=model, **kwargs)\n\n # this parameter uses built-in sweeping to change slowly\n self.add_parameter('amplitude',\n label='Source Amplitude (\\u03bcV)',\n get_cmd='ampl?',\n set_cmd='ampl:{:.4f}',\n get_parser=float,\n vals=Numbers(0, 10),\n sweep_step=0.1,\n sweep_delay=0.05)\n\n\nclass MockMeter(MockInstrument):\n def __init__(self, name, model=None, **kwargs):\n super().__init__(name, model=model, **kwargs)\n\n self.add_parameter('amplitude',\n label='Current (nA)',\n get_cmd='ampl?',\n get_parser=float)\n\n\nclass AverageGetter(Parameter):\n def __init__(self, measured_param, sweep_values, delay):\n super().__init__(name='avg_' + measured_param.name)\n self.measured_param = measured_param\n self.sweep_values = sweep_values\n self.delay = delay\n if hasattr(measured_param, 'label'):\n self.label = 'Average: ' + measured_param.label\n\n def get(self):\n loop = Loop(self.sweep_values, self.delay).each(self.measured_param)\n data = loop.run_temp()\n return data.arrays[self.measured_param.name].mean()\n\n\nclass AverageAndRaw(Parameter):\n def __init__(self, measured_param, sweep_values, delay):\n name = measured_param.name\n super().__init__(names=(name, 'avg_' + name))\n self.measured_param = measured_param\n self.sweep_values = sweep_values\n self.delay = delay\n self.sizes = (len(sweep_values), None)\n set_array = DataArray(parameter=sweep_values.parameter,\n preset_data=sweep_values)\n self.setpoints = (set_array, None)\n if hasattr(measured_param, 'label'):\n self.labels = (measured_param.label,\n 'Average: ' + measured_param.label)\n\n def get(self):\n loop = Loop(self.sweep_values, self.delay).each(self.measured_param)\n data = loop.run_temp()\n array = data.arrays[self.measured_param.name]\n return (array, array.mean())\n", "path": "docs/examples/toymodel.py" } ]
diff --git a/docs/examples/toymodel.py b/docs/examples/toymodel.py index bccbeaae2dc..2731171d07f 100644 --- a/docs/examples/toymodel.py +++ b/docs/examples/toymodel.py @@ -40,7 +40,7 @@ def gates_set(self, parameter, value): def gates_get(self, parameter): if parameter[0] == 'c': - return self.fmt(self.gates[int(parameter[1:])]) + return self.fmt(self._gates[int(parameter[1:])]) else: raise ValueError
doccano__doccano-603
[Tiny enhancement request] Allow digit keys as shortkeys Feature description --------- English letters are allowed as shortkeys for annotation now, only. Proposition: allow English letters and digits as shortkeys.
[ { "content": "import string\n\nfrom django.db import models\nfrom django.dispatch import receiver\nfrom django.db.models.signals import post_save, pre_delete\nfrom django.urls import reverse\nfrom django.conf import settings\nfrom django.contrib.auth.models import User\nfrom django.contrib.staticfiles.storage import staticfiles_storage\nfrom django.core.exceptions import ValidationError\nfrom polymorphic.models import PolymorphicModel\n\nfrom .managers import AnnotationManager, Seq2seqAnnotationManager\n\nDOCUMENT_CLASSIFICATION = 'DocumentClassification'\nSEQUENCE_LABELING = 'SequenceLabeling'\nSEQ2SEQ = 'Seq2seq'\nPROJECT_CHOICES = (\n (DOCUMENT_CLASSIFICATION, 'document classification'),\n (SEQUENCE_LABELING, 'sequence labeling'),\n (SEQ2SEQ, 'sequence to sequence'),\n)\n\n\nclass Project(PolymorphicModel):\n name = models.CharField(max_length=100)\n description = models.TextField(default='')\n guideline = models.TextField(default='')\n created_at = models.DateTimeField(auto_now_add=True)\n updated_at = models.DateTimeField(auto_now=True)\n users = models.ManyToManyField(User, related_name='projects')\n project_type = models.CharField(max_length=30, choices=PROJECT_CHOICES)\n randomize_document_order = models.BooleanField(default=False)\n collaborative_annotation = models.BooleanField(default=False)\n\n def get_absolute_url(self):\n return reverse('upload', args=[self.id])\n\n @property\n def image(self):\n raise NotImplementedError()\n\n def get_bundle_name(self):\n raise NotImplementedError()\n\n def get_bundle_name_upload(self):\n raise NotImplementedError()\n\n def get_bundle_name_download(self):\n raise NotImplementedError()\n\n def get_annotation_serializer(self):\n raise NotImplementedError()\n\n def get_annotation_class(self):\n raise NotImplementedError()\n\n def get_storage(self, data):\n raise NotImplementedError()\n\n def __str__(self):\n return self.name\n\n\nclass TextClassificationProject(Project):\n\n @property\n def image(self):\n return staticfiles_storage.url('assets/images/cats/text_classification.jpg')\n\n def get_bundle_name(self):\n return 'document_classification'\n\n def get_bundle_name_upload(self):\n return 'upload_text_classification'\n\n def get_bundle_name_download(self):\n return 'download_text_classification'\n\n def get_annotation_serializer(self):\n from .serializers import DocumentAnnotationSerializer\n return DocumentAnnotationSerializer\n\n def get_annotation_class(self):\n return DocumentAnnotation\n\n def get_storage(self, data):\n from .utils import ClassificationStorage\n return ClassificationStorage(data, self)\n\n\nclass SequenceLabelingProject(Project):\n\n @property\n def image(self):\n return staticfiles_storage.url('assets/images/cats/sequence_labeling.jpg')\n\n def get_bundle_name(self):\n return 'sequence_labeling'\n\n def get_bundle_name_upload(self):\n return 'upload_sequence_labeling'\n\n def get_bundle_name_download(self):\n return 'download_sequence_labeling'\n\n def get_annotation_serializer(self):\n from .serializers import SequenceAnnotationSerializer\n return SequenceAnnotationSerializer\n\n def get_annotation_class(self):\n return SequenceAnnotation\n\n def get_storage(self, data):\n from .utils import SequenceLabelingStorage\n return SequenceLabelingStorage(data, self)\n\n\nclass Seq2seqProject(Project):\n\n @property\n def image(self):\n return staticfiles_storage.url('assets/images/cats/seq2seq.jpg')\n\n def get_bundle_name(self):\n return 'seq2seq'\n\n def get_bundle_name_upload(self):\n return 'upload_seq2seq'\n\n def get_bundle_name_download(self):\n return 'download_seq2seq'\n\n def get_annotation_serializer(self):\n from .serializers import Seq2seqAnnotationSerializer\n return Seq2seqAnnotationSerializer\n\n def get_annotation_class(self):\n return Seq2seqAnnotation\n\n def get_storage(self, data):\n from .utils import Seq2seqStorage\n return Seq2seqStorage(data, self)\n\n\nclass Label(models.Model):\n PREFIX_KEYS = (\n ('ctrl', 'ctrl'),\n ('shift', 'shift'),\n ('ctrl shift', 'ctrl shift')\n )\n SUFFIX_KEYS = tuple(\n (c, c) for c in string.ascii_lowercase\n )\n\n text = models.CharField(max_length=100)\n prefix_key = models.CharField(max_length=10, blank=True, null=True, choices=PREFIX_KEYS)\n suffix_key = models.CharField(max_length=1, blank=True, null=True, choices=SUFFIX_KEYS)\n project = models.ForeignKey(Project, related_name='labels', on_delete=models.CASCADE)\n background_color = models.CharField(max_length=7, default='#209cee')\n text_color = models.CharField(max_length=7, default='#ffffff')\n created_at = models.DateTimeField(auto_now_add=True)\n updated_at = models.DateTimeField(auto_now=True)\n\n def __str__(self):\n return self.text\n\n def clean(self):\n # Don't allow shortcut key not to have a suffix key.\n if self.prefix_key and not self.suffix_key:\n raise ValidationError('Shortcut key may not have a suffix key.')\n\n # each shortcut (prefix key + suffix key) can only be assigned to one label\n if self.suffix_key or self.prefix_key:\n other_labels = self.project.labels.exclude(id=self.id)\n if other_labels.filter(suffix_key=self.suffix_key, prefix_key=self.prefix_key).exists():\n raise ValidationError('A label with this shortcut already exists in the project')\n\n super().clean()\n\n class Meta:\n unique_together = (\n ('project', 'text'),\n )\n\n\nclass Document(models.Model):\n text = models.TextField()\n project = models.ForeignKey(Project, related_name='documents', on_delete=models.CASCADE)\n meta = models.TextField(default='{}')\n created_at = models.DateTimeField(auto_now_add=True)\n updated_at = models.DateTimeField(auto_now=True)\n annotations_approved_by = models.ForeignKey(User, on_delete=models.SET_NULL, null=True)\n\n def __str__(self):\n return self.text[:50]\n\n\nclass Annotation(models.Model):\n objects = AnnotationManager()\n\n prob = models.FloatField(default=0.0)\n manual = models.BooleanField(default=False)\n user = models.ForeignKey(User, on_delete=models.CASCADE)\n created_at = models.DateTimeField(auto_now_add=True)\n updated_at = models.DateTimeField(auto_now=True)\n\n class Meta:\n abstract = True\n\n\nclass DocumentAnnotation(Annotation):\n document = models.ForeignKey(Document, related_name='doc_annotations', on_delete=models.CASCADE)\n label = models.ForeignKey(Label, on_delete=models.CASCADE)\n\n class Meta:\n unique_together = ('document', 'user', 'label')\n\n\nclass SequenceAnnotation(Annotation):\n document = models.ForeignKey(Document, related_name='seq_annotations', on_delete=models.CASCADE)\n label = models.ForeignKey(Label, on_delete=models.CASCADE)\n start_offset = models.IntegerField()\n end_offset = models.IntegerField()\n\n def clean(self):\n if self.start_offset >= self.end_offset:\n raise ValidationError('start_offset is after end_offset')\n\n class Meta:\n unique_together = ('document', 'user', 'label', 'start_offset', 'end_offset')\n\n\nclass Seq2seqAnnotation(Annotation):\n # Override AnnotationManager for custom functionality\n objects = Seq2seqAnnotationManager()\n\n document = models.ForeignKey(Document, related_name='seq2seq_annotations', on_delete=models.CASCADE)\n text = models.CharField(max_length=500)\n\n class Meta:\n unique_together = ('document', 'user', 'text')\n\n\nclass Role(models.Model):\n name = models.CharField(max_length=100, unique=True)\n description = models.TextField(default='')\n created_at = models.DateTimeField(auto_now_add=True)\n updated_at = models.DateTimeField(auto_now=True)\n\n def __str__(self):\n return self.name\n\n\nclass RoleMapping(models.Model):\n user = models.ForeignKey(User, related_name='role_mappings', on_delete=models.CASCADE)\n project = models.ForeignKey(Project, related_name='role_mappings', on_delete=models.CASCADE)\n role = models.ForeignKey(Role, on_delete=models.CASCADE)\n created_at = models.DateTimeField(auto_now_add=True)\n updated_at = models.DateTimeField(auto_now=True)\n\n def clean(self):\n other_rolemappings = self.project.role_mappings.exclude(id=self.id)\n\n if other_rolemappings.filter(user=self.user, project=self.project).exists():\n raise ValidationError('This user is already assigned to a role in this project.')\n\n class Meta:\n unique_together = (\"user\", \"project\", \"role\")\n\n\n@receiver(post_save, sender=RoleMapping)\ndef add_linked_project(sender, instance, created, **kwargs):\n if not created:\n return\n userInstance = instance.user\n projectInstance = instance.project\n if userInstance and projectInstance:\n user = User.objects.get(pk=userInstance.pk)\n project = Project.objects.get(pk=projectInstance.pk)\n user.projects.add(project)\n user.save()\n\n\n@receiver(post_save)\ndef add_superusers_to_project(sender, instance, created, **kwargs):\n if not created:\n return\n if sender not in Project.__subclasses__():\n return\n superusers = User.objects.filter(is_superuser=True)\n admin_role = Role.objects.filter(name=settings.ROLE_PROJECT_ADMIN).first()\n if superusers and admin_role:\n RoleMapping.objects.bulk_create(\n [RoleMapping(role_id=admin_role.id, user_id=superuser.id, project_id=instance.id)\n for superuser in superusers]\n )\n\n\n@receiver(post_save, sender=User)\ndef add_new_superuser_to_projects(sender, instance, created, **kwargs):\n if created and instance.is_superuser:\n admin_role = Role.objects.filter(name=settings.ROLE_PROJECT_ADMIN).first()\n projects = Project.objects.all()\n if admin_role and projects:\n RoleMapping.objects.bulk_create(\n [RoleMapping(role_id=admin_role.id, user_id=instance.id, project_id=project.id)\n for project in projects]\n )\n\n\n@receiver(pre_delete, sender=RoleMapping)\ndef delete_linked_project(sender, instance, using, **kwargs):\n userInstance = instance.user\n projectInstance = instance.project\n if userInstance and projectInstance:\n user = User.objects.get(pk=userInstance.pk)\n project = Project.objects.get(pk=projectInstance.pk)\n user.projects.remove(project)\n user.save()\n", "path": "app/api/models.py" } ]
[ { "content": "import string\n\nfrom django.db import models\nfrom django.dispatch import receiver\nfrom django.db.models.signals import post_save, pre_delete\nfrom django.urls import reverse\nfrom django.conf import settings\nfrom django.contrib.auth.models import User\nfrom django.contrib.staticfiles.storage import staticfiles_storage\nfrom django.core.exceptions import ValidationError\nfrom polymorphic.models import PolymorphicModel\n\nfrom .managers import AnnotationManager, Seq2seqAnnotationManager\n\nDOCUMENT_CLASSIFICATION = 'DocumentClassification'\nSEQUENCE_LABELING = 'SequenceLabeling'\nSEQ2SEQ = 'Seq2seq'\nPROJECT_CHOICES = (\n (DOCUMENT_CLASSIFICATION, 'document classification'),\n (SEQUENCE_LABELING, 'sequence labeling'),\n (SEQ2SEQ, 'sequence to sequence'),\n)\n\n\nclass Project(PolymorphicModel):\n name = models.CharField(max_length=100)\n description = models.TextField(default='')\n guideline = models.TextField(default='')\n created_at = models.DateTimeField(auto_now_add=True)\n updated_at = models.DateTimeField(auto_now=True)\n users = models.ManyToManyField(User, related_name='projects')\n project_type = models.CharField(max_length=30, choices=PROJECT_CHOICES)\n randomize_document_order = models.BooleanField(default=False)\n collaborative_annotation = models.BooleanField(default=False)\n\n def get_absolute_url(self):\n return reverse('upload', args=[self.id])\n\n @property\n def image(self):\n raise NotImplementedError()\n\n def get_bundle_name(self):\n raise NotImplementedError()\n\n def get_bundle_name_upload(self):\n raise NotImplementedError()\n\n def get_bundle_name_download(self):\n raise NotImplementedError()\n\n def get_annotation_serializer(self):\n raise NotImplementedError()\n\n def get_annotation_class(self):\n raise NotImplementedError()\n\n def get_storage(self, data):\n raise NotImplementedError()\n\n def __str__(self):\n return self.name\n\n\nclass TextClassificationProject(Project):\n\n @property\n def image(self):\n return staticfiles_storage.url('assets/images/cats/text_classification.jpg')\n\n def get_bundle_name(self):\n return 'document_classification'\n\n def get_bundle_name_upload(self):\n return 'upload_text_classification'\n\n def get_bundle_name_download(self):\n return 'download_text_classification'\n\n def get_annotation_serializer(self):\n from .serializers import DocumentAnnotationSerializer\n return DocumentAnnotationSerializer\n\n def get_annotation_class(self):\n return DocumentAnnotation\n\n def get_storage(self, data):\n from .utils import ClassificationStorage\n return ClassificationStorage(data, self)\n\n\nclass SequenceLabelingProject(Project):\n\n @property\n def image(self):\n return staticfiles_storage.url('assets/images/cats/sequence_labeling.jpg')\n\n def get_bundle_name(self):\n return 'sequence_labeling'\n\n def get_bundle_name_upload(self):\n return 'upload_sequence_labeling'\n\n def get_bundle_name_download(self):\n return 'download_sequence_labeling'\n\n def get_annotation_serializer(self):\n from .serializers import SequenceAnnotationSerializer\n return SequenceAnnotationSerializer\n\n def get_annotation_class(self):\n return SequenceAnnotation\n\n def get_storage(self, data):\n from .utils import SequenceLabelingStorage\n return SequenceLabelingStorage(data, self)\n\n\nclass Seq2seqProject(Project):\n\n @property\n def image(self):\n return staticfiles_storage.url('assets/images/cats/seq2seq.jpg')\n\n def get_bundle_name(self):\n return 'seq2seq'\n\n def get_bundle_name_upload(self):\n return 'upload_seq2seq'\n\n def get_bundle_name_download(self):\n return 'download_seq2seq'\n\n def get_annotation_serializer(self):\n from .serializers import Seq2seqAnnotationSerializer\n return Seq2seqAnnotationSerializer\n\n def get_annotation_class(self):\n return Seq2seqAnnotation\n\n def get_storage(self, data):\n from .utils import Seq2seqStorage\n return Seq2seqStorage(data, self)\n\n\nclass Label(models.Model):\n PREFIX_KEYS = (\n ('ctrl', 'ctrl'),\n ('shift', 'shift'),\n ('ctrl shift', 'ctrl shift')\n )\n SUFFIX_KEYS = tuple(\n (c, c) for c in string.digits + string.ascii_lowercase\n )\n\n text = models.CharField(max_length=100)\n prefix_key = models.CharField(max_length=10, blank=True, null=True, choices=PREFIX_KEYS)\n suffix_key = models.CharField(max_length=1, blank=True, null=True, choices=SUFFIX_KEYS)\n project = models.ForeignKey(Project, related_name='labels', on_delete=models.CASCADE)\n background_color = models.CharField(max_length=7, default='#209cee')\n text_color = models.CharField(max_length=7, default='#ffffff')\n created_at = models.DateTimeField(auto_now_add=True)\n updated_at = models.DateTimeField(auto_now=True)\n\n def __str__(self):\n return self.text\n\n def clean(self):\n # Don't allow shortcut key not to have a suffix key.\n if self.prefix_key and not self.suffix_key:\n raise ValidationError('Shortcut key may not have a suffix key.')\n\n # each shortcut (prefix key + suffix key) can only be assigned to one label\n if self.suffix_key or self.prefix_key:\n other_labels = self.project.labels.exclude(id=self.id)\n if other_labels.filter(suffix_key=self.suffix_key, prefix_key=self.prefix_key).exists():\n raise ValidationError('A label with this shortcut already exists in the project')\n\n super().clean()\n\n class Meta:\n unique_together = (\n ('project', 'text'),\n )\n\n\nclass Document(models.Model):\n text = models.TextField()\n project = models.ForeignKey(Project, related_name='documents', on_delete=models.CASCADE)\n meta = models.TextField(default='{}')\n created_at = models.DateTimeField(auto_now_add=True)\n updated_at = models.DateTimeField(auto_now=True)\n annotations_approved_by = models.ForeignKey(User, on_delete=models.SET_NULL, null=True)\n\n def __str__(self):\n return self.text[:50]\n\n\nclass Annotation(models.Model):\n objects = AnnotationManager()\n\n prob = models.FloatField(default=0.0)\n manual = models.BooleanField(default=False)\n user = models.ForeignKey(User, on_delete=models.CASCADE)\n created_at = models.DateTimeField(auto_now_add=True)\n updated_at = models.DateTimeField(auto_now=True)\n\n class Meta:\n abstract = True\n\n\nclass DocumentAnnotation(Annotation):\n document = models.ForeignKey(Document, related_name='doc_annotations', on_delete=models.CASCADE)\n label = models.ForeignKey(Label, on_delete=models.CASCADE)\n\n class Meta:\n unique_together = ('document', 'user', 'label')\n\n\nclass SequenceAnnotation(Annotation):\n document = models.ForeignKey(Document, related_name='seq_annotations', on_delete=models.CASCADE)\n label = models.ForeignKey(Label, on_delete=models.CASCADE)\n start_offset = models.IntegerField()\n end_offset = models.IntegerField()\n\n def clean(self):\n if self.start_offset >= self.end_offset:\n raise ValidationError('start_offset is after end_offset')\n\n class Meta:\n unique_together = ('document', 'user', 'label', 'start_offset', 'end_offset')\n\n\nclass Seq2seqAnnotation(Annotation):\n # Override AnnotationManager for custom functionality\n objects = Seq2seqAnnotationManager()\n\n document = models.ForeignKey(Document, related_name='seq2seq_annotations', on_delete=models.CASCADE)\n text = models.CharField(max_length=500)\n\n class Meta:\n unique_together = ('document', 'user', 'text')\n\n\nclass Role(models.Model):\n name = models.CharField(max_length=100, unique=True)\n description = models.TextField(default='')\n created_at = models.DateTimeField(auto_now_add=True)\n updated_at = models.DateTimeField(auto_now=True)\n\n def __str__(self):\n return self.name\n\n\nclass RoleMapping(models.Model):\n user = models.ForeignKey(User, related_name='role_mappings', on_delete=models.CASCADE)\n project = models.ForeignKey(Project, related_name='role_mappings', on_delete=models.CASCADE)\n role = models.ForeignKey(Role, on_delete=models.CASCADE)\n created_at = models.DateTimeField(auto_now_add=True)\n updated_at = models.DateTimeField(auto_now=True)\n\n def clean(self):\n other_rolemappings = self.project.role_mappings.exclude(id=self.id)\n\n if other_rolemappings.filter(user=self.user, project=self.project).exists():\n raise ValidationError('This user is already assigned to a role in this project.')\n\n class Meta:\n unique_together = (\"user\", \"project\", \"role\")\n\n\n@receiver(post_save, sender=RoleMapping)\ndef add_linked_project(sender, instance, created, **kwargs):\n if not created:\n return\n userInstance = instance.user\n projectInstance = instance.project\n if userInstance and projectInstance:\n user = User.objects.get(pk=userInstance.pk)\n project = Project.objects.get(pk=projectInstance.pk)\n user.projects.add(project)\n user.save()\n\n\n@receiver(post_save)\ndef add_superusers_to_project(sender, instance, created, **kwargs):\n if not created:\n return\n if sender not in Project.__subclasses__():\n return\n superusers = User.objects.filter(is_superuser=True)\n admin_role = Role.objects.filter(name=settings.ROLE_PROJECT_ADMIN).first()\n if superusers and admin_role:\n RoleMapping.objects.bulk_create(\n [RoleMapping(role_id=admin_role.id, user_id=superuser.id, project_id=instance.id)\n for superuser in superusers]\n )\n\n\n@receiver(post_save, sender=User)\ndef add_new_superuser_to_projects(sender, instance, created, **kwargs):\n if created and instance.is_superuser:\n admin_role = Role.objects.filter(name=settings.ROLE_PROJECT_ADMIN).first()\n projects = Project.objects.all()\n if admin_role and projects:\n RoleMapping.objects.bulk_create(\n [RoleMapping(role_id=admin_role.id, user_id=instance.id, project_id=project.id)\n for project in projects]\n )\n\n\n@receiver(pre_delete, sender=RoleMapping)\ndef delete_linked_project(sender, instance, using, **kwargs):\n userInstance = instance.user\n projectInstance = instance.project\n if userInstance and projectInstance:\n user = User.objects.get(pk=userInstance.pk)\n project = Project.objects.get(pk=projectInstance.pk)\n user.projects.remove(project)\n user.save()\n", "path": "app/api/models.py" } ]
diff --git a/app/api/models.py b/app/api/models.py index 697ceb0f02..256e7ac8f6 100644 --- a/app/api/models.py +++ b/app/api/models.py @@ -150,7 +150,7 @@ class Label(models.Model): ('ctrl shift', 'ctrl shift') ) SUFFIX_KEYS = tuple( - (c, c) for c in string.ascii_lowercase + (c, c) for c in string.digits + string.ascii_lowercase ) text = models.CharField(max_length=100) diff --git a/frontend/components/containers/labels/LabelActionMenu.vue b/frontend/components/containers/labels/LabelActionMenu.vue index 14b9fe4a60..aaa4c8b214 100644 --- a/frontend/components/containers/labels/LabelActionMenu.vue +++ b/frontend/components/containers/labels/LabelActionMenu.vue @@ -9,6 +9,7 @@ <base-dialog :dialog="createDialog"> <label-creation-form :create-label="createLabel" + :keys="shortkeys" @close="createDialog=false" /> </base-dialog> @@ -22,7 +23,7 @@ </template> <script> -import { mapActions } from 'vuex' +import { mapActions, mapGetters } from 'vuex' import ActionMenu from '@/components/molecules/ActionMenu' import BaseDialog from '@/components/molecules/BaseDialog' import LabelCreationForm from '@/components/organisms/labels/LabelCreationForm' @@ -48,6 +49,10 @@ export default { } }, + computed: { + ...mapGetters('labels', ['shortkeys']) + }, + created() { this.setCurrentProject(this.$route.params.id) }, diff --git a/frontend/components/containers/labels/LabelList.vue b/frontend/components/containers/labels/LabelList.vue index 13a94ca379..5f76c5cf52 100644 --- a/frontend/components/containers/labels/LabelList.vue +++ b/frontend/components/containers/labels/LabelList.vue @@ -40,7 +40,7 @@ <template v-slot:input> <v-select :value="item.suffix_key" - :items="keys" + :items="shortkeys" @change="handleUpdateLabel({ id: item.id, suffix_key: $event })" label="Key" /> @@ -74,7 +74,7 @@ </template> <script> -import { mapState, mapActions, mapMutations } from 'vuex' +import { mapGetters, mapState, mapActions, mapMutations } from 'vuex' import { colorRules, labelNameRules } from '@/rules/index' import { idealColor } from '~/plugins/utils' @@ -105,10 +105,7 @@ export default { computed: { ...mapState('labels', ['items', 'selected', 'loading']), - - keys() { - return 'abcdefghijklmnopqrstuvwxyz'.split('') - } + ...mapGetters('labels', ['shortkeys']) }, created() { diff --git a/frontend/components/organisms/labels/LabelCreationForm.vue b/frontend/components/organisms/labels/LabelCreationForm.vue index 944119c451..7a0f9e1b38 100644 --- a/frontend/components/organisms/labels/LabelCreationForm.vue +++ b/frontend/components/organisms/labels/LabelCreationForm.vue @@ -63,7 +63,8 @@ export default { }, keys: { type: Array, - default: () => 'abcdefghijklmnopqrstuvwxyz'.split('') + default: () => [], + required: true } }, data() { diff --git a/frontend/store/labels.js b/frontend/store/labels.js index 80829a3b79..4ebf68833a 100644 --- a/frontend/store/labels.js +++ b/frontend/store/labels.js @@ -9,6 +9,9 @@ export const state = () => ({ export const getters = { isLabelSelected(state) { return state.selected.length > 0 + }, + shortkeys() { + return '0123456789abcdefghijklmnopqrstuvwxyz'.split('') } }
qtile__qtile-2707
BitcoinTicker migration does not work ``` /tmp/crypto cat config.py from libqtile.widget import BitcoinTicker test = BitcoinTicker() /tmp/crypto qtile migrate -c config.py Config unchanged. /tmp/crypto cat config.py from libqtile.widget import BitcoinTicker test = BitcoinTicker() ``` /cc @Graeme22
[ { "content": "# Copyright (c) 2021, Tycho Andersen. All rights reserved.\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\nimport filecmp\nimport os\nimport os.path\nimport shutil\nimport sys\nfrom glob import glob\n\nBACKUP_SUFFIX = \".migrate.bak\"\n\ntry:\n import bowler\nexcept ImportError:\n pass\n\n\ndef rename_hook(query, fro, to):\n # could match on dotted_name< 'hook' '.' 'subscribe' '.' '{name}' >\n # but the replacement gets more complicated...\n selector = \"'{name}'\".format(name=fro)\n q = query.select_pattern(selector)\n q.current.kwargs[\"name\"] = fro\n return q.rename(to)\n\n\ndef client_name_updated(query):\n \"\"\" Rename window_name_change -> client_name_updated\"\"\"\n return rename_hook(query, \"window_name_change\", \"client_name_updated\")\n\n\ndef tile_master_windows_rename(query):\n return (\n query\n .select_function(\"Tile\")\n .modify_argument(\"masterWindows\", \"master_length\")\n )\n\n\ndef threaded_poll_text_rename(query):\n return (\n query\n .select_class(\"ThreadedPollText\")\n .rename(\"ThreadPoolText\")\n )\n\n\ndef pacman_to_checkupdates(query):\n return (\n query\n .select_class(\"Pacman\")\n .rename(\"CheckUpdates\")\n )\n\n\ndef bitcoin_to_crypto(query):\n return (\n query\n .select_class(\"BitcoinTicker\")\n .rename(\"CryptoTicker\")\n )\n\n\ndef hook_main_function(query):\n def modify_main(node, capture, filename):\n main = capture.get(\"function_def\")\n if main.prev_sibling:\n for leaf in main.prev_sibling.leaves():\n if \"startup\" == leaf.value:\n return\n args = capture.get(\"function_arguments\")\n if args:\n args[0].remove()\n main.prefix += \"from libqtile import hook, qtile\\n\"\n main.prefix += \"@hook.subscribe.startup\\n\"\n\n return (\n query\n .select_function(\"main\")\n .is_def()\n .modify(modify_main)\n )\n\n\n# Deprecated new_at_current key replaced by new_client_position.\n# In the node, we want to change the key name\n# and adapts its value depending of the previous value :\n# new_at_current=True => new_client_position=before_current\n# new_at_current<>True => new_client_position=after_current\ndef update_node_nac(node, capture, filename):\n key = capture.get(\"k\")\n key.value = \"new_client_position\"\n val = capture.get(\"v\")\n if val.value == \"True\":\n val.value = \"'before_current'\"\n else:\n val.value = \"'after_current'\"\n\n\ndef new_at_current_to_new_client_position(query):\n old_pattern = \"\"\"\n argument< k=\"new_at_current\" \"=\" v=any >\n \"\"\"\n return (\n query\n .select(old_pattern)\n .modify(update_node_nac)\n )\n\n\nMIGRATIONS = [\n client_name_updated,\n tile_master_windows_rename,\n threaded_poll_text_rename,\n pacman_to_checkupdates,\n hook_main_function,\n new_at_current_to_new_client_position,\n]\n\n\nMODULE_RENAMES = [\n (\"libqtile.command_graph\", \"libqtile.command.graph\"),\n (\"libqtile.command_client\", \"libqtile.command.client\"),\n (\"libqtile.command_interface\", \"libqtile.command.interface\"),\n (\"libqtile.command_object\", \"libqtile.command.base\"),\n (\"libqtile.window\", \"libqtile.backend.x11.window\"),\n]\n\nfor (fro, to) in MODULE_RENAMES:\n def f(query, fro=fro, to=to):\n return (\n query\n .select_module(fro)\n .rename(to)\n )\n MIGRATIONS.append(f)\n\n\ndef file_and_backup(config_dir):\n for py in glob(os.path.join(config_dir, \"*.py\")):\n backup = py + BACKUP_SUFFIX\n yield py, backup\n\n\ndef do_migrate(args):\n if \"bowler\" not in sys.modules:\n print(\"bowler can't be found, not migrating config file\")\n print(\"install it and try again\")\n sys.exit(1)\n\n config_dir = os.path.dirname(args.config)\n for py, backup in file_and_backup(config_dir):\n shutil.copyfile(py, backup)\n\n for m in MIGRATIONS:\n q = bowler.Query(config_dir)\n m(q).execute(interactive=not args.yes, write=True)\n\n changed = False\n for py, backup in file_and_backup(config_dir):\n backup = py + BACKUP_SUFFIX\n if not filecmp.cmp(py, backup, shallow=False):\n changed = True\n break\n\n if not changed:\n print(\"Config unchanged.\")\n for _, backup in file_and_backup(config_dir):\n os.remove(backup)\n\n\ndef add_subcommand(subparsers, parents):\n parser = subparsers.add_parser(\n \"migrate\",\n parents=parents,\n help=\"Migrate a configuration file to the current API\"\n )\n parser.add_argument(\n \"-c\",\n \"--config\",\n action=\"store\",\n default=os.path.expanduser(\n os.path.join(os.getenv(\"XDG_CONFIG_HOME\", \"~/.config\"), \"qtile\", \"config.py\")\n ),\n help=\"Use the specified configuration file (migrates every .py file in this directory)\",\n )\n parser.add_argument(\n \"--yes\",\n action=\"store_true\",\n help=\"Automatically apply diffs with no confirmation\",\n )\n parser.set_defaults(func=do_migrate)\n", "path": "libqtile/scripts/migrate.py" } ]
[ { "content": "# Copyright (c) 2021, Tycho Andersen. All rights reserved.\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\nimport filecmp\nimport os\nimport os.path\nimport shutil\nimport sys\nfrom glob import glob\n\nBACKUP_SUFFIX = \".migrate.bak\"\n\ntry:\n import bowler\nexcept ImportError:\n pass\n\n\ndef rename_hook(query, fro, to):\n # could match on dotted_name< 'hook' '.' 'subscribe' '.' '{name}' >\n # but the replacement gets more complicated...\n selector = \"'{name}'\".format(name=fro)\n q = query.select_pattern(selector)\n q.current.kwargs[\"name\"] = fro\n return q.rename(to)\n\n\ndef client_name_updated(query):\n \"\"\" Rename window_name_change -> client_name_updated\"\"\"\n return rename_hook(query, \"window_name_change\", \"client_name_updated\")\n\n\ndef tile_master_windows_rename(query):\n return (\n query\n .select_function(\"Tile\")\n .modify_argument(\"masterWindows\", \"master_length\")\n )\n\n\ndef threaded_poll_text_rename(query):\n return (\n query\n .select_class(\"ThreadedPollText\")\n .rename(\"ThreadPoolText\")\n )\n\n\ndef pacman_to_checkupdates(query):\n return (\n query\n .select_class(\"Pacman\")\n .rename(\"CheckUpdates\")\n )\n\n\ndef bitcoin_to_crypto(query):\n return (\n query\n .select_class(\"BitcoinTicker\")\n .rename(\"CryptoTicker\")\n )\n\n\ndef hook_main_function(query):\n def modify_main(node, capture, filename):\n main = capture.get(\"function_def\")\n if main.prev_sibling:\n for leaf in main.prev_sibling.leaves():\n if \"startup\" == leaf.value:\n return\n args = capture.get(\"function_arguments\")\n if args:\n args[0].remove()\n main.prefix += \"from libqtile import hook, qtile\\n\"\n main.prefix += \"@hook.subscribe.startup\\n\"\n\n return (\n query\n .select_function(\"main\")\n .is_def()\n .modify(modify_main)\n )\n\n\n# Deprecated new_at_current key replaced by new_client_position.\n# In the node, we want to change the key name\n# and adapts its value depending of the previous value :\n# new_at_current=True => new_client_position=before_current\n# new_at_current<>True => new_client_position=after_current\ndef update_node_nac(node, capture, filename):\n key = capture.get(\"k\")\n key.value = \"new_client_position\"\n val = capture.get(\"v\")\n if val.value == \"True\":\n val.value = \"'before_current'\"\n else:\n val.value = \"'after_current'\"\n\n\ndef new_at_current_to_new_client_position(query):\n old_pattern = \"\"\"\n argument< k=\"new_at_current\" \"=\" v=any >\n \"\"\"\n return (\n query\n .select(old_pattern)\n .modify(update_node_nac)\n )\n\n\nMIGRATIONS = [\n client_name_updated,\n tile_master_windows_rename,\n threaded_poll_text_rename,\n pacman_to_checkupdates,\n bitcoin_to_crypto,\n hook_main_function,\n new_at_current_to_new_client_position,\n]\n\n\nMODULE_RENAMES = [\n (\"libqtile.command_graph\", \"libqtile.command.graph\"),\n (\"libqtile.command_client\", \"libqtile.command.client\"),\n (\"libqtile.command_interface\", \"libqtile.command.interface\"),\n (\"libqtile.command_object\", \"libqtile.command.base\"),\n (\"libqtile.window\", \"libqtile.backend.x11.window\"),\n]\n\nfor (fro, to) in MODULE_RENAMES:\n def f(query, fro=fro, to=to):\n return (\n query\n .select_module(fro)\n .rename(to)\n )\n MIGRATIONS.append(f)\n\n\ndef file_and_backup(config_dir):\n for py in glob(os.path.join(config_dir, \"*.py\")):\n backup = py + BACKUP_SUFFIX\n yield py, backup\n\n\ndef do_migrate(args):\n if \"bowler\" not in sys.modules:\n print(\"bowler can't be found, not migrating config file\")\n print(\"install it and try again\")\n sys.exit(1)\n\n config_dir = os.path.dirname(args.config)\n for py, backup in file_and_backup(config_dir):\n shutil.copyfile(py, backup)\n\n for m in MIGRATIONS:\n q = bowler.Query(config_dir)\n m(q).execute(interactive=not args.yes, write=True)\n\n changed = False\n for py, backup in file_and_backup(config_dir):\n backup = py + BACKUP_SUFFIX\n if not filecmp.cmp(py, backup, shallow=False):\n changed = True\n break\n\n if not changed:\n print(\"Config unchanged.\")\n for _, backup in file_and_backup(config_dir):\n os.remove(backup)\n\n\ndef add_subcommand(subparsers, parents):\n parser = subparsers.add_parser(\n \"migrate\",\n parents=parents,\n help=\"Migrate a configuration file to the current API\"\n )\n parser.add_argument(\n \"-c\",\n \"--config\",\n action=\"store\",\n default=os.path.expanduser(\n os.path.join(os.getenv(\"XDG_CONFIG_HOME\", \"~/.config\"), \"qtile\", \"config.py\")\n ),\n help=\"Use the specified configuration file (migrates every .py file in this directory)\",\n )\n parser.add_argument(\n \"--yes\",\n action=\"store_true\",\n help=\"Automatically apply diffs with no confirmation\",\n )\n parser.set_defaults(func=do_migrate)\n", "path": "libqtile/scripts/migrate.py" } ]
diff --git a/libqtile/scripts/migrate.py b/libqtile/scripts/migrate.py index e02b419d5b..3ec3bce6f0 100644 --- a/libqtile/scripts/migrate.py +++ b/libqtile/scripts/migrate.py @@ -129,6 +129,7 @@ def new_at_current_to_new_client_position(query): tile_master_windows_rename, threaded_poll_text_rename, pacman_to_checkupdates, + bitcoin_to_crypto, hook_main_function, new_at_current_to_new_client_position, ] diff --git a/test/test_migrate.py b/test/test_migrate.py index 5f18b611c9..98c0102c0b 100644 --- a/test/test_migrate.py +++ b/test/test_migrate.py @@ -169,6 +169,24 @@ def test_pacman(): check_migrate(orig, expected) +def test_crypto(): + orig = textwrap.dedent(""" + from libqtile import bar + from libqtile.widget import BitcoinTicker + + bar.Bar([BitcoinTicker()], 30) + """) + + expected = textwrap.dedent(""" + from libqtile import bar + from libqtile.widget import CryptoTicker + + bar.Bar([CryptoTicker()], 30) + """) + + check_migrate(orig, expected) + + def test_main(): orig = textwrap.dedent(""" def main(qtile):
kserve__kserve-2103
Cannot install required version of numpy on M1 mac /kind bug Issue: Installation on python 3.8 or 3.9 (and presumably all versions of Python) of the v0.8.0 release candidate fails due to the pinned requirement of numpy. Expected behavior: kserve's release candidate for 0.8 can be installed on an M1 mac. Extra information: https://github.com/numpy/numpy/releases/tag/v1.21.0 numpy 1.21+ allows installation on M1 macs. **Environment:** - OS (e.g. from `/etc/os-release`): M1 mac
[ { "content": "# Copyright 2021 The KServe Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom setuptools import setup, find_packages\n\ntests_require = [\n 'pytest',\n 'pytest-asyncio',\n 'pytest-tornasync',\n 'mypy'\n]\n\nsetup(\n name='lgbserver',\n version='0.7.0',\n author_email='[email protected]',\n license='../../LICENSE.txt',\n url='https://github.com/kserve/kserve/python/lgbserver',\n description='Model Server implementation for LightGBM. \\\n Not intended for use outside KServe Frameworks Images',\n long_description=open('README.md').read(),\n python_requires='>3.4',\n packages=find_packages(\"lgbserver\"),\n install_requires=[\n \"kserve>=0.7.0\",\n \"lightgbm == 3.3.2\",\n \"pandas == 0.25.3\",\n \"argparse >= 1.4.0\",\n ],\n tests_require=tests_require,\n extras_require={'test': tests_require}\n)\n", "path": "python/lgbserver/setup.py" } ]
[ { "content": "# Copyright 2021 The KServe Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom setuptools import setup, find_packages\n\ntests_require = [\n 'pytest',\n 'pytest-asyncio',\n 'pytest-tornasync',\n 'mypy'\n]\n\nsetup(\n name='lgbserver',\n version='0.7.0',\n author_email='[email protected]',\n license='../../LICENSE.txt',\n url='https://github.com/kserve/kserve/python/lgbserver',\n description='Model Server implementation for LightGBM. \\\n Not intended for use outside KServe Frameworks Images',\n long_description=open('README.md').read(),\n python_requires='>3.4',\n packages=find_packages(\"lgbserver\"),\n install_requires=[\n \"kserve>=0.7.0\",\n \"lightgbm == 3.3.2\",\n \"pandas == 1.3.5\",\n \"argparse >= 1.4.0\",\n ],\n tests_require=tests_require,\n extras_require={'test': tests_require}\n)\n", "path": "python/lgbserver/setup.py" } ]
diff --git a/python/kserve/requirements.txt b/python/kserve/requirements.txt index 06f5ba53946..f0ec86734f7 100644 --- a/python/kserve/requirements.txt +++ b/python/kserve/requirements.txt @@ -1,31 +1,27 @@ -six>=1.15 -python_dateutil>=2.5.3 -setuptools>=21.0.0 -urllib3>=1.15.1 -kubernetes>=12.0.0 -tornado>=6.0.0 +six>=1.16.0 +python_dateutil>=2.8.0 +setuptools>=60.6.0 +urllib3>=1.26.8 +kubernetes>=21.7.0 +tornado>=6.1 argparse>=1.4.0 -minio>=4.0.9,<7.0.0 -google-cloud-storage>=1.41.1 -adal>=1.2.2 -table_logger>=0.3.5 -numpy~=1.19.2 -azure-storage-blob==12.8.1 -azure-identity>=1.6.0 +minio>=4.0.9,<=7.1.3 +google-cloud-storage>=2.1.0 +adal>=1.2.7 +table_logger>=0.3.6 +numpy~=1.21.5 +azure-storage-blob==12.9.0 +azure-identity>=1.8.0 cloudevents>=1.2.0 -avro>=1.10.1 -boto3==1.20.24 -psutil>=5.0 +avro>=1.11.0 +boto3==1.21.0 +psutil>=5.9.0 ray[serve]==1.9.2 grpcio>=1.34.0 -google_api_core==1.29.0 jmespath==0.10.0 -googleapis_common_protos==1.53.0 -cachetools==4.2.2 -google_auth==1.34.0 -cffi==1.14.6 -cryptography==3.4.7 -idna==3.2 -certifi==2021.5.30 -azure_core==1.17.0 -tritonclient==2.14.2 +googleapis_common_protos==1.54.0 +cachetools==5.0.0 +cffi==1.15.0 +idna==3.3 +certifi==2021.10.8 +tritonclient==2.18.0 diff --git a/python/lgbserver/setup.py b/python/lgbserver/setup.py index 304562f08e8..4678324dcc6 100644 --- a/python/lgbserver/setup.py +++ b/python/lgbserver/setup.py @@ -35,7 +35,7 @@ install_requires=[ "kserve>=0.7.0", "lightgbm == 3.3.2", - "pandas == 0.25.3", + "pandas == 1.3.5", "argparse >= 1.4.0", ], tests_require=tests_require, diff --git a/test/e2e/common/utils.py b/test/e2e/common/utils.py index a3489cbe442..fc1ba45abbd 100644 --- a/test/e2e/common/utils.py +++ b/test/e2e/common/utils.py @@ -166,8 +166,7 @@ def explain_response(service_name, input_json): def get_cluster_ip(): api_instance = client.CoreV1Api(client.ApiClient()) service = api_instance.read_namespaced_service("istio-ingressgateway", - "istio-system", - exact="true") + "istio-system") if service.status.load_balancer.ingress is None: cluster_ip = service.spec.cluster_ip else:
aws__aws-cli-4334
Broken docutils==0.15 Hi community, Today docutils were updated to 0.15 (https://pypi.org/project/docutils/#history) and it breaks awscli running on Python 2. ``` # aws --version Traceback (most recent call last): File "/bin/aws", line 19, in <module> import awscli.clidriver File "/usr/lib/python2.7/site-packages/awscli/clidriver.py", line 36, in <module> from awscli.help import ProviderHelpCommand File "/usr/lib/python2.7/site-packages/awscli/help.py", line 20, in <module> from docutils.core import publish_string File "/usr/lib/python2.7/site-packages/docutils/core.py", line 246 print('\n::: Runtime settings:', file=self._stderr) ^ SyntaxError: invalid syntax ```
[ { "content": "#!/usr/bin/env python\nimport codecs\nimport os.path\nimport re\nimport sys\n\nfrom setuptools import setup, find_packages\n\n\nhere = os.path.abspath(os.path.dirname(__file__))\n\n\ndef read(*parts):\n return codecs.open(os.path.join(here, *parts), 'r').read()\n\n\ndef find_version(*file_paths):\n version_file = read(*file_paths)\n version_match = re.search(r\"^__version__ = ['\\\"]([^'\\\"]*)['\\\"]\",\n version_file, re.M)\n if version_match:\n return version_match.group(1)\n raise RuntimeError(\"Unable to find version string.\")\n\n\nrequires = ['botocore==1.12.191',\n 'colorama>=0.2.5,<=0.3.9',\n 'docutils>=0.10',\n 'rsa>=3.1.2,<=3.5.0',\n 's3transfer>=0.2.0,<0.3.0']\n\n\nif sys.version_info[:2] == (2, 6):\n # For python2.6 we have to require argparse since it\n # was not in stdlib until 2.7.\n requires.append('argparse>=1.1')\n\n # For Python 2.6, we have to require a different verion of PyYAML since the latest\n # versions dropped support for Python 2.6.\n requires.append('PyYAML>=3.10,<=3.13')\nelse:\n requires.append('PyYAML>=3.10,<=5.1')\n\n\nsetup_options = dict(\n name='awscli',\n version=find_version(\"awscli\", \"__init__.py\"),\n description='Universal Command Line Environment for AWS.',\n long_description=read('README.rst'),\n author='Amazon Web Services',\n url='http://aws.amazon.com/cli/',\n scripts=['bin/aws', 'bin/aws.cmd',\n 'bin/aws_completer', 'bin/aws_zsh_completer.sh',\n 'bin/aws_bash_completer'],\n packages=find_packages(exclude=['tests*']),\n package_data={'awscli': ['data/*.json', 'examples/*/*.rst',\n 'examples/*/*.txt', 'examples/*/*/*.txt',\n 'examples/*/*/*.rst', 'topics/*.rst',\n 'topics/*.json']},\n install_requires=requires,\n extras_require={\n ':python_version==\"2.6\"': [\n 'argparse>=1.1',\n ]\n },\n license=\"Apache License 2.0\",\n classifiers=[\n 'Development Status :: 5 - Production/Stable',\n 'Intended Audience :: Developers',\n 'Intended Audience :: System Administrators',\n 'Natural Language :: English',\n 'License :: OSI Approved :: Apache Software License',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 2',\n 'Programming Language :: Python :: 2.6',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.3',\n 'Programming Language :: Python :: 3.4',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n ],\n)\n\nif 'py2exe' in sys.argv:\n # This will actually give us a py2exe command.\n import py2exe\n # And we have some py2exe specific options.\n setup_options['options'] = {\n 'py2exe': {\n 'optimize': 0,\n 'skip_archive': True,\n 'dll_excludes': ['crypt32.dll'],\n 'packages': ['docutils', 'urllib', 'httplib', 'HTMLParser',\n 'awscli', 'ConfigParser', 'xml.etree', 'pipes'],\n }\n }\n setup_options['console'] = ['bin/aws']\n\n\nsetup(**setup_options)\n", "path": "setup.py" } ]
[ { "content": "#!/usr/bin/env python\nimport codecs\nimport os.path\nimport re\nimport sys\n\nfrom setuptools import setup, find_packages\n\n\nhere = os.path.abspath(os.path.dirname(__file__))\n\n\ndef read(*parts):\n return codecs.open(os.path.join(here, *parts), 'r').read()\n\n\ndef find_version(*file_paths):\n version_file = read(*file_paths)\n version_match = re.search(r\"^__version__ = ['\\\"]([^'\\\"]*)['\\\"]\",\n version_file, re.M)\n if version_match:\n return version_match.group(1)\n raise RuntimeError(\"Unable to find version string.\")\n\n\nrequires = ['botocore==1.12.191',\n 'colorama>=0.2.5,<=0.3.9',\n 'docutils>=0.10,<0.15',\n 'rsa>=3.1.2,<=3.5.0',\n 's3transfer>=0.2.0,<0.3.0']\n\n\nif sys.version_info[:2] == (2, 6):\n # For python2.6 we have to require argparse since it\n # was not in stdlib until 2.7.\n requires.append('argparse>=1.1')\n\n # For Python 2.6, we have to require a different verion of PyYAML since the latest\n # versions dropped support for Python 2.6.\n requires.append('PyYAML>=3.10,<=3.13')\nelse:\n requires.append('PyYAML>=3.10,<=5.1')\n\n\nsetup_options = dict(\n name='awscli',\n version=find_version(\"awscli\", \"__init__.py\"),\n description='Universal Command Line Environment for AWS.',\n long_description=read('README.rst'),\n author='Amazon Web Services',\n url='http://aws.amazon.com/cli/',\n scripts=['bin/aws', 'bin/aws.cmd',\n 'bin/aws_completer', 'bin/aws_zsh_completer.sh',\n 'bin/aws_bash_completer'],\n packages=find_packages(exclude=['tests*']),\n package_data={'awscli': ['data/*.json', 'examples/*/*.rst',\n 'examples/*/*.txt', 'examples/*/*/*.txt',\n 'examples/*/*/*.rst', 'topics/*.rst',\n 'topics/*.json']},\n install_requires=requires,\n extras_require={\n ':python_version==\"2.6\"': [\n 'argparse>=1.1',\n ]\n },\n license=\"Apache License 2.0\",\n classifiers=[\n 'Development Status :: 5 - Production/Stable',\n 'Intended Audience :: Developers',\n 'Intended Audience :: System Administrators',\n 'Natural Language :: English',\n 'License :: OSI Approved :: Apache Software License',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 2',\n 'Programming Language :: Python :: 2.6',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.3',\n 'Programming Language :: Python :: 3.4',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n ],\n)\n\nif 'py2exe' in sys.argv:\n # This will actually give us a py2exe command.\n import py2exe\n # And we have some py2exe specific options.\n setup_options['options'] = {\n 'py2exe': {\n 'optimize': 0,\n 'skip_archive': True,\n 'dll_excludes': ['crypt32.dll'],\n 'packages': ['docutils', 'urllib', 'httplib', 'HTMLParser',\n 'awscli', 'ConfigParser', 'xml.etree', 'pipes'],\n }\n }\n setup_options['console'] = ['bin/aws']\n\n\nsetup(**setup_options)\n", "path": "setup.py" } ]
diff --git a/.changes/next-release/bugfix-Dependency-77959.json b/.changes/next-release/bugfix-Dependency-77959.json new file mode 100644 index 000000000000..73ca000cfdfd --- /dev/null +++ b/.changes/next-release/bugfix-Dependency-77959.json @@ -0,0 +1,5 @@ +{ + "type": "bugfix", + "category": "Dependency", + "description": "Fixed dependency issue with broken docutils aws/aws-cli`#4332 <https://github.com/aws/aws-cli/issues/4332>`__" +} diff --git a/requirements-docs.txt b/requirements-docs.txt index 21097b9bb978..5d5400e1063c 100644 --- a/requirements-docs.txt +++ b/requirements-docs.txt @@ -1,2 +1,3 @@ +docutils>=0.10,<0.15 Sphinx==1.1.3 -e . diff --git a/setup.cfg b/setup.cfg index b2f09eb737d1..db8d6e1de286 100644 --- a/setup.cfg +++ b/setup.cfg @@ -6,7 +6,7 @@ universal = 1 requires-dist = botocore==1.12.191 colorama>=0.2.5,<=0.3.9 - docutils>=0.10 + docutils>=0.10,<0.15 rsa>=3.1.2,<=3.5.0 PyYAML>=3.10,<=3.13; python_version=="2.6" PyYAML>=3.10,<=5.1;python_version!="2.6" diff --git a/setup.py b/setup.py index e2693b229be5..9fdd00fc725f 100644 --- a/setup.py +++ b/setup.py @@ -25,7 +25,7 @@ def find_version(*file_paths): requires = ['botocore==1.12.191', 'colorama>=0.2.5,<=0.3.9', - 'docutils>=0.10', + 'docutils>=0.10,<0.15', 'rsa>=3.1.2,<=3.5.0', 's3transfer>=0.2.0,<0.3.0']
mirumee__ariadne-270
Upgrade to GraphQL-core v3 I'm getting the following deprecation warning. Is this something that is already on your radar / that you are planning to resolve for the next release? >**DeprecationWarning**: GraphQL-core-next has been discontinued. It is now released as GraphQL-core v3 and newer.
[ { "content": "#! /usr/bin/env python\nimport os\nfrom setuptools import setup\n\nCLASSIFIERS = [\n \"Development Status :: 4 - Beta\",\n \"Intended Audience :: Developers\",\n \"License :: OSI Approved :: BSD License\",\n \"Operating System :: OS Independent\",\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Topic :: Software Development :: Libraries :: Python Modules\",\n]\n\nREADME_PATH = os.path.join(os.path.dirname(os.path.abspath(__file__)), \"README.md\")\nwith open(README_PATH, \"r\") as f:\n README = f.read()\n\nsetup(\n name=\"ariadne\",\n author=\"Mirumee Software\",\n author_email=\"[email protected]\",\n description=\"Ariadne is a Python library for implementing GraphQL servers.\",\n long_description=README,\n long_description_content_type=\"text/markdown\",\n license=\"BSD\",\n version=\"0.8.0\",\n url=\"https://github.com/mirumee/ariadne\",\n packages=[\"ariadne\"],\n include_package_data=True,\n install_requires=[\n \"graphql-core-next<3.0.0\",\n \"starlette<0.14\",\n \"typing_extensions>=3.6.0\",\n ],\n extras_require={\"asgi-file-uploads\": [\"python-multipart>=0.0.5\"]},\n classifiers=CLASSIFIERS,\n platforms=[\"any\"],\n zip_safe=False,\n)\n", "path": "setup.py" } ]
[ { "content": "#! /usr/bin/env python\nimport os\nfrom setuptools import setup\n\nCLASSIFIERS = [\n \"Development Status :: 4 - Beta\",\n \"Intended Audience :: Developers\",\n \"License :: OSI Approved :: BSD License\",\n \"Operating System :: OS Independent\",\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Topic :: Software Development :: Libraries :: Python Modules\",\n]\n\nREADME_PATH = os.path.join(os.path.dirname(os.path.abspath(__file__)), \"README.md\")\nwith open(README_PATH, \"r\") as f:\n README = f.read()\n\nsetup(\n name=\"ariadne\",\n author=\"Mirumee Software\",\n author_email=\"[email protected]\",\n description=\"Ariadne is a Python library for implementing GraphQL servers.\",\n long_description=README,\n long_description_content_type=\"text/markdown\",\n license=\"BSD\",\n version=\"0.8.0\",\n url=\"https://github.com/mirumee/ariadne\",\n packages=[\"ariadne\"],\n include_package_data=True,\n install_requires=[\n \"graphql-core>=3.0.0\",\n \"starlette<0.14\",\n \"typing_extensions>=3.6.0\",\n ],\n extras_require={\"asgi-file-uploads\": [\"python-multipart>=0.0.5\"]},\n classifiers=CLASSIFIERS,\n platforms=[\"any\"],\n zip_safe=False,\n)\n", "path": "setup.py" } ]
diff --git a/CHANGELOG.md b/CHANGELOG.md index 45218d21e..02b67c642 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,10 @@ # CHANGELOG +## Unreleased + +- Updated `graphql-core-next` to `graphql-core` 3.x. + + ## 0.8.0 (2019-11-25) - Added recursive loading of GraphQL schema files from provided path. diff --git a/requirements.txt b/requirements.txt index 9eb669ac7..ee718e880 100644 --- a/requirements.txt +++ b/requirements.txt @@ -4,6 +4,6 @@ # # pip-compile --output-file=requirements.txt setup.py # -graphql-core-next==1.1.1 +graphql-core==3.0.0 starlette==0.13.0 typing-extensions==3.7.4.1 diff --git a/setup.py b/setup.py index 3a0d7e9bb..780a9a5fb 100755 --- a/setup.py +++ b/setup.py @@ -31,7 +31,7 @@ packages=["ariadne"], include_package_data=True, install_requires=[ - "graphql-core-next<3.0.0", + "graphql-core>=3.0.0", "starlette<0.14", "typing_extensions>=3.6.0", ], diff --git a/tests/asgi/snapshots/snap_test_query_execution.py b/tests/asgi/snapshots/snap_test_query_execution.py index 1c2756d14..adaaa9b77 100644 --- a/tests/asgi/snapshots/snap_test_query_execution.py +++ b/tests/asgi/snapshots/snap_test_query_execution.py @@ -30,10 +30,10 @@ 'errors': [ { 'locations': [ - [ - 2, - 18 - ] + { + 'column': 18, + 'line': 2 + } ], 'message': "Variable '$name' of required type 'String!' was not provided.", 'path': None @@ -94,10 +94,10 @@ snapshots['test_attempt_execute_subscription_with_invalid_query_returns_error_json 1'] = { 'locations': [ - [ - 1, - 16 - ] + { + 'column': 16, + 'line': 1 + } ], 'message': "Cannot query field 'error' on type 'Subscription'.", 'path': None diff --git a/tests/wsgi/snapshots/snap_test_query_execution.py b/tests/wsgi/snapshots/snap_test_query_execution.py index 8f7cc8f16..a7c3d7127 100644 --- a/tests/wsgi/snapshots/snap_test_query_execution.py +++ b/tests/wsgi/snapshots/snap_test_query_execution.py @@ -30,10 +30,10 @@ 'errors': [ { 'locations': [ - [ - 2, - 18 - ] + { + 'column': 18, + 'line': 2 + } ], 'message': "Variable '$name' of required type 'String!' was not provided.", 'path': None
ckan__ckan-5439
docs.ckan.org for 2.6, 2.7 and 2.8 haven't been updated since 2018 ### Please describe the expected behaviour https://docs.ckan.org/en/2.8/, https://docs.ckan.org/en/2.7/ and https://docs.ckan.org/en/2.6/ should have latest docs for each version. ### Please describe the actual behaviour The docs are generated for 2.8.2, 2.7.5 and 2.6.7
[ { "content": "# encoding: utf-8\n\nimport os\nimport os.path\n\n# Avoid problem releasing to pypi from vagrant\nif os.environ.get('USER', '') == 'vagrant':\n del os.link\n\ntry:\n from setuptools import (setup, find_packages,\n __version__ as setuptools_version)\nexcept ImportError:\n from ez_setup import use_setuptools\n use_setuptools()\n from setuptools import (setup, find_packages,\n __version__ as setuptools_version)\n\nfrom ckan import (__version__, __description__, __long_description__,\n __license__)\n\n\n#\n# Check setuptools version\n#\n\ndef parse_version(s):\n return map(int, s.split('.'))\n\nHERE = os.path.dirname(__file__)\nwith open(os.path.join(HERE, 'requirement-setuptools.txt')) as f:\n setuptools_requirement = f.read().strip()\nmin_setuptools_version = parse_version(setuptools_requirement.split('==')[1])\nif parse_version(setuptools_version) < min_setuptools_version:\n raise AssertionError(\n 'setuptools version error\\n'\n 'You need a newer version of setuptools.\\n'\n 'Install the recommended version:\\n'\n ' pip install -r requirement-setuptools.txt\\n'\n 'and then try again to install ckan into your python environment.'\n )\n\n\nentry_points = {\n 'nose.plugins.0.10': [\n 'main = ckan.ckan_nose_plugin:CkanNose',\n ],\n 'paste.app_factory': [\n 'main = ckan.config.middleware:make_app',\n ],\n 'paste.app_install': [\n 'main = ckan.config.install:CKANInstaller',\n ],\n 'paste.paster_command': [\n 'db = ckan.lib.cli:ManageDb',\n 'create-test-data = ckan.lib.cli:CreateTestDataCommand',\n 'sysadmin = ckan.lib.cli:Sysadmin',\n 'user = ckan.lib.cli:UserCmd',\n 'dataset = ckan.lib.cli:DatasetCmd',\n 'search-index = ckan.lib.cli:SearchIndexCommand',\n 'ratings = ckan.lib.cli:Ratings',\n 'notify = ckan.lib.cli:Notification',\n 'rdf-export = ckan.lib.cli:RDFExport',\n 'tracking = ckan.lib.cli:Tracking',\n 'plugin-info = ckan.lib.cli:PluginInfo',\n 'profile = ckan.lib.cli:Profile',\n 'color = ckan.lib.cli:CreateColorSchemeCommand',\n 'check-po-files = ckan.i18n.check_po_files:CheckPoFiles',\n 'trans = ckan.lib.cli:TranslationsCommand',\n 'minify = ckan.lib.cli:MinifyCommand',\n 'less = ckan.lib.cli:LessCommand',\n 'datastore = ckanext.datastore.commands:datastore_group',\n 'datapusher = ckanext.datapusher.cli:DatapusherCommand',\n 'front-end-build = ckan.lib.cli:FrontEndBuildCommand',\n 'views = ckan.lib.cli:ViewsCommand',\n 'config-tool = ckan.lib.cli:ConfigToolCommand',\n 'jobs = ckan.lib.cli:JobsCommand',\n ],\n 'console_scripts': [\n 'ckan-admin = bin.ckan_admin:Command',\n ],\n 'paste.paster_create_template': [\n 'ckanext = ckan.pastertemplates:CkanextTemplate',\n ],\n 'ckan.forms': [\n 'standard = ckan.forms.package:get_standard_fieldset',\n 'package = ckan.forms.package:get_standard_fieldset',\n 'group = ckan.forms.group:get_group_fieldset',\n 'package_group = ckan.forms.group:get_package_group_fieldset',\n ],\n 'ckan.search': [\n 'sql = ckan.lib.search.sql:SqlSearchBackend',\n 'solr = ckan.lib.search.solr_backend:SolrSearchBackend',\n ],\n 'ckan.plugins': [\n 'synchronous_search = ckan.lib.search:SynchronousSearchPlugin',\n 'stats = ckanext.stats.plugin:StatsPlugin',\n 'publisher_form = ckanext.publisher_form.forms:PublisherForm',\n 'publisher_dataset_form = ckanext.publisher_form.forms:PublisherDatasetForm',\n 'multilingual_dataset = ckanext.multilingual.plugin:MultilingualDataset',\n 'multilingual_group = ckanext.multilingual.plugin:MultilingualGroup',\n 'multilingual_tag = ckanext.multilingual.plugin:MultilingualTag',\n 'multilingual_resource = ckanext.multilingual.plugin:MultilingualResource',\n 'organizations = ckanext.organizations.forms:OrganizationForm',\n 'organizations_dataset = ckanext.organizations.forms:OrganizationDatasetForm',\n 'datastore = ckanext.datastore.plugin:DatastorePlugin',\n 'datapusher=ckanext.datapusher.plugin:DatapusherPlugin',\n 'test_tag_vocab_plugin = ckanext.test_tag_vocab_plugin:MockVocabTagsPlugin',\n 'resource_proxy = ckanext.resourceproxy.plugin:ResourceProxy',\n 'text_view = ckanext.textview.plugin:TextView',\n 'recline_view = ckanext.reclineview.plugin:ReclineView',\n 'recline_grid_view = ckanext.reclineview.plugin:ReclineGridView',\n 'recline_graph_view = ckanext.reclineview.plugin:ReclineGraphView',\n 'recline_map_view = ckanext.reclineview.plugin:ReclineMapView',\n 'datatables_view = ckanext.datatablesview.plugin:DataTablesView',\n 'image_view = ckanext.imageview.plugin:ImageView',\n 'webpage_view = ckanext.webpageview.plugin:WebPageView',\n # FIXME: Remove deprecated resource previews below. You should use the\n # versions as *_view instead.\n 'text_preview = ckanext.textview.plugin:TextView',\n 'recline_preview = ckanext.reclineview.plugin:ReclineView',\n 'recline_grid = ckanext.reclineview.plugin:ReclineGridView',\n 'recline_graph = ckanext.reclineview.plugin:ReclineGraphView',\n 'recline_map = ckanext.reclineview.plugin:ReclineMapView',\n # End of deprecated previews\n 'example_itemplatehelpers = ckanext.example_itemplatehelpers.plugin:ExampleITemplateHelpersPlugin',\n 'example_idatasetform = ckanext.example_idatasetform.plugin:ExampleIDatasetFormPlugin',\n 'example_idatasetform_v1 = ckanext.example_idatasetform.plugin_v1:ExampleIDatasetFormPlugin',\n 'example_idatasetform_v2 = ckanext.example_idatasetform.plugin_v2:ExampleIDatasetFormPlugin',\n 'example_idatasetform_v3 = ckanext.example_idatasetform.plugin_v3:ExampleIDatasetFormPlugin',\n 'example_idatasetform_v4 = ckanext.example_idatasetform.plugin_v4:ExampleIDatasetFormPlugin',\n 'example_igroupform = ckanext.example_igroupform.plugin:ExampleIGroupFormPlugin',\n 'example_igroupform_default_group_type = ckanext.example_igroupform.plugin:ExampleIGroupFormPlugin_DefaultGroupType',\n 'example_igroupform_organization = ckanext.example_igroupform.plugin:ExampleIGroupFormOrganizationPlugin',\n 'example_iauthfunctions_v1 = ckanext.example_iauthfunctions.plugin_v1:ExampleIAuthFunctionsPlugin',\n 'example_iauthfunctions_v2 = ckanext.example_iauthfunctions.plugin_v2:ExampleIAuthFunctionsPlugin',\n 'example_iauthfunctions_v3 = ckanext.example_iauthfunctions.plugin_v3:ExampleIAuthFunctionsPlugin',\n 'example_iauthfunctions_v4 = ckanext.example_iauthfunctions.plugin_v4:ExampleIAuthFunctionsPlugin',\n 'example_iauthfunctions_v5_custom_config_setting = ckanext.example_iauthfunctions.plugin_v5_custom_config_setting:ExampleIAuthFunctionsPlugin',\n 'example_iauthfunctions_v6_parent_auth_functions = ckanext.example_iauthfunctions.plugin_v6_parent_auth_functions:ExampleIAuthFunctionsPlugin',\n 'example_theme_v01_empty_extension = ckanext.example_theme_docs.v01_empty_extension.plugin:ExampleThemePlugin',\n 'example_theme_v02_empty_template = ckanext.example_theme_docs.v02_empty_template.plugin:ExampleThemePlugin',\n 'example_theme_v03_jinja = ckanext.example_theme_docs.v03_jinja.plugin:ExampleThemePlugin',\n 'example_theme_v04_ckan_extends = ckanext.example_theme_docs.v04_ckan_extends.plugin:ExampleThemePlugin',\n 'example_theme_v05_block = ckanext.example_theme_docs.v05_block.plugin:ExampleThemePlugin',\n 'example_theme_v06_super = ckanext.example_theme_docs.v06_super.plugin:ExampleThemePlugin',\n 'example_theme_v07_helper_function = ckanext.example_theme_docs.v07_helper_function.plugin:ExampleThemePlugin',\n 'example_theme_v08_custom_helper_function = ckanext.example_theme_docs.v08_custom_helper_function.plugin:ExampleThemePlugin',\n 'example_theme_v09_snippet = ckanext.example_theme_docs.v09_snippet.plugin:ExampleThemePlugin',\n 'example_theme_v10_custom_snippet = ckanext.example_theme_docs.v10_custom_snippet.plugin:ExampleThemePlugin',\n 'example_theme_v11_HTML_and_CSS = ckanext.example_theme_docs.v11_HTML_and_CSS.plugin:ExampleThemePlugin',\n 'example_theme_v12_extra_public_dir = ckanext.example_theme_docs.v12_extra_public_dir.plugin:ExampleThemePlugin',\n 'example_theme_v13_custom_css = ckanext.example_theme_docs.v13_custom_css.plugin:ExampleThemePlugin',\n 'example_theme_v14_more_custom_css = ckanext.example_theme_docs.v14_more_custom_css.plugin:ExampleThemePlugin',\n 'example_theme_v15_fanstatic = ckanext.example_theme_docs.v15_fanstatic.plugin:ExampleThemePlugin',\n 'example_theme_v16_initialize_a_javascript_module = ckanext.example_theme_docs.v16_initialize_a_javascript_module.plugin:ExampleThemePlugin',\n 'example_theme_v17_popover = ckanext.example_theme_docs.v17_popover.plugin:ExampleThemePlugin',\n 'example_theme_v18_snippet_api = ckanext.example_theme_docs.v18_snippet_api.plugin:ExampleThemePlugin',\n 'example_theme_v19_01_error = ckanext.example_theme_docs.v19_01_error.plugin:ExampleThemePlugin',\n 'example_theme_v19_02_error_handling = ckanext.example_theme_docs.v19_02_error_handling.plugin:ExampleThemePlugin',\n 'example_theme_v20_pubsub = ckanext.example_theme_docs.v20_pubsub.plugin:ExampleThemePlugin',\n 'example_theme_v21_custom_jquery_plugin = ckanext.example_theme_docs.v21_custom_jquery_plugin.plugin:ExampleThemePlugin',\n 'example_theme_custom_config_setting = ckanext.example_theme_docs.custom_config_setting.plugin:ExampleThemePlugin',\n 'example_theme_custom_emails = ckanext.example_theme_docs.custom_emails.plugin:ExampleCustomEmailsPlugin',\n 'example_iresourcecontroller = ckanext.example_iresourcecontroller.plugin:ExampleIResourceControllerPlugin',\n 'example_ivalidators = ckanext.example_ivalidators.plugin:ExampleIValidatorsPlugin',\n 'example_iconfigurer = ckanext.example_iconfigurer.plugin:ExampleIConfigurerPlugin',\n 'example_itranslation = ckanext.example_itranslation.plugin:ExampleITranslationPlugin',\n 'example_iconfigurer_v1 = ckanext.example_iconfigurer.plugin_v1:ExampleIConfigurerPlugin',\n 'example_iconfigurer_v2 = ckanext.example_iconfigurer.plugin_v2:ExampleIConfigurerPlugin',\n 'example_flask_iblueprint = ckanext.example_flask_iblueprint.plugin:ExampleFlaskIBlueprintPlugin',\n 'example_flask_streaming = ckanext.example_flask_streaming.plugin:ExampleFlaskStreamingPlugin',\n 'example_iuploader = ckanext.example_iuploader.plugin:ExampleIUploader',\n 'example_idatastorebackend = ckanext.example_idatastorebackend.plugin:ExampleIDatastoreBackendPlugin',\n 'example_ipermissionlabels = ckanext.example_ipermissionlabels.plugin:ExampleIPermissionLabelsPlugin',\n ],\n 'ckan.system_plugins': [\n 'domain_object_mods = ckan.model.modification:DomainObjectModificationExtension',\n ],\n 'ckan.test_plugins': [\n 'routes_plugin = tests.legacy.ckantestplugins:RoutesPlugin',\n 'mapper_plugin = tests.legacy.ckantestplugins:MapperPlugin',\n 'session_plugin = tests.legacy.ckantestplugins:SessionPlugin',\n 'mapper_plugin2 = tests.legacy.ckantestplugins:MapperPlugin2',\n 'authorizer_plugin = tests.legacy.ckantestplugins:AuthorizerPlugin',\n 'test_observer_plugin = tests.legacy.ckantestplugins:PluginObserverPlugin',\n 'action_plugin = tests.legacy.ckantestplugins:ActionPlugin',\n 'auth_plugin = tests.legacy.ckantestplugins:AuthPlugin',\n 'test_group_plugin = tests.legacy.ckantestplugins:MockGroupControllerPlugin',\n 'test_package_controller_plugin = tests.legacy.ckantestplugins:MockPackageControllerPlugin',\n 'test_resource_preview = tests.legacy.ckantestplugins:MockResourcePreviewExtension',\n 'test_json_resource_preview = tests.legacy.ckantestplugins:JsonMockResourcePreviewExtension',\n 'sample_datastore_plugin = ckanext.datastore.tests.sample_datastore_plugin:SampleDataStorePlugin',\n 'example_datastore_deleted_with_count_plugin = ckanext.datastore.tests.test_chained_action:ExampleDataStoreDeletedWithCountPlugin',\n 'example_data_store_search_sql_plugin = ckanext.datastore.tests.test_chained_auth_functions:ExampleDataStoreSearchSQLPlugin',\n 'example_external_provider_plugin = ckanext.datastore.tests.test_chained_auth_functions:ExampleExternalProviderPlugin',\n 'test_datastore_view = ckan.tests.lib.test_datapreview:MockDatastoreBasedResourceView',\n 'test_datapusher_plugin = ckanext.datapusher.tests.test_interfaces:FakeDataPusherPlugin',\n 'test_routing_plugin = ckan.tests.config.test_middleware:MockRoutingPlugin',\n 'test_flash_plugin = ckan.tests.config.test_sessions:FlashMessagePlugin',\n 'test_helpers_plugin = ckan.tests.lib.test_helpers:TestHelpersPlugin',\n 'test_feed_plugin = ckan.tests.controllers.test_feed:MockFeedPlugin',\n 'test_js_translations_plugin = ckan.tests.lib.test_i18n:TestJSTranslationsPlugin',\n ],\n 'babel.extractors': [\n 'ckan = ckan.lib.extract:extract_ckan',\n ],\n}\n\nsetup(\n name='ckan',\n version=__version__,\n author='https://github.com/ckan/ckan/graphs/contributors',\n author_email='[email protected]',\n license=__license__,\n url='http://ckan.org/',\n description=__description__,\n keywords='data packaging component tool server',\n long_description=__long_description__,\n zip_safe=False,\n include_package_data=True,\n packages=find_packages(exclude=['ez_setup']),\n namespace_packages=['ckanext', 'ckanext.stats'],\n message_extractors={\n 'ckan': [\n ('**.py', 'python', None),\n ('**.js', 'javascript', None),\n ('templates/importer/**', 'ignore', None),\n ('templates/**.html', 'ckan', None),\n ('templates/**.txt', 'ckan', None),\n ('templates_legacy/**.html', 'ckan', None),\n ('public/**', 'ignore', None),\n ],\n 'ckanext': [\n ('**.py', 'python', None),\n ('**.js', 'javascript', None),\n ('**.html', 'ckan', None),\n ('multilingual/solr/*.txt', 'ignore', None),\n ]\n },\n entry_points=entry_points,\n # setup.py test command needs a TestSuite so does not work with py.test\n # test_suite = 'nose.collector',\n # tests_require=[ 'py >= 0.8.0-alpha2' ]\n classifiers=[\n # https://pypi.python.org/pypi?%3Aaction=list_classifiers\n 'Development Status :: 5 - Production/Stable',\n 'License :: OSI Approved :: GNU Affero General Public License v3 or later (AGPLv3+)',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 2 :: Only',\n 'Programming Language :: Python :: 2.7',\n ],\n)\n", "path": "setup.py" } ]
[ { "content": "# encoding: utf-8\n\nimport os\nimport os.path\n\n# Avoid problem releasing to pypi from vagrant\nif os.environ.get('USER', '') == 'vagrant':\n del os.link\n\ntry:\n from setuptools import (setup, find_packages,\n __version__ as setuptools_version)\nexcept ImportError:\n from ez_setup import use_setuptools\n use_setuptools()\n from setuptools import (setup, find_packages,\n __version__ as setuptools_version)\n\nfrom ckan import (__version__, __description__, __long_description__,\n __license__)\n\n\n#\n# Check setuptools version\n#\n\ndef parse_version(s):\n return map(int, s.split('.'))\n\nHERE = os.path.dirname(__file__)\nwith open(os.path.join(HERE, 'requirement-setuptools.txt')) as f:\n setuptools_requirement = f.read().strip()\nmin_setuptools_version = parse_version(setuptools_requirement.split('==')[1])\nif parse_version(setuptools_version) < min_setuptools_version:\n raise AssertionError(\n 'setuptools version error\\n'\n 'You need a newer version of setuptools.\\n'\n 'Install the recommended version:\\n'\n ' pip install -r requirement-setuptools.txt\\n'\n 'and then try again to install ckan into your python environment.'\n )\n\n\nentry_points = {\n 'nose.plugins.0.10': [\n 'main = ckan.ckan_nose_plugin:CkanNose',\n ],\n 'paste.app_factory': [\n 'main = ckan.config.middleware:make_app',\n ],\n 'paste.app_install': [\n 'main = ckan.config.install:CKANInstaller',\n ],\n 'paste.paster_command': [\n 'db = ckan.lib.cli:ManageDb',\n 'create-test-data = ckan.lib.cli:CreateTestDataCommand',\n 'sysadmin = ckan.lib.cli:Sysadmin',\n 'user = ckan.lib.cli:UserCmd',\n 'dataset = ckan.lib.cli:DatasetCmd',\n 'search-index = ckan.lib.cli:SearchIndexCommand',\n 'ratings = ckan.lib.cli:Ratings',\n 'notify = ckan.lib.cli:Notification',\n 'rdf-export = ckan.lib.cli:RDFExport',\n 'tracking = ckan.lib.cli:Tracking',\n 'plugin-info = ckan.lib.cli:PluginInfo',\n 'profile = ckan.lib.cli:Profile',\n 'color = ckan.lib.cli:CreateColorSchemeCommand',\n 'check-po-files = ckan.i18n.check_po_files:CheckPoFiles',\n 'trans = ckan.lib.cli:TranslationsCommand',\n 'minify = ckan.lib.cli:MinifyCommand',\n 'less = ckan.lib.cli:LessCommand',\n 'datastore = ckanext.datastore.commands:datastore_group',\n 'datapusher = ckanext.datapusher.cli:DatapusherCommand',\n 'front-end-build = ckan.lib.cli:FrontEndBuildCommand',\n 'views = ckan.lib.cli:ViewsCommand',\n 'config-tool = ckan.lib.cli:ConfigToolCommand',\n 'jobs = ckan.lib.cli:JobsCommand',\n ],\n 'console_scripts': [\n 'ckan-admin = bin.ckan_admin:Command',\n ],\n 'paste.paster_create_template': [\n 'ckanext = ckan.pastertemplates:CkanextTemplate',\n ],\n 'ckan.forms': [\n 'standard = ckan.forms.package:get_standard_fieldset',\n 'package = ckan.forms.package:get_standard_fieldset',\n 'group = ckan.forms.group:get_group_fieldset',\n 'package_group = ckan.forms.group:get_package_group_fieldset',\n ],\n 'ckan.search': [\n 'sql = ckan.lib.search.sql:SqlSearchBackend',\n 'solr = ckan.lib.search.solr_backend:SolrSearchBackend',\n ],\n 'ckan.plugins': [\n 'synchronous_search = ckan.lib.search:SynchronousSearchPlugin',\n 'stats = ckanext.stats.plugin:StatsPlugin',\n 'publisher_form = ckanext.publisher_form.forms:PublisherForm',\n 'publisher_dataset_form = ckanext.publisher_form.forms:PublisherDatasetForm',\n 'multilingual_dataset = ckanext.multilingual.plugin:MultilingualDataset',\n 'multilingual_group = ckanext.multilingual.plugin:MultilingualGroup',\n 'multilingual_tag = ckanext.multilingual.plugin:MultilingualTag',\n 'multilingual_resource = ckanext.multilingual.plugin:MultilingualResource',\n 'organizations = ckanext.organizations.forms:OrganizationForm',\n 'organizations_dataset = ckanext.organizations.forms:OrganizationDatasetForm',\n 'datastore = ckanext.datastore.plugin:DatastorePlugin',\n 'datapusher=ckanext.datapusher.plugin:DatapusherPlugin',\n 'test_tag_vocab_plugin = ckanext.test_tag_vocab_plugin:MockVocabTagsPlugin',\n 'resource_proxy = ckanext.resourceproxy.plugin:ResourceProxy',\n 'text_view = ckanext.textview.plugin:TextView',\n 'recline_view = ckanext.reclineview.plugin:ReclineView',\n 'recline_grid_view = ckanext.reclineview.plugin:ReclineGridView',\n 'recline_graph_view = ckanext.reclineview.plugin:ReclineGraphView',\n 'recline_map_view = ckanext.reclineview.plugin:ReclineMapView',\n 'datatables_view = ckanext.datatablesview.plugin:DataTablesView',\n 'image_view = ckanext.imageview.plugin:ImageView',\n 'webpage_view = ckanext.webpageview.plugin:WebPageView',\n # FIXME: Remove deprecated resource previews below. You should use the\n # versions as *_view instead.\n 'text_preview = ckanext.textview.plugin:TextView',\n 'recline_preview = ckanext.reclineview.plugin:ReclineView',\n 'recline_grid = ckanext.reclineview.plugin:ReclineGridView',\n 'recline_graph = ckanext.reclineview.plugin:ReclineGraphView',\n 'recline_map = ckanext.reclineview.plugin:ReclineMapView',\n # End of deprecated previews\n 'example_itemplatehelpers = ckanext.example_itemplatehelpers.plugin:ExampleITemplateHelpersPlugin',\n 'example_idatasetform = ckanext.example_idatasetform.plugin:ExampleIDatasetFormPlugin',\n 'example_idatasetform_v1 = ckanext.example_idatasetform.plugin_v1:ExampleIDatasetFormPlugin',\n 'example_idatasetform_v2 = ckanext.example_idatasetform.plugin_v2:ExampleIDatasetFormPlugin',\n 'example_idatasetform_v3 = ckanext.example_idatasetform.plugin_v3:ExampleIDatasetFormPlugin',\n 'example_idatasetform_v4 = ckanext.example_idatasetform.plugin_v4:ExampleIDatasetFormPlugin',\n 'example_igroupform = ckanext.example_igroupform.plugin:ExampleIGroupFormPlugin',\n 'example_igroupform_default_group_type = ckanext.example_igroupform.plugin:ExampleIGroupFormPlugin_DefaultGroupType',\n 'example_igroupform_organization = ckanext.example_igroupform.plugin:ExampleIGroupFormOrganizationPlugin',\n 'example_iauthfunctions_v1 = ckanext.example_iauthfunctions.plugin_v1:ExampleIAuthFunctionsPlugin',\n 'example_iauthfunctions_v2 = ckanext.example_iauthfunctions.plugin_v2:ExampleIAuthFunctionsPlugin',\n 'example_iauthfunctions_v3 = ckanext.example_iauthfunctions.plugin_v3:ExampleIAuthFunctionsPlugin',\n 'example_iauthfunctions_v4 = ckanext.example_iauthfunctions.plugin_v4:ExampleIAuthFunctionsPlugin',\n 'example_iauthfunctions_v5_custom_config_setting = ckanext.example_iauthfunctions.plugin_v5_custom_config_setting:ExampleIAuthFunctionsPlugin',\n 'example_iauthfunctions_v6_parent_auth_functions = ckanext.example_iauthfunctions.plugin_v6_parent_auth_functions:ExampleIAuthFunctionsPlugin',\n 'example_theme_v01_empty_extension = ckanext.example_theme_docs.v01_empty_extension.plugin:ExampleThemePlugin',\n 'example_theme_v02_empty_template = ckanext.example_theme_docs.v02_empty_template.plugin:ExampleThemePlugin',\n 'example_theme_v03_jinja = ckanext.example_theme_docs.v03_jinja.plugin:ExampleThemePlugin',\n 'example_theme_v04_ckan_extends = ckanext.example_theme_docs.v04_ckan_extends.plugin:ExampleThemePlugin',\n 'example_theme_v05_block = ckanext.example_theme_docs.v05_block.plugin:ExampleThemePlugin',\n 'example_theme_v06_super = ckanext.example_theme_docs.v06_super.plugin:ExampleThemePlugin',\n 'example_theme_v07_helper_function = ckanext.example_theme_docs.v07_helper_function.plugin:ExampleThemePlugin',\n 'example_theme_v08_custom_helper_function = ckanext.example_theme_docs.v08_custom_helper_function.plugin:ExampleThemePlugin',\n 'example_theme_v09_snippet = ckanext.example_theme_docs.v09_snippet.plugin:ExampleThemePlugin',\n 'example_theme_v10_custom_snippet = ckanext.example_theme_docs.v10_custom_snippet.plugin:ExampleThemePlugin',\n 'example_theme_v11_HTML_and_CSS = ckanext.example_theme_docs.v11_HTML_and_CSS.plugin:ExampleThemePlugin',\n 'example_theme_v12_extra_public_dir = ckanext.example_theme_docs.v12_extra_public_dir.plugin:ExampleThemePlugin',\n 'example_theme_v13_custom_css = ckanext.example_theme_docs.v13_custom_css.plugin:ExampleThemePlugin',\n 'example_theme_v14_more_custom_css = ckanext.example_theme_docs.v14_more_custom_css.plugin:ExampleThemePlugin',\n 'example_theme_v15_fanstatic = ckanext.example_theme_docs.v15_fanstatic.plugin:ExampleThemePlugin',\n 'example_theme_v16_initialize_a_javascript_module = ckanext.example_theme_docs.v16_initialize_a_javascript_module.plugin:ExampleThemePlugin',\n 'example_theme_v17_popover = ckanext.example_theme_docs.v17_popover.plugin:ExampleThemePlugin',\n 'example_theme_v18_snippet_api = ckanext.example_theme_docs.v18_snippet_api.plugin:ExampleThemePlugin',\n 'example_theme_v19_01_error = ckanext.example_theme_docs.v19_01_error.plugin:ExampleThemePlugin',\n 'example_theme_v19_02_error_handling = ckanext.example_theme_docs.v19_02_error_handling.plugin:ExampleThemePlugin',\n 'example_theme_v20_pubsub = ckanext.example_theme_docs.v20_pubsub.plugin:ExampleThemePlugin',\n 'example_theme_v21_custom_jquery_plugin = ckanext.example_theme_docs.v21_custom_jquery_plugin.plugin:ExampleThemePlugin',\n 'example_theme_custom_config_setting = ckanext.example_theme_docs.custom_config_setting.plugin:ExampleThemePlugin',\n 'example_theme_custom_emails = ckanext.example_theme_docs.custom_emails.plugin:ExampleCustomEmailsPlugin',\n 'example_iresourcecontroller = ckanext.example_iresourcecontroller.plugin:ExampleIResourceControllerPlugin',\n 'example_ivalidators = ckanext.example_ivalidators.plugin:ExampleIValidatorsPlugin',\n 'example_iconfigurer = ckanext.example_iconfigurer.plugin:ExampleIConfigurerPlugin',\n 'example_itranslation = ckanext.example_itranslation.plugin:ExampleITranslationPlugin',\n 'example_iconfigurer_v1 = ckanext.example_iconfigurer.plugin_v1:ExampleIConfigurerPlugin',\n 'example_iconfigurer_v2 = ckanext.example_iconfigurer.plugin_v2:ExampleIConfigurerPlugin',\n 'example_flask_iblueprint = ckanext.example_flask_iblueprint.plugin:ExampleFlaskIBlueprintPlugin',\n 'example_flask_streaming = ckanext.example_flask_streaming.plugin:ExampleFlaskStreamingPlugin',\n 'example_iuploader = ckanext.example_iuploader.plugin:ExampleIUploader',\n 'example_idatastorebackend = ckanext.example_idatastorebackend.plugin:ExampleIDatastoreBackendPlugin',\n 'example_ipermissionlabels = ckanext.example_ipermissionlabels.plugin:ExampleIPermissionLabelsPlugin',\n ],\n 'ckan.system_plugins': [\n 'domain_object_mods = ckan.model.modification:DomainObjectModificationExtension',\n ],\n 'ckan.test_plugins': [\n 'routes_plugin = tests.legacy.ckantestplugins:RoutesPlugin',\n 'mapper_plugin = tests.legacy.ckantestplugins:MapperPlugin',\n 'session_plugin = tests.legacy.ckantestplugins:SessionPlugin',\n 'mapper_plugin2 = tests.legacy.ckantestplugins:MapperPlugin2',\n 'authorizer_plugin = tests.legacy.ckantestplugins:AuthorizerPlugin',\n 'test_observer_plugin = tests.legacy.ckantestplugins:PluginObserverPlugin',\n 'action_plugin = tests.legacy.ckantestplugins:ActionPlugin',\n 'auth_plugin = tests.legacy.ckantestplugins:AuthPlugin',\n 'test_group_plugin = tests.legacy.ckantestplugins:MockGroupControllerPlugin',\n 'test_package_controller_plugin = tests.legacy.ckantestplugins:MockPackageControllerPlugin',\n 'test_resource_preview = tests.legacy.ckantestplugins:MockResourcePreviewExtension',\n 'test_json_resource_preview = tests.legacy.ckantestplugins:JsonMockResourcePreviewExtension',\n 'sample_datastore_plugin = ckanext.datastore.tests.sample_datastore_plugin:SampleDataStorePlugin',\n 'example_datastore_deleted_with_count_plugin = ckanext.datastore.tests.test_chained_action:ExampleDataStoreDeletedWithCountPlugin',\n 'example_data_store_search_sql_plugin = ckanext.datastore.tests.test_chained_auth_functions:ExampleDataStoreSearchSQLPlugin',\n 'example_external_provider_plugin = ckanext.datastore.tests.test_chained_auth_functions:ExampleExternalProviderPlugin',\n 'test_datastore_view = ckan.tests.lib.test_datapreview:MockDatastoreBasedResourceView',\n 'test_datapusher_plugin = ckanext.datapusher.tests.test_interfaces:FakeDataPusherPlugin',\n 'test_routing_plugin = ckan.tests.config.test_middleware:MockRoutingPlugin',\n 'test_flash_plugin = ckan.tests.config.test_sessions:FlashMessagePlugin',\n 'test_helpers_plugin = ckan.tests.lib.test_helpers:TestHelpersPlugin',\n 'test_feed_plugin = ckan.tests.controllers.test_feed:MockFeedPlugin',\n 'test_js_translations_plugin = ckan.tests.lib.test_i18n:TestJSTranslationsPlugin',\n ],\n 'babel.extractors': [\n 'ckan = ckan.lib.extract:extract_ckan',\n ],\n}\n\nsetup(\n name='ckan',\n version=__version__,\n author='https://github.com/ckan/ckan/graphs/contributors',\n author_email='[email protected]',\n license=__license__,\n url='http://ckan.org/',\n description=__description__,\n keywords='data packaging component tool server',\n long_description=__long_description__,\n zip_safe=False,\n include_package_data=True,\n packages=find_packages(exclude=['ez_setup']),\n namespace_packages=['ckanext', 'ckanext.stats'],\n message_extractors={\n 'ckan': [\n ('**.py', 'python', None),\n ('**.js', 'javascript', None),\n ('templates/importer/**', 'ignore', None),\n ('templates/**.html', 'ckan', None),\n ('templates/**.txt', 'ckan', None),\n ('templates_legacy/**.html', 'ckan', None),\n ('public/**', 'ignore', None),\n ],\n 'ckanext': [\n ('**.py', 'python', None),\n ('**.js', 'javascript', None),\n ('**.html', 'ckan', None),\n ('multilingual/solr/*.txt', 'ignore', None),\n ]\n },\n entry_points=entry_points,\n # setup.py test command needs a TestSuite so does not work with py.test\n # test_suite = 'nose.collector',\n # tests_require=[ 'py >= 0.8.0-alpha2' ]\n classifiers=[\n # https://pypi.python.org/pypi?%3Aaction=list_classifiers\n 'Development Status :: 5 - Production/Stable',\n 'License :: OSI Approved :: GNU Affero General Public License v3 or later (AGPLv3+)',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 2 :: Only',\n 'Programming Language :: Python :: 2.7',\n ],\n # this is used to fix an incompatiblity with readthedocs dependencies\n extras_require={\n \"readthedocs\": [\"Jinja2>=2.3\"],\n }\n)\n", "path": "setup.py" } ]
diff --git a/.readthedocs.yml b/.readthedocs.yml new file mode 100644 index 00000000000..c3f5fedc411 --- /dev/null +++ b/.readthedocs.yml @@ -0,0 +1,27 @@ +# Read the Docs configuration file +# See https://docs.readthedocs.io/en/stable/config-file/v2.html for details + +# Required +version: 2 + +# Build documentation in the docs/ directory with Sphinx +sphinx: + configuration: doc/conf.py + +# Build documentation with MkDocs +#mkdocs: +# configuration: mkdocs.yml + +# Optionally build your docs in additional formats such as PDF +formats: + - pdf + +# Optionally set the version of Python and requirements required to build your docs +python: + version: 2.7 + install: + - requirements: pip-requirements-docs.txt + - method: pip + path: . + extra_requirements: + - readthedocs diff --git a/setup.py b/setup.py index 52e19ea9726..c6c1ad7e998 100644 --- a/setup.py +++ b/setup.py @@ -250,4 +250,8 @@ def parse_version(s): 'Programming Language :: Python :: 2 :: Only', 'Programming Language :: Python :: 2.7', ], + # this is used to fix an incompatiblity with readthedocs dependencies + extras_require={ + "readthedocs": ["Jinja2>=2.3"], + } )
pyodide__pyodide-4435
Python 3.12 version ## 🚀 Feature <!-- A clear and concise description of the feature proposal --> Hi, I tried [REPL](https://pyodide.org/en/stable/console.html), maybe it uses the latest 0.25.0, and I noticed that the python is 3.11.3. Python 3.12 has released for a few months with a lot of new features. Since there is no issue track the progress. So, I created this one. ### Motivation <!-- Please outline the motivation for the proposal. Is your feature request related to a problem? e.g., I'm always frustrated when [...]. If this is related to another GitHub issue, please link here too --> N.A. ### Pitch <!-- A clear and concise description of what you want to happen. --> N.A. ### Alternatives <!-- A clear and concise description of any alternative solutions or features you've considered, if any. --> N.A. ### Additional context <!-- Add any other context or screenshots about the feature request here. --> N.A.
[ { "content": "import shutil\nfrom collections.abc import Callable\nfrom pathlib import Path\nfrom tempfile import TemporaryDirectory\n\nfrom ._py_compile import _compile\nfrom .common import make_zip_archive\n\n# These files are removed from the stdlib\nREMOVED_FILES = (\n # package management\n \"ensurepip/\",\n \"venv/\",\n # build system\n \"lib2to3/\",\n # other platforms\n \"_osx_support.py\",\n \"_aix_support.py\",\n # Not supported by browser\n \"curses/\",\n \"dbm/\",\n \"idlelib/\",\n \"tkinter/\",\n \"turtle.py\",\n \"turtledemo\",\n)\n\n# These files are unvendored from the stdlib and can be loaded with `loadPackage`\nUNVENDORED_FILES = (\n \"test/\",\n \"distutils/\",\n \"sqlite3\",\n \"ssl.py\",\n \"lzma.py\",\n \"_pydecimal.py\",\n \"pydoc_data\",\n)\n\n# We have JS implementations of these modules\nJS_STUB_FILES = (\"webbrowser.py\",)\n\n\ndef default_filterfunc(\n root: Path, verbose: bool = False\n) -> Callable[[str, list[str]], set[str]]:\n \"\"\"\n The default filter function used by `create_zipfile`.\n\n This function filters out several modules that are:\n\n - not supported in Pyodide due to browser limitations (e.g. `tkinter`)\n - unvendored from the standard library (e.g. `sqlite3`)\n \"\"\"\n\n def _should_skip(path: Path) -> bool:\n \"\"\"Skip common files that are not needed in the zip file.\"\"\"\n name = path.name\n\n if path.is_dir() and name in (\"__pycache__\", \"dist\"):\n return True\n\n if path.is_dir() and name.endswith((\".egg-info\", \".dist-info\")):\n return True\n\n if path.is_file() and name in (\n \"LICENSE\",\n \"LICENSE.txt\",\n \"setup.py\",\n \".gitignore\",\n ):\n return True\n\n if path.is_file() and name.endswith((\"pyi\", \"toml\", \"cfg\", \"md\", \"rst\")):\n return True\n\n return False\n\n def filterfunc(path: Path | str, names: list[str]) -> set[str]:\n filtered_files = {\n (root / f).resolve() for f in REMOVED_FILES + UNVENDORED_FILES\n }\n\n # We have JS implementations of these modules, so we don't need to\n # include the Python ones. Checking the name of the root directory\n # is a bit of a hack, but it works...\n if root.name.startswith(\"python3\"):\n filtered_files.update({root / f for f in JS_STUB_FILES})\n\n path = Path(path).resolve()\n\n if _should_skip(path):\n return set(names)\n\n _names = []\n for name in names:\n fullpath = path / name\n\n if _should_skip(fullpath) or fullpath in filtered_files:\n if verbose:\n print(f\"Skipping {fullpath}\")\n\n _names.append(name)\n\n return set(_names)\n\n return filterfunc\n\n\ndef create_zipfile(\n libdirs: list[Path],\n output: Path | str = \"python\",\n pycompile: bool = False,\n filterfunc: Callable[[str, list[str]], set[str]] | None = None,\n compression_level: int = 6,\n) -> None:\n \"\"\"\n Bundle Python standard libraries into a zip file.\n\n The basic idea of this function is similar to the standard library's\n {ref}`zipfile.PyZipFile` class.\n\n However, we need some additional functionality for Pyodide. For example:\n\n - We need to remove some unvendored modules, e.g. `sqlite3`\n - We need an option to \"not\" compile the files in the zip file\n\n hence this function.\n\n Parameters\n ----------\n libdirs\n List of paths to the directory containing the Python standard library or extra packages.\n\n output\n Path to the output zip file. Defaults to python.zip.\n\n pycompile\n Whether to compile the .py files into .pyc, by default False\n\n filterfunc\n A function that filters the files to be included in the zip file.\n This function will be passed to {ref}`shutil.copytree` 's ignore argument.\n By default, Pyodide's default filter function is used.\n\n compression_level\n Level of zip compression to apply. 0 means no compression. If a strictly\n positive integer is provided, ZIP_DEFLATED option is used.\n\n Returns\n -------\n BytesIO\n A BytesIO object containing the zip file.\n \"\"\"\n\n archive = Path(output)\n\n with TemporaryDirectory() as temp_dir_str:\n temp_dir = Path(temp_dir_str)\n\n for libdir in libdirs:\n libdir = Path(libdir)\n\n if filterfunc is None:\n _filterfunc = default_filterfunc(libdir)\n\n shutil.copytree(libdir, temp_dir, ignore=_filterfunc, dirs_exist_ok=True)\n\n make_zip_archive(\n archive,\n temp_dir,\n compression_level=compression_level,\n )\n\n if pycompile:\n _compile(\n archive,\n archive,\n verbose=False,\n keep=False,\n compression_level=compression_level,\n )\n", "path": "pyodide-build/pyodide_build/pyzip.py" } ]
[ { "content": "import shutil\nfrom collections.abc import Callable\nfrom pathlib import Path\nfrom tempfile import TemporaryDirectory\n\nfrom ._py_compile import _compile\nfrom .common import make_zip_archive\n\n# These files are removed from the stdlib\nREMOVED_FILES = (\n # package management\n \"ensurepip/\",\n \"venv/\",\n # build system\n \"lib2to3/\",\n # other platforms\n \"_osx_support.py\",\n \"_aix_support.py\",\n # Not supported by browser\n \"curses/\",\n \"dbm/\",\n \"idlelib/\",\n \"tkinter/\",\n \"turtle.py\",\n \"turtledemo\",\n)\n\n# These files are unvendored from the stdlib and can be loaded with `loadPackage`\nUNVENDORED_FILES = (\n \"test/\",\n \"sqlite3\",\n \"ssl.py\",\n \"lzma.py\",\n \"_pydecimal.py\",\n \"pydoc_data\",\n)\n\n# We have JS implementations of these modules\nJS_STUB_FILES = (\"webbrowser.py\",)\n\n\ndef default_filterfunc(\n root: Path, verbose: bool = False\n) -> Callable[[str, list[str]], set[str]]:\n \"\"\"\n The default filter function used by `create_zipfile`.\n\n This function filters out several modules that are:\n\n - not supported in Pyodide due to browser limitations (e.g. `tkinter`)\n - unvendored from the standard library (e.g. `sqlite3`)\n \"\"\"\n\n def _should_skip(path: Path) -> bool:\n \"\"\"Skip common files that are not needed in the zip file.\"\"\"\n name = path.name\n\n if path.is_dir() and name in (\"__pycache__\", \"dist\"):\n return True\n\n if path.is_dir() and name.endswith((\".egg-info\", \".dist-info\")):\n return True\n\n if path.is_file() and name in (\n \"LICENSE\",\n \"LICENSE.txt\",\n \"setup.py\",\n \".gitignore\",\n ):\n return True\n\n if path.is_file() and name.endswith((\"pyi\", \"toml\", \"cfg\", \"md\", \"rst\")):\n return True\n\n return False\n\n def filterfunc(path: Path | str, names: list[str]) -> set[str]:\n filtered_files = {\n (root / f).resolve() for f in REMOVED_FILES + UNVENDORED_FILES\n }\n\n # We have JS implementations of these modules, so we don't need to\n # include the Python ones. Checking the name of the root directory\n # is a bit of a hack, but it works...\n if root.name.startswith(\"python3\"):\n filtered_files.update({root / f for f in JS_STUB_FILES})\n\n path = Path(path).resolve()\n\n if _should_skip(path):\n return set(names)\n\n _names = []\n for name in names:\n fullpath = path / name\n\n if _should_skip(fullpath) or fullpath in filtered_files:\n if verbose:\n print(f\"Skipping {fullpath}\")\n\n _names.append(name)\n\n return set(_names)\n\n return filterfunc\n\n\ndef create_zipfile(\n libdirs: list[Path],\n output: Path | str = \"python\",\n pycompile: bool = False,\n filterfunc: Callable[[str, list[str]], set[str]] | None = None,\n compression_level: int = 6,\n) -> None:\n \"\"\"\n Bundle Python standard libraries into a zip file.\n\n The basic idea of this function is similar to the standard library's\n {ref}`zipfile.PyZipFile` class.\n\n However, we need some additional functionality for Pyodide. For example:\n\n - We need to remove some unvendored modules, e.g. `sqlite3`\n - We need an option to \"not\" compile the files in the zip file\n\n hence this function.\n\n Parameters\n ----------\n libdirs\n List of paths to the directory containing the Python standard library or extra packages.\n\n output\n Path to the output zip file. Defaults to python.zip.\n\n pycompile\n Whether to compile the .py files into .pyc, by default False\n\n filterfunc\n A function that filters the files to be included in the zip file.\n This function will be passed to {ref}`shutil.copytree` 's ignore argument.\n By default, Pyodide's default filter function is used.\n\n compression_level\n Level of zip compression to apply. 0 means no compression. If a strictly\n positive integer is provided, ZIP_DEFLATED option is used.\n\n Returns\n -------\n BytesIO\n A BytesIO object containing the zip file.\n \"\"\"\n\n archive = Path(output)\n\n with TemporaryDirectory() as temp_dir_str:\n temp_dir = Path(temp_dir_str)\n\n for libdir in libdirs:\n libdir = Path(libdir)\n\n if filterfunc is None:\n _filterfunc = default_filterfunc(libdir)\n\n shutil.copytree(libdir, temp_dir, ignore=_filterfunc, dirs_exist_ok=True)\n\n make_zip_archive(\n archive,\n temp_dir,\n compression_level=compression_level,\n )\n\n if pycompile:\n _compile(\n archive,\n archive,\n verbose=False,\n keep=False,\n compression_level=compression_level,\n )\n", "path": "pyodide-build/pyodide_build/pyzip.py" } ]
diff --git a/.circleci/config.yml b/.circleci/config.yml index 7006061437a..ee54cc3cd82 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -6,7 +6,7 @@ defaults: &defaults # Note: when updating the docker image version, # make sure there are no extra old versions lying around. # (e.g. `rg -F --hidden <old_tag>`) - - image: pyodide/pyodide-env:20240127-chrome114-firefox122-py311 + - image: pyodide/pyodide-env:20240127-chrome114-firefox122-py312 environment: - EMSDK_NUM_CORES: 3 EMCC_CORES: 3 diff --git a/.devcontainer/devcontainer.json b/.devcontainer/devcontainer.json index 65a94e46758..b3d97ef0c01 100644 --- a/.devcontainer/devcontainer.json +++ b/.devcontainer/devcontainer.json @@ -1,7 +1,7 @@ { "name": "Docker", // keep in sync with "run_docker" - "image": "pyodide/pyodide-env:20240127-chrome114-firefox122-py311", + "image": "pyodide/pyodide-env:20240127-chrome114-firefox122-py312", "remoteUser": "root", "onCreateCommand": ".devcontainer/onCreate-docker.sh" } diff --git a/.github/workflows/main.yml b/.github/workflows/main.yml index def36ad6828..2c024fba289 100644 --- a/.github/workflows/main.yml +++ b/.github/workflows/main.yml @@ -170,7 +170,7 @@ jobs: - uses: conda-incubator/setup-miniconda@v2 with: activate-environment: pyodide-env - python-version: "3.11" + python-version: "3.12" channels: conda-forge - name: install test requirements diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index c33c5f247cc..925aa7524fd 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -1,6 +1,6 @@ exclude: (^.*patches|.*\.cgi$|^packages/micropip/src/micropip/externals|^benchmark/benchmarks$) default_language_version: - python: "3.11" + python: "3.12" repos: - repo: https://github.com/pre-commit/pre-commit-hooks rev: "v4.5.0" diff --git a/Dockerfile b/Dockerfile index 3201f06d823..b96c2174391 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,5 +1,5 @@ FROM node:20.11-bookworm-slim AS node-image -FROM python:3.11.7-slim-bookworm +FROM python:3.12.1-slim-bookworm # Requirements for building packages RUN apt-get update \ diff --git a/Makefile.envs b/Makefile.envs index 5f717f194ff..3617bd7382d 100644 --- a/Makefile.envs +++ b/Makefile.envs @@ -1,6 +1,8 @@ -export PYVERSION ?= 3.11.7 +export PYVERSION ?= 3.12.1 export PYODIDE_EMSCRIPTEN_VERSION ?= 3.1.52 +export PYTHON_ARCHIVE_SHA256=d01ec6a33bc10009b09c17da95cc2759af5a580a7316b3a446eb4190e13f97b2 + ifdef CPYTHON_DEBUG export CPYTHON_ABI_FLAGS=d endif @@ -29,7 +31,6 @@ export HOSTPYTHONROOT=$(shell python${PYMAJOR}.${PYMINOR} -c "import sys; print( export HOSTPYTHON=$(HOSTPYTHONROOT)/bin/python$(PYMAJOR).$(PYMINOR) export PYTHON_ARCHIVE_URL=https://www.python.org/ftp/python/$(PYSTABLEVERSION)/Python-$(PYVERSION).tgz -export PYTHON_ARCHIVE_SHA256=068c05f82262e57641bd93458dfa883128858f5f4997aad7a36fd25b13b29209 export CPYTHONROOT=$(PYODIDE_ROOT)/cpython export CPYTHONINSTALL=$(CPYTHONROOT)/installs/python-$(PYVERSION) @@ -148,6 +149,7 @@ export MAIN_MODULE_LDFLAGS= $(LDFLAGS_BASE) \ -lwebsocket.js \ -leventloop.js \ -lhiwire \ + -lHacl_Hash_SHA2 \ \ -lGL \ -legl.js \ @@ -219,6 +221,7 @@ ifeq ($(DISABLE_DYLINK), 1) -lhiwire \ -lidbfs.js \ -lnodefs.js \ + -lHacl_Hash_SHA2 \ endif diff --git a/cpython/Makefile b/cpython/Makefile index f9f78d57158..8c616b8cd7d 100644 --- a/cpython/Makefile +++ b/cpython/Makefile @@ -47,6 +47,8 @@ $(INSTALL)/lib/$(LIB): $(BUILD)/$(LIB) sysconfigdata cp $(LIB) $(INSTALL)/lib/ \ ) + cp $(BUILD)/Modules/_hacl/libHacl_Hash_SHA2.a $(INSTALL)/lib/ + .PHONY=rebuild rebuild: sysconfigdata diff --git a/cpython/patches/0001-Public-pymain_run_python.patch b/cpython/patches/0001-Public-pymain_run_python.patch index 9cea08c3334..5c8f1c39475 100644 --- a/cpython/patches/0001-Public-pymain_run_python.patch +++ b/cpython/patches/0001-Public-pymain_run_python.patch @@ -1,17 +1,19 @@ -From 2bc35a83ee2ba29369db1fd8fa40f97ffeb8fa83 Mon Sep 17 00:00:00 2001 +From d32ad19aa3e0c8b5524c999219d294d235a47602 Mon Sep 17 00:00:00 2001 From: Hood Chatham <[email protected]> Date: Sun, 17 Jul 2022 14:40:39 +0100 -Subject: [PATCH 1/9] Public pymain_run_python +Subject: [PATCH 1/6] Public pymain_run_python +Discussion here: +https://discuss.python.org/t/unstable-api-for-pymain-run-python-run-python-cli-but-dont-finalize-interpreter/44675 --- Modules/main.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Modules/main.c b/Modules/main.c -index 6904e3f76e..07bea58080 100644 +index 1b189b4561..8c082f3b65 100644 --- a/Modules/main.c +++ b/Modules/main.c -@@ -537,7 +537,7 @@ pymain_repl(PyConfig *config, int *exitcode) +@@ -546,7 +546,7 @@ pymain_repl(PyConfig *config, int *exitcode) } diff --git a/cpython/patches/0002-Patch-importlib-to-allow-modifications-to-ModuleNotF.patch b/cpython/patches/0002-Patch-importlib-to-allow-modifications-to-ModuleNotF.patch index e86cee60f12..5ec111152c1 100644 --- a/cpython/patches/0002-Patch-importlib-to-allow-modifications-to-ModuleNotF.patch +++ b/cpython/patches/0002-Patch-importlib-to-allow-modifications-to-ModuleNotF.patch @@ -1,32 +1,34 @@ -From 406996817ac1b8224da3ef3ee071cb02368a25a2 Mon Sep 17 00:00:00 2001 +From f02d3a2a1a8a7115b832704ad82852061c76233e Mon Sep 17 00:00:00 2001 From: Hood Chatham <[email protected]> Date: Wed, 16 Nov 2022 14:02:53 -0800 -Subject: [PATCH 2/9] Patch importlib to allow modifications to +Subject: [PATCH 2/6] Patch importlib to allow modifications to ModuleNotFoundError +Upstream PR: +https://github.com/python/cpython/pull/114813 --- Lib/importlib/_bootstrap.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/Lib/importlib/_bootstrap.py b/Lib/importlib/_bootstrap.py -index ee93ebc396..aa9f71a295 100644 +index d942045f3d..9aee448e2e 100644 --- a/Lib/importlib/_bootstrap.py +++ b/Lib/importlib/_bootstrap.py -@@ -1117,6 +1117,9 @@ def _sanity_check(name, package, level): +@@ -1301,6 +1301,9 @@ def _sanity_check(name, package, level): _ERR_MSG_PREFIX = 'No module named ' _ERR_MSG = _ERR_MSG_PREFIX + '{!r}' +def _get_module_not_found_error(name): -+ return ModuleNotFoundError(_ERR_MSG.format(name), name=name) ++ raise ModuleNotFoundError(f'{_ERR_MSG_PREFIX}{name!r}', name=name) + def _find_and_load_unlocked(name, import_): path = None parent = name.rpartition('.')[0] -@@ -1137,7 +1140,7 @@ def _find_and_load_unlocked(name, import_): +@@ -1321,7 +1324,7 @@ def _find_and_load_unlocked(name, import_): child = name.rpartition('.')[2] spec = _find_spec(name, path) if spec is None: -- raise ModuleNotFoundError(_ERR_MSG.format(name), name=name) +- raise ModuleNotFoundError(f'{_ERR_MSG_PREFIX}{name!r}', name=name) + raise _get_module_not_found_error(name) else: if parent_spec: diff --git a/cpython/patches/0003-Add-emscripten-platform-support-to-ctypes.util.find_.patch b/cpython/patches/0003-Add-emscripten-platform-support-to-ctypes.util.find_.patch index beadf78ac33..ce9e8b7c4df 100644 --- a/cpython/patches/0003-Add-emscripten-platform-support-to-ctypes.util.find_.patch +++ b/cpython/patches/0003-Add-emscripten-platform-support-to-ctypes.util.find_.patch @@ -1,7 +1,7 @@ -From 38c04a993e06ef4ebe22f6beb930299a4b70586d Mon Sep 17 00:00:00 2001 +From 55c6694ccc7cd62f2aecbc15f72df89e437adf57 Mon Sep 17 00:00:00 2001 From: ryanking13 <[email protected]> Date: Fri, 2 Dec 2022 11:36:44 +0000 -Subject: [PATCH 3/9] Add emscripten platform support to +Subject: [PATCH 3/6] Add emscripten platform support to ctypes.util.find_library --- diff --git a/cpython/patches/0004-Allow-multiprocessing.connection-top-level-import.patch b/cpython/patches/0004-Allow-multiprocessing.connection-top-level-import.patch index b8de9a72002..d24ef5139d7 100644 --- a/cpython/patches/0004-Allow-multiprocessing.connection-top-level-import.patch +++ b/cpython/patches/0004-Allow-multiprocessing.connection-top-level-import.patch @@ -1,14 +1,17 @@ -From 5b2a0f085f695e129e661192954b97f1efc3ded5 Mon Sep 17 00:00:00 2001 +From 3228232e38b3aee5023231b518750d8de38a9de7 Mon Sep 17 00:00:00 2001 From: Hood Chatham <[email protected]> Date: Mon, 19 Dec 2022 09:09:14 -0800 -Subject: [PATCH 4/9] Allow multiprocessing.connection top level import +Subject: [PATCH 4/6] Allow multiprocessing.connection top level import + +Upstream PR: +https://github.com/python/cpython/pull/114808 --- Lib/multiprocessing/connection.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/Lib/multiprocessing/connection.py b/Lib/multiprocessing/connection.py -index 8b81f9954e..e16dfede2e 100644 +index dbbf106f68..e92edf6c30 100644 --- a/Lib/multiprocessing/connection.py +++ b/Lib/multiprocessing/connection.py @@ -19,7 +19,10 @@ diff --git a/cpython/patches/0008-Make-Emscripten-trampolines-work-with-JSPI.patch b/cpython/patches/0005-Make-Emscripten-trampolines-work-with-JSPI.patch similarity index 91% rename from cpython/patches/0008-Make-Emscripten-trampolines-work-with-JSPI.patch rename to cpython/patches/0005-Make-Emscripten-trampolines-work-with-JSPI.patch index fa1e74ec8b5..b1d40d3d124 100644 --- a/cpython/patches/0008-Make-Emscripten-trampolines-work-with-JSPI.patch +++ b/cpython/patches/0005-Make-Emscripten-trampolines-work-with-JSPI.patch @@ -1,7 +1,7 @@ -From 9a8180dcd7733c49c1651a98e32211fb1abbbaad Mon Sep 17 00:00:00 2001 +From ed163b0cf72c4cae56274db02b44b7f7bc8e6d2d Mon Sep 17 00:00:00 2001 From: Hood Chatham <[email protected]> Date: Wed, 28 Jun 2023 10:46:19 -0700 -Subject: [PATCH 8/9] Make Emscripten trampolines work with JSPI +Subject: [PATCH 5/6] Make Emscripten trampolines work with JSPI There is a WIP proposal to enable webassembly stack switching which have been implemented in v8: @@ -22,6 +22,10 @@ trampoline. We cache the function argument counts since when I didn't cache them performance was negatively affected. + +Upstreamed here: +https://github.com/python/cpython/pull/106219 + --- .../internal/pycore_emscripten_trampoline.h | 67 +++++++++++++++++ Include/internal/pycore_object.h | 28 +------ @@ -111,7 +115,7 @@ index 0000000000..900d527e48 +#endif // defined(__EMSCRIPTEN__) && defined(PY_CALL_TRAMPOLINE) +#endif // ndef Py_EMSCRIPTEN_SIGNAL_H diff --git a/Include/internal/pycore_object.h b/Include/internal/pycore_object.h -index f022f82469..8bf4ebac38 100644 +index 7a2f13a21b..3d8ad0c7f2 100644 --- a/Include/internal/pycore_object.h +++ b/Include/internal/pycore_object.h @@ -10,6 +10,7 @@ extern "C" { @@ -122,7 +126,7 @@ index f022f82469..8bf4ebac38 100644 #include "pycore_interp.h" // PyInterpreterState.gc #include "pycore_pystate.h" // _PyInterpreterState_GET() #include "pycore_runtime.h" // _PyRuntime -@@ -277,33 +278,6 @@ extern PyObject* _PyType_GetSubclasses(PyTypeObject *); +@@ -410,33 +411,6 @@ extern int _PyObject_IsInstanceDictEmpty(PyObject *); PyAPI_FUNC(PyObject *) _PyObject_LookupSpecial(PyObject *, PyObject *); @@ -138,12 +142,12 @@ index f022f82469..8bf4ebac38 100644 - * match. - * - * Third party code unintentionally rely on problematic fpcasts. The call -- * trampoline mitigates common occurences of bad fpcasts on Emscripten. +- * trampoline mitigates common occurrences of bad fpcasts on Emscripten. - */ -#if defined(__EMSCRIPTEN__) && defined(PY_CALL_TRAMPOLINE) -#define _PyCFunction_TrampolineCall(meth, self, args) \ - _PyCFunctionWithKeywords_TrampolineCall( \ -- (*(PyCFunctionWithKeywords)(void(*)(void))meth), self, args, NULL) +- (*(PyCFunctionWithKeywords)(void(*)(void))(meth)), (self), (args), NULL) -extern PyObject* _PyCFunctionWithKeywords_TrampolineCall( - PyCFunctionWithKeywords meth, PyObject *, PyObject *, PyObject *); -#else @@ -157,10 +161,10 @@ index f022f82469..8bf4ebac38 100644 } #endif diff --git a/Include/internal/pycore_runtime.h b/Include/internal/pycore_runtime.h -index ae63ae74af..521a1e87a4 100644 +index 99c4b0760b..8ceb6b72ec 100644 --- a/Include/internal/pycore_runtime.h +++ b/Include/internal/pycore_runtime.h -@@ -75,6 +75,11 @@ typedef struct pyruntimestate { +@@ -77,6 +77,11 @@ typedef struct pyruntimestate { /* Is Python fully initialized? Set to 1 by Py_Initialize() */ int initialized; @@ -173,7 +177,7 @@ index ae63ae74af..521a1e87a4 100644 is called again. diff --git a/Objects/descrobject.c b/Objects/descrobject.c -index 4d8b83758b..11badd4860 100644 +index 72ac470394..c62672d619 100644 --- a/Objects/descrobject.c +++ b/Objects/descrobject.c @@ -2,6 +2,7 @@ @@ -184,7 +188,7 @@ index 4d8b83758b..11badd4860 100644 #include "pycore_object.h" // _PyObject_GC_UNTRACK() #include "pycore_pystate.h" // _PyThreadState_GET() #include "pycore_tuple.h" // _PyTuple_ITEMS() -@@ -13,24 +14,11 @@ class property "propertyobject *" "&PyProperty_Type" +@@ -14,24 +15,11 @@ class property "propertyobject *" "&PyProperty_Type" [clinic start generated code]*/ /*[clinic end generated code: output=da39a3ee5e6b4b0d input=556352653fd4c02e]*/ @@ -212,10 +216,10 @@ index 4d8b83758b..11badd4860 100644 static void descr_dealloc(PyDescrObject *descr) diff --git a/Objects/methodobject.c b/Objects/methodobject.c -index 953cf4666d..d344bfa234 100644 +index 51752dec3d..af4794a913 100644 --- a/Objects/methodobject.c +++ b/Objects/methodobject.c -@@ -555,10 +555,3 @@ cfunction_call(PyObject *func, PyObject *args, PyObject *kwargs) +@@ -550,10 +550,3 @@ cfunction_call(PyObject *func, PyObject *args, PyObject *kwargs) return _Py_CheckFunctionResult(tstate, func, result, NULL); } @@ -307,18 +311,18 @@ index 0000000000..8d29393bd8 + +#endif diff --git a/Python/pylifecycle.c b/Python/pylifecycle.c -index 9248e971d9..05fd28b2df 100644 +index a0130fde15..aec638e85e 100644 --- a/Python/pylifecycle.c +++ b/Python/pylifecycle.c -@@ -5,6 +5,7 @@ - #include "pycore_bytesobject.h" // _PyBytes_InitTypes() +@@ -4,6 +4,7 @@ + #include "pycore_ceval.h" // _PyEval_FiniGIL() #include "pycore_context.h" // _PyContext_Init() +#include "pycore_emscripten_trampoline.h" // _Py_EmscriptenTrampoline_Init() #include "pycore_exceptions.h" // _PyExc_InitTypes() #include "pycore_dict.h" // _PyDict_Fini() #include "pycore_fileutils.h" // _Py_ResetForceASCII() -@@ -606,7 +607,9 @@ pycore_init_runtime(_PyRuntimeState *runtime, +@@ -539,7 +540,9 @@ pycore_init_runtime(_PyRuntimeState *runtime, if (_PyStatus_EXCEPTION(status)) { return status; } @@ -329,7 +333,7 @@ index 9248e971d9..05fd28b2df 100644 diff --git a/Python/pystate.c b/Python/pystate.c -index db2ce878af..4284c8655d 100644 +index 1337516aa5..1043e1f2fe 100644 --- a/Python/pystate.c +++ b/Python/pystate.c @@ -2,6 +2,7 @@ @@ -338,10 +342,10 @@ index db2ce878af..4284c8655d 100644 #include "Python.h" +#include "pycore_emscripten_trampoline.h" // _Py_EmscriptenTrampoline_Init() #include "pycore_ceval.h" - #include "pycore_code.h" // stats - #include "pycore_frame.h" -@@ -126,6 +127,7 @@ init_runtime(_PyRuntimeState *runtime, - runtime->unicode_ids.lock = unicode_ids_mutex; + #include "pycore_code.h" // stats + #include "pycore_dtoa.h" // _dtoa_state_INIT() +@@ -451,6 +452,7 @@ init_runtime(_PyRuntimeState *runtime, + runtime->unicode_state.ids.next_index = unicode_next_index; runtime->_initialized = 1; + _Py_EmscriptenTrampoline_Init(runtime); @@ -349,10 +353,10 @@ index db2ce878af..4284c8655d 100644 PyStatus diff --git a/configure b/configure -index 8d2f3f4cc0..25cc425825 100755 +index 99dd1fe595..fe73b21a62 100755 --- a/configure +++ b/configure -@@ -15132,8 +15132,8 @@ PLATFORM_OBJS= +@@ -17432,8 +17432,8 @@ PLATFORM_OBJS= case $ac_sys_system in #( Emscripten) : @@ -364,10 +368,10 @@ index 8d2f3f4cc0..25cc425825 100755 *) : ;; diff --git a/configure.ac b/configure.ac -index 52d5c1f7dd..54003f97dc 100644 +index bd2be94b47..6bb201e0b3 100644 --- a/configure.ac +++ b/configure.ac -@@ -4535,8 +4535,8 @@ PLATFORM_OBJS= +@@ -4811,8 +4811,8 @@ PLATFORM_OBJS= AS_CASE([$ac_sys_system], [Emscripten], [ diff --git a/cpython/patches/0005-gh-93839-Move-Lib-ctypes-test-to-Lib-test-test_ctype.patch b/cpython/patches/0005-gh-93839-Move-Lib-ctypes-test-to-Lib-test-test_ctype.patch deleted file mode 100644 index 89fa88e21c3..00000000000 --- a/cpython/patches/0005-gh-93839-Move-Lib-ctypes-test-to-Lib-test-test_ctype.patch +++ /dev/null @@ -1,752 +0,0 @@ -From cefc59779517e9084ba5ee8ebb2e7f4981912d71 Mon Sep 17 00:00:00 2001 -From: Victor Stinner <[email protected]> -Date: Tue, 21 Jun 2022 10:24:33 +0200 -Subject: [PATCH 5/9] gh-93839: Move Lib/ctypes/test/ to Lib/test/test_ctypes/ - (#94041) - -* Move Lib/ctypes/test/ to Lib/test/test_ctypes/ -* Remove Lib/test/test_ctypes.py -* Update imports and build system. ---- - Lib/ctypes/test/__main__.py | 4 - - Lib/test/leakers/test_ctypes.py | 2 +- - Lib/test/test_ctypes.py | 10 -- - .../test => test/test_ctypes}/__init__.py | 0 - Lib/test/test_ctypes/__main__.py | 4 + - .../test => test/test_ctypes}/test_anon.py | 0 - .../test_ctypes}/test_array_in_pointer.py | 0 - .../test => test/test_ctypes}/test_arrays.py | 2 +- - .../test_ctypes}/test_as_parameter.py | 2 +- - .../test_ctypes}/test_bitfields.py | 2 +- - .../test => test/test_ctypes}/test_buffers.py | 2 +- - .../test => test/test_ctypes}/test_bytes.py | 0 - .../test_ctypes}/test_byteswap.py | 0 - .../test_ctypes}/test_callbacks.py | 2 +- - .../test => test/test_ctypes}/test_cast.py | 2 +- - .../test => test/test_ctypes}/test_cfuncs.py | 2 +- - .../test_ctypes}/test_checkretval.py | 2 +- - .../test => test/test_ctypes}/test_delattr.py | 0 - .../test => test/test_ctypes}/test_errno.py | 0 - .../test => test/test_ctypes}/test_find.py | 0 - .../test_ctypes}/test_frombuffer.py | 0 - .../test => test/test_ctypes}/test_funcptr.py | 0 - .../test_ctypes}/test_functions.py | 2 +- - .../test_ctypes}/test_incomplete.py | 0 - .../test => test/test_ctypes}/test_init.py | 0 - .../test_ctypes}/test_internals.py | 0 - .../test_ctypes}/test_keeprefs.py | 0 - .../test => test/test_ctypes}/test_libc.py | 0 - .../test => test/test_ctypes}/test_loading.py | 0 - .../test_ctypes}/test_macholib.py | 0 - .../test_ctypes}/test_memfunctions.py | 2 +- - .../test => test/test_ctypes}/test_numbers.py | 0 - .../test => test/test_ctypes}/test_objects.py | 8 +- - .../test_ctypes}/test_parameters.py | 2 +- - .../test => test/test_ctypes}/test_pep3118.py | 0 - .../test_ctypes}/test_pickling.py | 0 - .../test_ctypes}/test_pointers.py | 0 - .../test_ctypes}/test_prototypes.py | 2 +- - .../test_ctypes}/test_python_api.py | 0 - .../test_ctypes}/test_random_things.py | 0 - .../test_ctypes}/test_refcounts.py | 0 - .../test => test/test_ctypes}/test_repr.py | 0 - .../test_ctypes}/test_returnfuncptrs.py | 0 - .../test_ctypes}/test_simplesubclasses.py | 0 - .../test => test/test_ctypes}/test_sizes.py | 0 - .../test => test/test_ctypes}/test_slicing.py | 2 +- - .../test_ctypes}/test_stringptr.py | 0 - .../test => test/test_ctypes}/test_strings.py | 2 +- - .../test_ctypes}/test_struct_fields.py | 0 - .../test_ctypes}/test_structures.py | 2 +- - .../test_ctypes}/test_unaligned_structures.py | 0 - .../test => test/test_ctypes}/test_unicode.py | 2 +- - .../test => test/test_ctypes}/test_values.py | 0 - .../test_ctypes}/test_varsize_struct.py | 0 - .../test => test/test_ctypes}/test_win32.py | 0 - .../test_ctypes}/test_wintypes.py | 0 - ...2-06-20-23-04-52.gh-issue-93839.OE3Ybk.rst | 2 + - PCbuild/lib.pyproj | 109 +++++++++--------- - Tools/wasm/wasm_assets.py | 1 - - 59 files changed, 81 insertions(+), 91 deletions(-) - delete mode 100644 Lib/ctypes/test/__main__.py - delete mode 100644 Lib/test/test_ctypes.py - rename Lib/{ctypes/test => test/test_ctypes}/__init__.py (100%) - create mode 100644 Lib/test/test_ctypes/__main__.py - rename Lib/{ctypes/test => test/test_ctypes}/test_anon.py (100%) - rename Lib/{ctypes/test => test/test_ctypes}/test_array_in_pointer.py (100%) - rename Lib/{ctypes/test => test/test_ctypes}/test_arrays.py (99%) - rename Lib/{ctypes/test => test/test_ctypes}/test_as_parameter.py (99%) - rename Lib/{ctypes/test => test/test_ctypes}/test_bitfields.py (99%) - rename Lib/{ctypes/test => test/test_ctypes}/test_buffers.py (98%) - rename Lib/{ctypes/test => test/test_ctypes}/test_bytes.py (100%) - rename Lib/{ctypes/test => test/test_ctypes}/test_byteswap.py (100%) - rename Lib/{ctypes/test => test/test_ctypes}/test_callbacks.py (99%) - rename Lib/{ctypes/test => test/test_ctypes}/test_cast.py (98%) - rename Lib/{ctypes/test => test/test_ctypes}/test_cfuncs.py (99%) - rename Lib/{ctypes/test => test/test_ctypes}/test_checkretval.py (95%) - rename Lib/{ctypes/test => test/test_ctypes}/test_delattr.py (100%) - rename Lib/{ctypes/test => test/test_ctypes}/test_errno.py (100%) - rename Lib/{ctypes/test => test/test_ctypes}/test_find.py (100%) - rename Lib/{ctypes/test => test/test_ctypes}/test_frombuffer.py (100%) - rename Lib/{ctypes/test => test/test_ctypes}/test_funcptr.py (100%) - rename Lib/{ctypes/test => test/test_ctypes}/test_functions.py (99%) - rename Lib/{ctypes/test => test/test_ctypes}/test_incomplete.py (100%) - rename Lib/{ctypes/test => test/test_ctypes}/test_init.py (100%) - rename Lib/{ctypes/test => test/test_ctypes}/test_internals.py (100%) - rename Lib/{ctypes/test => test/test_ctypes}/test_keeprefs.py (100%) - rename Lib/{ctypes/test => test/test_ctypes}/test_libc.py (100%) - rename Lib/{ctypes/test => test/test_ctypes}/test_loading.py (100%) - rename Lib/{ctypes/test => test/test_ctypes}/test_macholib.py (100%) - rename Lib/{ctypes/test => test/test_ctypes}/test_memfunctions.py (98%) - rename Lib/{ctypes/test => test/test_ctypes}/test_numbers.py (100%) - rename Lib/{ctypes/test => test/test_ctypes}/test_objects.py (87%) - rename Lib/{ctypes/test => test/test_ctypes}/test_parameters.py (99%) - rename Lib/{ctypes/test => test/test_ctypes}/test_pep3118.py (100%) - rename Lib/{ctypes/test => test/test_ctypes}/test_pickling.py (100%) - rename Lib/{ctypes/test => test/test_ctypes}/test_pointers.py (100%) - rename Lib/{ctypes/test => test/test_ctypes}/test_prototypes.py (99%) - rename Lib/{ctypes/test => test/test_ctypes}/test_python_api.py (100%) - rename Lib/{ctypes/test => test/test_ctypes}/test_random_things.py (100%) - rename Lib/{ctypes/test => test/test_ctypes}/test_refcounts.py (100%) - rename Lib/{ctypes/test => test/test_ctypes}/test_repr.py (100%) - rename Lib/{ctypes/test => test/test_ctypes}/test_returnfuncptrs.py (100%) - rename Lib/{ctypes/test => test/test_ctypes}/test_simplesubclasses.py (100%) - rename Lib/{ctypes/test => test/test_ctypes}/test_sizes.py (100%) - rename Lib/{ctypes/test => test/test_ctypes}/test_slicing.py (99%) - rename Lib/{ctypes/test => test/test_ctypes}/test_stringptr.py (100%) - rename Lib/{ctypes/test => test/test_ctypes}/test_strings.py (99%) - rename Lib/{ctypes/test => test/test_ctypes}/test_struct_fields.py (100%) - rename Lib/{ctypes/test => test/test_ctypes}/test_structures.py (99%) - rename Lib/{ctypes/test => test/test_ctypes}/test_unaligned_structures.py (100%) - rename Lib/{ctypes/test => test/test_ctypes}/test_unicode.py (97%) - rename Lib/{ctypes/test => test/test_ctypes}/test_values.py (100%) - rename Lib/{ctypes/test => test/test_ctypes}/test_varsize_struct.py (100%) - rename Lib/{ctypes/test => test/test_ctypes}/test_win32.py (100%) - rename Lib/{ctypes/test => test/test_ctypes}/test_wintypes.py (100%) - create mode 100644 Misc/NEWS.d/next/Tests/2022-06-20-23-04-52.gh-issue-93839.OE3Ybk.rst - -diff --git a/Lib/ctypes/test/__main__.py b/Lib/ctypes/test/__main__.py -deleted file mode 100644 -index 362a9ec8cf..0000000000 ---- a/Lib/ctypes/test/__main__.py -+++ /dev/null -@@ -1,4 +0,0 @@ --from ctypes.test import load_tests --import unittest -- --unittest.main() -diff --git a/Lib/test/leakers/test_ctypes.py b/Lib/test/leakers/test_ctypes.py -index 7d7e9ff3a1..ec09ac3699 100644 ---- a/Lib/test/leakers/test_ctypes.py -+++ b/Lib/test/leakers/test_ctypes.py -@@ -1,5 +1,5 @@ - --# Taken from Lib/ctypes/test/test_keeprefs.py, PointerToStructure.test(). -+# Taken from Lib/test/test_ctypes/test_keeprefs.py, PointerToStructure.test(). - - from ctypes import Structure, c_int, POINTER - import gc -diff --git a/Lib/test/test_ctypes.py b/Lib/test/test_ctypes.py -deleted file mode 100644 -index b0a12c9734..0000000000 ---- a/Lib/test/test_ctypes.py -+++ /dev/null -@@ -1,10 +0,0 @@ --import unittest --from test.support.import_helper import import_module -- -- --ctypes_test = import_module('ctypes.test') -- --load_tests = ctypes_test.load_tests -- --if __name__ == "__main__": -- unittest.main() -diff --git a/Lib/ctypes/test/__init__.py b/Lib/test/test_ctypes/__init__.py -similarity index 100% -rename from Lib/ctypes/test/__init__.py -rename to Lib/test/test_ctypes/__init__.py -diff --git a/Lib/test/test_ctypes/__main__.py b/Lib/test/test_ctypes/__main__.py -new file mode 100644 -index 0000000000..3003d4db89 ---- /dev/null -+++ b/Lib/test/test_ctypes/__main__.py -@@ -0,0 +1,4 @@ -+from test.test_ctypes import load_tests -+import unittest -+ -+unittest.main() -diff --git a/Lib/ctypes/test/test_anon.py b/Lib/test/test_ctypes/test_anon.py -similarity index 100% -rename from Lib/ctypes/test/test_anon.py -rename to Lib/test/test_ctypes/test_anon.py -diff --git a/Lib/ctypes/test/test_array_in_pointer.py b/Lib/test/test_ctypes/test_array_in_pointer.py -similarity index 100% -rename from Lib/ctypes/test/test_array_in_pointer.py -rename to Lib/test/test_ctypes/test_array_in_pointer.py -diff --git a/Lib/ctypes/test/test_arrays.py b/Lib/test/test_ctypes/test_arrays.py -similarity index 99% -rename from Lib/ctypes/test/test_arrays.py -rename to Lib/test/test_ctypes/test_arrays.py -index a653877961..78aead26da 100644 ---- a/Lib/ctypes/test/test_arrays.py -+++ b/Lib/test/test_ctypes/test_arrays.py -@@ -3,7 +3,7 @@ - import sys - from ctypes import * - --from ctypes.test import need_symbol -+from test.test_ctypes import need_symbol - - formats = "bBhHiIlLqQfd" - -diff --git a/Lib/ctypes/test/test_as_parameter.py b/Lib/test/test_ctypes/test_as_parameter.py -similarity index 99% -rename from Lib/ctypes/test/test_as_parameter.py -rename to Lib/test/test_ctypes/test_as_parameter.py -index aaaf6e2ceb..36fec572b1 100644 ---- a/Lib/ctypes/test/test_as_parameter.py -+++ b/Lib/test/test_ctypes/test_as_parameter.py -@@ -1,6 +1,6 @@ - import unittest - from ctypes import * --from ctypes.test import need_symbol -+from test.test_ctypes import need_symbol - import _ctypes_test - - dll = CDLL(_ctypes_test.__file__) -diff --git a/Lib/ctypes/test/test_bitfields.py b/Lib/test/test_ctypes/test_bitfields.py -similarity index 99% -rename from Lib/ctypes/test/test_bitfields.py -rename to Lib/test/test_ctypes/test_bitfields.py -index 66acd62e68..dad71a0ba7 100644 ---- a/Lib/ctypes/test/test_bitfields.py -+++ b/Lib/test/test_ctypes/test_bitfields.py -@@ -1,5 +1,5 @@ - from ctypes import * --from ctypes.test import need_symbol -+from test.test_ctypes import need_symbol - from test import support - import unittest - import os -diff --git a/Lib/ctypes/test/test_buffers.py b/Lib/test/test_ctypes/test_buffers.py -similarity index 98% -rename from Lib/ctypes/test/test_buffers.py -rename to Lib/test/test_ctypes/test_buffers.py -index 15782be757..a9be2023aa 100644 ---- a/Lib/ctypes/test/test_buffers.py -+++ b/Lib/test/test_ctypes/test_buffers.py -@@ -1,5 +1,5 @@ - from ctypes import * --from ctypes.test import need_symbol -+from test.test_ctypes import need_symbol - import unittest - - class StringBufferTestCase(unittest.TestCase): -diff --git a/Lib/ctypes/test/test_bytes.py b/Lib/test/test_ctypes/test_bytes.py -similarity index 100% -rename from Lib/ctypes/test/test_bytes.py -rename to Lib/test/test_ctypes/test_bytes.py -diff --git a/Lib/ctypes/test/test_byteswap.py b/Lib/test/test_ctypes/test_byteswap.py -similarity index 100% -rename from Lib/ctypes/test/test_byteswap.py -rename to Lib/test/test_ctypes/test_byteswap.py -diff --git a/Lib/ctypes/test/test_callbacks.py b/Lib/test/test_ctypes/test_callbacks.py -similarity index 99% -rename from Lib/ctypes/test/test_callbacks.py -rename to Lib/test/test_ctypes/test_callbacks.py -index 5c514db511..fbb8feafbf 100644 ---- a/Lib/ctypes/test/test_callbacks.py -+++ b/Lib/test/test_ctypes/test_callbacks.py -@@ -3,7 +3,7 @@ - from test import support - - from ctypes import * --from ctypes.test import need_symbol -+from test.test_ctypes import need_symbol - from _ctypes import CTYPES_MAX_ARGCOUNT - import _ctypes_test - -diff --git a/Lib/ctypes/test/test_cast.py b/Lib/test/test_ctypes/test_cast.py -similarity index 98% -rename from Lib/ctypes/test/test_cast.py -rename to Lib/test/test_ctypes/test_cast.py -index 6878f97328..7ee23b16f1 100644 ---- a/Lib/ctypes/test/test_cast.py -+++ b/Lib/test/test_ctypes/test_cast.py -@@ -1,5 +1,5 @@ - from ctypes import * --from ctypes.test import need_symbol -+from test.test_ctypes import need_symbol - import unittest - import sys - -diff --git a/Lib/ctypes/test/test_cfuncs.py b/Lib/test/test_ctypes/test_cfuncs.py -similarity index 99% -rename from Lib/ctypes/test/test_cfuncs.py -rename to Lib/test/test_ctypes/test_cfuncs.py -index 09b06840bf..7cba4b0e52 100644 ---- a/Lib/ctypes/test/test_cfuncs.py -+++ b/Lib/test/test_ctypes/test_cfuncs.py -@@ -3,7 +3,7 @@ - - import unittest - from ctypes import * --from ctypes.test import need_symbol -+from test.test_ctypes import need_symbol - - import _ctypes_test - -diff --git a/Lib/ctypes/test/test_checkretval.py b/Lib/test/test_ctypes/test_checkretval.py -similarity index 95% -rename from Lib/ctypes/test/test_checkretval.py -rename to Lib/test/test_ctypes/test_checkretval.py -index e9567dc391..1492099f4b 100644 ---- a/Lib/ctypes/test/test_checkretval.py -+++ b/Lib/test/test_ctypes/test_checkretval.py -@@ -1,7 +1,7 @@ - import unittest - - from ctypes import * --from ctypes.test import need_symbol -+from test.test_ctypes import need_symbol - - class CHECKED(c_int): - def _check_retval_(value): -diff --git a/Lib/ctypes/test/test_delattr.py b/Lib/test/test_ctypes/test_delattr.py -similarity index 100% -rename from Lib/ctypes/test/test_delattr.py -rename to Lib/test/test_ctypes/test_delattr.py -diff --git a/Lib/ctypes/test/test_errno.py b/Lib/test/test_ctypes/test_errno.py -similarity index 100% -rename from Lib/ctypes/test/test_errno.py -rename to Lib/test/test_ctypes/test_errno.py -diff --git a/Lib/ctypes/test/test_find.py b/Lib/test/test_ctypes/test_find.py -similarity index 100% -rename from Lib/ctypes/test/test_find.py -rename to Lib/test/test_ctypes/test_find.py -diff --git a/Lib/ctypes/test/test_frombuffer.py b/Lib/test/test_ctypes/test_frombuffer.py -similarity index 100% -rename from Lib/ctypes/test/test_frombuffer.py -rename to Lib/test/test_ctypes/test_frombuffer.py -diff --git a/Lib/ctypes/test/test_funcptr.py b/Lib/test/test_ctypes/test_funcptr.py -similarity index 100% -rename from Lib/ctypes/test/test_funcptr.py -rename to Lib/test/test_ctypes/test_funcptr.py -diff --git a/Lib/ctypes/test/test_functions.py b/Lib/test/test_ctypes/test_functions.py -similarity index 99% -rename from Lib/ctypes/test/test_functions.py -rename to Lib/test/test_ctypes/test_functions.py -index d1fbc32a41..4110066bda 100644 ---- a/Lib/ctypes/test/test_functions.py -+++ b/Lib/test/test_ctypes/test_functions.py -@@ -6,7 +6,7 @@ - """ - - from ctypes import * --from ctypes.test import need_symbol -+from test.test_ctypes import need_symbol - import sys, unittest - - try: -diff --git a/Lib/ctypes/test/test_incomplete.py b/Lib/test/test_ctypes/test_incomplete.py -similarity index 100% -rename from Lib/ctypes/test/test_incomplete.py -rename to Lib/test/test_ctypes/test_incomplete.py -diff --git a/Lib/ctypes/test/test_init.py b/Lib/test/test_ctypes/test_init.py -similarity index 100% -rename from Lib/ctypes/test/test_init.py -rename to Lib/test/test_ctypes/test_init.py -diff --git a/Lib/ctypes/test/test_internals.py b/Lib/test/test_ctypes/test_internals.py -similarity index 100% -rename from Lib/ctypes/test/test_internals.py -rename to Lib/test/test_ctypes/test_internals.py -diff --git a/Lib/ctypes/test/test_keeprefs.py b/Lib/test/test_ctypes/test_keeprefs.py -similarity index 100% -rename from Lib/ctypes/test/test_keeprefs.py -rename to Lib/test/test_ctypes/test_keeprefs.py -diff --git a/Lib/ctypes/test/test_libc.py b/Lib/test/test_ctypes/test_libc.py -similarity index 100% -rename from Lib/ctypes/test/test_libc.py -rename to Lib/test/test_ctypes/test_libc.py -diff --git a/Lib/ctypes/test/test_loading.py b/Lib/test/test_ctypes/test_loading.py -similarity index 100% -rename from Lib/ctypes/test/test_loading.py -rename to Lib/test/test_ctypes/test_loading.py -diff --git a/Lib/ctypes/test/test_macholib.py b/Lib/test/test_ctypes/test_macholib.py -similarity index 100% -rename from Lib/ctypes/test/test_macholib.py -rename to Lib/test/test_ctypes/test_macholib.py -diff --git a/Lib/ctypes/test/test_memfunctions.py b/Lib/test/test_ctypes/test_memfunctions.py -similarity index 98% -rename from Lib/ctypes/test/test_memfunctions.py -rename to Lib/test/test_ctypes/test_memfunctions.py -index e784b9a706..d5c9735211 100644 ---- a/Lib/ctypes/test/test_memfunctions.py -+++ b/Lib/test/test_ctypes/test_memfunctions.py -@@ -2,7 +2,7 @@ - from test import support - import unittest - from ctypes import * --from ctypes.test import need_symbol -+from test.test_ctypes import need_symbol - - class MemFunctionsTest(unittest.TestCase): - @unittest.skip('test disabled') -diff --git a/Lib/ctypes/test/test_numbers.py b/Lib/test/test_ctypes/test_numbers.py -similarity index 100% -rename from Lib/ctypes/test/test_numbers.py -rename to Lib/test/test_ctypes/test_numbers.py -diff --git a/Lib/ctypes/test/test_objects.py b/Lib/test/test_ctypes/test_objects.py -similarity index 87% -rename from Lib/ctypes/test/test_objects.py -rename to Lib/test/test_ctypes/test_objects.py -index 19e3dc1f2d..44a3c61ad7 100644 ---- a/Lib/ctypes/test/test_objects.py -+++ b/Lib/test/test_ctypes/test_objects.py -@@ -42,7 +42,7 @@ - of 'x' ('_b_base_' is either None, or the root object owning the memory block): - - >>> print(x.array._b_base_) # doctest: +ELLIPSIS --<ctypes.test.test_objects.X object at 0x...> -+<test.test_ctypes.test_objects.X object at 0x...> - >>> - - >>> x.array[0] = b'spam spam spam' -@@ -56,12 +56,12 @@ - - import unittest, doctest - --import ctypes.test.test_objects -+import test.test_ctypes.test_objects - - class TestCase(unittest.TestCase): - def test(self): -- failures, tests = doctest.testmod(ctypes.test.test_objects) -+ failures, tests = doctest.testmod(test.test_ctypes.test_objects) - self.assertFalse(failures, 'doctests failed, see output above') - - if __name__ == '__main__': -- doctest.testmod(ctypes.test.test_objects) -+ doctest.testmod(test.test_ctypes.test_objects) -diff --git a/Lib/ctypes/test/test_parameters.py b/Lib/test/test_ctypes/test_parameters.py -similarity index 99% -rename from Lib/ctypes/test/test_parameters.py -rename to Lib/test/test_ctypes/test_parameters.py -index 59c94e3cc2..00a1953e97 100644 ---- a/Lib/ctypes/test/test_parameters.py -+++ b/Lib/test/test_ctypes/test_parameters.py -@@ -1,5 +1,5 @@ - import unittest --from ctypes.test import need_symbol -+from test.test_ctypes import need_symbol - import test.support - - class SimpleTypesTestCase(unittest.TestCase): -diff --git a/Lib/ctypes/test/test_pep3118.py b/Lib/test/test_ctypes/test_pep3118.py -similarity index 100% -rename from Lib/ctypes/test/test_pep3118.py -rename to Lib/test/test_ctypes/test_pep3118.py -diff --git a/Lib/ctypes/test/test_pickling.py b/Lib/test/test_ctypes/test_pickling.py -similarity index 100% -rename from Lib/ctypes/test/test_pickling.py -rename to Lib/test/test_ctypes/test_pickling.py -diff --git a/Lib/ctypes/test/test_pointers.py b/Lib/test/test_ctypes/test_pointers.py -similarity index 100% -rename from Lib/ctypes/test/test_pointers.py -rename to Lib/test/test_ctypes/test_pointers.py -diff --git a/Lib/ctypes/test/test_prototypes.py b/Lib/test/test_ctypes/test_prototypes.py -similarity index 99% -rename from Lib/ctypes/test/test_prototypes.py -rename to Lib/test/test_ctypes/test_prototypes.py -index cd0c649de3..bf27561487 100644 ---- a/Lib/ctypes/test/test_prototypes.py -+++ b/Lib/test/test_ctypes/test_prototypes.py -@@ -1,5 +1,5 @@ - from ctypes import * --from ctypes.test import need_symbol -+from test.test_ctypes import need_symbol - import unittest - - # IMPORTANT INFO: -diff --git a/Lib/ctypes/test/test_python_api.py b/Lib/test/test_ctypes/test_python_api.py -similarity index 100% -rename from Lib/ctypes/test/test_python_api.py -rename to Lib/test/test_ctypes/test_python_api.py -diff --git a/Lib/ctypes/test/test_random_things.py b/Lib/test/test_ctypes/test_random_things.py -similarity index 100% -rename from Lib/ctypes/test/test_random_things.py -rename to Lib/test/test_ctypes/test_random_things.py -diff --git a/Lib/ctypes/test/test_refcounts.py b/Lib/test/test_ctypes/test_refcounts.py -similarity index 100% -rename from Lib/ctypes/test/test_refcounts.py -rename to Lib/test/test_ctypes/test_refcounts.py -diff --git a/Lib/ctypes/test/test_repr.py b/Lib/test/test_ctypes/test_repr.py -similarity index 100% -rename from Lib/ctypes/test/test_repr.py -rename to Lib/test/test_ctypes/test_repr.py -diff --git a/Lib/ctypes/test/test_returnfuncptrs.py b/Lib/test/test_ctypes/test_returnfuncptrs.py -similarity index 100% -rename from Lib/ctypes/test/test_returnfuncptrs.py -rename to Lib/test/test_ctypes/test_returnfuncptrs.py -diff --git a/Lib/ctypes/test/test_simplesubclasses.py b/Lib/test/test_ctypes/test_simplesubclasses.py -similarity index 100% -rename from Lib/ctypes/test/test_simplesubclasses.py -rename to Lib/test/test_ctypes/test_simplesubclasses.py -diff --git a/Lib/ctypes/test/test_sizes.py b/Lib/test/test_ctypes/test_sizes.py -similarity index 100% -rename from Lib/ctypes/test/test_sizes.py -rename to Lib/test/test_ctypes/test_sizes.py -diff --git a/Lib/ctypes/test/test_slicing.py b/Lib/test/test_ctypes/test_slicing.py -similarity index 99% -rename from Lib/ctypes/test/test_slicing.py -rename to Lib/test/test_ctypes/test_slicing.py -index a3932f1767..b3e68f9a82 100644 ---- a/Lib/ctypes/test/test_slicing.py -+++ b/Lib/test/test_ctypes/test_slicing.py -@@ -1,6 +1,6 @@ - import unittest - from ctypes import * --from ctypes.test import need_symbol -+from test.test_ctypes import need_symbol - - import _ctypes_test - -diff --git a/Lib/ctypes/test/test_stringptr.py b/Lib/test/test_ctypes/test_stringptr.py -similarity index 100% -rename from Lib/ctypes/test/test_stringptr.py -rename to Lib/test/test_ctypes/test_stringptr.py -diff --git a/Lib/ctypes/test/test_strings.py b/Lib/test/test_ctypes/test_strings.py -similarity index 99% -rename from Lib/ctypes/test/test_strings.py -rename to Lib/test/test_ctypes/test_strings.py -index 12e208828a..a9003be3f5 100644 ---- a/Lib/ctypes/test/test_strings.py -+++ b/Lib/test/test_ctypes/test_strings.py -@@ -1,6 +1,6 @@ - import unittest - from ctypes import * --from ctypes.test import need_symbol -+from test.test_ctypes import need_symbol - - class StringArrayTestCase(unittest.TestCase): - def test(self): -diff --git a/Lib/ctypes/test/test_struct_fields.py b/Lib/test/test_ctypes/test_struct_fields.py -similarity index 100% -rename from Lib/ctypes/test/test_struct_fields.py -rename to Lib/test/test_ctypes/test_struct_fields.py -diff --git a/Lib/ctypes/test/test_structures.py b/Lib/test/test_ctypes/test_structures.py -similarity index 99% -rename from Lib/ctypes/test/test_structures.py -rename to Lib/test/test_ctypes/test_structures.py -index 2168aa7df7..a40ce3e866 100644 ---- a/Lib/ctypes/test/test_structures.py -+++ b/Lib/test/test_ctypes/test_structures.py -@@ -2,7 +2,7 @@ - import sys - import unittest - from ctypes import * --from ctypes.test import need_symbol -+from test.test_ctypes import need_symbol - from struct import calcsize - import _ctypes_test - from test import support -diff --git a/Lib/ctypes/test/test_unaligned_structures.py b/Lib/test/test_ctypes/test_unaligned_structures.py -similarity index 100% -rename from Lib/ctypes/test/test_unaligned_structures.py -rename to Lib/test/test_ctypes/test_unaligned_structures.py -diff --git a/Lib/ctypes/test/test_unicode.py b/Lib/test/test_ctypes/test_unicode.py -similarity index 97% -rename from Lib/ctypes/test/test_unicode.py -rename to Lib/test/test_ctypes/test_unicode.py -index 60c75424b7..319cb3b1dc 100644 ---- a/Lib/ctypes/test/test_unicode.py -+++ b/Lib/test/test_ctypes/test_unicode.py -@@ -1,6 +1,6 @@ - import unittest - import ctypes --from ctypes.test import need_symbol -+from test.test_ctypes import need_symbol - - import _ctypes_test - -diff --git a/Lib/ctypes/test/test_values.py b/Lib/test/test_ctypes/test_values.py -similarity index 100% -rename from Lib/ctypes/test/test_values.py -rename to Lib/test/test_ctypes/test_values.py -diff --git a/Lib/ctypes/test/test_varsize_struct.py b/Lib/test/test_ctypes/test_varsize_struct.py -similarity index 100% -rename from Lib/ctypes/test/test_varsize_struct.py -rename to Lib/test/test_ctypes/test_varsize_struct.py -diff --git a/Lib/ctypes/test/test_win32.py b/Lib/test/test_ctypes/test_win32.py -similarity index 100% -rename from Lib/ctypes/test/test_win32.py -rename to Lib/test/test_ctypes/test_win32.py -diff --git a/Lib/ctypes/test/test_wintypes.py b/Lib/test/test_ctypes/test_wintypes.py -similarity index 100% -rename from Lib/ctypes/test/test_wintypes.py -rename to Lib/test/test_ctypes/test_wintypes.py -diff --git a/Misc/NEWS.d/next/Tests/2022-06-20-23-04-52.gh-issue-93839.OE3Ybk.rst b/Misc/NEWS.d/next/Tests/2022-06-20-23-04-52.gh-issue-93839.OE3Ybk.rst -new file mode 100644 -index 0000000000..121b64b133 ---- /dev/null -+++ b/Misc/NEWS.d/next/Tests/2022-06-20-23-04-52.gh-issue-93839.OE3Ybk.rst -@@ -0,0 +1,2 @@ -+Move ``Lib/ctypes/test/`` to ``Lib/test/test_ctypes/``. Patch by Victor -+Stinner. -diff --git a/PCbuild/lib.pyproj b/PCbuild/lib.pyproj -index 43c570f1da..692b083349 100644 ---- a/PCbuild/lib.pyproj -+++ b/PCbuild/lib.pyproj -@@ -83,59 +83,6 @@ - <Compile Include="ctypes\macholib\dylib.py" /> - <Compile Include="ctypes\macholib\framework.py" /> - <Compile Include="ctypes\macholib\__init__.py" /> -- <Compile Include="ctypes\test\test_anon.py" /> -- <Compile Include="ctypes\test\test_arrays.py" /> -- <Compile Include="ctypes\test\test_array_in_pointer.py" /> -- <Compile Include="ctypes\test\test_as_parameter.py" /> -- <Compile Include="ctypes\test\test_bitfields.py" /> -- <Compile Include="ctypes\test\test_buffers.py" /> -- <Compile Include="ctypes\test\test_bytes.py" /> -- <Compile Include="ctypes\test\test_byteswap.py" /> -- <Compile Include="ctypes\test\test_callbacks.py" /> -- <Compile Include="ctypes\test\test_cast.py" /> -- <Compile Include="ctypes\test\test_cfuncs.py" /> -- <Compile Include="ctypes\test\test_checkretval.py" /> -- <Compile Include="ctypes\test\test_delattr.py" /> -- <Compile Include="ctypes\test\test_errno.py" /> -- <Compile Include="ctypes\test\test_find.py" /> -- <Compile Include="ctypes\test\test_frombuffer.py" /> -- <Compile Include="ctypes\test\test_funcptr.py" /> -- <Compile Include="ctypes\test\test_functions.py" /> -- <Compile Include="ctypes\test\test_incomplete.py" /> -- <Compile Include="ctypes\test\test_init.py" /> -- <Compile Include="ctypes\test\test_internals.py" /> -- <Compile Include="ctypes\test\test_keeprefs.py" /> -- <Compile Include="ctypes\test\test_libc.py" /> -- <Compile Include="ctypes\test\test_loading.py" /> -- <Compile Include="ctypes\test\test_macholib.py" /> -- <Compile Include="ctypes\test\test_memfunctions.py" /> -- <Compile Include="ctypes\test\test_numbers.py" /> -- <Compile Include="ctypes\test\test_objects.py" /> -- <Compile Include="ctypes\test\test_parameters.py" /> -- <Compile Include="ctypes\test\test_pep3118.py" /> -- <Compile Include="ctypes\test\test_pickling.py" /> -- <Compile Include="ctypes\test\test_pointers.py" /> -- <Compile Include="ctypes\test\test_prototypes.py" /> -- <Compile Include="ctypes\test\test_python_api.py" /> -- <Compile Include="ctypes\test\test_random_things.py" /> -- <Compile Include="ctypes\test\test_refcounts.py" /> -- <Compile Include="ctypes\test\test_repr.py" /> -- <Compile Include="ctypes\test\test_returnfuncptrs.py" /> -- <Compile Include="ctypes\test\test_simplesubclasses.py" /> -- <Compile Include="ctypes\test\test_sizes.py" /> -- <Compile Include="ctypes\test\test_slicing.py" /> -- <Compile Include="ctypes\test\test_stringptr.py" /> -- <Compile Include="ctypes\test\test_strings.py" /> -- <Compile Include="ctypes\test\test_structures.py" /> -- <Compile Include="ctypes\test\test_struct_fields.py" /> -- <Compile Include="ctypes\test\test_unaligned_structures.py" /> -- <Compile Include="ctypes\test\test_unicode.py" /> -- <Compile Include="ctypes\test\test_values.py" /> -- <Compile Include="ctypes\test\test_varsize_struct.py" /> -- <Compile Include="ctypes\test\test_win32.py" /> -- <Compile Include="ctypes\test\test_wintypes.py" /> -- <Compile Include="ctypes\test\__init__.py" /> -- <Compile Include="ctypes\test\__main__.py" /> - <Compile Include="ctypes\util.py" /> - <Compile Include="ctypes\wintypes.py" /> - <Compile Include="ctypes\_endian.py" /> -@@ -944,7 +891,59 @@ - <Compile Include="test\test_crashers.py" /> - <Compile Include="test\test_crypt.py" /> - <Compile Include="test\test_csv.py" /> -- <Compile Include="test\test_ctypes.py" /> -+ <Compile Include="test\test_ctypes\test_anon.py" /> -+ <Compile Include="test\test_ctypes\test_arrays.py" /> -+ <Compile Include="test\test_ctypes\test_array_in_pointer.py" /> -+ <Compile Include="test\test_ctypes\test_as_parameter.py" /> -+ <Compile Include="test\test_ctypes\test_bitfields.py" /> -+ <Compile Include="test\test_ctypes\test_buffers.py" /> -+ <Compile Include="test\test_ctypes\test_bytes.py" /> -+ <Compile Include="test\test_ctypes\test_byteswap.py" /> -+ <Compile Include="test\test_ctypes\test_callbacks.py" /> -+ <Compile Include="test\test_ctypes\test_cast.py" /> -+ <Compile Include="test\test_ctypes\test_cfuncs.py" /> -+ <Compile Include="test\test_ctypes\test_checkretval.py" /> -+ <Compile Include="test\test_ctypes\test_delattr.py" /> -+ <Compile Include="test\test_ctypes\test_errno.py" /> -+ <Compile Include="test\test_ctypes\test_find.py" /> -+ <Compile Include="test\test_ctypes\test_frombuffer.py" /> -+ <Compile Include="test\test_ctypes\test_funcptr.py" /> -+ <Compile Include="test\test_ctypes\test_functions.py" /> -+ <Compile Include="test\test_ctypes\test_incomplete.py" /> -+ <Compile Include="test\test_ctypes\test_init.py" /> -+ <Compile Include="test\test_ctypes\test_internals.py" /> -+ <Compile Include="test\test_ctypes\test_keeprefs.py" /> -+ <Compile Include="test\test_ctypes\test_libc.py" /> -+ <Compile Include="test\test_ctypes\test_loading.py" /> -+ <Compile Include="test\test_ctypes\test_macholib.py" /> -+ <Compile Include="test\test_ctypes\test_memfunctions.py" /> -+ <Compile Include="test\test_ctypes\test_numbers.py" /> -+ <Compile Include="test\test_ctypes\test_objects.py" /> -+ <Compile Include="test\test_ctypes\test_parameters.py" /> -+ <Compile Include="test\test_ctypes\test_pep3118.py" /> -+ <Compile Include="test\test_ctypes\test_pickling.py" /> -+ <Compile Include="test\test_ctypes\test_pointers.py" /> -+ <Compile Include="test\test_ctypes\test_prototypes.py" /> -+ <Compile Include="test\test_ctypes\test_python_api.py" /> -+ <Compile Include="test\test_ctypes\test_random_things.py" /> -+ <Compile Include="test\test_ctypes\test_refcounts.py" /> -+ <Compile Include="test\test_ctypes\test_repr.py" /> -+ <Compile Include="test\test_ctypes\test_returnfuncptrs.py" /> -+ <Compile Include="test\test_ctypes\test_simplesubclasses.py" /> -+ <Compile Include="test\test_ctypes\test_sizes.py" /> -+ <Compile Include="test\test_ctypes\test_slicing.py" /> -+ <Compile Include="test\test_ctypes\test_stringptr.py" /> -+ <Compile Include="test\test_ctypes\test_strings.py" /> -+ <Compile Include="test\test_ctypes\test_structures.py" /> -+ <Compile Include="test\test_ctypes\test_struct_fields.py" /> -+ <Compile Include="test\test_ctypes\test_unaligned_structures.py" /> -+ <Compile Include="test\test_ctypes\test_unicode.py" /> -+ <Compile Include="test\test_ctypes\test_values.py" /> -+ <Compile Include="test\test_ctypes\test_varsize_struct.py" /> -+ <Compile Include="test\test_ctypes\test_win32.py" /> -+ <Compile Include="test\test_ctypes\test_wintypes.py" /> -+ <Compile Include="test\test_ctypes\__init__.py" /> -+ <Compile Include="test\test_ctypes\__main__.py" /> - <Compile Include="test\test_curses.py" /> - <Compile Include="test\test_datetime.py" /> - <Compile Include="test\test_dbm.py" /> -@@ -1725,7 +1724,6 @@ - <Folder Include="concurrent\futures" /> - <Folder Include="ctypes" /> - <Folder Include="ctypes\macholib" /> -- <Folder Include="ctypes\test" /> - <Folder Include="curses" /> - <Folder Include="dbm" /> - <Folder Include="distutils" /> -@@ -1769,6 +1767,7 @@ - <Folder Include="test\subprocessdata" /> - <Folder Include="test\support" /> - <Folder Include="test\test_asyncio" /> -+ <Folder Include="test\test_ctypes" /> - <Folder Include="test\test_email" /> - <Folder Include="test\test_email\data" /> - <Folder Include="test\test_import" /> -diff --git a/Tools/wasm/wasm_assets.py b/Tools/wasm/wasm_assets.py -index 6557e3f37a..aa09d2dda8 100755 ---- a/Tools/wasm/wasm_assets.py -+++ b/Tools/wasm/wasm_assets.py -@@ -112,7 +112,6 @@ - - # regression test sub directories - OMIT_SUBDIRS = ( -- "ctypes/test/", - "tkinter/test/", - "unittest/test/", - ) --- -2.25.1 - diff --git a/cpython/patches/0009-Fix-LONG_BIT-constant-to-be-always-32bit.patch b/cpython/patches/0006-Fix-LONG_BIT-constant-to-be-always-32bit.patch similarity index 78% rename from cpython/patches/0009-Fix-LONG_BIT-constant-to-be-always-32bit.patch rename to cpython/patches/0006-Fix-LONG_BIT-constant-to-be-always-32bit.patch index 450ce7c10c3..7c4eb7425af 100644 --- a/cpython/patches/0009-Fix-LONG_BIT-constant-to-be-always-32bit.patch +++ b/cpython/patches/0006-Fix-LONG_BIT-constant-to-be-always-32bit.patch @@ -1,7 +1,7 @@ -From d873a0a76209b13b86176d674c44a2c073a2faaf Mon Sep 17 00:00:00 2001 +From b6369c9a0192a617548812203b78862da8a49e52 Mon Sep 17 00:00:00 2001 From: ryanking13 <[email protected]> Date: Fri, 12 Jan 2024 00:52:57 +0900 -Subject: [PATCH 9/9] Fix LONG_BIT constant to be always 32bit +Subject: [PATCH 6/6] Fix LONG_BIT constant to be always 32bit Starting from Emscripten 3.1.50, there is an issue where LONG_BIT is calculated to 64 for some reason. This is very strange because LONG_MAX @@ -17,10 +17,10 @@ Related: https://github.com/emscripten-core/emscripten/pull/20752 1 file changed, 1 insertion(+) diff --git a/Include/pyport.h b/Include/pyport.h -index b3b8b6f09a..157816de42 100644 +index 35eca7234c..1c277e2377 100644 --- a/Include/pyport.h +++ b/Include/pyport.h -@@ -583,6 +583,7 @@ extern char * _getpty(int *, int, mode_t, int); +@@ -573,6 +573,7 @@ extern char * _getpty(int *, int, mode_t, int); #define LONG_MIN (-LONG_MAX-1) #endif diff --git a/cpython/patches/0006-gh-93839-Move-Lib-unttest-test-to-Lib-test-test_unit.patch b/cpython/patches/0006-gh-93839-Move-Lib-unttest-test-to-Lib-test-test_unit.patch deleted file mode 100644 index 1d420f7687e..00000000000 --- a/cpython/patches/0006-gh-93839-Move-Lib-unttest-test-to-Lib-test-test_unit.patch +++ /dev/null @@ -1,31 +0,0 @@ -From 7c3f1d534ee32f2a2d8cb5ab9b117d26ae4d1eb4 Mon Sep 17 00:00:00 2001 -From: Victor Stinner <[email protected]> -Date: Tue, 21 Jun 2022 10:27:59 +0200 -Subject: [PATCH 6/9] gh-93839: Move Lib/unttest/test/ to - Lib/test/test_unittest/ (#94043) - -* Move Lib/unittest/test/ to Lib/test/test_unittest/ -* Remove Lib/test/test_unittest.py -* Replace unittest.test with test.test_unittest -* Remove unittest.load_tests() -* Rewrite unittest __init__.py and __main__.py -* Update build system, CODEOWNERS, and wasm_assets.py ---- - Tools/wasm/wasm_assets.py | 1 - - 1 file changed, 1 deletion(-) - -diff --git a/Tools/wasm/wasm_assets.py b/Tools/wasm/wasm_assets.py -index aa09d2dda8..30aad35760 100755 ---- a/Tools/wasm/wasm_assets.py -+++ b/Tools/wasm/wasm_assets.py -@@ -113,7 +113,6 @@ - # regression test sub directories - OMIT_SUBDIRS = ( - "tkinter/test/", -- "unittest/test/", - ) - - SYSCONFIG_NAMES = ( --- -2.25.1 - diff --git a/cpython/patches/0007-Move-test-directories.patch b/cpython/patches/0007-Move-test-directories.patch deleted file mode 100644 index 93af44adb5c..00000000000 --- a/cpython/patches/0007-Move-test-directories.patch +++ /dev/null @@ -1,37 +0,0 @@ -From 9b04fdb7888bcfd0a22238f8e47d2f2ab822a32d Mon Sep 17 00:00:00 2001 -From: ryanking13 <[email protected]> -Date: Wed, 25 Jan 2023 15:54:16 +0900 -Subject: [PATCH 7/9] Move test directories - ---- - Makefile.pre.in | 7 +++---- - 1 file changed, 3 insertions(+), 4 deletions(-) - -diff --git a/Makefile.pre.in b/Makefile.pre.in -index c61b093fc9..1245cbc602 100644 ---- a/Makefile.pre.in -+++ b/Makefile.pre.in -@@ -1938,8 +1938,7 @@ LIBSUBDIRS= asyncio \ - xmlrpc \ - zoneinfo \ - __phello__ --TESTSUBDIRS= ctypes/test \ -- distutils/tests \ -+TESTSUBDIRS= distutils/tests \ - idlelib/idle_test \ - lib2to3/tests \ - lib2to3/tests/data \ -@@ -2057,8 +2056,8 @@ TESTSUBDIRS= ctypes/test \ - tkinter/test \ - tkinter/test/test_tkinter \ - tkinter/test/test_ttk \ -- unittest/test \ -- unittest/test/testmock -+ test/test_ctypes \ -+ test/test_unittest test/test_unittest/testmock - - TEST_MODULES=@TEST_MODULES@ - libinstall: all $(srcdir)/Modules/xxmodule.c --- -2.25.1 - diff --git a/docs/project/changelog.md b/docs/project/changelog.md index 14aab711675..a70a9cfa0a5 100644 --- a/docs/project/changelog.md +++ b/docs/project/changelog.md @@ -16,8 +16,8 @@ myst: ## Unreleased -- Upgraded Python to v3.11.7 - {pr}`4431` +- Upgraded Python to v3.12.1 + {pr}`4431` {pr}`4435` - Upgraded CoolProp to 6.6.0 {pr}`4397`. diff --git a/docs/usage/wasm-constraints.md b/docs/usage/wasm-constraints.md index eda105c8c31..781e16ca109 100644 --- a/docs/usage/wasm-constraints.md +++ b/docs/usage/wasm-constraints.md @@ -18,7 +18,6 @@ However this has a significant impact on the download size. Instead, it is better to load individual modules as needed using {js:func}`pyodide.loadPackage` or {py:func}`micropip.install`. -- distutils - ssl - lzma - sqlite3 diff --git a/environment.yml b/environment.yml index 22418fe635f..24c2c1e219a 100644 --- a/environment.yml +++ b/environment.yml @@ -2,7 +2,7 @@ name: pyodide-env channels: - conda-forge dependencies: - - python=3.11 + - python=3.12 - nodejs - ccache - f2c diff --git a/packages/bcrypt/patches/0001-Use-patched-instant.patch b/packages/bcrypt/patches/0001-Use-patched-instant.patch deleted file mode 100644 index a9e0e5decbe..00000000000 --- a/packages/bcrypt/patches/0001-Use-patched-instant.patch +++ /dev/null @@ -1,26 +0,0 @@ -From cc353c8a68246c79e84a075bb246eff1ce4c1acd Mon Sep 17 00:00:00 2001 -From: Hood Chatham <[email protected]> -Date: Sun, 18 Sep 2022 17:47:16 -0700 -Subject: [PATCH] Use patched instant - ---- - src/_bcrypt/Cargo.toml | 3 +++ - 1 file changed, 3 insertions(+) - -diff --git a/src/_bcrypt/Cargo.toml b/src/_bcrypt/Cargo.toml -index 6c75126..bc89ade 100644 ---- a/src/_bcrypt/Cargo.toml -+++ b/src/_bcrypt/Cargo.toml -@@ -5,6 +5,9 @@ authors = ["The bcrypt developers <[email protected]>"] - edition = "2018" - publish = false - -+[patch.crates-io] -+instant = { path = "../../instant" } -+ - [dependencies] - pyo3 = { version = "0.15.2" } - bcrypt = "0.13" --- -2.25.1 - diff --git a/packages/cryptography/patches/0001-Use-patched-instant.patch b/packages/cryptography/patches/0001-Use-patched-instant.patch deleted file mode 100644 index dd1d0d19785..00000000000 --- a/packages/cryptography/patches/0001-Use-patched-instant.patch +++ /dev/null @@ -1,26 +0,0 @@ -From 1c579acc950f6cacfe1c2ff045708fc864dea684 Mon Sep 17 00:00:00 2001 -From: Hood Chatham <[email protected]> -Date: Sun, 18 Sep 2022 17:44:03 -0700 -Subject: [PATCH] Use patched instant - ---- - src/rust/Cargo.toml | 3 +++ - 1 file changed, 3 insertions(+) - -diff --git a/src/rust/Cargo.toml b/src/rust/Cargo.toml -index 617167d04..b51c2d2f2 100644 ---- a/src/rust/Cargo.toml -+++ b/src/rust/Cargo.toml -@@ -5,6 +5,9 @@ authors = ["The cryptography developers <[email protected]>"] - edition = "2018" - publish = false - -+[patch.crates-io] -+instant = { path = "../../instant" } -+ - [dependencies] - lazy_static = "1" - pyo3 = { version = "0.15.1" } --- -2.25.1 - diff --git a/packages/distutils/meta.yaml b/packages/distutils/meta.yaml deleted file mode 100644 index 7190ea634e7..00000000000 --- a/packages/distutils/meta.yaml +++ /dev/null @@ -1,14 +0,0 @@ -package: - name: distutils - version: 1.0.0 # Nonsense - tag: - - always - top-level: - - distutils -source: - sha256: $(PYTHON_ARCHIVE_SHA256) - url: $(PYTHON_ARCHIVE_URL) -build: - type: cpython_module - script: | - cd Lib && tar --exclude=__pycache__ --exclude=tests -cf - distutils | tar -C $DISTDIR -xf - diff --git a/packages/distutils/test_distutils.py b/packages/distutils/test_distutils.py deleted file mode 100644 index 5d6117801da..00000000000 --- a/packages/distutils/test_distutils.py +++ /dev/null @@ -1,34 +0,0 @@ -from pytest_pyodide import run_in_pyodide - - -@run_in_pyodide(packages=["test", "distutils"], pytest_assert_rewrites=False) -def test_distutils(selenium): - import sys - import unittest - import unittest.mock - from test.libregrtest.main import main - - name = "test_distutils" - - ignore_tests = [ - "test_check_environ_getpwuid", # no pwd - "test_get_platform", # no _osx_support - "test_simple_built", - "test_optional_extension", # thread - "test_customize_compiler_before_get_config_vars", # subprocess - "test_spawn", # subprocess - "test_debug_mode", # no _osx_support - "test_record", # no _osx_support - "test_get_config_h_filename", # /include/python3.10/pyconfig.h not exists - "test_srcdir", # /lib/python3.10/config-3.10-wasm32-emscripten not exists - "test_mkpath_with_custom_mode", - "test_finalize_options", # no executable - ] - match_tests = [[pat, False] for pat in ignore_tests] - - sys.modules["_osx_support"] = unittest.mock.Mock() - try: - main([name], match_tests=match_tests, verbose=True, verbose3=True) - except SystemExit as e: - if e.code != 0: - raise RuntimeError(f"Failed with code: {e.code}") from None diff --git a/packages/gdal/meta.yaml b/packages/gdal/meta.yaml index 76f8d355fed..302515e8408 100644 --- a/packages/gdal/meta.yaml +++ b/packages/gdal/meta.yaml @@ -102,7 +102,7 @@ build: cat link_cmd.txt > ${LINKLIBS} cat linked_static_libs.txt >> ${LINKLIBS} - emmake make -j ${PYODIDE_JOBS:-3} + emmake make -j ${PYODIDE_JOBS:-16} emmake make install cp ${WASM_LIBRARY_DIR}/lib/libgdal.so ${DISTDIR} diff --git a/packages/gmpy2/meta.yaml b/packages/gmpy2/meta.yaml index 732f771930f..eccab7d132d 100644 --- a/packages/gmpy2/meta.yaml +++ b/packages/gmpy2/meta.yaml @@ -6,6 +6,8 @@ package: source: url: https://files.pythonhosted.org/packages/d9/2e/2848cb5ab5240cb34b967602990450d0fd715f013806929b2f82821cef7f/gmpy2-2.1.5.tar.gz sha256: bc297f1fd8c377ae67a4f493fc0f926e5d1b157e5c342e30a4d84dc7b9f95d96 + patches: + - patches/gmpy2-2.1.5-py312.patch requirements: host: diff --git a/packages/gmpy2/patches/gmpy2-2.1.5-py312.patch b/packages/gmpy2/patches/gmpy2-2.1.5-py312.patch new file mode 100644 index 00000000000..1ee2a22d594 --- /dev/null +++ b/packages/gmpy2/patches/gmpy2-2.1.5-py312.patch @@ -0,0 +1,250 @@ +Fixes gmpy2 v2.1.5 support for python 3 12. + +https://github.com/aleaxit/gmpy/issues/446#issuecomment-1784443227 +diff --git a/src/gmpy2_convert.h b/src/gmpy2_convert.h +index f887d47..3e8cb2b 100644 +--- a/src/gmpy2_convert.h ++++ b/src/gmpy2_convert.h +@@ -142,6 +142,27 @@ extern "C" { + #define IS_TYPE_COMPLEX_ONLY(x) ((x > OBJ_TYPE_REAL) && \ + (x < OBJ_TYPE_COMPLEX)) + ++/* Compatibility macros (to work with PyLongObject internals). ++ */ ++ ++#if PY_VERSION_HEX >= 0x030C0000 ++# define TAG_FROM_SIGN_AND_SIZE(is_neg, size) ((is_neg?2:(size==0)) | (((size_t)size) << 3)) ++# define _PyLong_SetSignAndDigitCount(obj, is_neg, size) (obj->long_value.lv_tag = TAG_FROM_SIGN_AND_SIZE(is_neg, size)) ++#elif PY_VERSION_HEX >= 0x030900A4 ++# define _PyLong_SetSignAndDigitCount(obj, is_neg, size) (Py_SET_SIZE(obj, (is_neg?-1:1)*size)) ++#else ++# define _PyLong_SetSignAndDigitCount(obj, is_neg, size) (Py_SIZE(obj) = (is_neg?-1:1)*size) ++#endif ++ ++#if PY_VERSION_HEX >= 0x030C0000 ++# define GET_OB_DIGIT(obj) obj->long_value.ob_digit ++# define _PyLong_IsNegative(obj) ((obj->long_value.lv_tag & 3) == 2) ++# define _PyLong_DigitCount(obj) (obj->long_value.lv_tag >> 3) ++#else ++# define GET_OB_DIGIT(obj) obj->ob_digit ++# define _PyLong_IsNegative(obj) (Py_SIZE(obj) < 0) ++# define _PyLong_DigitCount(obj) (_PyLong_IsNegative(obj)? -Py_SIZE(obj):Py_SIZE(obj)) ++#endif + + /* Since the macros are used in gmpy2's codebase, these functions are skipped + * until they are needed for the C API in the future. +diff --git a/src/gmpy2_convert_gmp.c b/src/gmpy2_convert_gmp.c +index cf0891e..8b8df81 100644 +--- a/src/gmpy2_convert_gmp.c ++++ b/src/gmpy2_convert_gmp.c +@@ -59,33 +59,24 @@ GMPy_MPZ_From_PyIntOrLong(PyObject *obj, CTXT_Object *context) + } + #endif + +- switch (Py_SIZE(templong)) { +- case -1: +- mpz_set_si(result->z, -(sdigit)templong->ob_digit[0]); ++ len = _PyLong_DigitCount(templong); ++ negative = _PyLong_IsNegative(templong); ++ ++ switch (len) { ++ case 1: ++ mpz_set_si(result->z, (sdigit)GET_OB_DIGIT(templong)[0]); + break; + case 0: + mpz_set_si(result->z, 0); + break; +- case 1: +- mpz_set_si(result->z, templong->ob_digit[0]); +- break; + default: +- mpz_set_si(result->z, 0); +- +- if (Py_SIZE(templong) < 0) { +- len = - Py_SIZE(templong); +- negative = 1; +- } else { +- len = Py_SIZE(templong); +- negative = 0; +- } +- +- mpz_import(result->z, len, -1, sizeof(templong->ob_digit[0]), 0, +- sizeof(templong->ob_digit[0])*8 - PyLong_SHIFT, templong->ob_digit); ++ mpz_import(result->z, len, -1, sizeof(GET_OB_DIGIT(templong)[0]), 0, ++ sizeof(GET_OB_DIGIT(templong)[0])*8 - PyLong_SHIFT, ++ GET_OB_DIGIT(templong)); ++ } + +- if (negative) { +- mpz_neg(result->z, result->z); +- } ++ if (negative) { ++ mpz_neg(result->z, result->z); + } + return result; + } +@@ -105,33 +96,24 @@ mpz_set_PyIntOrLong(mpz_t z, PyObject *obj) + } + #endif + +- switch (Py_SIZE(templong)) { +- case -1: +- mpz_set_si(z, -(sdigit)templong->ob_digit[0]); ++ len = _PyLong_DigitCount(templong); ++ negative = _PyLong_IsNegative(templong); ++ ++ switch (len) { ++ case 1: ++ mpz_set_si(z, (sdigit)GET_OB_DIGIT(templong)[0]); + break; + case 0: + mpz_set_si(z, 0); + break; +- case 1: +- mpz_set_si(z, templong->ob_digit[0]); +- break; + default: +- mpz_set_si(z, 0); +- +- if (Py_SIZE(templong) < 0) { +- len = - Py_SIZE(templong); +- negative = 1; +- } else { +- len = Py_SIZE(templong); +- negative = 0; +- } +- +- mpz_import(z, len, -1, sizeof(templong->ob_digit[0]), 0, +- sizeof(templong->ob_digit[0])*8 - PyLong_SHIFT, templong->ob_digit); ++ mpz_import(z, len, -1, sizeof(GET_OB_DIGIT(templong)[0]), 0, ++ sizeof(GET_OB_DIGIT(templong)[0])*8 - PyLong_SHIFT, ++ GET_OB_DIGIT(templong)); ++ } + +- if (negative) { +- mpz_neg(z, z); +- } ++ if (negative) { ++ mpz_neg(z, z); + } + return; + } +@@ -186,12 +168,7 @@ GMPy_PyLong_From_MPZ(MPZ_Object *obj, CTXT_Object *context) + + /* Assume gmp uses limbs as least as large as the builtin longs do */ + +- if (mpz_sgn(obj->z) < 0) { +- negative = 1; +- } else { +- negative = 0; +- } +- ++ negative = mpz_sgn(obj->z) < 0; + size = (mpz_sizeinbase(obj->z, 2) + PyLong_SHIFT - 1) / PyLong_SHIFT; + + if (!(result = _PyLong_New(size))) { +@@ -200,31 +177,20 @@ GMPy_PyLong_From_MPZ(MPZ_Object *obj, CTXT_Object *context) + /* LCOV_EXCL_STOP */ + } + +- mpz_export(result->ob_digit, &count, -1, sizeof(result->ob_digit[0]), 0, +- sizeof(result->ob_digit[0])*8 - PyLong_SHIFT, obj->z); ++ mpz_export(GET_OB_DIGIT(result), &count, -1, sizeof(GET_OB_DIGIT(result)[0]), 0, ++ sizeof(GET_OB_DIGIT(result)[0])*8 - PyLong_SHIFT, obj->z); + + if (count == 0) { +- result->ob_digit[0] = 0; ++ GET_OB_DIGIT(result)[0] = 0; + } + + /* long_normalize() is file-static so we must reimplement it */ + /* longobjp = long_normalize(longobjp); */ +- while ((size>0) && (result->ob_digit[size-1] == 0)) { ++ while ((size>0) && (GET_OB_DIGIT(result)[size-1] == 0)) { + size--; + } +-#if PY_VERSION_HEX >= 0x030900A4 +- Py_SET_SIZE(result, size); +-#else +- Py_SIZE(result) = size; +-#endif + +- if (negative) { +-#if PY_VERSION_HEX >= 0x030900A4 +- Py_SET_SIZE(result, - Py_SIZE(result)); +-#else +- Py_SIZE(result) = - Py_SIZE(result); +-#endif +- } ++ _PyLong_SetSignAndDigitCount(result, negative, size); + return (PyObject*)result; + } + +@@ -476,33 +442,24 @@ GMPy_XMPZ_From_PyIntOrLong(PyObject *obj, CTXT_Object *context) + } + #endif + +- switch (Py_SIZE(templong)) { +- case -1: +- mpz_set_si(result->z, -(sdigit)templong->ob_digit[0]); ++ len = _PyLong_DigitCount(templong); ++ negative = _PyLong_IsNegative(templong); ++ ++ switch (len) { ++ case 1: ++ mpz_set_si(result->z, (sdigit)GET_OB_DIGIT(templong)[0]); + break; + case 0: + mpz_set_si(result->z, 0); + break; +- case 1: +- mpz_set_si(result->z, templong->ob_digit[0]); +- break; + default: +- mpz_set_si(result->z, 0); +- +- if (Py_SIZE(templong) < 0) { +- len = - Py_SIZE(templong); +- negative = 1; +- } else { +- len = Py_SIZE(templong); +- negative = 0; +- } +- +- mpz_import(result->z, len, -1, sizeof(templong->ob_digit[0]), 0, +- sizeof(templong->ob_digit[0])*8 - PyLong_SHIFT, templong->ob_digit); ++ mpz_import(result->z, len, -1, sizeof(GET_OB_DIGIT(templong)[0]), 0, ++ sizeof(GET_OB_DIGIT(templong)[0])*8 - PyLong_SHIFT, ++ GET_OB_DIGIT(templong)); ++ } + +- if (negative) { +- mpz_neg(result->z, result->z); +- } ++ if (negative) { ++ mpz_neg(result->z, result->z); + } + return result; + } +@@ -639,7 +596,7 @@ GMPy_MPQ_From_PyStr(PyObject *s, int base, CTXT_Object *context) + } + + cp = PyBytes_AsString(ascii_str); +- ++ + { + char *whereslash = strchr((char*)cp, '/'); + char *wheredot = strchr((char*)cp, '.'); +diff --git a/src/gmpy2_convert_utils.c b/src/gmpy2_convert_utils.c +index d676eaf..8908d17 100644 +--- a/src/gmpy2_convert_utils.c ++++ b/src/gmpy2_convert_utils.c +@@ -123,7 +123,7 @@ static unsigned long + GMPy_Integer_AsUnsignedLongWithType_v2(PyObject *x, int xtype) + { + if IS_TYPE_PyInteger(xtype) { +- if (Py_SIZE(x) < 0) { ++ if (_PyLong_IsNegative(((PyLongObject*)x))) { + VALUE_ERROR("n must be > 0"); + return (unsigned long)-1; + } diff --git a/packages/nose/meta.yaml b/packages/nose/meta.yaml deleted file mode 100644 index 4b8207a40c9..00000000000 --- a/packages/nose/meta.yaml +++ /dev/null @@ -1,17 +0,0 @@ -package: - name: nose - version: 1.3.7 - top-level: - - nose -source: - url: https://files.pythonhosted.org/packages/15/d8/dd071918c040f50fa1cf80da16423af51ff8ce4a0f2399b7bf8de45ac3d9/nose-1.3.7-py3-none-any.whl - sha256: 9ff7c6cc443f8c51994b34a667bbcf45afd6d945be7477b52e97516fd17c53ac - -requirements: - run: - - setuptools -about: - home: http://readthedocs.org/docs/nose/ - PyPI: https://pypi.org/project/nose - summary: nose extends unittest to make testing easier - license: GNU LGPL diff --git a/packages/test/meta.yaml b/packages/test/meta.yaml index a2e861e887e..f859c5d61bc 100644 --- a/packages/test/meta.yaml +++ b/packages/test/meta.yaml @@ -14,13 +14,49 @@ build: cat $(PYODIDE_ROOT)/cpython/patches/* | patch -p1 export TEST_EXTENSIONS="\ _testinternalcapi.so \ - _testcapi.so \ _testbuffer.so \ _testimportmultiple.so \ _testmultiphase.so \ _ctypes_test.so \ " + export TEST_CAPI_SRCS=( \ + _testcapimodule.c \ + _testcapi/vectorcall.c \ + _testcapi/vectorcall_limited.c \ + _testcapi/heaptype.c \ + _testcapi/abstract.c \ + _testcapi/bytearray.c \ + _testcapi/bytes.c \ + _testcapi/unicode.c \ + _testcapi/dict.c \ + _testcapi/set.c \ + _testcapi/list.c \ + _testcapi/tuple.c \ + _testcapi/getargs.c \ + _testcapi/pytime.c \ + _testcapi/datetime.c \ + _testcapi/docstring.c \ + _testcapi/mem.c \ + _testcapi/watchers.c \ + _testcapi/long.c \ + _testcapi/float.c \ + _testcapi/complex.c \ + _testcapi/numbers.c \ + _testcapi/structmember.c \ + _testcapi/exceptions.c \ + _testcapi/code.c \ + _testcapi/buffer.c \ + _testcapi/pyos.c \ + _testcapi/file.c \ + _testcapi/codec.c \ + _testcapi/immortal.c \ + _testcapi/heaptype_relative.c \ + _testcapi/gc.c \ + _testcapi/sys.c \ + ) + + export TEST_MODULE_CFLAGS="${SIDE_MODULE_CFLAGS} -I Include/ -I Include/internal/ -I ." emcc ${TEST_MODULE_CFLAGS} -c Modules/_testinternalcapi.c -o Modules/_testinternalcapi.o \ @@ -31,11 +67,18 @@ build: emcc ${TEST_MODULE_CFLAGS} -c Modules/_testmultiphase.c -o Modules/_testmultiphase.o emcc ${TEST_MODULE_CFLAGS} -c Modules/_ctypes/_ctypes_test.c -o Modules/_ctypes_test.o + for capi_src in ${TEST_CAPI_SRCS[@]}; do \ + emcc ${TEST_MODULE_CFLAGS} -c Modules/${capi_src} -o Modules/${capi_src/.c/.o} + done + + export TEST_CAPI_OBJECTS=( "${TEST_CAPI_SRCS[@]/#/Modules/}" ) + emcc ${SIDE_MODULE_LDFLAGS} ${TEST_CAPI_OBJECTS[@]//.c/.o} -o ${DISTDIR}/_testcapi.so + for testname in ${TEST_EXTENSIONS}; do \ emcc Modules/${testname/.so/.o} -o ${DISTDIR}/$testname ${SIDE_MODULE_LDFLAGS} done cd Lib && \ tar --exclude=__pycache__ -cf - \ - test distutils/tests sqlite3/test \ + test test/test_sqlite3/ \ | tar -C $DISTDIR -xf - diff --git a/packages/test/patches/0005-gh-93839-Move-Lib-ctypes-test-to-Lib-test-test_ctype.patch b/packages/test/patches/0005-gh-93839-Move-Lib-ctypes-test-to-Lib-test-test_ctype.patch deleted file mode 100644 index 49353eb6f49..00000000000 --- a/packages/test/patches/0005-gh-93839-Move-Lib-ctypes-test-to-Lib-test-test_ctype.patch +++ /dev/null @@ -1,753 +0,0 @@ -From d82e0bfe8b98a122ca443b356d81998c804b686e Mon Sep 17 00:00:00 2001 -From: Victor Stinner <[email protected]> -Date: Tue, 21 Jun 2022 10:24:33 +0200 -Subject: [PATCH 5/9] gh-93839: Move Lib/ctypes/test/ to Lib/test/test_ctypes/ - (#94041) - -* Move Lib/ctypes/test/ to Lib/test/test_ctypes/ -* Remove Lib/test/test_ctypes.py -* Update imports and build system. ---- - Lib/ctypes/test/__main__.py | 4 - - Lib/test/leakers/test_ctypes.py | 2 +- - Lib/test/test_ctypes.py | 10 -- - .../test => test/test_ctypes}/__init__.py | 0 - Lib/test/test_ctypes/__main__.py | 4 + - .../test => test/test_ctypes}/test_anon.py | 0 - .../test_ctypes}/test_array_in_pointer.py | 0 - .../test => test/test_ctypes}/test_arrays.py | 2 +- - .../test_ctypes}/test_as_parameter.py | 2 +- - .../test_ctypes}/test_bitfields.py | 2 +- - .../test => test/test_ctypes}/test_buffers.py | 2 +- - .../test => test/test_ctypes}/test_bytes.py | 0 - .../test_ctypes}/test_byteswap.py | 0 - .../test_ctypes}/test_callbacks.py | 2 +- - .../test => test/test_ctypes}/test_cast.py | 2 +- - .../test => test/test_ctypes}/test_cfuncs.py | 2 +- - .../test_ctypes}/test_checkretval.py | 2 +- - .../test => test/test_ctypes}/test_delattr.py | 0 - .../test => test/test_ctypes}/test_errno.py | 0 - .../test => test/test_ctypes}/test_find.py | 0 - .../test_ctypes}/test_frombuffer.py | 0 - .../test => test/test_ctypes}/test_funcptr.py | 0 - .../test_ctypes}/test_functions.py | 2 +- - .../test_ctypes}/test_incomplete.py | 0 - .../test => test/test_ctypes}/test_init.py | 0 - .../test_ctypes}/test_internals.py | 0 - .../test_ctypes}/test_keeprefs.py | 0 - .../test => test/test_ctypes}/test_libc.py | 0 - .../test => test/test_ctypes}/test_loading.py | 0 - .../test_ctypes}/test_macholib.py | 0 - .../test_ctypes}/test_memfunctions.py | 2 +- - .../test => test/test_ctypes}/test_numbers.py | 0 - .../test => test/test_ctypes}/test_objects.py | 8 +- - .../test_ctypes}/test_parameters.py | 2 +- - .../test => test/test_ctypes}/test_pep3118.py | 0 - .../test_ctypes}/test_pickling.py | 0 - .../test_ctypes}/test_pointers.py | 0 - .../test_ctypes}/test_prototypes.py | 2 +- - .../test_ctypes}/test_python_api.py | 0 - .../test_ctypes}/test_random_things.py | 0 - .../test_ctypes}/test_refcounts.py | 0 - .../test => test/test_ctypes}/test_repr.py | 0 - .../test_ctypes}/test_returnfuncptrs.py | 0 - .../test_ctypes}/test_simplesubclasses.py | 0 - .../test => test/test_ctypes}/test_sizes.py | 0 - .../test => test/test_ctypes}/test_slicing.py | 2 +- - .../test_ctypes}/test_stringptr.py | 0 - .../test => test/test_ctypes}/test_strings.py | 2 +- - .../test_ctypes}/test_struct_fields.py | 0 - .../test_ctypes}/test_structures.py | 2 +- - .../test_ctypes}/test_unaligned_structures.py | 0 - .../test => test/test_ctypes}/test_unicode.py | 2 +- - .../test => test/test_ctypes}/test_values.py | 0 - .../test_ctypes}/test_varsize_struct.py | 0 - .../test => test/test_ctypes}/test_win32.py | 0 - .../test_ctypes}/test_wintypes.py | 0 - Makefile.pre.in | 4 +- - ...2-06-20-23-04-52.gh-issue-93839.OE3Ybk.rst | 2 + - PCbuild/lib.pyproj | 109 +++++++++--------- - Tools/wasm/wasm_assets.py | 1 - - 60 files changed, 83 insertions(+), 93 deletions(-) - delete mode 100644 Lib/ctypes/test/__main__.py - delete mode 100644 Lib/test/test_ctypes.py - rename Lib/{ctypes/test => test/test_ctypes}/__init__.py (100%) - create mode 100644 Lib/test/test_ctypes/__main__.py - rename Lib/{ctypes/test => test/test_ctypes}/test_anon.py (100%) - rename Lib/{ctypes/test => test/test_ctypes}/test_array_in_pointer.py (100%) - rename Lib/{ctypes/test => test/test_ctypes}/test_arrays.py (99%) - rename Lib/{ctypes/test => test/test_ctypes}/test_as_parameter.py (99%) - rename Lib/{ctypes/test => test/test_ctypes}/test_bitfields.py (99%) - rename Lib/{ctypes/test => test/test_ctypes}/test_buffers.py (98%) - rename Lib/{ctypes/test => test/test_ctypes}/test_bytes.py (100%) - rename Lib/{ctypes/test => test/test_ctypes}/test_byteswap.py (100%) - rename Lib/{ctypes/test => test/test_ctypes}/test_callbacks.py (99%) - rename Lib/{ctypes/test => test/test_ctypes}/test_cast.py (98%) - rename Lib/{ctypes/test => test/test_ctypes}/test_cfuncs.py (99%) - rename Lib/{ctypes/test => test/test_ctypes}/test_checkretval.py (95%) - rename Lib/{ctypes/test => test/test_ctypes}/test_delattr.py (100%) - rename Lib/{ctypes/test => test/test_ctypes}/test_errno.py (100%) - rename Lib/{ctypes/test => test/test_ctypes}/test_find.py (100%) - rename Lib/{ctypes/test => test/test_ctypes}/test_frombuffer.py (100%) - rename Lib/{ctypes/test => test/test_ctypes}/test_funcptr.py (100%) - rename Lib/{ctypes/test => test/test_ctypes}/test_functions.py (99%) - rename Lib/{ctypes/test => test/test_ctypes}/test_incomplete.py (100%) - rename Lib/{ctypes/test => test/test_ctypes}/test_init.py (100%) - rename Lib/{ctypes/test => test/test_ctypes}/test_internals.py (100%) - rename Lib/{ctypes/test => test/test_ctypes}/test_keeprefs.py (100%) - rename Lib/{ctypes/test => test/test_ctypes}/test_libc.py (100%) - rename Lib/{ctypes/test => test/test_ctypes}/test_loading.py (100%) - rename Lib/{ctypes/test => test/test_ctypes}/test_macholib.py (100%) - rename Lib/{ctypes/test => test/test_ctypes}/test_memfunctions.py (98%) - rename Lib/{ctypes/test => test/test_ctypes}/test_numbers.py (100%) - rename Lib/{ctypes/test => test/test_ctypes}/test_objects.py (87%) - rename Lib/{ctypes/test => test/test_ctypes}/test_parameters.py (99%) - rename Lib/{ctypes/test => test/test_ctypes}/test_pep3118.py (100%) - rename Lib/{ctypes/test => test/test_ctypes}/test_pickling.py (100%) - rename Lib/{ctypes/test => test/test_ctypes}/test_pointers.py (100%) - rename Lib/{ctypes/test => test/test_ctypes}/test_prototypes.py (99%) - rename Lib/{ctypes/test => test/test_ctypes}/test_python_api.py (100%) - rename Lib/{ctypes/test => test/test_ctypes}/test_random_things.py (100%) - rename Lib/{ctypes/test => test/test_ctypes}/test_refcounts.py (100%) - rename Lib/{ctypes/test => test/test_ctypes}/test_repr.py (100%) - rename Lib/{ctypes/test => test/test_ctypes}/test_returnfuncptrs.py (100%) - rename Lib/{ctypes/test => test/test_ctypes}/test_simplesubclasses.py (100%) - rename Lib/{ctypes/test => test/test_ctypes}/test_sizes.py (100%) - rename Lib/{ctypes/test => test/test_ctypes}/test_slicing.py (99%) - rename Lib/{ctypes/test => test/test_ctypes}/test_stringptr.py (100%) - rename Lib/{ctypes/test => test/test_ctypes}/test_strings.py (99%) - rename Lib/{ctypes/test => test/test_ctypes}/test_struct_fields.py (100%) - rename Lib/{ctypes/test => test/test_ctypes}/test_structures.py (99%) - rename Lib/{ctypes/test => test/test_ctypes}/test_unaligned_structures.py (100%) - rename Lib/{ctypes/test => test/test_ctypes}/test_unicode.py (97%) - rename Lib/{ctypes/test => test/test_ctypes}/test_values.py (100%) - rename Lib/{ctypes/test => test/test_ctypes}/test_varsize_struct.py (100%) - rename Lib/{ctypes/test => test/test_ctypes}/test_win32.py (100%) - rename Lib/{ctypes/test => test/test_ctypes}/test_wintypes.py (100%) - create mode 100644 Misc/NEWS.d/next/Tests/2022-06-20-23-04-52.gh-issue-93839.OE3Ybk.rst - -diff --git a/Lib/ctypes/test/__main__.py b/Lib/ctypes/test/__main__.py -deleted file mode 100644 -index 362a9ec8cf..0000000000 ---- a/Lib/ctypes/test/__main__.py -+++ /dev/null -@@ -1,4 +0,0 @@ --from ctypes.test import load_tests --import unittest -- --unittest.main() -diff --git a/Lib/test/leakers/test_ctypes.py b/Lib/test/leakers/test_ctypes.py -index 7d7e9ff3a1..ec09ac3699 100644 ---- a/Lib/test/leakers/test_ctypes.py -+++ b/Lib/test/leakers/test_ctypes.py -@@ -1,5 +1,5 @@ - --# Taken from Lib/ctypes/test/test_keeprefs.py, PointerToStructure.test(). -+# Taken from Lib/test/test_ctypes/test_keeprefs.py, PointerToStructure.test(). - - from ctypes import Structure, c_int, POINTER - import gc -diff --git a/Lib/test/test_ctypes.py b/Lib/test/test_ctypes.py -deleted file mode 100644 -index b0a12c9734..0000000000 ---- a/Lib/test/test_ctypes.py -+++ /dev/null -@@ -1,10 +0,0 @@ --import unittest --from test.support.import_helper import import_module -- -- --ctypes_test = import_module('ctypes.test') -- --load_tests = ctypes_test.load_tests -- --if __name__ == "__main__": -- unittest.main() -diff --git a/Lib/ctypes/test/__init__.py b/Lib/test/test_ctypes/__init__.py -similarity index 100% -rename from Lib/ctypes/test/__init__.py -rename to Lib/test/test_ctypes/__init__.py -diff --git a/Lib/test/test_ctypes/__main__.py b/Lib/test/test_ctypes/__main__.py -new file mode 100644 -index 0000000000..3003d4db89 ---- /dev/null -+++ b/Lib/test/test_ctypes/__main__.py -@@ -0,0 +1,4 @@ -+from test.test_ctypes import load_tests -+import unittest -+ -+unittest.main() -diff --git a/Lib/ctypes/test/test_anon.py b/Lib/test/test_ctypes/test_anon.py -similarity index 100% -rename from Lib/ctypes/test/test_anon.py -rename to Lib/test/test_ctypes/test_anon.py -diff --git a/Lib/ctypes/test/test_array_in_pointer.py b/Lib/test/test_ctypes/test_array_in_pointer.py -similarity index 100% -rename from Lib/ctypes/test/test_array_in_pointer.py -rename to Lib/test/test_ctypes/test_array_in_pointer.py -diff --git a/Lib/ctypes/test/test_arrays.py b/Lib/test/test_ctypes/test_arrays.py -similarity index 99% -rename from Lib/ctypes/test/test_arrays.py -rename to Lib/test/test_ctypes/test_arrays.py -index 14603b7049..415a5785a9 100644 ---- a/Lib/ctypes/test/test_arrays.py -+++ b/Lib/test/test_ctypes/test_arrays.py -@@ -3,7 +3,7 @@ - import sys - from ctypes import * - --from ctypes.test import need_symbol -+from test.test_ctypes import need_symbol - - formats = "bBhHiIlLqQfd" - -diff --git a/Lib/ctypes/test/test_as_parameter.py b/Lib/test/test_ctypes/test_as_parameter.py -similarity index 99% -rename from Lib/ctypes/test/test_as_parameter.py -rename to Lib/test/test_ctypes/test_as_parameter.py -index f9d27cb89d..b35defb158 100644 ---- a/Lib/ctypes/test/test_as_parameter.py -+++ b/Lib/test/test_ctypes/test_as_parameter.py -@@ -1,6 +1,6 @@ - import unittest - from ctypes import * --from ctypes.test import need_symbol -+from test.test_ctypes import need_symbol - import _ctypes_test - - dll = CDLL(_ctypes_test.__file__) -diff --git a/Lib/ctypes/test/test_bitfields.py b/Lib/test/test_ctypes/test_bitfields.py -similarity index 99% -rename from Lib/ctypes/test/test_bitfields.py -rename to Lib/test/test_ctypes/test_bitfields.py -index 66acd62e68..dad71a0ba7 100644 ---- a/Lib/ctypes/test/test_bitfields.py -+++ b/Lib/test/test_ctypes/test_bitfields.py -@@ -1,5 +1,5 @@ - from ctypes import * --from ctypes.test import need_symbol -+from test.test_ctypes import need_symbol - from test import support - import unittest - import os -diff --git a/Lib/ctypes/test/test_buffers.py b/Lib/test/test_ctypes/test_buffers.py -similarity index 98% -rename from Lib/ctypes/test/test_buffers.py -rename to Lib/test/test_ctypes/test_buffers.py -index 15782be757..a9be2023aa 100644 ---- a/Lib/ctypes/test/test_buffers.py -+++ b/Lib/test/test_ctypes/test_buffers.py -@@ -1,5 +1,5 @@ - from ctypes import * --from ctypes.test import need_symbol -+from test.test_ctypes import need_symbol - import unittest - - class StringBufferTestCase(unittest.TestCase): -diff --git a/Lib/ctypes/test/test_bytes.py b/Lib/test/test_ctypes/test_bytes.py -similarity index 100% -rename from Lib/ctypes/test/test_bytes.py -rename to Lib/test/test_ctypes/test_bytes.py -diff --git a/Lib/ctypes/test/test_byteswap.py b/Lib/test/test_ctypes/test_byteswap.py -similarity index 100% -rename from Lib/ctypes/test/test_byteswap.py -rename to Lib/test/test_ctypes/test_byteswap.py -diff --git a/Lib/ctypes/test/test_callbacks.py b/Lib/test/test_ctypes/test_callbacks.py -similarity index 99% -rename from Lib/ctypes/test/test_callbacks.py -rename to Lib/test/test_ctypes/test_callbacks.py -index 1099cf9a69..2758720d4a 100644 ---- a/Lib/ctypes/test/test_callbacks.py -+++ b/Lib/test/test_ctypes/test_callbacks.py -@@ -3,7 +3,7 @@ - from test import support - - from ctypes import * --from ctypes.test import need_symbol -+from test.test_ctypes import need_symbol - from _ctypes import CTYPES_MAX_ARGCOUNT - import _ctypes_test - -diff --git a/Lib/ctypes/test/test_cast.py b/Lib/test/test_ctypes/test_cast.py -similarity index 98% -rename from Lib/ctypes/test/test_cast.py -rename to Lib/test/test_ctypes/test_cast.py -index 6878f97328..7ee23b16f1 100644 ---- a/Lib/ctypes/test/test_cast.py -+++ b/Lib/test/test_ctypes/test_cast.py -@@ -1,5 +1,5 @@ - from ctypes import * --from ctypes.test import need_symbol -+from test.test_ctypes import need_symbol - import unittest - import sys - -diff --git a/Lib/ctypes/test/test_cfuncs.py b/Lib/test/test_ctypes/test_cfuncs.py -similarity index 99% -rename from Lib/ctypes/test/test_cfuncs.py -rename to Lib/test/test_ctypes/test_cfuncs.py -index ac2240fa19..0a9394bf31 100644 ---- a/Lib/ctypes/test/test_cfuncs.py -+++ b/Lib/test/test_ctypes/test_cfuncs.py -@@ -3,7 +3,7 @@ - - import unittest - from ctypes import * --from ctypes.test import need_symbol -+from test.test_ctypes import need_symbol - - import _ctypes_test - -diff --git a/Lib/ctypes/test/test_checkretval.py b/Lib/test/test_ctypes/test_checkretval.py -similarity index 95% -rename from Lib/ctypes/test/test_checkretval.py -rename to Lib/test/test_ctypes/test_checkretval.py -index e9567dc391..1492099f4b 100644 ---- a/Lib/ctypes/test/test_checkretval.py -+++ b/Lib/test/test_ctypes/test_checkretval.py -@@ -1,7 +1,7 @@ - import unittest - - from ctypes import * --from ctypes.test import need_symbol -+from test.test_ctypes import need_symbol - - class CHECKED(c_int): - def _check_retval_(value): -diff --git a/Lib/ctypes/test/test_delattr.py b/Lib/test/test_ctypes/test_delattr.py -similarity index 100% -rename from Lib/ctypes/test/test_delattr.py -rename to Lib/test/test_ctypes/test_delattr.py -diff --git a/Lib/ctypes/test/test_errno.py b/Lib/test/test_ctypes/test_errno.py -similarity index 100% -rename from Lib/ctypes/test/test_errno.py -rename to Lib/test/test_ctypes/test_errno.py -diff --git a/Lib/ctypes/test/test_find.py b/Lib/test/test_ctypes/test_find.py -similarity index 100% -rename from Lib/ctypes/test/test_find.py -rename to Lib/test/test_ctypes/test_find.py -diff --git a/Lib/ctypes/test/test_frombuffer.py b/Lib/test/test_ctypes/test_frombuffer.py -similarity index 100% -rename from Lib/ctypes/test/test_frombuffer.py -rename to Lib/test/test_ctypes/test_frombuffer.py -diff --git a/Lib/ctypes/test/test_funcptr.py b/Lib/test/test_ctypes/test_funcptr.py -similarity index 100% -rename from Lib/ctypes/test/test_funcptr.py -rename to Lib/test/test_ctypes/test_funcptr.py -diff --git a/Lib/ctypes/test/test_functions.py b/Lib/test/test_ctypes/test_functions.py -similarity index 99% -rename from Lib/ctypes/test/test_functions.py -rename to Lib/test/test_ctypes/test_functions.py -index f9e92e1cc6..4a784c8d79 100644 ---- a/Lib/ctypes/test/test_functions.py -+++ b/Lib/test/test_ctypes/test_functions.py -@@ -6,7 +6,7 @@ - """ - - from ctypes import * --from ctypes.test import need_symbol -+from test.test_ctypes import need_symbol - import sys, unittest - - try: -diff --git a/Lib/ctypes/test/test_incomplete.py b/Lib/test/test_ctypes/test_incomplete.py -similarity index 100% -rename from Lib/ctypes/test/test_incomplete.py -rename to Lib/test/test_ctypes/test_incomplete.py -diff --git a/Lib/ctypes/test/test_init.py b/Lib/test/test_ctypes/test_init.py -similarity index 100% -rename from Lib/ctypes/test/test_init.py -rename to Lib/test/test_ctypes/test_init.py -diff --git a/Lib/ctypes/test/test_internals.py b/Lib/test/test_ctypes/test_internals.py -similarity index 100% -rename from Lib/ctypes/test/test_internals.py -rename to Lib/test/test_ctypes/test_internals.py -diff --git a/Lib/ctypes/test/test_keeprefs.py b/Lib/test/test_ctypes/test_keeprefs.py -similarity index 100% -rename from Lib/ctypes/test/test_keeprefs.py -rename to Lib/test/test_ctypes/test_keeprefs.py -diff --git a/Lib/ctypes/test/test_libc.py b/Lib/test/test_ctypes/test_libc.py -similarity index 100% -rename from Lib/ctypes/test/test_libc.py -rename to Lib/test/test_ctypes/test_libc.py -diff --git a/Lib/ctypes/test/test_loading.py b/Lib/test/test_ctypes/test_loading.py -similarity index 100% -rename from Lib/ctypes/test/test_loading.py -rename to Lib/test/test_ctypes/test_loading.py -diff --git a/Lib/ctypes/test/test_macholib.py b/Lib/test/test_ctypes/test_macholib.py -similarity index 100% -rename from Lib/ctypes/test/test_macholib.py -rename to Lib/test/test_ctypes/test_macholib.py -diff --git a/Lib/ctypes/test/test_memfunctions.py b/Lib/test/test_ctypes/test_memfunctions.py -similarity index 98% -rename from Lib/ctypes/test/test_memfunctions.py -rename to Lib/test/test_ctypes/test_memfunctions.py -index e784b9a706..d5c9735211 100644 ---- a/Lib/ctypes/test/test_memfunctions.py -+++ b/Lib/test/test_ctypes/test_memfunctions.py -@@ -2,7 +2,7 @@ - from test import support - import unittest - from ctypes import * --from ctypes.test import need_symbol -+from test.test_ctypes import need_symbol - - class MemFunctionsTest(unittest.TestCase): - @unittest.skip('test disabled') -diff --git a/Lib/ctypes/test/test_numbers.py b/Lib/test/test_ctypes/test_numbers.py -similarity index 100% -rename from Lib/ctypes/test/test_numbers.py -rename to Lib/test/test_ctypes/test_numbers.py -diff --git a/Lib/ctypes/test/test_objects.py b/Lib/test/test_ctypes/test_objects.py -similarity index 87% -rename from Lib/ctypes/test/test_objects.py -rename to Lib/test/test_ctypes/test_objects.py -index 19e3dc1f2d..44a3c61ad7 100644 ---- a/Lib/ctypes/test/test_objects.py -+++ b/Lib/test/test_ctypes/test_objects.py -@@ -42,7 +42,7 @@ - of 'x' ('_b_base_' is either None, or the root object owning the memory block): - - >>> print(x.array._b_base_) # doctest: +ELLIPSIS --<ctypes.test.test_objects.X object at 0x...> -+<test.test_ctypes.test_objects.X object at 0x...> - >>> - - >>> x.array[0] = b'spam spam spam' -@@ -56,12 +56,12 @@ - - import unittest, doctest - --import ctypes.test.test_objects -+import test.test_ctypes.test_objects - - class TestCase(unittest.TestCase): - def test(self): -- failures, tests = doctest.testmod(ctypes.test.test_objects) -+ failures, tests = doctest.testmod(test.test_ctypes.test_objects) - self.assertFalse(failures, 'doctests failed, see output above') - - if __name__ == '__main__': -- doctest.testmod(ctypes.test.test_objects) -+ doctest.testmod(test.test_ctypes.test_objects) -diff --git a/Lib/ctypes/test/test_parameters.py b/Lib/test/test_ctypes/test_parameters.py -similarity index 99% -rename from Lib/ctypes/test/test_parameters.py -rename to Lib/test/test_ctypes/test_parameters.py -index 38af7ac13d..2f755a6d09 100644 ---- a/Lib/ctypes/test/test_parameters.py -+++ b/Lib/test/test_ctypes/test_parameters.py -@@ -1,5 +1,5 @@ - import unittest --from ctypes.test import need_symbol -+from test.test_ctypes import need_symbol - import test.support - - class SimpleTypesTestCase(unittest.TestCase): -diff --git a/Lib/ctypes/test/test_pep3118.py b/Lib/test/test_ctypes/test_pep3118.py -similarity index 100% -rename from Lib/ctypes/test/test_pep3118.py -rename to Lib/test/test_ctypes/test_pep3118.py -diff --git a/Lib/ctypes/test/test_pickling.py b/Lib/test/test_ctypes/test_pickling.py -similarity index 100% -rename from Lib/ctypes/test/test_pickling.py -rename to Lib/test/test_ctypes/test_pickling.py -diff --git a/Lib/ctypes/test/test_pointers.py b/Lib/test/test_ctypes/test_pointers.py -similarity index 100% -rename from Lib/ctypes/test/test_pointers.py -rename to Lib/test/test_ctypes/test_pointers.py -diff --git a/Lib/ctypes/test/test_prototypes.py b/Lib/test/test_ctypes/test_prototypes.py -similarity index 99% -rename from Lib/ctypes/test/test_prototypes.py -rename to Lib/test/test_ctypes/test_prototypes.py -index cd0c649de3..bf27561487 100644 ---- a/Lib/ctypes/test/test_prototypes.py -+++ b/Lib/test/test_ctypes/test_prototypes.py -@@ -1,5 +1,5 @@ - from ctypes import * --from ctypes.test import need_symbol -+from test.test_ctypes import need_symbol - import unittest - - # IMPORTANT INFO: -diff --git a/Lib/ctypes/test/test_python_api.py b/Lib/test/test_ctypes/test_python_api.py -similarity index 100% -rename from Lib/ctypes/test/test_python_api.py -rename to Lib/test/test_ctypes/test_python_api.py -diff --git a/Lib/ctypes/test/test_random_things.py b/Lib/test/test_ctypes/test_random_things.py -similarity index 100% -rename from Lib/ctypes/test/test_random_things.py -rename to Lib/test/test_ctypes/test_random_things.py -diff --git a/Lib/ctypes/test/test_refcounts.py b/Lib/test/test_ctypes/test_refcounts.py -similarity index 100% -rename from Lib/ctypes/test/test_refcounts.py -rename to Lib/test/test_ctypes/test_refcounts.py -diff --git a/Lib/ctypes/test/test_repr.py b/Lib/test/test_ctypes/test_repr.py -similarity index 100% -rename from Lib/ctypes/test/test_repr.py -rename to Lib/test/test_ctypes/test_repr.py -diff --git a/Lib/ctypes/test/test_returnfuncptrs.py b/Lib/test/test_ctypes/test_returnfuncptrs.py -similarity index 100% -rename from Lib/ctypes/test/test_returnfuncptrs.py -rename to Lib/test/test_ctypes/test_returnfuncptrs.py -diff --git a/Lib/ctypes/test/test_simplesubclasses.py b/Lib/test/test_ctypes/test_simplesubclasses.py -similarity index 100% -rename from Lib/ctypes/test/test_simplesubclasses.py -rename to Lib/test/test_ctypes/test_simplesubclasses.py -diff --git a/Lib/ctypes/test/test_sizes.py b/Lib/test/test_ctypes/test_sizes.py -similarity index 100% -rename from Lib/ctypes/test/test_sizes.py -rename to Lib/test/test_ctypes/test_sizes.py -diff --git a/Lib/ctypes/test/test_slicing.py b/Lib/test/test_ctypes/test_slicing.py -similarity index 99% -rename from Lib/ctypes/test/test_slicing.py -rename to Lib/test/test_ctypes/test_slicing.py -index a3932f1767..b3e68f9a82 100644 ---- a/Lib/ctypes/test/test_slicing.py -+++ b/Lib/test/test_ctypes/test_slicing.py -@@ -1,6 +1,6 @@ - import unittest - from ctypes import * --from ctypes.test import need_symbol -+from test.test_ctypes import need_symbol - - import _ctypes_test - -diff --git a/Lib/ctypes/test/test_stringptr.py b/Lib/test/test_ctypes/test_stringptr.py -similarity index 100% -rename from Lib/ctypes/test/test_stringptr.py -rename to Lib/test/test_ctypes/test_stringptr.py -diff --git a/Lib/ctypes/test/test_strings.py b/Lib/test/test_ctypes/test_strings.py -similarity index 99% -rename from Lib/ctypes/test/test_strings.py -rename to Lib/test/test_ctypes/test_strings.py -index 12e208828a..a9003be3f5 100644 ---- a/Lib/ctypes/test/test_strings.py -+++ b/Lib/test/test_ctypes/test_strings.py -@@ -1,6 +1,6 @@ - import unittest - from ctypes import * --from ctypes.test import need_symbol -+from test.test_ctypes import need_symbol - - class StringArrayTestCase(unittest.TestCase): - def test(self): -diff --git a/Lib/ctypes/test/test_struct_fields.py b/Lib/test/test_ctypes/test_struct_fields.py -similarity index 100% -rename from Lib/ctypes/test/test_struct_fields.py -rename to Lib/test/test_ctypes/test_struct_fields.py -diff --git a/Lib/ctypes/test/test_structures.py b/Lib/test/test_ctypes/test_structures.py -similarity index 99% -rename from Lib/ctypes/test/test_structures.py -rename to Lib/test/test_ctypes/test_structures.py -index 97ad2b8ed8..13c0470ba2 100644 ---- a/Lib/ctypes/test/test_structures.py -+++ b/Lib/test/test_ctypes/test_structures.py -@@ -2,7 +2,7 @@ - import sys - import unittest - from ctypes import * --from ctypes.test import need_symbol -+from test.test_ctypes import need_symbol - from struct import calcsize - import _ctypes_test - from test import support -diff --git a/Lib/ctypes/test/test_unaligned_structures.py b/Lib/test/test_ctypes/test_unaligned_structures.py -similarity index 100% -rename from Lib/ctypes/test/test_unaligned_structures.py -rename to Lib/test/test_ctypes/test_unaligned_structures.py -diff --git a/Lib/ctypes/test/test_unicode.py b/Lib/test/test_ctypes/test_unicode.py -similarity index 97% -rename from Lib/ctypes/test/test_unicode.py -rename to Lib/test/test_ctypes/test_unicode.py -index 60c75424b7..319cb3b1dc 100644 ---- a/Lib/ctypes/test/test_unicode.py -+++ b/Lib/test/test_ctypes/test_unicode.py -@@ -1,6 +1,6 @@ - import unittest - import ctypes --from ctypes.test import need_symbol -+from test.test_ctypes import need_symbol - - import _ctypes_test - -diff --git a/Lib/ctypes/test/test_values.py b/Lib/test/test_ctypes/test_values.py -similarity index 100% -rename from Lib/ctypes/test/test_values.py -rename to Lib/test/test_ctypes/test_values.py -diff --git a/Lib/ctypes/test/test_varsize_struct.py b/Lib/test/test_ctypes/test_varsize_struct.py -similarity index 100% -rename from Lib/ctypes/test/test_varsize_struct.py -rename to Lib/test/test_ctypes/test_varsize_struct.py -diff --git a/Lib/ctypes/test/test_win32.py b/Lib/test/test_ctypes/test_win32.py -similarity index 100% -rename from Lib/ctypes/test/test_win32.py -rename to Lib/test/test_ctypes/test_win32.py -diff --git a/Lib/ctypes/test/test_wintypes.py b/Lib/test/test_ctypes/test_wintypes.py -similarity index 100% -rename from Lib/ctypes/test/test_wintypes.py -rename to Lib/test/test_ctypes/test_wintypes.py -diff --git a/Misc/NEWS.d/next/Tests/2022-06-20-23-04-52.gh-issue-93839.OE3Ybk.rst b/Misc/NEWS.d/next/Tests/2022-06-20-23-04-52.gh-issue-93839.OE3Ybk.rst -new file mode 100644 -index 0000000000..121b64b133 ---- /dev/null -+++ b/Misc/NEWS.d/next/Tests/2022-06-20-23-04-52.gh-issue-93839.OE3Ybk.rst -@@ -0,0 +1,2 @@ -+Move ``Lib/ctypes/test/`` to ``Lib/test/test_ctypes/``. Patch by Victor -+Stinner. -diff --git a/PCbuild/lib.pyproj b/PCbuild/lib.pyproj -index 43c570f1da..692b083349 100644 ---- a/PCbuild/lib.pyproj -+++ b/PCbuild/lib.pyproj -@@ -83,59 +83,6 @@ - <Compile Include="ctypes\macholib\dylib.py" /> - <Compile Include="ctypes\macholib\framework.py" /> - <Compile Include="ctypes\macholib\__init__.py" /> -- <Compile Include="ctypes\test\test_anon.py" /> -- <Compile Include="ctypes\test\test_arrays.py" /> -- <Compile Include="ctypes\test\test_array_in_pointer.py" /> -- <Compile Include="ctypes\test\test_as_parameter.py" /> -- <Compile Include="ctypes\test\test_bitfields.py" /> -- <Compile Include="ctypes\test\test_buffers.py" /> -- <Compile Include="ctypes\test\test_bytes.py" /> -- <Compile Include="ctypes\test\test_byteswap.py" /> -- <Compile Include="ctypes\test\test_callbacks.py" /> -- <Compile Include="ctypes\test\test_cast.py" /> -- <Compile Include="ctypes\test\test_cfuncs.py" /> -- <Compile Include="ctypes\test\test_checkretval.py" /> -- <Compile Include="ctypes\test\test_delattr.py" /> -- <Compile Include="ctypes\test\test_errno.py" /> -- <Compile Include="ctypes\test\test_find.py" /> -- <Compile Include="ctypes\test\test_frombuffer.py" /> -- <Compile Include="ctypes\test\test_funcptr.py" /> -- <Compile Include="ctypes\test\test_functions.py" /> -- <Compile Include="ctypes\test\test_incomplete.py" /> -- <Compile Include="ctypes\test\test_init.py" /> -- <Compile Include="ctypes\test\test_internals.py" /> -- <Compile Include="ctypes\test\test_keeprefs.py" /> -- <Compile Include="ctypes\test\test_libc.py" /> -- <Compile Include="ctypes\test\test_loading.py" /> -- <Compile Include="ctypes\test\test_macholib.py" /> -- <Compile Include="ctypes\test\test_memfunctions.py" /> -- <Compile Include="ctypes\test\test_numbers.py" /> -- <Compile Include="ctypes\test\test_objects.py" /> -- <Compile Include="ctypes\test\test_parameters.py" /> -- <Compile Include="ctypes\test\test_pep3118.py" /> -- <Compile Include="ctypes\test\test_pickling.py" /> -- <Compile Include="ctypes\test\test_pointers.py" /> -- <Compile Include="ctypes\test\test_prototypes.py" /> -- <Compile Include="ctypes\test\test_python_api.py" /> -- <Compile Include="ctypes\test\test_random_things.py" /> -- <Compile Include="ctypes\test\test_refcounts.py" /> -- <Compile Include="ctypes\test\test_repr.py" /> -- <Compile Include="ctypes\test\test_returnfuncptrs.py" /> -- <Compile Include="ctypes\test\test_simplesubclasses.py" /> -- <Compile Include="ctypes\test\test_sizes.py" /> -- <Compile Include="ctypes\test\test_slicing.py" /> -- <Compile Include="ctypes\test\test_stringptr.py" /> -- <Compile Include="ctypes\test\test_strings.py" /> -- <Compile Include="ctypes\test\test_structures.py" /> -- <Compile Include="ctypes\test\test_struct_fields.py" /> -- <Compile Include="ctypes\test\test_unaligned_structures.py" /> -- <Compile Include="ctypes\test\test_unicode.py" /> -- <Compile Include="ctypes\test\test_values.py" /> -- <Compile Include="ctypes\test\test_varsize_struct.py" /> -- <Compile Include="ctypes\test\test_win32.py" /> -- <Compile Include="ctypes\test\test_wintypes.py" /> -- <Compile Include="ctypes\test\__init__.py" /> -- <Compile Include="ctypes\test\__main__.py" /> - <Compile Include="ctypes\util.py" /> - <Compile Include="ctypes\wintypes.py" /> - <Compile Include="ctypes\_endian.py" /> -@@ -944,7 +891,59 @@ - <Compile Include="test\test_crashers.py" /> - <Compile Include="test\test_crypt.py" /> - <Compile Include="test\test_csv.py" /> -- <Compile Include="test\test_ctypes.py" /> -+ <Compile Include="test\test_ctypes\test_anon.py" /> -+ <Compile Include="test\test_ctypes\test_arrays.py" /> -+ <Compile Include="test\test_ctypes\test_array_in_pointer.py" /> -+ <Compile Include="test\test_ctypes\test_as_parameter.py" /> -+ <Compile Include="test\test_ctypes\test_bitfields.py" /> -+ <Compile Include="test\test_ctypes\test_buffers.py" /> -+ <Compile Include="test\test_ctypes\test_bytes.py" /> -+ <Compile Include="test\test_ctypes\test_byteswap.py" /> -+ <Compile Include="test\test_ctypes\test_callbacks.py" /> -+ <Compile Include="test\test_ctypes\test_cast.py" /> -+ <Compile Include="test\test_ctypes\test_cfuncs.py" /> -+ <Compile Include="test\test_ctypes\test_checkretval.py" /> -+ <Compile Include="test\test_ctypes\test_delattr.py" /> -+ <Compile Include="test\test_ctypes\test_errno.py" /> -+ <Compile Include="test\test_ctypes\test_find.py" /> -+ <Compile Include="test\test_ctypes\test_frombuffer.py" /> -+ <Compile Include="test\test_ctypes\test_funcptr.py" /> -+ <Compile Include="test\test_ctypes\test_functions.py" /> -+ <Compile Include="test\test_ctypes\test_incomplete.py" /> -+ <Compile Include="test\test_ctypes\test_init.py" /> -+ <Compile Include="test\test_ctypes\test_internals.py" /> -+ <Compile Include="test\test_ctypes\test_keeprefs.py" /> -+ <Compile Include="test\test_ctypes\test_libc.py" /> -+ <Compile Include="test\test_ctypes\test_loading.py" /> -+ <Compile Include="test\test_ctypes\test_macholib.py" /> -+ <Compile Include="test\test_ctypes\test_memfunctions.py" /> -+ <Compile Include="test\test_ctypes\test_numbers.py" /> -+ <Compile Include="test\test_ctypes\test_objects.py" /> -+ <Compile Include="test\test_ctypes\test_parameters.py" /> -+ <Compile Include="test\test_ctypes\test_pep3118.py" /> -+ <Compile Include="test\test_ctypes\test_pickling.py" /> -+ <Compile Include="test\test_ctypes\test_pointers.py" /> -+ <Compile Include="test\test_ctypes\test_prototypes.py" /> -+ <Compile Include="test\test_ctypes\test_python_api.py" /> -+ <Compile Include="test\test_ctypes\test_random_things.py" /> -+ <Compile Include="test\test_ctypes\test_refcounts.py" /> -+ <Compile Include="test\test_ctypes\test_repr.py" /> -+ <Compile Include="test\test_ctypes\test_returnfuncptrs.py" /> -+ <Compile Include="test\test_ctypes\test_simplesubclasses.py" /> -+ <Compile Include="test\test_ctypes\test_sizes.py" /> -+ <Compile Include="test\test_ctypes\test_slicing.py" /> -+ <Compile Include="test\test_ctypes\test_stringptr.py" /> -+ <Compile Include="test\test_ctypes\test_strings.py" /> -+ <Compile Include="test\test_ctypes\test_structures.py" /> -+ <Compile Include="test\test_ctypes\test_struct_fields.py" /> -+ <Compile Include="test\test_ctypes\test_unaligned_structures.py" /> -+ <Compile Include="test\test_ctypes\test_unicode.py" /> -+ <Compile Include="test\test_ctypes\test_values.py" /> -+ <Compile Include="test\test_ctypes\test_varsize_struct.py" /> -+ <Compile Include="test\test_ctypes\test_win32.py" /> -+ <Compile Include="test\test_ctypes\test_wintypes.py" /> -+ <Compile Include="test\test_ctypes\__init__.py" /> -+ <Compile Include="test\test_ctypes\__main__.py" /> - <Compile Include="test\test_curses.py" /> - <Compile Include="test\test_datetime.py" /> - <Compile Include="test\test_dbm.py" /> -@@ -1725,7 +1724,6 @@ - <Folder Include="concurrent\futures" /> - <Folder Include="ctypes" /> - <Folder Include="ctypes\macholib" /> -- <Folder Include="ctypes\test" /> - <Folder Include="curses" /> - <Folder Include="dbm" /> - <Folder Include="distutils" /> -@@ -1769,6 +1767,7 @@ - <Folder Include="test\subprocessdata" /> - <Folder Include="test\support" /> - <Folder Include="test\test_asyncio" /> -+ <Folder Include="test\test_ctypes" /> - <Folder Include="test\test_email" /> - <Folder Include="test\test_email\data" /> - <Folder Include="test\test_import" /> -diff --git a/Tools/wasm/wasm_assets.py b/Tools/wasm/wasm_assets.py -index b7e83517ca..d0a0570840 100755 ---- a/Tools/wasm/wasm_assets.py -+++ b/Tools/wasm/wasm_assets.py -@@ -111,7 +111,6 @@ - - # regression test sub directories - OMIT_SUBDIRS = ( -- "ctypes/test/", - "tkinter/test/", - "unittest/test/", - ) --- -2.29.2.windows.2 - diff --git a/packages/test/patches/0006-gh-93839-Move-Lib-unttest-test-to-Lib-test-test_unit.patch b/packages/test/patches/0006-gh-93839-Move-Lib-unttest-test-to-Lib-test-test_unit.patch deleted file mode 100644 index 9e8d8c47ccd..00000000000 --- a/packages/test/patches/0006-gh-93839-Move-Lib-unttest-test-to-Lib-test-test_unit.patch +++ /dev/null @@ -1,724 +0,0 @@ -From c735d545343c3ab002c62596b2fb2cfa4488b0af Mon Sep 17 00:00:00 2001 -From: Victor Stinner <[email protected]> -Date: Tue, 21 Jun 2022 10:27:59 +0200 -Subject: [PATCH 6/9] gh-93839: Move Lib/unttest/test/ to Lib/test/test_unittest/ - (#94043) - -* Move Lib/unittest/test/ to Lib/test/test_unittest/ -* Remove Lib/test/test_unittest.py -* Replace unittest.test with test.test_unittest -* Remove unittest.load_tests() -* Rewrite unittest __init__.py and __main__.py -* Update build system, CODEOWNERS, and wasm_assets.py ---- - .github/CODEOWNERS | 2 +- - Lib/test/test_unittest.py | 16 ----- - Lib/test/test_unittest/__init__.py | 6 ++ - Lib/test/test_unittest/__main__.py | 4 ++ - .../test_unittest}/_test_warnings.py | 0 - .../test => test/test_unittest}/dummy.py | 0 - .../test => test/test_unittest}/support.py | 0 - .../test_unittest}/test_assertions.py | 0 - .../test_unittest}/test_async_case.py | 0 - .../test => test/test_unittest}/test_break.py | 0 - .../test => test/test_unittest}/test_case.py | 2 +- - .../test_unittest}/test_discovery.py | 6 +- - .../test_unittest}/test_functiontestcase.py | 2 +- - .../test_unittest}/test_loader.py | 6 +- - .../test_unittest}/test_program.py | 16 ++--- - .../test_unittest}/test_result.py | 0 - .../test_unittest}/test_runner.py | 2 +- - .../test_unittest}/test_setups.py | 0 - .../test_unittest}/test_skipping.py | 2 +- - .../test => test/test_unittest}/test_suite.py | 2 +- - .../test_unittest}/testmock/__init__.py | 2 +- - .../test_unittest}/testmock/__main__.py | 2 +- - .../test_unittest}/testmock/support.py | 0 - .../test_unittest}/testmock/testasync.py | 0 - .../test_unittest}/testmock/testcallable.py | 2 +- - .../test_unittest}/testmock/testhelpers.py | 0 - .../testmock/testmagicmethods.py | 0 - .../test_unittest}/testmock/testmock.py | 2 +- - .../test_unittest}/testmock/testpatch.py | 22 +++---- - .../test_unittest}/testmock/testsealable.py | 0 - .../test_unittest}/testmock/testsentinel.py | 0 - .../test_unittest}/testmock/testwith.py | 2 +- - Lib/unittest/__init__.py | 10 ---- - Lib/unittest/test/__init__.py | 25 -------- - Lib/unittest/test/__main__.py | 18 ------ - Makefile.pre.in | 4 +- - PCbuild/lib.pyproj | 58 +++++++++---------- - Tools/wasm/wasm_assets.py | 1 - - 38 files changed, 77 insertions(+), 137 deletions(-) - delete mode 100644 Lib/test/test_unittest.py - create mode 100644 Lib/test/test_unittest/__init__.py - create mode 100644 Lib/test/test_unittest/__main__.py - rename Lib/{unittest/test => test/test_unittest}/_test_warnings.py (100%) - rename Lib/{unittest/test => test/test_unittest}/dummy.py (100%) - rename Lib/{unittest/test => test/test_unittest}/support.py (100%) - rename Lib/{unittest/test => test/test_unittest}/test_assertions.py (100%) - rename Lib/{unittest/test => test/test_unittest}/test_async_case.py (100%) - rename Lib/{unittest/test => test/test_unittest}/test_break.py (100%) - rename Lib/{unittest/test => test/test_unittest}/test_case.py (99%) - rename Lib/{unittest/test => test/test_unittest}/test_discovery.py (99%) - rename Lib/{unittest/test => test/test_unittest}/test_functiontestcase.py (99%) - rename Lib/{unittest/test => test/test_unittest}/test_loader.py (99%) - rename Lib/{unittest/test => test/test_unittest}/test_program.py (96%) - rename Lib/{unittest/test => test/test_unittest}/test_result.py (100%) - rename Lib/{unittest/test => test/test_unittest}/test_runner.py (99%) - rename Lib/{unittest/test => test/test_unittest}/test_setups.py (100%) - rename Lib/{unittest/test => test/test_unittest}/test_skipping.py (99%) - rename Lib/{unittest/test => test/test_unittest}/test_suite.py (99%) - rename Lib/{unittest/test => test/test_unittest}/testmock/__init__.py (86%) - rename Lib/{unittest/test => test/test_unittest}/testmock/__main__.py (86%) - rename Lib/{unittest/test => test/test_unittest}/testmock/support.py (100%) - rename Lib/{unittest/test => test/test_unittest}/testmock/testasync.py (100%) - rename Lib/{unittest/test => test/test_unittest}/testmock/testcallable.py (98%) - rename Lib/{unittest/test => test/test_unittest}/testmock/testhelpers.py (100%) - rename Lib/{unittest/test => test/test_unittest}/testmock/testmagicmethods.py (100%) - rename Lib/{unittest/test => test/test_unittest}/testmock/testmock.py (99%) - rename Lib/{unittest/test => test/test_unittest}/testmock/testpatch.py (98%) - rename Lib/{unittest/test => test/test_unittest}/testmock/testsealable.py (100%) - rename Lib/{unittest/test => test/test_unittest}/testmock/testsentinel.py (100%) - rename Lib/{unittest/test => test/test_unittest}/testmock/testwith.py (99%) - delete mode 100644 Lib/unittest/test/__init__.py - delete mode 100644 Lib/unittest/test/__main__.py - -diff --git a/Lib/test/test_unittest.py b/Lib/test/test_unittest.py -deleted file mode 100644 -index 1079c7df2e..0000000000 ---- a/Lib/test/test_unittest.py -+++ /dev/null -@@ -1,16 +0,0 @@ --import unittest.test -- --from test import support -- -- --def load_tests(*_): -- # used by unittest -- return unittest.test.suite() -- -- --def tearDownModule(): -- support.reap_children() -- -- --if __name__ == "__main__": -- unittest.main() -diff --git a/Lib/test/test_unittest/__init__.py b/Lib/test/test_unittest/__init__.py -new file mode 100644 -index 0000000000..bc502ef32d ---- /dev/null -+++ b/Lib/test/test_unittest/__init__.py -@@ -0,0 +1,6 @@ -+import os.path -+from test.support import load_package_tests -+ -+ -+def load_tests(*args): -+ return load_package_tests(os.path.dirname(__file__), *args) -diff --git a/Lib/test/test_unittest/__main__.py b/Lib/test/test_unittest/__main__.py -new file mode 100644 -index 0000000000..40a23a297e ---- /dev/null -+++ b/Lib/test/test_unittest/__main__.py -@@ -0,0 +1,4 @@ -+from . import load_tests -+import unittest -+ -+unittest.main() -diff --git a/Lib/unittest/test/_test_warnings.py b/Lib/test/test_unittest/_test_warnings.py -similarity index 100% -rename from Lib/unittest/test/_test_warnings.py -rename to Lib/test/test_unittest/_test_warnings.py -diff --git a/Lib/unittest/test/dummy.py b/Lib/test/test_unittest/dummy.py -similarity index 100% -rename from Lib/unittest/test/dummy.py -rename to Lib/test/test_unittest/dummy.py -diff --git a/Lib/unittest/test/support.py b/Lib/test/test_unittest/support.py -similarity index 100% -rename from Lib/unittest/test/support.py -rename to Lib/test/test_unittest/support.py -diff --git a/Lib/unittest/test/test_assertions.py b/Lib/test/test_unittest/test_assertions.py -similarity index 100% -rename from Lib/unittest/test/test_assertions.py -rename to Lib/test/test_unittest/test_assertions.py -diff --git a/Lib/unittest/test/test_async_case.py b/Lib/test/test_unittest/test_async_case.py -similarity index 100% -rename from Lib/unittest/test/test_async_case.py -rename to Lib/test/test_unittest/test_async_case.py -diff --git a/Lib/unittest/test/test_break.py b/Lib/test/test_unittest/test_break.py -similarity index 100% -rename from Lib/unittest/test/test_break.py -rename to Lib/test/test_unittest/test_break.py -diff --git a/Lib/unittest/test/test_case.py b/Lib/test/test_unittest/test_case.py -similarity index 99% -rename from Lib/unittest/test/test_case.py -rename to Lib/test/test_unittest/test_case.py -index 374a255255..e000fe4f07 100644 ---- a/Lib/unittest/test/test_case.py -+++ b/Lib/test/test_unittest/test_case.py -@@ -15,7 +15,7 @@ - - import unittest - --from unittest.test.support import ( -+from test.test_unittest.support import ( - TestEquality, TestHashing, LoggingResult, LegacyLoggingResult, - ResultWithNoStartTestRunStopTestRun - ) -diff --git a/Lib/unittest/test/test_discovery.py b/Lib/test/test_unittest/test_discovery.py -similarity index 99% -rename from Lib/unittest/test/test_discovery.py -rename to Lib/test/test_unittest/test_discovery.py -index 3b58786ec1..946fa1258e 100644 ---- a/Lib/unittest/test/test_discovery.py -+++ b/Lib/test/test_unittest/test_discovery.py -@@ -10,7 +10,7 @@ - - import unittest - import unittest.mock --import unittest.test -+import test.test_unittest - - - class TestableTestProgram(unittest.TestProgram): -@@ -789,7 +789,7 @@ def test_discovery_from_dotted_path(self): - loader = unittest.TestLoader() - - tests = [self] -- expectedPath = os.path.abspath(os.path.dirname(unittest.test.__file__)) -+ expectedPath = os.path.abspath(os.path.dirname(test.test_unittest.__file__)) - - self.wasRun = False - def _find_tests(start_dir, pattern): -@@ -797,7 +797,7 @@ def _find_tests(start_dir, pattern): - self.assertEqual(start_dir, expectedPath) - return tests - loader._find_tests = _find_tests -- suite = loader.discover('unittest.test') -+ suite = loader.discover('test.test_unittest') - self.assertTrue(self.wasRun) - self.assertEqual(suite._tests, tests) - -diff --git a/Lib/unittest/test/test_functiontestcase.py b/Lib/test/test_unittest/test_functiontestcase.py -similarity index 99% -rename from Lib/unittest/test/test_functiontestcase.py -rename to Lib/test/test_unittest/test_functiontestcase.py -index 4971729880..2ebed9564a 100644 ---- a/Lib/unittest/test/test_functiontestcase.py -+++ b/Lib/test/test_unittest/test_functiontestcase.py -@@ -1,6 +1,6 @@ - import unittest - --from unittest.test.support import LoggingResult -+from test.test_unittest.support import LoggingResult - - - class Test_FunctionTestCase(unittest.TestCase): -diff --git a/Lib/unittest/test/test_loader.py b/Lib/test/test_unittest/test_loader.py -similarity index 99% -rename from Lib/unittest/test/test_loader.py -rename to Lib/test/test_unittest/test_loader.py -index de2268cda9..c06ebb658d 100644 ---- a/Lib/unittest/test/test_loader.py -+++ b/Lib/test/test_unittest/test_loader.py -@@ -716,7 +716,7 @@ def test_loadTestsFromName__module_not_loaded(self): - # We're going to try to load this module as a side-effect, so it - # better not be loaded before we try. - # -- module_name = 'unittest.test.dummy' -+ module_name = 'test.test_unittest.dummy' - sys.modules.pop(module_name, None) - - loader = unittest.TestLoader() -@@ -844,7 +844,7 @@ def test_loadTestsFromNames__unknown_attr_name(self): - loader = unittest.TestLoader() - - suite = loader.loadTestsFromNames( -- ['unittest.loader.sdasfasfasdf', 'unittest.test.dummy']) -+ ['unittest.loader.sdasfasfasdf', 'test.test_unittest.dummy']) - error, test = self.check_deferred_error(loader, list(suite)[0]) - expected = "module 'unittest.loader' has no attribute 'sdasfasfasdf'" - self.assertIn( -@@ -1141,7 +1141,7 @@ def test_loadTestsFromNames__module_not_loaded(self): - # We're going to try to load this module as a side-effect, so it - # better not be loaded before we try. - # -- module_name = 'unittest.test.dummy' -+ module_name = 'test.test_unittest.dummy' - sys.modules.pop(module_name, None) - - loader = unittest.TestLoader() -diff --git a/Lib/unittest/test/test_program.py b/Lib/test/test_unittest/test_program.py -similarity index 96% -rename from Lib/unittest/test/test_program.py -rename to Lib/test/test_unittest/test_program.py -index 26a8550af8..169fc4ed94 100644 ---- a/Lib/unittest/test/test_program.py -+++ b/Lib/test/test_unittest/test_program.py -@@ -5,8 +5,8 @@ - import subprocess - from test import support - import unittest --import unittest.test --from unittest.test.test_result import BufferedWriter -+import test.test_unittest -+from test.test_unittest.test_result import BufferedWriter - - - class Test_TestProgram(unittest.TestCase): -@@ -15,7 +15,7 @@ def test_discovery_from_dotted_path(self): - loader = unittest.TestLoader() - - tests = [self] -- expectedPath = os.path.abspath(os.path.dirname(unittest.test.__file__)) -+ expectedPath = os.path.abspath(os.path.dirname(test.test_unittest.__file__)) - - self.wasRun = False - def _find_tests(start_dir, pattern): -@@ -23,7 +23,7 @@ def _find_tests(start_dir, pattern): - self.assertEqual(start_dir, expectedPath) - return tests - loader._find_tests = _find_tests -- suite = loader.discover('unittest.test') -+ suite = loader.discover('test.test_unittest') - self.assertTrue(self.wasRun) - self.assertEqual(suite._tests, tests) - -@@ -93,10 +93,10 @@ def run(self, test): - sys.argv = ['faketest'] - runner = FakeRunner() - program = unittest.TestProgram(testRunner=runner, exit=False, -- defaultTest='unittest.test', -+ defaultTest='test.test_unittest', - testLoader=self.FooBarLoader()) - sys.argv = old_argv -- self.assertEqual(('unittest.test',), program.testNames) -+ self.assertEqual(('test.test_unittest',), program.testNames) - - def test_defaultTest_with_iterable(self): - class FakeRunner(object): -@@ -109,10 +109,10 @@ def run(self, test): - runner = FakeRunner() - program = unittest.TestProgram( - testRunner=runner, exit=False, -- defaultTest=['unittest.test', 'unittest.test2'], -+ defaultTest=['test.test_unittest', 'test.test_unittest2'], - testLoader=self.FooBarLoader()) - sys.argv = old_argv -- self.assertEqual(['unittest.test', 'unittest.test2'], -+ self.assertEqual(['test.test_unittest', 'test.test_unittest2'], - program.testNames) - - def test_NonExit(self): -diff --git a/Lib/unittest/test/test_result.py b/Lib/test/test_unittest/test_result.py -similarity index 100% -rename from Lib/unittest/test/test_result.py -rename to Lib/test/test_unittest/test_result.py -diff --git a/Lib/unittest/test/test_runner.py b/Lib/test/test_unittest/test_runner.py -similarity index 99% -rename from Lib/unittest/test/test_runner.py -rename to Lib/test/test_unittest/test_runner.py -index d3488b40e8..9e3a0a9ca0 100644 ---- a/Lib/unittest/test/test_runner.py -+++ b/Lib/test/test_unittest/test_runner.py -@@ -8,7 +8,7 @@ - import unittest - from unittest.case import _Outcome - --from unittest.test.support import (LoggingResult, -+from test.test_unittest.support import (LoggingResult, - ResultWithNoStartTestRunStopTestRun) - - -diff --git a/Lib/unittest/test/test_setups.py b/Lib/test/test_unittest/test_setups.py -similarity index 100% -rename from Lib/unittest/test/test_setups.py -rename to Lib/test/test_unittest/test_setups.py -diff --git a/Lib/unittest/test/test_skipping.py b/Lib/test/test_unittest/test_skipping.py -similarity index 99% -rename from Lib/unittest/test/test_skipping.py -rename to Lib/test/test_unittest/test_skipping.py -index 64ceeae37e..f146dcac18 100644 ---- a/Lib/unittest/test/test_skipping.py -+++ b/Lib/test/test_unittest/test_skipping.py -@@ -1,6 +1,6 @@ - import unittest - --from unittest.test.support import LoggingResult -+from test.test_unittest.support import LoggingResult - - - class Test_TestSkipping(unittest.TestCase): -diff --git a/Lib/unittest/test/test_suite.py b/Lib/test/test_unittest/test_suite.py -similarity index 99% -rename from Lib/unittest/test/test_suite.py -rename to Lib/test/test_unittest/test_suite.py -index 0551a16996..ca52ee9d9c 100644 ---- a/Lib/unittest/test/test_suite.py -+++ b/Lib/test/test_unittest/test_suite.py -@@ -3,7 +3,7 @@ - import gc - import sys - import weakref --from unittest.test.support import LoggingResult, TestEquality -+from test.test_unittest.support import LoggingResult, TestEquality - - - ### Support code for Test_TestSuite -diff --git a/Lib/unittest/test/testmock/__init__.py b/Lib/test/test_unittest/testmock/__init__.py -similarity index 86% -rename from Lib/unittest/test/testmock/__init__.py -rename to Lib/test/test_unittest/testmock/__init__.py -index 87d7ae994d..6ee60b2376 100644 ---- a/Lib/unittest/test/testmock/__init__.py -+++ b/Lib/test/test_unittest/testmock/__init__.py -@@ -10,7 +10,7 @@ def load_tests(*args): - suite = unittest.TestSuite() - for fn in os.listdir(here): - if fn.startswith("test") and fn.endswith(".py"): -- modname = "unittest.test.testmock." + fn[:-3] -+ modname = "test.test_unittest.testmock." + fn[:-3] - __import__(modname) - module = sys.modules[modname] - suite.addTest(loader.loadTestsFromModule(module)) -diff --git a/Lib/unittest/test/testmock/__main__.py b/Lib/test/test_unittest/testmock/__main__.py -similarity index 86% -rename from Lib/unittest/test/testmock/__main__.py -rename to Lib/test/test_unittest/testmock/__main__.py -index 45c633a4ee..1e3068b0dd 100644 ---- a/Lib/unittest/test/testmock/__main__.py -+++ b/Lib/test/test_unittest/testmock/__main__.py -@@ -6,7 +6,7 @@ def load_tests(loader, standard_tests, pattern): - # top level directory cached on loader instance - this_dir = os.path.dirname(__file__) - pattern = pattern or "test*.py" -- # We are inside unittest.test.testmock, so the top-level is three notches up -+ # We are inside test.test_unittest.testmock, so the top-level is three notches up - top_level_dir = os.path.dirname(os.path.dirname(os.path.dirname(this_dir))) - package_tests = loader.discover(start_dir=this_dir, pattern=pattern, - top_level_dir=top_level_dir) -diff --git a/Lib/unittest/test/testmock/support.py b/Lib/test/test_unittest/testmock/support.py -similarity index 100% -rename from Lib/unittest/test/testmock/support.py -rename to Lib/test/test_unittest/testmock/support.py -diff --git a/Lib/unittest/test/testmock/testasync.py b/Lib/test/test_unittest/testmock/testasync.py -similarity index 100% -rename from Lib/unittest/test/testmock/testasync.py -rename to Lib/test/test_unittest/testmock/testasync.py -diff --git a/Lib/unittest/test/testmock/testcallable.py b/Lib/test/test_unittest/testmock/testcallable.py -similarity index 98% -rename from Lib/unittest/test/testmock/testcallable.py -rename to Lib/test/test_unittest/testmock/testcallable.py -index 5eadc00704..ca88511f63 100644 ---- a/Lib/unittest/test/testmock/testcallable.py -+++ b/Lib/test/test_unittest/testmock/testcallable.py -@@ -3,7 +3,7 @@ - # http://www.voidspace.org.uk/python/mock/ - - import unittest --from unittest.test.testmock.support import is_instance, X, SomeClass -+from test.test_unittest.testmock.support import is_instance, X, SomeClass - - from unittest.mock import ( - Mock, MagicMock, NonCallableMagicMock, -diff --git a/Lib/unittest/test/testmock/testhelpers.py b/Lib/test/test_unittest/testmock/testhelpers.py -similarity index 100% -rename from Lib/unittest/test/testmock/testhelpers.py -rename to Lib/test/test_unittest/testmock/testhelpers.py -diff --git a/Lib/unittest/test/testmock/testmagicmethods.py b/Lib/test/test_unittest/testmock/testmagicmethods.py -similarity index 100% -rename from Lib/unittest/test/testmock/testmagicmethods.py -rename to Lib/test/test_unittest/testmock/testmagicmethods.py -diff --git a/Lib/unittest/test/testmock/testmock.py b/Lib/test/test_unittest/testmock/testmock.py -similarity index 99% -rename from Lib/unittest/test/testmock/testmock.py -rename to Lib/test/test_unittest/testmock/testmock.py -index c99098dc4e..8a92490137 100644 ---- a/Lib/unittest/test/testmock/testmock.py -+++ b/Lib/test/test_unittest/testmock/testmock.py -@@ -5,7 +5,7 @@ - - from test.support import ALWAYS_EQ - import unittest --from unittest.test.testmock.support import is_instance -+from test.test_unittest.testmock.support import is_instance - from unittest import mock - from unittest.mock import ( - call, DEFAULT, patch, sentinel, -diff --git a/Lib/unittest/test/testmock/testpatch.py b/Lib/test/test_unittest/testmock/testpatch.py -similarity index 98% -rename from Lib/unittest/test/testmock/testpatch.py -rename to Lib/test/test_unittest/testmock/testpatch.py -index 8ab63a1317..93ec0ca4be 100644 ---- a/Lib/unittest/test/testmock/testpatch.py -+++ b/Lib/test/test_unittest/testmock/testpatch.py -@@ -7,8 +7,8 @@ - from collections import OrderedDict - - import unittest --from unittest.test.testmock import support --from unittest.test.testmock.support import SomeClass, is_instance -+from test.test_unittest.testmock import support -+from test.test_unittest.testmock.support import SomeClass, is_instance - - from test.test_importlib.util import uncache - from unittest.mock import ( -@@ -669,7 +669,7 @@ def test_patch_dict_decorator_resolution(self): - # the new dictionary during function call - original = support.target.copy() - -- @patch.dict('unittest.test.testmock.support.target', {'bar': 'BAR'}) -+ @patch.dict('test.test_unittest.testmock.support.target', {'bar': 'BAR'}) - def test(): - self.assertEqual(support.target, {'foo': 'BAZ', 'bar': 'BAR'}) - -@@ -1614,7 +1614,7 @@ def test_patch_with_spec_mock_repr(self): - - - def test_patch_nested_autospec_repr(self): -- with patch('unittest.test.testmock.support', autospec=True) as m: -+ with patch('test.test_unittest.testmock.support', autospec=True) as m: - self.assertIn(" name='support.SomeClass.wibble()'", - repr(m.SomeClass.wibble())) - self.assertIn(" name='support.SomeClass().wibble()'", -@@ -1882,7 +1882,7 @@ def foo(x=0): - - with patch.object(foo, '__module__', "testpatch2"): - self.assertEqual(foo.__module__, "testpatch2") -- self.assertEqual(foo.__module__, 'unittest.test.testmock.testpatch') -+ self.assertEqual(foo.__module__, 'test.test_unittest.testmock.testpatch') - - with patch.object(foo, '__annotations__', dict([('s', 1, )])): - self.assertEqual(foo.__annotations__, dict([('s', 1, )])) -@@ -1917,16 +1917,16 @@ def test_dotted_but_module_not_loaded(self): - # This exercises the AttributeError branch of _dot_lookup. - - # make sure it's there -- import unittest.test.testmock.support -+ import test.test_unittest.testmock.support - # now make sure it's not: - with patch.dict('sys.modules'): -- del sys.modules['unittest.test.testmock.support'] -- del sys.modules['unittest.test.testmock'] -- del sys.modules['unittest.test'] -+ del sys.modules['test.test_unittest.testmock.support'] -+ del sys.modules['test.test_unittest.testmock'] -+ del sys.modules['test.test_unittest'] - del sys.modules['unittest'] - - # now make sure we can patch based on a dotted path: -- @patch('unittest.test.testmock.support.X') -+ @patch('test.test_unittest.testmock.support.X') - def test(mock): - pass - test() -@@ -1943,7 +1943,7 @@ class Foo: - - - def test_cant_set_kwargs_when_passing_a_mock(self): -- @patch('unittest.test.testmock.support.X', new=object(), x=1) -+ @patch('test.test_unittest.testmock.support.X', new=object(), x=1) - def test(): pass - with self.assertRaises(TypeError): - test() -diff --git a/Lib/unittest/test/testmock/testsealable.py b/Lib/test/test_unittest/testmock/testsealable.py -similarity index 100% -rename from Lib/unittest/test/testmock/testsealable.py -rename to Lib/test/test_unittest/testmock/testsealable.py -diff --git a/Lib/unittest/test/testmock/testsentinel.py b/Lib/test/test_unittest/testmock/testsentinel.py -similarity index 100% -rename from Lib/unittest/test/testmock/testsentinel.py -rename to Lib/test/test_unittest/testmock/testsentinel.py -diff --git a/Lib/unittest/test/testmock/testwith.py b/Lib/test/test_unittest/testmock/testwith.py -similarity index 99% -rename from Lib/unittest/test/testmock/testwith.py -rename to Lib/test/test_unittest/testmock/testwith.py -index c74d49a63c..8dc8eb1137 100644 ---- a/Lib/unittest/test/testmock/testwith.py -+++ b/Lib/test/test_unittest/testmock/testwith.py -@@ -1,7 +1,7 @@ - import unittest - from warnings import catch_warnings - --from unittest.test.testmock.support import is_instance -+from test.test_unittest.testmock.support import is_instance - from unittest.mock import MagicMock, Mock, patch, sentinel, mock_open, call - - -diff --git a/Lib/unittest/__init__.py b/Lib/unittest/__init__.py -index 005d23f6d0..b8de8c95d6 100644 ---- a/Lib/unittest/__init__.py -+++ b/Lib/unittest/__init__.py -@@ -73,16 +73,6 @@ def testMultiply(self): - _TextTestResult = TextTestResult - - --# There are no tests here, so don't try to run anything discovered from --# introspecting the symbols (e.g. FunctionTestCase). Instead, all our --# tests come from within unittest.test. --def load_tests(loader, tests, pattern): -- import os.path -- # top level directory cached on loader instance -- this_dir = os.path.dirname(__file__) -- return loader.discover(start_dir=this_dir, pattern=pattern) -- -- - # Lazy import of IsolatedAsyncioTestCase from .async_case - # It imports asyncio, which is relatively heavy, but most tests - # do not need it. -diff --git a/Lib/unittest/test/__init__.py b/Lib/unittest/test/__init__.py -deleted file mode 100644 -index 143f4ab5a3..0000000000 ---- a/Lib/unittest/test/__init__.py -+++ /dev/null -@@ -1,25 +0,0 @@ --import os --import sys --import unittest -- -- --here = os.path.dirname(__file__) --loader = unittest.defaultTestLoader -- --def suite(): -- suite = unittest.TestSuite() -- for fn in os.listdir(here): -- if fn.startswith("test") and fn.endswith(".py"): -- modname = "unittest.test." + fn[:-3] -- try: -- __import__(modname) -- except unittest.SkipTest: -- continue -- module = sys.modules[modname] -- suite.addTest(loader.loadTestsFromModule(module)) -- suite.addTest(loader.loadTestsFromName('unittest.test.testmock')) -- return suite -- -- --if __name__ == "__main__": -- unittest.main(defaultTest="suite") -diff --git a/Lib/unittest/test/__main__.py b/Lib/unittest/test/__main__.py -deleted file mode 100644 -index 44d0591e84..0000000000 ---- a/Lib/unittest/test/__main__.py -+++ /dev/null -@@ -1,18 +0,0 @@ --import os --import unittest -- -- --def load_tests(loader, standard_tests, pattern): -- # top level directory cached on loader instance -- this_dir = os.path.dirname(__file__) -- pattern = pattern or "test_*.py" -- # We are inside unittest.test, so the top-level is two notches up -- top_level_dir = os.path.dirname(os.path.dirname(this_dir)) -- package_tests = loader.discover(start_dir=this_dir, pattern=pattern, -- top_level_dir=top_level_dir) -- standard_tests.addTests(package_tests) -- return standard_tests -- -- --if __name__ == '__main__': -- unittest.main() -diff --git a/PCbuild/lib.pyproj b/PCbuild/lib.pyproj -index 692b083349..f3f44d1d8f 100644 ---- a/PCbuild/lib.pyproj -+++ b/PCbuild/lib.pyproj -@@ -1491,33 +1491,33 @@ - <Compile Include="unittest\runner.py" /> - <Compile Include="unittest\signals.py" /> - <Compile Include="unittest\suite.py" /> -- <Compile Include="unittest\test\dummy.py" /> -- <Compile Include="unittest\test\support.py" /> -- <Compile Include="unittest\test\testmock\support.py" /> -- <Compile Include="unittest\test\testmock\testcallable.py" /> -- <Compile Include="unittest\test\testmock\testhelpers.py" /> -- <Compile Include="unittest\test\testmock\testmagicmethods.py" /> -- <Compile Include="unittest\test\testmock\testmock.py" /> -- <Compile Include="unittest\test\testmock\testpatch.py" /> -- <Compile Include="unittest\test\testmock\testsentinel.py" /> -- <Compile Include="unittest\test\testmock\testwith.py" /> -- <Compile Include="unittest\test\testmock\__init__.py" /> -- <Compile Include="unittest\test\testmock\__main__.py" /> -- <Compile Include="unittest\test\test_assertions.py" /> -- <Compile Include="unittest\test\test_break.py" /> -- <Compile Include="unittest\test\test_case.py" /> -- <Compile Include="unittest\test\test_discovery.py" /> -- <Compile Include="unittest\test\test_functiontestcase.py" /> -- <Compile Include="unittest\test\test_loader.py" /> -- <Compile Include="unittest\test\test_program.py" /> -- <Compile Include="unittest\test\test_result.py" /> -- <Compile Include="unittest\test\test_runner.py" /> -- <Compile Include="unittest\test\test_setups.py" /> -- <Compile Include="unittest\test\test_skipping.py" /> -- <Compile Include="unittest\test\test_suite.py" /> -- <Compile Include="unittest\test\_test_warnings.py" /> -- <Compile Include="unittest\test\__init__.py" /> -- <Compile Include="unittest\test\__main__.py" /> -+ <Compile Include="test\test_unittest\dummy.py" /> -+ <Compile Include="test\test_unittest\support.py" /> -+ <Compile Include="test\test_unittest\testmock\support.py" /> -+ <Compile Include="test\test_unittest\testmock\testcallable.py" /> -+ <Compile Include="test\test_unittest\testmock\testhelpers.py" /> -+ <Compile Include="test\test_unittest\testmock\testmagicmethods.py" /> -+ <Compile Include="test\test_unittest\testmock\testmock.py" /> -+ <Compile Include="test\test_unittest\testmock\testpatch.py" /> -+ <Compile Include="test\test_unittest\testmock\testsentinel.py" /> -+ <Compile Include="test\test_unittest\testmock\testwith.py" /> -+ <Compile Include="test\test_unittest\testmock\__init__.py" /> -+ <Compile Include="test\test_unittest\testmock\__main__.py" /> -+ <Compile Include="test\test_unittest\test_assertions.py" /> -+ <Compile Include="test\test_unittest\test_break.py" /> -+ <Compile Include="test\test_unittest\test_case.py" /> -+ <Compile Include="test\test_unittest\test_discovery.py" /> -+ <Compile Include="test\test_unittest\test_functiontestcase.py" /> -+ <Compile Include="test\test_unittest\test_loader.py" /> -+ <Compile Include="test\test_unittest\test_program.py" /> -+ <Compile Include="test\test_unittest\test_result.py" /> -+ <Compile Include="test\test_unittest\test_runner.py" /> -+ <Compile Include="test\test_unittest\test_setups.py" /> -+ <Compile Include="test\test_unittest\test_skipping.py" /> -+ <Compile Include="test\test_unittest\test_suite.py" /> -+ <Compile Include="test\test_unittest\_test_warnings.py" /> -+ <Compile Include="test\test_unittest\__init__.py" /> -+ <Compile Include="test\test_unittest\__main__.py" /> - <Compile Include="unittest\util.py" /> - <Compile Include="unittest\__init__.py" /> - <Compile Include="unittest\__main__.py" /> -@@ -1804,6 +1804,8 @@ - <Folder Include="test\test_json" /> - <Folder Include="test\test_peg_generator" /> - <Folder Include="test\test_tools" /> -+ <Folder Include="test\test_unittest" /> -+ <Folder Include="test\test_unittest\testmock" /> - <Folder Include="test\test_warnings" /> - <Folder Include="test\test_warnings\data" /> - <Folder Include="test\tracedmodules" /> -@@ -1813,8 +1815,6 @@ - <Folder Include="tkinter\test\test_ttk" /> - <Folder Include="turtledemo" /> - <Folder Include="unittest" /> -- <Folder Include="unittest\test" /> -- <Folder Include="unittest\test\testmock" /> - <Folder Include="urllib" /> - <Folder Include="venv" /> - <Folder Include="wsgiref" /> -diff --git a/Tools/wasm/wasm_assets.py b/Tools/wasm/wasm_assets.py -index d0a0570840..67afde60f0 100755 ---- a/Tools/wasm/wasm_assets.py -+++ b/Tools/wasm/wasm_assets.py -@@ -112,7 +112,6 @@ - # regression test sub directories - OMIT_SUBDIRS = ( - "tkinter/test/", -- "unittest/test/", - ) - - def get_builddir(args: argparse.Namespace) -> pathlib.Path: --- -2.29.2.windows.2 - diff --git a/packages/test/patches/0007-gh-93839-Use-load_package_tests-for-testmock-GH-9405.patch b/packages/test/patches/0007-gh-93839-Use-load_package_tests-for-testmock-GH-9405.patch deleted file mode 100644 index 33531df3b37..00000000000 --- a/packages/test/patches/0007-gh-93839-Use-load_package_tests-for-testmock-GH-9405.patch +++ /dev/null @@ -1,40 +0,0 @@ -From 50ebd72fb0e69c78f95cea3d4a47589beb91ac37 Mon Sep 17 00:00:00 2001 -From: Christian Heimes <[email protected]> -Date: Tue, 21 Jun 2022 14:51:39 +0200 -Subject: [PATCH 7/9] gh-93839: Use load_package_tests() for testmock (GH-94055) - -Fixes failing tests on WebAssembly platforms. - -Automerge-Triggered-By: GH:tiran ---- - Lib/test/test_unittest/testmock/__init__.py | 17 +++-------------- - 1 file changed, 3 insertions(+), 14 deletions(-) - -diff --git a/Lib/test/test_unittest/testmock/__init__.py b/Lib/test/test_unittest/testmock/__init__.py -index 6ee60b2376..bc502ef32d 100644 ---- a/Lib/test/test_unittest/testmock/__init__.py -+++ b/Lib/test/test_unittest/testmock/__init__.py -@@ -1,17 +1,6 @@ --import os --import sys --import unittest -+import os.path -+from test.support import load_package_tests - - --here = os.path.dirname(__file__) --loader = unittest.defaultTestLoader -- - def load_tests(*args): -- suite = unittest.TestSuite() -- for fn in os.listdir(here): -- if fn.startswith("test") and fn.endswith(".py"): -- modname = "test.test_unittest.testmock." + fn[:-3] -- __import__(modname) -- module = sys.modules[modname] -- suite.addTest(loader.loadTestsFromModule(module)) -- return suite -+ return load_package_tests(os.path.dirname(__file__), *args) --- -2.29.2.windows.2 - diff --git a/packages/test/patches/0008-Move-test-directories.patch b/packages/test/patches/0008-Move-test-directories.patch deleted file mode 100644 index 7ca56f31b64..00000000000 --- a/packages/test/patches/0008-Move-test-directories.patch +++ /dev/null @@ -1,36 +0,0 @@ -From 4c71c808cc65ed6003b1e29d583c71586ebb36e1 Mon Sep 17 00:00:00 2001 -From: ryanking13 <[email protected]> -Date: Wed, 25 Jan 2023 15:54:16 +0900 -Subject: [PATCH 8/9] Move test directories - ---- - Makefile.pre.in | 6 +++--- - 1 file changed, 3 insertions(+), 3 deletions(-) - -diff --git a/Makefile.pre.in b/Makefile.pre.in -index b356f6293e..68c55a356a 100644 ---- a/Makefile.pre.in -+++ b/Makefile.pre.in -@@ -1932,8 +1932,7 @@ LIBSUBDIRS= asyncio \ - xmlrpc \ - zoneinfo \ - __phello__ --TESTSUBDIRS= ctypes/test \ -- distutils/tests \ -+TESTSUBDIRS= distutils/tests \ - idlelib/idle_test \ - lib2to3/tests \ - lib2to3/tests/data \ -@@ -2009,7 +2008,8 @@ TESTSUBDIRS= ctypes/test \ - test/ziptestdata \ - tkinter/test tkinter/test/test_tkinter \ - tkinter/test/test_ttk \ -- unittest/test unittest/test/testmock -+ test/test_ctypes \ -+ test/test_unittest test/test_unittest/testmock - - TEST_MODULES=@TEST_MODULES@ - libinstall: all $(srcdir)/Modules/xxmodule.c --- -2.29.2.windows.2 - diff --git a/packages/wrapt/test_wrapt.py b/packages/wrapt/test_wrapt.py index 05674155f5f..b2e41ad83bf 100644 --- a/packages/wrapt/test_wrapt.py +++ b/packages/wrapt/test_wrapt.py @@ -79,5 +79,5 @@ def _function(*args, **kwargs): self.assertEqual(result, (_args, _kwargs)) # Run tests - with unittest.TestCase().assertRaisesRegex(SystemExit, "False"): + with unittest.TestCase().assertRaisesRegex(SystemExit, "5"): unittest.main() diff --git a/packages/xgboost/meta.yaml b/packages/xgboost/meta.yaml index e38c486f1bb..5d8edcb2928 100644 --- a/packages/xgboost/meta.yaml +++ b/packages/xgboost/meta.yaml @@ -1,5 +1,6 @@ package: name: xgboost + _disabled: true version: 1.6.1 top-level: - xgboost @@ -22,7 +23,6 @@ requirements: - numpy - scipy - setuptools - - distutils about: home: https://github.com/dmlc/xgboost PyPI: https://pypi.org/project/xgboost diff --git a/pyodide-build/pyodide_build/pyzip.py b/pyodide-build/pyodide_build/pyzip.py index da4301432d5..f537d4664e8 100644 --- a/pyodide-build/pyodide_build/pyzip.py +++ b/pyodide-build/pyodide_build/pyzip.py @@ -28,7 +28,6 @@ # These files are unvendored from the stdlib and can be loaded with `loadPackage` UNVENDORED_FILES = ( "test/", - "distutils/", "sqlite3", "ssl.py", "lzma.py", diff --git a/pyodide-build/pyodide_build/tests/_test_recipes/joblib/meta.yaml b/pyodide-build/pyodide_build/tests/_test_recipes/joblib/meta.yaml index 1c99591cd58..a8be0b66f68 100644 --- a/pyodide-build/pyodide_build/tests/_test_recipes/joblib/meta.yaml +++ b/pyodide-build/pyodide_build/tests/_test_recipes/joblib/meta.yaml @@ -2,10 +2,6 @@ package: name: joblib version: 1.1.0 -requirements: - run: - - distutils - source: url: https://files.pythonhosted.org/packages/92/b9/9e3616e7e00c8165fb25175c53444533bdde05f3e974d45d9fcbbe451ee6/joblib-1.1.0.tar.gz sha256: 4158fcecd13733f8be669be0683b96ebdbbd38d23559f54dca7205aea1bf1e35 diff --git a/pyodide-build/pyodide_build/tests/fixture.py b/pyodide-build/pyodide_build/tests/fixture.py index 538ee59bf7e..e403a9cb41d 100644 --- a/pyodide-build/pyodide_build/tests/fixture.py +++ b/pyodide-build/pyodide_build/tests/fixture.py @@ -18,7 +18,6 @@ def temp_python_lib(tmp_path_factory): (path / "test").mkdir() (path / "test" / "test_blah.py").touch() - (path / "distutils").mkdir() (path / "turtle.py").touch() (path / "module1.py").touch() diff --git a/pyodide-build/pyodide_build/tests/test_pyzip.py b/pyodide-build/pyodide_build/tests/test_pyzip.py index a1343ad9895..572f89d4fb6 100644 --- a/pyodide-build/pyodide_build/tests/test_pyzip.py +++ b/pyodide-build/pyodide_build/tests/test_pyzip.py @@ -8,7 +8,7 @@ def test_defaultfilterfunc(temp_python_lib): filterfunc = default_filterfunc(temp_python_lib, verbose=True) - ignored = ["test", "distutils", "turtle.py"] + ignored = ["test", "turtle.py"] assert set(ignored) == filterfunc(str(temp_python_lib), ignored) assert set() == filterfunc(str(temp_python_lib), ["hello.py", "world.py"]) diff --git a/pyodide-build/pyodide_build/tools/pyo3_config.ini b/pyodide-build/pyodide_build/tools/pyo3_config.ini index 799e184af01..473858b6a27 100644 --- a/pyodide-build/pyodide_build/tools/pyo3_config.ini +++ b/pyodide-build/pyodide_build/tools/pyo3_config.ini @@ -1,7 +1,7 @@ implementation=CPython -version=3.11 +version=3.12 shared=true abi3=false -lib_name=python3.11 +lib_name=python3.12 pointer_width=32 suppress_build_script_link_lines=false diff --git a/run_docker b/run_docker index 6632c9230e4..bed0ba65422 100755 --- a/run_docker +++ b/run_docker @@ -1,7 +1,7 @@ #!/usr/bin/env bash PYODIDE_IMAGE_REPO="pyodide" -PYODIDE_IMAGE_TAG="20240127-chrome114-firefox122-py311" +PYODIDE_IMAGE_TAG="20240127-chrome114-firefox122-py312" DEFAULT_PYODIDE_DOCKER_IMAGE="${PYODIDE_IMAGE_REPO}/pyodide-env:${PYODIDE_IMAGE_TAG}" DEFAULT_PYODIDE_SYSTEM_PORT="none" DOCKER_COMMAND="/bin/bash" diff --git a/src/core/pyproxy.c b/src/core/pyproxy.c index 93d938d2133..750e66f5419 100644 --- a/src/core/pyproxy.c +++ b/src/core/pyproxy.c @@ -111,13 +111,25 @@ static PyObject* asyncio; #define IS_MUTABLE_SEQUENCE (1 << 14) // clang-format on +// _PyGen_GetCode is static, and PyGen_GetCode is a public wrapper around it +// which increfs the return value. We wrap the wrapper back into _PyGen_GetCode +// which returns a borrowed reference so we can use the exact upstream +// implementation of gen_is_coroutine +static inline PyCodeObject* +_PyGen_GetCode(PyGenObject* o) +{ + PyCodeObject* code = PyGen_GetCode((PyGenObject*)o); + Py_DECREF(code); + return code; +} + // Taken from genobject.c // For checking whether an object is awaitable. static int gen_is_coroutine(PyObject* o) { if (PyGen_CheckExact(o)) { - PyCodeObject* code = (PyCodeObject*)((PyGenObject*)o)->gi_code; + PyCodeObject* code = _PyGen_GetCode((PyGenObject*)o); if (code->co_flags & CO_ITERABLE_COROUTINE) { return 1; } diff --git a/src/core/stack_switching/pystate.c b/src/core/stack_switching/pystate.c index bfb2bc60f9c..6f7d14520fd 100644 --- a/src/core/stack_switching/pystate.c +++ b/src/core/stack_switching/pystate.c @@ -48,8 +48,8 @@ typedef struct { PyFrameObject* _top_frame; _PyCFrame* cframe; - int use_tracing; - int recursion_depth; + int py_recursion_depth; + int c_recursion_depth; int trash_delete_nesting; _PyInterpreterFrame* current_frame; _PyStackChunk* datastack_chunk; @@ -69,13 +69,14 @@ savePythonState(PyThreadState* tstate) ps.datastack_top = tstate->datastack_top; ps.datastack_limit = tstate->datastack_limit; - ps.use_tracing = tstate->cframe->use_tracing; - ps.recursion_depth = tstate->recursion_limit - tstate->recursion_remaining; + ps.py_recursion_depth = + tstate->py_recursion_limit - tstate->py_recursion_remaining; + ps.c_recursion_depth = C_RECURSION_LIMIT - tstate->c_recursion_remaining; ps._top_frame = PyThreadState_GetFrame((PyThreadState*)tstate); Py_XDECREF(ps._top_frame); - ps.trash_delete_nesting = tstate->trash_delete_nesting; + ps.trash_delete_nesting = tstate->trash.delete_nesting; ps.context = tstate->context; Py_XINCREF(ps.context); @@ -91,10 +92,11 @@ restorePythonState(PyThreadState* tstate, PythonState ps) tstate->datastack_top = ps.datastack_top; tstate->datastack_limit = ps.datastack_limit; - tstate->cframe->use_tracing = ps.use_tracing; - tstate->recursion_remaining = tstate->recursion_limit - ps.recursion_depth; + tstate->py_recursion_remaining = + tstate->py_recursion_limit - ps.py_recursion_depth; + tstate->c_recursion_remaining = C_RECURSION_LIMIT - ps.c_recursion_depth; - tstate->trash_delete_nesting = ps.trash_delete_nesting; + tstate->trash.delete_nesting = ps.trash_delete_nesting; tstate->context = ps.context; Py_XDECREF(ps.context); @@ -149,7 +151,7 @@ set_new_cframe(_PyCFrame* frame) *frame = *tstate->cframe; tstate->cframe = frame; tstate->cframe->previous = &PyThreadState_GET()->root_cframe; - tstate->trash_delete_nesting = 0; + tstate->trash.delete_nesting = 0; tstate->cframe->current_frame = NULL; tstate->datastack_chunk = NULL; tstate->datastack_top = NULL; diff --git a/src/tests/python_tests.yaml b/src/tests/python_tests.yaml index ea57a6022ce..d189dc8dfc7 100644 --- a/src/tests/python_tests.yaml +++ b/src/tests/python_tests.yaml @@ -28,7 +28,7 @@ # - multiprocessing: Fails due to no multiprocessing implementation. # - fs: Fails due to virtual filesystem issues. # - nonsense: This functionality doesn't make sense in this context. Includes -# things like `pip`, `distutils` +# things like `pip` # - crash: The Python interpreter just stopped without a traceback. Will require # further investigation. This usually seems to be caused by calling into a # system function that doesn't behave as one would expect. @@ -36,15 +36,18 @@ # - crash-firefox: Same as crash but only affecting Firefox - leakers.test_ctypes - leakers.test_selftype +- regrtestdata.import_from_tests.test_regrtest_a: + xfail: didn't correctly embed test data +- regrtestdata.import_from_tests.test_regrtest_c: + xfail: didn't correctly embed test data - test___all__: xfail: multiprocessing -- test___future__: - xfail: Dunno - test__locale: xfail: locale - test__opcode - test__osx_support: xfail: platform-specific +- test__xxinterpchannels - test__xxsubinterpreters: xfail: hits Py_FatalError("not the last thread") inside Py_EndInterpreter - test_abc @@ -55,20 +58,18 @@ - test_array - test_asdl_parser - test_ast: + xfail-safari: Stack overflow skip: - test_stdlib_validates # incompatible with zipimport - test_asyncgen: xfail: async -- test_asynchat: - xfail: async -- test_asyncio.test_asyncio_waitfor: - xfail: async - test_asyncio.test_base_events: xfail: async - test_asyncio.test_buffered_proto: xfail: async - test_asyncio.test_context: xfail: async +- test_asyncio.test_eager_task_factory - test_asyncio.test_events: xfail: async - test_asyncio.test_futures: @@ -106,13 +107,13 @@ xfail: async - test_asyncio.test_threads: xfail: async +- test_asyncio.test_timeouts - test_asyncio.test_transports - test_asyncio.test_unix_events: xfail: async +- test_asyncio.test_waitfor - test_asyncio.test_windows_events - test_asyncio.test_windows_utils -- test_asyncore: - xfail: async - test_atexit: skip: - test_general # fork @@ -159,15 +160,35 @@ - testThreading - test_c_locale_coercion - test_calendar -- test_call -- test_capi: - xfail: hangs +- test_call: + xfail: stack overflow +- test_capi.test_abstract +- test_capi.test_bytearray +- test_capi.test_bytes +- test_capi.test_codecs +- test_capi.test_complex +- test_capi.test_dict +- test_capi.test_eval_code_ex +- test_capi.test_exceptions +- test_capi.test_float +- test_capi.test_getargs +- test_capi.test_immortal +- test_capi.test_list +- test_capi.test_long +- test_capi.test_mem +- test_capi.test_misc: + skip: + - "*subinterpreter*" # Should we disable _xxsubinterpreters in Setup.local? +- test_capi.test_set +- test_capi.test_structmembers +- test_capi.test_sys +- test_capi.test_unicode +- test_capi.test_watchers - test_cgi: skip: - test_log # OSError: [Errno 70] Invalid seek - test_cgitb - test_charmapcodec -- test_check_c_globals - test_class - test_clinic - test_cmath @@ -198,8 +219,17 @@ - test_compile - test_compileall: xfail: multiprocessing +- test_compiler_assemble +- test_compiler_codegen - test_complex -- test_concurrent_futures +- test_concurrent_futures.test_as_completed +- test_concurrent_futures.test_deadlock +- test_concurrent_futures.test_future +- test_concurrent_futures.test_init +- test_concurrent_futures.test_process_pool +- test_concurrent_futures.test_shutdown +- test_concurrent_futures.test_thread_pool +- test_concurrent_futures.test_wait - test_configparser - test_contains - test_context: @@ -217,12 +247,60 @@ - test_crashers - test_crypt - test_csv -- test_ctypes: +- test_ctypes.test_anon +- test_ctypes.test_array_in_pointer +- test_ctypes.test_arrays +- test_ctypes.test_as_parameter +- test_ctypes.test_bitfields +- test_ctypes.test_buffers +- test_ctypes.test_bytes +- test_ctypes.test_byteswap +- test_ctypes.test_callbacks +- test_ctypes.test_cast +- test_ctypes.test_cfuncs +- test_ctypes.test_checkretval +- test_ctypes.test_delattr +- test_ctypes.test_errno +- test_ctypes.test_find +- test_ctypes.test_frombuffer +- test_ctypes.test_funcptr +- test_ctypes.test_functions +- test_ctypes.test_incomplete +- test_ctypes.test_init +- test_ctypes.test_internals +- test_ctypes.test_keeprefs +- test_ctypes.test_libc +- test_ctypes.test_loading +- test_ctypes.test_macholib +- test_ctypes.test_memfunctions +- test_ctypes.test_numbers +- test_ctypes.test_objects +- test_ctypes.test_parameters +- test_ctypes.test_pep3118 +- test_ctypes.test_pickling +- test_ctypes.test_pointers +- test_ctypes.test_prototypes +- test_ctypes.test_python_api +- test_ctypes.test_random_things +- test_ctypes.test_refcounts +- test_ctypes.test_repr +- test_ctypes.test_returnfuncptrs +- test_ctypes.test_simplesubclasses +- test_ctypes.test_sizes +- test_ctypes.test_slicing +- test_ctypes.test_stringptr +- test_ctypes.test_strings +- test_ctypes.test_struct_fields +- test_ctypes.test_structures: skip: - # See https://bugs.python.org/issue47208 - - test_callback_too_many_args + - test_array_in_struct_registers # needs https://github.com/libffi/libffi/pull/818 +- test_ctypes.test_unaligned_structures +- test_ctypes.test_unicode +- test_ctypes.test_values +- test_ctypes.test_varsize_struct +- test_ctypes.test_win32 +- test_ctypes.test_wintypes - test_curses -- test_dataclasses - test_datetime: xfail: strftime - test_dbm: @@ -238,7 +316,8 @@ - test_decorators - test_defaultdict - test_deque -- test_descr +- test_descr: + xfail: stack overflow - test_descrtut - test_devpoll - test_dict @@ -247,9 +326,6 @@ - test_dictviews - test_difflib - test_dis -- test_distutils: - # error while loading tests ModuleNotFoundError: No module named '_osx_support' - xfail: nonsense - test_doctest: xfail: subprocess - test_doctest2 @@ -333,19 +409,20 @@ - test_functools: skip: - "*threaded*" -- test_future: - xfail: Dunno -- test_future3: - xfail: Dunno -- test_future4: - xfail: Dunno -- test_future5: - xfail: Dunno +- test_future_stmt.test_future +- test_future_stmt.test_future_flags +- test_future_stmt.test_future_multiple_features +- test_future_stmt.test_future_multiple_imports +- test_future_stmt.test_future_single_import - test_gc: skip: - test_garbage_at_shutdown - test_trashcan_threads -- test_gdb +- test_gdb.test_backtrace +- test_gdb.test_cfunction +- test_gdb.test_cfunction_full +- test_gdb.test_misc +- test_gdb.test_pretty_print - test_generator_stop - test_generators - test_genericalias: @@ -357,8 +434,6 @@ - test_samestat_on_link - test_exists_fd - test_genexps -- test_getargs2: - xfail: Not sure - test_getopt - test_getpass - test_getpath @@ -394,17 +469,14 @@ - test_imaplib: xfail: socket - test_imghdr -- test_imp: - skip: - # incompatible with zipimport - - test_multiple_calls_to_get_data - - test_issue1267 - - test_load_from_source - test_importlib.builtin.test_finder - test_importlib.builtin.test_loader - test_importlib.extension.test_case_sensitivity -- test_importlib.extension.test_finder -- test_importlib.extension.test_loader +- test_importlib.extension.test_finder: + skip: + - "*FinderTests.test_module" +- test_importlib.extension.test_loader: + xfail: "TODO: why does it fail?" - test_importlib.extension.test_path_hook - test_importlib.frozen.test_finder - test_importlib.frozen.test_loader @@ -413,10 +485,20 @@ - test_importlib.import_.test_api - test_importlib.import_.test_caching - test_importlib.import_.test_fromlist +- test_importlib.import_.test_helpers - test_importlib.import_.test_meta_path - test_importlib.import_.test_packages - test_importlib.import_.test_path - test_importlib.import_.test_relative_imports +- test_importlib.resources.test_compatibilty_files +- test_importlib.resources.test_contents +- test_importlib.resources.test_custom +- test_importlib.resources.test_files +- test_importlib.resources.test_open +- test_importlib.resources.test_path +- test_importlib.resources.test_read +- test_importlib.resources.test_reader +- test_importlib.resources.test_resource - test_importlib.source.test_case_sensitivity - test_importlib.source.test_file_loader - test_importlib.source.test_finder @@ -427,21 +509,13 @@ skip: # incompatible with zipimport - test_reload_missing_loader -- test_importlib.test_compatibilty_files -- test_importlib.test_contents -- test_importlib.test_files - test_importlib.test_lazy - test_importlib.test_locks: xfail: threading - test_importlib.test_main - test_importlib.test_metadata_api - test_importlib.test_namespace_pkgs -- test_importlib.test_open -- test_importlib.test_path - test_importlib.test_pkg_import -- test_importlib.test_read -- test_importlib.test_reader -- test_importlib.test_resource - test_importlib.test_spec - test_importlib.test_threaded_import: xfail: threading @@ -449,9 +523,7 @@ - test_importlib.test_windows - test_importlib.test_zip - test_index -- test_inspect: - skip: - - test_nested_class_definition_inside_async_function +- test_inspect.test_inspect - test_int - test_int_literal - test_interpreters: @@ -491,8 +563,21 @@ - test_kqueue - test_largefile: xfail: segfault-outofmemory ("Array buffer allocation failed") -- test_lib2to3: - xfail: nonsense +- test_launcher +- test_lib2to3.test_all_fixers: + xfail: removed +- test_lib2to3.test_fixers: + xfail: removed +- test_lib2to3.test_main: + xfail: removed +- test_lib2to3.test_parser: + xfail: removed +- test_lib2to3.test_pytree: + xfail: removed +- test_lib2to3.test_refactor: + xfail: removed +- test_lib2to3.test_util: + xfail: removed - test_linecache: skip: # incompatible with zipimport @@ -517,6 +602,7 @@ - test_test # not sure why it fails... - test_marshal - test_math +- test_math_property - test_memoryio - test_memoryview - test_metaclass @@ -529,14 +615,24 @@ - test_basic - test_offset - test_resize_past_pos -- test_module -- test_modulefinder +- test_modulefinder: + xfail: takes a long time +- test_monitoring - test_msilib - test_multibytecodec -- test_multiprocessing_fork -- test_multiprocessing_forkserver +- test_multiprocessing_fork.test_manager +- test_multiprocessing_fork.test_misc +- test_multiprocessing_fork.test_processes +- test_multiprocessing_fork.test_threads +- test_multiprocessing_forkserver.test_manager +- test_multiprocessing_forkserver.test_misc +- test_multiprocessing_forkserver.test_processes +- test_multiprocessing_forkserver.test_threads - test_multiprocessing_main_handling -- test_multiprocessing_spawn +- test_multiprocessing_spawn.test_manager +- test_multiprocessing_spawn.test_misc +- test_multiprocessing_spawn.test_processes +- test_multiprocessing_spawn.test_threads - test_named_expressions - test_netrc - test_nis @@ -580,6 +676,9 @@ - test_peg_generator.test_first_sets - test_peg_generator.test_grammar_validator - test_peg_generator.test_pegen +- test_pep646_syntax +- test_perf_profiler +- test_perfmaps - test_pickle: xfail: dbm - test_picklebuffer @@ -591,7 +690,8 @@ - test_platform: skip: - test_architecture_via_symlink # fork -- test_plistlib +- test_plistlib: + xfail: stack overflow - test_poll: xfail: subprocess - test_popen: @@ -653,7 +753,7 @@ - test_runpy: skip: - test_pymain_run* # fork - # incompatible with zipimport + # incompatible with zipimport - test_run_module - test_run_module_alter_sys - test_run_module_in_namespace_package @@ -698,7 +798,6 @@ - test_site: xfail: "TypeError: unhashable type: 'pyodide.JsProxy'" - test_slice -- test_smtpd - test_smtplib: xfail: sockets - test_smtpnet @@ -713,6 +812,7 @@ - test_20731 - test_spwd - test_sqlite3.test_backup +- test_sqlite3.test_cli - test_sqlite3.test_dbapi - test_sqlite3.test_dump - test_sqlite3.test_factory @@ -738,15 +838,12 @@ - test_timezone - test_strtod - test_struct -- test_structmembers: - xfail: Not sure - test_structseq - test_subclassinit - test_subprocess: xfail: subprocess - test_sunau -- test_sundry: - xfail: Dunno +- test_sundry - test_super - test_support: xfail: about half the tests fork @@ -763,14 +860,12 @@ - test_syslog - test_tabnanny - test_tarfile: - xfail: Dunno skip: - - test_file_mode + - test_chains + - test_deep_symlink + - test_sly_relative* - test_extractall* - - test_link_size - - test_dereference_hardlink - - test_add_hardlink - - test_add_twice + - test_parent_symlink* - test_tcl - test_telnetlib: xfail: 7/19 fail with sockets @@ -780,6 +875,7 @@ - test_process_awareness # fork - test_truncate_with_size_parameter # setup failure FileNotFoundError: [Errno 44] No such file or directory - test_noinherit # self.assertEqual(os.get_inheritable(file.fd), False) ==> AssertionError: True != False +- test_termios - test_textwrap - test_thread: xfail: threading @@ -803,17 +899,23 @@ - test_timeit - test_timeout - test_tix -- test_tk +- test_tkinter.test_colorchooser +- test_tkinter.test_font +- test_tkinter.test_geometry_managers +- test_tkinter.test_images +- test_tkinter.test_loadtk +- test_tkinter.test_messagebox +- test_tkinter.test_misc +- test_tkinter.test_simpledialog +- test_tkinter.test_text +- test_tkinter.test_variables +- test_tkinter.test_widgets - test_tokenize -- test_tools.test_fixcid +- test_tomllib.test_data +- test_tomllib.test_error +- test_tomllib.test_misc - test_tools.test_freeze -- test_tools.test_gprof2html - test_tools.test_i18n -- test_tools.test_lll -- test_tools.test_md5sum -- test_tools.test_pathfix -- test_tools.test_pdeps -- test_tools.test_pindent - test_tools.test_reindent - test_tools.test_sundry - test_trace: @@ -821,15 +923,21 @@ - test_traceback: skip: - test_encoded_file # fork + - test_import_from* # tries to write to stdlib but it's in a zip - test_tracemalloc -- test_ttk_guionly +- test_ttk.test_extensions +- test_ttk.test_style +- test_ttk.test_widgets - test_ttk_textonly +- test_tty - test_tuple: xfail-chrome: times out - test_turtle +- test_type_aliases - test_type_annotations - test_type_cache - test_type_comments +- test_type_params - test_typechecks - test_types - test_typing: @@ -844,18 +952,19 @@ - test_unicode_file_functions - test_unicode_identifiers - test_unicodedata -- test_unittest: - xfail: Dunno - skip: - - "*async*" - - "*Interrupt*" - - "*Handler*" - # os.kill - - testTwoResults - - test_warnings - - testRemoveResult - # fork - - testSelectedTestNamesFunctionalTest +- test_unittest.test_assertions +- test_unittest.test_async_case +- test_unittest.test_break +- test_unittest.test_case +- test_unittest.test_discovery +- test_unittest.test_functiontestcase +- test_unittest.test_loader +- test_unittest.test_program +- test_unittest.test_result +- test_unittest.test_runner +- test_unittest.test_setups +- test_unittest.test_skipping +- test_unittest.test_suite - test_univnewlines - test_unpack - test_unpack_ex @@ -896,16 +1005,15 @@ - test_winreg - test_winsound - test_with +- test_wmi - test_wsgiref - test_xdrlib - test_xml_dom_minicompat - test_xml_etree: - xfail: Dunno skip: # stack overflow in v8 - - "test_recursive_repr" -- test_xml_etree_c: - xfail: Dunno + - test_recursive_repr +- test_xml_etree_c - test_xmlrpc: xfail: networking - test_xmlrpc_net @@ -913,12 +1021,17 @@ - test_xxtestfuzz - test_yield_from - test_zipapp -- test_zipfile: - xfail-chrome: times out +- test_zipfile._path.test_complexity +- test_zipfile._path.test_path +- test_zipfile.test_core: + xfail: Times out skip: - - test_many_opens # [Errno 54] Not a directory: '/proc/self/fd' + # NotADirectoryError: [Errno 54] Not a directory: '/proc/self/fd' + - test_many_opens + - test_zipfile64 - test_zipimport - test_zipimport_support - test_zlib - test_zoneinfo.test_zoneinfo +- test_zoneinfo.test_zoneinfo_property
SciTools__cartopy-1681
pip install of cartopy 0.18.0 fails when installing numpy at the same time ### Description I am provisioning a docker image using pip. In a single pip command, I installed a number of packages, including cartopy and numpy. This worked in versions prior to 0.18.0, and no longer works with 0.18.0. #### Code to reproduce In a docker image with vanilla python3 install and no pip packages installed, run ``` pip3 install --upgrade pip && pip3 install --no-cache-dir cartopy==0.18.0 numpy ``` #### Traceback ``` ERROR: Command errored out with exit status 1: command: /usr/local/pyenv/versions/3.7.6/bin/python3.7 -c 'import sys, setuptools, tokenize; sys.argv[0] = '"'"'/tmp/pip-install-cdx7ek5c/cartopy/setup.py'"'"'; __file__='"'" '/tmp/pip-install-cdx7ek5c/cartopy/setup.py'"'"';f=getattr(tokenize, '"'"'open'"'"', open)(__file__);code=f.read().replace('"'"'\r\n'"'"', '"'"'\n'"'"');f.close();exec(compile(cod e, __file__, '"'"'exec'"'"'))' egg_info --egg-base /tmp/pip-pip-egg-info-377te2k_ cwd: /tmp/pip-install-cdx7ek5c/cartopy/ Complete output (12 lines): Traceback (most recent call last): File "/tmp/pip-install-cdx7ek5c/cartopy/setup.py", line 43, in <module> import numpy as np ModuleNotFoundError: No module named 'numpy' During handling of the above exception, another exception occurred: Traceback (most recent call last): File "<string>", line 1, in <module> File "/tmp/pip-install-cdx7ek5c/cartopy/setup.py", line 45, in <module> raise ImportError('NumPy 1.10+ is required to install cartopy.') ImportError: NumPy 1.10+ is required to install cartopy. ---------------------------------------- ERROR: Command errored out with exit status 1: python setup.py egg_info Check the logs for full command output. ``` <details> <summary>Full environment definition</summary> <!-- fill in the following information as appropriate --> ### Operating system Ubuntu 18.04 Python 3.7.6 installed via pyenv ### Cartopy version 0.18.0 </details>
[ { "content": "# Copyright Cartopy Contributors\n#\n# This file is part of Cartopy and is released under the LGPL license.\n# See COPYING and COPYING.LESSER in the root of the repository for full\n# licensing details.\n\n# NOTE: This file must remain Python 2 compatible for the foreseeable future,\n# to ensure that we error out properly for people with outdated setuptools\n# and/or pip.\nimport sys\n\nPYTHON_MIN_VERSION = (3, 5)\n\nif sys.version_info < PYTHON_MIN_VERSION:\n error = \"\"\"\nBeginning with Cartopy 0.19, Python {} or above is required.\nYou are using Python {}.\n\nThis may be due to an out of date pip.\n\nMake sure you have pip >= 9.0.1.\n\"\"\".format('.'.join(str(n) for n in PYTHON_MIN_VERSION),\n '.'.join(str(n) for n in sys.version_info[:3]))\n sys.exit(error)\n\n\nimport fnmatch\nimport os\nimport subprocess\nimport warnings\nfrom collections import defaultdict\nfrom distutils.spawn import find_executable\nfrom distutils.sysconfig import get_config_var\n\nfrom setuptools import Command, Extension, convert_path, setup\n\n\"\"\"\nDistribution definition for Cartopy.\n\n\"\"\"\n\n# The existence of a PKG-INFO directory is enough to tell us whether this is a\n# source installation or not (sdist).\nHERE = os.path.dirname(__file__)\nIS_SDIST = os.path.exists(os.path.join(HERE, 'PKG-INFO'))\nFORCE_CYTHON = os.environ.get('FORCE_CYTHON', False)\n\nif not IS_SDIST or FORCE_CYTHON:\n import Cython\n if Cython.__version__ < '0.28':\n raise ImportError(\n \"Cython 0.28+ is required to install cartopy from source.\")\n\n from Cython.Distutils import build_ext as cy_build_ext\n\n\ntry:\n import numpy as np\nexcept ImportError:\n raise ImportError('NumPy 1.10+ is required to install cartopy.')\n\n\n# Please keep in sync with INSTALL file.\nGEOS_MIN_VERSION = (3, 3, 3)\nPROJ_MIN_VERSION = (4, 9, 0)\n\n\ndef file_walk_relative(top, remove=''):\n \"\"\"\n Return a generator of files from the top of the tree, removing\n the given prefix from the root/file result.\n\n \"\"\"\n top = top.replace('/', os.path.sep)\n remove = remove.replace('/', os.path.sep)\n for root, dirs, files in os.walk(top):\n for file in files:\n yield os.path.join(root, file).replace(remove, '')\n\n\ndef find_package_tree(root_path, root_package):\n \"\"\"\n Return the package and all its sub-packages.\n\n Automated package discovery - extracted/modified from Distutils Cookbook:\n https://wiki.python.org/moin/Distutils/Cookbook/AutoPackageDiscovery\n\n \"\"\"\n packages = [root_package]\n # Accept a root_path with Linux path separators.\n root_path = root_path.replace('/', os.path.sep)\n root_count = len(root_path.split(os.path.sep))\n for (dir_path, dir_names, _) in os.walk(convert_path(root_path)):\n # Prune dir_names *in-place* to prevent unwanted directory recursion\n for dir_name in list(dir_names):\n if not os.path.isfile(os.path.join(dir_path, dir_name,\n '__init__.py')):\n dir_names.remove(dir_name)\n if dir_names:\n prefix = dir_path.split(os.path.sep)[root_count:]\n packages.extend(['.'.join([root_package] + prefix + [dir_name])\n for dir_name in dir_names])\n return packages\n\n\n# Dependency checks\n# =================\n\n# GEOS\ntry:\n geos_version = subprocess.check_output(['geos-config', '--version'])\n geos_version = tuple(int(v) for v in geos_version.split(b'.')\n if 'dev' not in str(v))\n geos_includes = subprocess.check_output(['geos-config', '--includes'])\n geos_clibs = subprocess.check_output(['geos-config', '--clibs'])\nexcept (OSError, ValueError, subprocess.CalledProcessError):\n warnings.warn(\n 'Unable to determine GEOS version. Ensure you have %s or later '\n 'installed, or installation may fail.' % (\n '.'.join(str(v) for v in GEOS_MIN_VERSION), ))\n\n geos_includes = []\n geos_library_dirs = []\n geos_libraries = ['geos_c']\nelse:\n if geos_version < GEOS_MIN_VERSION:\n print('GEOS version %s is installed, but cartopy requires at least '\n 'version %s.' % ('.'.join(str(v) for v in geos_version),\n '.'.join(str(v) for v in GEOS_MIN_VERSION)),\n file=sys.stderr)\n exit(1)\n\n geos_includes = geos_includes.decode().split()\n geos_libraries = []\n geos_library_dirs = []\n for entry in geos_clibs.decode().split():\n if entry.startswith('-L'):\n geos_library_dirs.append(entry[2:])\n elif entry.startswith('-l'):\n geos_libraries.append(entry[2:])\n\n\n# Proj\ndef find_proj_version_by_program(conda=None):\n proj = find_executable('proj')\n if proj is None:\n print(\n 'Proj {} must be installed.'.format(\n '.'.join(str(v) for v in PROJ_MIN_VERSION)),\n file=sys.stderr)\n exit(1)\n\n if conda is not None and conda not in proj:\n print(\n 'Proj {} must be installed in Conda environment \"{}\".'.format(\n '.'.join(str(v) for v in PROJ_MIN_VERSION), conda),\n file=sys.stderr)\n exit(1)\n\n try:\n proj_version = subprocess.check_output([proj],\n stderr=subprocess.STDOUT)\n proj_version = proj_version.split()[1].split(b'.')\n proj_version = tuple(int(v.strip(b',')) for v in proj_version)\n except (OSError, IndexError, ValueError, subprocess.CalledProcessError):\n warnings.warn(\n 'Unable to determine Proj version. Ensure you have %s or later '\n 'installed, or installation may fail.' % (\n '.'.join(str(v) for v in PROJ_MIN_VERSION), ))\n proj_version = (0, 0, 0)\n\n return proj_version\n\n\ndef get_proj_libraries():\n \"\"\"\n This function gets the PROJ libraries to cythonize with\n \"\"\"\n proj_libraries = [\"proj\"]\n if os.name == \"nt\" and (6, 0, 0) <= proj_version < (6, 3, 0):\n proj_libraries = [\n \"proj_{}_{}\".format(proj_version[0], proj_version[1])\n ]\n return proj_libraries\n\n\nconda = os.getenv('CONDA_DEFAULT_ENV')\nif conda is not None and conda in sys.prefix:\n # Conda does not provide pkg-config compatibility, but the search paths\n # should be set up so that nothing extra is required. We'll still check\n # the version, though.\n proj_version = find_proj_version_by_program(conda)\n if proj_version < PROJ_MIN_VERSION:\n print(\n 'Proj version %s is installed, but cartopy requires at least '\n 'version %s.' % ('.'.join(str(v) for v in proj_version),\n '.'.join(str(v) for v in PROJ_MIN_VERSION)),\n file=sys.stderr)\n exit(1)\n\n proj_includes = []\n proj_libraries = get_proj_libraries()\n proj_library_dirs = []\n\nelse:\n try:\n proj_version = subprocess.check_output(['pkg-config', '--modversion',\n 'proj'],\n stderr=subprocess.STDOUT)\n proj_version = tuple(int(v) for v in proj_version.split(b'.'))\n proj_includes = subprocess.check_output(['pkg-config', '--cflags',\n 'proj'])\n proj_clibs = subprocess.check_output(['pkg-config', '--libs', 'proj'])\n except (OSError, ValueError, subprocess.CalledProcessError):\n proj_version = find_proj_version_by_program()\n if proj_version < PROJ_MIN_VERSION:\n print(\n 'Proj version %s is installed, but cartopy requires at least '\n 'version %s.' % ('.'.join(str(v) for v in proj_version),\n '.'.join(str(v) for v in PROJ_MIN_VERSION)),\n file=sys.stderr)\n exit(1)\n\n proj_includes = []\n proj_libraries = get_proj_libraries()\n proj_library_dirs = []\n else:\n if proj_version < PROJ_MIN_VERSION:\n print(\n 'Proj version %s is installed, but cartopy requires at least '\n 'version %s.' % ('.'.join(str(v) for v in proj_version),\n '.'.join(str(v) for v in PROJ_MIN_VERSION)),\n file=sys.stderr)\n exit(1)\n\n proj_includes = [\n proj_include[2:] if proj_include.startswith('-I') else\n proj_include for proj_include in proj_includes.decode().split()]\n\n proj_libraries = []\n proj_library_dirs = []\n for entry in proj_clibs.decode().split():\n if entry.startswith('-L'):\n proj_library_dirs.append(entry[2:])\n elif entry.startswith('-l'):\n proj_libraries.append(entry[2:])\n\n# Python dependencies\nextras_require = {}\nfor name in os.listdir(os.path.join(HERE, 'requirements')):\n with open(os.path.join(HERE, 'requirements', name)) as fh:\n section, ext = os.path.splitext(name)\n extras_require[section] = []\n for line in fh:\n if line.startswith('#'):\n pass\n elif line.startswith('-'):\n pass\n else:\n extras_require[section].append(line.strip())\ninstall_requires = extras_require.pop('default')\ntests_require = extras_require.get('tests', [])\n\n# General extension paths\nif sys.platform.startswith('win'):\n def get_config_var(name):\n return '.'\ninclude_dir = get_config_var('INCLUDEDIR')\nlibrary_dir = get_config_var('LIBDIR')\nextra_extension_args = defaultdict(list)\nif not sys.platform.startswith('win'):\n extra_extension_args[\"runtime_library_dirs\"].append(\n get_config_var('LIBDIR')\n )\n\n# Description\n# ===========\nwith open(os.path.join(HERE, 'README.md')) as fh:\n description = ''.join(fh.readlines())\n\n\ncython_coverage_enabled = os.environ.get('CYTHON_COVERAGE', None)\nif proj_version >= (6, 0, 0):\n extra_extension_args[\"define_macros\"].append(\n ('ACCEPT_USE_OF_DEPRECATED_PROJ_API_H', '1')\n )\nif cython_coverage_enabled:\n extra_extension_args[\"define_macros\"].append(\n ('CYTHON_TRACE_NOGIL', '1')\n )\n\nextensions = [\n Extension(\n 'cartopy.trace',\n ['lib/cartopy/trace.pyx'],\n include_dirs=([include_dir, './lib/cartopy', np.get_include()] +\n proj_includes + geos_includes),\n libraries=proj_libraries + geos_libraries,\n library_dirs=[library_dir] + proj_library_dirs + geos_library_dirs,\n language='c++',\n **extra_extension_args),\n Extension(\n 'cartopy._crs',\n ['lib/cartopy/_crs.pyx'],\n include_dirs=[include_dir, np.get_include()] + proj_includes,\n libraries=proj_libraries,\n library_dirs=[library_dir] + proj_library_dirs,\n **extra_extension_args),\n # Requires proj v4.9\n Extension(\n 'cartopy.geodesic._geodesic',\n ['lib/cartopy/geodesic/_geodesic.pyx'],\n include_dirs=[include_dir, np.get_include()] + proj_includes,\n libraries=proj_libraries,\n library_dirs=[library_dir] + proj_library_dirs,\n **extra_extension_args),\n]\n\n\nif cython_coverage_enabled:\n # We need to explicitly cythonize the extension in order\n # to control the Cython compiler_directives.\n from Cython.Build import cythonize\n\n directives = {'linetrace': True,\n 'binding': True}\n extensions = cythonize(extensions, compiler_directives=directives)\n\n\ndef decythonize(extensions, **_ignore):\n # Remove pyx sources from extensions.\n # Note: even if there are changes to the pyx files, they will be ignored.\n for extension in extensions:\n sources = []\n for sfile in extension.sources:\n path, ext = os.path.splitext(sfile)\n if ext in ('.pyx',):\n if extension.language == 'c++':\n ext = '.cpp'\n else:\n ext = '.c'\n sfile = path + ext\n sources.append(sfile)\n extension.sources[:] = sources\n return extensions\n\n\nif IS_SDIST and not FORCE_CYTHON:\n extensions = decythonize(extensions)\n cmdclass = {}\nelse:\n cmdclass = {'build_ext': cy_build_ext}\n\n\n# Main setup\n# ==========\nsetup(\n name='Cartopy',\n url='https://scitools.org.uk/cartopy/docs/latest/',\n download_url='https://github.com/SciTools/cartopy',\n author='UK Met Office',\n description='A cartographic python library with Matplotlib support for '\n 'visualisation',\n long_description=description,\n long_description_content_type='text/markdown',\n license=\"LGPLv3\",\n keywords=\"cartography map transform projection proj proj.4 geos shapely \"\n \"shapefile\",\n\n install_requires=install_requires,\n extras_require=extras_require,\n tests_require=tests_require,\n\n setup_requires=['setuptools_scm', 'setuptools_scm_git_archive'],\n use_scm_version={\n 'write_to': 'lib/cartopy/_version.py',\n },\n\n packages=find_package_tree('lib/cartopy', 'cartopy'),\n package_dir={'': 'lib'},\n package_data={'cartopy': list(file_walk_relative('lib/cartopy/tests/'\n 'mpl/baseline_images/',\n remove='lib/cartopy/')) +\n list(file_walk_relative('lib/cartopy/data/raster',\n remove='lib/cartopy/')) +\n list(file_walk_relative('lib/cartopy/data/netcdf',\n remove='lib/cartopy/')) +\n list(file_walk_relative('lib/cartopy/data/'\n 'shapefiles/gshhs',\n remove='lib/cartopy/')) +\n list(file_walk_relative('lib/cartopy/tests/lakes_shapefile',\n remove='lib/cartopy/')) +\n ['io/srtm.npz']},\n\n\n # requires proj headers\n ext_modules=extensions,\n cmdclass=cmdclass,\n python_requires='>=' + '.'.join(str(n) for n in PYTHON_MIN_VERSION),\n classifiers=[\n 'Development Status :: 4 - Beta',\n 'Framework :: Matplotlib',\n 'License :: OSI Approved :: GNU Lesser General Public License v3 '\n 'or later (LGPLv3+)',\n 'Operating System :: MacOS :: MacOS X',\n 'Operating System :: Microsoft :: Windows',\n 'Operating System :: POSIX',\n 'Operating System :: POSIX :: AIX',\n 'Operating System :: POSIX :: Linux',\n 'Programming Language :: C++',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Programming Language :: Python :: 3.8',\n 'Programming Language :: Python :: 3 :: Only',\n 'Topic :: Scientific/Engineering',\n 'Topic :: Scientific/Engineering :: GIS',\n 'Topic :: Scientific/Engineering :: Visualization',\n ],\n)\n", "path": "setup.py" } ]
[ { "content": "# Copyright Cartopy Contributors\n#\n# This file is part of Cartopy and is released under the LGPL license.\n# See COPYING and COPYING.LESSER in the root of the repository for full\n# licensing details.\n\n# NOTE: This file must remain Python 2 compatible for the foreseeable future,\n# to ensure that we error out properly for people with outdated setuptools\n# and/or pip.\nimport sys\n\nPYTHON_MIN_VERSION = (3, 5)\n\nif sys.version_info < PYTHON_MIN_VERSION:\n error = \"\"\"\nBeginning with Cartopy 0.19, Python {} or above is required.\nYou are using Python {}.\n\nThis may be due to an out of date pip.\n\nMake sure you have pip >= 9.0.1.\n\"\"\".format('.'.join(str(n) for n in PYTHON_MIN_VERSION),\n '.'.join(str(n) for n in sys.version_info[:3]))\n sys.exit(error)\n\n\nimport fnmatch\nimport os\nimport subprocess\nimport warnings\nfrom collections import defaultdict\nfrom distutils.spawn import find_executable\nfrom distutils.sysconfig import get_config_var\n\nfrom setuptools import Command, Extension, convert_path, setup\n\n\"\"\"\nDistribution definition for Cartopy.\n\n\"\"\"\n\n# The existence of a PKG-INFO directory is enough to tell us whether this is a\n# source installation or not (sdist).\nHERE = os.path.dirname(__file__)\nIS_SDIST = os.path.exists(os.path.join(HERE, 'PKG-INFO'))\nFORCE_CYTHON = os.environ.get('FORCE_CYTHON', False)\n\nif not IS_SDIST or FORCE_CYTHON:\n import Cython\n if Cython.__version__ < '0.28':\n raise ImportError(\n \"Cython 0.28+ is required to install cartopy from source.\")\n\n from Cython.Distutils import build_ext as cy_build_ext\n\n\ntry:\n import numpy as np\nexcept ImportError:\n raise ImportError('NumPy 1.10+ is required to install cartopy.')\n\n\n# Please keep in sync with INSTALL file.\nGEOS_MIN_VERSION = (3, 3, 3)\nPROJ_MIN_VERSION = (4, 9, 0)\n\n\ndef file_walk_relative(top, remove=''):\n \"\"\"\n Return a generator of files from the top of the tree, removing\n the given prefix from the root/file result.\n\n \"\"\"\n top = top.replace('/', os.path.sep)\n remove = remove.replace('/', os.path.sep)\n for root, dirs, files in os.walk(top):\n for file in files:\n yield os.path.join(root, file).replace(remove, '')\n\n\ndef find_package_tree(root_path, root_package):\n \"\"\"\n Return the package and all its sub-packages.\n\n Automated package discovery - extracted/modified from Distutils Cookbook:\n https://wiki.python.org/moin/Distutils/Cookbook/AutoPackageDiscovery\n\n \"\"\"\n packages = [root_package]\n # Accept a root_path with Linux path separators.\n root_path = root_path.replace('/', os.path.sep)\n root_count = len(root_path.split(os.path.sep))\n for (dir_path, dir_names, _) in os.walk(convert_path(root_path)):\n # Prune dir_names *in-place* to prevent unwanted directory recursion\n for dir_name in list(dir_names):\n if not os.path.isfile(os.path.join(dir_path, dir_name,\n '__init__.py')):\n dir_names.remove(dir_name)\n if dir_names:\n prefix = dir_path.split(os.path.sep)[root_count:]\n packages.extend(['.'.join([root_package] + prefix + [dir_name])\n for dir_name in dir_names])\n return packages\n\n\n# Dependency checks\n# =================\n\n# GEOS\ntry:\n geos_version = subprocess.check_output(['geos-config', '--version'])\n geos_version = tuple(int(v) for v in geos_version.split(b'.')\n if 'dev' not in str(v))\n geos_includes = subprocess.check_output(['geos-config', '--includes'])\n geos_clibs = subprocess.check_output(['geos-config', '--clibs'])\nexcept (OSError, ValueError, subprocess.CalledProcessError):\n warnings.warn(\n 'Unable to determine GEOS version. Ensure you have %s or later '\n 'installed, or installation may fail.' % (\n '.'.join(str(v) for v in GEOS_MIN_VERSION), ))\n\n geos_includes = []\n geos_library_dirs = []\n geos_libraries = ['geos_c']\nelse:\n if geos_version < GEOS_MIN_VERSION:\n print('GEOS version %s is installed, but cartopy requires at least '\n 'version %s.' % ('.'.join(str(v) for v in geos_version),\n '.'.join(str(v) for v in GEOS_MIN_VERSION)),\n file=sys.stderr)\n exit(1)\n\n geos_includes = geos_includes.decode().split()\n geos_libraries = []\n geos_library_dirs = []\n for entry in geos_clibs.decode().split():\n if entry.startswith('-L'):\n geos_library_dirs.append(entry[2:])\n elif entry.startswith('-l'):\n geos_libraries.append(entry[2:])\n\n\n# Proj\ndef find_proj_version_by_program(conda=None):\n proj = find_executable('proj')\n if proj is None:\n print(\n 'Proj {} must be installed.'.format(\n '.'.join(str(v) for v in PROJ_MIN_VERSION)),\n file=sys.stderr)\n exit(1)\n\n if conda is not None and conda not in proj:\n print(\n 'Proj {} must be installed in Conda environment \"{}\".'.format(\n '.'.join(str(v) for v in PROJ_MIN_VERSION), conda),\n file=sys.stderr)\n exit(1)\n\n try:\n proj_version = subprocess.check_output([proj],\n stderr=subprocess.STDOUT)\n proj_version = proj_version.split()[1].split(b'.')\n proj_version = tuple(int(v.strip(b',')) for v in proj_version)\n except (OSError, IndexError, ValueError, subprocess.CalledProcessError):\n warnings.warn(\n 'Unable to determine Proj version. Ensure you have %s or later '\n 'installed, or installation may fail.' % (\n '.'.join(str(v) for v in PROJ_MIN_VERSION), ))\n proj_version = (0, 0, 0)\n\n return proj_version\n\n\ndef get_proj_libraries():\n \"\"\"\n This function gets the PROJ libraries to cythonize with\n \"\"\"\n proj_libraries = [\"proj\"]\n if os.name == \"nt\" and (6, 0, 0) <= proj_version < (6, 3, 0):\n proj_libraries = [\n \"proj_{}_{}\".format(proj_version[0], proj_version[1])\n ]\n return proj_libraries\n\n\nconda = os.getenv('CONDA_DEFAULT_ENV')\nif conda is not None and conda in sys.prefix:\n # Conda does not provide pkg-config compatibility, but the search paths\n # should be set up so that nothing extra is required. We'll still check\n # the version, though.\n proj_version = find_proj_version_by_program(conda)\n if proj_version < PROJ_MIN_VERSION:\n print(\n 'Proj version %s is installed, but cartopy requires at least '\n 'version %s.' % ('.'.join(str(v) for v in proj_version),\n '.'.join(str(v) for v in PROJ_MIN_VERSION)),\n file=sys.stderr)\n exit(1)\n\n proj_includes = []\n proj_libraries = get_proj_libraries()\n proj_library_dirs = []\n\nelse:\n try:\n proj_version = subprocess.check_output(['pkg-config', '--modversion',\n 'proj'],\n stderr=subprocess.STDOUT)\n proj_version = tuple(int(v) for v in proj_version.split(b'.'))\n proj_includes = subprocess.check_output(['pkg-config', '--cflags',\n 'proj'])\n proj_clibs = subprocess.check_output(['pkg-config', '--libs', 'proj'])\n except (OSError, ValueError, subprocess.CalledProcessError):\n proj_version = find_proj_version_by_program()\n if proj_version < PROJ_MIN_VERSION:\n print(\n 'Proj version %s is installed, but cartopy requires at least '\n 'version %s.' % ('.'.join(str(v) for v in proj_version),\n '.'.join(str(v) for v in PROJ_MIN_VERSION)),\n file=sys.stderr)\n exit(1)\n\n proj_includes = []\n proj_libraries = get_proj_libraries()\n proj_library_dirs = []\n else:\n if proj_version < PROJ_MIN_VERSION:\n print(\n 'Proj version %s is installed, but cartopy requires at least '\n 'version %s.' % ('.'.join(str(v) for v in proj_version),\n '.'.join(str(v) for v in PROJ_MIN_VERSION)),\n file=sys.stderr)\n exit(1)\n\n proj_includes = [\n proj_include[2:] if proj_include.startswith('-I') else\n proj_include for proj_include in proj_includes.decode().split()]\n\n proj_libraries = []\n proj_library_dirs = []\n for entry in proj_clibs.decode().split():\n if entry.startswith('-L'):\n proj_library_dirs.append(entry[2:])\n elif entry.startswith('-l'):\n proj_libraries.append(entry[2:])\n\n# Python dependencies\nextras_require = {}\nfor name in os.listdir(os.path.join(HERE, 'requirements')):\n with open(os.path.join(HERE, 'requirements', name)) as fh:\n section, ext = os.path.splitext(name)\n extras_require[section] = []\n for line in fh:\n if line.startswith('#'):\n pass\n elif line.startswith('-'):\n pass\n else:\n extras_require[section].append(line.strip())\ninstall_requires = extras_require.pop('default')\ntests_require = extras_require.get('tests', [])\n\n# General extension paths\nif sys.platform.startswith('win'):\n def get_config_var(name):\n return '.'\ninclude_dir = get_config_var('INCLUDEDIR')\nlibrary_dir = get_config_var('LIBDIR')\nextra_extension_args = defaultdict(list)\nif not sys.platform.startswith('win'):\n extra_extension_args[\"runtime_library_dirs\"].append(\n get_config_var('LIBDIR')\n )\n\n# Description\n# ===========\nwith open(os.path.join(HERE, 'README.md')) as fh:\n description = ''.join(fh.readlines())\n\n\ncython_coverage_enabled = os.environ.get('CYTHON_COVERAGE', None)\nif proj_version >= (6, 0, 0):\n extra_extension_args[\"define_macros\"].append(\n ('ACCEPT_USE_OF_DEPRECATED_PROJ_API_H', '1')\n )\nif cython_coverage_enabled:\n extra_extension_args[\"define_macros\"].append(\n ('CYTHON_TRACE_NOGIL', '1')\n )\n\nextensions = [\n Extension(\n 'cartopy.trace',\n ['lib/cartopy/trace.pyx'],\n include_dirs=([include_dir, './lib/cartopy', np.get_include()] +\n proj_includes + geos_includes),\n libraries=proj_libraries + geos_libraries,\n library_dirs=[library_dir] + proj_library_dirs + geos_library_dirs,\n language='c++',\n **extra_extension_args),\n Extension(\n 'cartopy._crs',\n ['lib/cartopy/_crs.pyx'],\n include_dirs=[include_dir, np.get_include()] + proj_includes,\n libraries=proj_libraries,\n library_dirs=[library_dir] + proj_library_dirs,\n **extra_extension_args),\n # Requires proj v4.9\n Extension(\n 'cartopy.geodesic._geodesic',\n ['lib/cartopy/geodesic/_geodesic.pyx'],\n include_dirs=[include_dir, np.get_include()] + proj_includes,\n libraries=proj_libraries,\n library_dirs=[library_dir] + proj_library_dirs,\n **extra_extension_args),\n]\n\n\nif cython_coverage_enabled:\n # We need to explicitly cythonize the extension in order\n # to control the Cython compiler_directives.\n from Cython.Build import cythonize\n\n directives = {'linetrace': True,\n 'binding': True}\n extensions = cythonize(extensions, compiler_directives=directives)\n\n\ndef decythonize(extensions, **_ignore):\n # Remove pyx sources from extensions.\n # Note: even if there are changes to the pyx files, they will be ignored.\n for extension in extensions:\n sources = []\n for sfile in extension.sources:\n path, ext = os.path.splitext(sfile)\n if ext in ('.pyx',):\n if extension.language == 'c++':\n ext = '.cpp'\n else:\n ext = '.c'\n sfile = path + ext\n sources.append(sfile)\n extension.sources[:] = sources\n return extensions\n\n\nif IS_SDIST and not FORCE_CYTHON:\n extensions = decythonize(extensions)\n cmdclass = {}\nelse:\n cmdclass = {'build_ext': cy_build_ext}\n\n\n# Main setup\n# ==========\nsetup(\n name='Cartopy',\n url='https://scitools.org.uk/cartopy/docs/latest/',\n download_url='https://github.com/SciTools/cartopy',\n author='UK Met Office',\n description='A cartographic python library with Matplotlib support for '\n 'visualisation',\n long_description=description,\n long_description_content_type='text/markdown',\n license=\"LGPLv3\",\n keywords=\"cartography map transform projection proj proj.4 geos shapely \"\n \"shapefile\",\n\n install_requires=install_requires,\n extras_require=extras_require,\n tests_require=tests_require,\n\n use_scm_version={\n 'write_to': 'lib/cartopy/_version.py',\n },\n\n packages=find_package_tree('lib/cartopy', 'cartopy'),\n package_dir={'': 'lib'},\n package_data={'cartopy': list(file_walk_relative('lib/cartopy/tests/'\n 'mpl/baseline_images/',\n remove='lib/cartopy/')) +\n list(file_walk_relative('lib/cartopy/data/raster',\n remove='lib/cartopy/')) +\n list(file_walk_relative('lib/cartopy/data/netcdf',\n remove='lib/cartopy/')) +\n list(file_walk_relative('lib/cartopy/data/'\n 'shapefiles/gshhs',\n remove='lib/cartopy/')) +\n list(file_walk_relative('lib/cartopy/tests/lakes_shapefile',\n remove='lib/cartopy/')) +\n ['io/srtm.npz']},\n\n\n # requires proj headers\n ext_modules=extensions,\n cmdclass=cmdclass,\n python_requires='>=' + '.'.join(str(n) for n in PYTHON_MIN_VERSION),\n classifiers=[\n 'Development Status :: 4 - Beta',\n 'Framework :: Matplotlib',\n 'License :: OSI Approved :: GNU Lesser General Public License v3 '\n 'or later (LGPLv3+)',\n 'Operating System :: MacOS :: MacOS X',\n 'Operating System :: Microsoft :: Windows',\n 'Operating System :: POSIX',\n 'Operating System :: POSIX :: AIX',\n 'Operating System :: POSIX :: Linux',\n 'Programming Language :: C++',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Programming Language :: Python :: 3.8',\n 'Programming Language :: Python :: 3 :: Only',\n 'Topic :: Scientific/Engineering',\n 'Topic :: Scientific/Engineering :: GIS',\n 'Topic :: Scientific/Engineering :: Visualization',\n ],\n)\n", "path": "setup.py" } ]
diff --git a/.travis.yml b/.travis.yml index 679b31445..f18a4a471 100644 --- a/.travis.yml +++ b/.travis.yml @@ -4,10 +4,10 @@ env: matrix: - NAME="Minimum dependencies." PYTHON_VERSION=3.6 - PACKAGES="cython=0.28.5 matplotlib=2.2.2 numpy=1.16 owslib=0.17 proj4=5.2.0 scipy=1.2.0" + PACKAGES="matplotlib=2.2.2 numpy=1.16 owslib=0.17 proj4=5.2.0 scipy=1.2.0" - NAME="Latest everything." PYTHON_VERSION=3.8 - PACKAGES="cython fiona matplotlib-base numpy proj pykdtree scipy" + PACKAGES="fiona matplotlib-base proj pykdtree scipy" sudo: false @@ -37,11 +37,11 @@ install: # Customise the testing environment # --------------------------------- - PACKAGES="$PACKAGES flufl.lock owslib pep8 pillow pyepsg pyshp pytest" - - PACKAGES="$PACKAGES pytest-xdist requests setuptools_scm" - - PACKAGES="$PACKAGES setuptools_scm_git_archive shapely" + - PACKAGES="$PACKAGES pytest-xdist requests" + - PACKAGES="$PACKAGES shapely" - | if [[ "$NAME" == "Latest everything"* ]]; then - PACKAGES="$PACKAGES pytest-cov coveralls"; + PACKAGES="$PACKAGES cython>=0.29.2 pytest-cov coveralls"; export CYTHON_COVERAGE=1; fi - conda create -n $ENV_NAME python=$PYTHON_VERSION $PACKAGES diff --git a/INSTALL b/INSTALL index c0a04c7b9..ca5c8f598 100644 --- a/INSTALL +++ b/INSTALL @@ -47,7 +47,7 @@ using the `setup.py` file:: Required dependencies ~~~~~~~~~~~~~~~~~~~~~ In order to install Cartopy, or to access its basic functionality, it will be -necessary to first install **GEOS**, **NumPy**, **Cython**, **Shapely**, and +necessary to first install **GEOS**, **Shapely**, and **pyshp**. Many of these packages can be installed using pip or other package managers such as apt-get (Linux) and brew (macOS). Many of these dependencies are built as part of Cartopy's conda distribution, and the recipes @@ -56,7 +56,7 @@ for these packages can be found at https://github.com/conda-forge/feedstocks. For macOS, the required dependencies can be installed in the following way:: brew install proj geos - pip3 install --upgrade cython numpy pyshp + pip3 install --upgrade pyshp # shapely needs to be built from source to link to geos. If it is already # installed, uninstall it by: pip3 uninstall shapely pip3 install shapely --no-binary shapely @@ -79,12 +79,6 @@ Further information about the required dependencies can be found here: **Python** 3.5 or later (https://www.python.org/) -**Cython** 0.28 or later (https://pypi.python.org/pypi/Cython/) - -**NumPy** 1.10 or later (https://numpy.org/) - Python package for scientific computing including a powerful N-dimensional - array object. - **GEOS** 3.3.3 or later (https://trac.osgeo.org/geos/) GEOS is an API of spatial predicates and functions for processing geometry written in C++. diff --git a/pyproject.toml b/pyproject.toml new file mode 100644 index 000000000..948c6801b --- /dev/null +++ b/pyproject.toml @@ -0,0 +1,10 @@ +[build-system] +requires = [ + "wheel", + "setuptools >= 40.6.0", + "Cython >= 0.29.2", + "oldest-supported-numpy", + "setuptools_scm", + "setuptools_scm_git_archive", +] +build-backend = "setuptools.build_meta" diff --git a/requirements/default.txt b/requirements/default.txt index ece5e4012..16ed4e653 100644 --- a/requirements/default.txt +++ b/requirements/default.txt @@ -1,4 +1,3 @@ -numpy>=1.10 +numpy>=1.13.3 shapely>=1.5.6 pyshp>=2 -setuptools>=0.7.2 diff --git a/setup.py b/setup.py index a04eace28..fb62c5efb 100644 --- a/setup.py +++ b/setup.py @@ -371,7 +371,6 @@ def decythonize(extensions, **_ignore): extras_require=extras_require, tests_require=tests_require, - setup_requires=['setuptools_scm', 'setuptools_scm_git_archive'], use_scm_version={ 'write_to': 'lib/cartopy/_version.py', },
UTNkar__moore-59
Login is per-subdomain
[ { "content": "\"\"\"\nDjango settings for the production environment of Project Moore.\n\nFor more information regarding running in production see,\nSee https://docs.djangoproject.com/en/1.10/howto/deployment/checklist/\n\nFor more information on this file, see\nhttps://docs.djangoproject.com/en/1.10/topics/settings/\n\nFor the full list of settings and their values, see\nhttps://docs.djangoproject.com/en/1.10/ref/settings/\n\"\"\"\nfrom __future__ import absolute_import, unicode_literals\n\nfrom .base import *\n\n# SECURITY WARNING: don't run with debug turned on in production!\nDEBUG = False\n\n# SECURITY WARNING: keep the secret key used in production secret!\nSECRET_KEY = os.environ.get(\n 'DJANGO_SECRET',\n 'za7^0@54n&p-dg4)_l12q_3^o5awz_uym0osqaz2!myki_8kw0'\n)\n\n# Database\n# https://docs.djangoproject.com/en/1.10/ref/settings/#databases\n\nDATABASES = {\n 'default': {\n 'ENGINE': 'django.db.backends.postgresql',\n 'NAME': os.environ.get('DJANGO_DB'),\n 'USER': os.environ.get('DJANGO_DB_USER'),\n 'PASSWORD': os.environ.get('DJANGO_DB_PASS'),\n 'HOST': os.environ.get('DJANGO_DB_HOST', '127.0.0.1'),\n 'PORT': os.environ.get('DJANGO_DB_PORT', '5432'),\n }\n}\n\n# CONN_MAX_AGE = 0\n\n# Base URL to use when referring to full URLs within the Wagtail admin\n# backend - e.g. in notification emails. Don't include '/admin' or a\n# trailing slash\nBASE_URL = 'https://dev.utn.se'\n\nALLOWED_HOSTS = ['.utn.se']\n\n# Email settings\nDEFAULT_FROM_EMAIL = '[email protected]'\n\nEMAIL_SUBJECT_PREFIX = '[UTN] '\n\n# Admins - will be sent error messages\nADMINS = [('UTN System Administrator', '[email protected]')]\n\nLOGGING_CONFIG = None\n\n# TODO: HTTPS security\n# CSRF_COOKIE_SECURE = True\n#\n# SESSION_COOKIE_SECURE = True\n\n# Membership API\nMEMBERSHIP_API_USER = 'moore'\nMEMBERSHIP_API_PASSWORD = os.environ.get('MEMBERSHIP_API_PASSWORD')\n\ntry:\n from .local import *\nexcept ImportError:\n pass\n", "path": "website/website/settings/production.py" } ]
[ { "content": "\"\"\"\nDjango settings for the production environment of Project Moore.\n\nFor more information regarding running in production see,\nSee https://docs.djangoproject.com/en/1.10/howto/deployment/checklist/\n\nFor more information on this file, see\nhttps://docs.djangoproject.com/en/1.10/topics/settings/\n\nFor the full list of settings and their values, see\nhttps://docs.djangoproject.com/en/1.10/ref/settings/\n\"\"\"\nfrom __future__ import absolute_import, unicode_literals\n\nfrom .base import *\n\n# SECURITY WARNING: don't run with debug turned on in production!\nDEBUG = False\n\n# SECURITY WARNING: keep the secret key used in production secret!\nSECRET_KEY = os.environ.get(\n 'DJANGO_SECRET',\n 'za7^0@54n&p-dg4)_l12q_3^o5awz_uym0osqaz2!myki_8kw0'\n)\n\n# Database\n# https://docs.djangoproject.com/en/1.10/ref/settings/#databases\n\nDATABASES = {\n 'default': {\n 'ENGINE': 'django.db.backends.postgresql',\n 'NAME': os.environ.get('DJANGO_DB'),\n 'USER': os.environ.get('DJANGO_DB_USER'),\n 'PASSWORD': os.environ.get('DJANGO_DB_PASS'),\n 'HOST': os.environ.get('DJANGO_DB_HOST', '127.0.0.1'),\n 'PORT': os.environ.get('DJANGO_DB_PORT', '5432'),\n }\n}\n\n# CONN_MAX_AGE = 0\n\n# Base URL to use when referring to full URLs within the Wagtail admin\n# backend - e.g. in notification emails. Don't include '/admin' or a\n# trailing slash\nBASE_URL = 'https://dev.utn.se'\n\nALLOWED_HOSTS = ['.utn.se']\n\n# Email settings\nDEFAULT_FROM_EMAIL = '[email protected]'\n\nEMAIL_SUBJECT_PREFIX = '[UTN] '\n\n# Admins - will be sent error messages\nADMINS = [('UTN System Administrator', '[email protected]')]\n\nLOGGING_CONFIG = None\n\nCSRF_COOKIE_SECURE = True\n\nSESSION_COOKIE_DOMAIN = '.utn.se'\n\nSESSION_COOKIE_SECURE = True\n\n# Membership API\nMEMBERSHIP_API_USER = 'moore'\nMEMBERSHIP_API_PASSWORD = os.environ.get('MEMBERSHIP_API_PASSWORD')\n\ntry:\n from .local import *\nexcept ImportError:\n pass\n", "path": "website/website/settings/production.py" } ]
diff --git a/CHANGELOG.md b/CHANGELOG.md index 753dc74f..be38bbc7 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -7,6 +7,8 @@ and this project adheres to [Semantic Versioning](http://semver.org/). ## [Unreleased] ### Fixed - Confirmation e-mails not being sent. +- Updated translations +- Production: cross-domain cookies ## [0.1.1] - 2017-03-30 ### Added diff --git a/website/website/settings/production.py b/website/website/settings/production.py index 833544b0..661f3864 100644 --- a/website/website/settings/production.py +++ b/website/website/settings/production.py @@ -56,10 +56,11 @@ LOGGING_CONFIG = None -# TODO: HTTPS security -# CSRF_COOKIE_SECURE = True -# -# SESSION_COOKIE_SECURE = True +CSRF_COOKIE_SECURE = True + +SESSION_COOKIE_DOMAIN = '.utn.se' + +SESSION_COOKIE_SECURE = True # Membership API MEMBERSHIP_API_USER = 'moore'
ManageIQ__integration_tests-7728
cfme.log only showing on first test in a run. cfme.log link only appears on the first test from a selection but shows all logs from all tests in that run. Expected to have a separate log link for each test specific to that test. See attached ![screenshot from 2018-08-14 15-50-11](https://user-images.githubusercontent.com/18352403/44099182-fca60de6-9fd9-11e8-8525-27ca6032229f.png)
[ { "content": "\"\"\" Logger plugin for Artifactor\n\nAdd a stanza to the artifactor config like this,\nartifactor:\n log_dir: /home/username/outdir\n per_run: test #test, run, None\n overwrite: True\n plugins:\n logger:\n enabled: True\n plugin: logger\n level: DEBUG\n\"\"\"\nimport os\nfrom logging import makeLogRecord\nfrom artifactor import ArtifactorBasePlugin\nfrom cfme.utils.log import make_file_handler\n\n\nclass Logger(ArtifactorBasePlugin):\n\n class Test(object):\n def __init__(self, ident):\n self.ident = ident\n self.in_progress = False\n self.handler = None\n\n def close(self):\n if self.handle is not None:\n self.handler.close()\n self.handler = None\n\n def plugin_initialize(self):\n self.register_plugin_hook('start_test', self.start_test)\n self.register_plugin_hook('finish_test', self.finish_test)\n self.register_plugin_hook('log_message', self.log_message)\n\n def configure(self):\n self.configured = True\n self.level = self.data.get('level', 'DEBUG')\n\n @ArtifactorBasePlugin.check_configured\n def start_test(self, artifact_path, test_name, test_location, slaveid):\n if not slaveid:\n slaveid = \"Master\"\n test_ident = \"{}/{}\".format(test_location, test_name)\n if slaveid in self.store:\n if self.store[slaveid].in_progress:\n print(\"Test already running, can't start another, logger\")\n return None\n self.store[slaveid].close()\n self.store[slaveid] = self.Test(test_ident)\n self.store[slaveid].in_progress = True\n filename = \"{ident}-cfme.log\".format(ident=self.ident)\n self.store[slaveid].handler = make_file_handler(\n filename,\n root=artifact_path,\n # we overwrite\n mode='w',\n level=self.level)\n\n self.fire_hook('filedump', test_location=test_location, test_name=test_name,\n description=\"cfme.log\", slaveid=slaveid, contents=\"\", file_type=\"log\",\n display_glyph=\"align-justify\", dont_write=True,\n os_filename=os.path.join(artifact_path, filename),\n group_id=\"pytest-logfile\")\n\n @ArtifactorBasePlugin.check_configured\n def finish_test(self, artifact_path, test_name, test_location, slaveid):\n if not slaveid:\n slaveid = \"Master\"\n self.store[slaveid].in_progress = False\n self.store[slaveid].close()\n\n @ArtifactorBasePlugin.check_configured\n def log_message(self, log_record, slaveid):\n # json transport fallout: args must be a dict or a tuple, json makes a tuple into a list\n args = log_record['args']\n log_record['args'] = tuple(args) if isinstance(args, list) else args\n record = makeLogRecord(log_record)\n if not slaveid:\n slaveid = \"Master\"\n if slaveid in self.store:\n handler = self.store[slaveid].handler\n if handler and record.levelno >= handler.level:\n handler.handle(record)\n", "path": "artifactor/plugins/logger.py" } ]
[ { "content": "\"\"\" Logger plugin for Artifactor\n\nAdd a stanza to the artifactor config like this,\nartifactor:\n log_dir: /home/username/outdir\n per_run: test #test, run, None\n overwrite: True\n plugins:\n logger:\n enabled: True\n plugin: logger\n level: DEBUG\n\"\"\"\nimport os\nfrom logging import makeLogRecord\nfrom artifactor import ArtifactorBasePlugin\nfrom cfme.utils.log import make_file_handler\n\n\nclass Logger(ArtifactorBasePlugin):\n\n class Test(object):\n def __init__(self, ident):\n self.ident = ident\n self.in_progress = False\n self.handler = None\n\n def close(self):\n if self.handler is not None:\n self.handler.close()\n self.handler = None\n\n def plugin_initialize(self):\n self.register_plugin_hook('start_test', self.start_test)\n self.register_plugin_hook('finish_test', self.finish_test)\n self.register_plugin_hook('log_message', self.log_message)\n\n def configure(self):\n self.configured = True\n self.level = self.data.get('level', 'DEBUG')\n\n @ArtifactorBasePlugin.check_configured\n def start_test(self, artifact_path, test_name, test_location, slaveid):\n if not slaveid:\n slaveid = \"Master\"\n test_ident = \"{}/{}\".format(test_location, test_name)\n if slaveid in self.store:\n if self.store[slaveid].in_progress:\n print(\"Test already running, can't start another, logger\")\n return None\n self.store[slaveid].close()\n self.store[slaveid] = self.Test(test_ident)\n self.store[slaveid].in_progress = True\n filename = \"{ident}-cfme.log\".format(ident=self.ident)\n self.store[slaveid].handler = make_file_handler(\n filename,\n root=artifact_path,\n # we overwrite\n mode='w',\n level=self.level)\n\n self.fire_hook('filedump', test_location=test_location, test_name=test_name,\n description=\"cfme.log\", slaveid=slaveid, contents=\"\", file_type=\"log\",\n display_glyph=\"align-justify\", dont_write=True,\n os_filename=os.path.join(artifact_path, filename),\n group_id=\"pytest-logfile\")\n\n @ArtifactorBasePlugin.check_configured\n def finish_test(self, artifact_path, test_name, test_location, slaveid):\n if not slaveid:\n slaveid = \"Master\"\n self.store[slaveid].in_progress = False\n self.store[slaveid].close()\n\n @ArtifactorBasePlugin.check_configured\n def log_message(self, log_record, slaveid):\n # json transport fallout: args must be a dict or a tuple, json makes a tuple into a list\n args = log_record['args']\n log_record['args'] = tuple(args) if isinstance(args, list) else args\n record = makeLogRecord(log_record)\n if not slaveid:\n slaveid = \"Master\"\n if slaveid in self.store:\n handler = self.store[slaveid].handler\n if handler and record.levelno >= handler.level:\n handler.handle(record)\n", "path": "artifactor/plugins/logger.py" } ]
diff --git a/artifactor/example_tests/artifactor_simple_example.py b/artifactor/example_tests/artifactor_simple_example.py new file mode 100644 index 0000000000..c7cf111291 --- /dev/null +++ b/artifactor/example_tests/artifactor_simple_example.py @@ -0,0 +1,21 @@ +from cfme.utils.log import logger as log +import pytest + + +def test_pass(): + log.info("pass") + + [email protected](reason="example") +def test_skip(): + pass + + +def test_skip_imp(): + log.info("skip") + pytest.skip("example") + + +def test_fail(): + log.info("fail") + raise ValueError() diff --git a/artifactor/plugins/logger.py b/artifactor/plugins/logger.py index b612c7e6b6..2eead0b7d5 100644 --- a/artifactor/plugins/logger.py +++ b/artifactor/plugins/logger.py @@ -26,7 +26,7 @@ def __init__(self, ident): self.handler = None def close(self): - if self.handle is not None: + if self.handler is not None: self.handler.close() self.handler = None
ethereum__consensus-specs-1743
is verifying max number of indices necessary in method is_valid_indexed_attestation(), there is `if not len(indices) <= MAX_VALIDATORS_PER_COMMITTEE: return False` But since we defined the length of the indices already in struct IndexedAttestation as MAX_VALIDATORS_PER_COMMITTEE, which means when create a instance of the IndexedAttestation, the length is guaranteed to be MAX_VALIDATORS_PER_COMMITTEE. But if we assume that the construction of the struct may give it a variant-length, we might need check the other fields as well, e.g., some fields in struct BeaconBlockBody.
[ { "content": "from setuptools import setup, find_packages, Command\nfrom setuptools.command.build_py import build_py\nfrom distutils import dir_util\nfrom distutils.util import convert_path\nimport os\nimport re\nfrom typing import Dict, NamedTuple, List\n\nFUNCTION_REGEX = r'^def [\\w_]*'\n\n\nclass SpecObject(NamedTuple):\n functions: Dict[str, str]\n custom_types: Dict[str, str]\n constants: Dict[str, str]\n ssz_objects: Dict[str, str]\n\n\ndef get_spec(file_name: str) -> SpecObject:\n \"\"\"\n Takes in the file name of a spec.md file, opens it and returns a parsed spec object.\n\n Note: This function makes heavy use of the inherent ordering of dicts,\n if this is not supported by your python version, it will not work.\n \"\"\"\n pulling_from = None # line number of start of latest object\n current_name = None # most recent section title\n functions: Dict[str, str] = {}\n constants: Dict[str, str] = {}\n ssz_objects: Dict[str, str] = {}\n function_matcher = re.compile(FUNCTION_REGEX)\n is_ssz = False\n custom_types: Dict[str, str] = {}\n for linenum, line in enumerate(open(file_name).readlines()):\n line = line.rstrip()\n if pulling_from is None and len(line) > 0 and line[0] == '#' and line[-1] == '`':\n current_name = line[line[:-1].rfind('`') + 1: -1]\n if line[:9] == '```python':\n assert pulling_from is None\n pulling_from = linenum + 1\n elif line[:3] == '```':\n pulling_from = None\n else:\n # Handle function definitions & ssz_objects\n if pulling_from is not None:\n # SSZ Object\n if len(line) > 18 and line[:6] == 'class ' and line[-12:] == '(Container):':\n name = line[6:-12]\n # Check consistency with markdown header\n assert name == current_name\n is_ssz = True\n # function definition\n elif function_matcher.match(line) is not None:\n current_name = function_matcher.match(line).group(0)\n is_ssz = False\n if is_ssz:\n ssz_objects[current_name] = ssz_objects.get(current_name, '') + line + '\\n'\n else:\n functions[current_name] = functions.get(current_name, '') + line + '\\n'\n # Handle constant and custom types table entries\n elif pulling_from is None and len(line) > 0 and line[0] == '|':\n row = line[1:].split('|')\n if len(row) >= 2:\n for i in range(2):\n row[i] = row[i].strip().strip('`')\n if '`' in row[i]:\n row[i] = row[i][:row[i].find('`')]\n is_constant_def = True\n if row[0][0] not in 'ABCDEFGHIJKLMNOPQRSTUVWXYZ_':\n is_constant_def = False\n for c in row[0]:\n if c not in 'ABCDEFGHIJKLMNOPQRSTUVWXYZ_0123456789':\n is_constant_def = False\n if is_constant_def:\n constants[row[0]] = row[1].replace('**TBD**', '2**32')\n elif row[1].startswith('uint') or row[1].startswith('Bytes'):\n custom_types[row[0]] = row[1]\n return SpecObject(functions, custom_types, constants, ssz_objects)\n\n\nCONFIG_LOADER = '''\napply_constants_config(globals())\n'''\n\nPHASE0_IMPORTS = '''from eth2spec.config.config_util import apply_constants_config\nfrom typing import (\n Any, Callable, Dict, Set, Sequence, Tuple, Optional, TypeVar\n)\n\nfrom dataclasses import (\n dataclass,\n field,\n)\n\nfrom lru import LRU\n\nfrom eth2spec.utils.ssz.ssz_impl import hash_tree_root\nfrom eth2spec.utils.ssz.ssz_typing import (\n View, boolean, Container, List, Vector, uint64,\n Bytes1, Bytes4, Bytes32, Bytes48, Bytes96, Bitlist, Bitvector,\n)\nfrom eth2spec.utils import bls\n\nfrom eth2spec.utils.hash_function import hash\n\nSSZObject = TypeVar('SSZObject', bound=View)\n'''\nPHASE1_IMPORTS = '''from eth2spec.phase0 import spec as phase0\nfrom eth2spec.config.config_util import apply_constants_config\nfrom typing import (\n Any, Dict, Set, Sequence, NewType, Tuple, TypeVar, Callable\n)\n\nfrom dataclasses import (\n dataclass,\n field,\n)\n\nfrom lru import LRU\n\nfrom eth2spec.utils.ssz.ssz_impl import hash_tree_root\nfrom eth2spec.utils.ssz.ssz_typing import (\n View, boolean, Container, List, Vector, uint64, uint8, bit,\n ByteList, Bytes1, Bytes4, Bytes32, Bytes48, Bytes96, Bitlist, Bitvector,\n)\nfrom eth2spec.utils import bls\n\nfrom eth2spec.utils.hash_function import hash\n\n# Whenever phase 1 is loaded, make sure we have the latest phase0\nfrom importlib import reload\nreload(phase0)\n\n\nSSZVariableName = str\nGeneralizedIndex = NewType('GeneralizedIndex', int)\nSSZObject = TypeVar('SSZObject', bound=View)\n'''\nSUNDRY_CONSTANTS_FUNCTIONS = '''\ndef ceillog2(x: uint64) -> int:\n return (x - 1).bit_length()\n'''\nSUNDRY_FUNCTIONS = '''\n# Monkey patch hash cache\n_hash = hash\nhash_cache: Dict[bytes, Bytes32] = {}\n\n\ndef get_eth1_data(distance: uint64) -> Bytes32:\n return hash(distance)\n\n\ndef hash(x: bytes) -> Bytes32: # type: ignore\n if x not in hash_cache:\n hash_cache[x] = Bytes32(_hash(x))\n return hash_cache[x]\n\n\ndef cache_this(key_fn, value_fn, lru_size): # type: ignore\n cache_dict = LRU(size=lru_size)\n\n def wrapper(*args, **kw): # type: ignore\n key = key_fn(*args, **kw)\n nonlocal cache_dict\n if key not in cache_dict:\n cache_dict[key] = value_fn(*args, **kw)\n return cache_dict[key]\n return wrapper\n\n\n_compute_shuffled_index = compute_shuffled_index\ncompute_shuffled_index = cache_this(\n lambda index, index_count, seed: (index, index_count, seed),\n _compute_shuffled_index, lru_size=SLOTS_PER_EPOCH * 3)\n\n_get_total_active_balance = get_total_active_balance\nget_total_active_balance = cache_this(\n lambda state: (state.validators.hash_tree_root(), compute_epoch_at_slot(state.slot)),\n _get_total_active_balance, lru_size=10)\n\n_get_base_reward = get_base_reward\nget_base_reward = cache_this(\n lambda state, index: (state.validators.hash_tree_root(), state.slot, index),\n _get_base_reward, lru_size=2048)\n\n_get_committee_count_at_slot = get_committee_count_at_slot\nget_committee_count_at_slot = cache_this(\n lambda state, epoch: (state.validators.hash_tree_root(), epoch),\n _get_committee_count_at_slot, lru_size=SLOTS_PER_EPOCH * 3)\n\n_get_active_validator_indices = get_active_validator_indices\nget_active_validator_indices = cache_this(\n lambda state, epoch: (state.validators.hash_tree_root(), epoch),\n _get_active_validator_indices, lru_size=3)\n\n_get_beacon_committee = get_beacon_committee\nget_beacon_committee = cache_this(\n lambda state, slot, index: (state.validators.hash_tree_root(), state.randao_mixes.hash_tree_root(), slot, index),\n _get_beacon_committee, lru_size=SLOTS_PER_EPOCH * MAX_COMMITTEES_PER_SLOT * 3)\n\n_get_matching_target_attestations = get_matching_target_attestations\nget_matching_target_attestations = cache_this(\n lambda state, epoch: (state.hash_tree_root(), epoch),\n _get_matching_target_attestations, lru_size=10)\n\n_get_matching_head_attestations = get_matching_head_attestations\nget_matching_head_attestations = cache_this(\n lambda state, epoch: (state.hash_tree_root(), epoch),\n _get_matching_head_attestations, lru_size=10)\n\n_get_attesting_indices = get_attesting_indices\nget_attesting_indices = cache_this(\n lambda state, data, bits: (state.validators.hash_tree_root(), data.hash_tree_root(), bits.hash_tree_root()),\n _get_attesting_indices, lru_size=SLOTS_PER_EPOCH * MAX_COMMITTEES_PER_SLOT * 3)'''\n\n\ndef objects_to_spec(spec_object: SpecObject, imports: str, fork: str) -> str:\n \"\"\"\n Given all the objects that constitute a spec, combine them into a single pyfile.\n \"\"\"\n new_type_definitions = (\n '\\n\\n'.join(\n [\n f\"class {key}({value}):\\n pass\\n\"\n for key, value in spec_object.custom_types.items()\n ]\n )\n )\n for k in list(spec_object.functions):\n if \"ceillog2\" in k:\n del spec_object.functions[k]\n functions_spec = '\\n\\n'.join(spec_object.functions.values())\n for k in list(spec_object.constants.keys()):\n if k == \"BLS12_381_Q\":\n spec_object.constants[k] += \" # noqa: E501\"\n constants_spec = '\\n'.join(map(lambda x: '%s = %s' % (x, spec_object.constants[x]), spec_object.constants))\n ssz_objects_instantiation_spec = '\\n\\n'.join(spec_object.ssz_objects.values())\n spec = (\n imports\n + '\\n\\n' + f\"fork = \\'{fork}\\'\\n\"\n + '\\n\\n' + new_type_definitions\n + '\\n' + SUNDRY_CONSTANTS_FUNCTIONS\n + '\\n\\n' + constants_spec\n + '\\n\\n' + CONFIG_LOADER\n + '\\n\\n' + ssz_objects_instantiation_spec\n + '\\n\\n' + functions_spec\n + '\\n' + SUNDRY_FUNCTIONS\n + '\\n'\n )\n return spec\n\n\ndef combine_functions(old_functions: Dict[str, str], new_functions: Dict[str, str]) -> Dict[str, str]:\n for key, value in new_functions.items():\n old_functions[key] = value\n return old_functions\n\n\ndef combine_constants(old_constants: Dict[str, str], new_constants: Dict[str, str]) -> Dict[str, str]:\n for key, value in new_constants.items():\n old_constants[key] = value\n return old_constants\n\n\nignored_dependencies = [\n 'bit', 'boolean', 'Vector', 'List', 'Container', 'BLSPubkey', 'BLSSignature',\n 'Bytes1', 'Bytes4', 'Bytes32', 'Bytes48', 'Bytes96', 'Bitlist', 'Bitvector',\n 'uint8', 'uint16', 'uint32', 'uint64', 'uint128', 'uint256',\n 'bytes', 'byte', 'ByteList', 'ByteVector'\n]\n\n\ndef dependency_order_ssz_objects(objects: Dict[str, str], custom_types: Dict[str, str]) -> None:\n \"\"\"\n Determines which SSZ Object is dependent on which other and orders them appropriately\n \"\"\"\n items = list(objects.items())\n for key, value in items:\n dependencies = []\n for line in value.split('\\n'):\n if not re.match(r'\\s+\\w+: .+', line):\n continue # skip whitespace etc.\n line = line[line.index(':') + 1:] # strip of field name\n if '#' in line:\n line = line[:line.index('#')] # strip of comment\n dependencies.extend(re.findall(r'(\\w+)', line)) # catch all legible words, potential dependencies\n dependencies = filter(lambda x: '_' not in x and x.upper() != x, dependencies) # filter out constants\n dependencies = filter(lambda x: x not in ignored_dependencies, dependencies)\n dependencies = filter(lambda x: x not in custom_types, dependencies)\n for dep in dependencies:\n key_list = list(objects.keys())\n for item in [dep, key] + key_list[key_list.index(dep)+1:]:\n objects[item] = objects.pop(item)\n\n\ndef combine_ssz_objects(old_objects: Dict[str, str], new_objects: Dict[str, str], custom_types) -> Dict[str, str]:\n \"\"\"\n Takes in old spec and new spec ssz objects, combines them,\n and returns the newer versions of the objects in dependency order.\n \"\"\"\n for key, value in new_objects.items():\n old_objects[key] = value\n return old_objects\n\n\ndef combine_spec_objects(spec0: SpecObject, spec1: SpecObject) -> SpecObject:\n \"\"\"\n Takes in two spec variants (as tuples of their objects) and combines them using the appropriate combiner function.\n \"\"\"\n functions0, custom_types0, constants0, ssz_objects0 = spec0\n functions1, custom_types1, constants1, ssz_objects1 = spec1\n functions = combine_functions(functions0, functions1)\n custom_types = combine_constants(custom_types0, custom_types1)\n constants = combine_constants(constants0, constants1)\n ssz_objects = combine_ssz_objects(ssz_objects0, ssz_objects1, custom_types)\n return SpecObject(functions, custom_types, constants, ssz_objects)\n\n\nfork_imports = {\n 'phase0': PHASE0_IMPORTS,\n 'phase1': PHASE1_IMPORTS,\n}\n\n\ndef build_spec(fork: str, source_files: List[str]) -> str:\n all_specs = [get_spec(spec) for spec in source_files]\n\n spec_object = all_specs[0]\n for value in all_specs[1:]:\n spec_object = combine_spec_objects(spec_object, value)\n\n dependency_order_ssz_objects(spec_object.ssz_objects, spec_object.custom_types)\n\n return objects_to_spec(spec_object, fork_imports[fork], fork)\n\n\nclass PySpecCommand(Command):\n \"\"\"Convert spec markdown files to a spec python file\"\"\"\n\n description = \"Convert spec markdown files to a spec python file\"\n\n spec_fork: str\n md_doc_paths: str\n parsed_md_doc_paths: List[str]\n out_dir: str\n\n # The format is (long option, short option, description).\n user_options = [\n ('spec-fork=', None, \"Spec fork to tag build with. Used to select md-docs defaults.\"),\n ('md-doc-paths=', None, \"List of paths of markdown files to build spec with\"),\n ('out-dir=', None, \"Output directory to write spec package to\")\n ]\n\n def initialize_options(self):\n \"\"\"Set default values for options.\"\"\"\n # Each user option must be listed here with their default value.\n self.spec_fork = 'phase0'\n self.md_doc_paths = ''\n self.out_dir = 'pyspec_output'\n\n def finalize_options(self):\n \"\"\"Post-process options.\"\"\"\n if len(self.md_doc_paths) == 0:\n print(\"no paths were specified, using default markdown file paths for pyspec\"\n \" build (spec fork: %s)\" % self.spec_fork)\n if self.spec_fork == \"phase0\":\n self.md_doc_paths = \"\"\"\n specs/phase0/beacon-chain.md\n specs/phase0/fork-choice.md\n specs/phase0/validator.md\n \"\"\"\n elif self.spec_fork == \"phase1\":\n self.md_doc_paths = \"\"\"\n specs/phase0/beacon-chain.md\n specs/phase0/fork-choice.md\n specs/phase1/custody-game.md\n specs/phase1/beacon-chain.md\n specs/phase1/fraud-proofs.md\n specs/phase1/fork-choice.md\n specs/phase1/phase1-fork.md\n \"\"\"\n else:\n raise Exception('no markdown files specified, and spec fork \"%s\" is unknown', self.spec_fork)\n\n self.parsed_md_doc_paths = self.md_doc_paths.split()\n\n for filename in self.parsed_md_doc_paths:\n if not os.path.exists(filename):\n raise Exception('Pyspec markdown input file \"%s\" does not exist.' % filename)\n\n def run(self):\n spec_str = build_spec(self.spec_fork, self.parsed_md_doc_paths)\n if self.dry_run:\n self.announce('dry run successfully prepared contents for spec.'\n f' out dir: \"{self.out_dir}\", spec fork: \"{self.spec_fork}\"')\n self.debug_print(spec_str)\n else:\n dir_util.mkpath(self.out_dir)\n with open(os.path.join(self.out_dir, 'spec.py'), 'w') as out:\n out.write(spec_str)\n with open(os.path.join(self.out_dir, '__init__.py'), 'w') as out:\n out.write(\"\")\n\n\nclass BuildPyCommand(build_py):\n \"\"\"Customize the build command to run the spec-builder on setup.py build\"\"\"\n\n def initialize_options(self):\n super(BuildPyCommand, self).initialize_options()\n\n def run_pyspec_cmd(self, spec_fork: str, **opts):\n cmd_obj: PySpecCommand = self.distribution.reinitialize_command(\"pyspec\")\n cmd_obj.spec_fork = spec_fork\n cmd_obj.out_dir = os.path.join(self.build_lib, 'eth2spec', spec_fork)\n for k, v in opts.items():\n setattr(cmd_obj, k, v)\n self.run_command('pyspec')\n\n def run(self):\n for spec_fork in fork_imports:\n self.run_pyspec_cmd(spec_fork=spec_fork)\n\n super(BuildPyCommand, self).run()\n\n\nclass PyspecDevCommand(Command):\n \"\"\"Build the markdown files in-place to their source location for testing.\"\"\"\n description = \"Build the markdown files in-place to their source location for testing.\"\n user_options = []\n\n def initialize_options(self):\n pass\n\n def finalize_options(self):\n pass\n\n def run_pyspec_cmd(self, spec_fork: str, **opts):\n cmd_obj: PySpecCommand = self.distribution.reinitialize_command(\"pyspec\")\n cmd_obj.spec_fork = spec_fork\n eth2spec_dir = convert_path(self.distribution.package_dir['eth2spec'])\n cmd_obj.out_dir = os.path.join(eth2spec_dir, spec_fork)\n for k, v in opts.items():\n setattr(cmd_obj, k, v)\n self.run_command('pyspec')\n\n def run(self):\n print(\"running build_py command\")\n for spec_fork in fork_imports:\n self.run_pyspec_cmd(spec_fork=spec_fork)\n\ncommands = {\n 'pyspec': PySpecCommand,\n 'build_py': BuildPyCommand,\n 'pyspecdev': PyspecDevCommand,\n}\n\nwith open(\"README.md\", \"rt\", encoding=\"utf8\") as f:\n readme = f.read()\n\n# How to use \"VERSION.txt\" file:\n# - dev branch contains \"X.Y.Z.dev\", where \"X.Y.Z\" is the target version to release dev into.\n# -> Changed as part of 'master' backport to 'dev'\n# - master branch contains \"X.Y.Z\", where \"X.Y.Z\" is the current version.\n# -> Changed as part of 'dev' release (or other branch) into 'master'\n# -> In case of a commit on master without git tag, target the next version\n# with \".postN\" (release candidate, numbered) suffixed.\n# See https://www.python.org/dev/peps/pep-0440/#public-version-identifiers\nwith open(os.path.join('tests', 'core', 'pyspec', 'eth2spec', 'VERSION.txt')) as f:\n spec_version = f.read().strip()\n\nsetup(\n name='eth2spec',\n version=spec_version,\n description=\"Eth2 spec, provided as Python package for tooling and testing\",\n long_description=readme,\n long_description_content_type=\"text/markdown\",\n author=\"ethereum\",\n url=\"https://github.com/ethereum/eth2.0-specs\",\n include_package_data=False,\n package_data={'configs': ['*.yaml'],\n 'specs': ['**/*.md'],\n 'eth2spec': ['VERSION.txt']},\n package_dir={\n \"eth2spec\": \"tests/core/pyspec/eth2spec\",\n \"configs\": \"configs\",\n \"specs\": \"specs\"\n },\n packages=find_packages(where='tests/core/pyspec') + ['configs', 'specs'],\n py_modules=[\"eth2spec\"],\n cmdclass=commands,\n python_requires=\">=3.8, <4\",\n extras_require={\n \"test\": [\"pytest>=4.4\", \"pytest-cov\", \"pytest-xdist\"],\n \"lint\": [\"flake8==3.7.7\", \"mypy==0.750\"],\n },\n install_requires=[\n \"eth-utils>=1.3.0,<2\",\n \"eth-typing>=2.1.0,<3.0.0\",\n \"pycryptodome==3.9.4\",\n \"py_ecc==2.0.0\",\n \"dataclasses==0.6\",\n \"remerkleable==0.1.12\",\n \"ruamel.yaml==0.16.5\",\n \"lru-dict==1.1.6\"\n ]\n)\n", "path": "setup.py" } ]
[ { "content": "from setuptools import setup, find_packages, Command\nfrom setuptools.command.build_py import build_py\nfrom distutils import dir_util\nfrom distutils.util import convert_path\nimport os\nimport re\nfrom typing import Dict, NamedTuple, List\n\nFUNCTION_REGEX = r'^def [\\w_]*'\n\n\nclass SpecObject(NamedTuple):\n functions: Dict[str, str]\n custom_types: Dict[str, str]\n constants: Dict[str, str]\n ssz_objects: Dict[str, str]\n\n\ndef get_spec(file_name: str) -> SpecObject:\n \"\"\"\n Takes in the file name of a spec.md file, opens it and returns a parsed spec object.\n\n Note: This function makes heavy use of the inherent ordering of dicts,\n if this is not supported by your python version, it will not work.\n \"\"\"\n pulling_from = None # line number of start of latest object\n current_name = None # most recent section title\n functions: Dict[str, str] = {}\n constants: Dict[str, str] = {}\n ssz_objects: Dict[str, str] = {}\n function_matcher = re.compile(FUNCTION_REGEX)\n is_ssz = False\n custom_types: Dict[str, str] = {}\n for linenum, line in enumerate(open(file_name).readlines()):\n line = line.rstrip()\n if pulling_from is None and len(line) > 0 and line[0] == '#' and line[-1] == '`':\n current_name = line[line[:-1].rfind('`') + 1: -1]\n if line[:9] == '```python':\n assert pulling_from is None\n pulling_from = linenum + 1\n elif line[:3] == '```':\n pulling_from = None\n else:\n # Handle function definitions & ssz_objects\n if pulling_from is not None:\n # SSZ Object\n if len(line) > 18 and line[:6] == 'class ' and line[-12:] == '(Container):':\n name = line[6:-12]\n # Check consistency with markdown header\n assert name == current_name\n is_ssz = True\n # function definition\n elif function_matcher.match(line) is not None:\n current_name = function_matcher.match(line).group(0)\n is_ssz = False\n if is_ssz:\n ssz_objects[current_name] = ssz_objects.get(current_name, '') + line + '\\n'\n else:\n functions[current_name] = functions.get(current_name, '') + line + '\\n'\n # Handle constant and custom types table entries\n elif pulling_from is None and len(line) > 0 and line[0] == '|':\n row = line[1:].split('|')\n if len(row) >= 2:\n for i in range(2):\n row[i] = row[i].strip().strip('`')\n if '`' in row[i]:\n row[i] = row[i][:row[i].find('`')]\n is_constant_def = True\n if row[0][0] not in 'ABCDEFGHIJKLMNOPQRSTUVWXYZ_':\n is_constant_def = False\n for c in row[0]:\n if c not in 'ABCDEFGHIJKLMNOPQRSTUVWXYZ_0123456789':\n is_constant_def = False\n if is_constant_def:\n constants[row[0]] = row[1].replace('**TBD**', '2**32')\n elif row[1].startswith('uint') or row[1].startswith('Bytes'):\n custom_types[row[0]] = row[1]\n return SpecObject(functions, custom_types, constants, ssz_objects)\n\n\nCONFIG_LOADER = '''\napply_constants_config(globals())\n'''\n\nPHASE0_IMPORTS = '''from eth2spec.config.config_util import apply_constants_config\nfrom typing import (\n Any, Callable, Dict, Set, Sequence, Tuple, Optional, TypeVar\n)\n\nfrom dataclasses import (\n dataclass,\n field,\n)\n\nfrom lru import LRU\n\nfrom eth2spec.utils.ssz.ssz_impl import hash_tree_root\nfrom eth2spec.utils.ssz.ssz_typing import (\n View, boolean, Container, List, Vector, uint64,\n Bytes1, Bytes4, Bytes32, Bytes48, Bytes96, Bitlist, Bitvector,\n)\nfrom eth2spec.utils import bls\n\nfrom eth2spec.utils.hash_function import hash\n\nSSZObject = TypeVar('SSZObject', bound=View)\n'''\nPHASE1_IMPORTS = '''from eth2spec.phase0 import spec as phase0\nfrom eth2spec.config.config_util import apply_constants_config\nfrom typing import (\n Any, Dict, Set, Sequence, NewType, Tuple, TypeVar, Callable\n)\n\nfrom dataclasses import (\n dataclass,\n field,\n)\n\nfrom lru import LRU\n\nfrom eth2spec.utils.ssz.ssz_impl import hash_tree_root\nfrom eth2spec.utils.ssz.ssz_typing import (\n View, boolean, Container, List, Vector, uint64, uint8, bit,\n ByteList, Bytes1, Bytes4, Bytes32, Bytes48, Bytes96, Bitlist, Bitvector,\n)\nfrom eth2spec.utils import bls\n\nfrom eth2spec.utils.hash_function import hash\n\n# Whenever phase 1 is loaded, make sure we have the latest phase0\nfrom importlib import reload\nreload(phase0)\n\n\nSSZVariableName = str\nGeneralizedIndex = NewType('GeneralizedIndex', int)\nSSZObject = TypeVar('SSZObject', bound=View)\n'''\nSUNDRY_CONSTANTS_FUNCTIONS = '''\ndef ceillog2(x: uint64) -> int:\n return (x - 1).bit_length()\n'''\nSUNDRY_FUNCTIONS = '''\n# Monkey patch hash cache\n_hash = hash\nhash_cache: Dict[bytes, Bytes32] = {}\n\n\ndef get_eth1_data(distance: uint64) -> Bytes32:\n return hash(distance)\n\n\ndef hash(x: bytes) -> Bytes32: # type: ignore\n if x not in hash_cache:\n hash_cache[x] = Bytes32(_hash(x))\n return hash_cache[x]\n\n\ndef cache_this(key_fn, value_fn, lru_size): # type: ignore\n cache_dict = LRU(size=lru_size)\n\n def wrapper(*args, **kw): # type: ignore\n key = key_fn(*args, **kw)\n nonlocal cache_dict\n if key not in cache_dict:\n cache_dict[key] = value_fn(*args, **kw)\n return cache_dict[key]\n return wrapper\n\n\n_compute_shuffled_index = compute_shuffled_index\ncompute_shuffled_index = cache_this(\n lambda index, index_count, seed: (index, index_count, seed),\n _compute_shuffled_index, lru_size=SLOTS_PER_EPOCH * 3)\n\n_get_total_active_balance = get_total_active_balance\nget_total_active_balance = cache_this(\n lambda state: (state.validators.hash_tree_root(), compute_epoch_at_slot(state.slot)),\n _get_total_active_balance, lru_size=10)\n\n_get_base_reward = get_base_reward\nget_base_reward = cache_this(\n lambda state, index: (state.validators.hash_tree_root(), state.slot, index),\n _get_base_reward, lru_size=2048)\n\n_get_committee_count_at_slot = get_committee_count_at_slot\nget_committee_count_at_slot = cache_this(\n lambda state, epoch: (state.validators.hash_tree_root(), epoch),\n _get_committee_count_at_slot, lru_size=SLOTS_PER_EPOCH * 3)\n\n_get_active_validator_indices = get_active_validator_indices\nget_active_validator_indices = cache_this(\n lambda state, epoch: (state.validators.hash_tree_root(), epoch),\n _get_active_validator_indices, lru_size=3)\n\n_get_beacon_committee = get_beacon_committee\nget_beacon_committee = cache_this(\n lambda state, slot, index: (state.validators.hash_tree_root(), state.randao_mixes.hash_tree_root(), slot, index),\n _get_beacon_committee, lru_size=SLOTS_PER_EPOCH * MAX_COMMITTEES_PER_SLOT * 3)\n\n_get_matching_target_attestations = get_matching_target_attestations\nget_matching_target_attestations = cache_this(\n lambda state, epoch: (state.hash_tree_root(), epoch),\n _get_matching_target_attestations, lru_size=10)\n\n_get_matching_head_attestations = get_matching_head_attestations\nget_matching_head_attestations = cache_this(\n lambda state, epoch: (state.hash_tree_root(), epoch),\n _get_matching_head_attestations, lru_size=10)\n\n_get_attesting_indices = get_attesting_indices\nget_attesting_indices = cache_this(\n lambda state, data, bits: (state.validators.hash_tree_root(), data.hash_tree_root(), bits.hash_tree_root()),\n _get_attesting_indices, lru_size=SLOTS_PER_EPOCH * MAX_COMMITTEES_PER_SLOT * 3)'''\n\n\ndef objects_to_spec(spec_object: SpecObject, imports: str, fork: str) -> str:\n \"\"\"\n Given all the objects that constitute a spec, combine them into a single pyfile.\n \"\"\"\n new_type_definitions = (\n '\\n\\n'.join(\n [\n f\"class {key}({value}):\\n pass\\n\"\n for key, value in spec_object.custom_types.items()\n ]\n )\n )\n for k in list(spec_object.functions):\n if \"ceillog2\" in k:\n del spec_object.functions[k]\n functions_spec = '\\n\\n'.join(spec_object.functions.values())\n for k in list(spec_object.constants.keys()):\n if k == \"BLS12_381_Q\":\n spec_object.constants[k] += \" # noqa: E501\"\n constants_spec = '\\n'.join(map(lambda x: '%s = %s' % (x, spec_object.constants[x]), spec_object.constants))\n ssz_objects_instantiation_spec = '\\n\\n'.join(spec_object.ssz_objects.values())\n spec = (\n imports\n + '\\n\\n' + f\"fork = \\'{fork}\\'\\n\"\n + '\\n\\n' + new_type_definitions\n + '\\n' + SUNDRY_CONSTANTS_FUNCTIONS\n + '\\n\\n' + constants_spec\n + '\\n\\n' + CONFIG_LOADER\n + '\\n\\n' + ssz_objects_instantiation_spec\n + '\\n\\n' + functions_spec\n + '\\n' + SUNDRY_FUNCTIONS\n + '\\n'\n )\n return spec\n\n\ndef combine_functions(old_functions: Dict[str, str], new_functions: Dict[str, str]) -> Dict[str, str]:\n for key, value in new_functions.items():\n old_functions[key] = value\n return old_functions\n\n\ndef combine_constants(old_constants: Dict[str, str], new_constants: Dict[str, str]) -> Dict[str, str]:\n for key, value in new_constants.items():\n old_constants[key] = value\n return old_constants\n\n\nignored_dependencies = [\n 'bit', 'boolean', 'Vector', 'List', 'Container', 'BLSPubkey', 'BLSSignature',\n 'Bytes1', 'Bytes4', 'Bytes32', 'Bytes48', 'Bytes96', 'Bitlist', 'Bitvector',\n 'uint8', 'uint16', 'uint32', 'uint64', 'uint128', 'uint256',\n 'bytes', 'byte', 'ByteList', 'ByteVector'\n]\n\n\ndef dependency_order_ssz_objects(objects: Dict[str, str], custom_types: Dict[str, str]) -> None:\n \"\"\"\n Determines which SSZ Object is dependent on which other and orders them appropriately\n \"\"\"\n items = list(objects.items())\n for key, value in items:\n dependencies = []\n for line in value.split('\\n'):\n if not re.match(r'\\s+\\w+: .+', line):\n continue # skip whitespace etc.\n line = line[line.index(':') + 1:] # strip of field name\n if '#' in line:\n line = line[:line.index('#')] # strip of comment\n dependencies.extend(re.findall(r'(\\w+)', line)) # catch all legible words, potential dependencies\n dependencies = filter(lambda x: '_' not in x and x.upper() != x, dependencies) # filter out constants\n dependencies = filter(lambda x: x not in ignored_dependencies, dependencies)\n dependencies = filter(lambda x: x not in custom_types, dependencies)\n for dep in dependencies:\n key_list = list(objects.keys())\n for item in [dep, key] + key_list[key_list.index(dep)+1:]:\n objects[item] = objects.pop(item)\n\n\ndef combine_ssz_objects(old_objects: Dict[str, str], new_objects: Dict[str, str], custom_types) -> Dict[str, str]:\n \"\"\"\n Takes in old spec and new spec ssz objects, combines them,\n and returns the newer versions of the objects in dependency order.\n \"\"\"\n for key, value in new_objects.items():\n old_objects[key] = value\n return old_objects\n\n\ndef combine_spec_objects(spec0: SpecObject, spec1: SpecObject) -> SpecObject:\n \"\"\"\n Takes in two spec variants (as tuples of their objects) and combines them using the appropriate combiner function.\n \"\"\"\n functions0, custom_types0, constants0, ssz_objects0 = spec0\n functions1, custom_types1, constants1, ssz_objects1 = spec1\n functions = combine_functions(functions0, functions1)\n custom_types = combine_constants(custom_types0, custom_types1)\n constants = combine_constants(constants0, constants1)\n ssz_objects = combine_ssz_objects(ssz_objects0, ssz_objects1, custom_types)\n return SpecObject(functions, custom_types, constants, ssz_objects)\n\n\nfork_imports = {\n 'phase0': PHASE0_IMPORTS,\n 'phase1': PHASE1_IMPORTS,\n}\n\n\ndef build_spec(fork: str, source_files: List[str]) -> str:\n all_specs = [get_spec(spec) for spec in source_files]\n\n spec_object = all_specs[0]\n for value in all_specs[1:]:\n spec_object = combine_spec_objects(spec_object, value)\n\n dependency_order_ssz_objects(spec_object.ssz_objects, spec_object.custom_types)\n\n return objects_to_spec(spec_object, fork_imports[fork], fork)\n\n\nclass PySpecCommand(Command):\n \"\"\"Convert spec markdown files to a spec python file\"\"\"\n\n description = \"Convert spec markdown files to a spec python file\"\n\n spec_fork: str\n md_doc_paths: str\n parsed_md_doc_paths: List[str]\n out_dir: str\n\n # The format is (long option, short option, description).\n user_options = [\n ('spec-fork=', None, \"Spec fork to tag build with. Used to select md-docs defaults.\"),\n ('md-doc-paths=', None, \"List of paths of markdown files to build spec with\"),\n ('out-dir=', None, \"Output directory to write spec package to\")\n ]\n\n def initialize_options(self):\n \"\"\"Set default values for options.\"\"\"\n # Each user option must be listed here with their default value.\n self.spec_fork = 'phase0'\n self.md_doc_paths = ''\n self.out_dir = 'pyspec_output'\n\n def finalize_options(self):\n \"\"\"Post-process options.\"\"\"\n if len(self.md_doc_paths) == 0:\n print(\"no paths were specified, using default markdown file paths for pyspec\"\n \" build (spec fork: %s)\" % self.spec_fork)\n if self.spec_fork == \"phase0\":\n self.md_doc_paths = \"\"\"\n specs/phase0/beacon-chain.md\n specs/phase0/fork-choice.md\n specs/phase0/validator.md\n \"\"\"\n elif self.spec_fork == \"phase1\":\n self.md_doc_paths = \"\"\"\n specs/phase0/beacon-chain.md\n specs/phase0/fork-choice.md\n specs/phase1/custody-game.md\n specs/phase1/beacon-chain.md\n specs/phase1/fraud-proofs.md\n specs/phase1/fork-choice.md\n specs/phase1/phase1-fork.md\n \"\"\"\n else:\n raise Exception('no markdown files specified, and spec fork \"%s\" is unknown', self.spec_fork)\n\n self.parsed_md_doc_paths = self.md_doc_paths.split()\n\n for filename in self.parsed_md_doc_paths:\n if not os.path.exists(filename):\n raise Exception('Pyspec markdown input file \"%s\" does not exist.' % filename)\n\n def run(self):\n spec_str = build_spec(self.spec_fork, self.parsed_md_doc_paths)\n if self.dry_run:\n self.announce('dry run successfully prepared contents for spec.'\n f' out dir: \"{self.out_dir}\", spec fork: \"{self.spec_fork}\"')\n self.debug_print(spec_str)\n else:\n dir_util.mkpath(self.out_dir)\n with open(os.path.join(self.out_dir, 'spec.py'), 'w') as out:\n out.write(spec_str)\n with open(os.path.join(self.out_dir, '__init__.py'), 'w') as out:\n out.write(\"\")\n\n\nclass BuildPyCommand(build_py):\n \"\"\"Customize the build command to run the spec-builder on setup.py build\"\"\"\n\n def initialize_options(self):\n super(BuildPyCommand, self).initialize_options()\n\n def run_pyspec_cmd(self, spec_fork: str, **opts):\n cmd_obj: PySpecCommand = self.distribution.reinitialize_command(\"pyspec\")\n cmd_obj.spec_fork = spec_fork\n cmd_obj.out_dir = os.path.join(self.build_lib, 'eth2spec', spec_fork)\n for k, v in opts.items():\n setattr(cmd_obj, k, v)\n self.run_command('pyspec')\n\n def run(self):\n for spec_fork in fork_imports:\n self.run_pyspec_cmd(spec_fork=spec_fork)\n\n super(BuildPyCommand, self).run()\n\n\nclass PyspecDevCommand(Command):\n \"\"\"Build the markdown files in-place to their source location for testing.\"\"\"\n description = \"Build the markdown files in-place to their source location for testing.\"\n user_options = []\n\n def initialize_options(self):\n pass\n\n def finalize_options(self):\n pass\n\n def run_pyspec_cmd(self, spec_fork: str, **opts):\n cmd_obj: PySpecCommand = self.distribution.reinitialize_command(\"pyspec\")\n cmd_obj.spec_fork = spec_fork\n eth2spec_dir = convert_path(self.distribution.package_dir['eth2spec'])\n cmd_obj.out_dir = os.path.join(eth2spec_dir, spec_fork)\n for k, v in opts.items():\n setattr(cmd_obj, k, v)\n self.run_command('pyspec')\n\n def run(self):\n print(\"running build_py command\")\n for spec_fork in fork_imports:\n self.run_pyspec_cmd(spec_fork=spec_fork)\n\ncommands = {\n 'pyspec': PySpecCommand,\n 'build_py': BuildPyCommand,\n 'pyspecdev': PyspecDevCommand,\n}\n\nwith open(\"README.md\", \"rt\", encoding=\"utf8\") as f:\n readme = f.read()\n\n# How to use \"VERSION.txt\" file:\n# - dev branch contains \"X.Y.Z.dev\", where \"X.Y.Z\" is the target version to release dev into.\n# -> Changed as part of 'master' backport to 'dev'\n# - master branch contains \"X.Y.Z\", where \"X.Y.Z\" is the current version.\n# -> Changed as part of 'dev' release (or other branch) into 'master'\n# -> In case of a commit on master without git tag, target the next version\n# with \".postN\" (release candidate, numbered) suffixed.\n# See https://www.python.org/dev/peps/pep-0440/#public-version-identifiers\nwith open(os.path.join('tests', 'core', 'pyspec', 'eth2spec', 'VERSION.txt')) as f:\n spec_version = f.read().strip()\n\nsetup(\n name='eth2spec',\n version=spec_version,\n description=\"Eth2 spec, provided as Python package for tooling and testing\",\n long_description=readme,\n long_description_content_type=\"text/markdown\",\n author=\"ethereum\",\n url=\"https://github.com/ethereum/eth2.0-specs\",\n include_package_data=False,\n package_data={'configs': ['*.yaml'],\n 'specs': ['**/*.md'],\n 'eth2spec': ['VERSION.txt']},\n package_dir={\n \"eth2spec\": \"tests/core/pyspec/eth2spec\",\n \"configs\": \"configs\",\n \"specs\": \"specs\"\n },\n packages=find_packages(where='tests/core/pyspec') + ['configs', 'specs'],\n py_modules=[\"eth2spec\"],\n cmdclass=commands,\n python_requires=\">=3.8, <4\",\n extras_require={\n \"test\": [\"pytest>=4.4\", \"pytest-cov\", \"pytest-xdist\"],\n \"lint\": [\"flake8==3.7.7\", \"mypy==0.750\"],\n },\n install_requires=[\n \"eth-utils>=1.3.0,<2\",\n \"eth-typing>=2.1.0,<3.0.0\",\n \"pycryptodome==3.9.4\",\n \"py_ecc==2.0.0\",\n \"dataclasses==0.6\",\n \"remerkleable==0.1.13\",\n \"ruamel.yaml==0.16.5\",\n \"lru-dict==1.1.6\"\n ]\n)\n", "path": "setup.py" } ]
diff --git a/.circleci/config.yml b/.circleci/config.yml index 5c4b77e784..3a67e55281 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -79,16 +79,16 @@ jobs: # Restore git repo at point close to target branch/revision, to speed up checkout - restore_cache: keys: - - v2-specs-repo-{{ .Branch }}-{{ .Revision }} - - v2-specs-repo-{{ .Branch }}- - - v2-specs-repo- + - v3-specs-repo-{{ .Branch }}-{{ .Revision }} + - v3-specs-repo-{{ .Branch }}- + - v3-specs-repo- - checkout - run: name: Clean up git repo to reduce cache size command: git gc # Save the git checkout as a cache, to make cloning next time faster. - save_cache: - key: v2-specs-repo-{{ .Branch }}-{{ .Revision }} + key: v3-specs-repo-{{ .Branch }}-{{ .Revision }} paths: - ~/specs-repo install_pyspec_test: @@ -97,7 +97,7 @@ jobs: working_directory: ~/specs-repo steps: - restore_cache: - key: v2-specs-repo-{{ .Branch }}-{{ .Revision }} + key: v3-specs-repo-{{ .Branch }}-{{ .Revision }} - restore_pyspec_cached_venv - run: name: Install pyspec requirements @@ -109,7 +109,7 @@ jobs: working_directory: ~/specs-repo steps: - restore_cache: - key: v2-specs-repo-{{ .Branch }}-{{ .Revision }} + key: v3-specs-repo-{{ .Branch }}-{{ .Revision }} - restore_pyspec_cached_venv - run: name: Run py-tests @@ -140,7 +140,7 @@ jobs: working_directory: ~/specs-repo steps: - restore_cache: - key: v2-specs-repo-{{ .Branch }}-{{ .Revision }} + key: v3-specs-repo-{{ .Branch }}-{{ .Revision }} - restore_pyspec_cached_venv - run: name: Run linter @@ -152,7 +152,7 @@ jobs: working_directory: ~/specs-repo steps: - restore_cache: - key: v2-specs-repo-{{ .Branch }}-{{ .Revision }} + key: v3-specs-repo-{{ .Branch }}-{{ .Revision }} - restore_deposit_contract_compiler_cached_venv - run: name: Install deposit contract compiler requirements @@ -164,7 +164,7 @@ jobs: working_directory: ~/specs-repo steps: - restore_cache: - key: v2-specs-repo-{{ .Branch }}-{{ .Revision }} + key: v3-specs-repo-{{ .Branch }}-{{ .Revision }} - restore_deposit_contract_tester_cached_venv - run: name: Install deposit contract tester requirements @@ -176,7 +176,7 @@ jobs: working_directory: ~/specs-repo steps: - restore_cache: - key: v2-specs-repo-{{ .Branch }}-{{ .Revision }} + key: v3-specs-repo-{{ .Branch }}-{{ .Revision }} - restore_deposit_contract_compiler_cached_venv - run: name: Run deposit contract compile test @@ -187,7 +187,7 @@ jobs: working_directory: ~/specs-repo steps: - restore_cache: - key: v2-specs-repo-{{ .Branch }}-{{ .Revision }} + key: v3-specs-repo-{{ .Branch }}-{{ .Revision }} - restore_deposit_contract_tester_cached_venv - run: name: Run deposit contract test diff --git a/Makefile b/Makefile index e8f3d21bc5..e53aaf8a2a 100644 --- a/Makefile +++ b/Makefile @@ -117,7 +117,7 @@ install_deposit_contract_compiler: compile_deposit_contract: cd $(DEPOSIT_CONTRACT_COMPILER_DIR); . venv/bin/activate; \ - python3.7 deposit_contract/compile.py contracts/validator_registration.vy + python3.7 deposit_contract/compile.py ../contracts/validator_registration.vy test_compile_deposit_contract: cd $(DEPOSIT_CONTRACT_COMPILER_DIR); . venv/bin/activate; \ diff --git a/configs/mainnet.yaml b/configs/mainnet.yaml index 8306bb3782..6d71cfa47c 100644 --- a/configs/mainnet.yaml +++ b/configs/mainnet.yaml @@ -162,9 +162,6 @@ DOMAIN_CUSTODY_BIT_SLASHING: 0x83000000 # --------------------------------------------------------------- PHASE_1_FORK_VERSION: 0x01000000 INITIAL_ACTIVE_SHARDS: 64 -# Placeholder -INITIAL_GASPRICE: 10 - # Phase 1: General # --------------------------------------------------------------- @@ -190,8 +187,8 @@ SHARD_BLOCK_OFFSETS: [1, 2, 3, 5, 8, 13, 21, 34, 55, 89, 144, 233] MAX_SHARD_BLOCKS_PER_ATTESTATION: 12 # 2**14 (= 16,384) Gwei MAX_GASPRICE: 16384 -# 2**5 (= 32) Gwei -MIN_GASPRICE: 32 +# 2**3 (= 8) Gwei +MIN_GASPRICE: 8 # 2**3 (= 8) GASPRICE_ADJUSTMENT_COEFFICIENT: 8 diff --git a/configs/minimal.yaml b/configs/minimal.yaml index e5d6ca308f..9daf428b4a 100644 --- a/configs/minimal.yaml +++ b/configs/minimal.yaml @@ -164,8 +164,6 @@ DOMAIN_CUSTODY_BIT_SLASHING: 0x83000000 PHASE_1_FORK_VERSION: 0x01000001 # [customized] reduced for testing INITIAL_ACTIVE_SHARDS: 4 -# Placeholder -INITIAL_GASPRICE: 10 # Phase 1: General @@ -192,8 +190,8 @@ SHARD_BLOCK_OFFSETS: [1, 2, 3, 5, 8, 13, 21, 34, 55, 89, 144, 233] MAX_SHARD_BLOCKS_PER_ATTESTATION: 12 # 2**14 (= 16,384) Gwei MAX_GASPRICE: 16384 -# 2**5 (= 32) Gwei -MIN_GASPRICE: 32 +# 2**3 (= 8) Gwei +MIN_GASPRICE: 8 # 2**3 (= 8) GASPRICE_ADJUSTMENT_COEFFICIENT: 8 diff --git a/setup.py b/setup.py index 911eb65b0e..d1c62fb72f 100644 --- a/setup.py +++ b/setup.py @@ -499,7 +499,7 @@ def run(self): "pycryptodome==3.9.4", "py_ecc==2.0.0", "dataclasses==0.6", - "remerkleable==0.1.12", + "remerkleable==0.1.13", "ruamel.yaml==0.16.5", "lru-dict==1.1.6" ] diff --git a/specs/phase0/beacon-chain.md b/specs/phase0/beacon-chain.md index 23fa5ceee2..cdf38dc1e4 100644 --- a/specs/phase0/beacon-chain.md +++ b/specs/phase0/beacon-chain.md @@ -684,14 +684,10 @@ def is_slashable_attestation_data(data_1: AttestationData, data_2: AttestationDa ```python def is_valid_indexed_attestation(state: BeaconState, indexed_attestation: IndexedAttestation) -> bool: """ - Check if ``indexed_attestation`` has valid indices and signature. + Check if ``indexed_attestation`` has sorted and unique indices and a valid aggregate signature. """ - indices = indexed_attestation.attesting_indices - - # Verify max number of indices - if not len(indices) <= MAX_VALIDATORS_PER_COMMITTEE: - return False # Verify indices are sorted and unique + indices = indexed_attestation.attesting_indices if not indices == sorted(set(indices)): return False # Verify aggregate signature @@ -1195,7 +1191,7 @@ Let `genesis_block = BeaconBlock(state_root=hash_tree_root(genesis_state))`. ## Beacon chain state transition function -The post-state corresponding to a pre-state `state` and a signed block `signed_block` is defined as `state_transition(state, signed_block)`. State transitions that trigger an unhandled exception (e.g. a failed `assert` or an out-of-range list access) are considered invalid. +The post-state corresponding to a pre-state `state` and a signed block `signed_block` is defined as `state_transition(state, signed_block)`. State transitions that trigger an unhandled exception (e.g. a failed `assert` or an out-of-range list access) are considered invalid. State transitions that cause a `uint64` overflow or underflow are also considered invalid. ```python def state_transition(state: BeaconState, signed_block: SignedBeaconBlock, validate_result: bool=True) -> BeaconState: diff --git a/specs/phase0/fork-choice.md b/specs/phase0/fork-choice.md index c42609be09..18c7a15806 100644 --- a/specs/phase0/fork-choice.md +++ b/specs/phase0/fork-choice.md @@ -108,7 +108,7 @@ def get_forkchoice_store(anchor_state: BeaconState) -> Store: justified_checkpoint = Checkpoint(epoch=anchor_epoch, root=anchor_root) finalized_checkpoint = Checkpoint(epoch=anchor_epoch, root=anchor_root) return Store( - time=anchor_state.genesis_time, + time=anchor_state.genesis_time + SECONDS_PER_SLOT * anchor_state.slot, genesis_time=anchor_state.genesis_time, justified_checkpoint=justified_checkpoint, finalized_checkpoint=finalized_checkpoint, diff --git a/specs/phase0/p2p-interface.md b/specs/phase0/p2p-interface.md index 841432efd8..d3b9150d17 100644 --- a/specs/phase0/p2p-interface.md +++ b/specs/phase0/p2p-interface.md @@ -105,6 +105,7 @@ It consists of four main sections: - [Discovery](#discovery) - [Why are we using discv5 and not libp2p Kademlia DHT?](#why-are-we-using-discv5-and-not-libp2p-kademlia-dht) - [What is the difference between an ENR and a multiaddr, and why are we using ENRs?](#what-is-the-difference-between-an-enr-and-a-multiaddr-and-why-are-we-using-enrs) + - [Why do we not form ENRs and find peers until genesis block/state is known?](#why-do-we-not-form-enrs-and-find-peers-until-genesis-blockstate-is-known) - [Compression/Encoding](#compressionencoding) - [Why are we using SSZ for encoding?](#why-are-we-using-ssz-for-encoding) - [Why are we compressing, and at which layers?](#why-are-we-compressing-and-at-which-layers) @@ -247,6 +248,8 @@ Topics are plain UTF-8 strings and are encoded on the wire as determined by prot - `Name` - see table below - `Encoding` - the encoding strategy describes a specific representation of bytes that will be transmitted over the wire. See the [Encodings](#Encoding-strategies) section for further details. +*Note*: `ForkDigestValue` is composed of values that are not known until the genesis block/state are available. Due to this, clients SHOULD NOT subscribe to gossipsub topics until these genesis values are known. + Each gossipsub [message](https://github.com/libp2p/go-libp2p-pubsub/blob/master/pb/rpc.proto#L17-L24) has a maximum size of `GOSSIP_MAX_SIZE`. Clients MUST reject (fail validation) messages that are over this size limit. Likewise, clients MUST NOT emit or propagate messages larger than this limit. The `message-id` of a gossipsub message MUST be: @@ -286,8 +289,8 @@ There are two primary global topics used to propagate beacon blocks and aggregat - The block is proposed by the expected `proposer_index` for the block's slot in the context of the current shuffling (defined by `parent_root`/`slot`). If the `proposer_index` cannot immediately be verified against the expected shuffling, the block MAY be queued for later processing while proposers for the block's branch are calculated. - `beacon_aggregate_and_proof` - This topic is used to propagate aggregated attestations (as `SignedAggregateAndProof`s) to subscribing nodes (typically validators) to be included in future blocks. The following validations MUST pass before forwarding the `signed_aggregate_and_proof` on the network. (We define the following for convenience -- `aggregate_and_proof = signed_aggregate_and_proof.message` and `aggregate = aggregate_and_proof.aggregate`) - `aggregate.data.slot` is within the last `ATTESTATION_PROPAGATION_SLOT_RANGE` slots (with a `MAXIMUM_GOSSIP_CLOCK_DISPARITY` allowance) -- i.e. `aggregate.data.slot + ATTESTATION_PROPAGATION_SLOT_RANGE >= current_slot >= aggregate.data.slot` (a client MAY queue future aggregates for processing at the appropriate slot). - - The aggregate attestation defined by `hash_tree_root(aggregate)` has _not_ already been seen (via aggregate gossip, within a block, or through the creation of an equivalent aggregate locally). - - The `aggregate` is the first valid aggregate received for the aggregator with index `aggregate_and_proof.aggregator_index` for the slot `aggregate.data.slot`. + - The aggregate attestation defined by `hash_tree_root(aggregate)` has _not_ already been seen (via aggregate gossip, within a verified block, or through the creation of an equivalent aggregate locally). + - The `aggregate` is the first valid aggregate received for the aggregator with index `aggregate_and_proof.aggregator_index` for the epoch `aggregate.data.target.epoch`. - The block being voted for (`aggregate.data.beacon_block_root`) passes validation. - `aggregate_and_proof.selection_proof` selects the validator as an aggregator for the slot -- i.e. `is_aggregator(state, aggregate.data.slot, aggregate.data.index, aggregate_and_proof.selection_proof)` returns `True`. - The aggregator's validator index is within the aggregate's committee -- i.e. `aggregate_and_proof.aggregator_index in get_attesting_indices(state, aggregate.data, aggregate.aggregation_bits)`. @@ -316,7 +319,7 @@ Attestation subnets are used to propagate unaggregated attestations to subsectio - The attestation's committee index (`attestation.data.index`) is for the correct subnet. - `attestation.data.slot` is within the last `ATTESTATION_PROPAGATION_SLOT_RANGE` slots (within a `MAXIMUM_GOSSIP_CLOCK_DISPARITY` allowance) -- i.e. `attestation.data.slot + ATTESTATION_PROPAGATION_SLOT_RANGE >= current_slot >= attestation.data.slot` (a client MAY queue future attestations for processing at the appropriate slot). - The attestation is unaggregated -- that is, it has exactly one participating validator (`len([bit for bit in attestation.aggregation_bits if bit == 0b1]) == 1`). - - The attestation is the first valid attestation received for the participating validator for the slot, `attestation.data.slot`. + - There has been no other valid attestation seen on an attestation subnet that has an identical `attestation.data.target.epoch` and participating validator index. - The block being voted for (`attestation.data.beacon_block_root`) passes validation. - The signature of `attestation` is valid. @@ -342,7 +345,9 @@ Topics are post-fixed with an encoding. Encodings define how the payload of a go #### Mainnet -- `ssz_snappy` - All objects are SSZ-encoded and then compressed with [Snappy](https://github.com/google/snappy). Example: The beacon aggregate attestation topic string is `/eth2/beacon_aggregate_and_proof/ssz_snappy`, and the data field of a gossipsub message is an `AggregateAndProof` that has been SSZ-encoded and then compressed with Snappy. +- `ssz_snappy` - All objects are SSZ-encoded and then compressed with [Snappy](https://github.com/google/snappy) block compression. Example: The beacon aggregate attestation topic string is `/eth2/beacon_aggregate_and_proof/ssz_snappy`, and the data field of a gossipsub message is an `AggregateAndProof` that has been SSZ-encoded and then compressed with Snappy. + +Snappy has two formats: "block" and "frames" (streaming). Gossip messages remain relatively small (100s of bytes to 100s of kilobytes) so [basic snappy block compression](https://github.com/google/snappy/blob/master/format_description.txt) is used to avoid the additional overhead associated with snappy frames. Implementations MUST use a single encoding. Changing an encoding will require coordination between participating implementations. @@ -445,7 +450,7 @@ Here, `result` represents the 1-byte response code. The token of the negotiated protocol ID specifies the type of encoding to be used for the req/resp interaction. Two values are possible at this time: - `ssz`: the contents are [SSZ-encoded](../../ssz/simple-serialize.md). This encoding type MUST be supported by all clients. For objects containing a single field, only the field is SSZ-encoded not a container with a single field. For example, the `BeaconBlocksByRoot` request is an SSZ-encoded list of `Root`'s. -- `ssz_snappy`: The contents are SSZ-encoded and then compressed with [Snappy](https://github.com/google/snappy). MAY be supported in the interoperability testnet; MUST be supported in mainnet. +- `ssz_snappy`: The contents are SSZ-encoded and then compressed with [Snappy](https://github.com/google/snappy) frames compression. MAY be supported in the interoperability testnet; MUST be supported in mainnet. #### SSZ-encoding strategy (with or without Snappy) @@ -458,7 +463,7 @@ Snappy has two formats: "block" and "frames" (streaming). To support large reque Since snappy frame contents [have a maximum size of `65536` bytes](https://github.com/google/snappy/blob/master/framing_format.txt#L104) and frame headers are just `identifier (1) + checksum (4)` bytes, the expected buffering of a single frame is acceptable. -**Encoding-dependent header:** Req/Resp protocols using the `ssz` or `ssz_snappy` encoding strategies MUST encode the length of the raw SSZ bytes, encoded as an unsigned [protobuf varint](https://developers.google.com/protocol-buffers/docs/encoding#varints). +**Encoding-dependent header:** Req/Resp protocols using the `ssz` or `ssz_snappy` encoding strategies MUST encode the length of the raw SSZ bytes, encoded as an unsigned [protobuf varint](https://developers.google.com/protocol-buffers/docs/encoding#varints). *Writing*: By first computing and writing the SSZ byte length, the SSZ encoder can then directly write the chunk contents to the stream. If Snappy is applied, it can be passed through a buffered Snappy writer to compress frame by frame. @@ -572,7 +577,7 @@ Response Content: ) ``` -Requests count beacon blocks from the peer starting from `start_slot`, leading up to the current head block as selected by fork choice. `step` defines the slot increment between blocks. For example, requesting blocks starting at `start_slot` 2 with a step value of 2 would return the blocks at slots [2, 4, 6, …]. In cases where a slot is empty for a given slot number, no block is returned. For example, if slot 4 were empty in the previous example, the returned array would contain [2, 6, …]. A step value of 1 returns all blocks on the range `[start_slot, start_slot + count)`. +Requests beacon blocks in the slot range `[start_slot, start_slot + count * step)`, leading up to the current head block as selected by fork choice. `step` defines the slot increment between blocks. For example, requesting blocks starting at `start_slot` 2 with a step value of 2 would return the blocks at slots [2, 4, 6, …]. In cases where a slot is empty for a given slot number, no block is returned. For example, if slot 4 were empty in the previous example, the returned array would contain [2, 6, …]. `BeaconBlocksByRange` is primarily used to sync historical blocks. @@ -752,6 +757,8 @@ where the fields of `ENRForkID` are defined as * `next_fork_version` is the fork version corresponding to the next planned hard fork at a future epoch. If no future fork is planned, set `next_fork_version = current_fork_version` to signal this fact * `next_fork_epoch` is the epoch at which the next fork is planned and the `current_fork_version` will be updated. If no future fork is planned, set `next_fork_epoch = FAR_FUTURE_EPOCH` to signal this fact +*Note*: `fork_digest` is composed of values that are not not known until the genesis block/state are available. Due to this, clients SHOULD NOT form ENRs and begin peer discovery until genesis values are known. One notable exception to this rule is the distribution of bootnode ENRs prior to genesis. In this case, bootnode ENRs SHOULD be initially distributed with `eth2` field set as `ENRForkID(fork_digest=compute_fork_digest(GENESIS_FORK_VERSION, b'\x00'*32), next_fork_version=GENESIS_FORK_VERSION, next_fork_epoch=FAR_FUTURE_EPOCH)`. After genesis values are known, the bootnodes SHOULD update ENRs to participate in normal discovery operations. + Clients SHOULD connect to peers with `fork_digest`, `next_fork_version`, and `next_fork_epoch` that match local values. Clients MAY connect to peers with the same `fork_digest` but a different `next_fork_version`/`next_fork_epoch`. Unless `ENRForkID` is manually updated to matching prior to the earlier `next_fork_epoch` of the two clients, these connecting clients will be unable to successfully interact starting at the earlier `next_fork_epoch`. @@ -1092,6 +1099,12 @@ discv5 uses ENRs and we will presumably need to: 1. Add `multiaddr` to the dictionary, so that nodes can advertise their multiaddr under a reserved namespace in ENRs. – and/or – 2. Define a bi-directional conversion function between multiaddrs and the corresponding denormalized fields in an ENR (ip, ip6, tcp, tcp6, etc.), for compatibility with nodes that do not support multiaddr natively (e.g. Eth 1.0 nodes). +### Why do we not form ENRs and find peers until genesis block/state is known? + +Although client software might very well be running locally prior to the solidification of the eth2 genesis state and block, clients cannot form valid ENRs prior to this point. ENRs contain `fork_digest` which utilizes the `genesis_validators_root` for a cleaner separation between chains so prior to knowing genesis, we cannot use `fork_digest` to cleanly find peers on our intended chain. Once genesis data is known, we can then form ENRs and safely find peers. + +When using an eth1 deposit contract for deposits, `fork_digest` will be known at least `MIN_GENESIS_DELAY` (24 hours in mainnet configuration) before `genesis_time`, providing ample time to find peers and form initial connections and gossip subnets prior to genesis. + ## Compression/Encoding ### Why are we using SSZ for encoding? diff --git a/specs/phase1/beacon-chain.md b/specs/phase1/beacon-chain.md index be80f3f4a4..596b3818f8 100644 --- a/specs/phase1/beacon-chain.md +++ b/specs/phase1/beacon-chain.md @@ -34,8 +34,10 @@ - [Misc](#misc-1) - [`get_previous_slot`](#get_previous_slot) - [`pack_compact_validator`](#pack_compact_validator) + - [`unpack_compact_validator`](#unpack_compact_validator) - [`committee_to_compact_committee`](#committee_to_compact_committee) - [`compute_shard_from_committee_index`](#compute_shard_from_committee_index) + - [`compute_offset_slots`](#compute_offset_slots) - [Beacon state accessors](#beacon-state-accessors) - [`get_active_shard_count`](#get_active_shard_count) - [`get_online_validator_indices`](#get_online_validator_indices) @@ -46,9 +48,10 @@ - [`get_updated_gasprice`](#get_updated_gasprice) - [`get_start_shard`](#get_start_shard) - [`get_shard`](#get_shard) - - [`get_next_slot_for_shard`](#get_next_slot_for_shard) + - [`get_latest_slot_for_shard`](#get_latest_slot_for_shard) - [`get_offset_slots`](#get_offset_slots) - [Predicates](#predicates) + - [`is_winning_attestation`](#is_winning_attestation) - [Updated `is_valid_indexed_attestation`](#updated-is_valid_indexed_attestation) - [Block processing](#block-processing) - [Operations](#operations) @@ -57,7 +60,7 @@ - [`apply_shard_transition`](#apply_shard_transition) - [`process_crosslink_for_shard`](#process_crosslink_for_shard) - [`process_crosslinks`](#process_crosslinks) - - [`process_attestations`](#process_attestations) + - [`process_attestation`](#process_attestation) - [New Attester slashing processing](#new-attester-slashing-processing) - [Shard transition false positives](#shard-transition-false-positives) - [Light client processing](#light-client-processing) @@ -101,7 +104,7 @@ Configuration is not namespaced. Instead it is strictly an extension; | `SHARD_BLOCK_OFFSETS` | `[1, 2, 3, 5, 8, 13, 21, 34, 55, 89, 144, 233]` | | | `MAX_SHARD_BLOCKS_PER_ATTESTATION` | `len(SHARD_BLOCK_OFFSETS)` | | | `MAX_GASPRICE` | `Gwei(2**14)` (= 16,384) | Gwei | | -| `MIN_GASPRICE` | `Gwei(2**5)` (= 32) | Gwei | | +| `MIN_GASPRICE` | `Gwei(2**3)` (= 8) | Gwei | | | `GASPRICE_ADJUSTMENT_COEFFICIENT` | `2**3` (= 8) | | | `DOMAIN_SHARD_PROPOSAL` | `DomainType('0x80000000')` | | | `DOMAIN_SHARD_COMMITTEE` | `DomainType('0x81000000')` | | @@ -371,15 +374,29 @@ def get_previous_slot(slot: Slot) -> Slot: #### `pack_compact_validator` ```python -def pack_compact_validator(index: int, slashed: bool, balance_in_increments: int) -> int: +def pack_compact_validator(index: ValidatorIndex, slashed: bool, balance_in_increments: uint64) -> uint64: """ - Creates a compact validator object representing index, slashed status, and compressed balance. + Create a compact validator object representing index, slashed status, and compressed balance. Takes as input balance-in-increments (// EFFECTIVE_BALANCE_INCREMENT) to preserve symmetry with the unpacking function. """ return (index << 16) + (slashed << 15) + balance_in_increments ``` +#### `unpack_compact_validator` + +```python +def unpack_compact_validator(compact_validator: uint64) -> Tuple[ValidatorIndex, bool, uint64]: + """ + Return validator index, slashed, balance // EFFECTIVE_BALANCE_INCREMENT + """ + return ( + ValidatorIndex(compact_validator >> 16), + bool((compact_validator >> 15) % 2), + compact_validator & (2**15 - 1), + ) +``` + #### `committee_to_compact_committee` ```python @@ -404,6 +421,16 @@ def compute_shard_from_committee_index(state: BeaconState, index: CommitteeIndex return Shard((index + get_start_shard(state, slot)) % active_shards) ``` +#### `compute_offset_slots` + +```python +def compute_offset_slots(start_slot: Slot, end_slot: Slot) -> Sequence[Slot]: + """ + Return the offset slots that are greater than ``start_slot`` and less than ``end_slot``. + """ + return [Slot(start_slot + x) for x in SHARD_BLOCK_OFFSETS if start_slot + x < end_slot] +``` + ### Beacon state accessors #### `get_active_shard_count` @@ -495,23 +522,40 @@ def get_shard(state: BeaconState, attestation: Attestation) -> Shard: return compute_shard_from_committee_index(state, attestation.data.index, attestation.data.slot) ``` -#### `get_next_slot_for_shard` +#### `get_latest_slot_for_shard` ```python -def get_next_slot_for_shard(state: BeaconState, shard: Shard) -> Slot: - return Slot(state.shard_states[shard].slot + 1) +def get_latest_slot_for_shard(state: BeaconState, shard: Shard) -> Slot: + return state.shard_states[shard].slot ``` - #### `get_offset_slots` ```python -def get_offset_slots(state: BeaconState, start_slot: Slot) -> Sequence[Slot]: - return [Slot(start_slot + x) for x in SHARD_BLOCK_OFFSETS if start_slot + x < state.slot] +def get_offset_slots(state: BeaconState, shard: Shard) -> Sequence[Slot]: + return compute_offset_slots(state.shard_states[shard].slot, state.slot) ``` ### Predicates +#### `is_winning_attestation` + +```python +def is_winning_attestation(state: BeaconState, + attestation: PendingAttestation, + committee_index: CommitteeIndex, + winning_root: Root) -> bool: + """ + Check if ``attestation`` helped contribute to the successful crosslink of + ``winning_root`` formed by ``committee_index`` committee at the current slot. + """ + return ( + attestation.slot == state.slot + and attestation.data.index == committee_index + and attestation.data.shard_transition_root == winning_root + ) +``` + #### Updated `is_valid_indexed_attestation` Note that this replaces the Phase 0 `is_valid_indexed_attestation`. @@ -528,7 +572,7 @@ def is_valid_indexed_attestation(state: BeaconState, indexed_attestation: Indexe domain = get_domain(state, DOMAIN_BEACON_ATTESTER, attestation.data.target.epoch) aggregation_bits = attestation.aggregation_bits assert len(aggregation_bits) == len(indexed_attestation.committee) - + if len(attestation.custody_bits_blocks) == 0: # fall back on phase0 behavior if there is no shard data. for participant, abit in zip(indexed_attestation.committee, aggregation_bits): @@ -543,8 +587,12 @@ def is_valid_indexed_attestation(state: BeaconState, indexed_attestation: Indexe if abit: all_pubkeys.append(state.validators[participant].pubkey) # Note: only 2N distinct message hashes - all_signing_roots.append(compute_signing_root( - AttestationCustodyBitWrapper(hash_tree_root(attestation.data), i, cbit), domain)) + attestation_wrapper = AttestationCustodyBitWrapper( + attestation_data_root=hash_tree_root(attestation.data), + block_index=i, + bit=cbit + ) + all_signing_roots.append(compute_signing_root(attestation_wrapper, domain)) else: assert not cbit return bls.AggregateVerify(zip(all_pubkeys, all_signing_roots), signature=attestation.signature) @@ -570,23 +618,23 @@ def process_block(state: BeaconState, block: BeaconBlock) -> None: def process_operations(state: BeaconState, body: BeaconBlockBody) -> None: # Verify that outstanding deposits are processed up to the maximum number of deposits assert len(body.deposits) == min(MAX_DEPOSITS, state.eth1_data.deposit_count - state.eth1_deposit_index) - + def for_ops(operations: Sequence[Any], fn: Callable[[BeaconState, Any], None]) -> None: for operation in operations: fn(state, operation) - + for_ops(body.proposer_slashings, process_proposer_slashing) for_ops(body.attester_slashings, process_attester_slashing) - # New attestation processing - process_attestations(state, body, body.attestations) - + for_ops(body.attestations, process_attestation) for_ops(body.deposits, process_deposit) for_ops(body.voluntary_exits, process_voluntary_exit) # See custody game spec. process_custody_game_operations(state, body) + process_crosslinks(state, body.shard_transitions, body.attestations) + # TODO process_operations(body.shard_receipt_proofs, process_shard_receipt_proofs) ``` @@ -600,6 +648,7 @@ def validate_attestation(state: BeaconState, attestation: Attestation) -> None: assert data.index < get_committee_count_at_slot(state, data.slot) assert data.index < get_active_shard_count(state) assert data.target.epoch in (get_previous_epoch(state), get_current_epoch(state)) + assert data.target.epoch == compute_epoch_at_slot(data.slot) assert data.slot + MIN_ATTESTATION_INCLUSION_DELAY <= state.slot <= data.slot + SLOTS_PER_EPOCH committee = get_beacon_committee(state, data.slot, data.index) @@ -611,40 +660,39 @@ def validate_attestation(state: BeaconState, attestation: Attestation) -> None: assert attestation.data.source == state.previous_justified_checkpoint shard = get_shard(state, attestation) - shard_start_slot = get_next_slot_for_shard(state, shard) - # Signature check - assert is_valid_indexed_attestation(state, get_indexed_attestation(state, attestation)) - # Type 1: on-time attestations + # Type 1: on-time attestations, the custody bits should be non-empty. if attestation.custody_bits_blocks != []: - # Correct slot + # Ensure on-time attestation assert data.slot + MIN_ATTESTATION_INCLUSION_DELAY == state.slot # Correct data root count - assert len(attestation.custody_bits_blocks) == len(get_offset_slots(state, shard_start_slot)) + assert len(attestation.custody_bits_blocks) == len(get_offset_slots(state, shard)) # Correct parent block root assert data.beacon_block_root == get_block_root_at_slot(state, get_previous_slot(state.slot)) - # Type 2: no shard transition, no custody bits # TODO: could only allow for older attestations. + # Type 2: no shard transition, no custody bits else: - # assert state.slot - compute_start_slot_at_epoch(compute_epoch_at_slot(data.slot)) < SLOTS_PER_EPOCH + # Ensure delayed attestation + assert data.slot + MIN_ATTESTATION_INCLUSION_DELAY < state.slot + # Late attestations cannot have a shard transition root assert data.shard_transition_root == Root() + + # Signature check + assert is_valid_indexed_attestation(state, get_indexed_attestation(state, attestation)) ``` ###### `apply_shard_transition` ```python def apply_shard_transition(state: BeaconState, shard: Shard, transition: ShardTransition) -> None: - # Slot the attestation starts counting from - start_slot = get_next_slot_for_shard(state, shard) - # Correct data root count - offset_slots = get_offset_slots(state, start_slot) + offset_slots = get_offset_slots(state, shard) assert ( len(transition.shard_data_roots) == len(transition.shard_states) == len(transition.shard_block_lengths) == len(offset_slots) ) - assert transition.start_slot == start_slot + assert transition.start_slot == offset_slots[0] # Reconstruct shard headers headers = [] @@ -687,11 +735,12 @@ def apply_shard_transition(state: BeaconState, shard: Shard, transition: ShardTr ```python def process_crosslink_for_shard(state: BeaconState, - shard: Shard, + committee_index: CommitteeIndex, shard_transition: ShardTransition, attestations: Sequence[Attestation]) -> Root: - committee = get_beacon_committee(state, get_current_epoch(state), shard) + committee = get_beacon_committee(state, state.slot, committee_index) online_indices = get_online_validator_indices(state) + shard = compute_shard_from_committee_index(state, committee_index, state.slot) # Loop over all shard transition roots shard_transition_roots = set([a.data.shard_transition_root for a in attestations]) @@ -723,7 +772,7 @@ def process_crosslink_for_shard(state: BeaconState, increase_balance(state, beacon_proposer_index, proposer_reward) states_slots_lengths = zip( shard_transition.shard_states, - get_offset_slots(state, get_next_slot_for_shard(state, shard)), + get_offset_slots(state, get_latest_slot_for_shard(state, shard)), shard_transition.shard_block_lengths ) for shard_state, slot, length in states_slots_lengths: @@ -742,49 +791,42 @@ def process_crosslink_for_shard(state: BeaconState, ```python def process_crosslinks(state: BeaconState, - block_body: BeaconBlockBody, - attestations: Sequence[Attestation]) -> Set[Tuple[Shard, Root]]: - winners: Set[Tuple[Shard, Root]] = set() + shard_transitions: Sequence[ShardTransition], + attestations: Sequence[Attestation]) -> None: committee_count = get_committee_count_at_slot(state, state.slot) for committee_index in map(CommitteeIndex, range(committee_count)): shard = compute_shard_from_committee_index(state, committee_index, state.slot) - # All attestations in the block for this shard + # All attestations in the block for this committee/shard and current slot shard_attestations = [ attestation for attestation in attestations - if get_shard(state, attestation) == shard and attestation.data.slot == state.slot + if attestation.data.index == committee_index and attestation.data.slot == state.slot ] - shard_transition = block_body.shard_transitions[shard] - winning_root = process_crosslink_for_shard(state, shard, shard_transition, shard_attestations) + shard_transition = shard_transitions[shard] + winning_root = process_crosslink_for_shard(state, committee_index, shard_transition, shard_attestations) if winning_root != Root(): - winners.add((shard, winning_root)) - return winners + # Mark relevant pending attestations as creating a successful crosslink + for pending_attestation in state.current_epoch_attestations: + if is_winning_attestation(state, pending_attestation, committee_index, winning_root): + pending_attestation.crosslink_success = True ``` -###### `process_attestations` +###### `process_attestation` ```python -def process_attestations(state: BeaconState, block_body: BeaconBlockBody, attestations: Sequence[Attestation]) -> None: - # Basic validation - for attestation in attestations: - validate_attestation(state, attestation) - - # Process crosslinks - winners = process_crosslinks(state, block_body, attestations) - - # Store pending attestations for epoch processing - for attestation in attestations: - is_winning_transition = (get_shard(state, attestation), attestation.data.shard_transition_root) in winners - pending_attestation = PendingAttestation( - aggregation_bits=attestation.aggregation_bits, - data=attestation.data, - inclusion_delay=state.slot - attestation.data.slot, - crosslink_success=is_winning_transition and attestation.data.slot == state.slot, - proposer_index=get_beacon_proposer_index(state), - ) - if attestation.data.target.epoch == get_current_epoch(state): - state.current_epoch_attestations.append(pending_attestation) - else: - state.previous_epoch_attestations.append(pending_attestation) +def process_attestation(state: BeaconState, attestation: Attestation) -> None: + validate_attestation(state, attestation) + # Store pending attestation for epoch processing + pending_attestation = PendingAttestation( + aggregation_bits=attestation.aggregation_bits, + data=attestation.data, + inclusion_delay=state.slot - attestation.data.slot, + proposer_index=get_beacon_proposer_index(state), + crosslink_success=False, # To be filled in during process_crosslinks + ) + if attestation.data.target.epoch == get_current_epoch(state): + state.current_epoch_attestations.append(pending_attestation) + else: + state.previous_epoch_attestations.append(pending_attestation) ``` ##### New Attester slashing processing @@ -803,6 +845,7 @@ def get_indices_from_committee( def process_attester_slashing(state: BeaconState, attester_slashing: AttesterSlashing) -> None: indexed_attestation_1 = attester_slashing.attestation_1 indexed_attestation_2 = attester_slashing.attestation_2 + assert is_slashable_attestation_data( indexed_attestation_1.attestation.data, indexed_attestation_2.attestation.data, @@ -856,7 +899,7 @@ def process_light_client_signatures(state: BeaconState, block_body: BeaconBlockB slot = get_previous_slot(state.slot) signing_root = compute_signing_root(get_block_root_at_slot(state, slot), get_domain(state, DOMAIN_LIGHT_CLIENT, compute_epoch_at_slot(slot))) - return bls.FastAggregateVerify(signer_pubkeys, signing_root, signature=block_body.light_client_signature) + assert bls.FastAggregateVerify(signer_pubkeys, signing_root, signature=block_body.light_client_signature) ``` diff --git a/specs/phase1/custody-game.md b/specs/phase1/custody-game.md index af3aadc962..eb243f8fb8 100644 --- a/specs/phase1/custody-game.md +++ b/specs/phase1/custody-game.md @@ -192,10 +192,10 @@ def get_custody_atoms(bytez: bytes) -> Sequence[bytes]: def compute_custody_bit(key: BLSSignature, data: bytes) -> bit: full_G2_element = bls.signature_to_G2(key) s = full_G2_element[0].coeffs - bits = [legendre_bit(sum(s[i % 2]**i * int.from_bytes(atom, "little")), BLS12_381_Q) - for i, atom in enumerate(get_custody_atoms(data))] - # XOR all atom bits - return bit(sum(bits) % 2) + custody_atoms = get_custody_atoms(data) + n = len(custody_atoms) + a = sum(s[i % 2]**i * int.from_bytes(atom, "little") for i, atom in enumerate(custody_atoms) + s[n % 2]**n) + return legendre_bit(a, BLS12_381_Q) ``` ### `get_randao_epoch_for_custody_period` @@ -416,7 +416,13 @@ def process_reveal_deadlines(state: BeaconState) -> None: epoch = get_current_epoch(state) for index, validator in enumerate(state.validators): if get_custody_period_for_validator(ValidatorIndex(index), epoch) > validator.next_custody_secret_to_reveal: - slash_validator(state, ValidatorIndex(index)) + # ------------------ WARNING ----------------------- # + # UNSAFE REMOVAL OF SLASHING TO PRIORITIZE PHASE 0 CI # + # Must find generic way to handle key reveals in tests # + # ---------------------------------------------------- # + + # slash_validator(state, ValidatorIndex(index)) + pass ``` ### Final updates diff --git a/specs/phase1/phase1-fork.md b/specs/phase1/phase1-fork.md index adb0cd2365..173fceeb47 100644 --- a/specs/phase1/phase1-fork.md +++ b/specs/phase1/phase1-fork.md @@ -36,7 +36,6 @@ Warning: this configuration is not definitive. | - | - | | `PHASE_1_FORK_VERSION` | `Version('0x01000000')` | | `INITIAL_ACTIVE_SHARDS` | `2**6` (= 64) | -| `INITIAL_GASPRICE` | `Gwei(10)` | ## Fork to Phase 1 @@ -102,7 +101,7 @@ def upgrade_to_phase1(pre: phase0.BeaconState) -> BeaconState: shard_states=List[ShardState, MAX_SHARDS]( ShardState( slot=pre.slot, - gasprice=INITIAL_GASPRICE, + gasprice=MIN_GASPRICE, data=Root(), latest_block_root=Root(), ) for i in range(INITIAL_ACTIVE_SHARDS) diff --git a/tests/core/pyspec/eth2spec/VERSION.txt b/tests/core/pyspec/eth2spec/VERSION.txt index 027934ea1a..a8839f70de 100644 --- a/tests/core/pyspec/eth2spec/VERSION.txt +++ b/tests/core/pyspec/eth2spec/VERSION.txt @@ -1 +1 @@ -0.11.1 \ No newline at end of file +0.11.2 \ No newline at end of file diff --git a/tests/core/pyspec/eth2spec/config/config_util.py b/tests/core/pyspec/eth2spec/config/config_util.py index 42ad76d69d..4c5768a294 100644 --- a/tests/core/pyspec/eth2spec/config/config_util.py +++ b/tests/core/pyspec/eth2spec/config/config_util.py @@ -8,13 +8,18 @@ # Access to overwrite spec constants based on configuration # This is called by the spec module after declaring its globals, and applies the loaded presets. -def apply_constants_config(spec_globals: Dict[str, Any]) -> None: +def apply_constants_config(spec_globals: Dict[str, Any], warn_if_unknown: bool = False) -> None: global config for k, v in config.items(): - if k.startswith('DOMAIN_'): - spec_globals[k] = spec_globals['DomainType'](v) # domain types are defined as bytes in the configs + # the spec should have default values for everything, if not, the config key is invalid. + if k in spec_globals: + # Keep the same type as the default value indicates (which may be an SSZ basic type subclass, e.g. 'Gwei') + spec_globals[k] = spec_globals[k].__class__(v) else: - spec_globals[k] = v + # Note: Phase 0 spec will not know the phase 1 config values. + # Yet, during debugging you can enable explicit warnings. + if warn_if_unknown: + print(f"WARNING: unknown config key: '{k}' with value: '{v}'") # Load presets from a file, and then prepares the global config setting. This does not apply the config. @@ -36,7 +41,8 @@ def load_config_file(configs_dir, presets_name) -> Dict[str, Any]: out = dict() for k, v in loaded.items(): if isinstance(v, list): - out[k] = v + # Clean up integer values. YAML parser renders lists of ints as list of str + out[k] = [int(item) if item.isdigit() else item for item in v] elif isinstance(v, str) and v.startswith("0x"): out[k] = bytes.fromhex(v[2:]) else: diff --git a/tests/core/pyspec/eth2spec/test/context.py b/tests/core/pyspec/eth2spec/test/context.py index 5d50612c7b..1a182fd312 100644 --- a/tests/core/pyspec/eth2spec/test/context.py +++ b/tests/core/pyspec/eth2spec/test/context.py @@ -7,7 +7,7 @@ from .utils import vector_test, with_meta_tags from random import Random -from typing import Any, Callable, Sequence, TypedDict, Protocol +from typing import Any, Callable, NewType, Sequence, TypedDict, Protocol from importlib import reload @@ -19,25 +19,33 @@ def reload_specs(): # Some of the Spec module functionality is exposed here to deal with phase-specific changes. +SpecForkName = NewType("SpecForkName", str) + +PHASE0 = SpecForkName('phase0') +PHASE1 = SpecForkName('phase1') +ALL_PHASES = (PHASE0, PHASE1) + # TODO: currently phases are defined as python modules. # It would be better if they would be more well-defined interfaces for stronger typing. + + class Spec(Protocol): version: str -class Phase0(Spec): +class SpecPhase0(Spec): ... -class Phase1(Spec): +class SpecPhase1(Spec): def upgrade_to_phase1(self, state: spec_phase0.BeaconState) -> spec_phase1.BeaconState: ... # add transfer, bridge, etc. as the spec evolves class SpecForks(TypedDict, total=False): - phase0: Phase0 - phase1: Phase1 + PHASE0: SpecPhase0 + PHASE1: SpecPhase1 def with_custom_state(balances_fn: Callable[[Any], Sequence[int]], @@ -45,16 +53,19 @@ def with_custom_state(balances_fn: Callable[[Any], Sequence[int]], def deco(fn): def entry(*args, spec: Spec, phases: SpecForks, **kw): try: - p0 = phases["phase0"] + p0 = phases[PHASE0] balances = balances_fn(p0) activation_threshold = threshold_fn(p0) state = create_genesis_state(spec=p0, validator_balances=balances, activation_threshold=activation_threshold) - if spec.fork == 'phase1': + if spec.fork == PHASE1: # TODO: instead of upgrading a test phase0 genesis state we can also write a phase1 state helper. # Decide based on performance/consistency results later. - state = phases["phase1"].upgrade_to_phase1(state) + state = phases[PHASE1].upgrade_to_phase1(state) + # Shard state slot must lag behind BeaconState slot by at least 1 + # Will handle this more elegantly with fork mechanics + spec.process_slots(state, state.slot + 1) kw['state'] = state except KeyError: @@ -217,14 +228,11 @@ def entry(*args, **kw): return entry -all_phases = ['phase0', 'phase1'] - - def with_all_phases(fn): """ A decorator for running a test with every phase """ - return with_phases(all_phases)(fn) + return with_phases(ALL_PHASES)(fn) def with_all_phases_except(exclusion_phases): @@ -232,7 +240,7 @@ def with_all_phases_except(exclusion_phases): A decorator factory for running a tests with every phase except the ones listed """ def decorator(fn): - return with_phases([phase for phase in all_phases if phase not in exclusion_phases])(fn) + return with_phases([phase for phase in ALL_PHASES if phase not in exclusion_phases])(fn) return decorator @@ -258,18 +266,18 @@ def wrapper(*args, **kw): # TODO: test state is dependent on phase0 but is immediately transitioned to phase1. # A new state-creation helper for phase 1 may be in place, and then phase1+ tests can run without phase0 - available_phases.add('phase0') + available_phases.add(PHASE0) phase_dir = {} - if 'phase0' in available_phases: - phase_dir['phase0'] = spec_phase0 - if 'phase1' in available_phases: - phase_dir['phase1'] = spec_phase1 + if PHASE0 in available_phases: + phase_dir[PHASE0] = spec_phase0 + if PHASE1 in available_phases: + phase_dir[PHASE1] = spec_phase1 # return is ignored whenever multiple phases are ran. If - if 'phase0' in run_phases: + if PHASE0 in run_phases: ret = fn(spec=spec_phase0, phases=phase_dir, *args, **kw) - if 'phase1' in run_phases: + if PHASE1 in run_phases: ret = fn(spec=spec_phase1, phases=phase_dir, *args, **kw) return ret return wrapper diff --git a/tests/core/pyspec/eth2spec/test/fork_choice/test_get_head.py b/tests/core/pyspec/eth2spec/test/fork_choice/test_get_head.py index e34c32c0e9..17d4f644f7 100644 --- a/tests/core/pyspec/eth2spec/test/fork_choice/test_get_head.py +++ b/tests/core/pyspec/eth2spec/test/fork_choice/test_get_head.py @@ -1,9 +1,8 @@ from eth2spec.test.context import with_all_phases, spec_state_test -from eth2spec.test.helpers.attestations import get_valid_attestation +from eth2spec.test.helpers.attestations import get_valid_attestation, next_epoch_with_attestations from eth2spec.test.helpers.block import build_empty_block_for_next_slot from eth2spec.test.helpers.state import ( next_epoch, - next_epoch_with_attestations, state_transition_and_sign_block, ) diff --git a/tests/core/pyspec/eth2spec/test/fork_choice/test_on_attestation.py b/tests/core/pyspec/eth2spec/test/fork_choice/test_on_attestation.py index 09248944cd..360c18ccd1 100644 --- a/tests/core/pyspec/eth2spec/test/fork_choice/test_on_attestation.py +++ b/tests/core/pyspec/eth2spec/test/fork_choice/test_on_attestation.py @@ -1,7 +1,7 @@ -from eth2spec.test.context import with_all_phases, spec_state_test +from eth2spec.test.context import PHASE0, with_all_phases, spec_state_test from eth2spec.test.helpers.block import build_empty_block_for_next_slot from eth2spec.test.helpers.attestations import get_valid_attestation, sign_attestation -from eth2spec.test.helpers.state import transition_to, state_transition_and_sign_block +from eth2spec.test.helpers.state import transition_to, state_transition_and_sign_block, next_epoch def run_on_attestation(spec, state, store, attestation, valid=True): @@ -16,7 +16,7 @@ def run_on_attestation(spec, state, store, attestation, valid=True): indexed_attestation = spec.get_indexed_attestation(state, attestation) spec.on_attestation(store, attestation) - if spec.fork == 'phase0': + if spec.fork == PHASE0: sample_index = indexed_attestation.attesting_indices[0] else: attesting_indices = [ @@ -120,11 +120,12 @@ def test_on_attestation_mismatched_target_and_slot(spec, state): @spec_state_test def test_on_attestation_target_not_in_store(spec, state): store = spec.get_forkchoice_store(state) - time = spec.SECONDS_PER_SLOT * spec.SLOTS_PER_EPOCH + time = store.time + spec.SECONDS_PER_SLOT * spec.SLOTS_PER_EPOCH spec.on_tick(store, time) # move to immediately before next epoch to make block new target - transition_to(spec, state, state.slot + spec.SLOTS_PER_EPOCH - 1) + next_epoch = spec.get_current_epoch(state) + 1 + transition_to(spec, state, spec.compute_start_slot_at_epoch(next_epoch) - 1) target_block = build_empty_block_for_next_slot(spec, state) state_transition_and_sign_block(spec, state, target_block) @@ -141,11 +142,12 @@ def test_on_attestation_target_not_in_store(spec, state): @spec_state_test def test_on_attestation_beacon_block_not_in_store(spec, state): store = spec.get_forkchoice_store(state) - time = spec.SECONDS_PER_SLOT * spec.SLOTS_PER_EPOCH + time = store.time + spec.SECONDS_PER_SLOT * spec.SLOTS_PER_EPOCH spec.on_tick(store, time) # move to immediately before next epoch to make block new target - transition_to(spec, state, state.slot + spec.SLOTS_PER_EPOCH - 1) + next_epoch = spec.get_current_epoch(state) + 1 + transition_to(spec, state, spec.compute_start_slot_at_epoch(next_epoch) - 1) target_block = build_empty_block_for_next_slot(spec, state) signed_target_block = state_transition_and_sign_block(spec, state, target_block) @@ -169,7 +171,7 @@ def test_on_attestation_beacon_block_not_in_store(spec, state): @spec_state_test def test_on_attestation_future_epoch(spec, state): store = spec.get_forkchoice_store(state) - time = 3 * spec.SECONDS_PER_SLOT + time = store.time + 3 * spec.SECONDS_PER_SLOT spec.on_tick(store, time) block = build_empty_block_for_next_slot(spec, state) @@ -179,7 +181,7 @@ def test_on_attestation_future_epoch(spec, state): spec.on_block(store, signed_block) # move state forward but not store - state.slot = block.slot + spec.SLOTS_PER_EPOCH + next_epoch(spec, state) attestation = get_valid_attestation(spec, state, slot=state.slot, signed=True) run_on_attestation(spec, state, store, attestation, False) @@ -189,7 +191,7 @@ def test_on_attestation_future_epoch(spec, state): @spec_state_test def test_on_attestation_future_block(spec, state): store = spec.get_forkchoice_store(state) - time = spec.SECONDS_PER_SLOT * 5 + time = store.time + spec.SECONDS_PER_SLOT * 5 spec.on_tick(store, time) block = build_empty_block_for_next_slot(spec, state) @@ -209,7 +211,7 @@ def test_on_attestation_future_block(spec, state): @spec_state_test def test_on_attestation_same_slot(spec, state): store = spec.get_forkchoice_store(state) - time = 1 * spec.SECONDS_PER_SLOT + time = store.time + spec.SECONDS_PER_SLOT spec.on_tick(store, time) block = build_empty_block_for_next_slot(spec, state) @@ -225,7 +227,7 @@ def test_on_attestation_same_slot(spec, state): @spec_state_test def test_on_attestation_invalid_attestation(spec, state): store = spec.get_forkchoice_store(state) - time = 3 * spec.SECONDS_PER_SLOT + time = store.time + 3 * spec.SECONDS_PER_SLOT spec.on_tick(store, time) block = build_empty_block_for_next_slot(spec, state) diff --git a/tests/core/pyspec/eth2spec/test/fork_choice/test_on_block.py b/tests/core/pyspec/eth2spec/test/fork_choice/test_on_block.py index f50a00a9ff..4438dff920 100644 --- a/tests/core/pyspec/eth2spec/test/fork_choice/test_on_block.py +++ b/tests/core/pyspec/eth2spec/test/fork_choice/test_on_block.py @@ -4,7 +4,8 @@ from eth2spec.test.context import with_all_phases, spec_state_test from eth2spec.test.helpers.block import build_empty_block_for_next_slot, sign_block, transition_unsigned_block, \ build_empty_block -from eth2spec.test.helpers.state import next_epoch, next_epoch_with_attestations, state_transition_and_sign_block +from eth2spec.test.helpers.attestations import next_epoch_with_attestations +from eth2spec.test.helpers.state import next_epoch, state_transition_and_sign_block def run_on_block(spec, store, signed_block, valid=True): @@ -159,6 +160,7 @@ def test_on_block_finalized_skip_slots(spec, state): @spec_state_test def test_on_block_finalized_skip_slots_not_in_skip_chain(spec, state): # Initialization + next_epoch(spec, state) store = spec.get_forkchoice_store(state) store.finalized_checkpoint = spec.Checkpoint( diff --git a/tests/core/pyspec/eth2spec/test/fork_choice/test_on_tick.py b/tests/core/pyspec/eth2spec/test/fork_choice/test_on_tick.py index 27b64ac098..93f3bd9bb5 100644 --- a/tests/core/pyspec/eth2spec/test/fork_choice/test_on_tick.py +++ b/tests/core/pyspec/eth2spec/test/fork_choice/test_on_tick.py @@ -27,14 +27,16 @@ def test_basic(spec, state): @spec_state_test def test_update_justified_single(spec, state): store = spec.get_forkchoice_store(state) - seconds_per_epoch = spec.SECONDS_PER_SLOT * spec.SLOTS_PER_EPOCH + next_epoch = spec.get_current_epoch(state) + 1 + next_epoch_start_slot = spec.compute_start_slot_at_epoch(next_epoch) + seconds_until_next_epoch = next_epoch_start_slot * spec.SECONDS_PER_SLOT - store.time store.best_justified_checkpoint = spec.Checkpoint( epoch=store.justified_checkpoint.epoch + 1, root=b'\x55' * 32, ) - run_on_tick(spec, store, store.time + seconds_per_epoch, True) + run_on_tick(spec, store, store.time + seconds_until_next_epoch, True) @with_all_phases diff --git a/tests/core/pyspec/eth2spec/test/genesis/test_initialization.py b/tests/core/pyspec/eth2spec/test/genesis/test_initialization.py index 61a2ffb1eb..8828213377 100644 --- a/tests/core/pyspec/eth2spec/test/genesis/test_initialization.py +++ b/tests/core/pyspec/eth2spec/test/genesis/test_initialization.py @@ -1,10 +1,10 @@ -from eth2spec.test.context import spec_test, with_phases, single_phase +from eth2spec.test.context import PHASE0, spec_test, with_phases, single_phase from eth2spec.test.helpers.deposits import ( prepare_genesis_deposits, ) -@with_phases(['phase0']) +@with_phases(([PHASE0])) @spec_test @single_phase def test_initialize_beacon_state_from_eth1(spec): @@ -32,7 +32,7 @@ def test_initialize_beacon_state_from_eth1(spec): yield 'state', state -@with_phases(['phase0']) +@with_phases([PHASE0]) @spec_test @single_phase def test_initialize_beacon_state_some_small_balances(spec): diff --git a/tests/core/pyspec/eth2spec/test/genesis/test_validity.py b/tests/core/pyspec/eth2spec/test/genesis/test_validity.py index a90b4a6956..dbaf3f9516 100644 --- a/tests/core/pyspec/eth2spec/test/genesis/test_validity.py +++ b/tests/core/pyspec/eth2spec/test/genesis/test_validity.py @@ -1,4 +1,4 @@ -from eth2spec.test.context import spec_test, with_phases, single_phase +from eth2spec.test.context import PHASE0, spec_test, with_phases, single_phase from eth2spec.test.helpers.deposits import ( prepare_genesis_deposits, ) @@ -25,7 +25,7 @@ def run_is_valid_genesis_state(spec, state, valid=True): assert is_valid == valid -@with_phases(['phase0']) +@with_phases([PHASE0]) @spec_test @single_phase def test_is_valid_genesis_state_true(spec): @@ -34,7 +34,7 @@ def test_is_valid_genesis_state_true(spec): yield from run_is_valid_genesis_state(spec, state, valid=True) -@with_phases(['phase0']) +@with_phases([PHASE0]) @spec_test @single_phase def test_is_valid_genesis_state_false_invalid_timestamp(spec): @@ -44,7 +44,7 @@ def test_is_valid_genesis_state_false_invalid_timestamp(spec): yield from run_is_valid_genesis_state(spec, state, valid=False) -@with_phases(['phase0']) +@with_phases([PHASE0]) @spec_test @single_phase def test_is_valid_genesis_state_true_more_balance(spec): @@ -55,7 +55,7 @@ def test_is_valid_genesis_state_true_more_balance(spec): # TODO: not part of the genesis function yet. Erroneously merged. -# @with_phases(['phase0']) +# @with_phases([PHASE0]) # @spec_test # def test_is_valid_genesis_state_false_not_enough_balance(spec): # state = create_valid_beacon_state(spec) @@ -64,7 +64,7 @@ def test_is_valid_genesis_state_true_more_balance(spec): # yield from run_is_valid_genesis_state(spec, state, valid=False) -@with_phases(['phase0']) +@with_phases([PHASE0]) @spec_test @single_phase def test_is_valid_genesis_state_true_one_more_validator(spec): @@ -78,7 +78,7 @@ def test_is_valid_genesis_state_true_one_more_validator(spec): yield from run_is_valid_genesis_state(spec, state, valid=True) -@with_phases(['phase0']) +@with_phases([PHASE0]) @spec_test @single_phase def test_is_valid_genesis_state_false_not_enough_validator(spec): diff --git a/tests/core/pyspec/eth2spec/test/helpers/attestations.py b/tests/core/pyspec/eth2spec/test/helpers/attestations.py index 281d11b45c..8215f5c5b0 100644 --- a/tests/core/pyspec/eth2spec/test/helpers/attestations.py +++ b/tests/core/pyspec/eth2spec/test/helpers/attestations.py @@ -1,12 +1,48 @@ from typing import List -from eth2spec.test.helpers.block import build_empty_block_for_next_slot, transition_unsigned_block, \ - build_empty_block +from eth2spec.test.context import expect_assertion_error, PHASE0 +from eth2spec.test.helpers.state import state_transition_and_sign_block +from eth2spec.test.helpers.block import build_empty_block_for_next_slot from eth2spec.test.helpers.keys import privkeys from eth2spec.utils import bls from eth2spec.utils.ssz.ssz_typing import Bitlist +def run_attestation_processing(spec, state, attestation, valid=True): + """ + Run ``process_attestation``, yielding: + - pre-state ('pre') + - attestation ('attestation') + - post-state ('post'). + If ``valid == False``, run expecting ``AssertionError`` + """ + # yield pre-state + yield 'pre', state + + yield 'attestation', attestation + + # If the attestation is invalid, processing is aborted, and there is no post-state. + if not valid: + expect_assertion_error(lambda: spec.process_attestation(state, attestation)) + yield 'post', None + return + + current_epoch_count = len(state.current_epoch_attestations) + previous_epoch_count = len(state.previous_epoch_attestations) + + # process attestation + spec.process_attestation(state, attestation) + + # Make sure the attestation has been processed + if attestation.data.target.epoch == spec.get_current_epoch(state): + assert len(state.current_epoch_attestations) == current_epoch_count + 1 + else: + assert len(state.previous_epoch_attestations) == previous_epoch_count + 1 + + # yield post-state + yield 'post', state + + def build_attestation_data(spec, state, slot, index): assert state.slot >= slot @@ -39,7 +75,45 @@ def build_attestation_data(spec, state, slot, index): ) -def get_valid_attestation(spec, state, slot=None, index=None, empty=False, signed=False): +def convert_to_valid_on_time_attestation(spec, state, attestation, signed=False): + shard = spec.get_shard(state, attestation) + offset_slots = spec.compute_offset_slots(spec.get_latest_slot_for_shard(state, shard), state.slot + 1) + for offset_slot in offset_slots: + attestation.custody_bits_blocks.append( + Bitlist[spec.MAX_VALIDATORS_PER_COMMITTEE]([0 for _ in attestation.aggregation_bits]) + ) + + if signed: + sign_attestation(spec, state, attestation) + + return attestation + + +def get_valid_on_time_attestation(spec, state, slot=None, index=None, signed=False): + ''' + Construct on-time attestation for next slot + ''' + if slot is None: + slot = state.slot + if index is None: + index = 0 + + return get_valid_attestation(spec, state, slot=slot, index=index, signed=signed, on_time=True) + + +def get_valid_late_attestation(spec, state, slot=None, index=None, signed=False): + ''' + Construct on-time attestation for next slot + ''' + if slot is None: + slot = state.slot + if index is None: + index = 0 + + return get_valid_attestation(spec, state, slot=slot, index=index, signed=signed, on_time=False) + + +def get_valid_attestation(spec, state, slot=None, index=None, empty=False, signed=False, on_time=True): if slot is None: slot = state.slot if index is None: @@ -63,6 +137,10 @@ def get_valid_attestation(spec, state, slot=None, index=None, empty=False, signe fill_aggregate_attestation(spec, state, attestation) if signed: sign_attestation(spec, state, attestation) + + if spec.fork == 'phase1' and on_time: + attestation = convert_to_valid_on_time_attestation(spec, state, attestation, signed) + return attestation @@ -78,12 +156,11 @@ def sign_aggregate_attestation(spec, state, attestation_data, participants: List privkey ) ) - # TODO: we should try signing custody bits if spec.fork == 'phase1' return bls.Aggregate(signatures) def sign_indexed_attestation(spec, state, indexed_attestation): - if spec.fork == 'phase0': + if spec.fork == PHASE0: participants = indexed_attestation.attesting_indices data = indexed_attestation.data indexed_attestation.signature = sign_aggregate_attestation(spec, state, data, participants) @@ -96,7 +173,47 @@ def sign_indexed_attestation(spec, state, indexed_attestation): indexed_attestation.attestation.signature = sign_aggregate_attestation(spec, state, data, participants) +def sign_on_time_attestation(spec, state, attestation): + if not any(attestation.custody_bits_blocks): + sign_attestation(spec, state, attestation) + return + + committee = spec.get_beacon_committee(state, attestation.data.slot, attestation.data.index) + signatures = [] + for block_index, custody_bits in enumerate(attestation.custody_bits_blocks): + for participant, abit, cbit in zip(committee, attestation.aggregation_bits, custody_bits): + if not abit: + continue + signatures.append(get_attestation_custody_signature( + spec, + state, + attestation.data, + block_index, + cbit, + privkeys[participant] + )) + + attestation.signature = bls.Aggregate(signatures) + + +def get_attestation_custody_signature(spec, state, attestation_data, block_index, bit, privkey): + domain = spec.get_domain(state, spec.DOMAIN_BEACON_ATTESTER, attestation_data.target.epoch) + signing_root = spec.compute_signing_root( + spec.AttestationCustodyBitWrapper( + attestation_data_root=attestation_data.hash_tree_root(), + block_index=block_index, + bit=bit, + ), + domain, + ) + return bls.Sign(privkey, signing_root) + + def sign_attestation(spec, state, attestation): + if spec.fork == 'phase1' and any(attestation.custody_bits_blocks): + sign_on_time_attestation(spec, state, attestation) + return + participants = spec.get_attesting_indices( state, attestation.data, @@ -113,7 +230,6 @@ def get_attestation_signature(spec, state, attestation_data, privkey): def fill_aggregate_attestation(spec, state, attestation, signed=False): - beacon_committee = spec.get_beacon_committee( state, attestation.data.slot, @@ -127,8 +243,38 @@ def fill_aggregate_attestation(spec, state, attestation, signed=False): def add_attestations_to_state(spec, state, attestations, slot): - block = build_empty_block(spec, state, slot) + spec.process_slots(state, slot) for attestation in attestations: - block.body.attestations.append(attestation) - spec.process_slots(state, block.slot) - transition_unsigned_block(spec, state, block) + spec.process_attestation(state, attestation) + + +def next_epoch_with_attestations(spec, + state, + fill_cur_epoch, + fill_prev_epoch): + assert state.slot % spec.SLOTS_PER_EPOCH == 0 + + post_state = state.copy() + signed_blocks = [] + for _ in range(spec.SLOTS_PER_EPOCH): + block = build_empty_block_for_next_slot(spec, post_state) + if fill_cur_epoch and post_state.slot >= spec.MIN_ATTESTATION_INCLUSION_DELAY: + slot_to_attest = post_state.slot - spec.MIN_ATTESTATION_INCLUSION_DELAY + 1 + committees_per_slot = spec.get_committee_count_at_slot(state, slot_to_attest) + if slot_to_attest >= spec.compute_start_slot_at_epoch(spec.get_current_epoch(post_state)): + for index in range(committees_per_slot): + cur_attestation = get_valid_attestation(spec, post_state, slot_to_attest, index=index, signed=True) + block.body.attestations.append(cur_attestation) + + if fill_prev_epoch: + slot_to_attest = post_state.slot - spec.SLOTS_PER_EPOCH + 1 + committees_per_slot = spec.get_committee_count_at_slot(state, slot_to_attest) + for index in range(committees_per_slot): + prev_attestation = get_valid_attestation( + spec, post_state, slot_to_attest, index=index, signed=True, on_time=False) + block.body.attestations.append(prev_attestation) + + signed_block = state_transition_and_sign_block(spec, post_state, block) + signed_blocks.append(signed_block) + + return state, signed_blocks, post_state diff --git a/tests/core/pyspec/eth2spec/test/helpers/attester_slashings.py b/tests/core/pyspec/eth2spec/test/helpers/attester_slashings.py index 5dfedc2008..975f34c209 100644 --- a/tests/core/pyspec/eth2spec/test/helpers/attester_slashings.py +++ b/tests/core/pyspec/eth2spec/test/helpers/attester_slashings.py @@ -1,3 +1,4 @@ +from eth2spec.test.context import PHASE1 from eth2spec.test.helpers.attestations import get_valid_attestation, sign_attestation @@ -20,7 +21,7 @@ def get_indexed_attestation_participants(spec, indexed_att): """ Wrapper around index-attestation to return the list of participant indices, regardless of spec phase. """ - if spec.fork == "phase1": + if spec.fork == PHASE1: return list(spec.get_indices_from_committee( indexed_att.committee, indexed_att.attestation.aggregation_bits, @@ -33,21 +34,21 @@ def set_indexed_attestation_participants(spec, indexed_att, participants): """ Wrapper around index-attestation to return the list of participant indices, regardless of spec phase. """ - if spec.fork == "phase1": + if spec.fork == PHASE1: indexed_att.attestation.aggregation_bits = [bool(i in participants) for i in indexed_att.committee] else: indexed_att.attesting_indices = participants def get_attestation_1_data(spec, att_slashing): - if spec.fork == "phase1": + if spec.fork == PHASE1: return att_slashing.attestation_1.attestation.data else: return att_slashing.attestation_1.data def get_attestation_2_data(spec, att_slashing): - if spec.fork == "phase1": + if spec.fork == PHASE1: return att_slashing.attestation_2.attestation.data else: return att_slashing.attestation_2.data diff --git a/tests/core/pyspec/eth2spec/test/helpers/keys.py b/tests/core/pyspec/eth2spec/test/helpers/keys.py index 23bb95131f..7f7820d3a0 100644 --- a/tests/core/pyspec/eth2spec/test/helpers/keys.py +++ b/tests/core/pyspec/eth2spec/test/helpers/keys.py @@ -1,6 +1,6 @@ from py_ecc.bls import G2ProofOfPossession as bls from eth2spec.phase0 import spec -privkeys = [i + 1 for i in range(spec.SLOTS_PER_EPOCH * 16)] +privkeys = [i + 1 for i in range(spec.SLOTS_PER_EPOCH * 256)] pubkeys = [bls.PrivToPub(privkey) for privkey in privkeys] pubkey_to_privkey = {pubkey: privkey for privkey, pubkey in zip(privkeys, pubkeys)} diff --git a/tests/core/pyspec/eth2spec/test/helpers/phase1/attestations.py b/tests/core/pyspec/eth2spec/test/helpers/phase1/attestations.py index 622183fe92..0e16e1face 100644 --- a/tests/core/pyspec/eth2spec/test/helpers/phase1/attestations.py +++ b/tests/core/pyspec/eth2spec/test/helpers/phase1/attestations.py @@ -1,30 +1,63 @@ -from eth2spec.test.helpers.keys import privkeys +from eth2spec.utils.ssz.ssz_typing import Bitlist from eth2spec.utils import bls +from eth2spec.test.helpers.keys import privkeys +import eth2spec.test.helpers.attestations as phase0_attestations + + +def get_valid_on_time_attestation(spec, state, index=None, signed=False): + ''' + Construct on-time attestation for next slot + ''' + if index is None: + index = 0 + + attestation = phase0_attestations.get_valid_attestation(spec, state, state.slot, index, False) + shard = spec.get_shard(state, attestation) + offset_slots = spec.compute_offset_slots(spec.get_latest_slot_for_shard(state, shard), state.slot + 1) + + for _ in offset_slots: + attestation.custody_bits_blocks.append( + Bitlist[spec.MAX_VALIDATORS_PER_COMMITTEE]([0 for _ in attestation.aggregation_bits]) + ) -def sign_shard_attestation(spec, beacon_state, shard_state, block, participants): + if signed: + sign_attestation(spec, state, attestation) + + return attestation + + +def sign_attestation(spec, state, attestation): + if not any(attestation.custody_bits_blocks): + phase0_attestations.sign_attestation(spec, state, attestation) + return + + committee = spec.get_beacon_committee(state, attestation.data.slot, attestation.data.index) signatures = [] - message_hash = spec.ShardAttestationData( - slot=block.slot, - parent_root=block.parent_root, - ).hash_tree_root() - block_epoch = spec.compute_epoch_of_shard_slot(block.slot) - for validator_index in participants: - privkey = privkeys[validator_index] - signatures.append( - get_attestation_signature( + for block_index, custody_bits in enumerate(attestation.custody_bits_blocks): + for participant, abit, cbit in zip(committee, attestation.aggregation_bits, custody_bits): + if not abit: + continue + signatures.append(get_attestation_custody_signature( spec, - beacon_state, - shard_state, - message_hash, - block_epoch, - privkey, - ) - ) - return bls.Aggregate(signatures) + state, + attestation.data, + block_index, + cbit, + privkeys[participant] + )) + + attestation.signature = bls.Aggregate(signatures) -def get_attestation_signature(spec, beacon_state, shard_state, message_hash, block_epoch, privkey): - domain = spec.get_domain(beacon_state, spec.DOMAIN_SHARD_ATTESTER, block_epoch) - signing_root = spec.compute_signing_root(message_hash, domain) +def get_attestation_custody_signature(spec, state, attestation_data, block_index, bit, privkey): + domain = spec.get_domain(state, spec.DOMAIN_BEACON_ATTESTER, attestation_data.target.epoch) + signing_root = spec.compute_signing_root( + spec.AttestationCustodyBitWrapper( + attestation_data.hash_tree_root(), + block_index, + bit, + ), + domain, + ) return bls.Sign(privkey, signing_root) diff --git a/tests/core/pyspec/eth2spec/test/helpers/state.py b/tests/core/pyspec/eth2spec/test/helpers/state.py index aad329ff41..46a7ce2b5e 100644 --- a/tests/core/pyspec/eth2spec/test/helpers/state.py +++ b/tests/core/pyspec/eth2spec/test/helpers/state.py @@ -1,6 +1,5 @@ from eth2spec.test.context import expect_assertion_error -from eth2spec.test.helpers.attestations import get_valid_attestation -from eth2spec.test.helpers.block import sign_block, build_empty_block_for_next_slot, transition_unsigned_block +from eth2spec.test.helpers.block import sign_block, transition_unsigned_block def get_balance(state, index): @@ -14,6 +13,13 @@ def next_slot(spec, state): spec.process_slots(state, state.slot + 1) +def next_slots(spec, state, slots): + """ + Transition given slots forward. + """ + spec.process_slots(state, state.slot + slots) + + def transition_to(spec, state, slot): """ Transition to ``slot``. @@ -51,34 +57,3 @@ def state_transition_and_sign_block(spec, state, block, expect_fail=False): transition_unsigned_block(spec, state, block) block.state_root = state.hash_tree_root() return sign_block(spec, state, block) - - -def next_epoch_with_attestations(spec, - state, - fill_cur_epoch, - fill_prev_epoch): - assert state.slot % spec.SLOTS_PER_EPOCH == 0 - - post_state = state.copy() - signed_blocks = [] - for _ in range(spec.SLOTS_PER_EPOCH): - block = build_empty_block_for_next_slot(spec, post_state) - if fill_cur_epoch and post_state.slot >= spec.MIN_ATTESTATION_INCLUSION_DELAY: - slot_to_attest = post_state.slot - spec.MIN_ATTESTATION_INCLUSION_DELAY + 1 - committees_per_slot = spec.get_committee_count_at_slot(state, slot_to_attest) - if slot_to_attest >= spec.compute_start_slot_at_epoch(spec.get_current_epoch(post_state)): - for index in range(committees_per_slot): - cur_attestation = get_valid_attestation(spec, post_state, slot_to_attest, index=index, signed=True) - block.body.attestations.append(cur_attestation) - - if fill_prev_epoch: - slot_to_attest = post_state.slot - spec.SLOTS_PER_EPOCH + 1 - committees_per_slot = spec.get_committee_count_at_slot(state, slot_to_attest) - for index in range(committees_per_slot): - prev_attestation = get_valid_attestation(spec, post_state, slot_to_attest, index=index, signed=True) - block.body.attestations.append(prev_attestation) - - signed_block = state_transition_and_sign_block(spec, post_state, block) - signed_blocks.append(signed_block) - - return state, signed_blocks, post_state diff --git a/tests/core/pyspec/eth2spec/test/phase_0/block_processing/test_process_attestation.py b/tests/core/pyspec/eth2spec/test/phase_0/block_processing/test_process_attestation.py index df42b6e1ae..8663391aa0 100644 --- a/tests/core/pyspec/eth2spec/test/phase_0/block_processing/test_process_attestation.py +++ b/tests/core/pyspec/eth2spec/test/phase_0/block_processing/test_process_attestation.py @@ -1,6 +1,5 @@ from eth2spec.test.context import ( spec_state_test, - expect_assertion_error, always_bls, never_bls, with_all_phases, spec_test, @@ -8,57 +7,26 @@ with_custom_state, single_phase) from eth2spec.test.helpers.attestations import ( + run_attestation_processing, get_valid_attestation, sign_aggregate_attestation, sign_attestation, ) from eth2spec.test.helpers.state import ( + next_slot, + next_slots, next_epoch, + transition_to, ) from eth2spec.test.helpers.block import apply_empty_block from eth2spec.utils.ssz.ssz_typing import Bitlist -def run_attestation_processing(spec, state, attestation, valid=True): - """ - Run ``process_attestation``, yielding: - - pre-state ('pre') - - attestation ('attestation') - - post-state ('post'). - If ``valid == False``, run expecting ``AssertionError`` - """ - # yield pre-state - yield 'pre', state - - yield 'attestation', attestation - - # If the attestation is invalid, processing is aborted, and there is no post-state. - if not valid: - expect_assertion_error(lambda: spec.process_attestation(state, attestation)) - yield 'post', None - return - - current_epoch_count = len(state.current_epoch_attestations) - previous_epoch_count = len(state.previous_epoch_attestations) - - # process attestation - spec.process_attestation(state, attestation) - - # Make sure the attestation has been processed - if attestation.data.target.epoch == spec.get_current_epoch(state): - assert len(state.current_epoch_attestations) == current_epoch_count + 1 - else: - assert len(state.previous_epoch_attestations) == previous_epoch_count + 1 - - # yield post-state - yield 'post', state - - @with_all_phases @spec_state_test def test_success(spec, state): attestation = get_valid_attestation(spec, state, signed=True) - state.slot += spec.MIN_ATTESTATION_INCLUSION_DELAY + next_slots(spec, state, spec.MIN_ATTESTATION_INCLUSION_DELAY) yield from run_attestation_processing(spec, state, attestation) @@ -68,9 +36,9 @@ def test_success(spec, state): @with_custom_state(balances_fn=low_balances, threshold_fn=lambda spec: spec.EJECTION_BALANCE) @single_phase def test_success_multi_proposer_index_iterations(spec, state): - state.slot += spec.SLOTS_PER_EPOCH * 2 + next_slots(spec, state, spec.SLOTS_PER_EPOCH * 2) attestation = get_valid_attestation(spec, state, signed=True) - state.slot += spec.MIN_ATTESTATION_INCLUSION_DELAY + next_slots(spec, state, spec.MIN_ATTESTATION_INCLUSION_DELAY) yield from run_attestation_processing(spec, state, attestation) @@ -78,8 +46,8 @@ def test_success_multi_proposer_index_iterations(spec, state): @with_all_phases @spec_state_test def test_success_previous_epoch(spec, state): - attestation = get_valid_attestation(spec, state, signed=True) - state.slot = spec.SLOTS_PER_EPOCH - 1 + attestation = get_valid_attestation(spec, state, signed=True, on_time=False) + transition_to(spec, state, spec.SLOTS_PER_EPOCH - 1) next_epoch(spec, state) apply_empty_block(spec, state) @@ -91,7 +59,7 @@ def test_success_previous_epoch(spec, state): @always_bls def test_invalid_attestation_signature(spec, state): attestation = get_valid_attestation(spec, state) - state.slot += spec.MIN_ATTESTATION_INCLUSION_DELAY + next_slots(spec, state, spec.MIN_ATTESTATION_INCLUSION_DELAY) yield from run_attestation_processing(spec, state, attestation, False) @@ -108,10 +76,10 @@ def test_before_inclusion_delay(spec, state): @with_all_phases @spec_state_test def test_after_epoch_slots(spec, state): - attestation = get_valid_attestation(spec, state, signed=True) - state.slot = spec.SLOTS_PER_EPOCH - 1 + attestation = get_valid_attestation(spec, state, signed=True, on_time=False) + # increment past latest inclusion slot - spec.process_slots(state, state.slot + 2) + transition_to(spec, state, state.slot + spec.SLOTS_PER_EPOCH + 1) apply_empty_block(spec, state) yield from run_attestation_processing(spec, state, attestation, False) @@ -120,7 +88,7 @@ def test_after_epoch_slots(spec, state): @with_all_phases @spec_state_test def test_old_source_epoch(spec, state): - state.slot = spec.SLOTS_PER_EPOCH * 5 + next_slots(spec, state, spec.SLOTS_PER_EPOCH * 5) state.finalized_checkpoint.epoch = 2 state.previous_justified_checkpoint.epoch = 3 state.current_justified_checkpoint.epoch = 4 @@ -142,7 +110,7 @@ def test_old_source_epoch(spec, state): @always_bls def test_wrong_index_for_committee_signature(spec, state): attestation = get_valid_attestation(spec, state) - state.slot += spec.MIN_ATTESTATION_INCLUSION_DELAY + next_slots(spec, state, spec.MIN_ATTESTATION_INCLUSION_DELAY) attestation.data.index += 1 @@ -153,12 +121,14 @@ def test_wrong_index_for_committee_signature(spec, state): @spec_state_test @never_bls def test_wrong_index_for_slot(spec, state): - committees_per_slot = spec.get_committee_count_at_slot(state, state.slot) - assert committees_per_slot < spec.MAX_COMMITTEES_PER_SLOT - index = committees_per_slot + while spec.get_committee_count_at_slot(state, state.slot) >= spec.MAX_COMMITTEES_PER_SLOT: + state.validators = state.validators[:len(state.validators) // 2] + state.balances = state.balances[:len(state.balances) // 2] + + index = spec.MAX_COMMITTEES_PER_SLOT - 1 attestation = get_valid_attestation(spec, state) - state.slot += spec.MIN_ATTESTATION_INCLUSION_DELAY + next_slots(spec, state, spec.MIN_ATTESTATION_INCLUSION_DELAY) attestation.data.index = index @@ -170,7 +140,7 @@ def test_wrong_index_for_slot(spec, state): @never_bls def test_invalid_index(spec, state): attestation = get_valid_attestation(spec, state) - state.slot += spec.MIN_ATTESTATION_INCLUSION_DELAY + next_slots(spec, state, spec.MIN_ATTESTATION_INCLUSION_DELAY) # off by one (with respect to valid range) on purpose attestation.data.index = spec.MAX_COMMITTEES_PER_SLOT @@ -184,7 +154,7 @@ def test_mismatched_target_and_slot(spec, state): next_epoch(spec, state) next_epoch(spec, state) - attestation = get_valid_attestation(spec, state) + attestation = get_valid_attestation(spec, state, on_time=False) attestation.data.slot = attestation.data.slot - spec.SLOTS_PER_EPOCH sign_attestation(spec, state, attestation) @@ -197,9 +167,9 @@ def test_mismatched_target_and_slot(spec, state): def test_old_target_epoch(spec, state): assert spec.MIN_ATTESTATION_INCLUSION_DELAY < spec.SLOTS_PER_EPOCH * 2 - attestation = get_valid_attestation(spec, state, signed=True) + attestation = get_valid_attestation(spec, state, signed=True, on_time=False) - state.slot = spec.SLOTS_PER_EPOCH * 2 # target epoch will be too old to handle + next_slots(spec, state, spec.SLOTS_PER_EPOCH * 2) # target epoch will be too old to handle yield from run_attestation_processing(spec, state, attestation, False) @@ -221,7 +191,7 @@ def test_future_target_epoch(spec, state): # manually add signature for correct participants attestation.signature = sign_aggregate_attestation(spec, state, attestation.data, participants) - state.slot += spec.MIN_ATTESTATION_INCLUSION_DELAY + next_slots(spec, state, spec.MIN_ATTESTATION_INCLUSION_DELAY) yield from run_attestation_processing(spec, state, attestation, False) @@ -230,7 +200,7 @@ def test_future_target_epoch(spec, state): @spec_state_test def test_new_source_epoch(spec, state): attestation = get_valid_attestation(spec, state) - state.slot += spec.MIN_ATTESTATION_INCLUSION_DELAY + next_slots(spec, state, spec.MIN_ATTESTATION_INCLUSION_DELAY) attestation.data.source.epoch += 1 @@ -243,7 +213,7 @@ def test_new_source_epoch(spec, state): @spec_state_test def test_source_root_is_target_root(spec, state): attestation = get_valid_attestation(spec, state) - state.slot += spec.MIN_ATTESTATION_INCLUSION_DELAY + next_slots(spec, state, spec.MIN_ATTESTATION_INCLUSION_DELAY) attestation.data.source.root = attestation.data.target.root @@ -255,14 +225,15 @@ def test_source_root_is_target_root(spec, state): @with_all_phases @spec_state_test def test_invalid_current_source_root(spec, state): - state.slot = spec.SLOTS_PER_EPOCH * 5 + next_slots(spec, state, spec.SLOTS_PER_EPOCH * 5) + state.finalized_checkpoint.epoch = 2 state.previous_justified_checkpoint = spec.Checkpoint(epoch=3, root=b'\x01' * 32) state.current_justified_checkpoint = spec.Checkpoint(epoch=4, root=b'\x32' * 32) - attestation = get_valid_attestation(spec, state, slot=(spec.SLOTS_PER_EPOCH * 3) + 1) - state.slot += spec.MIN_ATTESTATION_INCLUSION_DELAY + attestation = get_valid_attestation(spec, state, slot=(spec.SLOTS_PER_EPOCH * 3) + 1, on_time=False) + next_slots(spec, state, spec.MIN_ATTESTATION_INCLUSION_DELAY) # Test logic sanity checks: assert state.current_justified_checkpoint.root != state.previous_justified_checkpoint.root @@ -280,7 +251,7 @@ def test_invalid_current_source_root(spec, state): @spec_state_test def test_bad_source_root(spec, state): attestation = get_valid_attestation(spec, state) - state.slot += spec.MIN_ATTESTATION_INCLUSION_DELAY + next_slots(spec, state, spec.MIN_ATTESTATION_INCLUSION_DELAY) attestation.data.source.root = b'\x42' * 32 @@ -292,8 +263,9 @@ def test_bad_source_root(spec, state): @with_all_phases @spec_state_test def test_empty_aggregation_bits(spec, state): + next_slot(spec, state) attestation = get_valid_attestation(spec, state, empty=True) - state.slot += spec.MIN_ATTESTATION_INCLUSION_DELAY + next_slots(spec, state, spec.MIN_ATTESTATION_INCLUSION_DELAY) assert attestation.aggregation_bits == Bitlist[spec.MAX_VALIDATORS_PER_COMMITTEE]( *([0b0] * len(attestation.aggregation_bits))) @@ -305,7 +277,7 @@ def test_empty_aggregation_bits(spec, state): @spec_state_test def test_too_many_aggregation_bits(spec, state): attestation = get_valid_attestation(spec, state, signed=True) - state.slot += spec.MIN_ATTESTATION_INCLUSION_DELAY + next_slots(spec, state, spec.MIN_ATTESTATION_INCLUSION_DELAY) # one too many bits attestation.aggregation_bits.append(0b0) @@ -317,7 +289,7 @@ def test_too_many_aggregation_bits(spec, state): @spec_state_test def test_too_few_aggregation_bits(spec, state): attestation = get_valid_attestation(spec, state) - state.slot += spec.MIN_ATTESTATION_INCLUSION_DELAY + next_slots(spec, state, spec.MIN_ATTESTATION_INCLUSION_DELAY) attestation.aggregation_bits = Bitlist[spec.MAX_VALIDATORS_PER_COMMITTEE]( *([0b1] + [0b0] * (len(attestation.aggregation_bits) - 1))) diff --git a/tests/core/pyspec/eth2spec/test/phase_0/block_processing/test_process_attester_slashing.py b/tests/core/pyspec/eth2spec/test/phase_0/block_processing/test_process_attester_slashing.py index e9665e7146..48dc75fd91 100644 --- a/tests/core/pyspec/eth2spec/test/phase_0/block_processing/test_process_attester_slashing.py +++ b/tests/core/pyspec/eth2spec/test/phase_0/block_processing/test_process_attester_slashing.py @@ -1,4 +1,7 @@ -from eth2spec.test.context import spec_state_test, expect_assertion_error, always_bls, with_all_phases, with_phases +from eth2spec.test.context import ( + PHASE0, PHASE1, + spec_state_test, expect_assertion_error, always_bls, with_all_phases, with_phases +) from eth2spec.test.helpers.attestations import sign_indexed_attestation from eth2spec.test.helpers.attester_slashings import get_valid_attester_slashing, \ get_indexed_attestation_participants, get_attestation_2_data, get_attestation_1_data @@ -161,7 +164,7 @@ def test_same_data(spec, state): indexed_att_1 = attester_slashing.attestation_1 att_2_data = get_attestation_2_data(spec, attester_slashing) - if spec.fork == 'phase1': + if spec.fork == PHASE1: indexed_att_1.attestation.data = att_2_data else: indexed_att_1.data = att_2_data @@ -199,7 +202,7 @@ def test_participants_already_slashed(spec, state): # Some of the following tests are phase0 only: phase 1 lists participants with bitfields instead of index list. -@with_phases(['phase0']) +@with_phases([PHASE0]) @spec_state_test @always_bls def test_att1_bad_extra_index(spec, state): @@ -215,7 +218,7 @@ def test_att1_bad_extra_index(spec, state): yield from run_attester_slashing_processing(spec, state, attester_slashing, False) -@with_phases(['phase0']) +@with_phases([PHASE0]) @spec_state_test @always_bls def test_att1_bad_replaced_index(spec, state): @@ -231,7 +234,7 @@ def test_att1_bad_replaced_index(spec, state): yield from run_attester_slashing_processing(spec, state, attester_slashing, False) -@with_phases(['phase0']) +@with_phases([PHASE0]) @spec_state_test @always_bls def test_att2_bad_extra_index(spec, state): @@ -247,7 +250,7 @@ def test_att2_bad_extra_index(spec, state): yield from run_attester_slashing_processing(spec, state, attester_slashing, False) -@with_phases(['phase0']) +@with_phases([PHASE0]) @spec_state_test @always_bls def test_att2_bad_replaced_index(spec, state): @@ -263,7 +266,7 @@ def test_att2_bad_replaced_index(spec, state): yield from run_attester_slashing_processing(spec, state, attester_slashing, False) -@with_phases(['phase0']) +@with_phases([PHASE0]) @spec_state_test @always_bls def test_att1_duplicate_index_normal_signed(spec, state): @@ -283,7 +286,7 @@ def test_att1_duplicate_index_normal_signed(spec, state): yield from run_attester_slashing_processing(spec, state, attester_slashing, False) -@with_phases(['phase0']) +@with_phases([PHASE0]) @spec_state_test @always_bls def test_att2_duplicate_index_normal_signed(spec, state): @@ -303,7 +306,7 @@ def test_att2_duplicate_index_normal_signed(spec, state): yield from run_attester_slashing_processing(spec, state, attester_slashing, False) -@with_phases(['phase0']) +@with_phases([PHASE0]) @spec_state_test @always_bls def test_att1_duplicate_index_double_signed(spec, state): @@ -318,7 +321,7 @@ def test_att1_duplicate_index_double_signed(spec, state): yield from run_attester_slashing_processing(spec, state, attester_slashing, False) -@with_phases(['phase0']) +@with_phases([PHASE0]) @spec_state_test @always_bls def test_att2_duplicate_index_double_signed(spec, state): @@ -333,7 +336,7 @@ def test_att2_duplicate_index_double_signed(spec, state): yield from run_attester_slashing_processing(spec, state, attester_slashing, False) -@with_phases(['phase0']) +@with_phases([PHASE0]) @spec_state_test def test_unsorted_att_1(spec, state): attester_slashing = get_valid_attester_slashing(spec, state, signed_1=False, signed_2=True) @@ -346,7 +349,7 @@ def test_unsorted_att_1(spec, state): yield from run_attester_slashing_processing(spec, state, attester_slashing, False) -@with_phases(['phase0']) +@with_phases([PHASE0]) @spec_state_test def test_unsorted_att_2(spec, state): attester_slashing = get_valid_attester_slashing(spec, state, signed_1=True, signed_2=False) diff --git a/tests/core/pyspec/eth2spec/test/phase_0/block_processing/test_process_proposer_slashing.py b/tests/core/pyspec/eth2spec/test/phase_0/block_processing/test_process_proposer_slashing.py index 5f1fca9697..7657518fc4 100644 --- a/tests/core/pyspec/eth2spec/test/phase_0/block_processing/test_process_proposer_slashing.py +++ b/tests/core/pyspec/eth2spec/test/phase_0/block_processing/test_process_proposer_slashing.py @@ -2,7 +2,7 @@ from eth2spec.test.helpers.block_header import sign_block_header from eth2spec.test.helpers.keys import privkeys from eth2spec.test.helpers.proposer_slashings import get_valid_proposer_slashing -from eth2spec.test.helpers.state import get_balance +from eth2spec.test.helpers.state import get_balance, next_epoch def run_proposer_slashing_processing(spec, state, proposer_slashing, valid=True): @@ -152,7 +152,7 @@ def test_proposer_is_withdrawn(spec, state): proposer_slashing = get_valid_proposer_slashing(spec, state, signed_1=True, signed_2=True) # move 1 epoch into future, to allow for past withdrawable epoch - state.slot += spec.SLOTS_PER_EPOCH + next_epoch(spec, state) # set proposer withdrawable_epoch in past current_epoch = spec.get_current_epoch(state) proposer_index = proposer_slashing.signed_header_1.message.proposer_index diff --git a/tests/core/pyspec/eth2spec/test/phase_0/epoch_processing/test_process_final_updates.py b/tests/core/pyspec/eth2spec/test/phase_0/epoch_processing/test_process_final_updates.py index 28cfc9b5d6..8cb09be0ef 100644 --- a/tests/core/pyspec/eth2spec/test/phase_0/epoch_processing/test_process_final_updates.py +++ b/tests/core/pyspec/eth2spec/test/phase_0/epoch_processing/test_process_final_updates.py @@ -2,6 +2,7 @@ from eth2spec.test.phase_0.epoch_processing.run_epoch_process_base import ( run_epoch_processing_with, run_epoch_processing_to ) +from eth2spec.test.helpers.state import transition_to def run_process_final_updates(spec, state): @@ -13,7 +14,8 @@ def run_process_final_updates(spec, state): def test_eth1_vote_no_reset(spec, state): assert spec.EPOCHS_PER_ETH1_VOTING_PERIOD > 1 # skip ahead to the end of the epoch - state.slot = spec.SLOTS_PER_EPOCH - 1 + transition_to(spec, state, spec.SLOTS_PER_EPOCH - 1) + for i in range(state.slot + 1): # add a vote for each skipped slot. state.eth1_data_votes.append( spec.Eth1Data(deposit_root=b'\xaa' * 32, diff --git a/tests/core/pyspec/eth2spec/test/phase_0/epoch_processing/test_process_justification_and_finalization.py b/tests/core/pyspec/eth2spec/test/phase_0/epoch_processing/test_process_justification_and_finalization.py index 917c06e3db..09af2126db 100644 --- a/tests/core/pyspec/eth2spec/test/phase_0/epoch_processing/test_process_justification_and_finalization.py +++ b/tests/core/pyspec/eth2spec/test/phase_0/epoch_processing/test_process_justification_and_finalization.py @@ -2,6 +2,7 @@ from eth2spec.test.phase_0.epoch_processing.run_epoch_process_base import ( run_epoch_processing_with ) +from eth2spec.test.helpers.state import transition_to def run_process_just_and_fin(spec, state): @@ -23,7 +24,7 @@ def add_mock_attestations(spec, state, epoch, source, target, sufficient_support raise Exception(f"cannot include attestations in epoch ${epoch} from epoch ${current_epoch}") total_balance = spec.get_total_active_balance(state) - remaining_balance = total_balance * 2 // 3 + remaining_balance = int(total_balance * 2 // 3) # can become negative start_slot = spec.compute_start_slot_at_epoch(epoch) for slot in range(start_slot, start_slot + spec.SLOTS_PER_EPOCH): @@ -41,14 +42,15 @@ def add_mock_attestations(spec, state, epoch, source, target, sufficient_support aggregation_bits = [0] * len(committee) for v in range(len(committee) * 2 // 3 + 1): if remaining_balance > 0: - remaining_balance -= state.validators[v].effective_balance + remaining_balance -= int(state.validators[v].effective_balance) aggregation_bits[v] = 1 else: break - # remove just one attester to make the marginal support insufficient + # remove 1/5th of attesters so that support is insufficient if not sufficient_support: - aggregation_bits[aggregation_bits.index(1)] = 0 + for i in range(max(len(committee) // 5, 1)): + aggregation_bits[i] = 0 attestations.append(spec.PendingAttestation( aggregation_bits=aggregation_bits, @@ -81,7 +83,7 @@ def put_checkpoints_in_block_roots(spec, state, checkpoints): def finalize_on_234(spec, state, epoch, sufficient_support): assert epoch > 4 - state.slot = (spec.SLOTS_PER_EPOCH * epoch) - 1 # skip ahead to just before epoch + transition_to(spec, state, spec.SLOTS_PER_EPOCH * epoch - 1) # skip ahead to just before epoch # 43210 -- epochs ago # 3210x -- justification bitfield indices @@ -116,7 +118,7 @@ def finalize_on_234(spec, state, epoch, sufficient_support): def finalize_on_23(spec, state, epoch, sufficient_support): assert epoch > 3 - state.slot = (spec.SLOTS_PER_EPOCH * epoch) - 1 # skip ahead to just before epoch + transition_to(spec, state, spec.SLOTS_PER_EPOCH * epoch - 1) # skip ahead to just before epoch # 43210 -- epochs ago # 210xx -- justification bitfield indices (pre shift) @@ -194,7 +196,7 @@ def finalize_on_123(spec, state, epoch, sufficient_support): def finalize_on_12(spec, state, epoch, sufficient_support, messed_up_target): assert epoch > 2 - state.slot = (spec.SLOTS_PER_EPOCH * epoch) - 1 # skip ahead to just before epoch + transition_to(spec, state, spec.SLOTS_PER_EPOCH * epoch - 1) # skip ahead to just before epoch # 43210 -- epochs ago # 210xx -- justification bitfield indices (pre shift) diff --git a/tests/core/pyspec/eth2spec/test/phase_0/epoch_processing/test_process_registry_updates.py b/tests/core/pyspec/eth2spec/test/phase_0/epoch_processing/test_process_registry_updates.py index 526aba277e..a5f4d92279 100644 --- a/tests/core/pyspec/eth2spec/test/phase_0/epoch_processing/test_process_registry_updates.py +++ b/tests/core/pyspec/eth2spec/test/phase_0/epoch_processing/test_process_registry_updates.py @@ -1,4 +1,4 @@ -from eth2spec.test.helpers.state import next_epoch +from eth2spec.test.helpers.state import next_epoch, next_slots from eth2spec.test.context import spec_state_test, with_all_phases from eth2spec.test.phase_0.epoch_processing.run_epoch_process_base import run_epoch_processing_with @@ -101,7 +101,7 @@ def test_activation_queue_sorting(spec, state): state.validators[mock_activations - 1].activation_eligibility_epoch = epoch # move state forward and finalize to allow for activations - state.slot += spec.SLOTS_PER_EPOCH * 3 + next_slots(spec, state, spec.SLOTS_PER_EPOCH * 3) state.finalized_checkpoint.epoch = epoch + 1 yield from run_process_registry_updates(spec, state) @@ -113,10 +113,10 @@ def test_activation_queue_sorting(spec, state): # the second last is at the end of the queue, and did not make the churn, # hence is not assigned an activation_epoch yet. assert state.validators[mock_activations - 2].activation_epoch == spec.FAR_FUTURE_EPOCH - # the one at churn_limit - 1 did not make it, it was out-prioritized - assert state.validators[churn_limit - 1].activation_epoch == spec.FAR_FUTURE_EPOCH + # the one at churn_limit did not make it, it was out-prioritized + assert state.validators[churn_limit].activation_epoch == spec.FAR_FUTURE_EPOCH # but the the one in front of the above did - assert state.validators[churn_limit - 2].activation_epoch != spec.FAR_FUTURE_EPOCH + assert state.validators[churn_limit - 1].activation_epoch != spec.FAR_FUTURE_EPOCH @with_all_phases @@ -131,7 +131,8 @@ def test_activation_queue_efficiency(spec, state): state.validators[i].activation_eligibility_epoch = epoch + 1 # move state forward and finalize to allow for activations - state.slot += spec.SLOTS_PER_EPOCH * 3 + next_slots(spec, state, spec.SLOTS_PER_EPOCH * 3) + state.finalized_checkpoint.epoch = epoch + 1 # Run first registry update. Do not yield test vectors diff --git a/tests/core/pyspec/eth2spec/test/phase_0/epoch_processing/test_process_rewards_and_penalties.py b/tests/core/pyspec/eth2spec/test/phase_0/epoch_processing/test_process_rewards_and_penalties.py index 525417f69a..af695fe697 100644 --- a/tests/core/pyspec/eth2spec/test/phase_0/epoch_processing/test_process_rewards_and_penalties.py +++ b/tests/core/pyspec/eth2spec/test/phase_0/epoch_processing/test_process_rewards_and_penalties.py @@ -1,8 +1,9 @@ from eth2spec.test.context import ( - spec_state_test, with_all_phases, spec_test, - misc_balances, with_custom_state, - low_single_balance, zero_activation_threshold, - single_phase, + spec_state_test, spec_test, + with_all_phases, with_phases, single_phase, + with_custom_state, + zero_activation_threshold, + misc_balances, low_single_balance, ) from eth2spec.test.helpers.state import ( next_epoch, @@ -20,7 +21,34 @@ def run_process_rewards_and_penalties(spec, state): yield from run_epoch_processing_with(spec, state, 'process_rewards_and_penalties') -@with_all_phases +def prepare_state_with_full_attestations(spec, state, empty=False): + # Go to start of next epoch to ensure can have full participation + next_epoch(spec, state) + + start_slot = state.slot + start_epoch = spec.get_current_epoch(state) + next_epoch_start_slot = spec.compute_start_slot_at_epoch(start_epoch + 1) + attestations = [] + for _ in range(spec.SLOTS_PER_EPOCH + spec.MIN_ATTESTATION_INCLUSION_DELAY): + # create an attestation for each index in each slot in epoch + if state.slot < next_epoch_start_slot: + for committee_index in range(spec.get_committee_count_at_slot(state, state.slot)): + attestation = get_valid_attestation(spec, state, index=committee_index, empty=empty, signed=True) + attestations.append(attestation) + # fill each created slot in state after inclusion delay + if state.slot >= start_slot + spec.MIN_ATTESTATION_INCLUSION_DELAY: + inclusion_slot = state.slot - spec.MIN_ATTESTATION_INCLUSION_DELAY + include_attestations = [att for att in attestations if att.data.slot == inclusion_slot] + add_attestations_to_state(spec, state, include_attestations, state.slot) + next_slot(spec, state) + + assert state.slot == next_epoch_start_slot + spec.MIN_ATTESTATION_INCLUSION_DELAY + assert len(state.previous_epoch_attestations) == len(attestations) + + return attestations + + +@with_phases(['phase0']) @spec_state_test def test_genesis_epoch_no_attestations_no_penalties(spec, state): pre_state = state.copy() @@ -33,7 +61,7 @@ def test_genesis_epoch_no_attestations_no_penalties(spec, state): assert state.balances[index] == pre_state.balances[index] -@with_all_phases +@with_phases(['phase0']) @spec_state_test def test_genesis_epoch_full_attestations_no_rewards(spec, state): attestations = [] @@ -59,25 +87,6 @@ def test_genesis_epoch_full_attestations_no_rewards(spec, state): assert state.balances[index] == pre_state.balances[index] -def prepare_state_with_full_attestations(spec, state, empty=False): - attestations = [] - for slot in range(spec.SLOTS_PER_EPOCH + spec.MIN_ATTESTATION_INCLUSION_DELAY): - # create an attestation for each slot in epoch - if slot < spec.SLOTS_PER_EPOCH: - attestation = get_valid_attestation(spec, state, empty=empty, signed=True) - attestations.append(attestation) - # fill each created slot in state after inclusion delay - if slot - spec.MIN_ATTESTATION_INCLUSION_DELAY >= 0: - include_att = attestations[slot - spec.MIN_ATTESTATION_INCLUSION_DELAY] - add_attestations_to_state(spec, state, [include_att], state.slot) - next_slot(spec, state) - - assert spec.compute_epoch_at_slot(state.slot) == spec.GENESIS_EPOCH + 1 - assert len(state.previous_epoch_attestations) == spec.SLOTS_PER_EPOCH - - return attestations - - @with_all_phases @spec_state_test def test_full_attestations(spec, state): @@ -88,7 +97,7 @@ def test_full_attestations(spec, state): yield from run_process_rewards_and_penalties(spec, state) attesting_indices = spec.get_unslashed_attesting_indices(state, attestations) - assert len(attesting_indices) > 0 + assert len(attesting_indices) == len(pre_state.validators) for index in range(len(pre_state.validators)): if index in attesting_indices: assert state.balances[index] > pre_state.balances[index] @@ -168,6 +177,7 @@ def test_full_attestations_one_validaor_one_gwei(spec, state): @with_all_phases @spec_state_test def test_no_attestations_all_penalties(spec, state): + # Move to next epoch to ensure rewards/penalties are processed next_epoch(spec, state) pre_state = state.copy() @@ -237,26 +247,14 @@ def test_duplicate_attestation(spec, state): @spec_state_test # Case when some eligible attestations are slashed. Modifies attesting_balance and consequently rewards/penalties. def test_attestations_some_slashed(spec, state): - attestations = [] - for slot in range(spec.SLOTS_PER_EPOCH + spec.MIN_ATTESTATION_INCLUSION_DELAY): - # create an attestation for each slot in epoch - if slot < spec.SLOTS_PER_EPOCH: - attestation = get_valid_attestation(spec, state, signed=True) - attestations.append(attestation) - # fill each created slot in state after inclusion delay - if slot - spec.MIN_ATTESTATION_INCLUSION_DELAY >= 0: - include_att = attestations[slot - spec.MIN_ATTESTATION_INCLUSION_DELAY] - add_attestations_to_state(spec, state, [include_att], state.slot) - next_slot(spec, state) - + attestations = prepare_state_with_full_attestations(spec, state) attesting_indices_before_slashings = list(spec.get_unslashed_attesting_indices(state, attestations)) # Slash maximum amount of validators allowed per epoch. for i in range(spec.MIN_PER_EPOCH_CHURN_LIMIT): spec.slash_validator(state, attesting_indices_before_slashings[i]) - assert spec.compute_epoch_at_slot(state.slot) == spec.GENESIS_EPOCH + 1 - assert len(state.previous_epoch_attestations) == spec.SLOTS_PER_EPOCH + assert len(state.previous_epoch_attestations) == len(attestations) pre_state = state.copy() @@ -267,6 +265,8 @@ def test_attestations_some_slashed(spec, state): assert len(attesting_indices_before_slashings) - len(attesting_indices) == spec.MIN_PER_EPOCH_CHURN_LIMIT for index in range(len(pre_state.validators)): if index in attesting_indices: + # non-slashed attester should gain reward assert state.balances[index] > pre_state.balances[index] else: + # Slashed non-proposer attester should have penalty assert state.balances[index] < pre_state.balances[index] diff --git a/tests/core/pyspec/eth2spec/test/phase_0/epoch_processing/test_process_slashings.py b/tests/core/pyspec/eth2spec/test/phase_0/epoch_processing/test_process_slashings.py index c58da5a4ae..23c8ce11ad 100644 --- a/tests/core/pyspec/eth2spec/test/phase_0/epoch_processing/test_process_slashings.py +++ b/tests/core/pyspec/eth2spec/test/phase_0/epoch_processing/test_process_slashings.py @@ -2,6 +2,7 @@ from eth2spec.test.phase_0.epoch_processing.run_epoch_process_base import ( run_epoch_processing_with, run_epoch_processing_to ) +from eth2spec.test.helpers.state import next_epoch def run_process_slashings(spec, state): @@ -79,7 +80,7 @@ def test_small_penalty(spec, state): @spec_state_test def test_scaled_penalties(spec, state): # skip to next epoch - state.slot = spec.SLOTS_PER_EPOCH + next_epoch(spec, state) # Also mock some previous slashings, so that we test to have the delta in the penalties computation. base = spec.EJECTION_BALANCE diff --git a/tests/core/pyspec/eth2spec/test/phase_1/block_processing/test_process_attestation.py b/tests/core/pyspec/eth2spec/test/phase_1/block_processing/test_process_attestation.py new file mode 100644 index 0000000000..ed43283274 --- /dev/null +++ b/tests/core/pyspec/eth2spec/test/phase_1/block_processing/test_process_attestation.py @@ -0,0 +1,46 @@ +from eth2spec.test.context import ( + with_all_phases_except, + spec_state_test, + always_bls, +) +from eth2spec.test.helpers.state import transition_to +from eth2spec.test.helpers.attestations import ( + run_attestation_processing, + get_valid_late_attestation, + get_valid_on_time_attestation, +) + + +@with_all_phases_except(['phase0']) +@spec_state_test +@always_bls +def test_on_time_success(spec, state): + attestation = get_valid_on_time_attestation(spec, state, signed=True) + + transition_to(spec, state, state.slot + spec.MIN_ATTESTATION_INCLUSION_DELAY) + + yield from run_attestation_processing(spec, state, attestation) + + +@with_all_phases_except(['phase0']) +@spec_state_test +@always_bls +def test_on_time_empty_custody_bits_blocks(spec, state): + attestation = get_valid_late_attestation(spec, state, signed=True) + + assert not any(attestation.custody_bits_blocks) + + transition_to(spec, state, state.slot + spec.MIN_ATTESTATION_INCLUSION_DELAY) + + yield from run_attestation_processing(spec, state, attestation, False) + + +@with_all_phases_except(['phase0']) +@spec_state_test +@always_bls +def test_late_with_custody_bits_blocks(spec, state): + attestation = get_valid_on_time_attestation(spec, state, signed=True) + + transition_to(spec, state, state.slot + spec.MIN_ATTESTATION_INCLUSION_DELAY + 1) + + yield from run_attestation_processing(spec, state, attestation, False) diff --git a/tests/core/pyspec/eth2spec/test/phase_1/block_processing/test_process_custody_key_reveal.py b/tests/core/pyspec/eth2spec/test/phase_1/block_processing/test_process_custody_key_reveal.py index fb9157f2f1..8c2436d5b6 100644 --- a/tests/core/pyspec/eth2spec/test/phase_1/block_processing/test_process_custody_key_reveal.py +++ b/tests/core/pyspec/eth2spec/test/phase_1/block_processing/test_process_custody_key_reveal.py @@ -1,5 +1,6 @@ from eth2spec.test.helpers.custody import get_valid_custody_key_reveal from eth2spec.test.context import ( + PHASE0, with_all_phases_except, spec_state_test, expect_assertion_error, @@ -54,7 +55,7 @@ def run_custody_key_reveal_processing(spec, state, custody_key_reveal, valid=Tru yield 'post', state -@with_all_phases_except(['phase0']) +@with_all_phases_except([PHASE0]) @spec_state_test @always_bls def test_success(spec, state): @@ -64,7 +65,7 @@ def test_success(spec, state): yield from run_custody_key_reveal_processing(spec, state, custody_key_reveal) -@with_all_phases_except(['phase0']) +@with_all_phases_except([PHASE0]) @spec_state_test @always_bls def test_reveal_too_early(spec, state): @@ -73,7 +74,7 @@ def test_reveal_too_early(spec, state): yield from run_custody_key_reveal_processing(spec, state, custody_key_reveal, False) -@with_all_phases_except(['phase0']) +@with_all_phases_except([PHASE0]) @spec_state_test @always_bls def test_wrong_period(spec, state): @@ -82,7 +83,7 @@ def test_wrong_period(spec, state): yield from run_custody_key_reveal_processing(spec, state, custody_key_reveal, False) -@with_all_phases_except(['phase0']) +@with_all_phases_except([PHASE0]) @spec_state_test @always_bls def test_late_reveal(spec, state): @@ -92,7 +93,7 @@ def test_late_reveal(spec, state): yield from run_custody_key_reveal_processing(spec, state, custody_key_reveal) -@with_all_phases_except(['phase0']) +@with_all_phases_except([PHASE0]) @spec_state_test @always_bls def test_double_reveal(spec, state): @@ -104,7 +105,7 @@ def test_double_reveal(spec, state): yield from run_custody_key_reveal_processing(spec, state, custody_key_reveal, False) -@with_all_phases_except(['phase0']) +@with_all_phases_except([PHASE0]) @spec_state_test @always_bls def test_max_decrement(spec, state): diff --git a/tests/core/pyspec/eth2spec/test/phase_1/block_processing/test_process_early_derived_secret_reveal.py b/tests/core/pyspec/eth2spec/test/phase_1/block_processing/test_process_early_derived_secret_reveal.py index c5d9c5a63d..83b0fe3255 100644 --- a/tests/core/pyspec/eth2spec/test/phase_1/block_processing/test_process_early_derived_secret_reveal.py +++ b/tests/core/pyspec/eth2spec/test/phase_1/block_processing/test_process_early_derived_secret_reveal.py @@ -2,6 +2,7 @@ from eth2spec.test.helpers.block import apply_empty_block from eth2spec.test.helpers.state import next_epoch, get_balance from eth2spec.test.context import ( + PHASE0, with_all_phases_except, spec_state_test, expect_assertion_error, @@ -41,7 +42,7 @@ def run_early_derived_secret_reveal_processing(spec, state, randao_key_reveal, v yield 'post', state -@with_all_phases_except(['phase0']) +@with_all_phases_except([PHASE0]) @spec_state_test @always_bls def test_success(spec, state): @@ -50,7 +51,7 @@ def test_success(spec, state): yield from run_early_derived_secret_reveal_processing(spec, state, randao_key_reveal) -@with_all_phases_except(['phase0']) +@with_all_phases_except([PHASE0]) @spec_state_test @never_bls def test_reveal_from_current_epoch(spec, state): @@ -59,7 +60,7 @@ def test_reveal_from_current_epoch(spec, state): yield from run_early_derived_secret_reveal_processing(spec, state, randao_key_reveal, False) -@with_all_phases_except(['phase0']) +@with_all_phases_except([PHASE0]) @spec_state_test @never_bls def test_reveal_from_past_epoch(spec, state): @@ -70,7 +71,7 @@ def test_reveal_from_past_epoch(spec, state): yield from run_early_derived_secret_reveal_processing(spec, state, randao_key_reveal, False) -@with_all_phases_except(['phase0']) +@with_all_phases_except([PHASE0]) @spec_state_test @always_bls def test_reveal_with_custody_padding(spec, state): @@ -82,7 +83,7 @@ def test_reveal_with_custody_padding(spec, state): yield from run_early_derived_secret_reveal_processing(spec, state, randao_key_reveal, True) -@with_all_phases_except(['phase0']) +@with_all_phases_except([PHASE0]) @spec_state_test @always_bls def test_reveal_with_custody_padding_minus_one(spec, state): @@ -94,7 +95,7 @@ def test_reveal_with_custody_padding_minus_one(spec, state): yield from run_early_derived_secret_reveal_processing(spec, state, randao_key_reveal, True) -@with_all_phases_except(['phase0']) +@with_all_phases_except([PHASE0]) @spec_state_test @never_bls def test_double_reveal(spec, state): @@ -115,7 +116,7 @@ def test_double_reveal(spec, state): yield from run_early_derived_secret_reveal_processing(spec, state, randao_key_reveal2, False) -@with_all_phases_except(['phase0']) +@with_all_phases_except([PHASE0]) @spec_state_test @never_bls def test_revealer_is_slashed(spec, state): @@ -125,7 +126,7 @@ def test_revealer_is_slashed(spec, state): yield from run_early_derived_secret_reveal_processing(spec, state, randao_key_reveal, False) -@with_all_phases_except(['phase0']) +@with_all_phases_except([PHASE0]) @spec_state_test @never_bls def test_far_future_epoch(spec, state): diff --git a/tests/core/pyspec/eth2spec/test/sanity/test_blocks.py b/tests/core/pyspec/eth2spec/test/sanity/test_blocks.py index 8e9fcd8711..b6b6718724 100644 --- a/tests/core/pyspec/eth2spec/test/sanity/test_blocks.py +++ b/tests/core/pyspec/eth2spec/test/sanity/test_blocks.py @@ -2,7 +2,7 @@ from eth2spec.utils import bls -from eth2spec.test.helpers.state import get_balance, state_transition_and_sign_block, next_slot +from eth2spec.test.helpers.state import get_balance, state_transition_and_sign_block, next_slot, next_epoch from eth2spec.test.helpers.block import build_empty_block_for_next_slot, build_empty_block, sign_block, \ transition_unsigned_block from eth2spec.test.helpers.keys import privkeys, pubkeys @@ -11,7 +11,7 @@ from eth2spec.test.helpers.attestations import get_valid_attestation from eth2spec.test.helpers.deposits import prepare_state_and_deposit -from eth2spec.test.context import spec_state_test, with_all_phases, expect_assertion_error, always_bls +from eth2spec.test.context import spec_state_test, with_all_phases, expect_assertion_error, always_bls, with_phases @with_all_phases @@ -303,7 +303,8 @@ def test_proposer_after_inactive_index(spec, state): state.validators[inactive_index].exit_epoch = spec.get_current_epoch(state) # skip forward, get brand new proposers - state.slot = spec.SLOTS_PER_EPOCH * 2 + next_epoch(spec, state) + next_epoch(spec, state) block = build_empty_block_for_next_slot(spec, state) state_transition_and_sign_block(spec, state, block) @@ -415,7 +416,7 @@ def test_deposit_top_up(spec, state): @with_all_phases @spec_state_test def test_attestation(spec, state): - state.slot = spec.SLOTS_PER_EPOCH + next_epoch(spec, state) yield 'pre', state @@ -423,7 +424,7 @@ def test_attestation(spec, state): # Add to state via block transition pre_current_attestations_len = len(state.current_epoch_attestations) - attestation_block = build_empty_block(spec, state, state.slot + 1 + spec.MIN_ATTESTATION_INCLUSION_DELAY) + attestation_block = build_empty_block(spec, state, state.slot + spec.MIN_ATTESTATION_INCLUSION_DELAY) attestation_block.body.attestations.append(attestation) signed_attestation_block = state_transition_and_sign_block(spec, state, attestation_block) @@ -442,7 +443,9 @@ def test_attestation(spec, state): assert spec.hash_tree_root(state.previous_epoch_attestations) == pre_current_attestations_root -@with_all_phases +# In phase1 a committee is computed for PERSISTENT_COMMITTEE_PERIOD slots ago, +# exceeding the minimal-config randao mixes memory size. +@with_phases(['phase0']) @spec_state_test def test_voluntary_exit(spec, state): validator_index = spec.get_active_validator_indices( diff --git a/tests/core/pyspec/eth2spec/test/test_finality.py b/tests/core/pyspec/eth2spec/test/test_finality.py index 8ae50d4369..0b99cc3f92 100644 --- a/tests/core/pyspec/eth2spec/test/test_finality.py +++ b/tests/core/pyspec/eth2spec/test/test_finality.py @@ -1,5 +1,6 @@ -from eth2spec.test.context import spec_state_test, never_bls, with_all_phases -from eth2spec.test.helpers.state import next_epoch, next_epoch_with_attestations +from eth2spec.test.context import spec_state_test, never_bls, with_all_phases, with_phases +from eth2spec.test.helpers.state import next_epoch +from eth2spec.test.helpers.attestations import next_epoch_with_attestations from eth2spec.test.helpers.block import apply_empty_block @@ -28,7 +29,7 @@ def check_finality(spec, assert state.finalized_checkpoint == prev_state.finalized_checkpoint -@with_all_phases +@with_phases(["phase0"]) @spec_state_test @never_bls def test_finality_no_updates_at_genesis(spec, state): diff --git a/tests/formats/operations/README.md b/tests/formats/operations/README.md index f1ec0429aa..bb4636ec05 100644 --- a/tests/formats/operations/README.md +++ b/tests/formats/operations/README.md @@ -18,11 +18,11 @@ A YAML-encoded `BeaconState`, the state before applying the operation. Also available as `pre.ssz`. -### `<operation-name>.yaml` +### `<input-name>.yaml` A YAML-encoded operation object, e.g. a `ProposerSlashing`, or `Deposit`. -Also available as `<operation-name>.ssz`. +Also available as `<input-name>.ssz`. ### `post.yaml` @@ -39,14 +39,14 @@ This excludes the other parts of the block-transition. Operations: -| *`operation-name`* | *`operation-object`* | *`input name`* | *`processing call`* | -|-------------------------|----------------------|----------------------|--------------------------------------------------------| -| `attestation` | `Attestation` | `attestation` | `process_attestation(state, attestation)` | -| `attester_slashing` | `AttesterSlashing` | `attester_slashing` | `process_attester_slashing(state, attester_slashing)` | -| `block_header` | `Block` | **`block`** | `process_block_header(state, block)` | -| `deposit` | `Deposit` | `deposit` | `process_deposit(state, deposit)` | -| `proposer_slashing` | `ProposerSlashing` | `proposer_slashing` | `process_proposer_slashing(state, proposer_slashing)` | -| `voluntary_exit` | `VoluntaryExit` | `voluntary_exit` | `process_voluntary_exit(state, voluntary_exit)` | +| *`operation-name`* | *`operation-object`* | *`input name`* | *`processing call`* | +|-------------------------|-----------------------|----------------------|--------------------------------------------------------| +| `attestation` | `Attestation` | `attestation` | `process_attestation(state, attestation)` | +| `attester_slashing` | `AttesterSlashing` | `attester_slashing` | `process_attester_slashing(state, attester_slashing)` | +| `block_header` | `BeaconBlock` | **`block`** | `process_block_header(state, block)` | +| `deposit` | `Deposit` | `deposit` | `process_deposit(state, deposit)` | +| `proposer_slashing` | `ProposerSlashing` | `proposer_slashing` | `process_proposer_slashing(state, proposer_slashing)` | +| `voluntary_exit` | `SignedVoluntaryExit` | `voluntary_exit` | `process_voluntary_exit(state, voluntary_exit)` | Note that `block_header` is not strictly an operation (and is a full `Block`), but processed in the same manner, and hence included here. diff --git a/tests/formats/sanity/blocks.md b/tests/formats/sanity/blocks.md index 2b50d19cae..991bc35d22 100644 --- a/tests/formats/sanity/blocks.md +++ b/tests/formats/sanity/blocks.md @@ -25,7 +25,7 @@ Also available as `pre.ssz`. A series of files, with `<index>` in range `[0, blocks_count)`. Blocks need to be processed in order, following the main transition function (i.e. process slot and epoch transitions in between blocks as normal) -Each file is a YAML-encoded `BeaconBlock`. +Each file is a YAML-encoded `SignedBeaconBlock`. Each block is also available as `blocks_<index>.ssz` diff --git a/tests/generators/bls/main.py b/tests/generators/bls/main.py index bad4aab060..455292ae38 100644 --- a/tests/generators/bls/main.py +++ b/tests/generators/bls/main.py @@ -13,6 +13,7 @@ from py_ecc import bls from hashlib import sha256 +from eth2spec.test.context import PHASE0 def hash(x): return sha256(x).digest() @@ -202,7 +203,7 @@ def cases_fn() -> Iterable[gen_typing.TestCase]: print(data) (case_name, case_content) = data yield gen_typing.TestCase( - fork_name='phase0', + fork_name=PHASE0, runner_name='bls', handler_name=handler_name, suite_name='small', diff --git a/tests/generators/epoch_processing/README.md b/tests/generators/epoch_processing/README.md index 9b57875e2a..662b0b516d 100644 --- a/tests/generators/epoch_processing/README.md +++ b/tests/generators/epoch_processing/README.md @@ -5,7 +5,7 @@ Epoch processing covers the sub-transitions during an epoch change. An epoch-processing test-runner can consume these sub-transition test-suites, and handle different kinds of epoch sub-transitions by processing the cases using the specified test handler. -Information on the format of the tests can be found in the [epoch-processing test formats documentation](../../specs/test_formats/epoch_processing/README.md). +Information on the format of the tests can be found in the [epoch-processing test formats documentation](../../formats/epoch_processing/README.md). diff --git a/tests/generators/epoch_processing/main.py b/tests/generators/epoch_processing/main.py index 8f2a6e94fc..f3bbc21e6d 100644 --- a/tests/generators/epoch_processing/main.py +++ b/tests/generators/epoch_processing/main.py @@ -13,6 +13,7 @@ from gen_from_tests.gen import generate_from_tests from importlib import reload from eth2spec.config import config_util +from eth2spec.test.context import PHASE0 def create_provider(handler_name: str, tests_src, config_name: str) -> gen_typing.TestProvider: @@ -28,7 +29,7 @@ def cases_fn() -> Iterable[gen_typing.TestCase]: runner_name='epoch_processing', handler_name=handler_name, src=tests_src, - fork_name='phase0' + fork_name=PHASE0, ) return gen_typing.TestProvider(prepare=prepare_fn, make_cases=cases_fn) diff --git a/tests/generators/genesis/main.py b/tests/generators/genesis/main.py index 3563c3fd9d..8548b12c19 100644 --- a/tests/generators/genesis/main.py +++ b/tests/generators/genesis/main.py @@ -1,5 +1,6 @@ from typing import Iterable +from eth2spec.test.context import PHASE0 from eth2spec.test.genesis import test_initialization, test_validity from gen_base import gen_runner, gen_typing @@ -21,7 +22,7 @@ def cases_fn() -> Iterable[gen_typing.TestCase]: runner_name='genesis', handler_name=handler_name, src=tests_src, - fork_name='phase0' + fork_name=PHASE0, ) return gen_typing.TestProvider(prepare=prepare_fn, make_cases=cases_fn) diff --git a/tests/generators/operations/README.md b/tests/generators/operations/README.md index 5cb3afc989..a5d48c11b4 100644 --- a/tests/generators/operations/README.md +++ b/tests/generators/operations/README.md @@ -6,7 +6,7 @@ Operations (or "transactions" in previous spec iterations), An operation test-runner can consume these operation test-suites, and handle different kinds of operations by processing the cases using the specified test handler. -Information on the format of the tests can be found in the [operations test formats documentation](../../specs/test_formats/operations/README.md). +Information on the format of the tests can be found in the [operations test formats documentation](../../formats/operations/README.md). diff --git a/tests/generators/operations/main.py b/tests/generators/operations/main.py index 6906c9df71..935c7aa63d 100644 --- a/tests/generators/operations/main.py +++ b/tests/generators/operations/main.py @@ -15,6 +15,7 @@ from eth2spec.config import config_util from eth2spec.phase0 import spec as spec_phase0 from eth2spec.phase1 import spec as spec_phase1 +from eth2spec.test.context import PHASE0 def create_provider(handler_name: str, tests_src, config_name: str) -> gen_typing.TestProvider: @@ -30,7 +31,7 @@ def cases_fn() -> Iterable[gen_typing.TestCase]: runner_name='operations', handler_name=handler_name, src=tests_src, - fork_name='phase0' + fork_name=PHASE0, ) return gen_typing.TestProvider(prepare=prepare_fn, make_cases=cases_fn) diff --git a/tests/generators/sanity/README.md b/tests/generators/sanity/README.md index 6d2e2f30dd..cbc6aef06d 100644 --- a/tests/generators/sanity/README.md +++ b/tests/generators/sanity/README.md @@ -2,7 +2,7 @@ Sanity tests cover regular state-transitions in a common block-list format, to ensure the basics work. -Information on the format of the tests can be found in the [sanity test formats documentation](../../specs/test_formats/sanity/README.md). +Information on the format of the tests can be found in the [sanity test formats documentation](../../formats/sanity/README.md). diff --git a/tests/generators/sanity/main.py b/tests/generators/sanity/main.py index cfcbcfdb6d..74c85a9e8c 100644 --- a/tests/generators/sanity/main.py +++ b/tests/generators/sanity/main.py @@ -4,6 +4,7 @@ from gen_base import gen_runner, gen_typing from gen_from_tests.gen import generate_from_tests +from eth2spec.test.context import PHASE0 from eth2spec.test.sanity import test_blocks, test_slots from eth2spec.config import config_util from eth2spec.phase0 import spec as spec_phase0 @@ -23,7 +24,7 @@ def cases_fn() -> Iterable[gen_typing.TestCase]: runner_name='sanity', handler_name=handler_name, src=tests_src, - fork_name='phase0' + fork_name=PHASE0, ) return gen_typing.TestProvider(prepare=prepare_fn, make_cases=cases_fn) diff --git a/tests/generators/shuffling/main.py b/tests/generators/shuffling/main.py index 0ef2657c45..6069de77a3 100644 --- a/tests/generators/shuffling/main.py +++ b/tests/generators/shuffling/main.py @@ -6,6 +6,7 @@ from eth2spec.config import config_util from eth2spec.phase0 import spec as spec +from eth2spec.test.context import PHASE0 def shuffling_case_fn(seed, count): @@ -37,7 +38,7 @@ def prepare_fn(configs_path: str) -> str: def cases_fn() -> Iterable[gen_typing.TestCase]: for (case_name, case_fn) in shuffling_test_cases(): yield gen_typing.TestCase( - fork_name='phase0', + fork_name=PHASE0, runner_name='shuffling', handler_name='core', suite_name='shuffle', diff --git a/tests/generators/ssz_generic/main.py b/tests/generators/ssz_generic/main.py index 83e6da86de..8cfb2e3eb2 100644 --- a/tests/generators/ssz_generic/main.py +++ b/tests/generators/ssz_generic/main.py @@ -6,6 +6,7 @@ import ssz_boolean import ssz_uints import ssz_container +from eth2spec.test.context import PHASE0 def create_provider(handler_name: str, suite_name: str, case_maker) -> gen_typing.TestProvider: @@ -16,7 +17,7 @@ def prepare_fn(configs_path: str) -> str: def cases_fn() -> Iterable[gen_typing.TestCase]: for (case_name, case_fn) in case_maker(): yield gen_typing.TestCase( - fork_name='phase0', + fork_name=PHASE0, runner_name='ssz_generic', handler_name=handler_name, suite_name=suite_name, diff --git a/tests/generators/ssz_static/README.md b/tests/generators/ssz_static/README.md index 2a50401925..160d1ebb49 100644 --- a/tests/generators/ssz_static/README.md +++ b/tests/generators/ssz_static/README.md @@ -3,4 +3,4 @@ The purpose of this test-generator is to provide test-vectors for the most important applications of SSZ: the serialization and hashing of Eth2 data types. -Test-format documentation can be found [here](../../specs/test_formats/ssz_static/README.md). +Test-format documentation can be found [here](../../formats/ssz_static/README.md). diff --git a/tests/generators/ssz_static/main.py b/tests/generators/ssz_static/main.py index b7c9487677..b9cb51db08 100644 --- a/tests/generators/ssz_static/main.py +++ b/tests/generators/ssz_static/main.py @@ -8,6 +8,7 @@ from eth2spec.debug import random_value, encode from eth2spec.config import config_util from eth2spec.phase0 import spec +from eth2spec.test.context import PHASE0 from eth2spec.utils.ssz.ssz_typing import Container from eth2spec.utils.ssz.ssz_impl import ( hash_tree_root, @@ -44,7 +45,7 @@ def ssz_static_cases(seed: int, name, ssz_type, mode: random_value.Randomization for i in range(count): yield gen_typing.TestCase( - fork_name='phase0', + fork_name=PHASE0, runner_name='ssz_static', handler_name=name, suite_name=f"ssz_{random_mode_name}{'_chaos' if chaos else ''}",
bridgecrewio__checkov-2255
"checkov --add-check" failing due to missing templates directory in setup.py When running `checkov --add-check`, you get an error due to the templates not being installed properly ``` gitpod /workspace/checkov $ checkov --add-check _ _ ___| |__ ___ ___| | _______ __ / __| '_ \ / _ \/ __| |/ / _ \ \ / / | (__| | | | __/ (__| < (_) \ V / \___|_| |_|\___|\___|_|\_\___/ \_/ By bridgecrew.io | version: 2.0.744 What action would you like to take? (add) [add]: Enter the title of your new check (without a .py) [MyNewTest]: Select a category for this check (application_security, backup_and_recoveryconvention, encryption, general_security, iam, kubernetes, logging, networking, secrets) [iam]: Describe what this check does [Ensure that X does Y...]: What kind of check would you like to add? (terraform) [terraform]: Select the cloud provider this will run on (azure, aws, gcp) [aws]: Select a terraform object for this check (data, provider, resource) [resource]: Enter the terraform object type [aws_iam_policy]: Please ensure you are at the root of the Checkov repository before completing this prompt Traceback (most recent call last): File "/home/gitpod/.pyenv/versions/3.8.12/bin/checkov", line 9, in <module> sys.exit(run()) File "/home/gitpod/.pyenv/versions/3.8.12/lib/python3.8/site-packages/checkov/main.py", line 77, in run check.action() File "/home/gitpod/.pyenv/versions/3.8.12/lib/python3.8/site-packages/checkov/common/util/prompt.py", line 184, in action getattr(self, self.chosen_action)() File "/home/gitpod/.pyenv/versions/3.8.12/lib/python3.8/site-packages/checkov/common/util/prompt.py", line 189, in add self.populate_templates() File "/home/gitpod/.pyenv/versions/3.8.12/lib/python3.8/site-packages/checkov/common/util/prompt.py", line 196, in populate_templates tf_unit_test_template = self.template_env().get_template("unittest-terraform.jinja2") File "/home/gitpod/.pyenv/versions/3.8.12/lib/python3.8/site-packages/checkov/common/util/prompt.py", line 167, in template_env print("jinja2.list_templates: %s" % jinja2.list_templates()) AttributeError: module 'jinja2' has no attribute 'list_templates' gitpod /workspace/checkov $ ``` The problem occurs on Mac, regardless of whether checkov is installed using `pip3 install checkov` or `brew install checkov`. I think it will probably occur in other environments as well. The fix inside checkov's gitpod environment seems to be fairly simple - just copy the template files from the repo into the installed package: ``` gitpod /workspace/checkov $ ls /home/gitpod/.pyenv/versions/3.8.12/lib/python3.8/site-packages/checkov/common/util/templates/ __init__.py __pycache__ gitpod /workspace/checkov $ cp checkov/common/util/templates/*.jinja2 /home/gitpod/.pyenv/versions/3.8.12/lib/python3.8/site-packages/checkov/common/util/templates/ gitpod /workspace/checkov $ checkov --add-check _ _ ___| |__ ___ ___| | _______ __ / __| '_ \ / _ \/ __| |/ / _ \ \ / / | (__| | | | __/ (__| < (_) \ V / \___|_| |_|\___|\___|_|\_\___/ \_/ By bridgecrew.io | version: 2.0.744 What action would you like to take? (add) [add]: Enter the title of your new check (without a .py) [MyNewTest]: Select a category for this check (application_security, backup_and_recoveryconvention, encryption, general_security, iam, kubernetes, logging, networking, secrets) [iam]: Describe what this check does [Ensure that X does Y...]: What kind of check would you like to add? (terraform) [terraform]: Select the cloud provider this will run on (azure, aws, gcp) [aws]: Select a terraform object for this check (data, provider, resource) [resource]: Enter the terraform object type [aws_iam_policy]: Please ensure you are at the root of the Checkov repository before completing this prompt Creating Check MyNewTest.py in /workspace/checkov/checkov/terraform/checks/resource/aws Successfully created /workspace/checkov/checkov/terraform/checks/resource/aws/MyNewTest.py Creating Unit Test Stubs for MyNewTest in /workspace/checkov/tests/terraform/checks/resource/aws Successfully created /workspace/checkov/tests/terraform/checks/resource/aws/example_MyNewTest/MyNewTest.tf Successfully created /workspace/checkov/tests/terraform/checks/resource/aws/test_MyNewTest.py Next steps: 1) Edit your new check located in the checks/ directory listed above 2) Add both a PASS and FAIL unit test to the newly created unit test under the tests/ directory to show others how to fix failures gitpod /workspace/checkov $ ``` I _think_ the problem is simply due to the template files being inadvertently left out of checkov's `setup.py`, and that adding them into the `package_dir` section as below should probably fix things: ``` package_dir={ "checkov.terraform.checks.graph_checks": "checkov/terraform/checks/graph_checks", "checkov.common.util.templates": "checkov.common.util.templates" }, ``` However there's a number of directories under `checkov.common` that maybe should also be added to checkov's `setup.py`, and I'm not familiar enough with all of checkov's use cases to know which ones are important
[ { "content": "#!/usr/bin/env python\nimport logging\nimport os\nfrom importlib import util\nfrom os import path\n\nimport setuptools\nfrom setuptools import setup\n\n# read the contents of your README file\nthis_directory = path.abspath(path.dirname(__file__))\nwith open(path.join(this_directory, \"README.md\"), encoding=\"utf-8\") as f:\n long_description = f.read()\n\nlogger = logging.getLogger(__name__)\nspec = util.spec_from_file_location(\n \"checkov.version\", os.path.join(\"checkov\", \"version.py\")\n)\n# noinspection PyUnresolvedReferences\nmod = util.module_from_spec(spec)\nspec.loader.exec_module(mod) # type: ignore\nversion = mod.version # type: ignore\n\nsetup(\n extras_require={\n \"dev\": [\n \"pytest==5.3.1\",\n \"coverage==5.5\",\n \"coverage-badge\",\n \"GitPython==3.1.7\",\n \"bandit\",\n \"jsonschema\",\n ]\n },\n install_requires=[\n \"bc-python-hcl2>=0.3.24\",\n \"cloudsplaining>=0.4.1\",\n \"deep_merge\",\n \"tabulate\",\n \"colorama\",\n \"termcolor\",\n \"junit-xml>=1.9\",\n \"dpath>=1.5.0,<2\",\n \"pyyaml>=5.4.1\",\n \"boto3>=1.17\",\n \"GitPython\",\n \"jmespath\",\n \"tqdm\",\n \"update_checker\",\n \"semantic_version\",\n \"packaging\",\n \"networkx\",\n \"dockerfile-parse\",\n \"docker\",\n \"configargparse\",\n \"argcomplete\",\n \"detect-secrets\",\n \"policyuniverse\",\n \"typing-extensions\",\n \"cachetools\",\n \"cyclonedx-python-lib>=0.11.0,<1.0.0\",\n \"click>=8.0.0\",\n \"aiohttp\",\n \"aiodns\",\n \"aiomultiprocess\",\n \"jsonpath_ng\",\n \"jsonschema~=3.0\"\n ],\n license=\"Apache License 2.0\",\n name=\"checkov\",\n version=version,\n python_requires=\">=3.7\",\n description=\"Infrastructure as code static analysis\",\n author=\"bridgecrew\",\n author_email=\"[email protected]\",\n url=\"https://github.com/bridgecrewio/checkov\",\n packages=setuptools.find_packages(exclude=[\"tests*\", \"integration_tests*\"]),\n include_package_data=True,\n package_dir={\n \"checkov.terraform.checks.graph_checks\": \"checkov/terraform/checks/graph_checks\"\n },\n package_data={\n \"checkov.terraform.checks.graph_checks\": [\n \"aws/*.yaml\",\n \"gcp/*.yaml\",\n \"azure/*.yaml\",\n ]\n },\n scripts=[\"bin/checkov\", \"bin/checkov.cmd\"],\n long_description=long_description,\n long_description_content_type=\"text/markdown\",\n classifiers=[\n \"Environment :: Console\",\n \"Intended Audience :: Developers\",\n \"Intended Audience :: System Administrators\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: 3.9\",\n \"Programming Language :: Python :: 3.10\",\n \"Topic :: Security\",\n \"Topic :: Software Development :: Build Tools\",\n ],\n)\n", "path": "setup.py" } ]
[ { "content": "#!/usr/bin/env python\nimport logging\nimport os\nfrom importlib import util\nfrom os import path\n\nimport setuptools\nfrom setuptools import setup\n\n# read the contents of your README file\nthis_directory = path.abspath(path.dirname(__file__))\nwith open(path.join(this_directory, \"README.md\"), encoding=\"utf-8\") as f:\n long_description = f.read()\n\nlogger = logging.getLogger(__name__)\nspec = util.spec_from_file_location(\n \"checkov.version\", os.path.join(\"checkov\", \"version.py\")\n)\n# noinspection PyUnresolvedReferences\nmod = util.module_from_spec(spec)\nspec.loader.exec_module(mod) # type: ignore\nversion = mod.version # type: ignore\n\nsetup(\n extras_require={\n \"dev\": [\n \"pytest==5.3.1\",\n \"coverage==5.5\",\n \"coverage-badge\",\n \"GitPython==3.1.7\",\n \"bandit\",\n \"jsonschema\",\n ]\n },\n install_requires=[\n \"bc-python-hcl2>=0.3.24\",\n \"cloudsplaining>=0.4.1\",\n \"deep_merge\",\n \"tabulate\",\n \"colorama\",\n \"termcolor\",\n \"junit-xml>=1.9\",\n \"dpath>=1.5.0,<2\",\n \"pyyaml>=5.4.1\",\n \"boto3>=1.17\",\n \"GitPython\",\n \"jmespath\",\n \"tqdm\",\n \"update_checker\",\n \"semantic_version\",\n \"packaging\",\n \"networkx\",\n \"dockerfile-parse\",\n \"docker\",\n \"configargparse\",\n \"argcomplete\",\n \"detect-secrets\",\n \"policyuniverse\",\n \"typing-extensions\",\n \"cachetools\",\n \"cyclonedx-python-lib>=0.11.0,<1.0.0\",\n \"click>=8.0.0\",\n \"aiohttp\",\n \"aiodns\",\n \"aiomultiprocess\",\n \"jsonpath_ng\",\n \"jsonschema~=3.0\"\n ],\n license=\"Apache License 2.0\",\n name=\"checkov\",\n version=version,\n python_requires=\">=3.7\",\n description=\"Infrastructure as code static analysis\",\n author=\"bridgecrew\",\n author_email=\"[email protected]\",\n url=\"https://github.com/bridgecrewio/checkov\",\n packages=setuptools.find_packages(exclude=[\"tests*\", \"integration_tests*\"]),\n include_package_data=True,\n package_dir={\n \"checkov.terraform.checks.graph_checks\": \"checkov/terraform/checks/graph_checks\"\n },\n package_data={\n \"checkov.terraform.checks.graph_checks\": [\n \"aws/*.yaml\",\n \"gcp/*.yaml\",\n \"azure/*.yaml\",\n ],\n \"checkov.common.util.templates\": [\n \"*.jinja2\"\n ]\n },\n scripts=[\"bin/checkov\", \"bin/checkov.cmd\"],\n long_description=long_description,\n long_description_content_type=\"text/markdown\",\n classifiers=[\n \"Environment :: Console\",\n \"Intended Audience :: Developers\",\n \"Intended Audience :: System Administrators\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: 3.9\",\n \"Programming Language :: Python :: 3.10\",\n \"Topic :: Security\",\n \"Topic :: Software Development :: Build Tools\",\n ],\n)\n", "path": "setup.py" } ]
diff --git a/setup.py b/setup.py index b1584bec05..856af5f317 100644 --- a/setup.py +++ b/setup.py @@ -84,6 +84,9 @@ "aws/*.yaml", "gcp/*.yaml", "azure/*.yaml", + ], + "checkov.common.util.templates": [ + "*.jinja2" ] }, scripts=["bin/checkov", "bin/checkov.cmd"],
aio-libs__aiohttp-569
Incorrect remote_addr in access log All records contain 127.0.0.1 as remote_addr
[ { "content": "\"\"\"Various helper functions\"\"\"\nimport base64\nimport io\nimport os\nfrom urllib.parse import quote, urlencode\nfrom collections import namedtuple\nfrom wsgiref.handlers import format_date_time\n\nfrom . import hdrs, multidict\nfrom .errors import InvalidURL\n\n__all__ = ('BasicAuth', 'FormData', 'parse_mimetype')\n\n\nclass BasicAuth(namedtuple('BasicAuth', ['login', 'password', 'encoding'])):\n \"\"\"Http basic authentication helper.\n\n :param str login: Login\n :param str password: Password\n :param str encoding: (optional) encoding ('latin1' by default)\n \"\"\"\n\n def __new__(cls, login, password='', encoding='latin1'):\n if login is None:\n raise ValueError('None is not allowed as login value')\n\n if password is None:\n raise ValueError('None is not allowed as password value')\n\n return super().__new__(cls, login, password, encoding)\n\n def encode(self):\n \"\"\"Encode credentials.\"\"\"\n creds = ('%s:%s' % (self.login, self.password)).encode(self.encoding)\n return 'Basic %s' % base64.b64encode(creds).decode(self.encoding)\n\n\nclass FormData:\n \"\"\"Helper class for multipart/form-data and\n application/x-www-form-urlencoded body generation.\"\"\"\n\n def __init__(self, fields=()):\n from . import multipart\n self._writer = multipart.MultipartWriter('form-data')\n self._fields = []\n self._is_multipart = False\n\n if isinstance(fields, dict):\n fields = list(fields.items())\n elif not isinstance(fields, (list, tuple)):\n fields = (fields,)\n self.add_fields(*fields)\n\n @property\n def is_multipart(self):\n return self._is_multipart\n\n @property\n def content_type(self):\n if self._is_multipart:\n return self._writer.headers[hdrs.CONTENT_TYPE]\n else:\n return 'application/x-www-form-urlencoded'\n\n def add_field(self, name, value, *, content_type=None, filename=None,\n content_transfer_encoding=None):\n\n if isinstance(value, io.IOBase):\n self._is_multipart = True\n elif isinstance(value, (bytes, bytearray, memoryview)):\n if filename is None and content_transfer_encoding is None:\n filename = name\n\n type_options = multidict.MultiDict({'name': name})\n if filename is not None and not isinstance(filename, str):\n raise TypeError('filename must be an instance of str. '\n 'Got: %s' % filename)\n if filename is None and isinstance(value, io.IOBase):\n filename = guess_filename(value, name)\n if filename is not None:\n type_options['filename'] = filename\n self._is_multipart = True\n\n headers = {}\n if content_type is not None:\n if not isinstance(content_type, str):\n raise TypeError('content_type must be an instance of str. '\n 'Got: %s' % content_type)\n headers[hdrs.CONTENT_TYPE] = content_type\n self._is_multipart = True\n if content_transfer_encoding is not None:\n if not isinstance(content_transfer_encoding, str):\n raise TypeError('content_transfer_encoding must be an instance'\n ' of str. Got: %s' % content_transfer_encoding)\n headers[hdrs.CONTENT_TRANSFER_ENCODING] = content_transfer_encoding\n self._is_multipart = True\n\n self._fields.append((type_options, headers, value))\n\n def add_fields(self, *fields):\n to_add = list(fields)\n\n while to_add:\n rec = to_add.pop(0)\n\n if isinstance(rec, io.IOBase):\n k = guess_filename(rec, 'unknown')\n self.add_field(k, rec)\n\n elif isinstance(rec,\n (multidict.MultiDictProxy,\n multidict.MultiDict)):\n to_add.extend(rec.items())\n\n elif isinstance(rec, (list, tuple)) and len(rec) == 2:\n k, fp = rec\n self.add_field(k, fp)\n\n else:\n raise TypeError('Only io.IOBase, multidict and (name, file) '\n 'pairs allowed, use .add_field() for passing '\n 'more complex parameters')\n\n def _gen_form_urlencoded(self, encoding):\n # form data (x-www-form-urlencoded)\n data = []\n for type_options, _, value in self._fields:\n data.append((type_options['name'], value))\n\n data = urlencode(data, doseq=True)\n return data.encode(encoding)\n\n def _gen_form_data(self, *args, **kwargs):\n \"\"\"Encode a list of fields using the multipart/form-data MIME format\"\"\"\n for dispparams, headers, value in self._fields:\n part = self._writer.append(value, headers)\n if dispparams:\n part.set_content_disposition('form-data', **dispparams)\n # FIXME cgi.FieldStorage doesn't likes body parts with\n # Content-Length which were sent via chunked transfer encoding\n part.headers.pop(hdrs.CONTENT_LENGTH, None)\n yield from self._writer.serialize()\n\n def __call__(self, encoding):\n if self._is_multipart:\n return self._gen_form_data(encoding)\n else:\n return self._gen_form_urlencoded(encoding)\n\n\ndef parse_mimetype(mimetype):\n \"\"\"Parses a MIME type into its components.\n\n :param str mimetype: MIME type\n\n :returns: 4 element tuple for MIME type, subtype, suffix and parameters\n :rtype: tuple\n\n Example:\n\n >>> parse_mimetype('text/html; charset=utf-8')\n ('text', 'html', '', {'charset': 'utf-8'})\n\n \"\"\"\n if not mimetype:\n return '', '', '', {}\n\n parts = mimetype.split(';')\n params = []\n for item in parts[1:]:\n if not item:\n continue\n key, value = item.split('=', 1) if '=' in item else (item, '')\n params.append((key.lower().strip(), value.strip(' \"')))\n params = dict(params)\n\n fulltype = parts[0].strip().lower()\n if fulltype == '*':\n fulltype = '*/*'\n\n mtype, stype = fulltype.split('/', 1) \\\n if '/' in fulltype else (fulltype, '')\n stype, suffix = stype.split('+', 1) if '+' in stype else (stype, '')\n\n return mtype, stype, suffix, params\n\n\ndef str_to_bytes(s, encoding='utf-8'):\n if isinstance(s, str):\n return s.encode(encoding)\n return s\n\n\ndef guess_filename(obj, default=None):\n name = getattr(obj, 'name', None)\n if name and name[0] != '<' and name[-1] != '>':\n return os.path.split(name)[-1]\n return default\n\n\ndef parse_remote_addr(forward):\n if isinstance(forward, str):\n # we only took the last one\n # http://en.wikipedia.org/wiki/X-Forwarded-For\n if ',' in forward:\n forward = forward.rsplit(',', 1)[-1].strip()\n\n # find host and port on ipv6 address\n if '[' in forward and ']' in forward:\n host = forward.split(']')[0][1:].lower()\n elif ':' in forward and forward.count(':') == 1:\n host = forward.split(':')[0].lower()\n else:\n host = forward\n\n forward = forward.split(']')[-1]\n if ':' in forward and forward.count(':') == 1:\n port = forward.split(':', 1)[1]\n else:\n port = 80\n\n remote = (host, port)\n else:\n remote = forward\n\n return remote[0], str(remote[1])\n\n\ndef atoms(message, environ, response, transport, request_time):\n \"\"\"Gets atoms for log formatting.\"\"\"\n if message:\n r = '{} {} HTTP/{}.{}'.format(\n message.method, message.path,\n message.version[0], message.version[1])\n headers = message.headers\n else:\n r = ''\n headers = {}\n\n if transport is not None:\n remote_addr = parse_remote_addr(\n transport.get_extra_info('addr', '127.0.0.1'))\n else:\n remote_addr = ('',)\n\n atoms = {\n 'h': remote_addr[0],\n 'l': '-',\n 'u': '-',\n 't': format_date_time(None),\n 'r': r,\n 's': str(getattr(response, 'status', '')),\n 'b': str(getattr(response, 'output_length', '')),\n 'f': headers.get(hdrs.REFERER, '-'),\n 'a': headers.get(hdrs.USER_AGENT, '-'),\n 'T': str(int(request_time)),\n 'D': str(request_time).split('.', 1)[-1][:6],\n 'p': \"<%s>\" % os.getpid()\n }\n\n return atoms\n\n\nclass SafeAtoms(dict):\n \"\"\"Copy from gunicorn\"\"\"\n\n def __init__(self, atoms, i_headers, o_headers):\n dict.__init__(self)\n\n self._i_headers = i_headers\n self._o_headers = o_headers\n\n for key, value in atoms.items():\n self[key] = value.replace('\"', '\\\\\"')\n\n def __getitem__(self, k):\n if k.startswith('{'):\n if k.endswith('}i'):\n headers = self._i_headers\n elif k.endswith('}o'):\n headers = self._o_headers\n else:\n headers = None\n\n if headers is not None:\n return headers.get(k[1:-2], '-')\n\n if k in self:\n return super(SafeAtoms, self).__getitem__(k)\n else:\n return '-'\n\n\n_marker = object()\n\n\nclass reify:\n \"\"\"Use as a class method decorator. It operates almost exactly like\n the Python ``@property`` decorator, but it puts the result of the\n method it decorates into the instance dict after the first call,\n effectively replacing the function it decorates with an instance\n variable. It is, in Python parlance, a non-data descriptor.\n\n \"\"\"\n\n def __init__(self, wrapped):\n self.wrapped = wrapped\n try:\n self.__doc__ = wrapped.__doc__\n except: # pragma: no cover\n pass\n self.name = wrapped.__name__\n\n def __get__(self, inst, owner, _marker=_marker):\n if inst is None:\n return self\n val = inst.__dict__.get(self.name, _marker)\n if val is not _marker:\n return val\n val = self.wrapped(inst)\n inst.__dict__[self.name] = val\n return val\n\n def __set__(self, inst, value):\n raise AttributeError(\"reified property is read-only\")\n\n\n# The unreserved URI characters (RFC 3986)\nUNRESERVED_SET = frozenset(\n \"ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz\" +\n \"0123456789-._~\")\n\n\ndef unquote_unreserved(uri):\n \"\"\"Un-escape any percent-escape sequences in a URI that are unreserved\n characters. This leaves all reserved, illegal and non-ASCII bytes encoded.\n \"\"\"\n parts = uri.split('%')\n for i in range(1, len(parts)):\n h = parts[i][0:2]\n if len(h) == 2 and h.isalnum():\n try:\n c = chr(int(h, 16))\n except ValueError:\n raise InvalidURL(\"Invalid percent-escape sequence: '%s'\" % h)\n\n if c in UNRESERVED_SET:\n parts[i] = c + parts[i][2:]\n else:\n parts[i] = '%' + parts[i]\n else:\n parts[i] = '%' + parts[i]\n return ''.join(parts)\n\n\ndef requote_uri(uri):\n \"\"\"Re-quote the given URI.\n\n This function passes the given URI through an unquote/quote cycle to\n ensure that it is fully and consistently quoted.\n \"\"\"\n safe_with_percent = \"!#$%&'()*+,/:;=?@[]~\"\n safe_without_percent = \"!#$&'()*+,/:;=?@[]~\"\n try:\n # Unquote only the unreserved characters\n # Then quote only illegal characters (do not quote reserved,\n # unreserved, or '%')\n return quote(unquote_unreserved(uri), safe=safe_with_percent)\n except InvalidURL:\n # We couldn't unquote the given URI, so let's try quoting it, but\n # there may be unquoted '%'s in the URI. We need to make sure they're\n # properly quoted so they do not cause issues elsewhere.\n return quote(uri, safe=safe_without_percent)\n", "path": "aiohttp/helpers.py" } ]
[ { "content": "\"\"\"Various helper functions\"\"\"\nimport base64\nimport io\nimport os\nfrom urllib.parse import quote, urlencode\nfrom collections import namedtuple\nfrom wsgiref.handlers import format_date_time\n\nfrom . import hdrs, multidict\nfrom .errors import InvalidURL\n\n__all__ = ('BasicAuth', 'FormData', 'parse_mimetype')\n\n\nclass BasicAuth(namedtuple('BasicAuth', ['login', 'password', 'encoding'])):\n \"\"\"Http basic authentication helper.\n\n :param str login: Login\n :param str password: Password\n :param str encoding: (optional) encoding ('latin1' by default)\n \"\"\"\n\n def __new__(cls, login, password='', encoding='latin1'):\n if login is None:\n raise ValueError('None is not allowed as login value')\n\n if password is None:\n raise ValueError('None is not allowed as password value')\n\n return super().__new__(cls, login, password, encoding)\n\n def encode(self):\n \"\"\"Encode credentials.\"\"\"\n creds = ('%s:%s' % (self.login, self.password)).encode(self.encoding)\n return 'Basic %s' % base64.b64encode(creds).decode(self.encoding)\n\n\nclass FormData:\n \"\"\"Helper class for multipart/form-data and\n application/x-www-form-urlencoded body generation.\"\"\"\n\n def __init__(self, fields=()):\n from . import multipart\n self._writer = multipart.MultipartWriter('form-data')\n self._fields = []\n self._is_multipart = False\n\n if isinstance(fields, dict):\n fields = list(fields.items())\n elif not isinstance(fields, (list, tuple)):\n fields = (fields,)\n self.add_fields(*fields)\n\n @property\n def is_multipart(self):\n return self._is_multipart\n\n @property\n def content_type(self):\n if self._is_multipart:\n return self._writer.headers[hdrs.CONTENT_TYPE]\n else:\n return 'application/x-www-form-urlencoded'\n\n def add_field(self, name, value, *, content_type=None, filename=None,\n content_transfer_encoding=None):\n\n if isinstance(value, io.IOBase):\n self._is_multipart = True\n elif isinstance(value, (bytes, bytearray, memoryview)):\n if filename is None and content_transfer_encoding is None:\n filename = name\n\n type_options = multidict.MultiDict({'name': name})\n if filename is not None and not isinstance(filename, str):\n raise TypeError('filename must be an instance of str. '\n 'Got: %s' % filename)\n if filename is None and isinstance(value, io.IOBase):\n filename = guess_filename(value, name)\n if filename is not None:\n type_options['filename'] = filename\n self._is_multipart = True\n\n headers = {}\n if content_type is not None:\n if not isinstance(content_type, str):\n raise TypeError('content_type must be an instance of str. '\n 'Got: %s' % content_type)\n headers[hdrs.CONTENT_TYPE] = content_type\n self._is_multipart = True\n if content_transfer_encoding is not None:\n if not isinstance(content_transfer_encoding, str):\n raise TypeError('content_transfer_encoding must be an instance'\n ' of str. Got: %s' % content_transfer_encoding)\n headers[hdrs.CONTENT_TRANSFER_ENCODING] = content_transfer_encoding\n self._is_multipart = True\n\n self._fields.append((type_options, headers, value))\n\n def add_fields(self, *fields):\n to_add = list(fields)\n\n while to_add:\n rec = to_add.pop(0)\n\n if isinstance(rec, io.IOBase):\n k = guess_filename(rec, 'unknown')\n self.add_field(k, rec)\n\n elif isinstance(rec,\n (multidict.MultiDictProxy,\n multidict.MultiDict)):\n to_add.extend(rec.items())\n\n elif isinstance(rec, (list, tuple)) and len(rec) == 2:\n k, fp = rec\n self.add_field(k, fp)\n\n else:\n raise TypeError('Only io.IOBase, multidict and (name, file) '\n 'pairs allowed, use .add_field() for passing '\n 'more complex parameters')\n\n def _gen_form_urlencoded(self, encoding):\n # form data (x-www-form-urlencoded)\n data = []\n for type_options, _, value in self._fields:\n data.append((type_options['name'], value))\n\n data = urlencode(data, doseq=True)\n return data.encode(encoding)\n\n def _gen_form_data(self, *args, **kwargs):\n \"\"\"Encode a list of fields using the multipart/form-data MIME format\"\"\"\n for dispparams, headers, value in self._fields:\n part = self._writer.append(value, headers)\n if dispparams:\n part.set_content_disposition('form-data', **dispparams)\n # FIXME cgi.FieldStorage doesn't likes body parts with\n # Content-Length which were sent via chunked transfer encoding\n part.headers.pop(hdrs.CONTENT_LENGTH, None)\n yield from self._writer.serialize()\n\n def __call__(self, encoding):\n if self._is_multipart:\n return self._gen_form_data(encoding)\n else:\n return self._gen_form_urlencoded(encoding)\n\n\ndef parse_mimetype(mimetype):\n \"\"\"Parses a MIME type into its components.\n\n :param str mimetype: MIME type\n\n :returns: 4 element tuple for MIME type, subtype, suffix and parameters\n :rtype: tuple\n\n Example:\n\n >>> parse_mimetype('text/html; charset=utf-8')\n ('text', 'html', '', {'charset': 'utf-8'})\n\n \"\"\"\n if not mimetype:\n return '', '', '', {}\n\n parts = mimetype.split(';')\n params = []\n for item in parts[1:]:\n if not item:\n continue\n key, value = item.split('=', 1) if '=' in item else (item, '')\n params.append((key.lower().strip(), value.strip(' \"')))\n params = dict(params)\n\n fulltype = parts[0].strip().lower()\n if fulltype == '*':\n fulltype = '*/*'\n\n mtype, stype = fulltype.split('/', 1) \\\n if '/' in fulltype else (fulltype, '')\n stype, suffix = stype.split('+', 1) if '+' in stype else (stype, '')\n\n return mtype, stype, suffix, params\n\n\ndef str_to_bytes(s, encoding='utf-8'):\n if isinstance(s, str):\n return s.encode(encoding)\n return s\n\n\ndef guess_filename(obj, default=None):\n name = getattr(obj, 'name', None)\n if name and name[0] != '<' and name[-1] != '>':\n return os.path.split(name)[-1]\n return default\n\n\ndef parse_remote_addr(forward):\n if isinstance(forward, str):\n # we only took the last one\n # http://en.wikipedia.org/wiki/X-Forwarded-For\n if ',' in forward:\n forward = forward.rsplit(',', 1)[-1].strip()\n\n # find host and port on ipv6 address\n if '[' in forward and ']' in forward:\n host = forward.split(']')[0][1:].lower()\n elif ':' in forward and forward.count(':') == 1:\n host = forward.split(':')[0].lower()\n else:\n host = forward\n\n forward = forward.split(']')[-1]\n if ':' in forward and forward.count(':') == 1:\n port = forward.split(':', 1)[1]\n else:\n port = 80\n\n remote = (host, port)\n else:\n remote = forward\n\n return remote[0], str(remote[1])\n\n\ndef atoms(message, environ, response, transport, request_time):\n \"\"\"Gets atoms for log formatting.\"\"\"\n if message:\n r = '{} {} HTTP/{}.{}'.format(\n message.method, message.path,\n message.version[0], message.version[1])\n headers = message.headers\n else:\n r = ''\n headers = {}\n\n if transport is not None:\n remote_addr = parse_remote_addr(\n transport.get_extra_info('peername', ('127.0.0.1', )))\n else:\n remote_addr = ('',)\n\n atoms = {\n 'h': remote_addr[0],\n 'l': '-',\n 'u': '-',\n 't': format_date_time(None),\n 'r': r,\n 's': str(getattr(response, 'status', '')),\n 'b': str(getattr(response, 'output_length', '')),\n 'f': headers.get(hdrs.REFERER, '-'),\n 'a': headers.get(hdrs.USER_AGENT, '-'),\n 'T': str(int(request_time)),\n 'D': str(request_time).split('.', 1)[-1][:6],\n 'p': \"<%s>\" % os.getpid()\n }\n\n return atoms\n\n\nclass SafeAtoms(dict):\n \"\"\"Copy from gunicorn\"\"\"\n\n def __init__(self, atoms, i_headers, o_headers):\n dict.__init__(self)\n\n self._i_headers = i_headers\n self._o_headers = o_headers\n\n for key, value in atoms.items():\n self[key] = value.replace('\"', '\\\\\"')\n\n def __getitem__(self, k):\n if k.startswith('{'):\n if k.endswith('}i'):\n headers = self._i_headers\n elif k.endswith('}o'):\n headers = self._o_headers\n else:\n headers = None\n\n if headers is not None:\n return headers.get(k[1:-2], '-')\n\n if k in self:\n return super(SafeAtoms, self).__getitem__(k)\n else:\n return '-'\n\n\n_marker = object()\n\n\nclass reify:\n \"\"\"Use as a class method decorator. It operates almost exactly like\n the Python ``@property`` decorator, but it puts the result of the\n method it decorates into the instance dict after the first call,\n effectively replacing the function it decorates with an instance\n variable. It is, in Python parlance, a non-data descriptor.\n\n \"\"\"\n\n def __init__(self, wrapped):\n self.wrapped = wrapped\n try:\n self.__doc__ = wrapped.__doc__\n except: # pragma: no cover\n pass\n self.name = wrapped.__name__\n\n def __get__(self, inst, owner, _marker=_marker):\n if inst is None:\n return self\n val = inst.__dict__.get(self.name, _marker)\n if val is not _marker:\n return val\n val = self.wrapped(inst)\n inst.__dict__[self.name] = val\n return val\n\n def __set__(self, inst, value):\n raise AttributeError(\"reified property is read-only\")\n\n\n# The unreserved URI characters (RFC 3986)\nUNRESERVED_SET = frozenset(\n \"ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz\" +\n \"0123456789-._~\")\n\n\ndef unquote_unreserved(uri):\n \"\"\"Un-escape any percent-escape sequences in a URI that are unreserved\n characters. This leaves all reserved, illegal and non-ASCII bytes encoded.\n \"\"\"\n parts = uri.split('%')\n for i in range(1, len(parts)):\n h = parts[i][0:2]\n if len(h) == 2 and h.isalnum():\n try:\n c = chr(int(h, 16))\n except ValueError:\n raise InvalidURL(\"Invalid percent-escape sequence: '%s'\" % h)\n\n if c in UNRESERVED_SET:\n parts[i] = c + parts[i][2:]\n else:\n parts[i] = '%' + parts[i]\n else:\n parts[i] = '%' + parts[i]\n return ''.join(parts)\n\n\ndef requote_uri(uri):\n \"\"\"Re-quote the given URI.\n\n This function passes the given URI through an unquote/quote cycle to\n ensure that it is fully and consistently quoted.\n \"\"\"\n safe_with_percent = \"!#$%&'()*+,/:;=?@[]~\"\n safe_without_percent = \"!#$&'()*+,/:;=?@[]~\"\n try:\n # Unquote only the unreserved characters\n # Then quote only illegal characters (do not quote reserved,\n # unreserved, or '%')\n return quote(unquote_unreserved(uri), safe=safe_with_percent)\n except InvalidURL:\n # We couldn't unquote the given URI, so let's try quoting it, but\n # there may be unquoted '%'s in the URI. We need to make sure they're\n # properly quoted so they do not cause issues elsewhere.\n return quote(uri, safe=safe_without_percent)\n", "path": "aiohttp/helpers.py" } ]
diff --git a/aiohttp/helpers.py b/aiohttp/helpers.py index eb542ba96b0..0abecd899e4 100644 --- a/aiohttp/helpers.py +++ b/aiohttp/helpers.py @@ -239,7 +239,7 @@ def atoms(message, environ, response, transport, request_time): if transport is not None: remote_addr = parse_remote_addr( - transport.get_extra_info('addr', '127.0.0.1')) + transport.get_extra_info('peername', ('127.0.0.1', ))) else: remote_addr = ('',)
litestar-org__litestar-1906
Bug: SQL Alchemy repository `updated` vs `updated_at` column reference. https://github.com/litestar-org/litestar/blob/32396925a573c02eff57aa10b2060f505b920232/litestar/contrib/sqlalchemy/base.py#L69 This incorrectly references the old `updated` column name instead of the `updated_at` column name. <!-- POLAR PLEDGE BADGE START --> ## Funding * You can sponsor this specific effort via a [Polar.sh](https://polar.sh) pledge below * We receive the pledge once the issue is completed & verified <a href="https://polar.sh/litestar-org/litestar/issues/1905"> <picture> <source media="(prefers-color-scheme: dark)" srcset="https://polar.sh/api/github/litestar-org/litestar/issues/1905/pledge.svg?darkmode=1"> <img alt="Fund with Polar" src="https://polar.sh/api/github/litestar-org/litestar/issues/1905/pledge.svg"> </picture> </a> <!-- POLAR PLEDGE BADGE END --> StaticFilesConfig and virtual directories I'm trying to write a ``FileSystemProtocol`` to load files from the package data using [importlib_resources](https://importlib-resources.readthedocs.io/en/latest/using.html#). But because ``directories`` is defined as ``DirectoryPath``, pydantic checks if the given directories exist in the local filesystem. This is not generally true, especially in any kind of virtual filesystem (e.g. a zipped package). I think this condition should be relaxed to support virtual filesystems. https://github.com/starlite-api/starlite/blob/9bb6dcd57c10a591377cf8e3a537e9292566d5b9/starlite/config/static_files.py#L32
[ { "content": "\"\"\"Application ORM configuration.\"\"\"\nfrom __future__ import annotations\n\nimport re\nfrom datetime import date, datetime, timezone\nfrom typing import TYPE_CHECKING, Any, ClassVar, Protocol, TypeVar, runtime_checkable\nfrom uuid import UUID, uuid4\n\nfrom pydantic import AnyHttpUrl, AnyUrl, EmailStr\nfrom sqlalchemy import Date, MetaData, Sequence, String\nfrom sqlalchemy.event import listens_for\nfrom sqlalchemy.orm import (\n DeclarativeBase,\n Mapped,\n Session,\n declared_attr,\n mapped_column,\n orm_insert_sentinel,\n registry,\n)\n\nfrom .types import GUID, BigIntIdentity, DateTimeUTC, JsonB\n\nif TYPE_CHECKING:\n from sqlalchemy.sql import FromClause\n\n__all__ = (\n \"AuditColumns\",\n \"BigIntAuditBase\",\n \"BigIntBase\",\n \"BigIntPrimaryKey\",\n \"CommonTableAttributes\",\n \"create_registry\",\n \"ModelProtocol\",\n \"touch_updated_timestamp\",\n \"UUIDAuditBase\",\n \"UUIDBase\",\n \"UUIDPrimaryKey\",\n)\n\n\nUUIDBaseT = TypeVar(\"UUIDBaseT\", bound=\"UUIDBase\")\nBigIntBaseT = TypeVar(\"BigIntBaseT\", bound=\"BigIntBase\")\n\nconvention = {\n \"ix\": \"ix_%(column_0_label)s\",\n \"uq\": \"uq_%(table_name)s_%(column_0_name)s\",\n \"ck\": \"ck_%(table_name)s_%(constraint_name)s\",\n \"fk\": \"fk_%(table_name)s_%(column_0_name)s_%(referred_table_name)s\",\n \"pk\": \"pk_%(table_name)s\",\n}\n\"\"\"Templates for automated constraint name generation.\"\"\"\n\n\n@listens_for(Session, \"before_flush\")\ndef touch_updated_timestamp(session: Session, *_: Any) -> None:\n \"\"\"Set timestamp on update.\n\n Called from SQLAlchemy's\n :meth:`before_flush <sqlalchemy.orm.SessionEvents.before_flush>` event to bump the ``updated``\n timestamp on modified instances.\n\n Args:\n session: The sync :class:`Session <sqlalchemy.orm.Session>` instance that underlies the async\n session.\n \"\"\"\n for instance in session.dirty:\n if hasattr(instance, \"updated_at\"):\n instance.updated = (datetime.now(timezone.utc),)\n\n\n@runtime_checkable\nclass ModelProtocol(Protocol):\n \"\"\"The base SQLAlchemy model protocol.\"\"\"\n\n __table__: FromClause\n __name__: ClassVar[str]\n\n def to_dict(self, exclude: set[str] | None = None) -> dict[str, Any]:\n \"\"\"Convert model to dictionary.\n\n Returns:\n dict[str, Any]: A dict representation of the model\n \"\"\"\n ...\n\n\nclass UUIDPrimaryKey:\n \"\"\"UUID Primary Key Field Mixin.\"\"\"\n\n id: Mapped[UUID] = mapped_column(default=uuid4, primary_key=True) # pyright: ignore\n \"\"\"UUID Primary key column.\"\"\"\n\n @declared_attr\n def _sentinel(cls) -> Mapped[int]:\n return orm_insert_sentinel()\n\n\nclass BigIntPrimaryKey:\n \"\"\"BigInt Primary Key Field Mixin.\"\"\"\n\n @declared_attr\n def id(cls) -> Mapped[int]:\n \"\"\"BigInt Primary key column.\"\"\"\n return mapped_column(\n BigIntIdentity,\n Sequence(f\"{cls.__tablename__}_id_seq\", optional=False), # type: ignore[attr-defined] # pyright: ignore\n primary_key=True,\n )\n\n\nclass AuditColumns:\n \"\"\"Created/Updated At Fields Mixin.\"\"\"\n\n created_at: Mapped[datetime] = mapped_column( # pyright: ignore\n DateTimeUTC(timezone=True),\n default=lambda: datetime.now(timezone.utc),\n )\n \"\"\"Date/time of instance creation.\"\"\"\n updated_at: Mapped[datetime] = mapped_column( # pyright: ignore\n DateTimeUTC(timezone=True),\n default=lambda: datetime.now(timezone.utc),\n )\n \"\"\"Date/time of instance last update.\"\"\"\n\n\nclass CommonTableAttributes:\n \"\"\"Common attributes for SQLALchemy tables.\"\"\"\n\n __name__: ClassVar[str]\n __table__: FromClause\n\n # noinspection PyMethodParameters\n @declared_attr.directive\n def __tablename__(cls) -> str:\n \"\"\"Infer table name from class name.\"\"\"\n regexp = re.compile(\"((?<=[a-z0-9])[A-Z]|(?!^)[A-Z](?=[a-z]))\")\n return regexp.sub(r\"_\\1\", cls.__name__).lower()\n\n def to_dict(self, exclude: set[str] | None = None) -> dict[str, Any]:\n \"\"\"Convert model to dictionary.\n\n Returns:\n dict[str, Any]: A dict representation of the model\n \"\"\"\n exclude = {\"_sentinel\"}.union(self._sa_instance_state.unloaded).union(exclude or []) # type: ignore[attr-defined]\n return {field.name: getattr(self, field.name) for field in self.__table__.columns if field.name not in exclude}\n\n\ndef create_registry() -> registry:\n \"\"\"Create a new SQLAlchemy registry.\"\"\"\n meta = MetaData(naming_convention=convention)\n return registry(\n metadata=meta,\n type_annotation_map={\n UUID: GUID,\n EmailStr: String,\n AnyUrl: String,\n AnyHttpUrl: String,\n dict: JsonB,\n datetime: DateTimeUTC,\n date: Date,\n },\n )\n\n\norm_registry = create_registry()\n\n\nclass UUIDBase(UUIDPrimaryKey, CommonTableAttributes, DeclarativeBase):\n \"\"\"Base for all SQLAlchemy declarative models with UUID primary keys.\"\"\"\n\n registry = orm_registry\n\n\nclass UUIDAuditBase(CommonTableAttributes, UUIDPrimaryKey, AuditColumns, DeclarativeBase):\n \"\"\"Base for declarative models with UUID primary keys and audit columns.\"\"\"\n\n registry = orm_registry\n\n\nclass BigIntBase(BigIntPrimaryKey, CommonTableAttributes, DeclarativeBase):\n \"\"\"Base for all SQLAlchemy declarative models with BigInt primary keys.\"\"\"\n\n registry = orm_registry\n\n\nclass BigIntAuditBase(CommonTableAttributes, BigIntPrimaryKey, AuditColumns, DeclarativeBase):\n \"\"\"Base for declarative models with BigInt primary keys and audit columns.\"\"\"\n\n registry = orm_registry\n", "path": "litestar/contrib/sqlalchemy/base.py" } ]
[ { "content": "\"\"\"Application ORM configuration.\"\"\"\nfrom __future__ import annotations\n\nimport re\nfrom datetime import date, datetime, timezone\nfrom typing import TYPE_CHECKING, Any, ClassVar, Protocol, TypeVar, runtime_checkable\nfrom uuid import UUID, uuid4\n\nfrom pydantic import AnyHttpUrl, AnyUrl, EmailStr\nfrom sqlalchemy import Date, MetaData, Sequence, String\nfrom sqlalchemy.event import listens_for\nfrom sqlalchemy.orm import (\n DeclarativeBase,\n Mapped,\n Session,\n declared_attr,\n mapped_column,\n orm_insert_sentinel,\n registry,\n)\n\nfrom .types import GUID, BigIntIdentity, DateTimeUTC, JsonB\n\nif TYPE_CHECKING:\n from sqlalchemy.sql import FromClause\n\n__all__ = (\n \"AuditColumns\",\n \"BigIntAuditBase\",\n \"BigIntBase\",\n \"BigIntPrimaryKey\",\n \"CommonTableAttributes\",\n \"create_registry\",\n \"ModelProtocol\",\n \"touch_updated_timestamp\",\n \"UUIDAuditBase\",\n \"UUIDBase\",\n \"UUIDPrimaryKey\",\n)\n\n\nUUIDBaseT = TypeVar(\"UUIDBaseT\", bound=\"UUIDBase\")\nBigIntBaseT = TypeVar(\"BigIntBaseT\", bound=\"BigIntBase\")\n\nconvention = {\n \"ix\": \"ix_%(column_0_label)s\",\n \"uq\": \"uq_%(table_name)s_%(column_0_name)s\",\n \"ck\": \"ck_%(table_name)s_%(constraint_name)s\",\n \"fk\": \"fk_%(table_name)s_%(column_0_name)s_%(referred_table_name)s\",\n \"pk\": \"pk_%(table_name)s\",\n}\n\"\"\"Templates for automated constraint name generation.\"\"\"\n\n\n@listens_for(Session, \"before_flush\")\ndef touch_updated_timestamp(session: Session, *_: Any) -> None:\n \"\"\"Set timestamp on update.\n\n Called from SQLAlchemy's\n :meth:`before_flush <sqlalchemy.orm.SessionEvents.before_flush>` event to bump the ``updated``\n timestamp on modified instances.\n\n Args:\n session: The sync :class:`Session <sqlalchemy.orm.Session>` instance that underlies the async\n session.\n \"\"\"\n for instance in session.dirty:\n if hasattr(instance, \"updated_at\"):\n instance.updated_at = datetime.now(timezone.utc)\n\n\n@runtime_checkable\nclass ModelProtocol(Protocol):\n \"\"\"The base SQLAlchemy model protocol.\"\"\"\n\n __table__: FromClause\n __name__: ClassVar[str]\n\n def to_dict(self, exclude: set[str] | None = None) -> dict[str, Any]:\n \"\"\"Convert model to dictionary.\n\n Returns:\n dict[str, Any]: A dict representation of the model\n \"\"\"\n ...\n\n\nclass UUIDPrimaryKey:\n \"\"\"UUID Primary Key Field Mixin.\"\"\"\n\n id: Mapped[UUID] = mapped_column(default=uuid4, primary_key=True) # pyright: ignore\n \"\"\"UUID Primary key column.\"\"\"\n\n @declared_attr\n def _sentinel(cls) -> Mapped[int]:\n return orm_insert_sentinel()\n\n\nclass BigIntPrimaryKey:\n \"\"\"BigInt Primary Key Field Mixin.\"\"\"\n\n @declared_attr\n def id(cls) -> Mapped[int]:\n \"\"\"BigInt Primary key column.\"\"\"\n return mapped_column(\n BigIntIdentity,\n Sequence(f\"{cls.__tablename__}_id_seq\", optional=False), # type: ignore[attr-defined] # pyright: ignore\n primary_key=True,\n )\n\n\nclass AuditColumns:\n \"\"\"Created/Updated At Fields Mixin.\"\"\"\n\n created_at: Mapped[datetime] = mapped_column( # pyright: ignore\n DateTimeUTC(timezone=True),\n default=lambda: datetime.now(timezone.utc),\n )\n \"\"\"Date/time of instance creation.\"\"\"\n updated_at: Mapped[datetime] = mapped_column( # pyright: ignore\n DateTimeUTC(timezone=True),\n default=lambda: datetime.now(timezone.utc),\n )\n \"\"\"Date/time of instance last update.\"\"\"\n\n\nclass CommonTableAttributes:\n \"\"\"Common attributes for SQLALchemy tables.\"\"\"\n\n __name__: ClassVar[str]\n __table__: FromClause\n\n # noinspection PyMethodParameters\n @declared_attr.directive\n def __tablename__(cls) -> str:\n \"\"\"Infer table name from class name.\"\"\"\n regexp = re.compile(\"((?<=[a-z0-9])[A-Z]|(?!^)[A-Z](?=[a-z]))\")\n return regexp.sub(r\"_\\1\", cls.__name__).lower()\n\n def to_dict(self, exclude: set[str] | None = None) -> dict[str, Any]:\n \"\"\"Convert model to dictionary.\n\n Returns:\n dict[str, Any]: A dict representation of the model\n \"\"\"\n exclude = {\"_sentinel\"}.union(self._sa_instance_state.unloaded).union(exclude or []) # type: ignore[attr-defined]\n return {field.name: getattr(self, field.name) for field in self.__table__.columns if field.name not in exclude}\n\n\ndef create_registry() -> registry:\n \"\"\"Create a new SQLAlchemy registry.\"\"\"\n meta = MetaData(naming_convention=convention)\n return registry(\n metadata=meta,\n type_annotation_map={\n UUID: GUID,\n EmailStr: String,\n AnyUrl: String,\n AnyHttpUrl: String,\n dict: JsonB,\n datetime: DateTimeUTC,\n date: Date,\n },\n )\n\n\norm_registry = create_registry()\n\n\nclass UUIDBase(UUIDPrimaryKey, CommonTableAttributes, DeclarativeBase):\n \"\"\"Base for all SQLAlchemy declarative models with UUID primary keys.\"\"\"\n\n registry = orm_registry\n\n\nclass UUIDAuditBase(CommonTableAttributes, UUIDPrimaryKey, AuditColumns, DeclarativeBase):\n \"\"\"Base for declarative models with UUID primary keys and audit columns.\"\"\"\n\n registry = orm_registry\n\n\nclass BigIntBase(BigIntPrimaryKey, CommonTableAttributes, DeclarativeBase):\n \"\"\"Base for all SQLAlchemy declarative models with BigInt primary keys.\"\"\"\n\n registry = orm_registry\n\n\nclass BigIntAuditBase(CommonTableAttributes, BigIntPrimaryKey, AuditColumns, DeclarativeBase):\n \"\"\"Base for declarative models with BigInt primary keys and audit columns.\"\"\"\n\n registry = orm_registry\n", "path": "litestar/contrib/sqlalchemy/base.py" } ]
diff --git a/litestar/contrib/sqlalchemy/base.py b/litestar/contrib/sqlalchemy/base.py index 7aa16cce44..fdbd97576f 100644 --- a/litestar/contrib/sqlalchemy/base.py +++ b/litestar/contrib/sqlalchemy/base.py @@ -66,7 +66,7 @@ def touch_updated_timestamp(session: Session, *_: Any) -> None: """ for instance in session.dirty: if hasattr(instance, "updated_at"): - instance.updated = (datetime.now(timezone.utc),) + instance.updated_at = datetime.now(timezone.utc) @runtime_checkable diff --git a/tests/unit/test_contrib/test_sqlalchemy/test_repository/test_repo_bigint.py b/tests/unit/test_contrib/test_sqlalchemy/test_repository/test_repo_bigint.py index 94fba11e05..d914eff75c 100644 --- a/tests/unit/test_contrib/test_sqlalchemy/test_repository/test_repo_bigint.py +++ b/tests/unit/test_contrib/test_sqlalchemy/test_repository/test_repo_bigint.py @@ -164,7 +164,7 @@ async def test_repo_created_updated(author_repo: AuthorAsyncRepository) -> None: author.books.append(BigIntBook(title="Testing")) author = await maybe_async(author_repo.update(author)) - assert author.updated_at == original_update_dt + assert author.updated_at > original_update_dt async def test_repo_list_method(raw_authors_bigint: list[dict[str, Any]], author_repo: AuthorAsyncRepository) -> None: diff --git a/tests/unit/test_contrib/test_sqlalchemy/test_repository/test_repo_uuid.py b/tests/unit/test_contrib/test_sqlalchemy/test_repository/test_repo_uuid.py index cce0d0d7fc..a38ae0fa04 100644 --- a/tests/unit/test_contrib/test_sqlalchemy/test_repository/test_repo_uuid.py +++ b/tests/unit/test_contrib/test_sqlalchemy/test_repository/test_repo_uuid.py @@ -260,7 +260,7 @@ async def test_repo_created_updated(author_repo: AuthorAsyncRepository) -> None: author.books.append(UUIDBook(title="Testing")) author = await maybe_async(author_repo.update(author)) - assert author.updated_at == original_update_dt + assert author.updated_at > original_update_dt async def test_repo_list_method(
pwr-Solaar__Solaar-346
Any chance to add MK220 combo (K220 + M150)? Well, the title says it all. The combo is the MK220: Keyboard: K220 Mouse: M150 Thanks and good work!
[ { "content": "# -*- python-mode -*-\n# -*- coding: UTF-8 -*-\n\n## Copyright (C) 2012-2013 Daniel Pavel\n##\n## This program is free software; you can redistribute it and/or modify\n## it under the terms of the GNU General Public License as published by\n## the Free Software Foundation; either version 2 of the License, or\n## (at your option) any later version.\n##\n## This program is distributed in the hope that it will be useful,\n## but WITHOUT ANY WARRANTY; without even the implied warranty of\n## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n## GNU General Public License for more details.\n##\n## You should have received a copy of the GNU General Public License along\n## with this program; if not, write to the Free Software Foundation, Inc.,\n## 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.\n\nfrom __future__ import absolute_import, division, print_function, unicode_literals\n\n\nfrom .common import NamedInts as _NamedInts\nfrom .hidpp10 import REGISTERS as _R, DEVICE_KIND as _DK\nfrom .settings_templates import RegisterSettings as _RS, FeatureSettings as _FS\n\n#\n#\n#\n\nfrom collections import namedtuple\n_DeviceDescriptor = namedtuple('_DeviceDescriptor',\n\t\t\t\t('name', 'kind', 'wpid', 'codename', 'protocol', 'registers', 'settings'))\ndel namedtuple\n\nDEVICES = {}\n\ndef _D(name, codename=None, kind=None, wpid=None, protocol=None, registers=None, settings=None):\n\tassert name\n\n\tif kind is None:\n\t\tkind = (_DK.mouse if 'Mouse' in name\n\t\t\t\telse _DK.keyboard if 'Keyboard' in name\n\t\t\t\telse _DK.numpad if 'Number Pad' in name\n\t\t\t\telse _DK.touchpad if 'Touchpad' in name\n\t\t\t\telse _DK.trackball if 'Trackball' in name\n\t\t\t\telse None)\n\tassert kind is not None, 'descriptor for %s does not have kind set' % name\n\n\t# heuristic: the codename is the last word in the device name\n\tif codename is None and ' ' in name:\n\t\tcodename = name.split(' ')[-1]\n\tassert codename is not None, 'descriptor for %s does not have codename set' % name\n\n\tif protocol is not None:\n\t\t# ? 2.0 devices should not have any registers\n\t\tif protocol < 2.0:\n\t\t\tassert settings is None or all(s._rw.kind == 1 for s in settings)\n\t\telse:\n\t\t\tassert registers is None\n\t\t\tassert settings is None or all(s._rw.kind == 2 for s in settings)\n\n\t\tif wpid:\n\t\t\tfor w in wpid if isinstance(wpid, tuple) else (wpid, ):\n\t\t\t\tif protocol > 1.0:\n\t\t\t\t\tassert w[0:1] == '4', '%s has protocol %0.1f, wpid %s' % (name, protocol, w)\n\t\t\t\telse:\n\t\t\t\t\tif w[0:1] == '1':\n\t\t\t\t\t\tassert kind == _DK.mouse, '%s has protocol %0.1f, wpid %s' % (name, protocol, w)\n\t\t\t\t\telif w[0:1] == '2':\n\t\t\t\t\t\tassert kind in (_DK.keyboard, _DK.numpad), '%s has protocol %0.1f, wpid %s' % (name, protocol, w)\n\n\tdevice_descriptor = _DeviceDescriptor(name=name, kind=kind,\n\t\t\t\t\twpid=wpid, codename=codename, protocol=protocol,\n\t\t\t\t\tregisters=registers, settings=settings)\n\n\tassert codename not in DEVICES, 'duplicate codename in device descriptors: %s' % (DEVICES[codename], )\n\tDEVICES[codename] = device_descriptor\n\n\tif wpid:\n\t\tif not isinstance(wpid, tuple):\n\t\t\twpid = (wpid, )\n\n\t\tfor w in wpid:\n\t\t\tassert w not in DEVICES, 'duplicate wpid in device descriptors: %s' % (DEVICES[w], )\n\t\t\tDEVICES[w] = device_descriptor\n\n#\n#\n#\n\n_PERFORMANCE_MX_DPIS = _NamedInts.range(0x81, 0x8F, lambda x: str((x - 0x80) * 100))\n\n#\n#\n#\n\n# Some HID++1.0 registers and HID++2.0 features can be discovered at run-time,\n# so they are not specified here.\n#\n# For known registers, however, please do specify them here -- avoids\n# unnecessary communication with the device and makes it easier to make certain\n# decisions when querying the device's state.\n#\n# Specify a negative value to blacklist a certain register for a device.\n#\n# Usually, state registers (battery, leds, some features, etc) are only used by\n# HID++ 1.0 devices, while HID++ 2.0 devices use features for the same\n# functionalities. This is a rule that's been discovered by trial-and-error,\n# so it may change in the future.\n\n# Well-known registers (in hex):\n# * 00 - notification flags (all devices)\n# 01 - mice: smooth scrolling\n# 07 - battery status\n# 09 - keyboards: FN swap (if it has the FN key)\n# 0D - battery charge\n# a device may have either the 07 or 0D register available;\n# no known device uses both\n# 51 - leds\n# 63 - mice: DPI\n# * F1 - firmware info\n# Some registers appear to be universally supported, no matter the HID++ version\n# (marked with *). The rest may or may not be supported, and their values may or\n# may not mean the same thing across different devices.\n\n# The 'codename' and 'kind' fields are usually guessed from the device name,\n# but in some cases (like the Logitech Cube) that heuristic fails and they have\n# to be specified.\n#\n# The 'protocol' and 'wpid' fields are optional (they can be discovered at\n# runtime), but specifying them here speeds up device discovery and reduces the\n# USB traffic Solaar has to do to fully identify peripherals.\n# Same goes for HID++ 2.0 feature settings (like _feature_fn_swap).\n#\n# The 'registers' field indicates read-only registers, specifying a state. These\n# are valid (AFAIK) only to HID++ 1.0 devices.\n# The 'settings' field indicates a read/write register; based on them Solaar\n# generates, at runtime, the settings controls in the device panel. HID++ 1.0\n# devices may only have register-based settings; HID++ 2.0 devices may only have\n# feature-based settings.\n\n# Keyboards\n\n_D('Wireless Keyboard K230', protocol=2.0, wpid='400D')\n_D('Wireless Keyboard K270', protocol=1.0,\n\t\t\t\tregisters=(_R.battery_status, ),\n\t\t\t\t)\n_D('Wireless Keyboard MK320', protocol=1.0, wpid='200F',\n\t\t\t\tregisters=(_R.battery_status, ),\n\t\t\t\t)\n_D('Wireless Keyboard MK330')\n_D('Wireless Compact Keyboard K340', protocol=1.0, wpid='2007',\n\t\t\t\tregisters=(_R.battery_status, ),\n\t\t\t\t)\n_D('Wireless Wave Keyboard K350', protocol=1.0, wpid='200A',\n\t\t\t\tregisters=(_R.battery_status, ),\n\t\t\t\t)\n_D('Wireless Keyboard K360', protocol=2.0, wpid='4004',\n\t\t\t\tsettings=[\n\t\t\t\t\t\t\t_FS.fn_swap()\n\t\t\t\t\t\t],\n\t\t\t\t)\n_D('Wireless Touch Keyboard K400', protocol=2.0, wpid=('400E', '4024'),\n\t\t\t\tsettings=[\n\t\t\t\t\t\t\t_FS.fn_swap()\n\t\t\t\t\t\t],\n\t\t\t\t)\n_D('Wireless Touch Keyboard K400 Plus', protocol=2.0, wpid='404D',\n settings=[\n _FS.new_fn_swap()\n ],\n )\n_D('Wireless Keyboard K520', protocol=1.0, wpid='2011',\n\t\t\t\tregisters=(_R.battery_status, ),\n\t\t\t\tsettings=[\n\t\t\t\t\t\t\t_RS.fn_swap(),\n\t\t\t\t\t\t],\n\t\t\t\t)\n_D('Number Pad N545', protocol=1.0, wpid='2006',\n\t\t\t\tregisters=(_R.battery_status, ),\n\t\t\t\t)\n_D('Wireless Keyboard MK550')\n_D('Wireless Keyboard MK700', protocol=1.0, wpid='2008',\n\t\t\t\tregisters=(_R.battery_status, ),\n\t\t\t\tsettings=[\n\t\t\t\t\t\t\t_RS.fn_swap(),\n\t\t\t\t\t\t],\n\t\t\t\t)\n_D('Wireless Solar Keyboard K750', protocol=2.0, wpid='4002',\n\t\t\t\tsettings=[\n\t\t\t\t\t\t\t_FS.fn_swap()\n\t\t\t\t\t\t],\n\t\t\t\t)\n_D('Wireless Solar Keyboard K780', protocol=4.5, wpid='405B',\n\t\t\t\tsettings=[\n\t\t\t\t\t\t\t_FS.new_fn_swap()\n\t\t\t\t\t\t],\n\t\t\t\t)\n_D('Wireless Illuminated Keyboard K800', protocol=1.0, wpid='2010',\n\t\t\t\tregisters=(_R.battery_status, _R.three_leds, ),\n\t\t\t\tsettings=[\n\t\t\t\t\t\t\t_RS.fn_swap(),\n\t\t\t\t\t\t\t_RS.hand_detection(),\n\t\t\t\t\t\t],\n\t\t\t\t)\n_D('Illuminated Living-Room Keyboard K830', protocol=2.0, wpid='4032',\n\t\t\t\tsettings=[\n\t\t\t\t\t\t\t_FS.new_fn_swap()\n\t\t\t\t\t\t],\n\t\t\t\t)\n\n# Mice\n\n_D('Wireless Mouse M175', protocol=2.0, wpid='4008')\n_D('Wireless Mouse M185')\n_D('Wireless Mouse M187', protocol=2.0, wpid='4019')\n_D('Wireless Mouse M215', protocol=1.0, wpid='1020')\n_D('Wireless Mouse M235')\n_D('Wireless Mouse M305', protocol=1.0, wpid='101F',\n\t\t\t\tregisters=(_R.battery_status, ),\n\t\t\t\tsettings=[\n\t\t\t\t\t\t\t_RS.side_scroll(),\n\t\t\t\t\t\t],\n\t\t\t\t)\n_D('Wireless Mouse M310', protocol=1.0, wpid='1024',\n\t\t\t\tregisters=(_R.battery_status, ),\n\t\t\t\t)\n_D('Wireless Mouse M315')\n_D('Wireless Mouse M317')\n_D('Wireless Mouse M325', protocol=2.0, wpid='400A',\n\t\t\t\tsettings=[\n\t\t\t\t\t\t\t_FS.smooth_scroll(),\n\t\t\t\t])\n_D('Wireless Mouse M345', protocol=2.0, wpid='4017')\n_D('Wireless Mouse M350', protocol=1.0, wpid='101C',\n\t\t\t\tregisters=(_R.battery_charge, ),\n\t\t\t\t)\n_D('Wireless Mouse M505', codename='M505/B605', protocol=1.0, wpid='101D',\n\t\t\t\tregisters=(_R.battery_charge, ),\n\t\t\t\tsettings=[\n\t\t\t\t\t\t\t_RS.smooth_scroll(),\n\t\t\t\t\t\t\t_RS.side_scroll(),\n\t\t\t\t\t\t],\n\t\t\t\t)\n_D('Wireless Mouse M510', protocol=1.0, wpid='1025',\n\t\t\t\tregisters=(_R.battery_status, ),\n\t\t\t\tsettings=[\n\t\t\t\t\t\t\t_RS.smooth_scroll(),\n\t\t\t\t\t\t\t_RS.side_scroll(),\n\t\t\t\t\t\t],\n\t\t\t\t)\n_D('Couch Mouse M515', protocol=2.0, wpid='4007')\n_D('Wireless Mouse M525', protocol=2.0, wpid='4013')\n_D('Touch Mouse M600', protocol=2.0, wpid='401A')\n_D('Marathon Mouse M705', protocol=1.0, wpid='101B',\n\t\t\t\tregisters=(_R.battery_charge, ),\n\t\t\t\tsettings=[\n\t\t\t\t\t\t\t_RS.smooth_scroll(),\n\t\t\t\t\t\t\t_RS.side_scroll(),\n\t\t\t\t\t\t],\n\t\t\t\t)\n_D('Zone Touch Mouse T400')\n_D('Touch Mouse T620', protocol=2.0)\n_D('Logitech Cube', kind=_DK.mouse, protocol=2.0)\n_D('Anywhere Mouse MX', codename='Anywhere MX', protocol=1.0, wpid='1017',\n\t\t\t\tregisters=(_R.battery_charge, ),\n\t\t\t\tsettings=[\n\t\t\t\t\t\t\t_RS.smooth_scroll(),\n\t\t\t\t\t\t\t_RS.side_scroll(),\n\t\t\t\t\t\t],\n\t\t\t\t)\n_D('Anywhere Mouse MX 2', codename='Anywhere MX 2', protocol=4.5, wpid='404A',\n\t\t\t\tsettings=[\n\t\t\t\t\t\t\t_FS.smooth_scroll(),\n\t\t\t\t\t\t],\n\t\t\t\t)\n_D('Performance Mouse MX', codename='Performance MX', protocol=1.0, wpid='101A',\n\t\t\t\tregisters=(_R.battery_status, _R.three_leds, ),\n\t\t\t\tsettings=[\n\t\t\t\t\t\t\t_RS.dpi(choices=_PERFORMANCE_MX_DPIS),\n\t\t\t\t\t\t\t_RS.smooth_scroll(),\n\t\t\t\t\t\t\t_RS.side_scroll(),\n\t\t\t\t\t\t],\n\t\t\t\t)\n\n_D('Wireless Mouse MX Master', codename='MX Master', protocol=4.5, wpid='4041')\n\n_D('G7 Cordless Laser Mouse', codename='G7', protocol=1.0, wpid='1002',\n\t\t\t\tregisters=(_R.battery_status, ),\n\t\t\t\t)\n_D('G700 Gaming Mouse', codename='G700', protocol=1.0, wpid='1023',\n\t\t\t\tregisters=(_R.battery_status, _R.three_leds, ),\n\t\t\t\tsettings=[\n\t\t\t\t\t\t\t_RS.smooth_scroll(),\n\t\t\t\t\t\t\t_RS.side_scroll(),\n\t\t\t\t\t\t],\n\t\t\t\t)\n_D('G700s Gaming Mouse', codename='G700s', protocol=1.0, wpid='102A',\n\t\t\t\tregisters=(_R.battery_status, _R.three_leds, ),\n\t\t\t\tsettings=[\n\t\t\t\t\t\t\t_RS.smooth_scroll(),\n\t\t\t\t\t\t\t_RS.side_scroll(),\n\t\t\t\t\t\t],\n\t\t\t\t)\n\n# Trackballs\n\n_D('Wireless Trackball M570')\n\n# Touchpads\n\n_D('Wireless Rechargeable Touchpad T650', protocol=2.0, wpid='4101')\n_D('Wireless Touchpad', codename='Wireless Touch', protocol=2.0, wpid='4011')\n\n#\n# Classic Nano peripherals (that don't support the Unifying protocol).\n# A wpid is necessary to properly identify them.\n#\n\n_D('VX Nano Cordless Laser Mouse', codename='VX Nano', protocol=1.0, wpid=('100B', '100F'),\n\t\t\t\tregisters=(_R.battery_charge, ),\n\t\t\t\tsettings=[\n\t\t\t\t\t\t\t_RS.smooth_scroll(),\n\t\t\t\t\t\t\t_RS.side_scroll(),\n\t\t\t\t\t\t],\n\t\t\t\t)\n_D('V450 Nano Cordless Laser Mouse', codename='V450 Nano', protocol=1.0, wpid='1011',\n\t\t\t\tregisters=(_R.battery_charge, ),\n\t\t\t\t)\n_D('V550 Nano Cordless Laser Mouse', codename='V550 Nano', protocol=1.0, wpid='1013',\n\t\t\t\tregisters=(_R.battery_charge, ),\n\t\t\t\tsettings=[\n\t\t\t\t\t\t\t_RS.smooth_scroll(),\n\t\t\t\t\t\t\t_RS.side_scroll(),\n\t\t\t\t\t\t],\n\t\t\t\t)\n\n# Mini receiver mice\n\n_D('MX610 Laser Cordless Mouse', codename='MX610', protocol=1.0, wpid='1001',\n\t\t\t\tregisters=(_R.battery_status, ),\n\t\t\t\t)\n_D('MX620 Laser Cordless Mouse', codename='MX620', protocol=1.0, wpid=('100A', '1016'),\n\t\t\t\tregisters=(_R.battery_charge, ),\n\t\t\t\t)\n_D('MX610 Left-Handled Mouse', codename='MX610L', protocol=1.0, wpid='1004',\n\t\t\t\tregisters=(_R.battery_status, ),\n\t\t\t\t)\n_D('V400 Laser Cordless Mouse', codename='V400', protocol=1.0, wpid='1003',\n\t\t\t\tregisters=(_R.battery_status, ),\n\t\t\t\t)\n_D('V450 Laser Cordless Mouse', codename='V450', protocol=1.0, wpid='1005',\n\t\t\t\tregisters=(_R.battery_status, ),\n\t\t\t\t)\n_D('VX Revolution', codename='VX Revolution', kind=_DK.mouse, protocol=1.0, wpid=('1006', '100D'),\n\t\t\t\tregisters=(_R.battery_charge, ),\n\t\t\t\t)\n_D('MX Air', codename='MX Air', protocol=1.0, kind=_DK.mouse, wpid=('1007', '100E'),\n\t\t\t\tregisters=(_R.battery_charge, ),\n\t\t\t\t)\n_D('MX Revolution', codename='MX Revolution', protocol=1.0, kind=_DK.mouse, wpid=('1008', '100C'),\n\t\t\t\tregisters=(_R.battery_charge, ),\n\t\t\t\t)\n_D('MX 1100 Cordless Laser Mouse', codename='MX 1100', protocol=1.0, kind=_DK.mouse, wpid='1014',\n registers=(_R.battery_charge, ),\n settings=[\n\t\t\t\t\t\t\t_RS.smooth_scroll(),\n\t\t\t\t\t\t\t_RS.side_scroll(),\n\t\t\t\t\t\t],\n )\n\n# Some exotics...\n\n_D('Fujitsu Sonic Mouse', codename='Sonic', protocol=1.0, wpid='1029')\n", "path": "lib/logitech_receiver/descriptors.py" } ]
[ { "content": "# -*- python-mode -*-\n# -*- coding: UTF-8 -*-\n\n## Copyright (C) 2012-2013 Daniel Pavel\n##\n## This program is free software; you can redistribute it and/or modify\n## it under the terms of the GNU General Public License as published by\n## the Free Software Foundation; either version 2 of the License, or\n## (at your option) any later version.\n##\n## This program is distributed in the hope that it will be useful,\n## but WITHOUT ANY WARRANTY; without even the implied warranty of\n## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n## GNU General Public License for more details.\n##\n## You should have received a copy of the GNU General Public License along\n## with this program; if not, write to the Free Software Foundation, Inc.,\n## 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.\n\nfrom __future__ import absolute_import, division, print_function, unicode_literals\n\n\nfrom .common import NamedInts as _NamedInts\nfrom .hidpp10 import REGISTERS as _R, DEVICE_KIND as _DK\nfrom .settings_templates import RegisterSettings as _RS, FeatureSettings as _FS\n\n#\n#\n#\n\nfrom collections import namedtuple\n_DeviceDescriptor = namedtuple('_DeviceDescriptor',\n\t\t\t\t('name', 'kind', 'wpid', 'codename', 'protocol', 'registers', 'settings'))\ndel namedtuple\n\nDEVICES = {}\n\ndef _D(name, codename=None, kind=None, wpid=None, protocol=None, registers=None, settings=None):\n\tassert name\n\n\tif kind is None:\n\t\tkind = (_DK.mouse if 'Mouse' in name\n\t\t\t\telse _DK.keyboard if 'Keyboard' in name\n\t\t\t\telse _DK.numpad if 'Number Pad' in name\n\t\t\t\telse _DK.touchpad if 'Touchpad' in name\n\t\t\t\telse _DK.trackball if 'Trackball' in name\n\t\t\t\telse None)\n\tassert kind is not None, 'descriptor for %s does not have kind set' % name\n\n\t# heuristic: the codename is the last word in the device name\n\tif codename is None and ' ' in name:\n\t\tcodename = name.split(' ')[-1]\n\tassert codename is not None, 'descriptor for %s does not have codename set' % name\n\n\tif protocol is not None:\n\t\t# ? 2.0 devices should not have any registers\n\t\tif protocol < 2.0:\n\t\t\tassert settings is None or all(s._rw.kind == 1 for s in settings)\n\t\telse:\n\t\t\tassert registers is None\n\t\t\tassert settings is None or all(s._rw.kind == 2 for s in settings)\n\n\t\tif wpid:\n\t\t\tfor w in wpid if isinstance(wpid, tuple) else (wpid, ):\n\t\t\t\tif protocol > 1.0:\n\t\t\t\t\tassert w[0:1] == '4', '%s has protocol %0.1f, wpid %s' % (name, protocol, w)\n\t\t\t\telse:\n\t\t\t\t\tif w[0:1] == '1':\n\t\t\t\t\t\tassert kind == _DK.mouse, '%s has protocol %0.1f, wpid %s' % (name, protocol, w)\n\t\t\t\t\telif w[0:1] == '2':\n\t\t\t\t\t\tassert kind in (_DK.keyboard, _DK.numpad), '%s has protocol %0.1f, wpid %s' % (name, protocol, w)\n\n\tdevice_descriptor = _DeviceDescriptor(name=name, kind=kind,\n\t\t\t\t\twpid=wpid, codename=codename, protocol=protocol,\n\t\t\t\t\tregisters=registers, settings=settings)\n\n\tassert codename not in DEVICES, 'duplicate codename in device descriptors: %s' % (DEVICES[codename], )\n\tDEVICES[codename] = device_descriptor\n\n\tif wpid:\n\t\tif not isinstance(wpid, tuple):\n\t\t\twpid = (wpid, )\n\n\t\tfor w in wpid:\n\t\t\tassert w not in DEVICES, 'duplicate wpid in device descriptors: %s' % (DEVICES[w], )\n\t\t\tDEVICES[w] = device_descriptor\n\n#\n#\n#\n\n_PERFORMANCE_MX_DPIS = _NamedInts.range(0x81, 0x8F, lambda x: str((x - 0x80) * 100))\n\n#\n#\n#\n\n# Some HID++1.0 registers and HID++2.0 features can be discovered at run-time,\n# so they are not specified here.\n#\n# For known registers, however, please do specify them here -- avoids\n# unnecessary communication with the device and makes it easier to make certain\n# decisions when querying the device's state.\n#\n# Specify a negative value to blacklist a certain register for a device.\n#\n# Usually, state registers (battery, leds, some features, etc) are only used by\n# HID++ 1.0 devices, while HID++ 2.0 devices use features for the same\n# functionalities. This is a rule that's been discovered by trial-and-error,\n# so it may change in the future.\n\n# Well-known registers (in hex):\n# * 00 - notification flags (all devices)\n# 01 - mice: smooth scrolling\n# 07 - battery status\n# 09 - keyboards: FN swap (if it has the FN key)\n# 0D - battery charge\n# a device may have either the 07 or 0D register available;\n# no known device uses both\n# 51 - leds\n# 63 - mice: DPI\n# * F1 - firmware info\n# Some registers appear to be universally supported, no matter the HID++ version\n# (marked with *). The rest may or may not be supported, and their values may or\n# may not mean the same thing across different devices.\n\n# The 'codename' and 'kind' fields are usually guessed from the device name,\n# but in some cases (like the Logitech Cube) that heuristic fails and they have\n# to be specified.\n#\n# The 'protocol' and 'wpid' fields are optional (they can be discovered at\n# runtime), but specifying them here speeds up device discovery and reduces the\n# USB traffic Solaar has to do to fully identify peripherals.\n# Same goes for HID++ 2.0 feature settings (like _feature_fn_swap).\n#\n# The 'registers' field indicates read-only registers, specifying a state. These\n# are valid (AFAIK) only to HID++ 1.0 devices.\n# The 'settings' field indicates a read/write register; based on them Solaar\n# generates, at runtime, the settings controls in the device panel. HID++ 1.0\n# devices may only have register-based settings; HID++ 2.0 devices may only have\n# feature-based settings.\n\n# Keyboards\n\n_D('Wireless Keyboard K230', protocol=2.0, wpid='400D')\n_D('Wireless Keyboard K270', protocol=1.0,\n\t\t\t\tregisters=(_R.battery_status, ),\n\t\t\t\t)\n_D('Wireless Keyboard MK320', protocol=1.0, wpid='200F',\n\t\t\t\tregisters=(_R.battery_status, ),\n\t\t\t\t)\n_D('Wireless Keyboard MK330')\n_D('Wireless Compact Keyboard K340', protocol=1.0, wpid='2007',\n\t\t\t\tregisters=(_R.battery_status, ),\n\t\t\t\t)\n_D('Wireless Wave Keyboard K350', protocol=1.0, wpid='200A',\n\t\t\t\tregisters=(_R.battery_status, ),\n\t\t\t\t)\n_D('Wireless Keyboard K360', protocol=2.0, wpid='4004',\n\t\t\t\tsettings=[\n\t\t\t\t\t\t\t_FS.fn_swap()\n\t\t\t\t\t\t],\n\t\t\t\t)\n_D('Wireless Touch Keyboard K400', protocol=2.0, wpid=('400E', '4024'),\n\t\t\t\tsettings=[\n\t\t\t\t\t\t\t_FS.fn_swap()\n\t\t\t\t\t\t],\n\t\t\t\t)\n_D('Wireless Touch Keyboard K400 Plus', protocol=2.0, wpid='404D',\n settings=[\n _FS.new_fn_swap()\n ],\n )\n_D('Wireless Keyboard K520', protocol=1.0, wpid='2011',\n\t\t\t\tregisters=(_R.battery_status, ),\n\t\t\t\tsettings=[\n\t\t\t\t\t\t\t_RS.fn_swap(),\n\t\t\t\t\t\t],\n\t\t\t\t)\n_D('Number Pad N545', protocol=1.0, wpid='2006',\n\t\t\t\tregisters=(_R.battery_status, ),\n\t\t\t\t)\n_D('Wireless Keyboard MK550')\n_D('Wireless Keyboard MK700', protocol=1.0, wpid='2008',\n\t\t\t\tregisters=(_R.battery_status, ),\n\t\t\t\tsettings=[\n\t\t\t\t\t\t\t_RS.fn_swap(),\n\t\t\t\t\t\t],\n\t\t\t\t)\n_D('Wireless Solar Keyboard K750', protocol=2.0, wpid='4002',\n\t\t\t\tsettings=[\n\t\t\t\t\t\t\t_FS.fn_swap()\n\t\t\t\t\t\t],\n\t\t\t\t)\n_D('Wireless Solar Keyboard K780', protocol=4.5, wpid='405B',\n\t\t\t\tsettings=[\n\t\t\t\t\t\t\t_FS.new_fn_swap()\n\t\t\t\t\t\t],\n\t\t\t\t)\n_D('Wireless Illuminated Keyboard K800', protocol=1.0, wpid='2010',\n\t\t\t\tregisters=(_R.battery_status, _R.three_leds, ),\n\t\t\t\tsettings=[\n\t\t\t\t\t\t\t_RS.fn_swap(),\n\t\t\t\t\t\t\t_RS.hand_detection(),\n\t\t\t\t\t\t],\n\t\t\t\t)\n_D('Illuminated Living-Room Keyboard K830', protocol=2.0, wpid='4032',\n\t\t\t\tsettings=[\n\t\t\t\t\t\t\t_FS.new_fn_swap()\n\t\t\t\t\t\t],\n\t\t\t\t)\n\n# Mice\n\n_D('Wireless Mouse M150', protocol=2.0, wpid='4022')\n_D('Wireless Mouse M175', protocol=2.0, wpid='4008')\n_D('Wireless Mouse M185')\n_D('Wireless Mouse M187', protocol=2.0, wpid='4019')\n_D('Wireless Mouse M215', protocol=1.0, wpid='1020')\n_D('Wireless Mouse M235')\n_D('Wireless Mouse M305', protocol=1.0, wpid='101F',\n\t\t\t\tregisters=(_R.battery_status, ),\n\t\t\t\tsettings=[\n\t\t\t\t\t\t\t_RS.side_scroll(),\n\t\t\t\t\t\t],\n\t\t\t\t)\n_D('Wireless Mouse M310', protocol=1.0, wpid='1024',\n\t\t\t\tregisters=(_R.battery_status, ),\n\t\t\t\t)\n_D('Wireless Mouse M315')\n_D('Wireless Mouse M317')\n_D('Wireless Mouse M325', protocol=2.0, wpid='400A',\n\t\t\t\tsettings=[\n\t\t\t\t\t\t\t_FS.smooth_scroll(),\n\t\t\t\t])\n_D('Wireless Mouse M345', protocol=2.0, wpid='4017')\n_D('Wireless Mouse M350', protocol=1.0, wpid='101C',\n\t\t\t\tregisters=(_R.battery_charge, ),\n\t\t\t\t)\n_D('Wireless Mouse M505', codename='M505/B605', protocol=1.0, wpid='101D',\n\t\t\t\tregisters=(_R.battery_charge, ),\n\t\t\t\tsettings=[\n\t\t\t\t\t\t\t_RS.smooth_scroll(),\n\t\t\t\t\t\t\t_RS.side_scroll(),\n\t\t\t\t\t\t],\n\t\t\t\t)\n_D('Wireless Mouse M510', protocol=1.0, wpid='1025',\n\t\t\t\tregisters=(_R.battery_status, ),\n\t\t\t\tsettings=[\n\t\t\t\t\t\t\t_RS.smooth_scroll(),\n\t\t\t\t\t\t\t_RS.side_scroll(),\n\t\t\t\t\t\t],\n\t\t\t\t)\n_D('Couch Mouse M515', protocol=2.0, wpid='4007')\n_D('Wireless Mouse M525', protocol=2.0, wpid='4013')\n_D('Touch Mouse M600', protocol=2.0, wpid='401A')\n_D('Marathon Mouse M705', protocol=1.0, wpid='101B',\n\t\t\t\tregisters=(_R.battery_charge, ),\n\t\t\t\tsettings=[\n\t\t\t\t\t\t\t_RS.smooth_scroll(),\n\t\t\t\t\t\t\t_RS.side_scroll(),\n\t\t\t\t\t\t],\n\t\t\t\t)\n_D('Zone Touch Mouse T400')\n_D('Touch Mouse T620', protocol=2.0)\n_D('Logitech Cube', kind=_DK.mouse, protocol=2.0)\n_D('Anywhere Mouse MX', codename='Anywhere MX', protocol=1.0, wpid='1017',\n\t\t\t\tregisters=(_R.battery_charge, ),\n\t\t\t\tsettings=[\n\t\t\t\t\t\t\t_RS.smooth_scroll(),\n\t\t\t\t\t\t\t_RS.side_scroll(),\n\t\t\t\t\t\t],\n\t\t\t\t)\n_D('Anywhere Mouse MX 2', codename='Anywhere MX 2', protocol=4.5, wpid='404A',\n\t\t\t\tsettings=[\n\t\t\t\t\t\t\t_FS.smooth_scroll(),\n\t\t\t\t\t\t],\n\t\t\t\t)\n_D('Performance Mouse MX', codename='Performance MX', protocol=1.0, wpid='101A',\n\t\t\t\tregisters=(_R.battery_status, _R.three_leds, ),\n\t\t\t\tsettings=[\n\t\t\t\t\t\t\t_RS.dpi(choices=_PERFORMANCE_MX_DPIS),\n\t\t\t\t\t\t\t_RS.smooth_scroll(),\n\t\t\t\t\t\t\t_RS.side_scroll(),\n\t\t\t\t\t\t],\n\t\t\t\t)\n\n_D('Wireless Mouse MX Master', codename='MX Master', protocol=4.5, wpid='4041')\n\n_D('G7 Cordless Laser Mouse', codename='G7', protocol=1.0, wpid='1002',\n\t\t\t\tregisters=(_R.battery_status, ),\n\t\t\t\t)\n_D('G700 Gaming Mouse', codename='G700', protocol=1.0, wpid='1023',\n\t\t\t\tregisters=(_R.battery_status, _R.three_leds, ),\n\t\t\t\tsettings=[\n\t\t\t\t\t\t\t_RS.smooth_scroll(),\n\t\t\t\t\t\t\t_RS.side_scroll(),\n\t\t\t\t\t\t],\n\t\t\t\t)\n_D('G700s Gaming Mouse', codename='G700s', protocol=1.0, wpid='102A',\n\t\t\t\tregisters=(_R.battery_status, _R.three_leds, ),\n\t\t\t\tsettings=[\n\t\t\t\t\t\t\t_RS.smooth_scroll(),\n\t\t\t\t\t\t\t_RS.side_scroll(),\n\t\t\t\t\t\t],\n\t\t\t\t)\n\n# Trackballs\n\n_D('Wireless Trackball M570')\n\n# Touchpads\n\n_D('Wireless Rechargeable Touchpad T650', protocol=2.0, wpid='4101')\n_D('Wireless Touchpad', codename='Wireless Touch', protocol=2.0, wpid='4011')\n\n#\n# Classic Nano peripherals (that don't support the Unifying protocol).\n# A wpid is necessary to properly identify them.\n#\n\n_D('VX Nano Cordless Laser Mouse', codename='VX Nano', protocol=1.0, wpid=('100B', '100F'),\n\t\t\t\tregisters=(_R.battery_charge, ),\n\t\t\t\tsettings=[\n\t\t\t\t\t\t\t_RS.smooth_scroll(),\n\t\t\t\t\t\t\t_RS.side_scroll(),\n\t\t\t\t\t\t],\n\t\t\t\t)\n_D('V450 Nano Cordless Laser Mouse', codename='V450 Nano', protocol=1.0, wpid='1011',\n\t\t\t\tregisters=(_R.battery_charge, ),\n\t\t\t\t)\n_D('V550 Nano Cordless Laser Mouse', codename='V550 Nano', protocol=1.0, wpid='1013',\n\t\t\t\tregisters=(_R.battery_charge, ),\n\t\t\t\tsettings=[\n\t\t\t\t\t\t\t_RS.smooth_scroll(),\n\t\t\t\t\t\t\t_RS.side_scroll(),\n\t\t\t\t\t\t],\n\t\t\t\t)\n\n# Mini receiver mice\n\n_D('MX610 Laser Cordless Mouse', codename='MX610', protocol=1.0, wpid='1001',\n\t\t\t\tregisters=(_R.battery_status, ),\n\t\t\t\t)\n_D('MX620 Laser Cordless Mouse', codename='MX620', protocol=1.0, wpid=('100A', '1016'),\n\t\t\t\tregisters=(_R.battery_charge, ),\n\t\t\t\t)\n_D('MX610 Left-Handled Mouse', codename='MX610L', protocol=1.0, wpid='1004',\n\t\t\t\tregisters=(_R.battery_status, ),\n\t\t\t\t)\n_D('V400 Laser Cordless Mouse', codename='V400', protocol=1.0, wpid='1003',\n\t\t\t\tregisters=(_R.battery_status, ),\n\t\t\t\t)\n_D('V450 Laser Cordless Mouse', codename='V450', protocol=1.0, wpid='1005',\n\t\t\t\tregisters=(_R.battery_status, ),\n\t\t\t\t)\n_D('VX Revolution', codename='VX Revolution', kind=_DK.mouse, protocol=1.0, wpid=('1006', '100D'),\n\t\t\t\tregisters=(_R.battery_charge, ),\n\t\t\t\t)\n_D('MX Air', codename='MX Air', protocol=1.0, kind=_DK.mouse, wpid=('1007', '100E'),\n\t\t\t\tregisters=(_R.battery_charge, ),\n\t\t\t\t)\n_D('MX Revolution', codename='MX Revolution', protocol=1.0, kind=_DK.mouse, wpid=('1008', '100C'),\n\t\t\t\tregisters=(_R.battery_charge, ),\n\t\t\t\t)\n_D('MX 1100 Cordless Laser Mouse', codename='MX 1100', protocol=1.0, kind=_DK.mouse, wpid='1014',\n registers=(_R.battery_charge, ),\n settings=[\n\t\t\t\t\t\t\t_RS.smooth_scroll(),\n\t\t\t\t\t\t\t_RS.side_scroll(),\n\t\t\t\t\t\t],\n )\n\n# Some exotics...\n\n_D('Fujitsu Sonic Mouse', codename='Sonic', protocol=1.0, wpid='1029')\n", "path": "lib/logitech_receiver/descriptors.py" } ]
diff --git a/docs/devices/mk220-new.txt b/docs/devices/mk220-new.txt new file mode 100644 index 0000000000..d8712b3b25 --- /dev/null +++ b/docs/devices/mk220-new.txt @@ -0,0 +1,82 @@ +Unifying Receiver + Device path : /dev/hidraw1 + USB id : 046d:c534 + Serial : 0 + Firmware : 29.00.B0015 + Has 2 paired device(s) out of a maximum of 6. + Notifications: (none) + + 1: Wireless Keyboard MK270 + Codename : MK270 + Kind : keyboard + Wireless PID : 4023 + Protocol : HID++ 2.0 + Polling rate : 20 ms (50Hz) + Serial number: 00000000 + Firmware: RQK 49.00.B0029 + Supports 18 HID++ 2.0 features: + 0: ROOT {0000} + 1: FEATURE SET {0001} + 2: DEVICE FW VERSION {0003} + 3: DEVICE NAME {0005} + 4: BATTERY STATUS {1000} + 5: REPROG CONTROLS {1B00} + 6: WIRELESS DEVICE STATUS {1D4B} + 7: FN INVERSION {40A0} + 8: ENCRYPTION {4100} + 9: KEYBOARD LAYOUT {4520} + 10: unknown:1810 {1810} internal, hidden + 11: unknown:1830 {1830} internal, hidden + 12: unknown:1890 {1890} internal, hidden + 13: unknown:18A0 {18A0} internal, hidden + 14: unknown:18B0 {18B0} internal, hidden + 15: unknown:1DF3 {1DF3} internal, hidden + 16: unknown:1E00 {1E00} hidden + 17: unknown:1868 {1868} internal, hidden + Has 11 reprogrammable keys: + 0: MY HOME => HomePage is FN, FN sensitive, reprogrammable + 1: Mail => Email is FN, FN sensitive, reprogrammable + 2: SEARCH => Search is FN, FN sensitive, reprogrammable + 3: Calculator => Calculator is FN, FN sensitive, reprogrammable + 4: MEDIA PLAYER => Music is FN, FN sensitive, reprogrammable + 5: Previous => Previous is FN, FN sensitive + 6: Play/Pause => Play/Pause is FN, FN sensitive + 7: Next => Next is FN, FN sensitive + 8: Mute => Mute is FN, FN sensitive + 9: Volume Down => Volume Down is FN, FN sensitive + 10: Volume Up => Volume Up is FN, FN sensitive + Battery: 30%, discharging. + + 2: Wireless Mouse + Codename : + Kind : mouse + Wireless PID : 4022 + Protocol : HID++ 2.0 + Polling rate : 8 ms (125Hz) + Serial number: 00000000 + Firmware: RQM 38.00.B0044 + Supports 18 HID++ 2.0 features: + 0: ROOT {0000} + 1: FEATURE SET {0001} + 2: DEVICE FW VERSION {0003} + 3: DEVICE NAME {0005} + 4: BATTERY STATUS {1000} + 5: REPROG CONTROLS {1B00} + 6: WIRELESS DEVICE STATUS {1D4B} + 7: VERTICAL SCROLLING {2100} + 8: MOUSE POINTER {2200} + 9: unknown:1810 {1810} internal, hidden + 10: unknown:1830 {1830} internal, hidden + 11: unknown:1850 {1850} internal, hidden + 12: unknown:1890 {1890} internal, hidden + 13: unknown:18B0 {18B0} internal, hidden + 14: unknown:1DF3 {1DF3} internal, hidden + 15: unknown:1868 {1868} internal, hidden + 16: unknown:1869 {1869} internal, hidden + 17: unknown:1E00 {1E00} hidden + Has 3 reprogrammable keys: + 0: LEFT CLICK => LeftClick mse, reprogrammable + 1: RIGHT CLICK => RightClick mse, reprogrammable + 2: MIDDLE BUTTON => MiddleMouseButton mse, reprogrammable + Battery: 30%, discharging. + diff --git a/lib/logitech_receiver/descriptors.py b/lib/logitech_receiver/descriptors.py index 333d71c023..8d8c1496d9 100644 --- a/lib/logitech_receiver/descriptors.py +++ b/lib/logitech_receiver/descriptors.py @@ -212,6 +212,7 @@ def _D(name, codename=None, kind=None, wpid=None, protocol=None, registers=None, # Mice +_D('Wireless Mouse M150', protocol=2.0, wpid='4022') _D('Wireless Mouse M175', protocol=2.0, wpid='4008') _D('Wireless Mouse M185') _D('Wireless Mouse M187', protocol=2.0, wpid='4019')
Parsl__parsl-972
Fix `ModuleNotFoundError: No module named 'monitoring'` Looks like this bug was introduced with the recent merge of monitoring back into the parsl repo. ``` Traceback (most recent call last): File "/Users/awoodard/software/miniconda3/bin/parsl-visualize", line 11, in <module> load_entry_point('parsl==0.7.2', 'console_scripts', 'parsl-visualize')() File "/Users/awoodard/software/miniconda3/lib/python3.7/site-packages/pkg_resources/__init__.py", line 484, in load_entry_point return get_distribution(dist).load_entry_point(group, name) File "/Users/awoodard/software/miniconda3/lib/python3.7/site-packages/pkg_resources/__init__.py", line 2707, in load_entry_point return ep.load() File "/Users/awoodard/software/miniconda3/lib/python3.7/site-packages/pkg_resources/__init__.py", line 2325, in load return self.resolve() File "/Users/awoodard/software/miniconda3/lib/python3.7/site-packages/pkg_resources/__init__.py", line 2331, in resolve module = __import__(self.module_name, fromlist=['__name__'], level=0) ModuleNotFoundError: No module named 'monitoring' ```
[ { "content": "from setuptools import setup, find_packages\n\nwith open('parsl/version.py') as f:\n exec(f.read())\n\nwith open('requirements.txt') as f:\n install_requires = f.readlines()\n\nextras_require = {\n 'monitoring' : [\n 'psutil',\n 'sqlalchemy',\n 'sqlalchemy_utils',\n 'pydot',\n 'networkx',\n 'Flask',\n 'flask_sqlalchemy',\n 'pandas',\n 'plotly',\n 'python-daemon'\n ],\n 'aws' : ['boto3'],\n 'kubernetes' : ['kubernetes'],\n 'extreme_scale' : ['mpi4py'],\n 'docs' : ['nbsphinx', 'sphinx_rtd_theme'],\n 'google_cloud' : ['google-auth', 'google-api-python-client'],\n 'gssapi' : ['python-gssapi'],\n}\nextras_require['all'] = sum(extras_require.values(), [])\n\nsetup(\n name='parsl',\n version=VERSION,\n description='Simple data dependent workflows in Python',\n long_description='Simple parallel workflows system for Python',\n url='https://github.com/Parsl/parsl',\n author='The Parsl Team',\n author_email='[email protected]',\n license='Apache 2.0',\n download_url='https://github.com/Parsl/parsl/archive/{}.tar.gz'.format(VERSION),\n include_package_data=True,\n packages=find_packages(),\n install_requires=install_requires,\n scripts = ['parsl/executors/high_throughput/process_worker_pool.py',\n 'parsl/executors/extreme_scale/mpi_worker_pool.py',\n 'parsl/executors/low_latency/lowlatency_worker.py',\n ],\n extras_require=extras_require,\n classifiers=[\n # Maturity\n 'Development Status :: 3 - Alpha',\n # Intended audience\n 'Intended Audience :: Developers',\n # Licence, must match with licence above\n 'License :: OSI Approved :: Apache Software License',\n # Python versions supported\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n ],\n keywords=['Workflows', 'Scientific computing'],\n entry_points={'console_scripts':\n [\n 'parsl-globus-auth=parsl.data_provider.globus:cli_run',\n 'parsl-visualize=monitoring.visualization.app:cli_run',\n ]}\n)\n", "path": "setup.py" } ]
[ { "content": "from setuptools import setup, find_packages\n\nwith open('parsl/version.py') as f:\n exec(f.read())\n\nwith open('requirements.txt') as f:\n install_requires = f.readlines()\n\nextras_require = {\n 'monitoring' : [\n 'psutil',\n 'sqlalchemy',\n 'sqlalchemy_utils',\n 'pydot',\n 'networkx',\n 'Flask',\n 'flask_sqlalchemy',\n 'pandas',\n 'plotly',\n 'python-daemon'\n ],\n 'aws' : ['boto3'],\n 'kubernetes' : ['kubernetes'],\n 'extreme_scale' : ['mpi4py'],\n 'docs' : ['nbsphinx', 'sphinx_rtd_theme'],\n 'google_cloud' : ['google-auth', 'google-api-python-client'],\n 'gssapi' : ['python-gssapi'],\n}\nextras_require['all'] = sum(extras_require.values(), [])\n\nsetup(\n name='parsl',\n version=VERSION,\n description='Simple data dependent workflows in Python',\n long_description='Simple parallel workflows system for Python',\n url='https://github.com/Parsl/parsl',\n author='The Parsl Team',\n author_email='[email protected]',\n license='Apache 2.0',\n download_url='https://github.com/Parsl/parsl/archive/{}.tar.gz'.format(VERSION),\n include_package_data=True,\n packages=find_packages(),\n install_requires=install_requires,\n scripts = ['parsl/executors/high_throughput/process_worker_pool.py',\n 'parsl/executors/extreme_scale/mpi_worker_pool.py',\n 'parsl/executors/low_latency/lowlatency_worker.py',\n ],\n extras_require=extras_require,\n classifiers=[\n # Maturity\n 'Development Status :: 3 - Alpha',\n # Intended audience\n 'Intended Audience :: Developers',\n # Licence, must match with licence above\n 'License :: OSI Approved :: Apache Software License',\n # Python versions supported\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n ],\n keywords=['Workflows', 'Scientific computing'],\n entry_points={'console_scripts':\n [\n 'parsl-globus-auth=parsl.data_provider.globus:cli_run',\n 'parsl-visualize=parsl.monitoring.visualization.app:cli_run',\n ]}\n)\n", "path": "setup.py" } ]
diff --git a/setup.py b/setup.py index 5e6a08fdf6..eb4e546418 100755 --- a/setup.py +++ b/setup.py @@ -61,6 +61,6 @@ entry_points={'console_scripts': [ 'parsl-globus-auth=parsl.data_provider.globus:cli_run', - 'parsl-visualize=monitoring.visualization.app:cli_run', + 'parsl-visualize=parsl.monitoring.visualization.app:cli_run', ]} )
pystiche__pystiche-103
ZeroDivisionError with default_epoch_optim_loop I get an `ZeroDivisionError: integer division or modulo by zero` when using the `default_transformer_epoch_optim_loop`. This is probably because the `num_batches` of the `batch_sampler` is much smaller than in the `default_transformer_optim_loop` which results in `log_freq=0` in `default_transformer_optim_log_fn.` Below is a minimal example to reproduce the error: ```python from pystiche.optim.log import default_transformer_optim_log_fn, OptimLogger logger = OptimLogger() num_batches = 300 log_fn = default_transformer_optim_log_fn(logger, num_batches) image_loading_velocity = 1 image_processing_velocity = 1 batch = 1 loss = 1 log_fn(batch, loss, image_loading_velocity, image_processing_velocity) ```
[ { "content": "from typing import Union, Optional, Tuple, Callable\nimport contextlib\nimport sys\nimport logging\nimport torch\nfrom torch.optim.optimizer import Optimizer\nfrom torch.optim.lr_scheduler import _LRScheduler as LRScheduler\nimport pystiche\nfrom pystiche.pyramid.level import PyramidLevel\nfrom .meter import FloatMeter, LossMeter, ProgressMeter\n\n__all__ = [\n \"default_logger\",\n \"OptimLogger\",\n \"default_image_optim_log_fn\",\n \"default_pyramid_level_header\",\n \"default_transformer_optim_log_fn\",\n]\n\n\ndef default_logger(name: Optional[str] = None, log_file: Optional[str] = None):\n logger = logging.getLogger(name)\n logger.setLevel(logging.INFO)\n\n fmt = logging.Formatter(\n fmt=\"|%(asctime)s| %(message)s\", datefmt=\"%d.%m.%Y %H:%M:%S\"\n )\n\n sh = logging.StreamHandler(sys.stdout)\n sh.setLevel(logging.INFO)\n sh.addFilter(lambda record: record.levelno <= logging.INFO)\n sh.setFormatter(fmt)\n logger.addHandler(sh)\n\n sh = logging.StreamHandler(sys.stderr)\n sh.setLevel(logging.WARNING)\n sh.setFormatter(fmt)\n logger.addHandler(sh)\n\n if log_file is not None:\n fh = logging.FileHandler(log_file)\n fh.setLevel(logging.INFO)\n fh.setFormatter(fmt)\n logger.addHandler(fh)\n\n return logger\n\n\nclass OptimLogger:\n INDENT = 2\n SEP_LINE_LENGTH = 80\n SEP_CHARS = (\"#\", \"=\", \"-\", \".\")\n\n def __init__(self, logger: Optional[logging.Logger] = None):\n if logger is None:\n logger = default_logger()\n self.logger = logger\n\n self._environ_indent_offset = 0\n self._environ_level_offset = 0\n\n def _calc_abs_indent(self, indent: int, rel: bool):\n abs_indent = indent\n if rel:\n abs_indent += self._environ_indent_offset\n return abs_indent\n\n def _calc_abs_level(self, level: int, rel: bool):\n abs_level = level\n if rel:\n abs_level += self._environ_level_offset\n return abs_level\n\n def message(self, msg: str, indent: int = 0, rel=True) -> None:\n abs_indent = self._calc_abs_indent(indent, rel)\n for line in msg.splitlines():\n self.logger.info(\" \" * abs_indent + line)\n\n def sepline(self, level: int = 0, rel=True):\n abs_level = self._calc_abs_level(level, rel)\n self.message(self.SEP_CHARS[abs_level] * self.SEP_LINE_LENGTH)\n\n def sep_message(\n self, msg: str, level: int = 0, rel=True, top_sep=True, bottom_sep=True\n ):\n if top_sep:\n self.sepline(level=level, rel=rel)\n self.message(msg, rel=rel)\n if bottom_sep:\n self.sepline(level=level, rel=rel)\n\n @contextlib.contextmanager\n def environment(self, header: str):\n self.sep_message(header)\n self._environ_indent_offset += self.INDENT\n self._environ_level_offset += 1\n try:\n yield\n finally:\n self._environ_level_offset -= 1\n self._environ_indent_offset -= self.INDENT\n\n\ndef default_image_optim_log_fn(\n optim_logger: OptimLogger, log_freq: int = 50, max_depth: int = 1\n) -> Callable[[int, Union[torch.Tensor, pystiche.LossDict]], None]:\n def log_fn(step: int, loss: Union[torch.Tensor, pystiche.LossDict]) -> None:\n if step % log_freq == 0:\n with optim_logger.environment(f\"Step {step}\"):\n if isinstance(loss, torch.Tensor):\n optim_logger.message(f\"loss: {loss.item():.3e}\")\n else: # isinstance(loss, pystiche.LossDict)\n optim_logger.message(loss.aggregate(max_depth).format())\n\n return log_fn\n\n\ndef default_pyramid_level_header(\n num: int, level: PyramidLevel, input_image_size: Tuple[int, int]\n):\n height, width = input_image_size\n return f\"Pyramid level {num} with {level.num_steps} steps \" f\"({width} x {height})\"\n\n\ndef default_transformer_optim_log_fn(\n optim_logger: OptimLogger,\n num_batches: int,\n log_freq: Optional[int] = None,\n show_loading_velocity: bool = True,\n show_processing_velocity: bool = True,\n show_running_means: bool = True,\n):\n if log_freq is None:\n log_freq = min(round(1e-3 * num_batches) * 10, 50)\n\n window_size = min(10 * log_freq, 1000)\n\n meters = [LossMeter(show_avg=show_running_means, window_size=window_size)]\n if show_loading_velocity:\n meters.append(\n FloatMeter(\n name=\"loading_velocity\",\n fmt=\"{:3.1f} img/s\",\n show_avg=show_running_means,\n window_size=window_size,\n )\n )\n if show_processing_velocity:\n meters.append(\n FloatMeter(\n name=\"processing_velocity\",\n fmt=\"{:3.1f} img/s\",\n show_avg=show_running_means,\n window_size=window_size,\n )\n )\n\n progress_meter = ProgressMeter(num_batches, *meters)\n\n def log_fn(batch, loss, loading_velocity, processing_velocity):\n progress_meter.update(\n batch,\n loss=loss,\n loading_velocity=loading_velocity,\n processing_velocity=processing_velocity,\n )\n\n if batch % log_freq == 0:\n optim_logger.message(str(progress_meter))\n\n return log_fn\n\n\ndef default_epoch_header_fn(\n epoch: int, optimizer: Optimizer, lr_scheduler: Optional[LRScheduler]\n):\n return f\"Epoch {epoch}\"\n", "path": "pystiche/optim/log.py" } ]
[ { "content": "from typing import Union, Optional, Tuple, Callable\nimport contextlib\nimport sys\nimport logging\nimport torch\nfrom torch.optim.optimizer import Optimizer\nfrom torch.optim.lr_scheduler import _LRScheduler as LRScheduler\nimport pystiche\nfrom pystiche.pyramid.level import PyramidLevel\nfrom .meter import FloatMeter, LossMeter, ProgressMeter\n\n__all__ = [\n \"default_logger\",\n \"OptimLogger\",\n \"default_image_optim_log_fn\",\n \"default_pyramid_level_header\",\n \"default_transformer_optim_log_fn\",\n]\n\n\ndef default_logger(name: Optional[str] = None, log_file: Optional[str] = None):\n logger = logging.getLogger(name)\n logger.setLevel(logging.INFO)\n\n fmt = logging.Formatter(\n fmt=\"|%(asctime)s| %(message)s\", datefmt=\"%d.%m.%Y %H:%M:%S\"\n )\n\n sh = logging.StreamHandler(sys.stdout)\n sh.setLevel(logging.INFO)\n sh.addFilter(lambda record: record.levelno <= logging.INFO)\n sh.setFormatter(fmt)\n logger.addHandler(sh)\n\n sh = logging.StreamHandler(sys.stderr)\n sh.setLevel(logging.WARNING)\n sh.setFormatter(fmt)\n logger.addHandler(sh)\n\n if log_file is not None:\n fh = logging.FileHandler(log_file)\n fh.setLevel(logging.INFO)\n fh.setFormatter(fmt)\n logger.addHandler(fh)\n\n return logger\n\n\nclass OptimLogger:\n INDENT = 2\n SEP_LINE_LENGTH = 80\n SEP_CHARS = (\"#\", \"=\", \"-\", \".\")\n\n def __init__(self, logger: Optional[logging.Logger] = None):\n if logger is None:\n logger = default_logger()\n self.logger = logger\n\n self._environ_indent_offset = 0\n self._environ_level_offset = 0\n\n def _calc_abs_indent(self, indent: int, rel: bool):\n abs_indent = indent\n if rel:\n abs_indent += self._environ_indent_offset\n return abs_indent\n\n def _calc_abs_level(self, level: int, rel: bool):\n abs_level = level\n if rel:\n abs_level += self._environ_level_offset\n return abs_level\n\n def message(self, msg: str, indent: int = 0, rel=True) -> None:\n abs_indent = self._calc_abs_indent(indent, rel)\n for line in msg.splitlines():\n self.logger.info(\" \" * abs_indent + line)\n\n def sepline(self, level: int = 0, rel=True):\n abs_level = self._calc_abs_level(level, rel)\n self.message(self.SEP_CHARS[abs_level] * self.SEP_LINE_LENGTH)\n\n def sep_message(\n self, msg: str, level: int = 0, rel=True, top_sep=True, bottom_sep=True\n ):\n if top_sep:\n self.sepline(level=level, rel=rel)\n self.message(msg, rel=rel)\n if bottom_sep:\n self.sepline(level=level, rel=rel)\n\n @contextlib.contextmanager\n def environment(self, header: str):\n self.sep_message(header)\n self._environ_indent_offset += self.INDENT\n self._environ_level_offset += 1\n try:\n yield\n finally:\n self._environ_level_offset -= 1\n self._environ_indent_offset -= self.INDENT\n\n\ndef default_image_optim_log_fn(\n optim_logger: OptimLogger, log_freq: int = 50, max_depth: int = 1\n) -> Callable[[int, Union[torch.Tensor, pystiche.LossDict]], None]:\n def log_fn(step: int, loss: Union[torch.Tensor, pystiche.LossDict]) -> None:\n if step % log_freq == 0:\n with optim_logger.environment(f\"Step {step}\"):\n if isinstance(loss, torch.Tensor):\n optim_logger.message(f\"loss: {loss.item():.3e}\")\n else: # isinstance(loss, pystiche.LossDict)\n optim_logger.message(loss.aggregate(max_depth).format())\n\n return log_fn\n\n\ndef default_pyramid_level_header(\n num: int, level: PyramidLevel, input_image_size: Tuple[int, int]\n):\n height, width = input_image_size\n return f\"Pyramid level {num} with {level.num_steps} steps \" f\"({width} x {height})\"\n\n\ndef default_transformer_optim_log_fn(\n optim_logger: OptimLogger,\n num_batches: int,\n log_freq: Optional[int] = None,\n show_loading_velocity: bool = True,\n show_processing_velocity: bool = True,\n show_running_means: bool = True,\n):\n if log_freq is None:\n log_freq = max(min(round(1e-3 * num_batches) * 10, 50), 1)\n\n window_size = min(10 * log_freq, 1000)\n\n meters = [LossMeter(show_avg=show_running_means, window_size=window_size)]\n if show_loading_velocity:\n meters.append(\n FloatMeter(\n name=\"loading_velocity\",\n fmt=\"{:3.1f} img/s\",\n show_avg=show_running_means,\n window_size=window_size,\n )\n )\n if show_processing_velocity:\n meters.append(\n FloatMeter(\n name=\"processing_velocity\",\n fmt=\"{:3.1f} img/s\",\n show_avg=show_running_means,\n window_size=window_size,\n )\n )\n\n progress_meter = ProgressMeter(num_batches, *meters)\n\n def log_fn(batch, loss, loading_velocity, processing_velocity):\n progress_meter.update(\n batch,\n loss=loss,\n loading_velocity=loading_velocity,\n processing_velocity=processing_velocity,\n )\n\n if batch % log_freq == 0:\n optim_logger.message(str(progress_meter))\n\n return log_fn\n\n\ndef default_epoch_header_fn(\n epoch: int, optimizer: Optimizer, lr_scheduler: Optional[LRScheduler]\n):\n return f\"Epoch {epoch}\"\n", "path": "pystiche/optim/log.py" } ]
diff --git a/pystiche/optim/log.py b/pystiche/optim/log.py index f972f623..f21c18bb 100644 --- a/pystiche/optim/log.py +++ b/pystiche/optim/log.py @@ -131,7 +131,7 @@ def default_transformer_optim_log_fn( show_running_means: bool = True, ): if log_freq is None: - log_freq = min(round(1e-3 * num_batches) * 10, 50) + log_freq = max(min(round(1e-3 * num_batches) * 10, 50), 1) window_size = min(10 * log_freq, 1000)
dotkom__onlineweb4-1902
Cannot view inventory ## What kind of an issue is this? - [x] Bug report ## What is the expected behaviour? To be able to view the inventory ## What is the current behaviour? A 500 error, with the message `TypeError: '>=' not supported between instances of 'datetime.date' and 'NoneType'`. ## How do you reproduce this problem? Make sure the inventory is not empty, and try to visit it.
[ { "content": "# -*- coding: utf-8 -*-\n\nfrom django.conf import settings\nfrom django.core.mail import EmailMessage\nfrom django.db import models\nfrom django.utils import timezone\nfrom django.utils.translation import ugettext as _\n\nfrom apps.gallery.models import ResponsiveImage\n\n\nclass ItemCategory(models.Model):\n name = models.CharField(_(\"Kategori\"), max_length=50)\n\n def __str__(self):\n return self.name\n\n\nclass Item(models.Model):\n\n name = models.CharField(_(\"Varetype\"), max_length=50)\n description = models.CharField(_(\"Beskrivelse\"), max_length=50, null=True, blank=True)\n price = models.IntegerField(_(\"Pris\"), null=True, blank=True)\n available = models.BooleanField(_(\"Til salgs\"), default=False)\n category = models.ForeignKey(ItemCategory, verbose_name=_(\"Kategori\"),\n related_name=\"category\", null=True, blank=True)\n image = models.ForeignKey(ResponsiveImage, null=True, blank=True, default=None)\n\n @property\n def oldest_expiration_date(self):\n batches = self.batches.all().order_by(\"expiration_date\")\n if batches:\n return batches[0].expiration_date\n else:\n return None\n\n @property\n def last_added(self):\n batches = self.batches.all().order_by(\"-date_added\")\n if batches:\n return batches[0].date_added\n else:\n return None\n\n def oldest_batch(self):\n batches = self.batches.filter(amount__gt=0).order_by(\"date_added\")\n if batches:\n return batches[0]\n else:\n return None\n\n @property\n def total_amount(self):\n return sum([batch.amount for batch in self.batches.all()])\n\n @property\n def has_expired_batch(self):\n if timezone.now().date() >= self.oldest_expiration_date:\n return True\n return False\n\n def reduce_stock(self, amount):\n \"\"\"\n Makes an assumption that the oldest batches are sold first and reduce them first.\n \"\"\"\n\n oldest_batch = self.oldest_batch()\n\n if oldest_batch:\n if oldest_batch.amount > amount:\n oldest_batch.amount = oldest_batch.amount - amount\n oldest_batch.save()\n else:\n diff = amount - oldest_batch.amount\n oldest_batch.amount = 0\n oldest_batch.save()\n self.reduce_stock(diff)\n\n self.handle_notifications(amount)\n\n def handle_notifications(self, amount):\n\n # Send one notification when the stock goes to or below 10\n if self.total_amount <= 10 and self.total_amount + amount > 10:\n message = \"Det er kun \" + str(self.total_amount) + \" igjen av \" + str(self.name) + \\\n \" på kontoret.\\n\\n\" \\\n \"Dette er en automatisk generert melding og antallet kan være noe feil.\"\n\n EmailMessage(\n \"[Nibble] Lav stock på \" + self.name,\n str(message),\n \"[email protected]\",\n [],\n [settings.EMAIL_TRIKOM]\n ).send()\n\n def __str__(self):\n return self.name\n\n class Meta(object):\n verbose_name = _(\"Vare\")\n verbose_name_plural = _(\"Varer\")\n permissions = (\n (\"view_item\", \"View Inventory Item\"),\n )\n\n\nclass Batch(models.Model):\n\n item = models.ForeignKey(Item, verbose_name=_(\"Vare\"), related_name=\"batches\")\n amount = models.IntegerField(_(\"Antall\"), default=0)\n date_added = models.DateField(_(\"Dato lagt til\"), editable=False, auto_now_add=True)\n expiration_date = models.DateField(_(\"Utløpsdato\"), null=True, blank=True, editable=True)\n\n class Meta(object):\n verbose_name = _(\"Batch\")\n verbose_name_plural = _(\"Batches\")\n", "path": "apps/inventory/models.py" } ]
[ { "content": "# -*- coding: utf-8 -*-\n\nfrom django.conf import settings\nfrom django.core.mail import EmailMessage\nfrom django.db import models\nfrom django.utils import timezone\nfrom django.utils.translation import ugettext as _\n\nfrom apps.gallery.models import ResponsiveImage\n\n\nclass ItemCategory(models.Model):\n name = models.CharField(_(\"Kategori\"), max_length=50)\n\n def __str__(self):\n return self.name\n\n\nclass Item(models.Model):\n\n name = models.CharField(_(\"Varetype\"), max_length=50)\n description = models.CharField(_(\"Beskrivelse\"), max_length=50, null=True, blank=True)\n price = models.IntegerField(_(\"Pris\"), null=True, blank=True)\n available = models.BooleanField(_(\"Til salgs\"), default=False)\n category = models.ForeignKey(ItemCategory, verbose_name=_(\"Kategori\"),\n related_name=\"category\", null=True, blank=True)\n image = models.ForeignKey(ResponsiveImage, null=True, blank=True, default=None)\n\n @property\n def oldest_expiration_date(self):\n batches = self.batches.all().order_by(\"expiration_date\")\n if batches:\n return batches[0].expiration_date\n else:\n return None\n\n @property\n def last_added(self):\n batches = self.batches.all().order_by(\"-date_added\")\n if batches:\n return batches[0].date_added\n else:\n return None\n\n def oldest_batch(self):\n batches = self.batches.filter(amount__gt=0).order_by(\"date_added\")\n if batches:\n return batches[0]\n else:\n return None\n\n @property\n def total_amount(self):\n return sum([batch.amount for batch in self.batches.all()])\n\n @property\n def has_expired_batch(self):\n if self.oldest_expiration_date and timezone.now().date() >= self.oldest_expiration_date:\n return True\n return False\n\n def reduce_stock(self, amount):\n \"\"\"\n Makes an assumption that the oldest batches are sold first and reduce them first.\n \"\"\"\n\n oldest_batch = self.oldest_batch()\n\n if oldest_batch:\n if oldest_batch.amount > amount:\n oldest_batch.amount = oldest_batch.amount - amount\n oldest_batch.save()\n else:\n diff = amount - oldest_batch.amount\n oldest_batch.amount = 0\n oldest_batch.save()\n self.reduce_stock(diff)\n\n self.handle_notifications(amount)\n\n def handle_notifications(self, amount):\n\n # Send one notification when the stock goes to or below 10\n if self.total_amount <= 10 and self.total_amount + amount > 10:\n message = \"Det er kun \" + str(self.total_amount) + \" igjen av \" + str(self.name) + \\\n \" på kontoret.\\n\\n\" \\\n \"Dette er en automatisk generert melding og antallet kan være noe feil.\"\n\n EmailMessage(\n \"[Nibble] Lav stock på \" + self.name,\n str(message),\n \"[email protected]\",\n [],\n [settings.EMAIL_TRIKOM]\n ).send()\n\n def __str__(self):\n return self.name\n\n class Meta(object):\n verbose_name = _(\"Vare\")\n verbose_name_plural = _(\"Varer\")\n permissions = (\n (\"view_item\", \"View Inventory Item\"),\n )\n\n\nclass Batch(models.Model):\n\n item = models.ForeignKey(Item, verbose_name=_(\"Vare\"), related_name=\"batches\")\n amount = models.IntegerField(_(\"Antall\"), default=0)\n date_added = models.DateField(_(\"Dato lagt til\"), editable=False, auto_now_add=True)\n expiration_date = models.DateField(_(\"Utløpsdato\"), null=True, blank=True, editable=True)\n\n class Meta(object):\n verbose_name = _(\"Batch\")\n verbose_name_plural = _(\"Batches\")\n", "path": "apps/inventory/models.py" } ]
diff --git a/apps/inventory/models.py b/apps/inventory/models.py index 1e518cd06..df000c6a7 100644 --- a/apps/inventory/models.py +++ b/apps/inventory/models.py @@ -55,7 +55,7 @@ def total_amount(self): @property def has_expired_batch(self): - if timezone.now().date() >= self.oldest_expiration_date: + if self.oldest_expiration_date and timezone.now().date() >= self.oldest_expiration_date: return True return False
wright-group__WrightTools-361
collection.keys returns data objects should return names, but get objects
[ { "content": "\"\"\"Collection.\"\"\"\n\n\n# --- import --------------------------------------------------------------------------------------\n\n\nimport os\nimport shutil\n\nimport numpy as np\n\nimport h5py\n\nfrom .. import data as wt_data\nfrom .._base import Group\n\n\n# --- define --------------------------------------------------------------------------------------\n\n\n__all__ = ['Collection']\n\n\n# --- classes -------------------------------------------------------------------------------------\n\n\nclass Collection(Group):\n \"\"\"Nestable Collection of Data objects.\"\"\"\n class_name = 'Collection'\n\n def __iter__(self):\n self.__n = 0\n return self\n\n def __len__(self):\n return len(self.item_names)\n\n def __next__(self):\n if self.__n < len(self):\n out = self[self.__n]\n self.__n += 1\n else:\n raise StopIteration\n return out\n\n def __repr__(self):\n return '<WrightTools.Collection \\'{0}\\' {1} at {2}>'.format(self.natural_name,\n self.item_names,\n '::'.join([self.filepath,\n self.name]))\n\n def __getitem__(self, key):\n if isinstance(key, int):\n key = self.item_names[key]\n out = h5py.Group.__getitem__(self, key)\n if 'class' in out.attrs.keys():\n if out.attrs['class'] == 'Data':\n return wt_data.Data(filepath=self.filepath, parent=self.name, name=key,\n edit_local=True)\n elif out.attrs['class'] == 'Collection':\n return Collection(filepath=self.filepath, parent=self.name, name=key,\n edit_local=True)\n else:\n return Group(filepath=self.filepath, parent=self.name, name=key,\n edit_local=True)\n else:\n return out\n\n def __setitem__(self, key, value):\n raise NotImplementedError\n\n @property\n def item_names(self):\n if 'item_names' not in self.attrs.keys():\n self.attrs['item_names'] = np.array([], dtype='S')\n return [s.decode() for s in self.attrs['item_names']]\n\n def create_collection(self, name='collection', position=None, **kwargs):\n collection = Collection(filepath=self.filepath, parent=self.name, name=name,\n edit_local=True, **kwargs)\n if position is None:\n self._items.append(collection)\n self.attrs['item_names'] = np.append(self.attrs['item_names'],\n collection.natural_name.encode())\n else:\n self._items.insert(position, collection)\n self.attrs['item_names'] = np.insert(self.attrs['item_names'], position,\n collection.natural_name.encode())\n setattr(self, name, collection)\n return collection\n\n def create_data(self, name='data', position=None, **kwargs):\n data = wt_data.Data(filepath=self.filepath, parent=self.name, name=name, edit_local=True,\n **kwargs)\n if position is None:\n self._items.append(data)\n self.attrs['item_names'] = np.append(self.attrs['item_names'],\n data.natural_name.encode())\n else:\n self._items.insert(position, data)\n self.attrs['item_names'] = np.insert(self.attrs['item_names'], position,\n data.natural_name.encode())\n setattr(self, name, data)\n return data\n\n def index(self):\n raise NotImplementedError\n\n def flush(self):\n for item in self._items:\n item.flush()\n self.file.flush()\n\n def save(self, filepath=None, verbose=True):\n # TODO: documentation\n self.flush() # ensure all changes are written to file\n if filepath is None:\n filepath = os.path.join(os.getcwd(), self.natural_name + '.wt5')\n elif len(os.path.basename(filepath).split('.')) == 1:\n filepath += '.wt5'\n filepath = os.path.expanduser(filepath)\n shutil.copyfile(src=self.filepath, dst=filepath)\n if verbose:\n print('file saved at', filepath)\n return filepath\n", "path": "WrightTools/collection/_collection.py" } ]
[ { "content": "\"\"\"Collection.\"\"\"\n\n\n# --- import --------------------------------------------------------------------------------------\n\n\nimport os\nimport shutil\n\nimport numpy as np\n\nimport h5py\n\nfrom .. import data as wt_data\nfrom .._base import Group\n\n\n# --- define --------------------------------------------------------------------------------------\n\n\n__all__ = ['Collection']\n\n\n# --- classes -------------------------------------------------------------------------------------\n\n\nclass Collection(Group):\n \"\"\"Nestable Collection of Data objects.\"\"\"\n class_name = 'Collection'\n\n def __iter__(self):\n self.__n = 0\n return self\n\n def __len__(self):\n return len(self.item_names)\n\n def __next__(self):\n if self.__n < len(self):\n out = self.item_names[self.__n]\n self.__n += 1\n else:\n raise StopIteration\n return out\n\n def __repr__(self):\n return '<WrightTools.Collection \\'{0}\\' {1} at {2}>'.format(self.natural_name,\n self.item_names,\n '::'.join([self.filepath,\n self.name]))\n\n def __getitem__(self, key):\n if isinstance(key, int):\n key = self.item_names[key]\n out = h5py.Group.__getitem__(self, key)\n if 'class' in out.attrs.keys():\n if out.attrs['class'] == 'Data':\n return wt_data.Data(filepath=self.filepath, parent=self.name, name=key,\n edit_local=True)\n elif out.attrs['class'] == 'Collection':\n return Collection(filepath=self.filepath, parent=self.name, name=key,\n edit_local=True)\n else:\n return Group(filepath=self.filepath, parent=self.name, name=key,\n edit_local=True)\n else:\n return out\n\n def __setitem__(self, key, value):\n raise NotImplementedError\n\n @property\n def item_names(self):\n if 'item_names' not in self.attrs.keys():\n self.attrs['item_names'] = np.array([], dtype='S')\n return [s.decode() for s in self.attrs['item_names']]\n\n def create_collection(self, name='collection', position=None, **kwargs):\n collection = Collection(filepath=self.filepath, parent=self.name, name=name,\n edit_local=True, **kwargs)\n if position is None:\n self._items.append(collection)\n self.attrs['item_names'] = np.append(self.attrs['item_names'],\n collection.natural_name.encode())\n else:\n self._items.insert(position, collection)\n self.attrs['item_names'] = np.insert(self.attrs['item_names'], position,\n collection.natural_name.encode())\n setattr(self, name, collection)\n return collection\n\n def create_data(self, name='data', position=None, **kwargs):\n data = wt_data.Data(filepath=self.filepath, parent=self.name, name=name, edit_local=True,\n **kwargs)\n if position is None:\n self._items.append(data)\n self.attrs['item_names'] = np.append(self.attrs['item_names'],\n data.natural_name.encode())\n else:\n self._items.insert(position, data)\n self.attrs['item_names'] = np.insert(self.attrs['item_names'], position,\n data.natural_name.encode())\n setattr(self, name, data)\n return data\n\n def index(self):\n raise NotImplementedError\n\n def flush(self):\n for item in self._items:\n item.flush()\n self.file.flush()\n\n def save(self, filepath=None, verbose=True):\n # TODO: documentation\n self.flush() # ensure all changes are written to file\n if filepath is None:\n filepath = os.path.join(os.getcwd(), self.natural_name + '.wt5')\n elif len(os.path.basename(filepath).split('.')) == 1:\n filepath += '.wt5'\n filepath = os.path.expanduser(filepath)\n shutil.copyfile(src=self.filepath, dst=filepath)\n if verbose:\n print('file saved at', filepath)\n return filepath\n", "path": "WrightTools/collection/_collection.py" } ]
diff --git a/WrightTools/collection/_collection.py b/WrightTools/collection/_collection.py index 5660377e0..297bbeb7b 100644 --- a/WrightTools/collection/_collection.py +++ b/WrightTools/collection/_collection.py @@ -37,7 +37,7 @@ def __len__(self): def __next__(self): if self.__n < len(self): - out = self[self.__n] + out = self.item_names[self.__n] self.__n += 1 else: raise StopIteration
ros__ros_comm-2007
Rospy import * Hi, Doing ```python from rospy import * ``` raises the following exception : ``` Traceback (most recent call last): File "<stdin>", line 1, in <module> AttributeError: module 'rospy' has no attribute 'NodeProxy' ``` After some investigations, `NodeProxy` doesn't seem to exist anymore in the codebase. Simply removing it from the exports should do the trick.
[ { "content": "# Software License Agreement (BSD License)\n#\n# Copyright (c) 2008, Willow Garage, Inc.\n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions\n# are met:\n#\n# * Redistributions of source code must retain the above copyright\n# notice, this list of conditions and the following disclaimer.\n# * Redistributions in binary form must reproduce the above\n# copyright notice, this list of conditions and the following\n# disclaimer in the documentation and/or other materials provided\n# with the distribution.\n# * Neither the name of Willow Garage, Inc. nor the names of its\n# contributors may be used to endorse or promote products derived\n# from this software without specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n# \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS\n# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE\n# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,\n# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,\n# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;\n# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER\n# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT\n# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN\n# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE\n# POSSIBILITY OF SUCH DAMAGE.\n#\n# Copyright (c) 2008, Willow Garage, Inc.\n# Revision $Id$\n\n\"\"\"\nROS client library for Python.\nSee U{http://ros.org/wiki/rospy}\n@author: Ken Conley (kwc)\n\"\"\"\n\n# import symbols into rospy namespace\n# NOTE: there are much better ways to configure python module\n# dictionaries, but the rospy codebase isn't quite in shape for that\n# yet\n\nfrom std_msgs.msg import Header\n\nfrom .client import spin, myargv, init_node, \\\n get_published_topics, \\\n wait_for_message, \\\n get_master, \\\n on_shutdown, \\\n get_param, get_param_cached, get_param_names, set_param, delete_param, has_param, search_param,\\\n DEBUG, INFO, WARN, ERROR, FATAL\nfrom .timer import sleep, Rate, Timer\nfrom .core import is_shutdown, signal_shutdown, \\\n get_node_uri, get_ros_root, \\\n logdebug, logwarn, loginfo, logout, logerr, logfatal, \\\n logdebug_throttle, logwarn_throttle, loginfo_throttle, logerr_throttle, logfatal_throttle, \\\n logdebug_throttle_identical, logwarn_throttle_identical, loginfo_throttle_identical, logerr_throttle_identical, logfatal_throttle_identical, \\\n logdebug_once, logwarn_once, loginfo_once, logerr_once, logfatal_once, \\\n parse_rosrpc_uri\nfrom .exceptions import *\nfrom .msg import AnyMsg\nfrom .msproxy import MasterProxy\nfrom .names import get_name, get_caller_id, get_namespace, resolve_name, remap_name\nfrom .rostime import Time, Duration, get_rostime, get_time\nfrom .service import ServiceException\n\n# - use tcp ros implementation of services\nfrom .impl.tcpros_service import Service, ServiceProxy, wait_for_service\nfrom .topics import Message, SubscribeListener, Publisher, Subscriber\n\n## \\defgroup validators Validators\n## \\defgroup clientapi Client API\n\n__all__ = [\n 'Header',\n 'spin',\n 'myargv',\n 'init_node',\n 'get_master',\n 'get_published_topics',\n 'wait_for_service',\n 'on_shutdown',\n 'get_param',\n 'get_param_cached',\n 'get_param_names',\n 'set_param',\n 'delete_param',\n 'has_param',\n 'search_param',\n 'sleep',\n 'Rate',\n 'DEBUG',\n 'INFO',\n 'WARN',\n 'ERROR',\n 'FATAL',\n 'is_shutdown',\n 'signal_shutdown',\n 'get_node_uri',\n 'get_ros_root',\n 'logdebug',\n 'logwarn', 'loginfo',\n 'logout', 'logerr', 'logfatal',\n 'logdebug_throttle',\n 'logwarn_throttle', 'loginfo_throttle',\n 'logerr_throttle', 'logfatal_throttle',\n 'logdebug_once',\n 'logwarn_once', 'loginfo_once',\n 'logerr_once', 'logfatal_once',\n 'parse_rosrpc_uri',\n 'MasterProxy',\n 'NodeProxy', \n 'ROSException',\n 'ROSSerializationException',\n 'ROSInitException',\n 'ROSInterruptException',\n 'ROSInternalException',\n 'TransportException',\n 'TransportTerminated',\n 'TransportInitError',\n 'AnyMsg', 'Message',\n 'get_name',\n 'get_caller_id',\n 'get_namespace',\n 'resolve_name',\n 'remap_name',\n 'Time', 'Duration', 'get_rostime', 'get_time',\n 'ServiceException', \n 'Service', 'ServiceProxy',\n 'SubscribeListener', 'Publisher', 'Subscriber',\n ]\n", "path": "clients/rospy/src/rospy/__init__.py" } ]
[ { "content": "# Software License Agreement (BSD License)\n#\n# Copyright (c) 2008, Willow Garage, Inc.\n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions\n# are met:\n#\n# * Redistributions of source code must retain the above copyright\n# notice, this list of conditions and the following disclaimer.\n# * Redistributions in binary form must reproduce the above\n# copyright notice, this list of conditions and the following\n# disclaimer in the documentation and/or other materials provided\n# with the distribution.\n# * Neither the name of Willow Garage, Inc. nor the names of its\n# contributors may be used to endorse or promote products derived\n# from this software without specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n# \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS\n# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE\n# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,\n# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,\n# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;\n# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER\n# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT\n# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN\n# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE\n# POSSIBILITY OF SUCH DAMAGE.\n#\n# Copyright (c) 2008, Willow Garage, Inc.\n# Revision $Id$\n\n\"\"\"\nROS client library for Python.\nSee U{http://ros.org/wiki/rospy}\n@author: Ken Conley (kwc)\n\"\"\"\n\n# import symbols into rospy namespace\n# NOTE: there are much better ways to configure python module\n# dictionaries, but the rospy codebase isn't quite in shape for that\n# yet\n\nfrom std_msgs.msg import Header\n\nfrom .client import spin, myargv, init_node, \\\n get_published_topics, \\\n wait_for_message, \\\n get_master, \\\n on_shutdown, \\\n get_param, get_param_cached, get_param_names, set_param, delete_param, has_param, search_param,\\\n DEBUG, INFO, WARN, ERROR, FATAL\nfrom .timer import sleep, Rate, Timer\nfrom .core import is_shutdown, signal_shutdown, \\\n get_node_uri, get_ros_root, \\\n logdebug, logwarn, loginfo, logout, logerr, logfatal, \\\n logdebug_throttle, logwarn_throttle, loginfo_throttle, logerr_throttle, logfatal_throttle, \\\n logdebug_throttle_identical, logwarn_throttle_identical, loginfo_throttle_identical, logerr_throttle_identical, logfatal_throttle_identical, \\\n logdebug_once, logwarn_once, loginfo_once, logerr_once, logfatal_once, \\\n parse_rosrpc_uri\nfrom .exceptions import *\nfrom .msg import AnyMsg\nfrom .msproxy import MasterProxy\nfrom .names import get_name, get_caller_id, get_namespace, resolve_name, remap_name\nfrom .rostime import Time, Duration, get_rostime, get_time\nfrom .service import ServiceException\n\n# - use tcp ros implementation of services\nfrom .impl.tcpros_service import Service, ServiceProxy, wait_for_service\nfrom .topics import Message, SubscribeListener, Publisher, Subscriber\n\n## \\defgroup validators Validators\n## \\defgroup clientapi Client API\n\n__all__ = [\n 'Header',\n 'spin',\n 'myargv',\n 'init_node',\n 'get_master',\n 'get_published_topics',\n 'wait_for_service',\n 'on_shutdown',\n 'get_param',\n 'get_param_cached',\n 'get_param_names',\n 'set_param',\n 'delete_param',\n 'has_param',\n 'search_param',\n 'sleep',\n 'Rate',\n 'DEBUG',\n 'INFO',\n 'WARN',\n 'ERROR',\n 'FATAL',\n 'is_shutdown',\n 'signal_shutdown',\n 'get_node_uri',\n 'get_ros_root',\n 'logdebug',\n 'logwarn', 'loginfo',\n 'logout', 'logerr', 'logfatal',\n 'logdebug_throttle',\n 'logwarn_throttle', 'loginfo_throttle',\n 'logerr_throttle', 'logfatal_throttle',\n 'logdebug_once',\n 'logwarn_once', 'loginfo_once',\n 'logerr_once', 'logfatal_once',\n 'parse_rosrpc_uri',\n 'MasterProxy',\n 'ROSException',\n 'ROSSerializationException',\n 'ROSInitException',\n 'ROSInterruptException',\n 'ROSInternalException',\n 'TransportException',\n 'TransportTerminated',\n 'TransportInitError',\n 'AnyMsg', 'Message',\n 'get_name',\n 'get_caller_id',\n 'get_namespace',\n 'resolve_name',\n 'remap_name',\n 'Time', 'Duration', 'get_rostime', 'get_time',\n 'ServiceException', \n 'Service', 'ServiceProxy',\n 'SubscribeListener', 'Publisher', 'Subscriber',\n ]\n", "path": "clients/rospy/src/rospy/__init__.py" } ]
diff --git a/clients/rospy/src/rospy/__init__.py b/clients/rospy/src/rospy/__init__.py index 1c4a5693a9..6d9ebc8311 100644 --- a/clients/rospy/src/rospy/__init__.py +++ b/clients/rospy/src/rospy/__init__.py @@ -113,7 +113,6 @@ 'logerr_once', 'logfatal_once', 'parse_rosrpc_uri', 'MasterProxy', - 'NodeProxy', 'ROSException', 'ROSSerializationException', 'ROSInitException',
kivy__kivy-5960
Kivy is using deprecated Cython syntax for properties ### Versions * Kivy: master ### Description According to [Cython documentation](http://cython.readthedocs.io/en/latest/src/userguide/extension_types.html) we are using a "special (deprecated) legacy syntax for defining properties in an extension class". The new syntax is `@property`, `@propname.setter` and `@propname.deleter` instead of `property propname:` The deprecated syntax is used in the files listed below (and maybe elsewhere). It's not clear if or when the legacy syntax is slated for removal. * [graphics/gl_instructions.pyx](https://github.com/kivy/kivy/blob/master/kivy/graphics/gl_instructions.pyx) * [lib/vidcore_lite/bcm.pyx](https://github.com/kivy/kivy/blob/master/kivy/lib/vidcore_lite/bcm.pyx) * [graphics/context_instructions.pyx](https://github.com/kivy/kivy/blob/master/kivy/graphics/context_instructions.pyx) * [graphics/scissor_instructions.pyx](https://github.com/kivy/kivy/blob/master/kivy/graphics/scissor_instructions.pyx) * [graphics/instructions.pyx](https://github.com/kivy/kivy/blob/master/kivy/graphics/instructions.pyx) * [graphics/vertex_instructions.pyx](https://github.com/kivy/kivy/blob/master/kivy/graphics/vertex_instructions.pyx) * [graphics/texture.pyx](https://github.com/kivy/kivy/blob/master/kivy/graphics/texture.pyx) * [graphics/vertex_instructions_line.pxi](https://github.com/kivy/kivy/blob/master/kivy/graphics/vertex_instructions_line.pxi) * [graphics/fbo.pyx](https://github.com/kivy/kivy/blob/master/kivy/graphics/fbo.pyx) * [graphics/shader.pyx](https://github.com/kivy/kivy/blob/master/kivy/graphics/shader.pyx) * [graphics/stencil_instructions.pyx](https://github.com/kivy/kivy/blob/master/kivy/graphics/stencil_instructions.pyx) * [graphics/svg.pyx](https://github.com/kivy/kivy/blob/master/kivy/graphics/svg.pyx) * [weakproxy.pyx](https://github.com/kivy/kivy/blob/master/kivy/weakproxy.pyx) * [properties.pyx](https://github.com/kivy/kivy/blob/master/kivy/properties.pyx) * [_event.pyx](https://github.com/kivy/kivy/blob/master/kivy/_event.pyx) * [core/window/_window_sdl2.pyx](https://github.com/kivy/kivy/blob/master/kivy/core/window/_window_sdl2.pyx)
[ { "content": "#\n# Kivy - Cross-platform UI framework\n# https://kivy.org/\n#\nfrom __future__ import print_function\n\nimport sys\nbuild_examples = False\nif \"--build_examples\" in sys.argv:\n build_examples = True\n sys.argv.remove(\"--build_examples\")\n\nfrom copy import deepcopy\nimport os\nfrom os.path import join, dirname, sep, exists, basename, isdir\nfrom os import walk, environ\nfrom distutils.version import LooseVersion\nfrom distutils.sysconfig import get_python_inc\nfrom collections import OrderedDict\nfrom time import sleep\nfrom subprocess import check_output, CalledProcessError\nfrom datetime import datetime\n\nif environ.get('KIVY_USE_SETUPTOOLS'):\n from setuptools import setup, Extension\n print('Using setuptools')\nelse:\n from distutils.core import setup\n from distutils.extension import Extension\n print('Using distutils')\n\n\nPY3 = sys.version > '3'\n\nif PY3: # fix error with py3's LooseVersion comparisons\n def ver_equal(self, other):\n return self.version == other\n\n LooseVersion.__eq__ = ver_equal\n\n\ndef get_description():\n with open(join(dirname(__file__), 'README.md')) as fileh:\n return fileh.read()\n\n\ndef get_version(filename='kivy/version.py'):\n VERSION = kivy.__version__\n DATE = datetime.utcnow().strftime('%Y%m%d')\n try:\n GIT_REVISION = check_output(\n ['git', 'rev-parse', 'HEAD']\n ).strip().decode('ascii')\n except (CalledProcessError, OSError, IOError) as e:\n # CalledProcessError has no errno\n errno = getattr(e, 'errno', None)\n if errno != 2 and 'CalledProcessError' not in repr(e):\n raise\n GIT_REVISION = \"Unknown\"\n\n cnt = (\n \"# THIS FILE IS GENERATED FROM KIVY SETUP.PY\\n\"\n \"__version__ = '%(version)s'\\n\"\n \"__hash__ = '%(hash)s'\\n\"\n \"__date__ = '%(date)s'\\n\"\n )\n\n with open(filename, 'w') as f:\n f.write(cnt % {\n 'version': VERSION,\n 'hash': GIT_REVISION,\n 'date': DATE\n })\n return VERSION\n\n\nMIN_CYTHON_STRING = '0.23'\nMIN_CYTHON_VERSION = LooseVersion(MIN_CYTHON_STRING)\nMAX_CYTHON_STRING = '0.28.3'\nMAX_CYTHON_VERSION = LooseVersion(MAX_CYTHON_STRING)\nCYTHON_UNSUPPORTED = (\n # ref https://github.com/cython/cython/issues/1968\n '0.27', '0.27.2'\n)\n\n\ndef getoutput(cmd, env=None):\n import subprocess\n p = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE,\n stderr=subprocess.PIPE, env=env)\n p.wait()\n if p.returncode: # if not returncode == 0\n print('WARNING: A problem occurred while running {0} (code {1})\\n'\n .format(cmd, p.returncode))\n stderr_content = p.stderr.read()\n if stderr_content:\n print('{0}\\n'.format(stderr_content))\n return \"\"\n return p.stdout.read()\n\n\ndef pkgconfig(*packages, **kw):\n flag_map = {'-I': 'include_dirs', '-L': 'library_dirs', '-l': 'libraries'}\n lenviron = None\n pconfig = join(sys.prefix, 'libs', 'pkgconfig')\n\n if isdir(pconfig):\n lenviron = environ.copy()\n lenviron['PKG_CONFIG_PATH'] = '{};{}'.format(\n environ.get('PKG_CONFIG_PATH', ''), pconfig)\n cmd = 'pkg-config --libs --cflags {}'.format(' '.join(packages))\n results = getoutput(cmd, lenviron).split()\n for token in results:\n ext = token[:2].decode('utf-8')\n flag = flag_map.get(ext)\n if not flag:\n continue\n kw.setdefault(flag, []).append(token[2:].decode('utf-8'))\n return kw\n\n\n# -----------------------------------------------------------------------------\n# Determine on which platform we are\n\nplatform = sys.platform\n\n# Detect 32/64bit for OSX (http://stackoverflow.com/a/1405971/798575)\nif sys.platform == 'darwin':\n if sys.maxsize > 2 ** 32:\n osx_arch = 'x86_64'\n else:\n osx_arch = 'i386'\n\n# Detect Python for android project (http://github.com/kivy/python-for-android)\nndkplatform = environ.get('NDKPLATFORM')\nif ndkplatform is not None and environ.get('LIBLINK'):\n platform = 'android'\nkivy_ios_root = environ.get('KIVYIOSROOT', None)\nif kivy_ios_root is not None:\n platform = 'ios'\nif exists('/opt/vc/include/bcm_host.h'):\n platform = 'rpi'\nif exists('/usr/lib/arm-linux-gnueabihf/libMali.so'):\n platform = 'mali'\n\n# -----------------------------------------------------------------------------\n# Detect options\n#\nc_options = OrderedDict()\nc_options['use_rpi'] = platform == 'rpi'\nc_options['use_mali'] = platform == 'mali'\nc_options['use_egl'] = False\nc_options['use_opengl_es2'] = None\nc_options['use_opengl_mock'] = environ.get('READTHEDOCS', None) == 'True'\nc_options['use_sdl2'] = None\nc_options['use_ios'] = False\nc_options['use_mesagl'] = False\nc_options['use_x11'] = False\nc_options['use_wayland'] = False\nc_options['use_gstreamer'] = None\nc_options['use_avfoundation'] = platform == 'darwin'\nc_options['use_osx_frameworks'] = platform == 'darwin'\nc_options['debug_gl'] = False\n\n# now check if environ is changing the default values\nfor key in list(c_options.keys()):\n ukey = key.upper()\n if ukey in environ:\n value = bool(int(environ[ukey]))\n print('Environ change {0} -> {1}'.format(key, value))\n c_options[key] = value\n\n\n# -----------------------------------------------------------------------------\n# Cython check\n# on python-for-android and kivy-ios, cython usage is external\n\ncython_unsupported_append = '''\n\n Please note that the following versions of Cython are not supported\n at all: {}\n'''.format(', '.join(map(str, CYTHON_UNSUPPORTED)))\n\ncython_min = '''\\\n This version of Cython is not compatible with Kivy. Please upgrade to\n at least version {0}, preferably the newest supported version {1}.\n\n If your platform provides a Cython package, make sure you have upgraded\n to the newest version. If the newest version available is still too low,\n please remove it and install the newest supported Cython via pip:\n\n pip install -I Cython=={1}{2}\\\n'''.format(MIN_CYTHON_STRING, MAX_CYTHON_STRING,\n cython_unsupported_append if CYTHON_UNSUPPORTED else '')\n\ncython_max = '''\\\n This version of Cython is untested with Kivy. While this version may\n work perfectly fine, it is possible that you may experience issues. If\n you do have issues, please downgrade to a supported version. It is\n best to use the newest supported version, {1}, but the minimum\n supported version is {0}.\n\n If your platform provides a Cython package, check if you can downgrade\n to a supported version. Otherwise, uninstall the platform package and\n install Cython via pip:\n\n pip install -I Cython=={1}{2}\\\n'''.format(MIN_CYTHON_STRING, MAX_CYTHON_STRING,\n cython_unsupported_append if CYTHON_UNSUPPORTED else '')\n\ncython_unsupported = '''\\\n This version of Cython suffers from known bugs and is unsupported.\n Please install the newest supported version, {1}, if possible, but\n the minimum supported version is {0}.\n\n If your platform provides a Cython package, check if you can install\n a supported version. Otherwise, uninstall the platform package and\n install Cython via pip:\n\n pip install -I Cython=={1}{2}\\\n'''.format(MIN_CYTHON_STRING, MAX_CYTHON_STRING,\n cython_unsupported_append)\n\nhave_cython = False\nskip_cython = False\nif platform in ('ios', 'android'):\n print('\\nCython check avoided.')\n skip_cython = True\nelse:\n try:\n # check for cython\n from Cython.Distutils import build_ext\n have_cython = True\n import Cython\n cy_version_str = Cython.__version__\n cy_ver = LooseVersion(cy_version_str)\n print('\\nDetected Cython version {}'.format(cy_version_str))\n if cy_ver < MIN_CYTHON_VERSION:\n print(cython_min)\n raise ImportError('Incompatible Cython Version')\n if cy_ver in CYTHON_UNSUPPORTED:\n print(cython_unsupported)\n raise ImportError('Incompatible Cython Version')\n if cy_ver > MAX_CYTHON_VERSION:\n print(cython_max)\n sleep(1)\n except ImportError:\n print(\"\\nCython is missing, it's required for compiling kivy !\\n\\n\")\n raise\n\nif not have_cython:\n from distutils.command.build_ext import build_ext\n\n# -----------------------------------------------------------------------------\n# Setup classes\n\n# the build path where kivy is being compiled\nsrc_path = build_path = dirname(__file__)\n\n\nclass KivyBuildExt(build_ext):\n\n def finalize_options(self):\n retval = build_ext.finalize_options(self)\n global build_path\n if (self.build_lib is not None and exists(self.build_lib) and\n not self.inplace):\n build_path = self.build_lib\n return retval\n\n def build_extensions(self):\n # build files\n config_h_fn = ('include', 'config.h')\n config_pxi_fn = ('include', 'config.pxi')\n config_py_fn = ('setupconfig.py', )\n\n # generate headers\n config_h = '// Autogenerated file for Kivy C configuration\\n'\n config_h += '#define __PY3 {0}\\n'.format(int(PY3))\n config_pxi = '# Autogenerated file for Kivy Cython configuration\\n'\n config_pxi += 'DEF PY3 = {0}\\n'.format(int(PY3))\n config_py = '# Autogenerated file for Kivy configuration\\n'\n config_py += 'PY3 = {0}\\n'.format(int(PY3))\n config_py += 'CYTHON_MIN = {0}\\nCYTHON_MAX = {1}\\n'.format(\n repr(MIN_CYTHON_STRING), repr(MAX_CYTHON_STRING))\n config_py += 'CYTHON_BAD = {0}\\n'.format(repr(', '.join(map(\n str, CYTHON_UNSUPPORTED))))\n\n # generate content\n print('Build configuration is:')\n for opt, value in c_options.items():\n value = int(bool(value))\n print(' * {0} = {1}'.format(opt, value))\n opt = opt.upper()\n config_h += '#define __{0} {1}\\n'.format(opt, value)\n config_pxi += 'DEF {0} = {1}\\n'.format(opt, value)\n config_py += '{0} = {1}\\n'.format(opt, value)\n debug = bool(self.debug)\n print(' * debug = {0}'.format(debug))\n\n config_pxi += 'DEF DEBUG = {0}\\n'.format(debug)\n config_py += 'DEBUG = {0}\\n'.format(debug)\n config_pxi += 'DEF PLATFORM = \"{0}\"\\n'.format(platform)\n config_py += 'PLATFORM = \"{0}\"\\n'.format(platform)\n for fn, content in (\n (config_h_fn, config_h), (config_pxi_fn, config_pxi),\n (config_py_fn, config_py)):\n build_fn = expand(build_path, *fn)\n if self.update_if_changed(build_fn, content):\n print('Updated {}'.format(build_fn))\n src_fn = expand(src_path, *fn)\n if src_fn != build_fn and self.update_if_changed(src_fn, content):\n print('Updated {}'.format(src_fn))\n\n c = self.compiler.compiler_type\n print('Detected compiler is {}'.format(c))\n if c != 'msvc':\n for e in self.extensions:\n e.extra_link_args += ['-lm']\n\n build_ext.build_extensions(self)\n\n def update_if_changed(self, fn, content):\n need_update = True\n if exists(fn):\n with open(fn) as fd:\n need_update = fd.read() != content\n if need_update:\n with open(fn, 'w') as fd:\n fd.write(content)\n return need_update\n\n\ndef _check_and_fix_sdl2_mixer(f_path):\n print(\"Check if SDL2_mixer smpeg2 have an @executable_path\")\n rpath_from = (\"@executable_path/../Frameworks/SDL2.framework\"\n \"/Versions/A/SDL2\")\n rpath_to = \"@rpath/../../../../SDL2.framework/Versions/A/SDL2\"\n smpeg2_path = (\"{}/Versions/A/Frameworks/smpeg2.framework\"\n \"/Versions/A/smpeg2\").format(f_path)\n output = getoutput((\"otool -L '{}'\").format(smpeg2_path)).decode('utf-8')\n if \"@executable_path\" not in output:\n return\n\n print(\"WARNING: Your SDL2_mixer version is invalid\")\n print(\"WARNING: The smpeg2 framework embedded in SDL2_mixer contains a\")\n print(\"WARNING: reference to @executable_path that will fail the\")\n print(\"WARNING: execution of your application.\")\n print(\"WARNING: We are going to change:\")\n print(\"WARNING: from: {}\".format(rpath_from))\n print(\"WARNING: to: {}\".format(rpath_to))\n getoutput(\"install_name_tool -change {} {} {}\".format(\n rpath_from, rpath_to, smpeg2_path))\n\n output = getoutput((\"otool -L '{}'\").format(smpeg2_path))\n if b\"@executable_path\" not in output:\n print(\"WARNING: Change successfully applied!\")\n print(\"WARNING: You'll never see this message again.\")\n else:\n print(\"WARNING: Unable to apply the changes, sorry.\")\n\n\n# -----------------------------------------------------------------------------\n# extract version (simulate doc generation, kivy will be not imported)\nenviron['KIVY_DOC_INCLUDE'] = '1'\nimport kivy\n\n# extra build commands go in the cmdclass dict {'command-name': CommandClass}\n# see tools.packaging.{platform}.build.py for custom build commands for\n# portable packages. Also e.g. we use build_ext command from cython if its\n# installed for c extensions.\nfrom kivy.tools.packaging.factory import FactoryBuild\ncmdclass = {\n 'build_factory': FactoryBuild,\n 'build_ext': KivyBuildExt}\n\ntry:\n # add build rules for portable packages to cmdclass\n if platform == 'win32':\n from kivy.tools.packaging.win32.build import WindowsPortableBuild\n cmdclass['build_portable'] = WindowsPortableBuild\n elif platform == 'darwin':\n from kivy.tools.packaging.osx.build import OSXPortableBuild\n cmdclass['build_portable'] = OSXPortableBuild\nexcept ImportError:\n print('User distribution detected, avoid portable command.')\n\n# Detect which opengl version headers to use\nif platform in ('android', 'darwin', 'ios', 'rpi', 'mali'):\n c_options['use_opengl_es2'] = True\nelif c_options['use_opengl_es2'] is None:\n c_options['use_opengl_es2'] = \\\n environ.get('KIVY_GRAPHICS', '').lower() == 'gles'\n\nprint('Using this graphics system: {}'.format(\n ['OpenGL', 'OpenGL ES 2'][int(c_options['use_opengl_es2'] or False)]))\n\n# check if we are in a kivy-ios build\nif platform == 'ios':\n print('Kivy-IOS project environment detect, use it.')\n print('Kivy-IOS project located at {0}'.format(kivy_ios_root))\n c_options['use_ios'] = True\n c_options['use_sdl2'] = True\n\nelif platform == 'darwin':\n if c_options['use_osx_frameworks']:\n if osx_arch == \"i386\":\n print(\"Warning: building with frameworks fail on i386\")\n else:\n print(\"OSX framework used, force to x86_64 only\")\n environ[\"ARCHFLAGS\"] = environ.get(\"ARCHFLAGS\", \"-arch x86_64\")\n print(\"OSX ARCHFLAGS are: {}\".format(environ[\"ARCHFLAGS\"]))\n\n# detect gstreamer, only on desktop\n# works if we forced the options or in autodetection\nif platform not in ('ios', 'android') and (c_options['use_gstreamer']\n in (None, True)):\n gstreamer_valid = False\n if c_options['use_osx_frameworks'] and platform == 'darwin':\n # check the existence of frameworks\n f_path = '/Library/Frameworks/GStreamer.framework'\n if not exists(f_path):\n c_options['use_gstreamer'] = False\n print('GStreamer framework not found, fallback on pkg-config')\n else:\n print('GStreamer framework found')\n gstreamer_valid = True\n c_options['use_gstreamer'] = True\n gst_flags = {\n 'extra_link_args': [\n '-F/Library/Frameworks',\n '-Xlinker', '-rpath',\n '-Xlinker', '/Library/Frameworks',\n '-Xlinker', '-headerpad',\n '-Xlinker', '190',\n '-framework', 'GStreamer'],\n 'include_dirs': [join(f_path, 'Headers')]}\n\n if not gstreamer_valid:\n # use pkg-config approach instead\n gst_flags = pkgconfig('gstreamer-1.0')\n if 'libraries' in gst_flags:\n print('GStreamer found via pkg-config')\n c_options['use_gstreamer'] = True\n\n\n# detect SDL2, only on desktop and iOS, or android if explicitly enabled\n# works if we forced the options or in autodetection\nsdl2_flags = {}\nif c_options['use_sdl2'] or (\n platform not in ('android',) and c_options['use_sdl2'] is None):\n\n sdl2_valid = False\n if c_options['use_osx_frameworks'] and platform == 'darwin':\n # check the existence of frameworks\n sdl2_valid = True\n sdl2_flags = {\n 'extra_link_args': [\n '-F/Library/Frameworks',\n '-Xlinker', '-rpath',\n '-Xlinker', '/Library/Frameworks',\n '-Xlinker', '-headerpad',\n '-Xlinker', '190'],\n 'include_dirs': [],\n 'extra_compile_args': ['-F/Library/Frameworks']\n }\n for name in ('SDL2', 'SDL2_ttf', 'SDL2_image', 'SDL2_mixer'):\n f_path = '/Library/Frameworks/{}.framework'.format(name)\n if not exists(f_path):\n print('Missing framework {}'.format(f_path))\n sdl2_valid = False\n continue\n sdl2_flags['extra_link_args'] += ['-framework', name]\n sdl2_flags['include_dirs'] += [join(f_path, 'Headers')]\n print('Found sdl2 frameworks: {}'.format(f_path))\n if name == 'SDL2_mixer':\n _check_and_fix_sdl2_mixer(f_path)\n\n if not sdl2_valid:\n c_options['use_sdl2'] = False\n print('SDL2 frameworks not found, fallback on pkg-config')\n else:\n c_options['use_sdl2'] = True\n print('Activate SDL2 compilation')\n\n if not sdl2_valid and platform != \"ios\":\n # use pkg-config approach instead\n sdl2_flags = pkgconfig('sdl2', 'SDL2_ttf', 'SDL2_image', 'SDL2_mixer')\n if 'libraries' in sdl2_flags:\n print('SDL2 found via pkg-config')\n c_options['use_sdl2'] = True\n\n\n# -----------------------------------------------------------------------------\n# declare flags\n\n\ndef get_modulename_from_file(filename):\n filename = filename.replace(sep, '/')\n pyx = '.'.join(filename.split('.')[:-1])\n pyxl = pyx.split('/')\n while pyxl[0] != 'kivy':\n pyxl.pop(0)\n if pyxl[1] == 'kivy':\n pyxl.pop(0)\n return '.'.join(pyxl)\n\n\ndef expand(root, *args):\n return join(root, 'kivy', *args)\n\n\nclass CythonExtension(Extension):\n\n def __init__(self, *args, **kwargs):\n Extension.__init__(self, *args, **kwargs)\n self.cython_directives = {\n 'c_string_encoding': 'utf-8',\n 'profile': 'USE_PROFILE' in environ,\n 'embedsignature': 'USE_EMBEDSIGNATURE' in environ}\n # XXX with pip, setuptools is imported before distutils, and change\n # our pyx to c, then, cythonize doesn't happen. So force again our\n # sources\n self.sources = args[1]\n\n\ndef merge(d1, *args):\n d1 = deepcopy(d1)\n for d2 in args:\n for key, value in d2.items():\n value = deepcopy(value)\n if key in d1:\n d1[key].extend(value)\n else:\n d1[key] = value\n return d1\n\n\ndef determine_base_flags():\n flags = {\n 'libraries': [],\n 'include_dirs': [join(src_path, 'kivy', 'include')],\n 'library_dirs': [],\n 'extra_link_args': [],\n 'extra_compile_args': []}\n if c_options['use_ios']:\n sysroot = environ.get('IOSSDKROOT', environ.get('SDKROOT'))\n if not sysroot:\n raise Exception('IOSSDKROOT is not set')\n flags['include_dirs'] += [sysroot]\n flags['extra_compile_args'] += ['-isysroot', sysroot]\n flags['extra_link_args'] += ['-isysroot', sysroot]\n elif platform.startswith('freebsd'):\n flags['include_dirs'] += [join(\n environ.get('LOCALBASE', '/usr/local'), 'include')]\n flags['library_dirs'] += [join(\n environ.get('LOCALBASE', '/usr/local'), 'lib')]\n elif platform == 'darwin':\n v = os.uname()\n if v[2] >= '13.0.0':\n # use xcode-select to search on the right Xcode path\n # XXX use the best SDK available instead of a specific one\n import platform as _platform\n xcode_dev = getoutput('xcode-select -p').splitlines()[0]\n sdk_mac_ver = '.'.join(_platform.mac_ver()[0].split('.')[:2])\n print('Xcode detected at {}, and using OS X{} sdk'.format(\n xcode_dev, sdk_mac_ver))\n sysroot = join(\n xcode_dev.decode('utf-8'),\n 'Platforms/MacOSX.platform/Developer/SDKs',\n 'MacOSX{}.sdk'.format(sdk_mac_ver),\n 'System/Library/Frameworks')\n else:\n sysroot = ('/System/Library/Frameworks/'\n 'ApplicationServices.framework/Frameworks')\n flags['extra_compile_args'] += ['-F%s' % sysroot]\n flags['extra_link_args'] += ['-F%s' % sysroot]\n elif platform == 'win32':\n flags['include_dirs'] += [get_python_inc(prefix=sys.prefix)]\n flags['library_dirs'] += [join(sys.prefix, \"libs\")]\n return flags\n\n\ndef determine_gl_flags():\n kivy_graphics_include = join(src_path, 'kivy', 'include')\n flags = {'include_dirs': [kivy_graphics_include], 'libraries': []}\n base_flags = {'include_dirs': [kivy_graphics_include], 'libraries': []}\n if c_options['use_opengl_mock']:\n return flags, base_flags\n if platform == 'win32':\n flags['libraries'] = ['opengl32', 'glew32']\n elif platform == 'ios':\n flags['libraries'] = ['GLESv2']\n flags['extra_link_args'] = ['-framework', 'OpenGLES']\n elif platform == 'darwin':\n flags['extra_link_args'] = ['-framework', 'OpenGL', '-arch', osx_arch]\n flags['extra_compile_args'] = ['-arch', osx_arch]\n elif platform.startswith('freebsd'):\n flags['libraries'] = ['GL']\n elif platform.startswith('openbsd'):\n flags['include_dirs'] = ['/usr/X11R6/include']\n flags['library_dirs'] = ['/usr/X11R6/lib']\n flags['libraries'] = ['GL']\n elif platform == 'android':\n flags['include_dirs'] = [join(ndkplatform, 'usr', 'include')]\n flags['library_dirs'] = [join(ndkplatform, 'usr', 'lib')]\n flags['libraries'] = ['GLESv2']\n elif platform == 'rpi':\n flags['include_dirs'] = [\n '/opt/vc/include',\n '/opt/vc/include/interface/vcos/pthreads',\n '/opt/vc/include/interface/vmcs_host/linux']\n flags['library_dirs'] = ['/opt/vc/lib']\n brcm_lib_files = (\n '/opt/vc/lib/libbrcmEGL.so',\n '/opt/vc/lib/libbrcmGLESv2.so')\n if all((exists(lib) for lib in brcm_lib_files)):\n print(\n 'Found brcmEGL and brcmGLES library files'\n 'for rpi platform at /opt/vc/lib/')\n gl_libs = ['brcmEGL', 'brcmGLESv2']\n else:\n print(\n 'Failed to find brcmEGL and brcmGLESv2 library files'\n 'for rpi platform, falling back to EGL and GLESv2.')\n gl_libs = ['EGL', 'GLESv2']\n flags['libraries'] = ['bcm_host'] + gl_libs\n elif platform == 'mali':\n flags['include_dirs'] = ['/usr/include/']\n flags['library_dirs'] = ['/usr/lib/arm-linux-gnueabihf']\n flags['libraries'] = ['GLESv2']\n c_options['use_x11'] = True\n c_options['use_egl'] = True\n else:\n flags['libraries'] = ['GL']\n return flags, base_flags\n\n\ndef determine_sdl2():\n flags = {}\n if not c_options['use_sdl2']:\n return flags\n\n sdl2_path = environ.get('KIVY_SDL2_PATH', None)\n\n if sdl2_flags and not sdl2_path and platform == 'darwin':\n return sdl2_flags\n\n # no pkgconfig info, or we want to use a specific sdl2 path, so perform\n # manual configuration\n flags['libraries'] = ['SDL2', 'SDL2_ttf', 'SDL2_image', 'SDL2_mixer']\n split_chr = ';' if platform == 'win32' else ':'\n sdl2_paths = sdl2_path.split(split_chr) if sdl2_path else []\n\n if not sdl2_paths:\n sdl_inc = join(sys.prefix, 'include', 'SDL2')\n if isdir(sdl_inc):\n sdl2_paths = [sdl_inc]\n sdl2_paths.extend(['/usr/local/include/SDL2', '/usr/include/SDL2'])\n\n flags['include_dirs'] = sdl2_paths\n flags['extra_link_args'] = []\n flags['extra_compile_args'] = []\n flags['library_dirs'] = (\n sdl2_paths if sdl2_paths else\n ['/usr/local/lib/'])\n\n if sdl2_flags:\n flags = merge(flags, sdl2_flags)\n\n # ensure headers for all the SDL2 and sub libraries are available\n libs_to_check = ['SDL', 'SDL_mixer', 'SDL_ttf', 'SDL_image']\n can_compile = True\n for lib in libs_to_check:\n found = False\n for d in flags['include_dirs']:\n fn = join(d, '{}.h'.format(lib))\n if exists(fn):\n found = True\n print('SDL2: found {} header at {}'.format(lib, fn))\n break\n\n if not found:\n print('SDL2: missing sub library {}'.format(lib))\n can_compile = False\n\n if not can_compile:\n c_options['use_sdl2'] = False\n return {}\n\n return flags\n\n\nbase_flags = determine_base_flags()\ngl_flags, gl_flags_base = determine_gl_flags()\n\n# -----------------------------------------------------------------------------\n# sources to compile\n# all the dependencies have been found manually with:\n# grep -inr -E '(cimport|include)' kivy/graphics/context_instructions.{pxd,pyx}\ngraphics_dependencies = {\n 'buffer.pyx': ['common.pxi'],\n 'context.pxd': ['instructions.pxd', 'texture.pxd', 'vbo.pxd', 'cgl.pxd'],\n 'cgl.pxd': ['common.pxi', 'config.pxi', 'gl_redirect.h'],\n 'compiler.pxd': ['instructions.pxd'],\n 'compiler.pyx': ['context_instructions.pxd'],\n 'cgl.pyx': ['cgl.pxd'],\n 'cgl_mock.pyx': ['cgl.pxd'],\n 'cgl_sdl2.pyx': ['cgl.pxd'],\n 'cgl_gl.pyx': ['cgl.pxd'],\n 'cgl_glew.pyx': ['cgl.pxd'],\n 'context_instructions.pxd': [\n 'transformation.pxd', 'instructions.pxd', 'texture.pxd'],\n 'fbo.pxd': ['cgl.pxd', 'instructions.pxd', 'texture.pxd'],\n 'fbo.pyx': [\n 'config.pxi', 'opcodes.pxi', 'transformation.pxd', 'context.pxd'],\n 'gl_instructions.pyx': [\n 'config.pxi', 'opcodes.pxi', 'cgl.pxd', 'instructions.pxd'],\n 'instructions.pxd': [\n 'vbo.pxd', 'context_instructions.pxd', 'compiler.pxd', 'shader.pxd',\n 'texture.pxd', '../_event.pxd'],\n 'instructions.pyx': [\n 'config.pxi', 'opcodes.pxi', 'cgl.pxd',\n 'context.pxd', 'common.pxi', 'vertex.pxd', 'transformation.pxd'],\n 'opengl.pyx': [\n 'config.pxi', 'common.pxi', 'cgl.pxd', 'gl_redirect.h'],\n 'opengl_utils.pyx': [\n 'opengl_utils_def.pxi', 'cgl.pxd', ],\n 'shader.pxd': ['cgl.pxd', 'transformation.pxd', 'vertex.pxd'],\n 'shader.pyx': [\n 'config.pxi', 'common.pxi', 'cgl.pxd',\n 'vertex.pxd', 'transformation.pxd', 'context.pxd',\n 'gl_debug_logger.pxi'],\n 'stencil_instructions.pxd': ['instructions.pxd'],\n 'stencil_instructions.pyx': [\n 'config.pxi', 'opcodes.pxi', 'cgl.pxd',\n 'gl_debug_logger.pxi'],\n 'scissor_instructions.pyx': [\n 'config.pxi', 'opcodes.pxi', 'cgl.pxd'],\n 'svg.pyx': ['config.pxi', 'common.pxi', 'texture.pxd', 'instructions.pxd',\n 'vertex_instructions.pxd', 'tesselator.pxd'],\n 'texture.pxd': ['cgl.pxd'],\n 'texture.pyx': [\n 'config.pxi', 'common.pxi', 'opengl_utils_def.pxi', 'context.pxd',\n 'cgl.pxd', 'opengl_utils.pxd',\n 'img_tools.pxi', 'gl_debug_logger.pxi'],\n 'vbo.pxd': ['buffer.pxd', 'cgl.pxd', 'vertex.pxd'],\n 'vbo.pyx': [\n 'config.pxi', 'common.pxi', 'context.pxd',\n 'instructions.pxd', 'shader.pxd', 'gl_debug_logger.pxi'],\n 'vertex.pxd': ['cgl.pxd'],\n 'vertex.pyx': ['config.pxi', 'common.pxi'],\n 'vertex_instructions.pyx': [\n 'config.pxi', 'common.pxi', 'vbo.pxd', 'vertex.pxd',\n 'instructions.pxd', 'vertex_instructions.pxd',\n 'cgl.pxd', 'texture.pxd', 'vertex_instructions_line.pxi'],\n 'vertex_instructions_line.pxi': ['stencil_instructions.pxd']}\n\nsources = {\n '_event.pyx': merge(base_flags, {'depends': ['properties.pxd']}),\n '_clock.pyx': {},\n 'weakproxy.pyx': {},\n 'properties.pyx': merge(base_flags, {'depends': ['_event.pxd']}),\n 'graphics/buffer.pyx': merge(base_flags, gl_flags_base),\n 'graphics/context.pyx': merge(base_flags, gl_flags_base),\n 'graphics/compiler.pyx': merge(base_flags, gl_flags_base),\n 'graphics/context_instructions.pyx': merge(base_flags, gl_flags_base),\n 'graphics/fbo.pyx': merge(base_flags, gl_flags_base),\n 'graphics/gl_instructions.pyx': merge(base_flags, gl_flags_base),\n 'graphics/instructions.pyx': merge(base_flags, gl_flags_base),\n 'graphics/opengl.pyx': merge(base_flags, gl_flags_base),\n 'graphics/opengl_utils.pyx': merge(base_flags, gl_flags_base),\n 'graphics/shader.pyx': merge(base_flags, gl_flags_base),\n 'graphics/stencil_instructions.pyx': merge(base_flags, gl_flags_base),\n 'graphics/scissor_instructions.pyx': merge(base_flags, gl_flags_base),\n 'graphics/texture.pyx': merge(base_flags, gl_flags_base),\n 'graphics/transformation.pyx': merge(base_flags, gl_flags_base),\n 'graphics/vbo.pyx': merge(base_flags, gl_flags_base),\n 'graphics/vertex.pyx': merge(base_flags, gl_flags_base),\n 'graphics/vertex_instructions.pyx': merge(base_flags, gl_flags_base),\n 'graphics/cgl.pyx': merge(base_flags, gl_flags_base),\n 'graphics/cgl_backend/cgl_mock.pyx': merge(base_flags, gl_flags_base),\n 'graphics/cgl_backend/cgl_gl.pyx': merge(base_flags, gl_flags),\n 'graphics/cgl_backend/cgl_glew.pyx': merge(base_flags, gl_flags),\n 'graphics/cgl_backend/cgl_sdl2.pyx': merge(base_flags, gl_flags_base),\n 'graphics/cgl_backend/cgl_debug.pyx': merge(base_flags, gl_flags_base),\n 'core/text/text_layout.pyx': base_flags,\n 'core/window/window_info.pyx': base_flags,\n 'graphics/tesselator.pyx': merge(base_flags, {\n 'include_dirs': ['kivy/lib/libtess2/Include'],\n 'c_depends': [\n 'lib/libtess2/Source/bucketalloc.c',\n 'lib/libtess2/Source/dict.c',\n 'lib/libtess2/Source/geom.c',\n 'lib/libtess2/Source/mesh.c',\n 'lib/libtess2/Source/priorityq.c',\n 'lib/libtess2/Source/sweep.c',\n 'lib/libtess2/Source/tess.c'\n ]\n }),\n 'graphics/svg.pyx': merge(base_flags, gl_flags_base)\n}\n\nif c_options[\"use_sdl2\"]:\n sdl2_flags = determine_sdl2()\n\nif c_options['use_sdl2'] and sdl2_flags:\n sources['graphics/cgl_backend/cgl_sdl2.pyx'] = merge(\n sources['graphics/cgl_backend/cgl_sdl2.pyx'], sdl2_flags)\n sdl2_depends = {'depends': ['lib/sdl2.pxi']}\n for source_file in ('core/window/_window_sdl2.pyx',\n 'core/image/_img_sdl2.pyx',\n 'core/text/_text_sdl2.pyx',\n 'core/audio/audio_sdl2.pyx',\n 'core/clipboard/_clipboard_sdl2.pyx'):\n sources[source_file] = merge(\n base_flags, sdl2_flags, sdl2_depends)\n\nif platform in ('darwin', 'ios'):\n # activate ImageIO provider for our core image\n if platform == 'ios':\n osx_flags = {'extra_link_args': [\n '-framework', 'Foundation',\n '-framework', 'UIKit',\n '-framework', 'AudioToolbox',\n '-framework', 'CoreGraphics',\n '-framework', 'QuartzCore',\n '-framework', 'ImageIO',\n '-framework', 'Accelerate']}\n else:\n osx_flags = {'extra_link_args': [\n '-framework', 'ApplicationServices']}\n sources['core/image/img_imageio.pyx'] = merge(\n base_flags, osx_flags)\n\nif c_options['use_avfoundation']:\n import platform as _platform\n mac_ver = [int(x) for x in _platform.mac_ver()[0].split('.')[:2]]\n if mac_ver >= [10, 7]:\n osx_flags = {\n 'extra_link_args': ['-framework', 'AVFoundation'],\n 'extra_compile_args': ['-ObjC++'],\n 'depends': ['core/camera/camera_avfoundation_implem.m']}\n sources['core/camera/camera_avfoundation.pyx'] = merge(\n base_flags, osx_flags)\n else:\n print('AVFoundation cannot be used, OSX >= 10.7 is required')\n\nif c_options['use_rpi']:\n sources['lib/vidcore_lite/egl.pyx'] = merge(\n base_flags, gl_flags)\n sources['lib/vidcore_lite/bcm.pyx'] = merge(\n base_flags, gl_flags)\n\nif c_options['use_x11']:\n libs = ['Xrender', 'X11']\n if c_options['use_egl']:\n libs += ['EGL']\n else:\n libs += ['GL']\n sources['core/window/window_x11.pyx'] = merge(\n base_flags, gl_flags, {\n # FIXME add an option to depend on them but not compile them\n # cause keytab is included in core, and core is included in\n # window_x11\n #\n # 'depends': [\n # 'core/window/window_x11_keytab.c',\n # 'core/window/window_x11_core.c'],\n 'libraries': libs})\n\nif c_options['use_gstreamer']:\n sources['lib/gstplayer/_gstplayer.pyx'] = merge(\n base_flags, gst_flags, {\n 'depends': ['lib/gstplayer/_gstplayer.h']})\n\n\n# -----------------------------------------------------------------------------\n# extension modules\n\ndef get_dependencies(name, deps=None):\n if deps is None:\n deps = []\n for dep in graphics_dependencies.get(name, []):\n if dep not in deps:\n deps.append(dep)\n get_dependencies(dep, deps)\n return deps\n\n\ndef resolve_dependencies(fn, depends):\n fn = basename(fn)\n deps = []\n get_dependencies(fn, deps)\n get_dependencies(fn.replace('.pyx', '.pxd'), deps)\n\n deps_final = []\n paths_to_test = ['graphics', 'include']\n for dep in deps:\n found = False\n for path in paths_to_test:\n filename = expand(src_path, path, dep)\n if exists(filename):\n deps_final.append(filename)\n found = True\n break\n if not found:\n print('ERROR: Dependency for {} not resolved: {}'.format(\n fn, dep\n ))\n\n return deps_final\n\n\ndef get_extensions_from_sources(sources):\n ext_modules = []\n if environ.get('KIVY_FAKE_BUILDEXT'):\n print('Fake build_ext asked, will generate only .h/.c')\n return ext_modules\n for pyx, flags in sources.items():\n is_graphics = pyx.startswith('graphics')\n pyx = expand(src_path, pyx)\n depends = [expand(src_path, x) for x in flags.pop('depends', [])]\n c_depends = [expand(src_path, x) for x in flags.pop('c_depends', [])]\n if not have_cython:\n pyx = '%s.c' % pyx[:-4]\n if is_graphics:\n depends = resolve_dependencies(pyx, depends)\n f_depends = [x for x in depends if x.rsplit('.', 1)[-1] in (\n 'c', 'cpp', 'm')]\n module_name = get_modulename_from_file(pyx)\n flags_clean = {'depends': depends}\n for key, value in flags.items():\n if len(value):\n flags_clean[key] = value\n ext_modules.append(CythonExtension(\n module_name, [pyx] + f_depends + c_depends, **flags_clean))\n return ext_modules\n\n\next_modules = get_extensions_from_sources(sources)\n\n\n# -----------------------------------------------------------------------------\n# automatically detect data files\nsplit_examples = int(environ.get('KIVY_SPLIT_EXAMPLES', '0'))\ndata_file_prefix = 'share/kivy-'\nexamples = {}\nexamples_allowed_ext = ('readme', 'py', 'wav', 'png', 'jpg', 'svg', 'json',\n 'avi', 'gif', 'txt', 'ttf', 'obj', 'mtl', 'kv', 'mpg',\n 'glsl', 'zip')\nfor root, subFolders, files in walk('examples'):\n for fn in files:\n ext = fn.split('.')[-1].lower()\n if ext not in examples_allowed_ext:\n continue\n filename = join(root, fn)\n directory = '%s%s' % (data_file_prefix, dirname(filename))\n if directory not in examples:\n examples[directory] = []\n examples[directory].append(filename)\n\nbinary_deps = []\nbinary_deps_path = join(src_path, 'kivy', 'binary_deps')\nif isdir(binary_deps_path):\n for root, dirnames, filenames in walk(binary_deps_path):\n for fname in filenames:\n binary_deps.append(\n join(root.replace(binary_deps_path, 'binary_deps'), fname))\n\n# -----------------------------------------------------------------------------\n# setup !\nif not build_examples:\n setup(\n name='Kivy',\n version=get_version(),\n author='Kivy Team and other contributors',\n author_email='[email protected]',\n url='http://kivy.org',\n license='MIT',\n description=(\n 'A software library for rapid development of '\n 'hardware-accelerated multitouch applications.'),\n long_description=get_description(),\n ext_modules=ext_modules,\n cmdclass=cmdclass,\n packages=[\n 'kivy',\n 'kivy.adapters',\n 'kivy.core',\n 'kivy.core.audio',\n 'kivy.core.camera',\n 'kivy.core.clipboard',\n 'kivy.core.image',\n 'kivy.core.gl',\n 'kivy.core.spelling',\n 'kivy.core.text',\n 'kivy.core.video',\n 'kivy.core.window',\n 'kivy.deps',\n 'kivy.effects',\n 'kivy.graphics',\n 'kivy.graphics.cgl_backend',\n 'kivy.garden',\n 'kivy.input',\n 'kivy.input.postproc',\n 'kivy.input.providers',\n 'kivy.lang',\n 'kivy.lib',\n 'kivy.lib.osc',\n 'kivy.lib.gstplayer',\n 'kivy.lib.vidcore_lite',\n 'kivy.modules',\n 'kivy.network',\n 'kivy.storage',\n 'kivy.tests',\n 'kivy.tools',\n 'kivy.tools.packaging',\n 'kivy.tools.packaging.pyinstaller_hooks',\n 'kivy.tools.highlight',\n 'kivy.extras',\n 'kivy.uix',\n 'kivy.uix.behaviors',\n 'kivy.uix.recycleview',\n ],\n package_dir={'kivy': 'kivy'},\n package_data={'kivy': [\n 'setupconfig.py',\n '*.pxd',\n '*.pxi',\n 'core/text/*.pxd',\n 'core/text/*.pxi',\n 'core/window/*.pxi',\n 'core/window/*.pxd',\n 'graphics/*.pxd',\n 'graphics/*.pxi',\n 'graphics/*.h',\n 'include/*',\n 'lib/vidcore_lite/*.pxd',\n 'lib/vidcore_lite/*.pxi',\n 'data/*.kv',\n 'data/*.json',\n 'data/fonts/*.ttf',\n 'data/images/*.png',\n 'data/images/*.jpg',\n 'data/images/*.gif',\n 'data/images/*.atlas',\n 'data/keyboards/*.json',\n 'data/logo/*.png',\n 'data/glsl/*.png',\n 'data/glsl/*.vs',\n 'data/glsl/*.fs',\n 'tests/*.zip',\n 'tests/*.kv',\n 'tests/*.png',\n 'tests/*.ttf',\n 'tests/*.ogg',\n 'tools/gles_compat/*',\n 'tools/highlight/*',\n 'tools/packaging/README.txt',\n 'tools/packaging/win32/kivy.bat',\n 'tools/packaging/win32/kivyenv.sh',\n 'tools/packaging/win32/README.txt',\n 'tools/packaging/osx/Info.plist',\n 'tools/packaging/osx/InfoPlist.strings',\n 'tools/packaging/osx/kivy.sh',\n 'tools/pep8checker/*',\n 'tools/theming/defaulttheme/*',\n ] + binary_deps},\n data_files=[] if split_examples else list(examples.items()),\n classifiers=[\n 'Development Status :: 5 - Production/Stable',\n 'Environment :: MacOS X',\n 'Environment :: Win32 (MS Windows)',\n 'Environment :: X11 Applications',\n 'Intended Audience :: Developers',\n 'Intended Audience :: End Users/Desktop',\n 'Intended Audience :: Information Technology',\n 'Intended Audience :: Science/Research',\n 'License :: OSI Approved :: MIT License',\n 'Natural Language :: English',\n 'Operating System :: MacOS :: MacOS X',\n 'Operating System :: Microsoft :: Windows',\n 'Operating System :: POSIX :: BSD :: FreeBSD',\n 'Operating System :: POSIX :: Linux',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3.4',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n 'Topic :: Artistic Software',\n 'Topic :: Games/Entertainment',\n 'Topic :: Multimedia :: Graphics :: 3D Rendering',\n 'Topic :: Multimedia :: Graphics :: Capture :: Digital Camera',\n 'Topic :: Multimedia :: Graphics :: Presentation',\n 'Topic :: Multimedia :: Graphics :: Viewers',\n 'Topic :: Multimedia :: Sound/Audio :: Players :: MP3',\n 'Topic :: Multimedia :: Video :: Display',\n 'Topic :: Scientific/Engineering :: Human Machine Interfaces',\n 'Topic :: Scientific/Engineering :: Visualization',\n ('Topic :: Software Development :: Libraries :: '\n 'Application Frameworks'),\n 'Topic :: Software Development :: User Interfaces'],\n dependency_links=[\n 'https://github.com/kivy-garden/garden/archive/master.zip'],\n install_requires=[\n 'Kivy-Garden>=0.1.4', 'docutils', 'pygments'\n ],\n extra_requires={\n 'tuio': ['oscpy']\n },\n setup_requires=[\n 'cython>=' + MIN_CYTHON_STRING\n ] if not skip_cython else [])\nelse:\n setup(\n name='Kivy-examples',\n version=get_version(),\n author='Kivy Team and other contributors',\n author_email='[email protected]',\n url='http://kivy.org',\n license='MIT',\n description=('Kivy examples.'),\n long_description=get_description(),\n data_files=list(examples.items()))\n", "path": "setup.py" } ]
[ { "content": "#\n# Kivy - Cross-platform UI framework\n# https://kivy.org/\n#\nfrom __future__ import print_function\n\nimport sys\nbuild_examples = False\nif \"--build_examples\" in sys.argv:\n build_examples = True\n sys.argv.remove(\"--build_examples\")\n\nfrom copy import deepcopy\nimport os\nfrom os.path import join, dirname, sep, exists, basename, isdir\nfrom os import walk, environ\nfrom distutils.version import LooseVersion\nfrom distutils.sysconfig import get_python_inc\nfrom collections import OrderedDict\nfrom time import sleep\nfrom subprocess import check_output, CalledProcessError\nfrom datetime import datetime\n\nif environ.get('KIVY_USE_SETUPTOOLS'):\n from setuptools import setup, Extension\n print('Using setuptools')\nelse:\n from distutils.core import setup\n from distutils.extension import Extension\n print('Using distutils')\n\n\nPY3 = sys.version > '3'\n\nif PY3: # fix error with py3's LooseVersion comparisons\n def ver_equal(self, other):\n return self.version == other\n\n LooseVersion.__eq__ = ver_equal\n\n\ndef get_description():\n with open(join(dirname(__file__), 'README.md')) as fileh:\n return fileh.read()\n\n\ndef get_version(filename='kivy/version.py'):\n VERSION = kivy.__version__\n DATE = datetime.utcnow().strftime('%Y%m%d')\n try:\n GIT_REVISION = check_output(\n ['git', 'rev-parse', 'HEAD']\n ).strip().decode('ascii')\n except (CalledProcessError, OSError, IOError) as e:\n # CalledProcessError has no errno\n errno = getattr(e, 'errno', None)\n if errno != 2 and 'CalledProcessError' not in repr(e):\n raise\n GIT_REVISION = \"Unknown\"\n\n cnt = (\n \"# THIS FILE IS GENERATED FROM KIVY SETUP.PY\\n\"\n \"__version__ = '%(version)s'\\n\"\n \"__hash__ = '%(hash)s'\\n\"\n \"__date__ = '%(date)s'\\n\"\n )\n\n with open(filename, 'w') as f:\n f.write(cnt % {\n 'version': VERSION,\n 'hash': GIT_REVISION,\n 'date': DATE\n })\n return VERSION\n\n\nMIN_CYTHON_STRING = '0.24'\nMIN_CYTHON_VERSION = LooseVersion(MIN_CYTHON_STRING)\nMAX_CYTHON_STRING = '0.28.3'\nMAX_CYTHON_VERSION = LooseVersion(MAX_CYTHON_STRING)\nCYTHON_UNSUPPORTED = (\n # ref https://github.com/cython/cython/issues/1968\n '0.27', '0.27.2'\n)\n\n\ndef getoutput(cmd, env=None):\n import subprocess\n p = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE,\n stderr=subprocess.PIPE, env=env)\n p.wait()\n if p.returncode: # if not returncode == 0\n print('WARNING: A problem occurred while running {0} (code {1})\\n'\n .format(cmd, p.returncode))\n stderr_content = p.stderr.read()\n if stderr_content:\n print('{0}\\n'.format(stderr_content))\n return \"\"\n return p.stdout.read()\n\n\ndef pkgconfig(*packages, **kw):\n flag_map = {'-I': 'include_dirs', '-L': 'library_dirs', '-l': 'libraries'}\n lenviron = None\n pconfig = join(sys.prefix, 'libs', 'pkgconfig')\n\n if isdir(pconfig):\n lenviron = environ.copy()\n lenviron['PKG_CONFIG_PATH'] = '{};{}'.format(\n environ.get('PKG_CONFIG_PATH', ''), pconfig)\n cmd = 'pkg-config --libs --cflags {}'.format(' '.join(packages))\n results = getoutput(cmd, lenviron).split()\n for token in results:\n ext = token[:2].decode('utf-8')\n flag = flag_map.get(ext)\n if not flag:\n continue\n kw.setdefault(flag, []).append(token[2:].decode('utf-8'))\n return kw\n\n\n# -----------------------------------------------------------------------------\n# Determine on which platform we are\n\nplatform = sys.platform\n\n# Detect 32/64bit for OSX (http://stackoverflow.com/a/1405971/798575)\nif sys.platform == 'darwin':\n if sys.maxsize > 2 ** 32:\n osx_arch = 'x86_64'\n else:\n osx_arch = 'i386'\n\n# Detect Python for android project (http://github.com/kivy/python-for-android)\nndkplatform = environ.get('NDKPLATFORM')\nif ndkplatform is not None and environ.get('LIBLINK'):\n platform = 'android'\nkivy_ios_root = environ.get('KIVYIOSROOT', None)\nif kivy_ios_root is not None:\n platform = 'ios'\nif exists('/opt/vc/include/bcm_host.h'):\n platform = 'rpi'\nif exists('/usr/lib/arm-linux-gnueabihf/libMali.so'):\n platform = 'mali'\n\n# -----------------------------------------------------------------------------\n# Detect options\n#\nc_options = OrderedDict()\nc_options['use_rpi'] = platform == 'rpi'\nc_options['use_mali'] = platform == 'mali'\nc_options['use_egl'] = False\nc_options['use_opengl_es2'] = None\nc_options['use_opengl_mock'] = environ.get('READTHEDOCS', None) == 'True'\nc_options['use_sdl2'] = None\nc_options['use_ios'] = False\nc_options['use_mesagl'] = False\nc_options['use_x11'] = False\nc_options['use_wayland'] = False\nc_options['use_gstreamer'] = None\nc_options['use_avfoundation'] = platform == 'darwin'\nc_options['use_osx_frameworks'] = platform == 'darwin'\nc_options['debug_gl'] = False\n\n# now check if environ is changing the default values\nfor key in list(c_options.keys()):\n ukey = key.upper()\n if ukey in environ:\n value = bool(int(environ[ukey]))\n print('Environ change {0} -> {1}'.format(key, value))\n c_options[key] = value\n\n\n# -----------------------------------------------------------------------------\n# Cython check\n# on python-for-android and kivy-ios, cython usage is external\n\ncython_unsupported_append = '''\n\n Please note that the following versions of Cython are not supported\n at all: {}\n'''.format(', '.join(map(str, CYTHON_UNSUPPORTED)))\n\ncython_min = '''\\\n This version of Cython is not compatible with Kivy. Please upgrade to\n at least version {0}, preferably the newest supported version {1}.\n\n If your platform provides a Cython package, make sure you have upgraded\n to the newest version. If the newest version available is still too low,\n please remove it and install the newest supported Cython via pip:\n\n pip install -I Cython=={1}{2}\\\n'''.format(MIN_CYTHON_STRING, MAX_CYTHON_STRING,\n cython_unsupported_append if CYTHON_UNSUPPORTED else '')\n\ncython_max = '''\\\n This version of Cython is untested with Kivy. While this version may\n work perfectly fine, it is possible that you may experience issues. If\n you do have issues, please downgrade to a supported version. It is\n best to use the newest supported version, {1}, but the minimum\n supported version is {0}.\n\n If your platform provides a Cython package, check if you can downgrade\n to a supported version. Otherwise, uninstall the platform package and\n install Cython via pip:\n\n pip install -I Cython=={1}{2}\\\n'''.format(MIN_CYTHON_STRING, MAX_CYTHON_STRING,\n cython_unsupported_append if CYTHON_UNSUPPORTED else '')\n\ncython_unsupported = '''\\\n This version of Cython suffers from known bugs and is unsupported.\n Please install the newest supported version, {1}, if possible, but\n the minimum supported version is {0}.\n\n If your platform provides a Cython package, check if you can install\n a supported version. Otherwise, uninstall the platform package and\n install Cython via pip:\n\n pip install -I Cython=={1}{2}\\\n'''.format(MIN_CYTHON_STRING, MAX_CYTHON_STRING,\n cython_unsupported_append)\n\nhave_cython = False\nskip_cython = False\nif platform in ('ios', 'android'):\n print('\\nCython check avoided.')\n skip_cython = True\nelse:\n try:\n # check for cython\n from Cython.Distutils import build_ext\n have_cython = True\n import Cython\n cy_version_str = Cython.__version__\n cy_ver = LooseVersion(cy_version_str)\n print('\\nDetected Cython version {}'.format(cy_version_str))\n if cy_ver < MIN_CYTHON_VERSION:\n print(cython_min)\n raise ImportError('Incompatible Cython Version')\n if cy_ver in CYTHON_UNSUPPORTED:\n print(cython_unsupported)\n raise ImportError('Incompatible Cython Version')\n if cy_ver > MAX_CYTHON_VERSION:\n print(cython_max)\n sleep(1)\n except ImportError:\n print(\"\\nCython is missing, it's required for compiling kivy !\\n\\n\")\n raise\n\nif not have_cython:\n from distutils.command.build_ext import build_ext\n\n# -----------------------------------------------------------------------------\n# Setup classes\n\n# the build path where kivy is being compiled\nsrc_path = build_path = dirname(__file__)\n\n\nclass KivyBuildExt(build_ext):\n\n def finalize_options(self):\n retval = build_ext.finalize_options(self)\n global build_path\n if (self.build_lib is not None and exists(self.build_lib) and\n not self.inplace):\n build_path = self.build_lib\n return retval\n\n def build_extensions(self):\n # build files\n config_h_fn = ('include', 'config.h')\n config_pxi_fn = ('include', 'config.pxi')\n config_py_fn = ('setupconfig.py', )\n\n # generate headers\n config_h = '// Autogenerated file for Kivy C configuration\\n'\n config_h += '#define __PY3 {0}\\n'.format(int(PY3))\n config_pxi = '# Autogenerated file for Kivy Cython configuration\\n'\n config_pxi += 'DEF PY3 = {0}\\n'.format(int(PY3))\n config_py = '# Autogenerated file for Kivy configuration\\n'\n config_py += 'PY3 = {0}\\n'.format(int(PY3))\n config_py += 'CYTHON_MIN = {0}\\nCYTHON_MAX = {1}\\n'.format(\n repr(MIN_CYTHON_STRING), repr(MAX_CYTHON_STRING))\n config_py += 'CYTHON_BAD = {0}\\n'.format(repr(', '.join(map(\n str, CYTHON_UNSUPPORTED))))\n\n # generate content\n print('Build configuration is:')\n for opt, value in c_options.items():\n value = int(bool(value))\n print(' * {0} = {1}'.format(opt, value))\n opt = opt.upper()\n config_h += '#define __{0} {1}\\n'.format(opt, value)\n config_pxi += 'DEF {0} = {1}\\n'.format(opt, value)\n config_py += '{0} = {1}\\n'.format(opt, value)\n debug = bool(self.debug)\n print(' * debug = {0}'.format(debug))\n\n config_pxi += 'DEF DEBUG = {0}\\n'.format(debug)\n config_py += 'DEBUG = {0}\\n'.format(debug)\n config_pxi += 'DEF PLATFORM = \"{0}\"\\n'.format(platform)\n config_py += 'PLATFORM = \"{0}\"\\n'.format(platform)\n for fn, content in (\n (config_h_fn, config_h), (config_pxi_fn, config_pxi),\n (config_py_fn, config_py)):\n build_fn = expand(build_path, *fn)\n if self.update_if_changed(build_fn, content):\n print('Updated {}'.format(build_fn))\n src_fn = expand(src_path, *fn)\n if src_fn != build_fn and self.update_if_changed(src_fn, content):\n print('Updated {}'.format(src_fn))\n\n c = self.compiler.compiler_type\n print('Detected compiler is {}'.format(c))\n if c != 'msvc':\n for e in self.extensions:\n e.extra_link_args += ['-lm']\n\n build_ext.build_extensions(self)\n\n def update_if_changed(self, fn, content):\n need_update = True\n if exists(fn):\n with open(fn) as fd:\n need_update = fd.read() != content\n if need_update:\n with open(fn, 'w') as fd:\n fd.write(content)\n return need_update\n\n\ndef _check_and_fix_sdl2_mixer(f_path):\n print(\"Check if SDL2_mixer smpeg2 have an @executable_path\")\n rpath_from = (\"@executable_path/../Frameworks/SDL2.framework\"\n \"/Versions/A/SDL2\")\n rpath_to = \"@rpath/../../../../SDL2.framework/Versions/A/SDL2\"\n smpeg2_path = (\"{}/Versions/A/Frameworks/smpeg2.framework\"\n \"/Versions/A/smpeg2\").format(f_path)\n output = getoutput((\"otool -L '{}'\").format(smpeg2_path)).decode('utf-8')\n if \"@executable_path\" not in output:\n return\n\n print(\"WARNING: Your SDL2_mixer version is invalid\")\n print(\"WARNING: The smpeg2 framework embedded in SDL2_mixer contains a\")\n print(\"WARNING: reference to @executable_path that will fail the\")\n print(\"WARNING: execution of your application.\")\n print(\"WARNING: We are going to change:\")\n print(\"WARNING: from: {}\".format(rpath_from))\n print(\"WARNING: to: {}\".format(rpath_to))\n getoutput(\"install_name_tool -change {} {} {}\".format(\n rpath_from, rpath_to, smpeg2_path))\n\n output = getoutput((\"otool -L '{}'\").format(smpeg2_path))\n if b\"@executable_path\" not in output:\n print(\"WARNING: Change successfully applied!\")\n print(\"WARNING: You'll never see this message again.\")\n else:\n print(\"WARNING: Unable to apply the changes, sorry.\")\n\n\n# -----------------------------------------------------------------------------\n# extract version (simulate doc generation, kivy will be not imported)\nenviron['KIVY_DOC_INCLUDE'] = '1'\nimport kivy\n\n# extra build commands go in the cmdclass dict {'command-name': CommandClass}\n# see tools.packaging.{platform}.build.py for custom build commands for\n# portable packages. Also e.g. we use build_ext command from cython if its\n# installed for c extensions.\nfrom kivy.tools.packaging.factory import FactoryBuild\ncmdclass = {\n 'build_factory': FactoryBuild,\n 'build_ext': KivyBuildExt}\n\ntry:\n # add build rules for portable packages to cmdclass\n if platform == 'win32':\n from kivy.tools.packaging.win32.build import WindowsPortableBuild\n cmdclass['build_portable'] = WindowsPortableBuild\n elif platform == 'darwin':\n from kivy.tools.packaging.osx.build import OSXPortableBuild\n cmdclass['build_portable'] = OSXPortableBuild\nexcept ImportError:\n print('User distribution detected, avoid portable command.')\n\n# Detect which opengl version headers to use\nif platform in ('android', 'darwin', 'ios', 'rpi', 'mali'):\n c_options['use_opengl_es2'] = True\nelif c_options['use_opengl_es2'] is None:\n c_options['use_opengl_es2'] = \\\n environ.get('KIVY_GRAPHICS', '').lower() == 'gles'\n\nprint('Using this graphics system: {}'.format(\n ['OpenGL', 'OpenGL ES 2'][int(c_options['use_opengl_es2'] or False)]))\n\n# check if we are in a kivy-ios build\nif platform == 'ios':\n print('Kivy-IOS project environment detect, use it.')\n print('Kivy-IOS project located at {0}'.format(kivy_ios_root))\n c_options['use_ios'] = True\n c_options['use_sdl2'] = True\n\nelif platform == 'darwin':\n if c_options['use_osx_frameworks']:\n if osx_arch == \"i386\":\n print(\"Warning: building with frameworks fail on i386\")\n else:\n print(\"OSX framework used, force to x86_64 only\")\n environ[\"ARCHFLAGS\"] = environ.get(\"ARCHFLAGS\", \"-arch x86_64\")\n print(\"OSX ARCHFLAGS are: {}\".format(environ[\"ARCHFLAGS\"]))\n\n# detect gstreamer, only on desktop\n# works if we forced the options or in autodetection\nif platform not in ('ios', 'android') and (c_options['use_gstreamer']\n in (None, True)):\n gstreamer_valid = False\n if c_options['use_osx_frameworks'] and platform == 'darwin':\n # check the existence of frameworks\n f_path = '/Library/Frameworks/GStreamer.framework'\n if not exists(f_path):\n c_options['use_gstreamer'] = False\n print('GStreamer framework not found, fallback on pkg-config')\n else:\n print('GStreamer framework found')\n gstreamer_valid = True\n c_options['use_gstreamer'] = True\n gst_flags = {\n 'extra_link_args': [\n '-F/Library/Frameworks',\n '-Xlinker', '-rpath',\n '-Xlinker', '/Library/Frameworks',\n '-Xlinker', '-headerpad',\n '-Xlinker', '190',\n '-framework', 'GStreamer'],\n 'include_dirs': [join(f_path, 'Headers')]}\n\n if not gstreamer_valid:\n # use pkg-config approach instead\n gst_flags = pkgconfig('gstreamer-1.0')\n if 'libraries' in gst_flags:\n print('GStreamer found via pkg-config')\n c_options['use_gstreamer'] = True\n\n\n# detect SDL2, only on desktop and iOS, or android if explicitly enabled\n# works if we forced the options or in autodetection\nsdl2_flags = {}\nif c_options['use_sdl2'] or (\n platform not in ('android',) and c_options['use_sdl2'] is None):\n\n sdl2_valid = False\n if c_options['use_osx_frameworks'] and platform == 'darwin':\n # check the existence of frameworks\n sdl2_valid = True\n sdl2_flags = {\n 'extra_link_args': [\n '-F/Library/Frameworks',\n '-Xlinker', '-rpath',\n '-Xlinker', '/Library/Frameworks',\n '-Xlinker', '-headerpad',\n '-Xlinker', '190'],\n 'include_dirs': [],\n 'extra_compile_args': ['-F/Library/Frameworks']\n }\n for name in ('SDL2', 'SDL2_ttf', 'SDL2_image', 'SDL2_mixer'):\n f_path = '/Library/Frameworks/{}.framework'.format(name)\n if not exists(f_path):\n print('Missing framework {}'.format(f_path))\n sdl2_valid = False\n continue\n sdl2_flags['extra_link_args'] += ['-framework', name]\n sdl2_flags['include_dirs'] += [join(f_path, 'Headers')]\n print('Found sdl2 frameworks: {}'.format(f_path))\n if name == 'SDL2_mixer':\n _check_and_fix_sdl2_mixer(f_path)\n\n if not sdl2_valid:\n c_options['use_sdl2'] = False\n print('SDL2 frameworks not found, fallback on pkg-config')\n else:\n c_options['use_sdl2'] = True\n print('Activate SDL2 compilation')\n\n if not sdl2_valid and platform != \"ios\":\n # use pkg-config approach instead\n sdl2_flags = pkgconfig('sdl2', 'SDL2_ttf', 'SDL2_image', 'SDL2_mixer')\n if 'libraries' in sdl2_flags:\n print('SDL2 found via pkg-config')\n c_options['use_sdl2'] = True\n\n\n# -----------------------------------------------------------------------------\n# declare flags\n\n\ndef get_modulename_from_file(filename):\n filename = filename.replace(sep, '/')\n pyx = '.'.join(filename.split('.')[:-1])\n pyxl = pyx.split('/')\n while pyxl[0] != 'kivy':\n pyxl.pop(0)\n if pyxl[1] == 'kivy':\n pyxl.pop(0)\n return '.'.join(pyxl)\n\n\ndef expand(root, *args):\n return join(root, 'kivy', *args)\n\n\nclass CythonExtension(Extension):\n\n def __init__(self, *args, **kwargs):\n Extension.__init__(self, *args, **kwargs)\n self.cython_directives = {\n 'c_string_encoding': 'utf-8',\n 'profile': 'USE_PROFILE' in environ,\n 'embedsignature': 'USE_EMBEDSIGNATURE' in environ}\n # XXX with pip, setuptools is imported before distutils, and change\n # our pyx to c, then, cythonize doesn't happen. So force again our\n # sources\n self.sources = args[1]\n\n\ndef merge(d1, *args):\n d1 = deepcopy(d1)\n for d2 in args:\n for key, value in d2.items():\n value = deepcopy(value)\n if key in d1:\n d1[key].extend(value)\n else:\n d1[key] = value\n return d1\n\n\ndef determine_base_flags():\n flags = {\n 'libraries': [],\n 'include_dirs': [join(src_path, 'kivy', 'include')],\n 'library_dirs': [],\n 'extra_link_args': [],\n 'extra_compile_args': []}\n if c_options['use_ios']:\n sysroot = environ.get('IOSSDKROOT', environ.get('SDKROOT'))\n if not sysroot:\n raise Exception('IOSSDKROOT is not set')\n flags['include_dirs'] += [sysroot]\n flags['extra_compile_args'] += ['-isysroot', sysroot]\n flags['extra_link_args'] += ['-isysroot', sysroot]\n elif platform.startswith('freebsd'):\n flags['include_dirs'] += [join(\n environ.get('LOCALBASE', '/usr/local'), 'include')]\n flags['library_dirs'] += [join(\n environ.get('LOCALBASE', '/usr/local'), 'lib')]\n elif platform == 'darwin':\n v = os.uname()\n if v[2] >= '13.0.0':\n # use xcode-select to search on the right Xcode path\n # XXX use the best SDK available instead of a specific one\n import platform as _platform\n xcode_dev = getoutput('xcode-select -p').splitlines()[0]\n sdk_mac_ver = '.'.join(_platform.mac_ver()[0].split('.')[:2])\n print('Xcode detected at {}, and using OS X{} sdk'.format(\n xcode_dev, sdk_mac_ver))\n sysroot = join(\n xcode_dev.decode('utf-8'),\n 'Platforms/MacOSX.platform/Developer/SDKs',\n 'MacOSX{}.sdk'.format(sdk_mac_ver),\n 'System/Library/Frameworks')\n else:\n sysroot = ('/System/Library/Frameworks/'\n 'ApplicationServices.framework/Frameworks')\n flags['extra_compile_args'] += ['-F%s' % sysroot]\n flags['extra_link_args'] += ['-F%s' % sysroot]\n elif platform == 'win32':\n flags['include_dirs'] += [get_python_inc(prefix=sys.prefix)]\n flags['library_dirs'] += [join(sys.prefix, \"libs\")]\n return flags\n\n\ndef determine_gl_flags():\n kivy_graphics_include = join(src_path, 'kivy', 'include')\n flags = {'include_dirs': [kivy_graphics_include], 'libraries': []}\n base_flags = {'include_dirs': [kivy_graphics_include], 'libraries': []}\n if c_options['use_opengl_mock']:\n return flags, base_flags\n if platform == 'win32':\n flags['libraries'] = ['opengl32', 'glew32']\n elif platform == 'ios':\n flags['libraries'] = ['GLESv2']\n flags['extra_link_args'] = ['-framework', 'OpenGLES']\n elif platform == 'darwin':\n flags['extra_link_args'] = ['-framework', 'OpenGL', '-arch', osx_arch]\n flags['extra_compile_args'] = ['-arch', osx_arch]\n elif platform.startswith('freebsd'):\n flags['libraries'] = ['GL']\n elif platform.startswith('openbsd'):\n flags['include_dirs'] = ['/usr/X11R6/include']\n flags['library_dirs'] = ['/usr/X11R6/lib']\n flags['libraries'] = ['GL']\n elif platform == 'android':\n flags['include_dirs'] = [join(ndkplatform, 'usr', 'include')]\n flags['library_dirs'] = [join(ndkplatform, 'usr', 'lib')]\n flags['libraries'] = ['GLESv2']\n elif platform == 'rpi':\n flags['include_dirs'] = [\n '/opt/vc/include',\n '/opt/vc/include/interface/vcos/pthreads',\n '/opt/vc/include/interface/vmcs_host/linux']\n flags['library_dirs'] = ['/opt/vc/lib']\n brcm_lib_files = (\n '/opt/vc/lib/libbrcmEGL.so',\n '/opt/vc/lib/libbrcmGLESv2.so')\n if all((exists(lib) for lib in brcm_lib_files)):\n print(\n 'Found brcmEGL and brcmGLES library files'\n 'for rpi platform at /opt/vc/lib/')\n gl_libs = ['brcmEGL', 'brcmGLESv2']\n else:\n print(\n 'Failed to find brcmEGL and brcmGLESv2 library files'\n 'for rpi platform, falling back to EGL and GLESv2.')\n gl_libs = ['EGL', 'GLESv2']\n flags['libraries'] = ['bcm_host'] + gl_libs\n elif platform == 'mali':\n flags['include_dirs'] = ['/usr/include/']\n flags['library_dirs'] = ['/usr/lib/arm-linux-gnueabihf']\n flags['libraries'] = ['GLESv2']\n c_options['use_x11'] = True\n c_options['use_egl'] = True\n else:\n flags['libraries'] = ['GL']\n return flags, base_flags\n\n\ndef determine_sdl2():\n flags = {}\n if not c_options['use_sdl2']:\n return flags\n\n sdl2_path = environ.get('KIVY_SDL2_PATH', None)\n\n if sdl2_flags and not sdl2_path and platform == 'darwin':\n return sdl2_flags\n\n # no pkgconfig info, or we want to use a specific sdl2 path, so perform\n # manual configuration\n flags['libraries'] = ['SDL2', 'SDL2_ttf', 'SDL2_image', 'SDL2_mixer']\n split_chr = ';' if platform == 'win32' else ':'\n sdl2_paths = sdl2_path.split(split_chr) if sdl2_path else []\n\n if not sdl2_paths:\n sdl_inc = join(sys.prefix, 'include', 'SDL2')\n if isdir(sdl_inc):\n sdl2_paths = [sdl_inc]\n sdl2_paths.extend(['/usr/local/include/SDL2', '/usr/include/SDL2'])\n\n flags['include_dirs'] = sdl2_paths\n flags['extra_link_args'] = []\n flags['extra_compile_args'] = []\n flags['library_dirs'] = (\n sdl2_paths if sdl2_paths else\n ['/usr/local/lib/'])\n\n if sdl2_flags:\n flags = merge(flags, sdl2_flags)\n\n # ensure headers for all the SDL2 and sub libraries are available\n libs_to_check = ['SDL', 'SDL_mixer', 'SDL_ttf', 'SDL_image']\n can_compile = True\n for lib in libs_to_check:\n found = False\n for d in flags['include_dirs']:\n fn = join(d, '{}.h'.format(lib))\n if exists(fn):\n found = True\n print('SDL2: found {} header at {}'.format(lib, fn))\n break\n\n if not found:\n print('SDL2: missing sub library {}'.format(lib))\n can_compile = False\n\n if not can_compile:\n c_options['use_sdl2'] = False\n return {}\n\n return flags\n\n\nbase_flags = determine_base_flags()\ngl_flags, gl_flags_base = determine_gl_flags()\n\n# -----------------------------------------------------------------------------\n# sources to compile\n# all the dependencies have been found manually with:\n# grep -inr -E '(cimport|include)' kivy/graphics/context_instructions.{pxd,pyx}\ngraphics_dependencies = {\n 'buffer.pyx': ['common.pxi'],\n 'context.pxd': ['instructions.pxd', 'texture.pxd', 'vbo.pxd', 'cgl.pxd'],\n 'cgl.pxd': ['common.pxi', 'config.pxi', 'gl_redirect.h'],\n 'compiler.pxd': ['instructions.pxd'],\n 'compiler.pyx': ['context_instructions.pxd'],\n 'cgl.pyx': ['cgl.pxd'],\n 'cgl_mock.pyx': ['cgl.pxd'],\n 'cgl_sdl2.pyx': ['cgl.pxd'],\n 'cgl_gl.pyx': ['cgl.pxd'],\n 'cgl_glew.pyx': ['cgl.pxd'],\n 'context_instructions.pxd': [\n 'transformation.pxd', 'instructions.pxd', 'texture.pxd'],\n 'fbo.pxd': ['cgl.pxd', 'instructions.pxd', 'texture.pxd'],\n 'fbo.pyx': [\n 'config.pxi', 'opcodes.pxi', 'transformation.pxd', 'context.pxd'],\n 'gl_instructions.pyx': [\n 'config.pxi', 'opcodes.pxi', 'cgl.pxd', 'instructions.pxd'],\n 'instructions.pxd': [\n 'vbo.pxd', 'context_instructions.pxd', 'compiler.pxd', 'shader.pxd',\n 'texture.pxd', '../_event.pxd'],\n 'instructions.pyx': [\n 'config.pxi', 'opcodes.pxi', 'cgl.pxd',\n 'context.pxd', 'common.pxi', 'vertex.pxd', 'transformation.pxd'],\n 'opengl.pyx': [\n 'config.pxi', 'common.pxi', 'cgl.pxd', 'gl_redirect.h'],\n 'opengl_utils.pyx': [\n 'opengl_utils_def.pxi', 'cgl.pxd', ],\n 'shader.pxd': ['cgl.pxd', 'transformation.pxd', 'vertex.pxd'],\n 'shader.pyx': [\n 'config.pxi', 'common.pxi', 'cgl.pxd',\n 'vertex.pxd', 'transformation.pxd', 'context.pxd',\n 'gl_debug_logger.pxi'],\n 'stencil_instructions.pxd': ['instructions.pxd'],\n 'stencil_instructions.pyx': [\n 'config.pxi', 'opcodes.pxi', 'cgl.pxd',\n 'gl_debug_logger.pxi'],\n 'scissor_instructions.pyx': [\n 'config.pxi', 'opcodes.pxi', 'cgl.pxd'],\n 'svg.pyx': ['config.pxi', 'common.pxi', 'texture.pxd', 'instructions.pxd',\n 'vertex_instructions.pxd', 'tesselator.pxd'],\n 'texture.pxd': ['cgl.pxd'],\n 'texture.pyx': [\n 'config.pxi', 'common.pxi', 'opengl_utils_def.pxi', 'context.pxd',\n 'cgl.pxd', 'opengl_utils.pxd',\n 'img_tools.pxi', 'gl_debug_logger.pxi'],\n 'vbo.pxd': ['buffer.pxd', 'cgl.pxd', 'vertex.pxd'],\n 'vbo.pyx': [\n 'config.pxi', 'common.pxi', 'context.pxd',\n 'instructions.pxd', 'shader.pxd', 'gl_debug_logger.pxi'],\n 'vertex.pxd': ['cgl.pxd'],\n 'vertex.pyx': ['config.pxi', 'common.pxi'],\n 'vertex_instructions.pyx': [\n 'config.pxi', 'common.pxi', 'vbo.pxd', 'vertex.pxd',\n 'instructions.pxd', 'vertex_instructions.pxd',\n 'cgl.pxd', 'texture.pxd', 'vertex_instructions_line.pxi'],\n 'vertex_instructions_line.pxi': ['stencil_instructions.pxd']}\n\nsources = {\n '_event.pyx': merge(base_flags, {'depends': ['properties.pxd']}),\n '_clock.pyx': {},\n 'weakproxy.pyx': {},\n 'properties.pyx': merge(base_flags, {'depends': ['_event.pxd']}),\n 'graphics/buffer.pyx': merge(base_flags, gl_flags_base),\n 'graphics/context.pyx': merge(base_flags, gl_flags_base),\n 'graphics/compiler.pyx': merge(base_flags, gl_flags_base),\n 'graphics/context_instructions.pyx': merge(base_flags, gl_flags_base),\n 'graphics/fbo.pyx': merge(base_flags, gl_flags_base),\n 'graphics/gl_instructions.pyx': merge(base_flags, gl_flags_base),\n 'graphics/instructions.pyx': merge(base_flags, gl_flags_base),\n 'graphics/opengl.pyx': merge(base_flags, gl_flags_base),\n 'graphics/opengl_utils.pyx': merge(base_flags, gl_flags_base),\n 'graphics/shader.pyx': merge(base_flags, gl_flags_base),\n 'graphics/stencil_instructions.pyx': merge(base_flags, gl_flags_base),\n 'graphics/scissor_instructions.pyx': merge(base_flags, gl_flags_base),\n 'graphics/texture.pyx': merge(base_flags, gl_flags_base),\n 'graphics/transformation.pyx': merge(base_flags, gl_flags_base),\n 'graphics/vbo.pyx': merge(base_flags, gl_flags_base),\n 'graphics/vertex.pyx': merge(base_flags, gl_flags_base),\n 'graphics/vertex_instructions.pyx': merge(base_flags, gl_flags_base),\n 'graphics/cgl.pyx': merge(base_flags, gl_flags_base),\n 'graphics/cgl_backend/cgl_mock.pyx': merge(base_flags, gl_flags_base),\n 'graphics/cgl_backend/cgl_gl.pyx': merge(base_flags, gl_flags),\n 'graphics/cgl_backend/cgl_glew.pyx': merge(base_flags, gl_flags),\n 'graphics/cgl_backend/cgl_sdl2.pyx': merge(base_flags, gl_flags_base),\n 'graphics/cgl_backend/cgl_debug.pyx': merge(base_flags, gl_flags_base),\n 'core/text/text_layout.pyx': base_flags,\n 'core/window/window_info.pyx': base_flags,\n 'graphics/tesselator.pyx': merge(base_flags, {\n 'include_dirs': ['kivy/lib/libtess2/Include'],\n 'c_depends': [\n 'lib/libtess2/Source/bucketalloc.c',\n 'lib/libtess2/Source/dict.c',\n 'lib/libtess2/Source/geom.c',\n 'lib/libtess2/Source/mesh.c',\n 'lib/libtess2/Source/priorityq.c',\n 'lib/libtess2/Source/sweep.c',\n 'lib/libtess2/Source/tess.c'\n ]\n }),\n 'graphics/svg.pyx': merge(base_flags, gl_flags_base)\n}\n\nif c_options[\"use_sdl2\"]:\n sdl2_flags = determine_sdl2()\n\nif c_options['use_sdl2'] and sdl2_flags:\n sources['graphics/cgl_backend/cgl_sdl2.pyx'] = merge(\n sources['graphics/cgl_backend/cgl_sdl2.pyx'], sdl2_flags)\n sdl2_depends = {'depends': ['lib/sdl2.pxi']}\n for source_file in ('core/window/_window_sdl2.pyx',\n 'core/image/_img_sdl2.pyx',\n 'core/text/_text_sdl2.pyx',\n 'core/audio/audio_sdl2.pyx',\n 'core/clipboard/_clipboard_sdl2.pyx'):\n sources[source_file] = merge(\n base_flags, sdl2_flags, sdl2_depends)\n\nif platform in ('darwin', 'ios'):\n # activate ImageIO provider for our core image\n if platform == 'ios':\n osx_flags = {'extra_link_args': [\n '-framework', 'Foundation',\n '-framework', 'UIKit',\n '-framework', 'AudioToolbox',\n '-framework', 'CoreGraphics',\n '-framework', 'QuartzCore',\n '-framework', 'ImageIO',\n '-framework', 'Accelerate']}\n else:\n osx_flags = {'extra_link_args': [\n '-framework', 'ApplicationServices']}\n sources['core/image/img_imageio.pyx'] = merge(\n base_flags, osx_flags)\n\nif c_options['use_avfoundation']:\n import platform as _platform\n mac_ver = [int(x) for x in _platform.mac_ver()[0].split('.')[:2]]\n if mac_ver >= [10, 7]:\n osx_flags = {\n 'extra_link_args': ['-framework', 'AVFoundation'],\n 'extra_compile_args': ['-ObjC++'],\n 'depends': ['core/camera/camera_avfoundation_implem.m']}\n sources['core/camera/camera_avfoundation.pyx'] = merge(\n base_flags, osx_flags)\n else:\n print('AVFoundation cannot be used, OSX >= 10.7 is required')\n\nif c_options['use_rpi']:\n sources['lib/vidcore_lite/egl.pyx'] = merge(\n base_flags, gl_flags)\n sources['lib/vidcore_lite/bcm.pyx'] = merge(\n base_flags, gl_flags)\n\nif c_options['use_x11']:\n libs = ['Xrender', 'X11']\n if c_options['use_egl']:\n libs += ['EGL']\n else:\n libs += ['GL']\n sources['core/window/window_x11.pyx'] = merge(\n base_flags, gl_flags, {\n # FIXME add an option to depend on them but not compile them\n # cause keytab is included in core, and core is included in\n # window_x11\n #\n # 'depends': [\n # 'core/window/window_x11_keytab.c',\n # 'core/window/window_x11_core.c'],\n 'libraries': libs})\n\nif c_options['use_gstreamer']:\n sources['lib/gstplayer/_gstplayer.pyx'] = merge(\n base_flags, gst_flags, {\n 'depends': ['lib/gstplayer/_gstplayer.h']})\n\n\n# -----------------------------------------------------------------------------\n# extension modules\n\ndef get_dependencies(name, deps=None):\n if deps is None:\n deps = []\n for dep in graphics_dependencies.get(name, []):\n if dep not in deps:\n deps.append(dep)\n get_dependencies(dep, deps)\n return deps\n\n\ndef resolve_dependencies(fn, depends):\n fn = basename(fn)\n deps = []\n get_dependencies(fn, deps)\n get_dependencies(fn.replace('.pyx', '.pxd'), deps)\n\n deps_final = []\n paths_to_test = ['graphics', 'include']\n for dep in deps:\n found = False\n for path in paths_to_test:\n filename = expand(src_path, path, dep)\n if exists(filename):\n deps_final.append(filename)\n found = True\n break\n if not found:\n print('ERROR: Dependency for {} not resolved: {}'.format(\n fn, dep\n ))\n\n return deps_final\n\n\ndef get_extensions_from_sources(sources):\n ext_modules = []\n if environ.get('KIVY_FAKE_BUILDEXT'):\n print('Fake build_ext asked, will generate only .h/.c')\n return ext_modules\n for pyx, flags in sources.items():\n is_graphics = pyx.startswith('graphics')\n pyx = expand(src_path, pyx)\n depends = [expand(src_path, x) for x in flags.pop('depends', [])]\n c_depends = [expand(src_path, x) for x in flags.pop('c_depends', [])]\n if not have_cython:\n pyx = '%s.c' % pyx[:-4]\n if is_graphics:\n depends = resolve_dependencies(pyx, depends)\n f_depends = [x for x in depends if x.rsplit('.', 1)[-1] in (\n 'c', 'cpp', 'm')]\n module_name = get_modulename_from_file(pyx)\n flags_clean = {'depends': depends}\n for key, value in flags.items():\n if len(value):\n flags_clean[key] = value\n ext_modules.append(CythonExtension(\n module_name, [pyx] + f_depends + c_depends, **flags_clean))\n return ext_modules\n\n\next_modules = get_extensions_from_sources(sources)\n\n\n# -----------------------------------------------------------------------------\n# automatically detect data files\nsplit_examples = int(environ.get('KIVY_SPLIT_EXAMPLES', '0'))\ndata_file_prefix = 'share/kivy-'\nexamples = {}\nexamples_allowed_ext = ('readme', 'py', 'wav', 'png', 'jpg', 'svg', 'json',\n 'avi', 'gif', 'txt', 'ttf', 'obj', 'mtl', 'kv', 'mpg',\n 'glsl', 'zip')\nfor root, subFolders, files in walk('examples'):\n for fn in files:\n ext = fn.split('.')[-1].lower()\n if ext not in examples_allowed_ext:\n continue\n filename = join(root, fn)\n directory = '%s%s' % (data_file_prefix, dirname(filename))\n if directory not in examples:\n examples[directory] = []\n examples[directory].append(filename)\n\nbinary_deps = []\nbinary_deps_path = join(src_path, 'kivy', 'binary_deps')\nif isdir(binary_deps_path):\n for root, dirnames, filenames in walk(binary_deps_path):\n for fname in filenames:\n binary_deps.append(\n join(root.replace(binary_deps_path, 'binary_deps'), fname))\n\n# -----------------------------------------------------------------------------\n# setup !\nif not build_examples:\n setup(\n name='Kivy',\n version=get_version(),\n author='Kivy Team and other contributors',\n author_email='[email protected]',\n url='http://kivy.org',\n license='MIT',\n description=(\n 'A software library for rapid development of '\n 'hardware-accelerated multitouch applications.'),\n long_description=get_description(),\n ext_modules=ext_modules,\n cmdclass=cmdclass,\n packages=[\n 'kivy',\n 'kivy.adapters',\n 'kivy.core',\n 'kivy.core.audio',\n 'kivy.core.camera',\n 'kivy.core.clipboard',\n 'kivy.core.image',\n 'kivy.core.gl',\n 'kivy.core.spelling',\n 'kivy.core.text',\n 'kivy.core.video',\n 'kivy.core.window',\n 'kivy.deps',\n 'kivy.effects',\n 'kivy.graphics',\n 'kivy.graphics.cgl_backend',\n 'kivy.garden',\n 'kivy.input',\n 'kivy.input.postproc',\n 'kivy.input.providers',\n 'kivy.lang',\n 'kivy.lib',\n 'kivy.lib.osc',\n 'kivy.lib.gstplayer',\n 'kivy.lib.vidcore_lite',\n 'kivy.modules',\n 'kivy.network',\n 'kivy.storage',\n 'kivy.tests',\n 'kivy.tools',\n 'kivy.tools.packaging',\n 'kivy.tools.packaging.pyinstaller_hooks',\n 'kivy.tools.highlight',\n 'kivy.extras',\n 'kivy.uix',\n 'kivy.uix.behaviors',\n 'kivy.uix.recycleview',\n ],\n package_dir={'kivy': 'kivy'},\n package_data={'kivy': [\n 'setupconfig.py',\n '*.pxd',\n '*.pxi',\n 'core/text/*.pxd',\n 'core/text/*.pxi',\n 'core/window/*.pxi',\n 'core/window/*.pxd',\n 'graphics/*.pxd',\n 'graphics/*.pxi',\n 'graphics/*.h',\n 'include/*',\n 'lib/vidcore_lite/*.pxd',\n 'lib/vidcore_lite/*.pxi',\n 'data/*.kv',\n 'data/*.json',\n 'data/fonts/*.ttf',\n 'data/images/*.png',\n 'data/images/*.jpg',\n 'data/images/*.gif',\n 'data/images/*.atlas',\n 'data/keyboards/*.json',\n 'data/logo/*.png',\n 'data/glsl/*.png',\n 'data/glsl/*.vs',\n 'data/glsl/*.fs',\n 'tests/*.zip',\n 'tests/*.kv',\n 'tests/*.png',\n 'tests/*.ttf',\n 'tests/*.ogg',\n 'tools/gles_compat/*',\n 'tools/highlight/*',\n 'tools/packaging/README.txt',\n 'tools/packaging/win32/kivy.bat',\n 'tools/packaging/win32/kivyenv.sh',\n 'tools/packaging/win32/README.txt',\n 'tools/packaging/osx/Info.plist',\n 'tools/packaging/osx/InfoPlist.strings',\n 'tools/packaging/osx/kivy.sh',\n 'tools/pep8checker/*',\n 'tools/theming/defaulttheme/*',\n ] + binary_deps},\n data_files=[] if split_examples else list(examples.items()),\n classifiers=[\n 'Development Status :: 5 - Production/Stable',\n 'Environment :: MacOS X',\n 'Environment :: Win32 (MS Windows)',\n 'Environment :: X11 Applications',\n 'Intended Audience :: Developers',\n 'Intended Audience :: End Users/Desktop',\n 'Intended Audience :: Information Technology',\n 'Intended Audience :: Science/Research',\n 'License :: OSI Approved :: MIT License',\n 'Natural Language :: English',\n 'Operating System :: MacOS :: MacOS X',\n 'Operating System :: Microsoft :: Windows',\n 'Operating System :: POSIX :: BSD :: FreeBSD',\n 'Operating System :: POSIX :: Linux',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3.4',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n 'Topic :: Artistic Software',\n 'Topic :: Games/Entertainment',\n 'Topic :: Multimedia :: Graphics :: 3D Rendering',\n 'Topic :: Multimedia :: Graphics :: Capture :: Digital Camera',\n 'Topic :: Multimedia :: Graphics :: Presentation',\n 'Topic :: Multimedia :: Graphics :: Viewers',\n 'Topic :: Multimedia :: Sound/Audio :: Players :: MP3',\n 'Topic :: Multimedia :: Video :: Display',\n 'Topic :: Scientific/Engineering :: Human Machine Interfaces',\n 'Topic :: Scientific/Engineering :: Visualization',\n ('Topic :: Software Development :: Libraries :: '\n 'Application Frameworks'),\n 'Topic :: Software Development :: User Interfaces'],\n dependency_links=[\n 'https://github.com/kivy-garden/garden/archive/master.zip'],\n install_requires=[\n 'Kivy-Garden>=0.1.4', 'docutils', 'pygments'\n ],\n extra_requires={\n 'tuio': ['oscpy']\n },\n setup_requires=[\n 'cython>=' + MIN_CYTHON_STRING\n ] if not skip_cython else [])\nelse:\n setup(\n name='Kivy-examples',\n version=get_version(),\n author='Kivy Team and other contributors',\n author_email='[email protected]',\n url='http://kivy.org',\n license='MIT',\n description=('Kivy examples.'),\n long_description=get_description(),\n data_files=list(examples.items()))\n", "path": "setup.py" } ]
diff --git a/doc/doc-requirements.txt b/doc/doc-requirements.txt index c3dfab3387..be9dff62a1 100644 --- a/doc/doc-requirements.txt +++ b/doc/doc-requirements.txt @@ -1,4 +1,4 @@ -Cython>=0.23 +Cython>=0.24 # Frozen Sphinx requirements for easier pip installation sphinxcontrib-actdiag sphinxcontrib-blockdiag diff --git a/kivy/_event.pyx b/kivy/_event.pyx index 7a6aaf7e5a..a9d5c95f4e 100644 --- a/kivy/_event.pyx +++ b/kivy/_event.pyx @@ -145,9 +145,9 @@ cdef class Observable(ObjectWithUid): except KeyError: pass - property proxy_ref: - def __get__(self): - return self + @property + def proxy_ref(self): + return self cdef class EventDispatcher(ObjectWithUid): @@ -888,12 +888,12 @@ cdef class EventDispatcher(ObjectWithUid): self.__properties[name] = prop setattr(self.__class__, name, prop) - property proxy_ref: + @property + def proxy_ref(self): '''Default implementation of proxy_ref, returns self. .. versionadded:: 1.9.0 ''' - def __get__(self): - return self + return self cdef class BoundCallback: diff --git a/kivy/core/window/_window_sdl2.pyx b/kivy/core/window/_window_sdl2.pyx index 31e0663c60..1b64c44374 100644 --- a/kivy/core/window/_window_sdl2.pyx +++ b/kivy/core/window/_window_sdl2.pyx @@ -649,11 +649,11 @@ cdef class _WindowSDL2Storage: def grab_mouse(self, grab): SDL_SetWindowGrab(self.win, SDL_TRUE if grab else SDL_FALSE) - property window_size: - def __get__(self): - cdef int w, h - SDL_GetWindowSize(self.win, &w, &h) - return [w, h] + @property + def window_size(self): + cdef int w, h + SDL_GetWindowSize(self.win, &w, &h) + return [w, h] # Based on the example at diff --git a/kivy/graphics/context_instructions.pyx b/kivy/graphics/context_instructions.pyx index dbc3e8d770..7f79e889ad 100644 --- a/kivy/graphics/context_instructions.pyx +++ b/kivy/graphics/context_instructions.pyx @@ -111,17 +111,21 @@ cdef class PushState(ContextInstruction): ContextInstruction.__init__(self, **kwargs) self.context_push = list(args) - property state: - def __get__(self): - return ','.join(self.context_push) - def __set__(self, value): - self.context_push = value.split(',') + @property + def state(self): + return ','.join(self.context_push) - property states: - def __get__(self): - return self.context_push - def __set__(self, value): - self.context_push = list(value) + @state.setter + def state(self, value): + self.context_push = value.split(',') + + @property + def states(self): + return self.context_push + + @states.setter + def states(self, value): + self.context_push = list(value) cdef class ChangeState(ContextInstruction): @@ -134,11 +138,13 @@ cdef class ChangeState(ContextInstruction): ContextInstruction.__init__(self, **kwargs) self.context_state.update(**kwargs) - property changes: - def __get__(self): - return self.context_state - def __set__(self, value): - self.context_state = dict(value) + @property + def changes(self): + return self.context_state + + @changes.setter + def changes(self, value): + self.context_state = dict(value) cdef class PopState(ContextInstruction): @@ -151,17 +157,21 @@ cdef class PopState(ContextInstruction): ContextInstruction.__init__(self, **kwargs) self.context_pop = list(args) - property state: - def __get__(self): - return ','.join(self.context_pop) - def __set__(self, value): - self.context_pop = value.split(',') + @property + def state(self): + return ','.join(self.context_pop) + + @state.setter + def state(self, value): + self.context_pop = value.split(',') + + @property + def states(self): + return self.context_pop - property states: - def __get__(self): - return self.context_pop - def __set__(self, value): - self.context_pop = list(value) + @states.setter + def states(self, value): + self.context_pop = list(value) cdef class Color(ContextInstruction): @@ -253,76 +263,105 @@ cdef class Color(ContextInstruction): if property_name in kwargs: setattr(self, property_name, kwargs[property_name]) - property rgba: + @property + def rgba(self): '''RGBA color, list of 4 values in 0-1 range. ''' - def __get__(self): - return self.context_state['color'] - def __set__(self, rgba): - self.set_state('color', [float(x) for x in rgba]) - property rgb: + return self.context_state['color'] + + @rgba.setter + def rgba(self, rgba): + self.set_state('color', [float(x) for x in rgba]) + + @property + def rgb(self): '''RGB color, list of 3 values in 0-1 range. The alpha will be 1. ''' - def __get__(self): - return self.rgba[:-1] - def __set__(self, rgb): - self.rgba = (rgb[0], rgb[1], rgb[2], 1.0) - property r: + return self.rgba[:-1] + + @rgb.setter + def rgb(self, rgb): + self.rgba = (rgb[0], rgb[1], rgb[2], 1.0) + + @property + def r(self): '''Red component, between 0 and 1. ''' - def __get__(self): - return self.rgba[0] - def __set__(self, r): - self.rgba = [r, self.g, self.b, self.a] - property g: + return self.rgba[0] + + @r.setter + def r(self, r): + self.rgba = [r, self.g, self.b, self.a] + + @property + def g(self): '''Green component, between 0 and 1. ''' - def __get__(self): - return self.rgba[1] - def __set__(self, g): - self.rgba = [self.r, g, self.b, self.a] - property b: + return self.rgba[1] + + @g.setter + def g(self, g): + self.rgba = [self.r, g, self.b, self.a] + + @property + def b(self): '''Blue component, between 0 and 1. ''' - def __get__(self): - return self.rgba[2] - def __set__(self, b): - self.rgba = [self.r, self.g, b, self.a] - property a: + return self.rgba[2] + + @b.setter + def b(self, b): + self.rgba = [self.r, self.g, b, self.a] + + @property + def a(self): '''Alpha component, between 0 and 1. ''' - def __get__(self): - return self.rgba[3] - def __set__(self, a): - self.rgba = [self.r, self.g, self.b, a] - property hsv: + return self.rgba[3] + + @a.setter + def a(self, a): + self.rgba = [self.r, self.g, self.b, a] + + @property + def hsv(self): '''HSV color, list of 3 values in 0-1 range, alpha will be 1. ''' - def __get__(self): - return rgb_to_hsv(self.r, self.g, self.b) - def __set__(self, x): - self.rgb = hsv_to_rgb(x[0], x[1], x[2]) - property h: + return rgb_to_hsv(self.r, self.g, self.b) + + @hsv.setter + def hsv(self, x): + self.rgb = hsv_to_rgb(x[0], x[1], x[2]) + + @property + def h(self): '''Hue component, between 0 and 1. ''' - def __get__(self): - return self.hsv[0] - def __set__(self, x): - self.hsv = [x, self.s, self.v] - property s: + return self.hsv[0] + + @h.setter + def h(self, x): + self.hsv = [x, self.s, self.v] + + @property + def s(self): '''Saturation component, between 0 and 1. ''' - def __get__(self): - return self.hsv[1] - def __set__(self, x): - self.hsv = [self.h, x, self.v] - property v: + return self.hsv[1] + + @s.setter + def s(self, x): + self.hsv = [self.h, x, self.v] + + @property + def v(self): '''Value component, between 0 and 1. ''' - def __get__(self): - return self.hsv[2] - def __set__(self, x): - self.hsv = [self.h, self.s, x] + return self.hsv[2] + + @v.setter + def v(self, x): + self.hsv = [self.h, self.s, x] cdef class BindTexture(ContextInstruction): @@ -351,41 +390,47 @@ cdef class BindTexture(ContextInstruction): cdef RenderContext context = self.get_context() context.set_texture(self._index, self._texture) - property texture: - def __get__(self): - return self._texture - def __set__(self, object texture): - if texture is None: - texture = get_default_texture() - if self._texture is texture: - return - self._texture = texture - self.flag_update() - - property index: - def __get__(self): - return self._index - def __set__(self, int index): - if self._index == index: - return - self._index = index - self.flag_update() - - property source: + @property + def texture(self): + return self._texture + + @texture.setter + def texture(self, object texture): + if texture is None: + texture = get_default_texture() + if self._texture is texture: + return + self._texture = texture + self.flag_update() + + @property + def index(self): + return self._index + + @index.setter + def index(self, int index): + if self._index == index: + return + self._index = index + self.flag_update() + + @property + def source(self): '''Set/get the source (filename) to load for the texture. ''' - def __get__(self): - return self._source - def __set__(self, filename): - self._source = resource_find(filename) - if self._source: - tex = Cache.get('kv.texture', filename) - if not tex: - tex = Image(self._source).texture - Cache.append('kv.texture', filename, tex) - self.texture = tex - else: - self.texture = None + return self._source + + @source.setter + def source(self, filename): + self._source = resource_find(filename) + if self._source: + tex = Cache.get('kv.texture', filename) + if not tex: + tex = Image(self._source).texture + Cache.append('kv.texture', filename, tex) + self.texture = tex + else: + self.texture = None cdef double radians(double degrees): @@ -401,17 +446,19 @@ cdef class LoadIdentity(ContextInstruction): def __init__(self, **kwargs): self.stack = kwargs.get('stack', 'modelview_mat') - property stack: + @property + def stack(self): '''Name of the matrix stack to use. Can be 'modelview_mat', 'projection_mat' or 'frag_modelview_mat'. ''' - def __get__(self): - if PY2: - return self.context_state.keys()[0] - else: - return list(self.context_state.keys())[0] - def __set__(self, value): - self.context_state = {value: Matrix()} + if PY2: + return self.context_state.keys()[0] + else: + return list(self.context_state.keys())[0] + + @stack.setter + def stack(self, value): + self.context_state = {value: Matrix()} cdef class PushMatrix(ContextInstruction): @@ -421,17 +468,19 @@ cdef class PushMatrix(ContextInstruction): ContextInstruction.__init__(self, **kwargs) self.stack = kwargs.get('stack', 'modelview_mat') - property stack: + @property + def stack(self): '''Name of the matrix stack to use. Can be 'modelview_mat', 'projection_mat' or 'frag_modelview_mat'. .. versionadded:: 1.6.0 ''' - def __get__(self): - return self.context_push[0] - def __set__(self, value): - value = value or 'modelview_mat' - self.context_push = [value] + return self.context_push[0] + + @stack.setter + def stack(self, value): + value = value or 'modelview_mat' + self.context_push = [value] cdef class PopMatrix(ContextInstruction): @@ -441,17 +490,19 @@ cdef class PopMatrix(ContextInstruction): ContextInstruction.__init__(self, **kwargs) self.stack = kwargs.get('stack', 'modelview_mat') - property stack: + @property + def stack(self): '''Name of the matrix stack to use. Can be 'modelview_mat', 'projection_mat' or 'frag_modelview_mat'. .. versionadded:: 1.6.0 ''' - def __get__(self): - return self.context_push[0] - def __set__(self, value): - value = value or 'modelview_mat' - self.context_pop = [value] + return self.context_push[0] + + @stack.setter + def stack(self, value): + value = value or 'modelview_mat' + self.context_pop = [value] cdef class ApplyContextMatrix(ContextInstruction): @@ -470,27 +521,31 @@ cdef class ApplyContextMatrix(ContextInstruction): m = m.multiply(context.get_state(self._source_stack)) context.set_state(self._target_stack, m) - property target_stack: + @property + def target_stack(self): '''Name of the matrix stack to use as a target. Can be 'modelview_mat', 'projection_mat' or 'frag_modelview_mat'. .. versionadded:: 1.6.0 ''' - def __get__(self): - return self._target_stack - def __set__(self, value): - self._target_stack = value or 'modelview_mat' + return self._target_stack - property source_stack: + @target_stack.setter + def target_stack(self, value): + self._target_stack = value or 'modelview_mat' + + @property + def source_stack(self): '''Name of the matrix stack to use as a source. Can be 'modelview_mat', 'projection_mat' or 'frag_modelview_mat'. .. versionadded:: 1.6.0 ''' - def __get__(self): - return self._source_stack - def __set__(self, value): - self._source_stack = value or 'modelview_mat' + return self._source_stack + + @source_stack.setter + def source_stack(self, value): + self._source_stack = value or 'modelview_mat' cdef class UpdateNormalMatrix(ContextInstruction): @@ -524,30 +579,34 @@ cdef class MatrixInstruction(ContextInstruction): mvm = context.get_state(self._stack) context.set_state(self._stack, mvm.multiply(self.matrix)) - property matrix: + @property + def matrix(self): ''' Matrix property. Matrix from the transformation module. Setting the matrix using this property when a change is made is important because it will notify the context about the update. ''' - def __get__(self): - if self._matrix == None: - self._matrix = Matrix() - return self._matrix - def __set__(self, x): - self._matrix = x - self.flag_update() + if self._matrix == None: + self._matrix = Matrix() + return self._matrix - property stack: + @matrix.setter + def matrix(self, x): + self._matrix = x + self.flag_update() + + @property + def stack(self): '''Name of the matrix stack to use. Can be 'modelview_mat', 'projection_mat' or 'frag_modelview_mat'. .. versionadded:: 1.6.0 ''' - def __get__(self): - return self._stack - def __set__(self, value): - value = value or "modelview_mat" - self._stack = value + return self._stack + + @stack.setter + def stack(self, value): + value = value or "modelview_mat" + self._stack = value cdef class Transform(MatrixInstruction): @@ -647,43 +706,49 @@ cdef class Rotate(Transform): matrix = matrix.multiply(Matrix().translate(-ox, -oy, -oz)) self.matrix = matrix - property angle: + @property + def angle(self): '''Property for getting/setting the angle of the rotation. ''' - def __get__(self): - return self._angle - def __set__(self, a): - self._angle = a - self.compute() + return self._angle + + @angle.setter + def angle(self, a): + self._angle = a + self.compute() - property axis: + @property + def axis(self): '''Property for getting/setting the axis of the rotation. The format of the axis is (x, y, z). ''' - def __get__(self): - return self._axis - def __set__(self, axis): - self._axis = axis - self.compute() + return self._axis - property origin: + @axis.setter + def axis(self, axis): + self._axis = axis + self.compute() + + @property + def origin(self): '''Origin of the rotation. .. versionadded:: 1.7.0 The format of the origin can be either (x, y) or (x, y, z). ''' - def __get__(self): - return self._origin - def __set__(self, origin): - if len(origin) == 3: - self._origin = tuple(origin) - elif len(origin) == 2: - self._origin = (origin[0], origin[1], 0.) - else: - raise Exception('invalid number of components in origin') - self.compute() + return self._origin + + @origin.setter + def origin(self, origin): + if len(origin) == 3: + self._origin = tuple(origin) + elif len(origin) == 2: + self._origin = (origin[0], origin[1], 0.) + else: + raise Exception('invalid number of components in origin') + self.compute() cdef class Scale(Transform): @@ -733,83 +798,95 @@ cdef class Scale(Transform): matrix = matrix.multiply(Matrix().translate(-ox, -oy, -oz)) self.matrix = matrix - property scale: + @property + def scale(self): '''Property for getting/setting the scale. .. deprecated:: 1.6.0 Deprecated in favor of per axis scale properties x,y,z, xyz, etc. ''' - def __get__(self): - if self._x == self._y == self._z: - Logger.warning("scale property is deprecated, use xyz, x, " +\ - "y, z, etc properties to get scale factor based on axis.") - return self._x - else: - raise Exception("trying to access deprecated property" +\ - " 'scale' on Scale instruction with non uniform scaling!") - - def __set__(self, s): + if self._x == self._y == self._z: Logger.warning("scale property is deprecated, use xyz, x, " +\ "y, z, etc properties to get scale factor based on axis.") - self.set_scale(s,s,s) + return self._x + else: + raise Exception("trying to access deprecated property" +\ + " 'scale' on Scale instruction with non uniform scaling!") + - property x: + @scale.setter + def scale(self, s): + Logger.warning("scale property is deprecated, use xyz, x, " +\ + "y, z, etc properties to get scale factor based on axis.") + self.set_scale(s,s,s) + + @property + def x(self): '''Property for getting/setting the scale on the X axis. .. versionchanged:: 1.6.0 ''' - def __get__(self): - return self._x - def __set__(self, double x): - self.set_scale(x, self._y, self._z) + return self._x - property y: + @x.setter + def x(self, double x): + self.set_scale(x, self._y, self._z) + + @property + def y(self): '''Property for getting/setting the scale on the Y axis. .. versionchanged:: 1.6.0 ''' - def __get__(self): - return self._y - def __set__(self, double y): - self.set_scale(self._x, y, self._z) + return self._y + + @y.setter + def y(self, double y): + self.set_scale(self._x, y, self._z) - property z: + @property + def z(self): '''Property for getting/setting the scale on Z axis. .. versionchanged:: 1.6.0 ''' - def __get__(self): - return self._z - def __set__(self, double z): - self.set_scale(self._x, self._y, z) + return self._z + + @z.setter + def z(self, double z): + self.set_scale(self._x, self._y, z) - property xyz: + @property + def xyz(self): '''3 tuple scale vector in 3D in x, y, and z axis. .. versionchanged:: 1.6.0 ''' - def __get__(self): - return self._x, self._y, self._z - def __set__(self, c): - self.set_scale(c[0], c[1], c[2]) + return self._x, self._y, self._z - property origin: + @xyz.setter + def xyz(self, c): + self.set_scale(c[0], c[1], c[2]) + + @property + def origin(self): '''Origin of the scale. .. versionadded:: 1.9.0 The format of the origin can be either (x, y) or (x, y, z). ''' - def __get__(self): - return self._origin - def __set__(self, origin): - if len(origin) == 3: - self._origin = tuple(origin) - elif len(origin) == 2: - self._origin = (origin[0], origin[1], 0.) - else: - raise Exception('invalid number of components in origin') - self.set_scale(self._x, self._y, self._z) + return self._origin + + @origin.setter + def origin(self, origin): + if len(origin) == 3: + self._origin = tuple(origin) + elif len(origin) == 2: + self._origin = (origin[0], origin[1], 0.) + else: + raise Exception('invalid number of components in origin') + self.set_scale(self._x, self._y, self._z) cdef class Translate(Transform): @@ -836,44 +913,54 @@ cdef class Translate(Transform): self._y = y self._z = z - property x: + @property + def x(self): '''Property for getting/setting the translation on the X axis. ''' - def __get__(self): - return self._x - def __set__(self, double x): - self.set_translate(x, self._y, self._z) + return self._x + + @x.setter + def x(self, double x): + self.set_translate(x, self._y, self._z) - property y: + @property + def y(self): '''Property for getting/setting the translation on the Y axis. ''' - def __get__(self): - return self._y - def __set__(self, double y): - self.set_translate(self._x, y, self._z) + return self._y - property z: + @y.setter + def y(self, double y): + self.set_translate(self._x, y, self._z) + + @property + def z(self): '''Property for getting/setting the translation on the Z axis. ''' - def __get__(self): - return self._z - def __set__(self, double z): - self.set_translate(self._x, self._y, z) + return self._z + + @z.setter + def z(self, double z): + self.set_translate(self._x, self._y, z) - property xy: + @property + def xy(self): '''2 tuple with translation vector in 2D for x and y axis. ''' - def __get__(self): - return self._x, self._y - def __set__(self, c): - self.set_translate(c[0], c[1], self._z) + return self._x, self._y - property xyz: + @xy.setter + def xy(self, c): + self.set_translate(c[0], c[1], self._z) + + @property + def xyz(self): '''3 tuple translation vector in 3D in x, y, and z axis. ''' - def __get__(self): - return self._x, self._y, self._z - def __set__(self, c): - self.set_translate(c[0], c[1], c[2]) + return self._x, self._y, self._z + + @xyz.setter + def xyz(self, c): + self.set_translate(c[0], c[1], c[2]) diff --git a/kivy/graphics/fbo.pyx b/kivy/graphics/fbo.pyx index ee47364716..2c11f0b4be 100644 --- a/kivy/graphics/fbo.pyx +++ b/kivy/graphics/fbo.pyx @@ -406,58 +406,62 @@ cdef class Fbo(RenderContext): continue - property size: + @property + def size(self): '''Size of the framebuffer, in (width, height) format. If you change the size, the framebuffer content will be lost. ''' - def __get__(self): - return (self._width, self._height) - def __set__(self, x): - cdef int w, h - w, h = x - if w == self._width and h == self._height: - return - self._width, self._height = x - self.delete_fbo() - self.create_fbo() - self.flag_update() + return (self._width, self._height) + + @size.setter + def size(self, x): + cdef int w, h + w, h = x + if w == self._width and h == self._height: + return + self._width, self._height = x + self.delete_fbo() + self.create_fbo() + self.flag_update() - property clear_color: + @property + def clear_color(self): '''Clear color in (red, green, blue, alpha) format. ''' - def __get__(self): - return (self._clear_color[0], - self._clear_color[1], - self._clear_color[2], - self._clear_color[3]) - def __set__(self, x): - x = list(x) - if len(x) != 4: - raise Exception('clear_color must be a list/tuple of 4 entry.') - self._clear_color[0] = x[0] - self._clear_color[1] = x[1] - self._clear_color[2] = x[2] - self._clear_color[3] = x[3] - - property texture: + return (self._clear_color[0], + self._clear_color[1], + self._clear_color[2], + self._clear_color[3]) + + @clear_color.setter + def clear_color(self, x): + x = list(x) + if len(x) != 4: + raise Exception('clear_color must be a list/tuple of 4 entry.') + self._clear_color[0] = x[0] + self._clear_color[1] = x[1] + self._clear_color[2] = x[2] + self._clear_color[3] = x[3] + + @property + def texture(self): '''Return the framebuffer texture ''' - def __get__(self): - return self._texture + return self._texture - property pixels: + @property + def pixels(self): '''Get the pixels texture, in RGBA format only, unsigned byte. The origin of the image is at bottom left. .. versionadded:: 1.7.0 ''' - def __get__(self): - w, h = self._width, self._height - self.bind() - data = py_glReadPixels(0, 0, w, h, GL_RGBA, GL_UNSIGNED_BYTE) - self.release() - return data + w, h = self._width, self._height + self.bind() + data = py_glReadPixels(0, 0, w, h, GL_RGBA, GL_UNSIGNED_BYTE) + self.release() + return data cpdef get_pixel_color(self, int wx, int wy): """Get the color of the pixel with specified window diff --git a/kivy/graphics/gl_instructions.pyx b/kivy/graphics/gl_instructions.pyx index 6ce1cafdae..f286a7c35f 100644 --- a/kivy/graphics/gl_instructions.pyx +++ b/kivy/graphics/gl_instructions.pyx @@ -52,68 +52,80 @@ cdef class ClearColor(Instruction): cgl.glClearColor(self.r, self.g, self.b, self.a) return 0 - property rgba: + @property + def rgba(self): '''RGBA color used for the clear color, a list of 4 values in the 0-1 range. ''' - def __get__(self): - return [self.r, self.b, self.g, self.a] - def __set__(self, rgba): - cdef list clear_color = [float(x) for x in rgba] - self.r = clear_color[0] - self.g = clear_color[1] - self.b = clear_color[2] - self.a = clear_color[3] - self.flag_update() - - property rgb: + return [self.r, self.b, self.g, self.a] + + @rgba.setter + def rgba(self, rgba): + cdef list clear_color = [float(x) for x in rgba] + self.r = clear_color[0] + self.g = clear_color[1] + self.b = clear_color[2] + self.a = clear_color[3] + self.flag_update() + + @property + def rgb(self): '''RGB color, a list of 3 values in 0-1 range where alpha will be 1. ''' - def __get__(self): - return [self.r, self.g, self.b] - def __set__(self, rgb): - cdef list clear_color = [float(x) for x in rgb] - self.r = clear_color[0] - self.g = clear_color[1] - self.b = clear_color[2] - self.a = 1 - self.flag_update() - - property r: + return [self.r, self.g, self.b] + + @rgb.setter + def rgb(self, rgb): + cdef list clear_color = [float(x) for x in rgb] + self.r = clear_color[0] + self.g = clear_color[1] + self.b = clear_color[2] + self.a = 1 + self.flag_update() + + @property + def r(self): '''Red component, between 0 and 1. ''' - def __get__(self): - return self.r - def __set__(self, r): - self.r = r - self.flag_update() + return self.r - property g: + @r.setter + def r(self, r): + self.r = r + self.flag_update() + + @property + def g(self): '''Green component, between 0 and 1. ''' - def __get__(self): - return self.g - def __set__(self, g): - self.g = g - self.flag_update() + return self.g + + @g.setter + def g(self, g): + self.g = g + self.flag_update() - property b: + @property + def b(self): '''Blue component, between 0 and 1. ''' - def __get__(self): - return self.b - def __set__(self, b): - self.b = b - self.flag_update() + return self.b + + @b.setter + def b(self, b): + self.b = b + self.flag_update() - property a: + @property + def a(self): '''Alpha component, between 0 and 1. ''' - def __get__(self): - return self.a - def __set__(self, a): - self.a = a - self.flag_update() + return self.a + + @a.setter + def a(self, a): + self.a = a + self.flag_update() cdef class ClearBuffers(Instruction): @@ -146,35 +158,41 @@ cdef class ClearBuffers(Instruction): cgl.glClear(mask) return 0 - property clear_color: + @property + def clear_color(self): '''If True, the color buffer will be cleared. ''' - def __get__(self): - return self.clear_color - def __set__(self, value): - value = int(value) - if value == self.clear_color: - return - self.clear_color = value - - property clear_stencil: + return self.clear_color + + @clear_color.setter + def clear_color(self, value): + value = int(value) + if value == self.clear_color: + return + self.clear_color = value + + @property + def clear_stencil(self): '''If True, the stencil buffer will be cleared. ''' - def __get__(self): - return self.clear_stencil - def __set__(self, value): - value = int(value) - if value == self.clear_stencil: - return - self.clear_stencil = value - - property clear_depth: + return self.clear_stencil + + @clear_stencil.setter + def clear_stencil(self, value): + value = int(value) + if value == self.clear_stencil: + return + self.clear_stencil = value + + @property + def clear_depth(self): '''If True, the depth buffer will be cleared. ''' - def __get__(self): - return self.clear_depth - def __set__(self, value): - value = int(value) - if value == self.clear_depth: - return - self.clear_depth = value + return self.clear_depth + + @clear_depth.setter + def clear_depth(self, value): + value = int(value) + if value == self.clear_depth: + return + self.clear_depth = value diff --git a/kivy/graphics/instructions.pyx b/kivy/graphics/instructions.pyx index 09c745496a..286e7d3c04 100644 --- a/kivy/graphics/instructions.pyx +++ b/kivy/graphics/instructions.pyx @@ -100,13 +100,14 @@ cdef class Instruction(ObjectWithUid): self.flags &= ~GI_NO_APPLY_ONCE self.flags &= ~GI_IGNORE - property needs_redraw: - def __get__(self): - if (self.flags & GI_NEEDS_UPDATE) > 0: - return True - return False - - property proxy_ref: + @property + def needs_redraw(self): + if (self.flags & GI_NEEDS_UPDATE) > 0: + return True + return False + + @property + def proxy_ref(self): '''Return a proxy reference to the Instruction i.e. without creating a reference of the widget. See `weakref.proxy <http://docs.python.org/2/library/weakref.html?highlight=proxy#weakref.proxy>`_ @@ -114,10 +115,9 @@ cdef class Instruction(ObjectWithUid): .. versionadded:: 1.7.2 ''' - def __get__(self): - if self.__proxy_ref is None: - self.__proxy_ref = proxy(self) - return self.__proxy_ref + if self.__proxy_ref is None: + self.__proxy_ref = proxy(self) + return self.__proxy_ref cdef class InstructionGroup(Instruction): @@ -308,7 +308,8 @@ cdef class VertexInstruction(Instruction): instr.set_parent(None) self.set_parent(None) - property texture: + @property + def texture(self): '''Property that represents the texture used for drawing this Instruction. You can set a new texture like this:: @@ -321,18 +322,20 @@ cdef class VertexInstruction(Instruction): Usually, you will use the :attr:`source` attribute instead of the texture. ''' - def __get__(self): - return self.texture_binding.texture - def __set__(self, _tex): - cdef Texture tex = _tex - self.texture_binding.texture = tex - if tex: - self.tex_coords = tex.tex_coords - else: - self.tex_coords = [0.0,0.0, 1.0,0.0, 1.0,1.0, 0.0,1.0] - self.flag_update() + return self.texture_binding.texture + + @texture.setter + def texture(self, _tex): + cdef Texture tex = _tex + self.texture_binding.texture = tex + if tex: + self.tex_coords = tex.tex_coords + else: + self.tex_coords = [0.0,0.0, 1.0,0.0, 1.0,1.0, 0.0,1.0] + self.flag_update() - property source: + @property + def source(self): '''This property represents the filename to load the texture from. If you want to use an image as source, do it like this:: @@ -356,13 +359,15 @@ cdef class VertexInstruction(Instruction): :func:`kivy.resources.resource_find` function. ''' - def __get__(self): - return self.texture_binding.source - def __set__(self, source): - self.texture_binding.source = source - self.texture = self.texture_binding._texture + return self.texture_binding.source - property tex_coords: + @source.setter + def source(self, source): + self.texture_binding.source = source + self.texture = self.texture_binding._texture + + @property + def tex_coords(self): '''This property represents the texture coordinates used for drawing the vertex instruction. The value must be a list of 8 values. @@ -384,21 +389,22 @@ cdef class VertexInstruction(Instruction): the texture coordinates to be faster. ''' - def __get__(self): - return ( - self._tex_coords[0], - self._tex_coords[1], - self._tex_coords[2], - self._tex_coords[3], - self._tex_coords[4], - self._tex_coords[5], - self._tex_coords[6], - self._tex_coords[7]) - def __set__(self, tc): - cdef int index - for index in xrange(8): - self._tex_coords[index] = tc[index] - self.flag_update() + return ( + self._tex_coords[0], + self._tex_coords[1], + self._tex_coords[2], + self._tex_coords[3], + self._tex_coords[4], + self._tex_coords[5], + self._tex_coords[6], + self._tex_coords[7]) + + @tex_coords.setter + def tex_coords(self, tc): + cdef int index + for index in xrange(8): + self._tex_coords[index] = tc[index] + self.flag_update() cdef void build(self): pass @@ -523,29 +529,33 @@ cdef class Callback(Instruction): self._shader.use() return 0 - property reset_context: + @property + def reset_context(self): '''Set this to True if you want to reset the OpenGL context for Kivy after the callback has been called. ''' - def __get__(self): - return self._reset_context - def __set__(self, value): - cdef int ivalue = int(value) - if self._reset_context == ivalue: - return - self._reset_context = ivalue - self.flag_update() + return self._reset_context - property callback: + @reset_context.setter + def reset_context(self, value): + cdef int ivalue = int(value) + if self._reset_context == ivalue: + return + self._reset_context = ivalue + self.flag_update() + + @property + def callback(self): '''Property for getting/setting func. ''' - def __get__(self): - return self.func - def __set__(self, object func): - if self.func == func: - return - self.func = func - self.flag_update() + return self.func + + @callback.setter + def callback(self, object func): + if self.func == func: + return + self.func = func + self.flag_update() cdef class CanvasBase(InstructionGroup): @@ -648,44 +658,45 @@ cdef class Canvas(CanvasBase): ''' self.flag_update() - property before: + @property + def before(self): '''Property for getting the 'before' group. ''' - def __get__(self): - if self._before is None: - self._before = CanvasBase() - self.insert(0, self._before) - return self._before + if self._before is None: + self._before = CanvasBase() + self.insert(0, self._before) + return self._before - property after: + @property + def after(self): '''Property for getting the 'after' group. ''' - def __get__(self): - cdef CanvasBase c - if self._after is None: - c = CanvasBase() - self.add(c) - self._after = c - return self._after - - property has_before: + cdef CanvasBase c + if self._after is None: + c = CanvasBase() + self.add(c) + self._after = c + return self._after + + @property + def has_before(self): '''Property to see if the :attr:`before` group has already been created. .. versionadded:: 1.7.0 ''' - def __get__(self): - return self._before is not None + return self._before is not None - property has_after: + @property + def has_after(self): '''Property to see if the :attr:`after` group has already been created. .. versionadded:: 1.7.0 ''' - def __get__(self): - return self._after is not None + return self._after is not None - property opacity: + @property + def opacity(self): '''Property to get/set the opacity value of the canvas. .. versionadded:: 1.4.1 @@ -703,11 +714,12 @@ cdef class Canvas(CanvasBase): frag_color = color * vec4(1.0, 1.0, 1.0, opacity); ''' - def __get__(self): - return self._opacity - def __set__(self, value): - self._opacity = value - self.flag_update() + return self._opacity + + @opacity.setter + def opacity(self, value): + self._opacity = value + self.flag_update() # Active Canvas and getActiveCanvas function is used # by instructions, so they know which canvas to add @@ -899,13 +911,14 @@ cdef class RenderContext(Canvas): def __getitem__(self, key): return self._shader.uniform_values[key] - property shader: + @property + def shader(self): '''Return the shader attached to the render context. ''' - def __get__(self): - return self._shader + return self._shader - property use_parent_projection: + @property + def use_parent_projection(self): '''If True, the parent projection matrix will be used. .. versionadded:: 1.7.0 @@ -918,15 +931,17 @@ cdef class RenderContext(Canvas): rc = RenderContext(use_parent_projection=True) ''' - def __get__(self): - return bool(self._use_parent_projection) - def __set__(self, value): - cdef cvalue = int(bool(value)) - if self._use_parent_projection != cvalue: - self._use_parent_projection = cvalue - self.flag_update() + return bool(self._use_parent_projection) + + @use_parent_projection.setter + def use_parent_projection(self, value): + cdef cvalue = int(bool(value)) + if self._use_parent_projection != cvalue: + self._use_parent_projection = cvalue + self.flag_update() - property use_parent_modelview: + @property + def use_parent_modelview(self): '''If True, the parent modelview matrix will be used. .. versionadded:: 1.7.0 @@ -939,28 +954,31 @@ cdef class RenderContext(Canvas): rc = RenderContext(use_parent_modelview=True) ''' - def __get__(self): - return bool(self._use_parent_modelview) - def __set__(self, value): - cdef cvalue = int(bool(value)) - if self._use_parent_modelview != cvalue: - self._use_parent_modelview = cvalue - self.flag_update() + return bool(self._use_parent_modelview) + + @use_parent_modelview.setter + def use_parent_modelview(self, value): + cdef cvalue = int(bool(value)) + if self._use_parent_modelview != cvalue: + self._use_parent_modelview = cvalue + self.flag_update() - property use_parent_frag_modelview: + @property + def use_parent_frag_modelview(self): '''If True, the parent fragment modelview matrix will be used. .. versionadded:: 1.10.1 rc = RenderContext(use_parent_frag_modelview=True) ''' - def __get__(self): - return bool(self._use_parent_frag_modelview) - def __set__(self, value): - cdef cvalue = int(bool(value)) - if self._use_parent_frag_modelview != cvalue: - self._use_parent_frag_modelview = cvalue - self.flag_update() + return bool(self._use_parent_frag_modelview) + + @use_parent_frag_modelview.setter + def use_parent_frag_modelview(self, value): + cdef cvalue = int(bool(value)) + if self._use_parent_frag_modelview != cvalue: + self._use_parent_frag_modelview = cvalue + self.flag_update() cdef RenderContext ACTIVE_CONTEXT = None diff --git a/kivy/graphics/scissor_instructions.pyx b/kivy/graphics/scissor_instructions.pyx index 3b3f0e9da0..d2815186e3 100644 --- a/kivy/graphics/scissor_instructions.pyx +++ b/kivy/graphics/scissor_instructions.pyx @@ -77,13 +77,13 @@ cdef class ScissorStack: def __init__(self): self._stack = [] - property empty: - def __get__(self): - return True if len(self._stack) is 0 else False + @property + def empty(self): + return True if len(self._stack) is 0 else False - property back: - def __get__(self): - return self._stack[-1] + @property + def back(self): + return self._stack[-1] def push(self, element): self._stack.append(element) @@ -110,53 +110,65 @@ cdef class ScissorPush(Instruction): cdef int _height cdef Rect _rect - property x: - def __get__(self): - return self._x - def __set__(self, value): - self._x = value - self._rect = Rect(self._x, self._y, self._width, self._height) - self.flag_update() - - property y: - def __get__(self): - return self._y - def __set__(self, value): - self._y = value - self._rect = Rect(self._x, self._y, self._width, self._height) - self.flag_update() - - property width: - def __get__(self): - return self._width - def __set__(self, value): - self._width = value - self._rect = Rect(self._x, self._y, self._width, self._height) - self.flag_update() - - property height: - def __get__(self): - return self._height - def __set__(self, value): - self._height = value - self._rect = Rect(self._x, self._y, self._width, self._height) - self.flag_update() - - property pos: - def __get__(self): - return self._x, self._y - def __set__(self, value): - self._x, self._y = value - self._rect = Rect(self._x, self._y, self._width, self._height) - self.flag_update() - - property size: - def __get__(self): - return self._width, self._height - def __set__(self, value): - self._width, self._height = value - self._rect = Rect(self._x, self._y, self._width, self._height) - self.flag_update() + @property + def x(self): + return self._x + + @x.setter + def x(self, value): + self._x = value + self._rect = Rect(self._x, self._y, self._width, self._height) + self.flag_update() + + @property + def y(self): + return self._y + + @y.setter + def y(self, value): + self._y = value + self._rect = Rect(self._x, self._y, self._width, self._height) + self.flag_update() + + @property + def width(self): + return self._width + + @width.setter + def width(self, value): + self._width = value + self._rect = Rect(self._x, self._y, self._width, self._height) + self.flag_update() + + @property + def height(self): + return self._height + + @height.setter + def height(self, value): + self._height = value + self._rect = Rect(self._x, self._y, self._width, self._height) + self.flag_update() + + @property + def pos(self): + return self._x, self._y + + @pos.setter + def pos(self, value): + self._x, self._y = value + self._rect = Rect(self._x, self._y, self._width, self._height) + self.flag_update() + + @property + def size(self): + return self._width, self._height + + @size.setter + def size(self, value): + self._width, self._height = value + self._rect = Rect(self._x, self._y, self._width, self._height) + self.flag_update() def __init__(self, **kwargs): self._x, self._y = kwargs.pop( diff --git a/kivy/graphics/shader.pyx b/kivy/graphics/shader.pyx index 53286893e9..7cf6fbae43 100644 --- a/kivy/graphics/shader.pyx +++ b/kivy/graphics/shader.pyx @@ -642,7 +642,8 @@ cdef class Shader: # Python access # - property source: + @property + def source(self): '''glsl source code. source should be the filename of a glsl shader that contains both the @@ -652,66 +653,71 @@ cdef class Shader: .. versionadded:: 1.6.0 ''' - def __get__(self): - return self._source - def __set__(self, object source): - self._source = source - if source is None: - self.vs = None - self.fs = None - return - self.vert_src = "" - self.frag_src = "" - glsl_source = "\n" - Logger.info('Shader: Read <{}>'.format(self._source)) - with open(self._source) as fin: - glsl_source += fin.read() - sections = glsl_source.split('\n---') - for section in sections: - lines = section.split('\n') - if lines[0].lower().startswith("vertex"): - _vs = '\n'.join(lines[1:]) - self.vert_src = _vs.replace('$HEADER$', header_vs) - if lines[0].lower().startswith("fragment"): - _fs = '\n'.join(lines[1:]) - self.frag_src = _fs.replace('$HEADER$', header_fs) - self.build_vertex(0) - self.build_fragment(0) - self.link_program() - - property vs: + return self._source + + @source.setter + def source(self, object source): + self._source = source + if source is None: + self.vs = None + self.fs = None + return + self.vert_src = "" + self.frag_src = "" + glsl_source = "\n" + Logger.info('Shader: Read <{}>'.format(self._source)) + with open(self._source) as fin: + glsl_source += fin.read() + sections = glsl_source.split('\n---') + for section in sections: + lines = section.split('\n') + if lines[0].lower().startswith("vertex"): + _vs = '\n'.join(lines[1:]) + self.vert_src = _vs.replace('$HEADER$', header_vs) + if lines[0].lower().startswith("fragment"): + _fs = '\n'.join(lines[1:]) + self.frag_src = _fs.replace('$HEADER$', header_fs) + self.build_vertex(0) + self.build_fragment(0) + self.link_program() + + @property + def vs(self): '''Vertex shader source code. If you set a new vertex shader code source, it will be automatically compiled and will replace the current vertex shader. ''' - def __get__(self): - return self.vert_src - def __set__(self, object source): - if source is None: - source = default_vs - source = source.replace('$HEADER$', header_vs) - self.vert_src = source - self.build_vertex() - - property fs: + return self.vert_src + + @vs.setter + def vs(self, object source): + if source is None: + source = default_vs + source = source.replace('$HEADER$', header_vs) + self.vert_src = source + self.build_vertex() + + @property + def fs(self): '''Fragment shader source code. If you set a new fragment shader code source, it will be automatically compiled and will replace the current fragment shader. ''' - def __get__(self): - return self.frag_src - def __set__(self, object source): - if source is None: - source = default_fs - source = source.replace('$HEADER$', header_fs) - self.frag_src = source - self.build_fragment() - - property success: + return self.frag_src + + @fs.setter + def fs(self, object source): + if source is None: + source = default_fs + source = source.replace('$HEADER$', header_fs) + self.frag_src = source + self.build_fragment() + + @property + def success(self): '''Indicate whether the shader loaded successfully and is ready for usage or not. ''' - def __get__(self): - return self._success + return self._success diff --git a/kivy/graphics/stencil_instructions.pyx b/kivy/graphics/stencil_instructions.pyx index ec472a1479..86d41ac36d 100644 --- a/kivy/graphics/stencil_instructions.pyx +++ b/kivy/graphics/stencil_instructions.pyx @@ -254,7 +254,8 @@ cdef class StencilUse(Instruction): stencil_apply_state(_stencil_state, False) return 0 - property func_op: + @property + def func_op(self): '''Determine the stencil operation to use for glStencilFunc(). Can be one of 'never', 'less', 'equal', 'lequal', 'greater', 'notequal', 'gequal' or 'always'. @@ -264,18 +265,18 @@ cdef class StencilUse(Instruction): .. versionadded:: 1.5.0 ''' - def __get__(self): - index = _gl_stencil_op.values().index(self._op) - if PY2: - return _gl_stencil_op.keys()[index] - else: - return list(_gl_stencil_op.keys())[index] - - def __set__(self, x): - cdef int op = _stencil_op_to_gl(x) - if op != self._op: - self._op = op - self.flag_update() + index = _gl_stencil_op.values().index(self._op) + if PY2: + return _gl_stencil_op.keys()[index] + else: + return list(_gl_stencil_op.keys())[index] + + @func_op.setter + def func_op(self, x): + cdef int op = _stencil_op_to_gl(x) + if op != self._op: + self._op = op + self.flag_update() cdef class StencilUnUse(Instruction): diff --git a/kivy/graphics/svg.pyx b/kivy/graphics/svg.pyx index d5876e5d2f..06a634d2e0 100644 --- a/kivy/graphics/svg.pyx +++ b/kivy/graphics/svg.pyx @@ -144,6 +144,10 @@ cdef list kv_color_to_int_color(color): c = [int(255*x) for x in color] return c if len(c) == 4 else c + [255] +cdef int_color_to_kv_color(color): + c = [int(x)/255.0 for x in color] + return c if len(c) == 4 else c + [255] + cdef parse_color(c, current_color=None): cdef int r, g, b, a if c is None or c == 'none': @@ -411,86 +415,102 @@ cdef class Svg(RenderContext): size=(2, 1), colorfmt="rgba") self.line_texture.blit_buffer( b"\xff\xff\xff\xff\xff\xff\xff\x00", colorfmt="rgba") + + self._filename = None self.filename = filename - property anchor_x: + + @property + def anchor_x(self): ''' Horizontal anchor position for scaling and rotations. Defaults to 0. The symbolic values 'left', 'center' and 'right' are also accepted. ''' + return self._anchor_x + + @x.setter + def x(self, anchor_x): + self._anchor_x = anchor_x + if self._anchor_x == 'left': + self._a_x = 0 + elif self._anchor_x == 'center': + self._a_x = self.width * .5 + elif self._anchor_x == 'right': + self._a_x = self.width + else: + self._a_x = self._anchor_x - def __set__(self, anchor_x): - self._anchor_x = anchor_x - if self._anchor_x == 'left': - self._a_x = 0 - elif self._anchor_x == 'center': - self._a_x = self.width * .5 - elif self._anchor_x == 'right': - self._a_x = self.width - else: - self._a_x = self._anchor_x - - def __get__(self): - return self._anchor_x - - - property anchor_y: + @property + def anchor_y(self): ''' Vertical anchor position for scaling and rotations. Defaults to 0. The symbolic values 'bottom', 'center' and 'top' are also accepted. ''' + return self._anchor_y + + @anchor_y.setter + def anchor_y(self, anchor_y): + self._anchor_y = anchor_y + if self._anchor_y == 'bottom': + self._a_y = 0 + elif self._anchor_y == 'center': + self._a_y = self.height * .5 + elif self._anchor_y == 'top': + self._a_y = self.height + else: + self._a_y = self.anchor_y - def __set__(self, anchor_y): - self._anchor_y = anchor_y - if self._anchor_y == 'bottom': - self._a_y = 0 - elif self._anchor_y == 'center': - self._a_y = self.height * .5 - elif self._anchor_y == 'top': - self._a_y = self.height - else: - self._a_y = self.anchor_y - - def __get__(self): - return self._anchor_y + @property + def color(self): + '''The default color + Used for SvgElements that specify "currentColor" - '''Set the default color. + .. versionchanged:: 1.10.3 - Used for SvgElements that specify "currentColor" + The color is gettable and settable - .. versionadded:: 1.9.1 + .. versionadded:: 1.9.1 + ''' + return int_color_to_kv_color(self.current_color) - ''' - property color: - def __set__(self, color): - self.current_color = kv_color_to_int_color(color) - self.reload() + @color.setter + def color(self, color): + self.current_color = kv_color_to_int_color(color) + self.reload() - property filename: - '''Filename to load. + @property + def filename(self): + '''filename to load. The parsing and rendering is done as soon as you set the filename. + + .. versionchanged:: 1.10.3 + You can get the used filename ''' - def __set__(self, filename): - Logger.debug('Svg: Loading {}'.format(filename)) - # check gzip - start = time() - with open(filename, 'rb') as fd: - header = fd.read(3) - if header == '\x1f\x8b\x08': - import gzip - fd = gzip.open(filename, 'rb') - else: - fd = open(filename, 'rb') - try: - #save the tree for later reloading - self.tree = parse(fd) - self.reload() - end = time() - Logger.debug("Svg: Loaded {} in {:.2f}s".format(filename, end - start)) - finally: - fd.close() + return self._filename + + @filename.setter + def filename(self, filename): + Logger.debug('Svg: Loading {}'.format(filename)) + # check gzip + start = time() + with open(filename, 'rb') as fd: + header = fd.read(3) + if header == '\x1f\x8b\x08': + import gzip + fd = gzip.open(filename, 'rb') + else: + fd = open(filename, 'rb') + try: + #save the tree for later reloading + self.tree = parse(fd) + self.reload() + end = time() + Logger.debug("Svg: Loaded {} in {:.2f}s".format(filename, end - start)) + finally: + self._filename = filename + fd.close() cdef void reload(self) except *: # parse tree diff --git a/kivy/graphics/texture.pyx b/kivy/graphics/texture.pyx index 00f467664f..56a195d629 100644 --- a/kivy/graphics/texture.pyx +++ b/kivy/graphics/texture.pyx @@ -1137,94 +1137,99 @@ cdef class Texture: id(self), self._id, self.size, self.colorfmt, self.bufferfmt, self._source, len(self.observers)) - property size: + @property + def size(self): '''Return the (width, height) of the texture (readonly). ''' - def __get__(self): - return (self.width, self.height) + return (self.width, self.height) - property mipmap: + @property + def mipmap(self): '''Return True if the texture has mipmap enabled (readonly). ''' - def __get__(self): - return self._mipmap + return self._mipmap - property id: + @property + def id(self): '''Return the OpenGL ID of the texture (readonly). ''' - def __get__(self): - return self._id + return self._id - property target: + @property + def target(self): '''Return the OpenGL target of the texture (readonly). ''' - def __get__(self): - return self._target + return self._target - property width: + @property + def width(self): '''Return the width of the texture (readonly). ''' - def __get__(self): - return self._width + return self._width - property height: + @property + def height(self): '''Return the height of the texture (readonly). ''' - def __get__(self): - return self._height + return self._height - property tex_coords: + @property + def tex_coords(self): '''Return the list of tex_coords (opengl). ''' - def __get__(self): - return ( - self._tex_coords[0], - self._tex_coords[1], - self._tex_coords[2], - self._tex_coords[3], - self._tex_coords[4], - self._tex_coords[5], - self._tex_coords[6], - self._tex_coords[7]) - - property uvpos: + return ( + self._tex_coords[0], + self._tex_coords[1], + self._tex_coords[2], + self._tex_coords[3], + self._tex_coords[4], + self._tex_coords[5], + self._tex_coords[6], + self._tex_coords[7]) + + @property + def uvpos(self): '''Get/set the UV position inside the texture. ''' - def __get__(self): - return (self._uvx, self._uvy) - def __set__(self, x): - self._uvx, self._uvy = x - self.update_tex_coords() + return (self._uvx, self._uvy) - property uvsize: + @uvpos.setter + def uvpos(self, x): + self._uvx, self._uvy = x + self.update_tex_coords() + + @property + def uvsize(self): '''Get/set the UV size inside the texture. .. warning:: The size can be negative if the texture is flipped. ''' - def __get__(self): - return (self._uvw, self._uvh) - def __set__(self, x): - self._uvw, self._uvh = x - self.update_tex_coords() + return (self._uvw, self._uvh) + + @uvsize.setter + def uvsize(self, x): + self._uvw, self._uvh = x + self.update_tex_coords() - property colorfmt: + @property + def colorfmt(self): '''Return the color format used in this texture (readonly). .. versionadded:: 1.0.7 ''' - def __get__(self): - return self._colorfmt + return self._colorfmt - property bufferfmt: + @property + def bufferfmt(self): '''Return the buffer format used in this texture (readonly). .. versionadded:: 1.2.0 ''' - def __get__(self): - return self._bufferfmt + return self._bufferfmt - property min_filter: + @property + def min_filter(self): '''Get/set the min filter texture. Available values: - linear @@ -1238,12 +1243,14 @@ cdef class Texture: of these values : http://www.khronos.org/opengles/sdk/docs/man/xhtml/glTexParameter.xml. ''' - def __get__(self): - return self._min_filter - def __set__(self, x): - self.set_min_filter(x) + return self._min_filter - property mag_filter: + @min_filter.setter + def min_filter(self, x): + self.set_min_filter(x) + + @property + def mag_filter(self): '''Get/set the mag filter texture. Available values: - linear @@ -1253,12 +1260,14 @@ cdef class Texture: of these values : http://www.khronos.org/opengles/sdk/docs/man/xhtml/glTexParameter.xml. ''' - def __get__(self): - return self._mag_filter - def __set__(self, x): - self.set_mag_filter(x) + return self._mag_filter + + @mag_filter.setter + def mag_filter(self, x): + self.set_mag_filter(x) - property wrap: + @property + def wrap(self): '''Get/set the wrap texture. Available values: - repeat @@ -1269,20 +1278,21 @@ cdef class Texture: of these values : http://www.khronos.org/opengles/sdk/docs/man/xhtml/glTexParameter.xml. ''' - def __get__(self): - return self._wrap - def __set__(self, wrap): - self.set_wrap(wrap) + return self._wrap + + @wrap.setter + def wrap(self, wrap): + self.set_wrap(wrap) - property pixels: + @property + def pixels(self): '''Get the pixels texture, in RGBA format only, unsigned byte. The origin of the image is at bottom left. .. versionadded:: 1.7.0 ''' - def __get__(self): - from kivy.graphics.fbo import Fbo - return Fbo(size=self.size, texture=self).pixels + from kivy.graphics.fbo import Fbo + return Fbo(size=self.size, texture=self).pixels cdef class TextureRegion(Texture): @@ -1338,17 +1348,17 @@ cdef class TextureRegion(Texture): cpdef bind(self): self.owner.bind() - property pixels: - def __get__(self): - from kivy.graphics.fbo import Fbo - from kivy.graphics import Color, Rectangle - fbo = Fbo(size=self.size) - fbo.clear() - self.flip_vertical() - with fbo: - Color(1, 1, 1) - Rectangle(size=self.size, texture=self, - tex_coords=self.tex_coords) - fbo.draw() - self.flip_vertical() - return fbo.pixels + @property + def pixels(self): + from kivy.graphics.fbo import Fbo + from kivy.graphics import Color, Rectangle + fbo = Fbo(size=self.size) + fbo.clear() + self.flip_vertical() + with fbo: + Color(1, 1, 1) + Rectangle(size=self.size, texture=self, + tex_coords=self.tex_coords) + fbo.draw() + self.flip_vertical() + return fbo.pixels diff --git a/kivy/graphics/vertex_instructions.pyx b/kivy/graphics/vertex_instructions.pyx index 348f2a2224..beff35f08e 100644 --- a/kivy/graphics/vertex_instructions.pyx +++ b/kivy/graphics/vertex_instructions.pyx @@ -209,7 +209,8 @@ cdef class Bezier(VertexInstruction): free(vertices) free(indices) - property points: + @property + def points(self): '''Property for getting/settings the points of the triangle. .. warning:: @@ -217,49 +218,56 @@ cdef class Bezier(VertexInstruction): This will always reconstruct the whole graphic from the new points list. It can be very CPU intensive. ''' - def __get__(self): - return self._points - def __set__(self, points): - self._points = list(points) - if self._loop: - self._points.extend(points[:2]) - self.flag_update() - - property segments: + return self._points + + @points.setter + def points(self, points): + self._points = list(points) + if self._loop: + self._points.extend(points[:2]) + self.flag_update() + + @property + def segments(self): '''Property for getting/setting the number of segments of the curve. ''' - def __get__(self): - return self._segments - def __set__(self, value): - if value <= 1: - raise GraphicException('Invalid segments value, must be >= 2') - self._segments = value - self.flag_update() - - property dash_length: + return self._segments + + @segments.setter + def segments(self, value): + if value <= 1: + raise GraphicException('Invalid segments value, must be >= 2') + self._segments = value + self.flag_update() + + @property + def dash_length(self): '''Property for getting/setting the length of the dashes in the curve. ''' - def __get__(self): - return self._dash_length + return self._dash_length - def __set__(self, value): - if value < 0: - raise GraphicException('Invalid dash_length value, must be >= 0') - self._dash_length = value - self.flag_update() - property dash_offset: + @dash_length.setter + def dash_length(self, value): + if value < 0: + raise GraphicException('Invalid dash_length value, must be >= 0') + self._dash_length = value + self.flag_update() + + @property + def dash_offset(self): '''Property for getting/setting the offset between the dashes in the curve. ''' - def __get__(self): - return self._dash_offset + return self._dash_offset + - def __set__(self, value): - if value < 0: - raise GraphicException('Invalid dash_offset value, must be >= 0') - self._dash_offset = value - self.flag_update() + @dash_offset.setter + def dash_offset(self, value): + if value < 0: + raise GraphicException('Invalid dash_offset value, must be >= 0') + self._dash_offset = value + self.flag_update() cdef class StripMesh(VertexInstruction): @@ -452,44 +460,50 @@ cdef class Mesh(VertexInstruction): self.batch.set_data(&self._pvertices[0], <int>(self.vcount / vsize), &self._pindices[0], <int>self.icount) - property vertices: + @property + def vertices(self): '''List of x, y, u, v coordinates used to construct the Mesh. Right now, the Mesh instruction doesn't allow you to change the format of the vertices, which means it's only x, y + one texture coordinate. ''' - def __get__(self): - return self._vertices - def __set__(self, value): - self._vertices, self._fvertices = _ensure_float_view(value, - &self._pvertices) - self.vcount = len(self._vertices) - self.flag_update() + return self._vertices - property indices: + @vertices.setter + def vertices(self, value): + self._vertices, self._fvertices = _ensure_float_view(value, + &self._pvertices) + self.vcount = len(self._vertices) + self.flag_update() + + @property + def indices(self): '''Vertex indices used to specify the order when drawing the mesh. ''' - def __get__(self): - return self._indices - def __set__(self, value): - if gles_limts and len(value) > 65535: - raise GraphicException( - 'Cannot upload more than 65535 indices (OpenGL ES 2' - ' limitation - consider setting KIVY_GLES_LIMITS)') - self._indices, self._lindices = _ensure_ushort_view(value, - &self._pindices) - self.icount = len(self._indices) - self.flag_update() - - property mode: + return self._indices + + @indices.setter + def indices(self, value): + if gles_limts and len(value) > 65535: + raise GraphicException( + 'Cannot upload more than 65535 indices (OpenGL ES 2' + ' limitation - consider setting KIVY_GLES_LIMITS)') + self._indices, self._lindices = _ensure_ushort_view(value, + &self._pindices) + self.icount = len(self._indices) + self.flag_update() + + @property + def mode(self): '''VBO Mode used for drawing vertices/indices. Can be one of 'points', 'line_strip', 'line_loop', 'lines', 'triangles', 'triangle_strip' or 'triangle_fan'. ''' - def __get__(self): - return self.batch.get_mode() - def __set__(self, mode): - self.batch.set_mode(mode) + return self.batch.get_mode() + + @mode.setter + def mode(self, mode): + self.batch.set_mode(mode) @@ -630,33 +644,37 @@ cdef class Point(VertexInstruction): if self.parent is not None: self.parent.flag_update() - property points: + @property + def points(self): '''Property for getting/settings the center points in the points list. Each pair of coordinates specifies the center of a new point. ''' - def __get__(self): - return self._points - def __set__(self, points): - if self._points == points: - return - cdef list _points = list(points) - if len(_points) > 2**15-2: - raise GraphicException('Too many elements (limit is 2^15-2)') - self._points = list(points) - self.flag_update() - - property pointsize: + return self._points + + @points.setter + def points(self, points): + if self._points == points: + return + cdef list _points = list(points) + if len(_points) > 2**15-2: + raise GraphicException('Too many elements (limit is 2^15-2)') + self._points = list(points) + self.flag_update() + + @property + def pointsize(self): '''Property for getting/setting point size. The size is measured from the center to the edge, so a value of 1.0 means the real size will be 2.0 x 2.0. ''' - def __get__(self): - return self._pointsize - def __set__(self, float pointsize): - if self._pointsize == pointsize: - return - self._pointsize = pointsize - self.flag_update() + return self._pointsize + + @pointsize.setter + def pointsize(self, float pointsize): + if self._pointsize == pointsize: + return + self._pointsize = pointsize + self.flag_update() cdef class Triangle(VertexInstruction): @@ -698,14 +716,16 @@ cdef class Triangle(VertexInstruction): self.batch.set_data(vertices, 3, indices, 3) - property points: + @property + def points(self): '''Property for getting/settings points of the triangle. ''' - def __get__(self): - return self._points - def __set__(self, points): - self._points = list(points) - self.flag_update() + return self._points + + @points.setter + def points(self, points): + self._points = list(points) + self.flag_update() cdef class Quad(VertexInstruction): @@ -752,18 +772,20 @@ cdef class Quad(VertexInstruction): self.batch.set_data(vertices, 4, indices, 6) - property points: + @property + def points(self): '''Property for getting/settings points of the quad. ''' - def __get__(self): - return self._points - def __set__(self, points): - self._points = list(points) - if len(self._points) != 8: - raise GraphicException( - 'Quad: invalid number of points (%d instead of 8)' % len( - self._points)) - self.flag_update() + return self._points + + @points.setter + def points(self, points): + self._points = list(points) + if len(self._points) != 8: + raise GraphicException( + 'Quad: invalid number of points (%d instead of 8)' % len( + self._points)) + self.flag_update() cdef class Rectangle(VertexInstruction): @@ -812,33 +834,37 @@ cdef class Rectangle(VertexInstruction): self.batch.set_data(vertices, 4, indices, 6) - property pos: + @property + def pos(self): '''Property for getting/settings the position of the rectangle. ''' - def __get__(self): - return (self.x, self.y) - def __set__(self, pos): - cdef float x, y - x, y = pos - if self.x == x and self.y == y: - return - self.x = x - self.y = y - self.flag_update() - - property size: + return (self.x, self.y) + + @pos.setter + def pos(self, pos): + cdef float x, y + x, y = pos + if self.x == x and self.y == y: + return + self.x = x + self.y = y + self.flag_update() + + @property + def size(self): '''Property for getting/settings the size of the rectangle. ''' - def __get__(self): - return (self.w, self.h) - def __set__(self, size): - cdef float w, h - w, h = size - if self.w == w and self.h == h: - return - self.w = w - self.h = h - self.flag_update() + return (self.w, self.h) + + @size.setter + def size(self, size): + cdef float w, h + w, h = size + if self.w == w and self.h == h: + return + self.w = w + self.h = h + self.flag_update() @@ -1052,34 +1078,40 @@ cdef class BorderImage(Rectangle): self.batch.set_data(<vertex_t *>vertices, 16, indices, 54) - property border: + @property + def border(self): '''Property for getting/setting the border of the class. ''' - def __get__(self): - return self._border - def __set__(self, b): - self._border = list(b) - self.flag_update() + return self._border - property auto_scale: + @border.setter + def border(self, b): + self._border = list(b) + self.flag_update() + + @property + def auto_scale(self): '''Property for setting if the corners are automatically scaled when the BorderImage is too small. ''' - def __get__(self): - return self._auto_scale + return self._auto_scale + - def __set__(self, str value): - self._auto_scale = value - self.flag_update() + @auto_scale.setter + def auto_scale(self, str value): + self._auto_scale = value + self.flag_update() - property display_border: + @property + def display_border(self): '''Property for getting/setting the border display size. ''' - def __get__(self): - return self._display_border - def __set__(self, b): - self._display_border = list(b) - self.flag_update() + return self._display_border + + @display_border.setter + def display_border(self, b): + self._display_border = list(b) + self.flag_update() cdef class Ellipse(Rectangle): '''A 2D ellipse. @@ -1196,32 +1228,38 @@ cdef class Ellipse(Rectangle): free(vertices) free(indices) - property segments: + @property + def segments(self): '''Property for getting/setting the number of segments of the ellipse. ''' - def __get__(self): - return self._segments - def __set__(self, value): - self._segments = value - self.flag_update() + return self._segments + + @segments.setter + def segments(self, value): + self._segments = value + self.flag_update() - property angle_start: + @property + def angle_start(self): '''Start angle of the ellipse in degrees, defaults to 0. ''' - def __get__(self): - return self._angle_start - def __set__(self, value): - self._angle_start = value - self.flag_update() + return self._angle_start - property angle_end: + @angle_start.setter + def angle_start(self, value): + self._angle_start = value + self.flag_update() + + @property + def angle_end(self): '''End angle of the ellipse in degrees, defaults to 360. ''' - def __get__(self): - return self._angle_end - def __set__(self, value): - self._angle_end = value - self.flag_update() + return self._angle_end + + @angle_end.setter + def angle_end(self, value): + self._angle_end = value + self.flag_update() cdef class RoundedRectangle(Rectangle): @@ -1514,20 +1552,24 @@ cdef class RoundedRectangle(Rectangle): return points - property segments: + @property + def segments(self): '''Property for getting/setting the number of segments for each corner. ''' - def __get__(self): - return self._segments - def __set__(self, value): - self._segments = self._check_segments(value) - self.flag_update() + return self._segments - property radius: + @segments.setter + def segments(self, value): + self._segments = self._check_segments(value) + self.flag_update() + + @property + def radius(self): '''Corner radii of the rounded rectangle, defaults to [10,]. ''' - def __get__(self): - return self._radius - def __set__(self, value): - self._radius = self._check_radius(value) - self.flag_update() + return self._radius + + @radius.setter + def radius(self, value): + self._radius = self._check_radius(value) + self.flag_update() diff --git a/kivy/lib/vidcore_lite/bcm.pyx b/kivy/lib/vidcore_lite/bcm.pyx index 4d3636af82..c1ad965546 100644 --- a/kivy/lib/vidcore_lite/bcm.pyx +++ b/kivy/lib/vidcore_lite/bcm.pyx @@ -9,33 +9,37 @@ cdef class Rect: self._vc_rect.width = width self._vc_rect.height = height - property x: - def __get__(self): - return self._vc_rect.x + @property + def x(self): + return self._vc_rect.x - def __set__(self, int32_t x): - self._vc_rect.x = x + @x.setter + def x(self, int32_t x): + self._vc_rect.x = x - property y: - def __get__(self): - return self._vc_rect.y + @property + def y(self): + return self._vc_rect.y - def __set__(self, int32_t y): - self._vc_rect.y = y + @y.setter + def y(self, int32_t y): + self._vc_rect.y = y - property width: - def __get__(self): - return self._vc_rect.width + @property + def width(self): + return self._vc_rect.width - def __set__(self, int32_t width): - self._vc_rect.width = width + @width.setter + def width(self, int32_t width): + self._vc_rect.width = width - property height: - def __get__(self): - return self._vc_rect.height + @property + def height(self): + return self._vc_rect.height - def __set__(self, int32_t h): - self._vc_rect.height = h + @height.setter + def height(self, int32_t h): + self._vc_rect.height = h cdef class DisplayHandle: diff --git a/kivy/properties.pyx b/kivy/properties.pyx index ca7522d951..a3b078b7ad 100644 --- a/kivy/properties.pyx +++ b/kivy/properties.pyx @@ -395,9 +395,9 @@ cdef class Property: if 'errorhandler' in kw and not callable(self.errorhandler): raise ValueError('errorhandler %s not callable' % self.errorhandler) - property name: - def __get__(self): - return self._name + @property + def name(self): + return self._name def __repr__(self): return '<{} name={}>'.format(self.__class__.__name__, self._name) @@ -1166,28 +1166,27 @@ cdef class BoundedNumericProperty(Property): self.name, _f_max)) return True - property bounds: + @property + def bounds(self): '''Return min/max of the value. .. versionadded:: 1.0.9 ''' + if self.use_min == 1: + _min = self.min + elif self.use_min == 2: + _min = self.f_min + else: + _min = None - def __get__(self): - if self.use_min == 1: - _min = self.min - elif self.use_min == 2: - _min = self.f_min - else: - _min = None - - if self.use_max == 1: - _max = self.max - elif self.use_max == 2: - _max = self.f_max - else: - _max = None + if self.use_max == 1: + _max = self.max + elif self.use_max == 2: + _max = self.f_max + else: + _max = None - return _min, _max + return _min, _max cdef class OptionProperty(Property): @@ -1232,14 +1231,13 @@ cdef class OptionProperty(Property): self.name, value, ps.options)) - property options: + @property + def options(self): '''Return the options available. .. versionadded:: 1.0.9 ''' - - def __get__(self): - return self.options + return self.options class ObservableReferenceList(ObservableList): def __setitem__(self, key, value, update_properties=True): diff --git a/kivy/weakproxy.pyx b/kivy/weakproxy.pyx index 445b245f21..976ccfed80 100644 --- a/kivy/weakproxy.pyx +++ b/kivy/weakproxy.pyx @@ -35,9 +35,9 @@ cdef class WeakProxy(object): def __delattr__(self, name): delattr(self.__ref__(), name) - property __class__: - def __get__(self): - return self.__ref__().__class__ + @property + def __class__(self): + return self.__ref__().__class__ def __dir__(self): r = self.__ref() diff --git a/setup.py b/setup.py index fbd8773aab..ff43869598 100644 --- a/setup.py +++ b/setup.py @@ -74,7 +74,7 @@ def get_version(filename='kivy/version.py'): return VERSION -MIN_CYTHON_STRING = '0.23' +MIN_CYTHON_STRING = '0.24' MIN_CYTHON_VERSION = LooseVersion(MIN_CYTHON_STRING) MAX_CYTHON_STRING = '0.28.3' MAX_CYTHON_VERSION = LooseVersion(MAX_CYTHON_STRING)
kedro-org__kedro-1734
Make Kedro instantiate datasets from `kedro-datasets` with higher priority than `kedro.extras.datasets` https://github.com/kedro-org/kedro/blob/1b1558952c059eea5636d9ccf9a883f9cf4ef643/kedro/io/core.py#L346
[ { "content": "\"\"\"This module provides a set of classes which underpin the data loading and\nsaving functionality provided by ``kedro.io``.\n\"\"\"\n\nimport abc\nimport copy\nimport logging\nimport re\nimport warnings\nfrom collections import namedtuple\nfrom datetime import datetime, timezone\nfrom functools import partial\nfrom glob import iglob\nfrom operator import attrgetter\nfrom pathlib import Path, PurePath, PurePosixPath\nfrom typing import Any, Callable, Dict, Generic, List, Optional, Tuple, Type, TypeVar\nfrom urllib.parse import urlsplit\n\nfrom cachetools import Cache, cachedmethod\nfrom cachetools.keys import hashkey\n\nfrom kedro.utils import load_obj\n\nwarnings.simplefilter(\"default\", DeprecationWarning)\n\nVERSION_FORMAT = \"%Y-%m-%dT%H.%M.%S.%fZ\"\nVERSIONED_FLAG_KEY = \"versioned\"\nVERSION_KEY = \"version\"\nHTTP_PROTOCOLS = (\"http\", \"https\")\nPROTOCOL_DELIMITER = \"://\"\nCLOUD_PROTOCOLS = (\"s3\", \"gcs\", \"gs\", \"adl\", \"abfs\", \"abfss\", \"gdrive\")\n\n\nclass DataSetError(Exception):\n \"\"\"``DataSetError`` raised by ``AbstractDataSet`` implementations\n in case of failure of input/output methods.\n\n ``AbstractDataSet`` implementations should provide instructive\n information in case of failure.\n \"\"\"\n\n pass\n\n\nclass DataSetNotFoundError(DataSetError):\n \"\"\"``DataSetNotFoundError`` raised by ``DataCatalog`` class in case of\n trying to use a non-existing data set.\n \"\"\"\n\n pass\n\n\nclass DataSetAlreadyExistsError(DataSetError):\n \"\"\"``DataSetAlreadyExistsError`` raised by ``DataCatalog`` class in case\n of trying to add a data set which already exists in the ``DataCatalog``.\n \"\"\"\n\n pass\n\n\nclass VersionNotFoundError(DataSetError):\n \"\"\"``VersionNotFoundError`` raised by ``AbstractVersionedDataSet`` implementations\n in case of no load versions available for the data set.\n \"\"\"\n\n pass\n\n\n_DI = TypeVar(\"_DI\")\n_DO = TypeVar(\"_DO\")\n\n\nclass AbstractDataSet(abc.ABC, Generic[_DI, _DO]):\n \"\"\"``AbstractDataSet`` is the base class for all data set implementations.\n All data set implementations should extend this abstract class\n and implement the methods marked as abstract.\n If a specific dataset implementation cannot be used in conjunction with\n the ``ParallelRunner``, such user-defined dataset should have the\n attribute `_SINGLE_PROCESS = True`.\n Example:\n ::\n\n >>> from pathlib import Path, PurePosixPath\n >>> import pandas as pd\n >>> from kedro.io import AbstractDataSet\n >>>\n >>>\n >>> class MyOwnDataSet(AbstractDataSet[pd.DataFrame, pd.DataFrame]):\n >>> def __init__(self, filepath, param1, param2=True):\n >>> self._filepath = PurePosixPath(filepath)\n >>> self._param1 = param1\n >>> self._param2 = param2\n >>>\n >>> def _load(self) -> pd.DataFrame:\n >>> return pd.read_csv(self._filepath)\n >>>\n >>> def _save(self, df: pd.DataFrame) -> None:\n >>> df.to_csv(str(self._filepath))\n >>>\n >>> def _exists(self) -> bool:\n >>> return Path(self._filepath.as_posix()).exists()\n >>>\n >>> def _describe(self):\n >>> return dict(param1=self._param1, param2=self._param2)\n\n Example catalog.yml specification:\n ::\n\n my_dataset:\n type: <path-to-my-own-dataset>.MyOwnDataSet\n filepath: data/01_raw/my_data.csv\n param1: <param1-value> # param1 is a required argument\n # param2 will be True by default\n \"\"\"\n\n @classmethod\n def from_config(\n cls: Type,\n name: str,\n config: Dict[str, Any],\n load_version: str = None,\n save_version: str = None,\n ) -> \"AbstractDataSet\":\n \"\"\"Create a data set instance using the configuration provided.\n\n Args:\n name: Data set name.\n config: Data set config dictionary.\n load_version: Version string to be used for ``load`` operation if\n the data set is versioned. Has no effect on the data set\n if versioning was not enabled.\n save_version: Version string to be used for ``save`` operation if\n the data set is versioned. Has no effect on the data set\n if versioning was not enabled.\n\n Returns:\n An instance of an ``AbstractDataSet`` subclass.\n\n Raises:\n DataSetError: When the function fails to create the data set\n from its config.\n\n \"\"\"\n try:\n class_obj, config = parse_dataset_definition(\n config, load_version, save_version\n )\n except Exception as exc:\n raise DataSetError(\n f\"An exception occurred when parsing config \"\n f\"for DataSet '{name}':\\n{str(exc)}\"\n ) from exc\n\n try:\n data_set = class_obj(**config) # type: ignore\n except TypeError as err:\n raise DataSetError(\n f\"\\n{err}.\\nDataSet '{name}' must only contain arguments valid for the \"\n f\"constructor of '{class_obj.__module__}.{class_obj.__qualname__}'.\"\n ) from err\n except Exception as err:\n raise DataSetError(\n f\"\\n{err}.\\nFailed to instantiate DataSet '{name}' \"\n f\"of type '{class_obj.__module__}.{class_obj.__qualname__}'.\"\n ) from err\n return data_set\n\n @property\n def _logger(self) -> logging.Logger:\n return logging.getLogger(__name__)\n\n def load(self) -> _DO:\n \"\"\"Loads data by delegation to the provided load method.\n\n Returns:\n Data returned by the provided load method.\n\n Raises:\n DataSetError: When underlying load method raises error.\n\n \"\"\"\n\n self._logger.debug(\"Loading %s\", str(self))\n\n try:\n return self._load()\n except DataSetError:\n raise\n except Exception as exc:\n # This exception handling is by design as the composed data sets\n # can throw any type of exception.\n message = (\n f\"Failed while loading data from data set {str(self)}.\\n{str(exc)}\"\n )\n raise DataSetError(message) from exc\n\n def save(self, data: _DI) -> None:\n \"\"\"Saves data by delegation to the provided save method.\n\n Args:\n data: the value to be saved by provided save method.\n\n Raises:\n DataSetError: when underlying save method raises error.\n FileNotFoundError: when save method got file instead of dir, on Windows.\n NotADirectoryError: when save method got file instead of dir, on Unix.\n \"\"\"\n\n if data is None:\n raise DataSetError(\"Saving 'None' to a 'DataSet' is not allowed\")\n\n try:\n self._logger.debug(\"Saving %s\", str(self))\n self._save(data)\n except DataSetError:\n raise\n except (FileNotFoundError, NotADirectoryError):\n raise\n except Exception as exc:\n message = f\"Failed while saving data to data set {str(self)}.\\n{str(exc)}\"\n raise DataSetError(message) from exc\n\n def __str__(self):\n def _to_str(obj, is_root=False):\n \"\"\"Returns a string representation where\n 1. The root level (i.e. the DataSet.__init__ arguments) are\n formatted like DataSet(key=value).\n 2. Dictionaries have the keys alphabetically sorted recursively.\n 3. None values are not shown.\n \"\"\"\n\n fmt = \"{}={}\" if is_root else \"'{}': {}\" # 1\n\n if isinstance(obj, dict):\n sorted_dict = sorted(obj.items(), key=lambda pair: str(pair[0])) # 2\n\n text = \", \".join(\n fmt.format(key, _to_str(value)) # 2\n for key, value in sorted_dict\n if value is not None # 3\n )\n\n return text if is_root else \"{\" + text + \"}\" # 1\n\n # not a dictionary\n return str(obj)\n\n return f\"{type(self).__name__}({_to_str(self._describe(), True)})\"\n\n @abc.abstractmethod\n def _load(self) -> _DO:\n raise NotImplementedError(\n f\"'{self.__class__.__name__}' is a subclass of AbstractDataSet and \"\n f\"it must implement the '_load' method\"\n )\n\n @abc.abstractmethod\n def _save(self, data: _DI) -> None:\n raise NotImplementedError(\n f\"'{self.__class__.__name__}' is a subclass of AbstractDataSet and \"\n f\"it must implement the '_save' method\"\n )\n\n @abc.abstractmethod\n def _describe(self) -> Dict[str, Any]:\n raise NotImplementedError(\n f\"'{self.__class__.__name__}' is a subclass of AbstractDataSet and \"\n f\"it must implement the '_describe' method\"\n )\n\n def exists(self) -> bool:\n \"\"\"Checks whether a data set's output already exists by calling\n the provided _exists() method.\n\n Returns:\n Flag indicating whether the output already exists.\n\n Raises:\n DataSetError: when underlying exists method raises error.\n\n \"\"\"\n try:\n self._logger.debug(\"Checking whether target of %s exists\", str(self))\n return self._exists()\n except Exception as exc:\n message = (\n f\"Failed during exists check for data set {str(self)}.\\n{str(exc)}\"\n )\n raise DataSetError(message) from exc\n\n def _exists(self) -> bool:\n self._logger.warning(\n \"'exists()' not implemented for '%s'. Assuming output does not exist.\",\n self.__class__.__name__,\n )\n return False\n\n def release(self) -> None:\n \"\"\"Release any cached data.\n\n Raises:\n DataSetError: when underlying release method raises error.\n\n \"\"\"\n try:\n self._logger.debug(\"Releasing %s\", str(self))\n self._release()\n except Exception as exc:\n message = f\"Failed during release for data set {str(self)}.\\n{str(exc)}\"\n raise DataSetError(message) from exc\n\n def _release(self) -> None:\n pass\n\n def _copy(self, **overwrite_params) -> \"AbstractDataSet\":\n dataset_copy = copy.deepcopy(self)\n for name, value in overwrite_params.items():\n setattr(dataset_copy, name, value)\n return dataset_copy\n\n\ndef generate_timestamp() -> str:\n \"\"\"Generate the timestamp to be used by versioning.\n\n Returns:\n String representation of the current timestamp.\n\n \"\"\"\n current_ts = datetime.now(tz=timezone.utc).strftime(VERSION_FORMAT)\n return current_ts[:-4] + current_ts[-1:] # Don't keep microseconds\n\n\nclass Version(namedtuple(\"Version\", [\"load\", \"save\"])):\n \"\"\"This namedtuple is used to provide load and save versions for versioned\n data sets. If ``Version.load`` is None, then the latest available version\n is loaded. If ``Version.save`` is None, then save version is formatted as\n YYYY-MM-DDThh.mm.ss.sssZ of the current timestamp.\n \"\"\"\n\n __slots__ = ()\n\n\n_CONSISTENCY_WARNING = (\n \"Save version '{}' did not match load version '{}' for {}. This is strongly \"\n \"discouraged due to inconsistencies it may cause between 'save' and \"\n \"'load' operations. Please refrain from setting exact load version for \"\n \"intermediate data sets where possible to avoid this warning.\"\n)\n\n_DEFAULT_PACKAGES = [\"kedro.io.\", \"kedro.extras.datasets.\", \"\"]\n\n\ndef parse_dataset_definition(\n config: Dict[str, Any], load_version: str = None, save_version: str = None\n) -> Tuple[Type[AbstractDataSet], Dict[str, Any]]:\n \"\"\"Parse and instantiate a dataset class using the configuration provided.\n\n Args:\n config: Data set config dictionary. It *must* contain the `type` key\n with fully qualified class name.\n load_version: Version string to be used for ``load`` operation if\n the data set is versioned. Has no effect on the data set\n if versioning was not enabled.\n save_version: Version string to be used for ``save`` operation if\n the data set is versioned. Has no effect on the data set\n if versioning was not enabled.\n\n Raises:\n DataSetError: If the function fails to parse the configuration provided.\n\n Returns:\n 2-tuple: (Dataset class object, configuration dictionary)\n \"\"\"\n save_version = save_version or generate_timestamp()\n config = copy.deepcopy(config)\n\n if \"type\" not in config:\n raise DataSetError(\"'type' is missing from DataSet catalog configuration\")\n\n class_obj = config.pop(\"type\")\n if isinstance(class_obj, str):\n if len(class_obj.strip(\".\")) != len(class_obj):\n raise DataSetError(\n \"'type' class path does not support relative \"\n \"paths or paths ending with a dot.\"\n )\n class_paths = (prefix + class_obj for prefix in _DEFAULT_PACKAGES)\n\n trials = (_load_obj(class_path) for class_path in class_paths)\n try:\n class_obj = next(obj for obj in trials if obj is not None)\n except StopIteration as exc:\n raise DataSetError(\n f\"Class '{class_obj}' not found or one of its dependencies \"\n f\"has not been installed.\"\n ) from exc\n\n if not issubclass(class_obj, AbstractDataSet):\n raise DataSetError(\n f\"DataSet type '{class_obj.__module__}.{class_obj.__qualname__}' \"\n f\"is invalid: all data set types must extend 'AbstractDataSet'.\"\n )\n\n if VERSION_KEY in config:\n # remove \"version\" key so that it's not passed\n # to the \"unversioned\" data set constructor\n message = (\n \"'%s' attribute removed from data set configuration since it is a \"\n \"reserved word and cannot be directly specified\"\n )\n logging.getLogger(__name__).warning(message, VERSION_KEY)\n del config[VERSION_KEY]\n\n # dataset is either versioned explicitly by the user or versioned is set to true by default\n # on the dataset\n if config.pop(VERSIONED_FLAG_KEY, False) or getattr(\n class_obj, VERSIONED_FLAG_KEY, False\n ):\n config[VERSION_KEY] = Version(load_version, save_version)\n\n return class_obj, config\n\n\ndef _load_obj(class_path: str) -> Optional[object]:\n mod_path, _, class_name = class_path.rpartition(\".\")\n try:\n available_classes = load_obj(f\"{mod_path}.__all__\")\n # ModuleNotFoundError: When `load_obj` can't find `mod_path` (e.g `kedro.io.pandas`)\n # this is because we try a combination of all prefixes.\n # AttributeError: When `load_obj` manages to load `mod_path` but it doesn't have an\n # `__all__` attribute -- either because it's a custom or a kedro.io dataset\n except (ModuleNotFoundError, AttributeError, ValueError):\n available_classes = None\n\n try:\n class_obj = load_obj(class_path)\n except (ModuleNotFoundError, ValueError):\n return None\n except AttributeError as exc:\n if available_classes and class_name in available_classes:\n raise DataSetError(\n f\"{exc} Please see the documentation on how to \"\n f\"install relevant dependencies for {class_path}:\\n\"\n f\"https://kedro.readthedocs.io/en/stable/\"\n f\"kedro_project_setup/dependencies.html\"\n ) from exc\n return None\n\n return class_obj\n\n\ndef _local_exists(filepath: str) -> bool: # SKIP_IF_NO_SPARK\n filepath = Path(filepath)\n return filepath.exists() or any(par.is_file() for par in filepath.parents)\n\n\nclass AbstractVersionedDataSet(AbstractDataSet[_DI, _DO], abc.ABC):\n \"\"\"\n ``AbstractVersionedDataSet`` is the base class for all versioned data set\n implementations. All data sets that implement versioning should extend this\n abstract class and implement the methods marked as abstract.\n\n Example:\n ::\n\n >>> from pathlib import Path, PurePosixPath\n >>> import pandas as pd\n >>> from kedro.io import AbstractVersionedDataSet\n >>>\n >>>\n >>> class MyOwnDataSet(AbstractVersionedDataSet):\n >>> def __init__(self, filepath, version, param1, param2=True):\n >>> super().__init__(PurePosixPath(filepath), version)\n >>> self._param1 = param1\n >>> self._param2 = param2\n >>>\n >>> def _load(self) -> pd.DataFrame:\n >>> load_path = self._get_load_path()\n >>> return pd.read_csv(load_path)\n >>>\n >>> def _save(self, df: pd.DataFrame) -> None:\n >>> save_path = self._get_save_path()\n >>> df.to_csv(str(save_path))\n >>>\n >>> def _exists(self) -> bool:\n >>> path = self._get_load_path()\n >>> return Path(path.as_posix()).exists()\n >>>\n >>> def _describe(self):\n >>> return dict(version=self._version, param1=self._param1, param2=self._param2)\n\n Example catalog.yml specification:\n ::\n\n my_dataset:\n type: <path-to-my-own-dataset>.MyOwnDataSet\n filepath: data/01_raw/my_data.csv\n versioned: true\n param1: <param1-value> # param1 is a required argument\n # param2 will be True by default\n \"\"\"\n\n def __init__(\n self,\n filepath: PurePosixPath,\n version: Optional[Version],\n exists_function: Callable[[str], bool] = None,\n glob_function: Callable[[str], List[str]] = None,\n ):\n \"\"\"Creates a new instance of ``AbstractVersionedDataSet``.\n\n Args:\n filepath: Filepath in POSIX format to a file.\n version: If specified, should be an instance of\n ``kedro.io.core.Version``. If its ``load`` attribute is\n None, the latest version will be loaded. If its ``save``\n attribute is None, save version will be autogenerated.\n exists_function: Function that is used for determining whether\n a path exists in a filesystem.\n glob_function: Function that is used for finding all paths\n in a filesystem, which match a given pattern.\n \"\"\"\n self._filepath = filepath\n self._version = version\n self._exists_function = exists_function or _local_exists\n self._glob_function = glob_function or iglob\n # 1 entry for load version, 1 for save version\n self._version_cache = Cache(maxsize=2) # type: Cache\n\n # 'key' is set to prevent cache key overlapping for load and save:\n # https://cachetools.readthedocs.io/en/stable/#cachetools.cachedmethod\n @cachedmethod(cache=attrgetter(\"_version_cache\"), key=partial(hashkey, \"load\"))\n def _fetch_latest_load_version(self) -> str:\n # When load version is unpinned, fetch the most recent existing\n # version from the given path.\n pattern = str(self._get_versioned_path(\"*\"))\n version_paths = sorted(self._glob_function(pattern), reverse=True)\n most_recent = next(\n (path for path in version_paths if self._exists_function(path)), None\n )\n protocol = getattr(self, \"_protocol\", None)\n if not most_recent:\n if protocol in CLOUD_PROTOCOLS:\n message = (\n f\"Did not find any versions for {self}. This could be \"\n f\"due to insufficient permission.\"\n )\n else:\n message = f\"Did not find any versions for {self}\"\n raise VersionNotFoundError(message)\n return PurePath(most_recent).parent.name\n\n # 'key' is set to prevent cache key overlapping for load and save:\n # https://cachetools.readthedocs.io/en/stable/#cachetools.cachedmethod\n @cachedmethod(cache=attrgetter(\"_version_cache\"), key=partial(hashkey, \"save\"))\n def _fetch_latest_save_version(self) -> str: # pylint: disable=no-self-use\n \"\"\"Generate and cache the current save version\"\"\"\n return generate_timestamp()\n\n def resolve_load_version(self) -> Optional[str]:\n \"\"\"Compute the version the dataset should be loaded with.\"\"\"\n if not self._version:\n return None\n if self._version.load:\n return self._version.load\n return self._fetch_latest_load_version()\n\n def _get_load_path(self) -> PurePosixPath:\n if not self._version:\n # When versioning is disabled, load from original filepath\n return self._filepath\n\n load_version = self.resolve_load_version()\n return self._get_versioned_path(load_version) # type: ignore\n\n def resolve_save_version(self) -> Optional[str]:\n \"\"\"Compute the version the dataset should be saved with.\"\"\"\n if not self._version:\n return None\n if self._version.save:\n return self._version.save\n return self._fetch_latest_save_version()\n\n def _get_save_path(self) -> PurePosixPath:\n if not self._version:\n # When versioning is disabled, return original filepath\n return self._filepath\n\n save_version = self.resolve_save_version()\n versioned_path = self._get_versioned_path(save_version) # type: ignore\n\n if self._exists_function(str(versioned_path)):\n raise DataSetError(\n f\"Save path '{versioned_path}' for {str(self)} must not exist if \"\n f\"versioning is enabled.\"\n )\n\n return versioned_path\n\n def _get_versioned_path(self, version: str) -> PurePosixPath:\n return self._filepath / version / self._filepath.name\n\n def load(self) -> _DO: # pylint: disable=useless-parent-delegation\n return super().load()\n\n def save(self, data: _DI) -> None:\n self._version_cache.clear()\n save_version = self.resolve_save_version() # Make sure last save version is set\n try:\n super().save(data)\n except (FileNotFoundError, NotADirectoryError) as err:\n # FileNotFoundError raised in Win, NotADirectoryError raised in Unix\n _default_version = \"YYYY-MM-DDThh.mm.ss.sssZ\"\n raise DataSetError(\n f\"Cannot save versioned dataset '{self._filepath.name}' to \"\n f\"'{self._filepath.parent.as_posix()}' because a file with the same \"\n f\"name already exists in the directory. This is likely because \"\n f\"versioning was enabled on a dataset already saved previously. Either \"\n f\"remove '{self._filepath.name}' from the directory or manually \"\n f\"convert it into a versioned dataset by placing it in a versioned \"\n f\"directory (e.g. with default versioning format \"\n f\"'{self._filepath.as_posix()}/{_default_version}/{self._filepath.name}\"\n f\"').\"\n ) from err\n\n load_version = self.resolve_load_version()\n if load_version != save_version:\n warnings.warn(\n _CONSISTENCY_WARNING.format(save_version, load_version, str(self))\n )\n\n def exists(self) -> bool:\n \"\"\"Checks whether a data set's output already exists by calling\n the provided _exists() method.\n\n Returns:\n Flag indicating whether the output already exists.\n\n Raises:\n DataSetError: when underlying exists method raises error.\n\n \"\"\"\n self._logger.debug(\"Checking whether target of %s exists\", str(self))\n try:\n return self._exists()\n except VersionNotFoundError:\n return False\n except Exception as exc: # SKIP_IF_NO_SPARK\n message = (\n f\"Failed during exists check for data set {str(self)}.\\n{str(exc)}\"\n )\n raise DataSetError(message) from exc\n\n def _release(self) -> None:\n super()._release()\n self._version_cache.clear()\n\n\ndef _parse_filepath(filepath: str) -> Dict[str, str]:\n \"\"\"Split filepath on protocol and path. Based on `fsspec.utils.infer_storage_options`.\n\n Args:\n filepath: Either local absolute file path or URL (s3://bucket/file.csv)\n\n Returns:\n Parsed filepath.\n \"\"\"\n if (\n re.match(r\"^[a-zA-Z]:[\\\\/]\", filepath)\n or re.match(r\"^[a-zA-Z0-9]+://\", filepath) is None\n ):\n return {\"protocol\": \"file\", \"path\": filepath}\n\n parsed_path = urlsplit(filepath)\n protocol = parsed_path.scheme or \"file\"\n\n if protocol in HTTP_PROTOCOLS:\n return {\"protocol\": protocol, \"path\": filepath}\n\n path = parsed_path.path\n if protocol == \"file\":\n windows_path = re.match(r\"^/([a-zA-Z])[:|]([\\\\/].*)$\", path)\n if windows_path:\n path = \":\".join(windows_path.groups())\n\n options = {\"protocol\": protocol, \"path\": path}\n\n if parsed_path.netloc:\n if protocol in CLOUD_PROTOCOLS:\n host_with_port = parsed_path.netloc.rsplit(\"@\", 1)[-1]\n host = host_with_port.rsplit(\":\", 1)[0]\n options[\"path\"] = host + options[\"path\"]\n\n return options\n\n\ndef get_protocol_and_path(filepath: str, version: Version = None) -> Tuple[str, str]:\n \"\"\"Parses filepath on protocol and path.\n\n Args:\n filepath: raw filepath e.g.: `gcs://bucket/test.json`.\n version: instance of ``kedro.io.core.Version`` or None.\n\n Returns:\n Protocol and path.\n\n Raises:\n DataSetError: when protocol is http(s) and version is not None.\n Note: HTTP(s) dataset doesn't support versioning.\n \"\"\"\n options_dict = _parse_filepath(filepath)\n path = options_dict[\"path\"]\n protocol = options_dict[\"protocol\"]\n\n if protocol in HTTP_PROTOCOLS:\n if version is not None:\n raise DataSetError(\n \"HTTP(s) DataSet doesn't support versioning. \"\n \"Please remove version flag from the dataset configuration.\"\n )\n path = path.split(PROTOCOL_DELIMITER, 1)[-1]\n\n return protocol, path\n\n\ndef get_filepath_str(path: PurePath, protocol: str) -> str:\n \"\"\"Returns filepath. Returns full filepath (with protocol) if protocol is HTTP(s).\n\n Args:\n path: filepath without protocol.\n protocol: protocol.\n\n Returns:\n Filepath string.\n \"\"\"\n path = path.as_posix()\n if protocol in HTTP_PROTOCOLS:\n path = \"\".join((protocol, PROTOCOL_DELIMITER, path))\n return path\n\n\ndef validate_on_forbidden_chars(**kwargs):\n \"\"\"Validate that string values do not include white-spaces or ;\"\"\"\n for key, value in kwargs.items():\n if \" \" in value or \";\" in value:\n raise DataSetError(\n f\"Neither white-space nor semicolon are allowed in '{key}'.\"\n )\n", "path": "kedro/io/core.py" } ]
[ { "content": "\"\"\"This module provides a set of classes which underpin the data loading and\nsaving functionality provided by ``kedro.io``.\n\"\"\"\n\nimport abc\nimport copy\nimport logging\nimport re\nimport warnings\nfrom collections import namedtuple\nfrom datetime import datetime, timezone\nfrom functools import partial\nfrom glob import iglob\nfrom operator import attrgetter\nfrom pathlib import Path, PurePath, PurePosixPath\nfrom typing import Any, Callable, Dict, Generic, List, Optional, Tuple, Type, TypeVar\nfrom urllib.parse import urlsplit\n\nfrom cachetools import Cache, cachedmethod\nfrom cachetools.keys import hashkey\n\nfrom kedro.utils import load_obj\n\nwarnings.simplefilter(\"default\", DeprecationWarning)\n\nVERSION_FORMAT = \"%Y-%m-%dT%H.%M.%S.%fZ\"\nVERSIONED_FLAG_KEY = \"versioned\"\nVERSION_KEY = \"version\"\nHTTP_PROTOCOLS = (\"http\", \"https\")\nPROTOCOL_DELIMITER = \"://\"\nCLOUD_PROTOCOLS = (\"s3\", \"gcs\", \"gs\", \"adl\", \"abfs\", \"abfss\", \"gdrive\")\n\n\nclass DataSetError(Exception):\n \"\"\"``DataSetError`` raised by ``AbstractDataSet`` implementations\n in case of failure of input/output methods.\n\n ``AbstractDataSet`` implementations should provide instructive\n information in case of failure.\n \"\"\"\n\n pass\n\n\nclass DataSetNotFoundError(DataSetError):\n \"\"\"``DataSetNotFoundError`` raised by ``DataCatalog`` class in case of\n trying to use a non-existing data set.\n \"\"\"\n\n pass\n\n\nclass DataSetAlreadyExistsError(DataSetError):\n \"\"\"``DataSetAlreadyExistsError`` raised by ``DataCatalog`` class in case\n of trying to add a data set which already exists in the ``DataCatalog``.\n \"\"\"\n\n pass\n\n\nclass VersionNotFoundError(DataSetError):\n \"\"\"``VersionNotFoundError`` raised by ``AbstractVersionedDataSet`` implementations\n in case of no load versions available for the data set.\n \"\"\"\n\n pass\n\n\n_DI = TypeVar(\"_DI\")\n_DO = TypeVar(\"_DO\")\n\n\nclass AbstractDataSet(abc.ABC, Generic[_DI, _DO]):\n \"\"\"``AbstractDataSet`` is the base class for all data set implementations.\n All data set implementations should extend this abstract class\n and implement the methods marked as abstract.\n If a specific dataset implementation cannot be used in conjunction with\n the ``ParallelRunner``, such user-defined dataset should have the\n attribute `_SINGLE_PROCESS = True`.\n Example:\n ::\n\n >>> from pathlib import Path, PurePosixPath\n >>> import pandas as pd\n >>> from kedro.io import AbstractDataSet\n >>>\n >>>\n >>> class MyOwnDataSet(AbstractDataSet[pd.DataFrame, pd.DataFrame]):\n >>> def __init__(self, filepath, param1, param2=True):\n >>> self._filepath = PurePosixPath(filepath)\n >>> self._param1 = param1\n >>> self._param2 = param2\n >>>\n >>> def _load(self) -> pd.DataFrame:\n >>> return pd.read_csv(self._filepath)\n >>>\n >>> def _save(self, df: pd.DataFrame) -> None:\n >>> df.to_csv(str(self._filepath))\n >>>\n >>> def _exists(self) -> bool:\n >>> return Path(self._filepath.as_posix()).exists()\n >>>\n >>> def _describe(self):\n >>> return dict(param1=self._param1, param2=self._param2)\n\n Example catalog.yml specification:\n ::\n\n my_dataset:\n type: <path-to-my-own-dataset>.MyOwnDataSet\n filepath: data/01_raw/my_data.csv\n param1: <param1-value> # param1 is a required argument\n # param2 will be True by default\n \"\"\"\n\n @classmethod\n def from_config(\n cls: Type,\n name: str,\n config: Dict[str, Any],\n load_version: str = None,\n save_version: str = None,\n ) -> \"AbstractDataSet\":\n \"\"\"Create a data set instance using the configuration provided.\n\n Args:\n name: Data set name.\n config: Data set config dictionary.\n load_version: Version string to be used for ``load`` operation if\n the data set is versioned. Has no effect on the data set\n if versioning was not enabled.\n save_version: Version string to be used for ``save`` operation if\n the data set is versioned. Has no effect on the data set\n if versioning was not enabled.\n\n Returns:\n An instance of an ``AbstractDataSet`` subclass.\n\n Raises:\n DataSetError: When the function fails to create the data set\n from its config.\n\n \"\"\"\n try:\n class_obj, config = parse_dataset_definition(\n config, load_version, save_version\n )\n except Exception as exc:\n raise DataSetError(\n f\"An exception occurred when parsing config \"\n f\"for DataSet '{name}':\\n{str(exc)}\"\n ) from exc\n\n try:\n data_set = class_obj(**config) # type: ignore\n except TypeError as err:\n raise DataSetError(\n f\"\\n{err}.\\nDataSet '{name}' must only contain arguments valid for the \"\n f\"constructor of '{class_obj.__module__}.{class_obj.__qualname__}'.\"\n ) from err\n except Exception as err:\n raise DataSetError(\n f\"\\n{err}.\\nFailed to instantiate DataSet '{name}' \"\n f\"of type '{class_obj.__module__}.{class_obj.__qualname__}'.\"\n ) from err\n return data_set\n\n @property\n def _logger(self) -> logging.Logger:\n return logging.getLogger(__name__)\n\n def load(self) -> _DO:\n \"\"\"Loads data by delegation to the provided load method.\n\n Returns:\n Data returned by the provided load method.\n\n Raises:\n DataSetError: When underlying load method raises error.\n\n \"\"\"\n\n self._logger.debug(\"Loading %s\", str(self))\n\n try:\n return self._load()\n except DataSetError:\n raise\n except Exception as exc:\n # This exception handling is by design as the composed data sets\n # can throw any type of exception.\n message = (\n f\"Failed while loading data from data set {str(self)}.\\n{str(exc)}\"\n )\n raise DataSetError(message) from exc\n\n def save(self, data: _DI) -> None:\n \"\"\"Saves data by delegation to the provided save method.\n\n Args:\n data: the value to be saved by provided save method.\n\n Raises:\n DataSetError: when underlying save method raises error.\n FileNotFoundError: when save method got file instead of dir, on Windows.\n NotADirectoryError: when save method got file instead of dir, on Unix.\n \"\"\"\n\n if data is None:\n raise DataSetError(\"Saving 'None' to a 'DataSet' is not allowed\")\n\n try:\n self._logger.debug(\"Saving %s\", str(self))\n self._save(data)\n except DataSetError:\n raise\n except (FileNotFoundError, NotADirectoryError):\n raise\n except Exception as exc:\n message = f\"Failed while saving data to data set {str(self)}.\\n{str(exc)}\"\n raise DataSetError(message) from exc\n\n def __str__(self):\n def _to_str(obj, is_root=False):\n \"\"\"Returns a string representation where\n 1. The root level (i.e. the DataSet.__init__ arguments) are\n formatted like DataSet(key=value).\n 2. Dictionaries have the keys alphabetically sorted recursively.\n 3. None values are not shown.\n \"\"\"\n\n fmt = \"{}={}\" if is_root else \"'{}': {}\" # 1\n\n if isinstance(obj, dict):\n sorted_dict = sorted(obj.items(), key=lambda pair: str(pair[0])) # 2\n\n text = \", \".join(\n fmt.format(key, _to_str(value)) # 2\n for key, value in sorted_dict\n if value is not None # 3\n )\n\n return text if is_root else \"{\" + text + \"}\" # 1\n\n # not a dictionary\n return str(obj)\n\n return f\"{type(self).__name__}({_to_str(self._describe(), True)})\"\n\n @abc.abstractmethod\n def _load(self) -> _DO:\n raise NotImplementedError(\n f\"'{self.__class__.__name__}' is a subclass of AbstractDataSet and \"\n f\"it must implement the '_load' method\"\n )\n\n @abc.abstractmethod\n def _save(self, data: _DI) -> None:\n raise NotImplementedError(\n f\"'{self.__class__.__name__}' is a subclass of AbstractDataSet and \"\n f\"it must implement the '_save' method\"\n )\n\n @abc.abstractmethod\n def _describe(self) -> Dict[str, Any]:\n raise NotImplementedError(\n f\"'{self.__class__.__name__}' is a subclass of AbstractDataSet and \"\n f\"it must implement the '_describe' method\"\n )\n\n def exists(self) -> bool:\n \"\"\"Checks whether a data set's output already exists by calling\n the provided _exists() method.\n\n Returns:\n Flag indicating whether the output already exists.\n\n Raises:\n DataSetError: when underlying exists method raises error.\n\n \"\"\"\n try:\n self._logger.debug(\"Checking whether target of %s exists\", str(self))\n return self._exists()\n except Exception as exc:\n message = (\n f\"Failed during exists check for data set {str(self)}.\\n{str(exc)}\"\n )\n raise DataSetError(message) from exc\n\n def _exists(self) -> bool:\n self._logger.warning(\n \"'exists()' not implemented for '%s'. Assuming output does not exist.\",\n self.__class__.__name__,\n )\n return False\n\n def release(self) -> None:\n \"\"\"Release any cached data.\n\n Raises:\n DataSetError: when underlying release method raises error.\n\n \"\"\"\n try:\n self._logger.debug(\"Releasing %s\", str(self))\n self._release()\n except Exception as exc:\n message = f\"Failed during release for data set {str(self)}.\\n{str(exc)}\"\n raise DataSetError(message) from exc\n\n def _release(self) -> None:\n pass\n\n def _copy(self, **overwrite_params) -> \"AbstractDataSet\":\n dataset_copy = copy.deepcopy(self)\n for name, value in overwrite_params.items():\n setattr(dataset_copy, name, value)\n return dataset_copy\n\n\ndef generate_timestamp() -> str:\n \"\"\"Generate the timestamp to be used by versioning.\n\n Returns:\n String representation of the current timestamp.\n\n \"\"\"\n current_ts = datetime.now(tz=timezone.utc).strftime(VERSION_FORMAT)\n return current_ts[:-4] + current_ts[-1:] # Don't keep microseconds\n\n\nclass Version(namedtuple(\"Version\", [\"load\", \"save\"])):\n \"\"\"This namedtuple is used to provide load and save versions for versioned\n data sets. If ``Version.load`` is None, then the latest available version\n is loaded. If ``Version.save`` is None, then save version is formatted as\n YYYY-MM-DDThh.mm.ss.sssZ of the current timestamp.\n \"\"\"\n\n __slots__ = ()\n\n\n_CONSISTENCY_WARNING = (\n \"Save version '{}' did not match load version '{}' for {}. This is strongly \"\n \"discouraged due to inconsistencies it may cause between 'save' and \"\n \"'load' operations. Please refrain from setting exact load version for \"\n \"intermediate data sets where possible to avoid this warning.\"\n)\n\n_DEFAULT_PACKAGES = [\"kedro.io.\", \"kedro_datasets.\", \"kedro.extras.datasets.\", \"\"]\n\n\ndef parse_dataset_definition(\n config: Dict[str, Any], load_version: str = None, save_version: str = None\n) -> Tuple[Type[AbstractDataSet], Dict[str, Any]]:\n \"\"\"Parse and instantiate a dataset class using the configuration provided.\n\n Args:\n config: Data set config dictionary. It *must* contain the `type` key\n with fully qualified class name.\n load_version: Version string to be used for ``load`` operation if\n the data set is versioned. Has no effect on the data set\n if versioning was not enabled.\n save_version: Version string to be used for ``save`` operation if\n the data set is versioned. Has no effect on the data set\n if versioning was not enabled.\n\n Raises:\n DataSetError: If the function fails to parse the configuration provided.\n\n Returns:\n 2-tuple: (Dataset class object, configuration dictionary)\n \"\"\"\n save_version = save_version or generate_timestamp()\n config = copy.deepcopy(config)\n\n if \"type\" not in config:\n raise DataSetError(\"'type' is missing from DataSet catalog configuration\")\n\n class_obj = config.pop(\"type\")\n if isinstance(class_obj, str):\n if len(class_obj.strip(\".\")) != len(class_obj):\n raise DataSetError(\n \"'type' class path does not support relative \"\n \"paths or paths ending with a dot.\"\n )\n class_paths = (prefix + class_obj for prefix in _DEFAULT_PACKAGES)\n\n trials = (_load_obj(class_path) for class_path in class_paths)\n try:\n class_obj = next(obj for obj in trials if obj is not None)\n except StopIteration as exc:\n raise DataSetError(\n f\"Class '{class_obj}' not found or one of its dependencies \"\n f\"has not been installed.\"\n ) from exc\n\n if not issubclass(class_obj, AbstractDataSet):\n raise DataSetError(\n f\"DataSet type '{class_obj.__module__}.{class_obj.__qualname__}' \"\n f\"is invalid: all data set types must extend 'AbstractDataSet'.\"\n )\n\n if VERSION_KEY in config:\n # remove \"version\" key so that it's not passed\n # to the \"unversioned\" data set constructor\n message = (\n \"'%s' attribute removed from data set configuration since it is a \"\n \"reserved word and cannot be directly specified\"\n )\n logging.getLogger(__name__).warning(message, VERSION_KEY)\n del config[VERSION_KEY]\n\n # dataset is either versioned explicitly by the user or versioned is set to true by default\n # on the dataset\n if config.pop(VERSIONED_FLAG_KEY, False) or getattr(\n class_obj, VERSIONED_FLAG_KEY, False\n ):\n config[VERSION_KEY] = Version(load_version, save_version)\n\n return class_obj, config\n\n\ndef _load_obj(class_path: str) -> Optional[object]:\n mod_path, _, class_name = class_path.rpartition(\".\")\n try:\n available_classes = load_obj(f\"{mod_path}.__all__\")\n # ModuleNotFoundError: When `load_obj` can't find `mod_path` (e.g `kedro.io.pandas`)\n # this is because we try a combination of all prefixes.\n # AttributeError: When `load_obj` manages to load `mod_path` but it doesn't have an\n # `__all__` attribute -- either because it's a custom or a kedro.io dataset\n except (ModuleNotFoundError, AttributeError, ValueError):\n available_classes = None\n\n try:\n class_obj = load_obj(class_path)\n except (ModuleNotFoundError, ValueError):\n return None\n except AttributeError as exc:\n if available_classes and class_name in available_classes:\n raise DataSetError(\n f\"{exc} Please see the documentation on how to \"\n f\"install relevant dependencies for {class_path}:\\n\"\n f\"https://kedro.readthedocs.io/en/stable/\"\n f\"kedro_project_setup/dependencies.html\"\n ) from exc\n return None\n\n return class_obj\n\n\ndef _local_exists(filepath: str) -> bool: # SKIP_IF_NO_SPARK\n filepath = Path(filepath)\n return filepath.exists() or any(par.is_file() for par in filepath.parents)\n\n\nclass AbstractVersionedDataSet(AbstractDataSet[_DI, _DO], abc.ABC):\n \"\"\"\n ``AbstractVersionedDataSet`` is the base class for all versioned data set\n implementations. All data sets that implement versioning should extend this\n abstract class and implement the methods marked as abstract.\n\n Example:\n ::\n\n >>> from pathlib import Path, PurePosixPath\n >>> import pandas as pd\n >>> from kedro.io import AbstractVersionedDataSet\n >>>\n >>>\n >>> class MyOwnDataSet(AbstractVersionedDataSet):\n >>> def __init__(self, filepath, version, param1, param2=True):\n >>> super().__init__(PurePosixPath(filepath), version)\n >>> self._param1 = param1\n >>> self._param2 = param2\n >>>\n >>> def _load(self) -> pd.DataFrame:\n >>> load_path = self._get_load_path()\n >>> return pd.read_csv(load_path)\n >>>\n >>> def _save(self, df: pd.DataFrame) -> None:\n >>> save_path = self._get_save_path()\n >>> df.to_csv(str(save_path))\n >>>\n >>> def _exists(self) -> bool:\n >>> path = self._get_load_path()\n >>> return Path(path.as_posix()).exists()\n >>>\n >>> def _describe(self):\n >>> return dict(version=self._version, param1=self._param1, param2=self._param2)\n\n Example catalog.yml specification:\n ::\n\n my_dataset:\n type: <path-to-my-own-dataset>.MyOwnDataSet\n filepath: data/01_raw/my_data.csv\n versioned: true\n param1: <param1-value> # param1 is a required argument\n # param2 will be True by default\n \"\"\"\n\n def __init__(\n self,\n filepath: PurePosixPath,\n version: Optional[Version],\n exists_function: Callable[[str], bool] = None,\n glob_function: Callable[[str], List[str]] = None,\n ):\n \"\"\"Creates a new instance of ``AbstractVersionedDataSet``.\n\n Args:\n filepath: Filepath in POSIX format to a file.\n version: If specified, should be an instance of\n ``kedro.io.core.Version``. If its ``load`` attribute is\n None, the latest version will be loaded. If its ``save``\n attribute is None, save version will be autogenerated.\n exists_function: Function that is used for determining whether\n a path exists in a filesystem.\n glob_function: Function that is used for finding all paths\n in a filesystem, which match a given pattern.\n \"\"\"\n self._filepath = filepath\n self._version = version\n self._exists_function = exists_function or _local_exists\n self._glob_function = glob_function or iglob\n # 1 entry for load version, 1 for save version\n self._version_cache = Cache(maxsize=2) # type: Cache\n\n # 'key' is set to prevent cache key overlapping for load and save:\n # https://cachetools.readthedocs.io/en/stable/#cachetools.cachedmethod\n @cachedmethod(cache=attrgetter(\"_version_cache\"), key=partial(hashkey, \"load\"))\n def _fetch_latest_load_version(self) -> str:\n # When load version is unpinned, fetch the most recent existing\n # version from the given path.\n pattern = str(self._get_versioned_path(\"*\"))\n version_paths = sorted(self._glob_function(pattern), reverse=True)\n most_recent = next(\n (path for path in version_paths if self._exists_function(path)), None\n )\n protocol = getattr(self, \"_protocol\", None)\n if not most_recent:\n if protocol in CLOUD_PROTOCOLS:\n message = (\n f\"Did not find any versions for {self}. This could be \"\n f\"due to insufficient permission.\"\n )\n else:\n message = f\"Did not find any versions for {self}\"\n raise VersionNotFoundError(message)\n return PurePath(most_recent).parent.name\n\n # 'key' is set to prevent cache key overlapping for load and save:\n # https://cachetools.readthedocs.io/en/stable/#cachetools.cachedmethod\n @cachedmethod(cache=attrgetter(\"_version_cache\"), key=partial(hashkey, \"save\"))\n def _fetch_latest_save_version(self) -> str: # pylint: disable=no-self-use\n \"\"\"Generate and cache the current save version\"\"\"\n return generate_timestamp()\n\n def resolve_load_version(self) -> Optional[str]:\n \"\"\"Compute the version the dataset should be loaded with.\"\"\"\n if not self._version:\n return None\n if self._version.load:\n return self._version.load\n return self._fetch_latest_load_version()\n\n def _get_load_path(self) -> PurePosixPath:\n if not self._version:\n # When versioning is disabled, load from original filepath\n return self._filepath\n\n load_version = self.resolve_load_version()\n return self._get_versioned_path(load_version) # type: ignore\n\n def resolve_save_version(self) -> Optional[str]:\n \"\"\"Compute the version the dataset should be saved with.\"\"\"\n if not self._version:\n return None\n if self._version.save:\n return self._version.save\n return self._fetch_latest_save_version()\n\n def _get_save_path(self) -> PurePosixPath:\n if not self._version:\n # When versioning is disabled, return original filepath\n return self._filepath\n\n save_version = self.resolve_save_version()\n versioned_path = self._get_versioned_path(save_version) # type: ignore\n\n if self._exists_function(str(versioned_path)):\n raise DataSetError(\n f\"Save path '{versioned_path}' for {str(self)} must not exist if \"\n f\"versioning is enabled.\"\n )\n\n return versioned_path\n\n def _get_versioned_path(self, version: str) -> PurePosixPath:\n return self._filepath / version / self._filepath.name\n\n def load(self) -> _DO: # pylint: disable=useless-parent-delegation\n return super().load()\n\n def save(self, data: _DI) -> None:\n self._version_cache.clear()\n save_version = self.resolve_save_version() # Make sure last save version is set\n try:\n super().save(data)\n except (FileNotFoundError, NotADirectoryError) as err:\n # FileNotFoundError raised in Win, NotADirectoryError raised in Unix\n _default_version = \"YYYY-MM-DDThh.mm.ss.sssZ\"\n raise DataSetError(\n f\"Cannot save versioned dataset '{self._filepath.name}' to \"\n f\"'{self._filepath.parent.as_posix()}' because a file with the same \"\n f\"name already exists in the directory. This is likely because \"\n f\"versioning was enabled on a dataset already saved previously. Either \"\n f\"remove '{self._filepath.name}' from the directory or manually \"\n f\"convert it into a versioned dataset by placing it in a versioned \"\n f\"directory (e.g. with default versioning format \"\n f\"'{self._filepath.as_posix()}/{_default_version}/{self._filepath.name}\"\n f\"').\"\n ) from err\n\n load_version = self.resolve_load_version()\n if load_version != save_version:\n warnings.warn(\n _CONSISTENCY_WARNING.format(save_version, load_version, str(self))\n )\n\n def exists(self) -> bool:\n \"\"\"Checks whether a data set's output already exists by calling\n the provided _exists() method.\n\n Returns:\n Flag indicating whether the output already exists.\n\n Raises:\n DataSetError: when underlying exists method raises error.\n\n \"\"\"\n self._logger.debug(\"Checking whether target of %s exists\", str(self))\n try:\n return self._exists()\n except VersionNotFoundError:\n return False\n except Exception as exc: # SKIP_IF_NO_SPARK\n message = (\n f\"Failed during exists check for data set {str(self)}.\\n{str(exc)}\"\n )\n raise DataSetError(message) from exc\n\n def _release(self) -> None:\n super()._release()\n self._version_cache.clear()\n\n\ndef _parse_filepath(filepath: str) -> Dict[str, str]:\n \"\"\"Split filepath on protocol and path. Based on `fsspec.utils.infer_storage_options`.\n\n Args:\n filepath: Either local absolute file path or URL (s3://bucket/file.csv)\n\n Returns:\n Parsed filepath.\n \"\"\"\n if (\n re.match(r\"^[a-zA-Z]:[\\\\/]\", filepath)\n or re.match(r\"^[a-zA-Z0-9]+://\", filepath) is None\n ):\n return {\"protocol\": \"file\", \"path\": filepath}\n\n parsed_path = urlsplit(filepath)\n protocol = parsed_path.scheme or \"file\"\n\n if protocol in HTTP_PROTOCOLS:\n return {\"protocol\": protocol, \"path\": filepath}\n\n path = parsed_path.path\n if protocol == \"file\":\n windows_path = re.match(r\"^/([a-zA-Z])[:|]([\\\\/].*)$\", path)\n if windows_path:\n path = \":\".join(windows_path.groups())\n\n options = {\"protocol\": protocol, \"path\": path}\n\n if parsed_path.netloc:\n if protocol in CLOUD_PROTOCOLS:\n host_with_port = parsed_path.netloc.rsplit(\"@\", 1)[-1]\n host = host_with_port.rsplit(\":\", 1)[0]\n options[\"path\"] = host + options[\"path\"]\n\n return options\n\n\ndef get_protocol_and_path(filepath: str, version: Version = None) -> Tuple[str, str]:\n \"\"\"Parses filepath on protocol and path.\n\n Args:\n filepath: raw filepath e.g.: `gcs://bucket/test.json`.\n version: instance of ``kedro.io.core.Version`` or None.\n\n Returns:\n Protocol and path.\n\n Raises:\n DataSetError: when protocol is http(s) and version is not None.\n Note: HTTP(s) dataset doesn't support versioning.\n \"\"\"\n options_dict = _parse_filepath(filepath)\n path = options_dict[\"path\"]\n protocol = options_dict[\"protocol\"]\n\n if protocol in HTTP_PROTOCOLS:\n if version is not None:\n raise DataSetError(\n \"HTTP(s) DataSet doesn't support versioning. \"\n \"Please remove version flag from the dataset configuration.\"\n )\n path = path.split(PROTOCOL_DELIMITER, 1)[-1]\n\n return protocol, path\n\n\ndef get_filepath_str(path: PurePath, protocol: str) -> str:\n \"\"\"Returns filepath. Returns full filepath (with protocol) if protocol is HTTP(s).\n\n Args:\n path: filepath without protocol.\n protocol: protocol.\n\n Returns:\n Filepath string.\n \"\"\"\n path = path.as_posix()\n if protocol in HTTP_PROTOCOLS:\n path = \"\".join((protocol, PROTOCOL_DELIMITER, path))\n return path\n\n\ndef validate_on_forbidden_chars(**kwargs):\n \"\"\"Validate that string values do not include white-spaces or ;\"\"\"\n for key, value in kwargs.items():\n if \" \" in value or \";\" in value:\n raise DataSetError(\n f\"Neither white-space nor semicolon are allowed in '{key}'.\"\n )\n", "path": "kedro/io/core.py" } ]
diff --git a/RELEASE.md b/RELEASE.md index 5996bd108d..9f274159b7 100644 --- a/RELEASE.md +++ b/RELEASE.md @@ -11,8 +11,9 @@ # Upcoming Release 0.18.4 ## Major features and improvements -* The config loader objects now implement `UserDict` and the configuration is accessed through `conf_loader['catalog']` -* You can configure config file patterns through `settings.py` without creating a custom config loader +* Make Kedro instantiate datasets from `kedro_datasets` with higher priority than `kedro.extras.datasets`. `kedro_datasets` is the namespace for the new `kedro-datasets` python package. +* The config loader objects now implement `UserDict` and the configuration is accessed through `conf_loader['catalog']`. +* You can configure config file patterns through `settings.py` without creating a custom config loader. ## Bug fixes and other changes * Fixed `kedro micropkg pull` for packages on PyPI. diff --git a/kedro/io/core.py b/kedro/io/core.py index 98f2bb1d6e..289650a792 100644 --- a/kedro/io/core.py +++ b/kedro/io/core.py @@ -347,7 +347,7 @@ class Version(namedtuple("Version", ["load", "save"])): "intermediate data sets where possible to avoid this warning." ) -_DEFAULT_PACKAGES = ["kedro.io.", "kedro.extras.datasets.", ""] +_DEFAULT_PACKAGES = ["kedro.io.", "kedro_datasets.", "kedro.extras.datasets.", ""] def parse_dataset_definition( diff --git a/tests/io/test_data_catalog.py b/tests/io/test_data_catalog.py index 76e18dcae3..b78dfdfde5 100644 --- a/tests/io/test_data_catalog.py +++ b/tests/io/test_data_catalog.py @@ -19,7 +19,13 @@ LambdaDataSet, MemoryDataSet, ) -from kedro.io.core import VERSION_FORMAT, Version, generate_timestamp +from kedro.io.core import ( + _DEFAULT_PACKAGES, + VERSION_FORMAT, + Version, + generate_timestamp, + parse_dataset_definition, +) @pytest.fixture @@ -373,6 +379,20 @@ def test_config_relative_import(self, sane_config): with pytest.raises(DataSetError, match=re.escape(pattern)): DataCatalog.from_config(**sane_config) + def test_config_import_kedro_datasets(self, sane_config, mocker): + """Test kedro.extras.datasets default path to the dataset class""" + # Spy _load_obj because kedro_datasets is not installed and we can't import it. + + import kedro.io.core # pylint: disable=import-outside-toplevel + + spy = mocker.spy(kedro.io.core, "_load_obj") + parse_dataset_definition(sane_config["catalog"]["boats"]) + for prefix, call_args in zip(_DEFAULT_PACKAGES, spy.call_args_list): + # In Python 3.7 call_args.args is not available thus we access the call + # arguments with less meaningful index. + # The 1st index returns a tuple, the 2nd index return the name of module. + assert call_args[0][0] == f"{prefix}pandas.CSVDataSet" + def test_config_import_extras(self, sane_config): """Test kedro.extras.datasets default path to the dataset class""" sane_config["catalog"]["boats"]["type"] = "pandas.CSVDataSet"
flairNLP__flair-419
Logging overwrite less sweeping To be removed, once it is done: Please add the appropriate label to this ticket, e.g. feature or enhancement. **Is your feature/enhancement request related to a problem? Please describe.** When using flair in other applications, the fact that it disables existing logs in `__init__.py` can be detrimental. For instance when wrapping it up as a component in a tool like rasa_nlu, importing flair overrides all logging except its own, breaking functionality in rasa_nlu. This is the [line that does so ](https://github.com/zalandoresearch/flair/blob/c2bb0d8776f25493a5b994dcd89a96f71ac175b8/flair/__init__.py#L13) and it was done on purpose to disable BERT logging in #282 . **Describe the solution you'd like** Ideally, the problem of disabling logging from certain known dependencies should be much more limited in scope. Importing flair as a package shouldn't disable all the other loggers. At a minimum, perhaps the logging could only *optionally* disable all existing logs
[ { "content": "import torch\n\nfrom . import data\nfrom . import models\nfrom . import visual\nfrom . import trainers\n\nimport logging.config\n\n\nlogging.config.dictConfig({\n 'version': 1,\n 'disable_existing_loggers': True,\n 'formatters': {\n 'standard': {\n 'format': '%(asctime)-15s %(message)s'\n },\n },\n 'handlers': {\n 'console': {\n 'level': 'INFO',\n 'class': 'logging.StreamHandler',\n 'formatter': 'standard',\n 'stream': 'ext://sys.stdout'\n },\n },\n 'loggers': {\n 'flair': {\n 'handlers': ['console'],\n 'level': 'INFO',\n 'propagate': False\n }\n },\n 'root': {\n 'handlers': ['console'],\n 'level': 'WARNING'\n }\n})\n\nlogger = logging.getLogger('flair')\n\n\ndevice = None\nif torch.cuda.is_available():\n device = torch.device('cuda:0')\nelse:\n device = torch.device('cpu')\n", "path": "flair/__init__.py" } ]
[ { "content": "import torch\n\nfrom . import data\nfrom . import models\nfrom . import visual\nfrom . import trainers\n\nimport logging.config\n\n\nlogging.config.dictConfig({\n 'version': 1,\n 'disable_existing_loggers': False,\n 'formatters': {\n 'standard': {\n 'format': '%(asctime)-15s %(message)s'\n },\n },\n 'handlers': {\n 'console': {\n 'level': 'INFO',\n 'class': 'logging.StreamHandler',\n 'formatter': 'standard',\n 'stream': 'ext://sys.stdout'\n },\n },\n 'loggers': {\n 'flair': {\n 'handlers': ['console'],\n 'level': 'INFO',\n 'propagate': False\n }\n },\n 'root': {\n 'handlers': ['console'],\n 'level': 'WARNING'\n }\n})\n\nlogger = logging.getLogger('flair')\n\n\ndevice = None\nif torch.cuda.is_available():\n device = torch.device('cuda:0')\nelse:\n device = torch.device('cpu')\n", "path": "flair/__init__.py" } ]
diff --git a/flair/__init__.py b/flair/__init__.py index 7bb43a8f0f..22803f63c4 100644 --- a/flair/__init__.py +++ b/flair/__init__.py @@ -10,7 +10,7 @@ logging.config.dictConfig({ 'version': 1, - 'disable_existing_loggers': True, + 'disable_existing_loggers': False, 'formatters': { 'standard': { 'format': '%(asctime)-15s %(message)s' diff --git a/tests/test_embeddings.py b/tests/test_embeddings.py index 49833c761a..529ac3b9a8 100644 --- a/tests/test_embeddings.py +++ b/tests/test_embeddings.py @@ -1,8 +1,7 @@ import pytest -import os -from flair.embeddings import WordEmbeddings, TokenEmbeddings, CharLMEmbeddings, StackedEmbeddings, \ - DocumentLSTMEmbeddings, DocumentMeanEmbeddings, DocumentPoolEmbeddings +from flair.embeddings import WordEmbeddings, TokenEmbeddings, StackedEmbeddings, DocumentLSTMEmbeddings, \ + DocumentPoolEmbeddings, FlairEmbeddings from flair.data import Sentence @@ -84,7 +83,7 @@ def test_loading_not_existing_embedding(): def test_loading_not_existing_char_lm_embedding(): with pytest.raises(ValueError): - CharLMEmbeddings('other') + FlairEmbeddings('other') @pytest.mark.integration @@ -158,7 +157,7 @@ def init_document_embeddings(): sentence: Sentence = Sentence(text) glove: TokenEmbeddings = WordEmbeddings('en-glove') - charlm: TokenEmbeddings = CharLMEmbeddings('news-forward-fast') + charlm: TokenEmbeddings = FlairEmbeddings('news-forward-fast') return sentence, glove, charlm @@ -180,7 +179,7 @@ def load_and_apply_word_embeddings(emb_type: str): def load_and_apply_char_lm_embeddings(emb_type: str): text = 'I love Berlin.' sentence: Sentence = Sentence(text) - embeddings: TokenEmbeddings = CharLMEmbeddings(emb_type) + embeddings: TokenEmbeddings = FlairEmbeddings(emb_type) embeddings.embed(sentence) for token in sentence.tokens: diff --git a/tests/test_visual.py b/tests/test_visual.py index b3afb91dc3..7a1119c306 100644 --- a/tests/test_visual.py +++ b/tests/test_visual.py @@ -4,7 +4,7 @@ from flair.visual import * from flair.data import Sentence -from flair.embeddings import CharLMEmbeddings, StackedEmbeddings +from flair.embeddings import FlairEmbeddings, StackedEmbeddings import numpy from flair.visual.manifold import Visualizer, tSNE @@ -19,8 +19,8 @@ def test_visualize_word_emeddings(resources_path): sentences = [Sentence(x) for x in sentences] - charlm_embedding_forward = CharLMEmbeddings('news-forward') - charlm_embedding_backward = CharLMEmbeddings('news-backward') + charlm_embedding_forward = FlairEmbeddings('news-forward') + charlm_embedding_backward = FlairEmbeddings('news-backward') embeddings = StackedEmbeddings([charlm_embedding_backward, charlm_embedding_forward]) @@ -39,7 +39,7 @@ def test_visualize_word_emeddings(resources_path): sentences = [Sentence(x) for x in sentences] - charlm_embedding_forward = CharLMEmbeddings('news-forward') + charlm_embedding_forward = FlairEmbeddings('news-forward') visualizer = Visualizer() visualizer.visualize_char_emeddings(charlm_embedding_forward, sentences, str(resources_path / 'visual/sentence_embeddings.html')) @@ -56,13 +56,13 @@ def test_visualize(resources_path): sentences = [Sentence(x) for x in sentences] - embeddings = CharLMEmbeddings('news-forward') + embeddings = FlairEmbeddings('news-forward') visualizer = Visualizer() X_forward = visualizer.prepare_char_embeddings(embeddings, sentences) - embeddings = CharLMEmbeddings('news-backward') + embeddings = FlairEmbeddings('news-backward') X_backward = visualizer.prepare_char_embeddings(embeddings, sentences) @@ -83,7 +83,7 @@ def test_highlighter(resources_path): with (resources_path / 'visual/snippet.txt').open() as f: sentences = [x for x in f.read().split('\n') if x] - embeddings = CharLMEmbeddings('news-forward') + embeddings = FlairEmbeddings('news-forward') features = embeddings.lm.get_representation(sentences[0]).squeeze()
pyro-ppl__pyro-1629
[FR] Add tutorial on implementing new effects Users at PROBPROG 2018 requested a tutorial on high-level Pyro architecture including advanced features like poutines. This issue proposes adding a tutorial on implementing a new effect handler. Whereas #1553 should aim to explain Pyro's architecture in a simplified way, this tutorial should prepare developers to make changes to Pyro.
[ { "content": "import functools\n\nfrom pyro.params.param_store import _MODULE_NAMESPACE_DIVIDER, ParamStoreDict # noqa: F401\n\n# the global pyro stack\n_PYRO_STACK = []\n\n# the global ParamStore\n_PYRO_PARAM_STORE = ParamStoreDict()\n\n\nclass _DimAllocator(object):\n \"\"\"\n Dimension allocator for internal use by :class:`plate`.\n There is a single global instance.\n\n Note that dimensions are indexed from the right, e.g. -1, -2.\n \"\"\"\n def __init__(self):\n self._stack = [] # in reverse orientation of log_prob.shape\n\n def allocate(self, name, dim):\n \"\"\"\n Allocate a dimension to an :class:`plate` with given name.\n Dim should be either None for automatic allocation or a negative\n integer for manual allocation.\n \"\"\"\n if name in self._stack:\n raise ValueError('duplicate plate \"{}\"'.format(name))\n if dim is None:\n # Automatically designate the rightmost available dim for allocation.\n dim = -1\n while -dim <= len(self._stack) and self._stack[-1 - dim] is not None:\n dim -= 1\n elif dim >= 0:\n raise ValueError('Expected dim < 0 to index from the right, actual {}'.format(dim))\n\n # Allocate the requested dimension.\n while dim < -len(self._stack):\n self._stack.append(None)\n if self._stack[-1 - dim] is not None:\n raise ValueError('\\n'.join([\n 'at plates \"{}\" and \"{}\", collide at dim={}'.format(name, self._stack[-1 - dim], dim),\n '\\nTry moving the dim of one plate to the left, e.g. dim={}'.format(dim - 1)]))\n self._stack[-1 - dim] = name\n return dim\n\n def free(self, name, dim):\n \"\"\"\n Free a dimension.\n \"\"\"\n free_idx = -1 - dim # stack index to free\n assert self._stack[free_idx] == name\n self._stack[free_idx] = None\n while self._stack and self._stack[-1] is None:\n self._stack.pop()\n\n\n# Handles placement of plate dimensions\n_DIM_ALLOCATOR = _DimAllocator()\n\n\nclass _EnumAllocator(object):\n \"\"\"\n Dimension allocator for internal use by :func:`~pyro.poutine.markov`.\n There is a single global instance.\n\n Note that dimensions are indexed from the right, e.g. -1, -2.\n Note that ids are simply nonnegative integers here.\n \"\"\"\n def set_first_available_dim(self, first_available_dim):\n \"\"\"\n Set the first available dim, which should be to the left of all\n :class:`plate` dimensions, e.g. ``-1 - max_plate_nesting``. This should\n be called once per program. In SVI this should be called only once per\n (guide,model) pair.\n \"\"\"\n assert first_available_dim < 0, first_available_dim\n self.next_available_dim = first_available_dim\n self.next_available_id = 0\n self.dim_to_id = {} # only the global ids\n\n def allocate(self, scope_dims=None):\n \"\"\"\n Allocate a new recyclable dim and a unique id.\n\n If ``scope_dims`` is None, this allocates a global enumeration dim\n that will never be recycled. If ``scope_dims`` is specified, this\n allocates a local enumeration dim that can be reused by at any other\n local site whose scope excludes this site.\n\n :param set scope_dims: An optional set of (negative integer)\n local enumeration dims to avoid when allocating this dim.\n :return: A pair ``(dim, id)``, where ``dim`` is a negative integer\n and ``id`` is a nonnegative integer.\n :rtype: tuple\n \"\"\"\n id_ = self.next_available_id\n self.next_available_id += 1\n\n dim = self.next_available_dim\n if dim == -float('inf'):\n raise ValueError(\"max_plate_nesting must be set to a finite value for parallel enumeration\")\n if scope_dims is None:\n # allocate a new global dimension\n self.next_available_dim -= 1\n self.dim_to_id[dim] = id_\n else:\n # allocate a new local dimension\n while dim in scope_dims:\n dim -= 1\n\n return dim, id_\n\n\n# Handles placement of enumeration dimensions\n_ENUM_ALLOCATOR = _EnumAllocator()\n\n\nclass NonlocalExit(Exception):\n \"\"\"\n Exception for exiting nonlocally from poutine execution.\n\n Used by poutine.EscapeMessenger to return site information.\n \"\"\"\n def __init__(self, site, *args, **kwargs):\n \"\"\"\n :param site: message at a pyro site\n\n constructor. Just stores the input site.\n \"\"\"\n super(NonlocalExit, self).__init__(*args, **kwargs)\n self.site = site\n\n def reset_stack(self):\n \"\"\"\n Reset the state of the frames remaining in the stack.\n Necessary for multiple re-executions in poutine.queue.\n \"\"\"\n for frame in reversed(_PYRO_STACK):\n frame._reset()\n if type(frame).__name__ == \"BlockMessenger\" and frame.hide_fn(self.site):\n break\n\n\ndef default_process_message(msg):\n \"\"\"\n Default method for processing messages in inference.\n :param msg: a message to be processed\n :returns: None\n \"\"\"\n if msg[\"done\"] or msg[\"is_observed\"]:\n msg[\"done\"] = True\n return msg\n\n msg[\"value\"] = msg[\"fn\"](*msg[\"args\"], **msg[\"kwargs\"])\n\n # after fn has been called, update msg to prevent it from being called again.\n msg[\"done\"] = True\n\n\ndef apply_stack(initial_msg):\n \"\"\"\n Execute the effect stack at a single site according to the following scheme:\n\n 1. For each ``Messenger`` in the stack from bottom to top,\n execute ``Messenger._process_message`` with the message;\n if the message field \"stop\" is True, stop;\n otherwise, continue\n 2. Apply default behavior (``default_process_message``) to finish remaining site execution\n 3. For each ``Messenger`` in the stack from top to bottom,\n execute ``_postprocess_message`` to update the message and internal messenger state with the site results\n 4. If the message field \"continuation\" is not ``None``, call it with the message\n\n :param dict initial_msg: the starting version of the trace site\n :returns: ``None``\n \"\"\"\n stack = _PYRO_STACK\n # TODO check at runtime if stack is valid\n\n # msg is used to pass information up and down the stack\n msg = initial_msg\n\n pointer = 0\n # go until time to stop?\n for frame in reversed(stack):\n\n pointer = pointer + 1\n\n frame._process_message(msg)\n\n if msg[\"stop\"]:\n break\n\n default_process_message(msg)\n\n for frame in stack[-pointer:]: # reversed(stack[0:pointer])\n frame._postprocess_message(msg)\n\n cont = msg[\"continuation\"]\n if cont is not None:\n cont(msg)\n\n return None\n\n\ndef am_i_wrapped():\n \"\"\"\n Checks whether the current computation is wrapped in a poutine.\n :returns: bool\n \"\"\"\n return len(_PYRO_STACK) > 0\n\n\ndef effectful(fn=None, type=None):\n \"\"\"\n :param fn: function or callable that performs an effectful computation\n :param str type: the type label of the operation, e.g. `\"sample\"`\n\n Wrapper for calling :func:~`pyro.poutine.runtime.apply_stack` to apply any active effects.\n \"\"\"\n if fn is None:\n return functools.partial(effectful, type=type)\n\n if getattr(fn, \"_is_effectful\", None):\n return fn\n\n assert type is not None, \"must provide a type label for operation {}\".format(fn)\n assert type != \"message\", \"cannot use 'message' as keyword\"\n\n def _fn(*args, **kwargs):\n\n name = kwargs.pop(\"name\", None)\n infer = kwargs.pop(\"infer\", {})\n\n value = kwargs.pop(\"obs\", None)\n is_observed = value is not None\n\n if not am_i_wrapped():\n return fn(*args, **kwargs)\n else:\n msg = {\n \"type\": type,\n \"name\": name,\n \"fn\": fn,\n \"is_observed\": is_observed,\n \"args\": args,\n \"kwargs\": kwargs,\n \"value\": value,\n \"scale\": 1.0,\n \"mask\": None,\n \"cond_indep_stack\": (),\n \"done\": False,\n \"stop\": False,\n \"continuation\": None,\n \"infer\": infer,\n }\n # apply the stack and return its return value\n apply_stack(msg)\n return msg[\"value\"]\n _fn._is_effectful = True\n return _fn\n", "path": "pyro/poutine/runtime.py" } ]
[ { "content": "import functools\n\nfrom pyro.params.param_store import _MODULE_NAMESPACE_DIVIDER, ParamStoreDict # noqa: F401\n\n# the global pyro stack\n_PYRO_STACK = []\n\n# the global ParamStore\n_PYRO_PARAM_STORE = ParamStoreDict()\n\n\nclass _DimAllocator(object):\n \"\"\"\n Dimension allocator for internal use by :class:`plate`.\n There is a single global instance.\n\n Note that dimensions are indexed from the right, e.g. -1, -2.\n \"\"\"\n def __init__(self):\n self._stack = [] # in reverse orientation of log_prob.shape\n\n def allocate(self, name, dim):\n \"\"\"\n Allocate a dimension to an :class:`plate` with given name.\n Dim should be either None for automatic allocation or a negative\n integer for manual allocation.\n \"\"\"\n if name in self._stack:\n raise ValueError('duplicate plate \"{}\"'.format(name))\n if dim is None:\n # Automatically designate the rightmost available dim for allocation.\n dim = -1\n while -dim <= len(self._stack) and self._stack[-1 - dim] is not None:\n dim -= 1\n elif dim >= 0:\n raise ValueError('Expected dim < 0 to index from the right, actual {}'.format(dim))\n\n # Allocate the requested dimension.\n while dim < -len(self._stack):\n self._stack.append(None)\n if self._stack[-1 - dim] is not None:\n raise ValueError('\\n'.join([\n 'at plates \"{}\" and \"{}\", collide at dim={}'.format(name, self._stack[-1 - dim], dim),\n '\\nTry moving the dim of one plate to the left, e.g. dim={}'.format(dim - 1)]))\n self._stack[-1 - dim] = name\n return dim\n\n def free(self, name, dim):\n \"\"\"\n Free a dimension.\n \"\"\"\n free_idx = -1 - dim # stack index to free\n assert self._stack[free_idx] == name\n self._stack[free_idx] = None\n while self._stack and self._stack[-1] is None:\n self._stack.pop()\n\n\n# Handles placement of plate dimensions\n_DIM_ALLOCATOR = _DimAllocator()\n\n\nclass _EnumAllocator(object):\n \"\"\"\n Dimension allocator for internal use by :func:`~pyro.poutine.markov`.\n There is a single global instance.\n\n Note that dimensions are indexed from the right, e.g. -1, -2.\n Note that ids are simply nonnegative integers here.\n \"\"\"\n def set_first_available_dim(self, first_available_dim):\n \"\"\"\n Set the first available dim, which should be to the left of all\n :class:`plate` dimensions, e.g. ``-1 - max_plate_nesting``. This should\n be called once per program. In SVI this should be called only once per\n (guide,model) pair.\n \"\"\"\n assert first_available_dim < 0, first_available_dim\n self.next_available_dim = first_available_dim\n self.next_available_id = 0\n self.dim_to_id = {} # only the global ids\n\n def allocate(self, scope_dims=None):\n \"\"\"\n Allocate a new recyclable dim and a unique id.\n\n If ``scope_dims`` is None, this allocates a global enumeration dim\n that will never be recycled. If ``scope_dims`` is specified, this\n allocates a local enumeration dim that can be reused by at any other\n local site whose scope excludes this site.\n\n :param set scope_dims: An optional set of (negative integer)\n local enumeration dims to avoid when allocating this dim.\n :return: A pair ``(dim, id)``, where ``dim`` is a negative integer\n and ``id`` is a nonnegative integer.\n :rtype: tuple\n \"\"\"\n id_ = self.next_available_id\n self.next_available_id += 1\n\n dim = self.next_available_dim\n if dim == -float('inf'):\n raise ValueError(\"max_plate_nesting must be set to a finite value for parallel enumeration\")\n if scope_dims is None:\n # allocate a new global dimension\n self.next_available_dim -= 1\n self.dim_to_id[dim] = id_\n else:\n # allocate a new local dimension\n while dim in scope_dims:\n dim -= 1\n\n return dim, id_\n\n\n# Handles placement of enumeration dimensions\n_ENUM_ALLOCATOR = _EnumAllocator()\n\n\nclass NonlocalExit(Exception):\n \"\"\"\n Exception for exiting nonlocally from poutine execution.\n\n Used by poutine.EscapeMessenger to return site information.\n \"\"\"\n def __init__(self, site, *args, **kwargs):\n \"\"\"\n :param site: message at a pyro site\n\n constructor. Just stores the input site.\n \"\"\"\n super(NonlocalExit, self).__init__(*args, **kwargs)\n self.site = site\n\n def reset_stack(self):\n \"\"\"\n Reset the state of the frames remaining in the stack.\n Necessary for multiple re-executions in poutine.queue.\n \"\"\"\n for frame in reversed(_PYRO_STACK):\n frame._reset()\n if type(frame).__name__ == \"BlockMessenger\" and frame.hide_fn(self.site):\n break\n\n\ndef default_process_message(msg):\n \"\"\"\n Default method for processing messages in inference.\n :param msg: a message to be processed\n :returns: None\n \"\"\"\n if msg[\"done\"] or msg[\"is_observed\"] or msg[\"value\"] is not None:\n msg[\"done\"] = True\n return msg\n\n msg[\"value\"] = msg[\"fn\"](*msg[\"args\"], **msg[\"kwargs\"])\n\n # after fn has been called, update msg to prevent it from being called again.\n msg[\"done\"] = True\n\n\ndef apply_stack(initial_msg):\n \"\"\"\n Execute the effect stack at a single site according to the following scheme:\n\n 1. For each ``Messenger`` in the stack from bottom to top,\n execute ``Messenger._process_message`` with the message;\n if the message field \"stop\" is True, stop;\n otherwise, continue\n 2. Apply default behavior (``default_process_message``) to finish remaining site execution\n 3. For each ``Messenger`` in the stack from top to bottom,\n execute ``_postprocess_message`` to update the message and internal messenger state with the site results\n 4. If the message field \"continuation\" is not ``None``, call it with the message\n\n :param dict initial_msg: the starting version of the trace site\n :returns: ``None``\n \"\"\"\n stack = _PYRO_STACK\n # TODO check at runtime if stack is valid\n\n # msg is used to pass information up and down the stack\n msg = initial_msg\n\n pointer = 0\n # go until time to stop?\n for frame in reversed(stack):\n\n pointer = pointer + 1\n\n frame._process_message(msg)\n\n if msg[\"stop\"]:\n break\n\n default_process_message(msg)\n\n for frame in stack[-pointer:]: # reversed(stack[0:pointer])\n frame._postprocess_message(msg)\n\n cont = msg[\"continuation\"]\n if cont is not None:\n cont(msg)\n\n return None\n\n\ndef am_i_wrapped():\n \"\"\"\n Checks whether the current computation is wrapped in a poutine.\n :returns: bool\n \"\"\"\n return len(_PYRO_STACK) > 0\n\n\ndef effectful(fn=None, type=None):\n \"\"\"\n :param fn: function or callable that performs an effectful computation\n :param str type: the type label of the operation, e.g. `\"sample\"`\n\n Wrapper for calling :func:~`pyro.poutine.runtime.apply_stack` to apply any active effects.\n \"\"\"\n if fn is None:\n return functools.partial(effectful, type=type)\n\n if getattr(fn, \"_is_effectful\", None):\n return fn\n\n assert type is not None, \"must provide a type label for operation {}\".format(fn)\n assert type != \"message\", \"cannot use 'message' as keyword\"\n\n def _fn(*args, **kwargs):\n\n name = kwargs.pop(\"name\", None)\n infer = kwargs.pop(\"infer\", {})\n\n value = kwargs.pop(\"obs\", None)\n is_observed = value is not None\n\n if not am_i_wrapped():\n return fn(*args, **kwargs)\n else:\n msg = {\n \"type\": type,\n \"name\": name,\n \"fn\": fn,\n \"is_observed\": is_observed,\n \"args\": args,\n \"kwargs\": kwargs,\n \"value\": value,\n \"scale\": 1.0,\n \"mask\": None,\n \"cond_indep_stack\": (),\n \"done\": False,\n \"stop\": False,\n \"continuation\": None,\n \"infer\": infer,\n }\n # apply the stack and return its return value\n apply_stack(msg)\n return msg[\"value\"]\n _fn._is_effectful = True\n return _fn\n", "path": "pyro/poutine/runtime.py" } ]
diff --git a/pyro/poutine/runtime.py b/pyro/poutine/runtime.py index 72e220f069..e210741145 100644 --- a/pyro/poutine/runtime.py +++ b/pyro/poutine/runtime.py @@ -149,7 +149,7 @@ def default_process_message(msg): :param msg: a message to be processed :returns: None """ - if msg["done"] or msg["is_observed"]: + if msg["done"] or msg["is_observed"] or msg["value"] is not None: msg["done"] = True return msg diff --git a/tutorial/source/effect_handlers.ipynb b/tutorial/source/effect_handlers.ipynb new file mode 100644 index 0000000000..50313eb88a --- /dev/null +++ b/tutorial/source/effect_handlers.ipynb @@ -0,0 +1,661 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Poutine: a guide to programming with effect handlers in Pyro\n", + "\n", + "**Note to readers**: This tutorial is a guide to the API details of Pyro's effect handling library, [Poutine](http://docs.pyro.ai/en/dev/poutine.html). We recommend readers first orient themselves with the simplified [minipyro.py](https://github.com/uber/pyro/blob/dev/pyro/contrib/minipyro.py) which contains a minimal, readable implementation of Pyro's runtime and the effect handler abstraction described here. Pyro's effect handler library is more general than minipyro's but also contains more layers of indirection; it helps to read them side-by-side." + ] + }, + { + "cell_type": "code", + "execution_count": 1, + "metadata": {}, + "outputs": [], + "source": [ + "import torch\n", + "\n", + "import pyro\n", + "import pyro.distributions as dist\n", + "import pyro.poutine as poutine\n", + "\n", + "from pyro.poutine.runtime import effectful\n", + "\n", + "pyro.set_rng_seed(101)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Introduction\n", + "\n", + "Inference in probabilistic programming involves manipulating or transforming probabilistic programs written as generative models. For example, nearly all approximate inference algorithms require computing the unnormalized joint probability of values of latent and observed variables under a generative model.\n", + "\n", + "Consider the following example model from the [introductory inference tutorial](http://pyro.ai/examples/intro_part_ii.html):" + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "metadata": {}, + "outputs": [], + "source": [ + "def scale(guess):\n", + " weight = pyro.sample(\"weight\", dist.Normal(guess, 1.0))\n", + " return pyro.sample(\"measurement\", dist.Normal(weight, 0.75))" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "This model defines a joint probability distribution over `\"weight\"` and `\"measurement\"`:\n", + "\n", + "$${\\sf weight} \\, | \\, {\\sf guess} \\sim \\cal {\\sf Normal}({\\sf guess}, 1) $$\n", + "$${\\sf measurement} \\, | \\, {\\sf guess}, {\\sf weight} \\sim {\\sf Normal}({\\sf weight}, 0.75)$$\n", + "\n", + "If we had access to the inputs and outputs of each `pyro.sample` site, we could compute their log-joint:\n", + "```python\n", + "logp = dist.Normal(guess, 1.0).log_prob(weight).sum() + dist.Normal(weight, 0.75).log_prob(measurement).sum()\n", + "```\n", + "However, the way we wrote `scale` above does not seem to expose these intermediate distribution objects, and rewriting it to return them would be intrusive and would violate the separation of concerns between models and inference algorithms that a probabilistic programming language like Pyro is designed to enforce.\n", + "\n", + "To resolve this conflict and facilitate inference algorithm development, Pyro exposes [Poutine](http://docs.pyro.ai/en/dev/poutine.html), a library of *effect handlers*, or composable building blocks for examining and modifying the behavior of Pyro programs. Most of Pyro's internals are implemented on top of Poutine." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## A first look at Poutine: Pyro's library of algorithmic building blocks\n", + "\n", + "Effect handlers, a common abstraction in the programming languages community, give *nonstandard interpretations* or *side effects* to the behavior of particular statements in a programming language, like `pyro.sample` or `pyro.param`. For background reading on effect handlers in programming language research, see the optional \"Background\" section at the end of this tutorial. \n", + "\n", + "Rather than reviewing more definitions, let's look at a first example that addresses the problem above: we can compose two existing effect handlers, `poutine.condition` (which sets output values of `pyro.sample` statements) and `poutine.trace` (which records the inputs, distributions, and outputs of `pyro.sample` statements), to concisely define a new effect handler that computes the log-joint:" + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "tensor(-3.0203)\n" + ] + } + ], + "source": [ + "def make_log_joint(model):\n", + " def _log_joint(cond_data, *args, **kwargs):\n", + " conditioned_model = poutine.condition(model, data=cond_data)\n", + " trace = poutine.trace(conditioned_model).get_trace(*args, **kwargs)\n", + " return trace.log_prob_sum()\n", + " return _log_joint\n", + "\n", + "scale_log_joint = make_log_joint(scale)\n", + "print(scale_log_joint({\"measurement\": 9.5, \"weight\": 8.23}, 8.5))" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "That snippet is short, but still somewhat opaque - `poutine.condition`, `poutine.trace`, and `trace.log_prob_sum` are all black boxes. Let's remove a layer of boilerplate from `poutine.condition` and `poutine.trace` and explicitly implement what `trace.log_prob_sum` is doing:" + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "tensor(-3.0203)\n" + ] + } + ], + "source": [ + "from pyro.poutine.trace_messenger import TraceMessenger\n", + "from pyro.poutine.condition_messenger import ConditionMessenger\n", + "\n", + "def make_log_joint_2(model):\n", + " def _log_joint(cond_data, *args, **kwargs):\n", + " with TraceMessenger() as tracer:\n", + " with ConditionMessenger(data=cond_data):\n", + " model(*args, **kwargs)\n", + " \n", + " trace = tracer.trace\n", + " logp = 0.\n", + " for name, node in trace.nodes.items():\n", + " if node[\"type\"] == \"sample\":\n", + " if node[\"is_observed\"]:\n", + " assert node[\"value\"] is cond_data[name]\n", + " logp = logp + node[\"fn\"].log_prob(node[\"value\"]).sum()\n", + " return logp\n", + " return _log_joint\n", + "\n", + "scale_log_joint = make_log_joint_2(scale)\n", + "print(scale_log_joint({\"measurement\": 9.5, \"weight\": 8.23}, 8.5))" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "This makes things a little more clear: we can now see that `poutine.trace` and `poutine.condition` are wrappers for context managers that presumably communicate with the model through something inside `pyro.sample`. We can also see that `poutine.trace` produces a data structure (a [`Trace`](http://docs.pyro.ai/en/dev/poutine.html#trace)) containing a dictionary whose keys are `sample` site names and values are dictionaries containing the distribution (`\"fn\"`) and output (`\"value\"`) at each site, and that the output values at each site are exactly the values specified in `data`.\n", + "\n", + "Finally, `TraceMessenger` and `ConditionMessenger` are Pyro effect handlers, or `Messenger`s: stateful context manager objects that are placed on a global stack and pass messages (hence the name) up and down the stack at each effectful operation, like a `pyro.sample` call. A `Messenger` is placed at the bottom of the stack when its `__enter__` method is called, i.e. when it is used in a \"with\" statement.\n", + "\n", + "For a simplified implementation of this mechanism in only a few lines of code, see [pyro.contrib.minipyro](https://github.com/uber/pyro/blob/dev/pyro/contrib/minipyro.py)." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Implementing new effect handlers with the `Messenger` API\n", + "\n", + "Although it's easiest to build new effect handlers by composing the existing ones in `pyro.poutine`, implementing a new effect as a `pyro.poutine.messenger.Messenger` subclass is actually fairly straightforward. As a first example, let's implement a version of our log-joint computation that performs the sum while the model is executing." + ] + }, + { + "cell_type": "code", + "execution_count": 5, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "tensor(-3.0203)\n", + "tensor(-3.0203)\n" + ] + } + ], + "source": [ + "class LogJointMessenger(poutine.messenger.Messenger):\n", + " \n", + " def __init__(self, cond_data):\n", + " self.data = cond_data\n", + " \n", + " # __call__ allows Messengers to be used as higher-order functions.\n", + " # Messenger already defines __call__, but we re-define it here\n", + " # for exposition and to change the return value:\n", + " def __call__(self, fn):\n", + " def _fn(*args, **kwargs):\n", + " with self:\n", + " fn(*args, **kwargs)\n", + " return self.logp.clone()\n", + " return _fn\n", + " \n", + " def __enter__(self):\n", + " self.logp = torch.tensor(0.)\n", + " return super(LogJointMessenger, self).__enter__()\n", + " \n", + " # __exit__ takes the same arguments in all Python context managers\n", + " def __exit__(self, exc_type, exc_value, traceback):\n", + " self.logp = torch.tensor(0.)\n", + " return super(LogJointMessenger, self).__exit__(exc_type, exc_value, traceback)\n", + " \n", + " def _pyro_sample(self, msg):\n", + " assert msg[\"name\"] in self.data\n", + " msg[\"value\"] = self.data[msg[\"name\"]]\n", + " # Since we've observed a value for this site, we set the \"is_observed\" flag to True\n", + " # This tells any other Messengers not to overwrite msg[\"value\"] with a sample.\n", + " msg[\"is_observed\"] = True\n", + " self.logp = self.logp + (msg[\"scale\"] * msg[\"fn\"].log_prob(msg[\"value\"])).sum()\n", + "\n", + "with LogJointMessenger(cond_data={\"measurement\": 9.5, \"weight\": 8.23}) as m:\n", + " scale(8.5)\n", + " print(m.logp.clone())\n", + " \n", + "scale_log_joint = LogJointMessenger(cond_data={\"measurement\": 9.5, \"weight\": 8.23})(scale)\n", + "print(scale_log_joint(8.5))" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "A convenient bit of boilerplate that allows the use of `LogJointMessenger` as a context manager, decorator, or higher-order function is the following. Most of the existing effect handlers in `pyro.poutine`, including `poutine.trace` and `poutine.condition` which we used earlier, are `Messenger`s wrapped this way in `pyro.poutine.handlers`." + ] + }, + { + "cell_type": "code", + "execution_count": 6, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "tensor(-3.0203)\n" + ] + } + ], + "source": [ + "def log_joint(model=None, cond_data=None):\n", + " msngr = LogJointMessenger(cond_data=cond_data)\n", + " return msngr(model) if model is not None else msngr\n", + "\n", + "scale_log_joint = log_joint(scale, cond_data={\"measurement\": 9.5, \"weight\": 8.23})\n", + "print(scale_log_joint(8.5))" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## The `Messenger` API in more detail\n", + "\n", + "A generic `Messenger` actually contains two methods that are called once per operation where side effects are performed:\n", + "1. `_process_message` modifies a message and passes the result to the `Messenger` just above on the stack\n", + "2. `_postprocess_message` modifies a message and passes the result to the next `Messenger` down on the stack.\n", + "\n", + "Although custom `Messenger`s can override these, it's convenient to avoid requiring all effect handlers to be aware of all possible effectful operation types. For this reason, by default `Messenger._process_message` will use `msg[\"type\"]` to dispatch to a corresponding method `Messenger._pyro_<type>`, e.g. `Messenger._pyro_sample` as we wrote in `LogJointMessenger`. Just as exception handling code ignores unhandled exception types, this allows `Messenger`s to simply pass operations they don't know how to handle up to the next `Messenger` in the stack:\n", + "```python\n", + "class Messenger(object):\n", + " ...\n", + " def _process_message(self, msg):\n", + " method_name = \"_pyro_{}\".format(msg[\"type\"]) # e.g. _pyro_sample when msg[\"type\"] == \"sample\"\n", + " if hasattr(self, method_name):\n", + " getattr(self, method_name)(msg)\n", + " ...\n", + "```\n", + "`_postprocess_message` is necessary because some effects can only be applied after all other effect handlers have had a chance to update the message once. In the case of `LogJointMessenger`, other effects, like enumeration, may modify a sample site's value or distribution (`msg[\"value\"]` or `msg[\"fn\"]`), so we move the log-probability computation to a new method, `_pyro_post_sample`, which is called by `_postprocess_message` at each `sample` site after all active handlers' `_pyro_sample` methods have been applied:" + ] + }, + { + "cell_type": "code", + "execution_count": 7, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "tensor(-3.0203)\n" + ] + } + ], + "source": [ + "class LogJointMessenger2(poutine.messenger.Messenger):\n", + " \n", + " def __init__(self, cond_data):\n", + " self.data = cond_data\n", + " \n", + " def __call__(self, fn):\n", + " def _fn(*args, **kwargs):\n", + " with self:\n", + " fn(*args, **kwargs)\n", + " return self.logp.clone()\n", + " return _fn\n", + " \n", + " def __enter__(self):\n", + " self.logp = torch.tensor(0.)\n", + " return super(LogJointMessenger2, self).__enter__()\n", + " \n", + " def __exit__(self, exc_type, exc_value, traceback):\n", + " self.logp = torch.tensor(0.)\n", + " return super(LogJointMessenger2, self).__exit__(exc_type, exc_value, traceback)\n", + "\n", + " def _pyro_sample(self, msg):\n", + " if msg[\"name\"] in self.data:\n", + " msg[\"value\"] = self.data[msg[\"name\"]]\n", + " msg[\"done\"] = True\n", + " \n", + " def _pyro_post_sample(self, msg):\n", + " assert msg[\"done\"] # the \"done\" flag asserts that no more modifications to value and fn will be performed.\n", + " self.logp = self.logp + (msg[\"scale\"] * msg[\"fn\"].log_prob(msg[\"value\"])).sum()\n", + "\n", + "\n", + "with LogJointMessenger2(cond_data={\"measurement\": 9.5, \"weight\": 8.23}) as m:\n", + " scale(8.5)\n", + " print(m.logp)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Inside the messages sent by `Messenger`s\n", + "\n", + "As the previous two examples suggest, the actual messages passed up and down the stack are dictionaries with a particular set of keys. Consider the following sample statement:\n", + "```python\n", + "pyro.sample(\"x\", dist.Bernoulli(0.5), infer={\"enumerate\": \"parallel\"}, obs=None)\n", + "```\n", + "This sample statement is converted into an initial message before any effects are applied, and each effect handler's `_process_message` and `_postprocess_message` may update fields in place or add new fields. We write out the full initial message here for completeness:\n", + "```python\n", + "msg = {\n", + " # The following fields contain the name, inputs, function, and output of a site.\n", + " # These are generally the only fields you'll need to think about.\n", + " \"name\": \"x\",\n", + " \"fn\": dist.Bernoulli(0.5),\n", + " \"value\": None, # msg[\"value\"] will eventually contain the value returned by pyro.sample\n", + " \"is_observed\": False, # because obs=None by default; only used by sample sites\n", + " \"args\": (), # positional arguments passed to \"fn\" when it is called; usually empty for sample sites\n", + " \"kwargs\": {}, # keyword arguments passed to \"fn\" when it is called; usually empty for sample sites\n", + " # This field typically contains metadata needed or stored by a particular inference algorithm\n", + " \"infer\": {\"enumerate\": \"parallel\"},\n", + " # The remaining fields are generally only used by Pyro's internals,\n", + " # or for implementing more advanced effects beyond the scope of this tutorial\n", + " \"type\": \"sample\", # label used by Messenger._process_message to dispatch, in this case to _pyro_sample\n", + " \"done\": False,\n", + " \"stop\": False,\n", + " \"scale\": torch.tensor(1.), # Multiplicative scale factor that can be applied to each site's log_prob\n", + " \"mask\": None,\n", + " \"continuation\": None,\n", + " \"cond_indep_stack\": (), # Will contain metadata from each pyro.plate enclosing this sample site.\n", + "}\n", + "```\n", + "Note that when we use `poutine.trace` or `TraceMessenger` as in our first two versions of `make_log_joint`, the contents of `msg` are exactly the information stored in the trace for each sample and param site." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Implementing inference algorithms with existing effect handlers: examples\n", + "\n", + "It turns out that many inference operations, like our first version of `make_log_joint` above, have strikingly short implementations in terms of existing effect handlers in `pyro.poutine`. " + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Example: Variational inference with a Monte Carlo ELBO\n", + "\n", + "For example, here is an implementation of variational inference with a Monte Carlo ELBO that uses `poutine.trace`, `poutine.condition`, and `poutine.replay`. This is very similar to the simple ELBO in [pyro.contrib.minipyro](https://github.com/uber/pyro/blob/dev/pyro/contrib/minipyro.py)." + ] + }, + { + "cell_type": "code", + "execution_count": 8, + "metadata": {}, + "outputs": [], + "source": [ + "def monte_carlo_elbo(model, guide, batch, *args, **kwargs):\n", + " # assuming batch is a dictionary, we use poutine.condition to fix values of observed variables\n", + " conditioned_model = poutine.condition(model, data=batch)\n", + " \n", + " # we'll approximate the expectation in the ELBO with a single sample:\n", + " # first, we run the guide forward unmodified and record values and distributions\n", + " # at each sample site using poutine.trace\n", + " guide_trace = poutine.trace(guide).get_trace(*args, **kwargs)\n", + " \n", + " # we use poutine.replay to set the values of latent variables in the model\n", + " # to the values sampled above by our guide, and use poutine.trace\n", + " # to record the distributions that appear at each sample site in in the model\n", + " model_trace = poutine.trace(\n", + " poutine.replay(conditioned_model, trace=guide_trace)\n", + " ).get_trace(*args, **kwargs)\n", + " \n", + " elbo = 0.\n", + " for name, node in model_trace.nodes.items():\n", + " if node[\"type\"] == \"sample\":\n", + " elbo = elbo + node[\"fn\"].log_prob(node[\"value\"]).sum()\n", + " if not node[\"is_observed\"]:\n", + " elbo = elbo - guide_trace.nodes[name][\"fn\"].log_prob(node[\"value\"]).sum()\n", + " return -elbo" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "We use `poutine.trace` and `poutine.block` to record `pyro.param` calls for optimization:" + ] + }, + { + "cell_type": "code", + "execution_count": 9, + "metadata": {}, + "outputs": [], + "source": [ + "def train(model, guide, data):\n", + " optimizer = pyro.optim.Adam({})\n", + " for batch in data:\n", + " # this poutine.trace will record all of the parameters that appear in the model and guide\n", + " # during the execution of monte_carlo_elbo\n", + " with poutine.trace() as param_capture:\n", + " # we use poutine.block here so that only parameters appear in the trace above\n", + " with poutine.block(hide_fn=lambda node: node[\"type\"] != \"param\"):\n", + " loss = monte_carlo_elbo(model, guide, batch)\n", + " \n", + " loss.backward()\n", + " params = set(node[\"value\"].unconstrained()\n", + " for node in param_capture.trace.nodes.values())\n", + " optimizer.step(params)\n", + " pyro.infer.util.zero_grads(params)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Example: exact inference via sequential enumeration\n", + "\n", + "Here is an example of a very different inference algorithm--exact inference via enumeration--implemented with `pyro.poutine`. A complete explanation of this algorithm is beyond the scope of this tutorial and may be found in Chapter 3 of the short online book [Design and Implementation of Probabilistic Programming Languages](http://dippl.org/chapters/03-enumeration.html). This example uses `poutine.queue`, itself implemented using `poutine.trace`, `poutine.replay`, and `poutine.block`, to enumerate over possible values of all discrete variables in a model and compute a marginal distribution over all possible return values or the possible values at a particular sample site:" + ] + }, + { + "cell_type": "code", + "execution_count": 10, + "metadata": {}, + "outputs": [], + "source": [ + "def sequential_discrete_marginal(model, data, site_name=\"_RETURN\"):\n", + " \n", + " from six.moves import queue # queue data structures\n", + " q = queue.Queue() # Instantiate a first-in first-out queue\n", + " q.put(poutine.Trace()) # seed the queue with an empty trace\n", + " \n", + " # as before, we fix the values of observed random variables with poutine.condition\n", + " # assuming data is a dictionary whose keys are names of sample sites in model\n", + " conditioned_model = poutine.condition(model, data=data)\n", + " \n", + " # we wrap the conditioned model in a poutine.queue,\n", + " # which repeatedly pushes and pops partially completed executions from a Queue()\n", + " # to perform breadth-first enumeration over the set of values of all discrete sample sites in model\n", + " enum_model = poutine.queue(conditioned_model, queue=q)\n", + " \n", + " # actually perform the enumeration by repeatedly tracing enum_model\n", + " # and accumulate samples and trace log-probabilities for postprocessing\n", + " samples, log_weights = [], []\n", + " while not q.empty():\n", + " trace = poutine.trace(enum_model).get_trace()\n", + " samples.append(trace.nodes[site_name][\"value\"])\n", + " log_weights.append(trace.log_prob_sum())\n", + " \n", + " # we take the samples and log-joints and turn them into a histogram:\n", + " samples = torch.stack(samples, 0)\n", + " log_weights = torch.stack(log_weights, 0)\n", + " log_weights = log_weights - dist.util.logsumexp(log_weights, dim=0)\n", + " return dist.Empirical(samples, log_weights)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "(Note that `sequential_discrete_marginal` is very general, but is also quite slow. For high-performance parallel enumeration that applies to a less general class of models, see the enumeration tutorial.)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Example: implementing lazy evaluation with the `Messenger` API\n", + "\n", + "Now that we've learned more about the internals of `Messenger`, let's use it to implement a slightly more complicated effect: lazy evaluation. We first define a `LazyValue` class that we will use to build up a computation graph:" + ] + }, + { + "cell_type": "code", + "execution_count": 11, + "metadata": {}, + "outputs": [], + "source": [ + "class LazyValue(object):\n", + " def __init__(self, fn, *args, **kwargs):\n", + " self._expr = (fn, args, kwargs)\n", + " self._value = None\n", + " \n", + " def __str__(self):\n", + " return \"({} {})\".format(str(self._expr[0]), \" \".join(map(str, self._expr[1])))\n", + " \n", + " def evaluate(self):\n", + " if self._value is None:\n", + " fn, args, kwargs = self._expr\n", + " fn = fn.evaluate() if isinstance(fn, LazyValue) else fn\n", + " args = tuple(arg.evaluate() if isinstance(arg, LazyValue) else arg\n", + " for arg in args)\n", + " kwargs = {k: v.evaluate() if isinstance(v, LazyValue) else v\n", + " for k, v in kwargs.items()}\n", + " self._value = fn(*args, **kwargs)\n", + " return self._value" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "With `LazyValue`, implementing lazy evaluation as a `Messenger` compatible with other effect handlers is suprisingly easy. We just make each `msg[\"value\"]` a `LazyValue` and introduce a new operation type `\"apply\"` for deterministic operations:" + ] + }, + { + "cell_type": "code", + "execution_count": 12, + "metadata": {}, + "outputs": [], + "source": [ + "class LazyMessenger(pyro.poutine.messenger.Messenger):\n", + " def _process_message(self, msg):\n", + " if msg[\"type\"] in (\"apply\", \"sample\") and not msg[\"done\"]:\n", + " msg[\"done\"] = True\n", + " msg[\"value\"] = LazyValue(msg[\"fn\"], *msg[\"args\"], **msg[\"kwargs\"])" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Finally, just like `torch.autograd` overloads `torch` tensor operations to record an autograd graph, we need to wrap any operations we'd like to be lazy. We'll use `pyro.poutine.runtime.effectful` as a decorator to expose these operations to `LazyMessenger`. `effectful` constructs a message much like the one above and passes it up and down the effect handler stack, but allows us to set the type (in this case, to `\"apply\"` instead of `\"sample\"`) so that these operations aren't mistaken for `sample` statements by other effect handlers like `TraceMessenger`:" + ] + }, + { + "cell_type": "code", + "execution_count": 13, + "metadata": {}, + "outputs": [], + "source": [ + "@effectful(type=\"apply\")\n", + "def add(x, y):\n", + " return x + y\n", + "\n", + "@effectful(type=\"apply\")\n", + "def mul(x, y):\n", + " return x * y\n", + "\n", + "@effectful(type=\"apply\")\n", + "def sigmoid(x):\n", + " return torch.sigmoid(x)\n", + "\n", + "@effectful(type=\"apply\")\n", + "def normal(loc, scale):\n", + " return dist.Normal(loc, scale)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Applied to another model:" + ] + }, + { + "cell_type": "code", + "execution_count": 14, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "((<function normal at 0x7ffdf2a8cd90> (<function add at 0x7ffdf2a8c840> (<function mul at 0x7ffdf2a8cb70> ((<function normal at 0x7ffdf2a8cd90> 8.5 1.0) ) 0.8) 1.0) (<function sigmoid at 0x7ffdf2a8cc80> ((<function normal at 0x7ffdf2a8cd90> 0.0 0.25) ))) )\n", + "tensor(6.5436)\n" + ] + } + ], + "source": [ + "def biased_scale(guess):\n", + " weight = pyro.sample(\"weight\", normal(guess, 1.))\n", + " tolerance = pyro.sample(\"tolerance\", normal(0., 0.25))\n", + " return pyro.sample(\"measurement\", normal(add(mul(weight, 0.8), 1.), sigmoid(tolerance)))\n", + "\n", + "with LazyMessenger():\n", + " v = biased_scale(8.5)\n", + " print(v)\n", + " print(v.evaluate())" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Together with other effect handlers like `TraceMessenger` and `ConditionMessenger`, with which it freely composes, `LazyMessenger` demonstrates how to use Poutine to quickly and concisely implement state-of-the-art PPL techniques like [delayed sampling with Rao-Blackwellization](https://arxiv.org/abs/1708.07787)." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Background: algebraic effects and handlers in programming language research\n", + "\n", + "This section contains some references to PL papers for readers interested in this direction.\n", + "\n", + "Algebraic effects and handlers, which were developed starting in the early 2000s and are a subject of active research in the programming languages community, are a versatile abstraction for building modular implementations of nonstandard interpreters of particular statements in a programming language, like `pyro.sample` or `pyro.param`. They were originally introduced to address the difficulty of composing nonstandard interpreters implemented with monads and monad transformers.\n", + "\n", + "- For an accessible introduction to the effect handlers literature, see the excellent review/tutorial paper [\"Handlers in Action\"](http://homepages.inf.ed.ac.uk/slindley/papers/handlers.pdf) by Ohad Kammar, Sam Lindley, and Nicolas Oury, and the references therein.\n", + "\n", + "- Algebraic effect handlers were originally introduced by Gordon Plotkin and Matija Pretnar in the paper [\"Handlers of Algebraic Effects\"](https://link.springer.com/chapter/10.1007/978-3-642-00590-9_7).\n", + "\n", + "- A useful mental model of effect handlers is as exception handlers that are capable of resuming computation in the `try` block after raising an exception and performing some processing in the `except` block. This metaphor is explored further in the experimental programming language [Eff](http://math.andrej.com/eff/) and its companion paper [\"Programming with Algebraic Effects and Handlers\"](https://arxiv.org/abs/1203.1539) by Andrej Bauer and Matija Pretnar.\n", + "\n", + "- Most effect handlers in Pyro are \"linear,\" meaning that they only resume once per effectful operation and do not alter the order of execution of the original program. One exception is `poutine.queue`, which uses an inefficient implementation strategy for multiple resumptions like the one described for delimited continuations in the paper [\"Capturing the Future by Replaying the Past\"](http://delivery.acm.org/10.1145/3240000/3236771/icfp18main-p36-p.pdf) by James Koppel, Gabriel Scherer, and Armando Solar-Lezama. \n", + "\n", + "- More efficient implementation strategies for effect handlers in mainstream programming languages like Python or JavaScript is an area of active research. One promising line of work involves selective continuation-passing style transforms as in the paper [\"Type-Directed Compilation of Row-Typed Algebraic Effects\"](https://www.microsoft.com/en-us/research/wp-content/uploads/2016/12/algeff.pdf) by Daan Leijen." + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.6.5" + } + }, + "nbformat": 4, + "nbformat_minor": 2 +} diff --git a/tutorial/source/index.rst b/tutorial/source/index.rst index 7794183966..d45722c324 100644 --- a/tutorial/source/index.rst +++ b/tutorial/source/index.rst @@ -19,6 +19,7 @@ Welcome to Pyro Examples and Tutorials! enumeration custom_objectives jit + effect_handlers .. toctree:: :maxdepth: 2
evennia__evennia-3018
[BUG - Develop] discord integration multiple connections #### Describe the bug I pulled the latest discord patch. it has successfully removed the traceback, but now it ends up sending each message 3 times. #### To Reproduce Steps to reproduce the behavior: 1. set up discord integration, including a bot. 2. leave your evennia game idle for a bit. 3. watch the discord bot connect, disconnect, connect, connect, disconnect, etc. I am now up to 4 discordbots logged in. They're multiplying. 4. See error when you then try to say something. [Mudinfo] [2022-12-02(20:47)]: DiscordBot disconnected () [Mudinfo] [2022-12-02(20:47)]: DiscordBot connected [Mudinfo] [2022-12-02(20:47)]: DiscordBot connected --------- New Activity --------- [Mudinfo] [2022-12-02(21:05)]: DiscordBot disconnected () [Mudinfo] [2022-12-02(21:05)]: DiscordBot connected [Mudinfo] [2022-12-02(21:05)]: DiscordBot connected [Mudinfo] [2022-12-02(21:05)]: DiscordBot disconnected () [Mudinfo] [2022-12-02(21:05)]: DiscordBot connected [Public] Radiance(#1) yays. it workssssss. on discord: [Public] Radiance yays. it workssssss. [Public] Radiance yays. it workssssss. [Public] Radiance yays. it workssssss. Amethyst — Today at 9:37 PM facepalm. and then back on Evennia [Public] [#chat] Amethyst: facepalm. [Public] [#chat] Amethyst: facepalm. [Public] [#chat] Amethyst: facepalm. #### Expected behavior I think the discord bot should reconnect only as many times as it disconnects. #### Develop-branch commit Evennia version: 1.0-dev (rev 06be8e4b29) #### Additional context No clue how to fix, but thought I would raise this for someone to take a look at when there's time.
[ { "content": "\"\"\"\nImplements Discord chat channel integration.\n\nThe Discord API uses a mix of websockets and REST API endpoints.\n\nIn order for this integration to work, you need to have your own\ndiscord bot set up via https://discord.com/developers/applications\nwith the MESSAGE CONTENT toggle switched on, and your bot token\nadded to `server/conf/secret_settings.py` as your DISCORD_BOT_TOKEN\n\"\"\"\nimport json\nimport os\nfrom io import BytesIO\nfrom random import random\n\nfrom autobahn.twisted.websocket import (\n WebSocketClientFactory,\n WebSocketClientProtocol,\n connectWS,\n)\nfrom django.conf import settings\nfrom twisted.internet import protocol, reactor, ssl, task\nfrom twisted.web.client import Agent, FileBodyProducer, HTTPConnectionPool, readBody\nfrom twisted.web.http_headers import Headers\n\nfrom evennia.server.session import Session\nfrom evennia.utils import class_from_module, get_evennia_version, logger\nfrom evennia.utils.utils import delay\n\n_BASE_SESSION_CLASS = class_from_module(settings.BASE_SESSION_CLASS)\n\nDISCORD_API_VERSION = 10\n# include version number to prevent automatically updating to breaking changes\nDISCORD_API_BASE_URL = f\"https://discord.com/api/v{DISCORD_API_VERSION}\"\n\nDISCORD_USER_AGENT = f\"Evennia (https://www.evennia.com, {get_evennia_version(mode='short')})\"\nDISCORD_BOT_TOKEN = settings.DISCORD_BOT_TOKEN\nDISCORD_BOT_INTENTS = settings.DISCORD_BOT_INTENTS\n\n# Discord OP codes, alphabetic\nOP_DISPATCH = 0\nOP_HEARTBEAT = 1\nOP_HEARTBEAT_ACK = 11\nOP_HELLO = 10\nOP_IDENTIFY = 2\nOP_INVALID_SESSION = 9\nOP_RECONNECT = 7\nOP_RESUME = 6\n\n\n# create quiet HTTP pool to muffle GET/POST requests\nclass QuietConnectionPool(HTTPConnectionPool):\n \"\"\"\n A quiet version of the HTTPConnectionPool which sets the factory's\n `noisy` property to False to muffle log output.\n \"\"\"\n\n def __init__(self, reactor, persistent=True):\n super().__init__(reactor, persistent)\n self._factory.noisy = False\n\n\n_AGENT = Agent(reactor, pool=QuietConnectionPool(reactor))\n\n\ndef should_retry(status_code):\n \"\"\"\n Helper function to check if the request should be retried later.\n\n Args:\n status_code (int) - The HTTP status code\n\n Returns:\n retry (bool) - True if request should be retried False otherwise\n \"\"\"\n if status_code >= 500 and status_code <= 504:\n # these are common server error codes when the server is temporarily malfunctioning\n # in these cases, we should retry\n return True\n else:\n # handle all other cases; this can be expanded later if needed for special cases\n return False\n\n\nclass DiscordWebsocketServerFactory(WebSocketClientFactory, protocol.ReconnectingClientFactory):\n \"\"\"\n A variant of the websocket-factory that auto-reconnects.\n\n \"\"\"\n\n initialDelay = 1\n factor = 1.5\n maxDelay = 60\n noisy = False\n gateway = None\n resume_url = None\n do_retry = True\n\n def __init__(self, sessionhandler, *args, **kwargs):\n self.uid = kwargs.get(\"uid\")\n self.sessionhandler = sessionhandler\n self.port = None\n self.bot = None\n\n def get_gateway_url(self, *args, **kwargs):\n # get the websocket gateway URL from Discord\n d = _AGENT.request(\n b\"GET\",\n f\"{DISCORD_API_BASE_URL}/gateway\".encode(\"utf-8\"),\n Headers(\n {\n \"User-Agent\": [DISCORD_USER_AGENT],\n \"Authorization\": [f\"Bot {DISCORD_BOT_TOKEN}\"],\n \"Content-Type\": [\"application/json\"],\n }\n ),\n None,\n )\n\n def cbResponse(response):\n if response.code == 200:\n d = readBody(response)\n d.addCallback(self.websocket_init, *args, **kwargs)\n return d\n elif should_retry(response.code):\n delay(300, self.get_gateway_url, *args, **kwargs)\n\n d.addCallback(cbResponse)\n\n def websocket_init(self, payload, *args, **kwargs):\n \"\"\"\n callback for when the URL is gotten\n \"\"\"\n data = json.loads(str(payload, \"utf-8\"))\n if url := data.get(\"url\"):\n self.gateway = f\"{url}/?v={DISCORD_API_VERSION}&encoding=json\".encode(\"utf-8\")\n useragent = kwargs.pop(\"useragent\", DISCORD_USER_AGENT)\n headers = kwargs.pop(\n \"headers\",\n {\n \"Authorization\": [f\"Bot {DISCORD_BOT_TOKEN}\"],\n \"Content-Type\": [\"application/json\"],\n },\n )\n\n logger.log_info(\"Connecting to Discord Gateway...\")\n WebSocketClientFactory.__init__(\n self, url, *args, headers=headers, useragent=useragent, **kwargs\n )\n self.start()\n else:\n logger.log_err(\"Discord did not return a websocket URL; connection cancelled.\")\n\n def buildProtocol(self, addr):\n \"\"\"\n Build new instance of protocol\n\n Args:\n addr (str): Not used, using factory/settings data\n\n \"\"\"\n if hasattr(settings, \"DISCORD_SESSION_CLASS\"):\n protocol_class = class_from_module(\n settings.DISCORD_SESSION_CLASS, fallback=DiscordClient\n )\n protocol = protocol_class()\n else:\n protocol = DiscordClient()\n\n protocol.factory = self\n protocol.sessionhandler = self.sessionhandler\n return protocol\n\n def startedConnecting(self, connector):\n \"\"\"\n Tracks reconnections for debugging.\n\n Args:\n connector (Connector): Represents the connection.\n\n \"\"\"\n logger.log_info(\"Attempting connection to Discord...\")\n\n def clientConnectionFailed(self, connector, reason):\n \"\"\"\n Called when Client failed to connect.\n\n Args:\n connector (Connection): Represents the connection.\n reason (str): The reason for the failure.\n\n \"\"\"\n protocol.ReconnectingClientFactory.clientConnectionLost(self, connector, reason)\n\n def clientConnectionLost(self, connector, reason):\n \"\"\"\n Called when Client loses connection.\n\n Args:\n connector (Connection): Represents the connection.\n reason (str): The reason for the failure.\n\n \"\"\"\n if self.do_retry or not self.bot:\n self.retry(connector)\n\n def reconnect(self):\n \"\"\"\n Force a reconnection of the bot protocol. This requires\n de-registering the session and then reattaching a new one.\n\n \"\"\"\n # set the retry flag to False so it doesn't attempt an automatic retry\n # and duplicate the connection\n self.do_retry = False\n # disconnect everything\n self.bot.transport.loseConnection()\n self.sessionhandler.server_disconnect(self.bot)\n # set up the reconnection\n if self.resume_url:\n self.url = self.resume_url\n elif self.gateway:\n self.url = self.gateway\n else:\n # we don't know where to reconnect to! start from the beginning\n self.get_gateway_url()\n return\n self.start()\n\n def start(self):\n \"Connect protocol to remote server\"\n\n if not self.gateway:\n # we can't actually start yet\n # get the gateway URL from Discord\n self.get_gateway_url()\n else:\n # set the retry flag so we maintain this connection\n self.do_retry = True\n connectWS(self)\n\n\nclass DiscordClient(WebSocketClientProtocol, _BASE_SESSION_CLASS):\n \"\"\"\n Implements the Discord client\n \"\"\"\n\n nextHeartbeatCall = None\n pending_heartbeat = False\n heartbeat_interval = None\n last_sequence = 0\n session_id = None\n discord_id = None\n\n def __init__(self):\n WebSocketClientProtocol.__init__(self)\n _BASE_SESSION_CLASS.__init__(self)\n self.restart_downtime = None\n\n def at_login(self):\n pass\n\n def onOpen(self):\n \"\"\"\n Called when connection is established.\n\n \"\"\"\n self.restart_downtime = None\n self.restart_task = None\n self.factory.bot = self\n\n self.init_session(\"discord\", \"discord.gg\", self.factory.sessionhandler)\n self.uid = int(self.factory.uid)\n self.logged_in = True\n self.sessionhandler.connect(self)\n\n def onMessage(self, payload, isBinary):\n \"\"\"\n Callback fired when a complete WebSocket message was received.\n\n Args:\n payload (bytes): The WebSocket message received.\n isBinary (bool): Flag indicating whether payload is binary or\n UTF-8 encoded text.\n\n \"\"\"\n if isBinary:\n logger.log_info(\"DISCORD: got a binary payload for some reason\")\n return\n data = json.loads(str(payload, \"utf-8\"))\n if seqid := data.get(\"s\"):\n self.last_sequence = seqid\n\n # not sure if that error json format is for websockets, so\n # check for it just in case\n if \"errors\" in data:\n self.handle_error(data)\n return\n\n # check for discord gateway API op codes first\n if data[\"op\"] == OP_HELLO:\n self.interval = data[\"d\"][\"heartbeat_interval\"] / 1000 # convert millisec to seconds\n if self.nextHeartbeatCall:\n self.nextHeartbeatCall.cancel()\n self.nextHeartbeatCall = self.factory._batched_timer.call_later(\n self.interval * random(),\n self.doHeartbeat,\n )\n if self.session_id:\n # we already have a session; try to resume instead\n self.resume()\n else:\n self.identify()\n elif data[\"op\"] == OP_HEARTBEAT_ACK:\n # our last heartbeat was acknowledged, so reset the \"pending\" flag\n self.pending_heartbeat = False\n elif data[\"op\"] == OP_HEARTBEAT:\n # Discord wants us to send a heartbeat immediately\n self.doHeartbeat(force=True)\n elif data[\"op\"] == OP_INVALID_SESSION:\n # Discord doesn't like our current session; reconnect for a new one\n logger.log_msg(\"Discord: received 'Invalid Session' opcode. Reconnecting.\")\n if data[\"d\"] == False:\n # can't resume, clear existing resume data\n self.session_id = None\n self.factory.resume_url = None\n self.factory.reconnect()\n elif data[\"op\"] == OP_RECONNECT:\n # reconnect as requested; Discord does this regularly for server load balancing\n logger.log_msg(\"Discord: received 'Reconnect' opcode. Reconnecting.\")\n self.factory.reconnect()\n elif data[\"op\"] == OP_DISPATCH:\n # handle the general dispatch opcode events by type\n if data[\"t\"] == \"READY\":\n # our recent identification is valid; process new session info\n self.connection_ready(data[\"d\"])\n else:\n # general message, pass on to data_in\n self.data_in(data=data)\n\n def onClose(self, wasClean, code=None, reason=None):\n \"\"\"\n This is executed when the connection is lost for whatever\n reason. it can also be called directly, from the disconnect\n method.\n\n Args:\n wasClean (bool): ``True`` if the WebSocket was closed cleanly.\n code (int or None): Close status as sent by the WebSocket peer.\n reason (str or None): Close reason as sent by the WebSocket peer.\n\n \"\"\"\n if self.nextHeartbeatCall:\n self.nextHeartbeatCall.cancel()\n self.disconnect(reason)\n if code >= 4000:\n logger.log_err(f\"Discord connection closed: {reason}\")\n else:\n logger.log_info(f\"Discord disconnected: {reason}\")\n\n def _send_json(self, data):\n \"\"\"\n Post JSON data to the websocket\n\n Args:\n data (dict): content to send.\n\n \"\"\"\n return self.sendMessage(json.dumps(data).encode(\"utf-8\"))\n\n def _post_json(self, url, data, **kwargs):\n \"\"\"\n Post JSON data to a REST API endpoint\n\n Args:\n url (str) - The API path which is being posted to\n data (dict) - Content to be sent\n \"\"\"\n url = f\"{DISCORD_API_BASE_URL}/{url}\"\n body = FileBodyProducer(BytesIO(json.dumps(data).encode(\"utf-8\")))\n d = _AGENT.request(\n b\"POST\",\n url.encode(\"utf-8\"),\n Headers(\n {\n \"User-Agent\": [DISCORD_USER_AGENT],\n \"Authorization\": [f\"Bot {DISCORD_BOT_TOKEN}\"],\n \"Content-Type\": [\"application/json\"],\n }\n ),\n body,\n )\n\n def cbResponse(response):\n if response.code == 200:\n d = readBody(response)\n d.addCallback(self.post_response)\n return d\n elif should_retry(response.code):\n delay(300, self._post_json, url, data, **kwargs)\n\n d.addCallback(cbResponse)\n\n def post_response(self, body, **kwargs):\n \"\"\"\n Process the response from sending a POST request\n\n Args:\n body (bytes) - The post response body\n \"\"\"\n data = json.loads(body)\n if \"errors\" in data:\n self.handle_error(data)\n\n def handle_error(self, data, **kwargs):\n \"\"\"\n General hook for processing errors.\n\n Args:\n data (dict) - The received error data\n\n \"\"\"\n logger.log_err(str(data))\n\n def resume(self):\n \"\"\"\n Called after a reconnection to re-identify and replay missed events\n\n \"\"\"\n if not self.last_sequence or not self.session_id:\n # we have no known state to resume from, identify normally\n self.identify()\n\n # build a RESUME request for Discord and send it\n data = {\n \"op\": OP_RESUME,\n \"d\": {\n \"token\": DISCORD_BOT_TOKEN,\n \"session_id\": self.session_id,\n \"s\": self.sequence_id,\n },\n }\n self._send_json(data)\n\n def disconnect(self, reason=None):\n \"\"\"\n Generic hook for the engine to call in order to\n disconnect this protocol.\n\n Args:\n reason (str or None): Motivation for the disconnection.\n\n \"\"\"\n self.sessionhandler.disconnect(self)\n self.sendClose(self.CLOSE_STATUS_CODE_NORMAL, reason)\n\n def identify(self, *args, **kwargs):\n \"\"\"\n Send Discord authentication. This should be sent once heartbeats begin.\n\n \"\"\"\n data = {\n \"op\": 2,\n \"d\": {\n \"token\": DISCORD_BOT_TOKEN,\n \"intents\": DISCORD_BOT_INTENTS,\n \"properties\": {\n \"os\": os.name,\n \"browser\": DISCORD_USER_AGENT,\n \"device\": DISCORD_USER_AGENT,\n },\n },\n }\n self._send_json(data)\n\n def connection_ready(self, data):\n \"\"\"\n Process READY data for relevant bot info.\n \"\"\"\n self.factory.resume_url = data[\"resume_gateway_url\"]\n self.session_id = data[\"session_id\"]\n self.discord_id = data[\"user\"][\"id\"]\n\n def doHeartbeat(self, *args, **kwargs):\n \"\"\"\n Send heartbeat to Discord.\n\n \"\"\"\n if not self.pending_heartbeat or kwargs.get(\"force\"):\n if self.nextHeartbeatCall:\n self.nextHeartbeatCall.cancel()\n # send the heartbeat\n data = {\"op\": 1, \"d\": self.last_sequence}\n self._send_json(data)\n # track that we sent a heartbeat, in case we don't receive an ACK\n self.pending_heartbeat = True\n self.nextHeartbeatCall = self.factory._batched_timer.call_later(\n self.interval,\n self.doHeartbeat,\n )\n else:\n # we didn't get a response since the last heartbeat; reconnect\n self.factory.reconnect()\n\n def send_channel(self, text, channel_id, **kwargs):\n \"\"\"\n Send a message from an Evennia channel to a Discord channel.\n\n Use with session.msg(channel=(message, channel, sender))\n\n \"\"\"\n\n data = {\"content\": text}\n data.update(kwargs)\n self._post_json(f\"channels/{channel_id}/messages\", data)\n\n def send_default(self, *args, **kwargs):\n \"\"\"\n Ignore other outputfuncs\n\n \"\"\"\n pass\n\n def data_in(self, data, **kwargs):\n \"\"\"\n Process incoming data from Discord and sent to the Evennia server\n\n Args:\n data (dict): Converted json data.\n\n \"\"\"\n action_type = data.get(\"t\", \"UNKNOWN\")\n\n if action_type == \"MESSAGE_CREATE\":\n # someone posted a message on Discord that the bot can see\n data = data[\"d\"]\n if data[\"author\"][\"id\"] == self.discord_id:\n # it's by the bot itself! disregard\n return\n message = data[\"content\"]\n channel_id = data[\"channel_id\"]\n keywords = {\"channel_id\": channel_id}\n if \"guild_id\" in data:\n # message received to a Discord channel\n keywords[\"type\"] = \"channel\"\n author = data[\"member\"][\"nick\"] or data[\"author\"][\"username\"]\n author_id = data[\"author\"][\"id\"]\n keywords[\"sender\"] = (author_id, author)\n keywords[\"guild_id\"] = data[\"guild_id\"]\n\n else:\n # message sent directly to the bot account via DM\n keywords[\"type\"] = \"direct\"\n author = data[\"author\"][\"username\"]\n author_id = data[\"author\"][\"id\"]\n keywords[\"sender\"] = (author_id, author)\n\n # pass the processed data to the server\n self.sessionhandler.data_in(self, bot_data_in=(message, keywords))\n\n elif action_type in (\"GUILD_CREATE\", \"GUILD_UPDATE\"):\n # we received the current status of a guild the bot is on; process relevant info\n data = data[\"d\"]\n keywords = {\"type\": \"guild\", \"guild_id\": data[\"id\"], \"guild_name\": data[\"name\"]}\n keywords[\"channels\"] = {\n chan[\"id\"]: {\"name\": chan[\"name\"], \"guild\": data[\"name\"]}\n for chan in data[\"channels\"]\n if chan[\"type\"] == 0\n }\n # send the possibly-updated guild and channel data to the server\n self.sessionhandler.data_in(self, bot_data_in=(\"\", keywords))\n\n elif \"DELETE\" in action_type:\n # deletes should possibly be handled separately to check for channel removal\n # for now, just ignore\n pass\n\n else:\n # send the data for any other action types on to the bot as-is for optional server-side handling\n keywords = {\"type\": action_type}\n keywords.update(data[\"d\"])\n self.sessionhandler.data_in(self, bot_data_in=(\"\", keywords))\n", "path": "evennia/server/portal/discord.py" } ]
[ { "content": "\"\"\"\nImplements Discord chat channel integration.\n\nThe Discord API uses a mix of websockets and REST API endpoints.\n\nIn order for this integration to work, you need to have your own\ndiscord bot set up via https://discord.com/developers/applications\nwith the MESSAGE CONTENT toggle switched on, and your bot token\nadded to `server/conf/secret_settings.py` as your DISCORD_BOT_TOKEN\n\"\"\"\nimport json\nimport os\nfrom io import BytesIO\nfrom random import random\n\nfrom autobahn.twisted.websocket import (\n WebSocketClientFactory,\n WebSocketClientProtocol,\n connectWS,\n)\nfrom django.conf import settings\nfrom twisted.internet import protocol, reactor, ssl, task\nfrom twisted.web.client import Agent, FileBodyProducer, HTTPConnectionPool, readBody\nfrom twisted.web.http_headers import Headers\n\nfrom evennia.server.session import Session\nfrom evennia.utils import class_from_module, get_evennia_version, logger\nfrom evennia.utils.utils import delay\n\n_BASE_SESSION_CLASS = class_from_module(settings.BASE_SESSION_CLASS)\n\nDISCORD_API_VERSION = 10\n# include version number to prevent automatically updating to breaking changes\nDISCORD_API_BASE_URL = f\"https://discord.com/api/v{DISCORD_API_VERSION}\"\n\nDISCORD_USER_AGENT = f\"Evennia (https://www.evennia.com, {get_evennia_version(mode='short')})\"\nDISCORD_BOT_TOKEN = settings.DISCORD_BOT_TOKEN\nDISCORD_BOT_INTENTS = settings.DISCORD_BOT_INTENTS\n\n# Discord OP codes, alphabetic\nOP_DISPATCH = 0\nOP_HEARTBEAT = 1\nOP_HEARTBEAT_ACK = 11\nOP_HELLO = 10\nOP_IDENTIFY = 2\nOP_INVALID_SESSION = 9\nOP_RECONNECT = 7\nOP_RESUME = 6\n\n\n# create quiet HTTP pool to muffle GET/POST requests\nclass QuietConnectionPool(HTTPConnectionPool):\n \"\"\"\n A quiet version of the HTTPConnectionPool which sets the factory's\n `noisy` property to False to muffle log output.\n \"\"\"\n\n def __init__(self, reactor, persistent=True):\n super().__init__(reactor, persistent)\n self._factory.noisy = False\n\n\n_AGENT = Agent(reactor, pool=QuietConnectionPool(reactor))\n\n\ndef should_retry(status_code):\n \"\"\"\n Helper function to check if the request should be retried later.\n\n Args:\n status_code (int) - The HTTP status code\n\n Returns:\n retry (bool) - True if request should be retried False otherwise\n \"\"\"\n if status_code >= 500 and status_code <= 504:\n # these are common server error codes when the server is temporarily malfunctioning\n # in these cases, we should retry\n return True\n else:\n # handle all other cases; this can be expanded later if needed for special cases\n return False\n\n\nclass DiscordWebsocketServerFactory(WebSocketClientFactory, protocol.ReconnectingClientFactory):\n \"\"\"\n A variant of the websocket-factory that auto-reconnects.\n\n \"\"\"\n\n initialDelay = 1\n factor = 1.5\n maxDelay = 60\n noisy = False\n gateway = None\n resume_url = None\n do_retry = True\n\n def __init__(self, sessionhandler, *args, **kwargs):\n self.uid = kwargs.get(\"uid\")\n self.sessionhandler = sessionhandler\n self.port = None\n self.bot = None\n\n def get_gateway_url(self, *args, **kwargs):\n # get the websocket gateway URL from Discord\n d = _AGENT.request(\n b\"GET\",\n f\"{DISCORD_API_BASE_URL}/gateway\".encode(\"utf-8\"),\n Headers(\n {\n \"User-Agent\": [DISCORD_USER_AGENT],\n \"Authorization\": [f\"Bot {DISCORD_BOT_TOKEN}\"],\n \"Content-Type\": [\"application/json\"],\n }\n ),\n None,\n )\n\n def cbResponse(response):\n if response.code == 200:\n d = readBody(response)\n d.addCallback(self.websocket_init, *args, **kwargs)\n return d\n elif should_retry(response.code):\n delay(300, self.get_gateway_url, *args, **kwargs)\n\n d.addCallback(cbResponse)\n\n def websocket_init(self, payload, *args, **kwargs):\n \"\"\"\n callback for when the URL is gotten\n \"\"\"\n data = json.loads(str(payload, \"utf-8\"))\n if url := data.get(\"url\"):\n self.gateway = f\"{url}/?v={DISCORD_API_VERSION}&encoding=json\".encode(\"utf-8\")\n useragent = kwargs.pop(\"useragent\", DISCORD_USER_AGENT)\n headers = kwargs.pop(\n \"headers\",\n {\n \"Authorization\": [f\"Bot {DISCORD_BOT_TOKEN}\"],\n \"Content-Type\": [\"application/json\"],\n },\n )\n\n logger.log_info(\"Connecting to Discord Gateway...\")\n WebSocketClientFactory.__init__(\n self, url, *args, headers=headers, useragent=useragent, **kwargs\n )\n self.start()\n else:\n logger.log_err(\"Discord did not return a websocket URL; connection cancelled.\")\n\n def buildProtocol(self, addr):\n \"\"\"\n Build new instance of protocol\n\n Args:\n addr (str): Not used, using factory/settings data\n\n \"\"\"\n if hasattr(settings, \"DISCORD_SESSION_CLASS\"):\n protocol_class = class_from_module(\n settings.DISCORD_SESSION_CLASS, fallback=DiscordClient\n )\n protocol = protocol_class()\n else:\n protocol = DiscordClient()\n\n protocol.factory = self\n protocol.sessionhandler = self.sessionhandler\n return protocol\n\n def startedConnecting(self, connector):\n \"\"\"\n Tracks reconnections for debugging.\n\n Args:\n connector (Connector): Represents the connection.\n\n \"\"\"\n logger.log_info(\"Attempting connection to Discord...\")\n\n def clientConnectionFailed(self, connector, reason):\n \"\"\"\n Called when Client failed to connect.\n\n Args:\n connector (Connection): Represents the connection.\n reason (str): The reason for the failure.\n\n \"\"\"\n protocol.ReconnectingClientFactory.clientConnectionLost(self, connector, reason)\n\n def clientConnectionLost(self, connector, reason):\n \"\"\"\n Called when Client loses connection.\n\n Args:\n connector (Connection): Represents the connection.\n reason (str): The reason for the failure.\n\n \"\"\"\n if self.do_retry and self.bot:\n self.retry(connector)\n\n def reconnect(self):\n \"\"\"\n Force a reconnection of the bot protocol. This requires\n de-registering the session and then reattaching a new one.\n\n \"\"\"\n # set the retry flag to False so it doesn't attempt an automatic retry\n # and duplicate the connection\n self.do_retry = False\n # disconnect everything\n self.bot.transport.loseConnection()\n self.sessionhandler.server_disconnect(self.bot)\n # set up the reconnection\n if self.resume_url:\n self.url = self.resume_url\n elif self.gateway:\n self.url = self.gateway\n else:\n # we don't know where to reconnect to! start from the beginning\n self.get_gateway_url()\n return\n self.start()\n\n def start(self):\n \"Connect protocol to remote server\"\n\n if not self.gateway:\n # we can't actually start yet\n # get the gateway URL from Discord\n self.get_gateway_url()\n else:\n # set the retry flag so we maintain this connection\n self.do_retry = True\n connectWS(self)\n\n\nclass DiscordClient(WebSocketClientProtocol, _BASE_SESSION_CLASS):\n \"\"\"\n Implements the Discord client\n \"\"\"\n\n nextHeartbeatCall = None\n pending_heartbeat = False\n heartbeat_interval = None\n last_sequence = 0\n session_id = None\n discord_id = None\n\n def __init__(self):\n WebSocketClientProtocol.__init__(self)\n _BASE_SESSION_CLASS.__init__(self)\n self.restart_downtime = None\n\n def at_login(self):\n pass\n\n def onOpen(self):\n \"\"\"\n Called when connection is established.\n\n \"\"\"\n self.restart_downtime = None\n self.restart_task = None\n self.factory.bot = self\n\n self.init_session(\"discord\", \"discord.gg\", self.factory.sessionhandler)\n self.uid = int(self.factory.uid)\n self.logged_in = True\n self.sessionhandler.connect(self)\n\n def onMessage(self, payload, isBinary):\n \"\"\"\n Callback fired when a complete WebSocket message was received.\n\n Args:\n payload (bytes): The WebSocket message received.\n isBinary (bool): Flag indicating whether payload is binary or\n UTF-8 encoded text.\n\n \"\"\"\n if isBinary:\n logger.log_info(\"DISCORD: got a binary payload for some reason\")\n return\n data = json.loads(str(payload, \"utf-8\"))\n if seqid := data.get(\"s\"):\n self.last_sequence = seqid\n\n # not sure if that error json format is for websockets, so\n # check for it just in case\n if \"errors\" in data:\n self.handle_error(data)\n return\n\n # check for discord gateway API op codes first\n if data[\"op\"] == OP_HELLO:\n self.interval = data[\"d\"][\"heartbeat_interval\"] / 1000 # convert millisec to seconds\n if self.nextHeartbeatCall:\n self.nextHeartbeatCall.cancel()\n self.nextHeartbeatCall = self.factory._batched_timer.call_later(\n self.interval * random(),\n self.doHeartbeat,\n )\n if self.session_id:\n # we already have a session; try to resume instead\n self.resume()\n else:\n self.identify()\n elif data[\"op\"] == OP_HEARTBEAT_ACK:\n # our last heartbeat was acknowledged, so reset the \"pending\" flag\n self.pending_heartbeat = False\n elif data[\"op\"] == OP_HEARTBEAT:\n # Discord wants us to send a heartbeat immediately\n self.doHeartbeat(force=True)\n elif data[\"op\"] == OP_INVALID_SESSION:\n # Discord doesn't like our current session; reconnect for a new one\n logger.log_msg(\"Discord: received 'Invalid Session' opcode. Reconnecting.\")\n if data[\"d\"] == False:\n # can't resume, clear existing resume data\n self.session_id = None\n self.factory.resume_url = None\n self.factory.reconnect()\n elif data[\"op\"] == OP_RECONNECT:\n # reconnect as requested; Discord does this regularly for server load balancing\n logger.log_msg(\"Discord: received 'Reconnect' opcode. Reconnecting.\")\n self.factory.reconnect()\n elif data[\"op\"] == OP_DISPATCH:\n # handle the general dispatch opcode events by type\n if data[\"t\"] == \"READY\":\n # our recent identification is valid; process new session info\n self.connection_ready(data[\"d\"])\n else:\n # general message, pass on to data_in\n self.data_in(data=data)\n\n def onClose(self, wasClean, code=None, reason=None):\n \"\"\"\n This is executed when the connection is lost for whatever\n reason. it can also be called directly, from the disconnect\n method.\n\n Args:\n wasClean (bool): ``True`` if the WebSocket was closed cleanly.\n code (int or None): Close status as sent by the WebSocket peer.\n reason (str or None): Close reason as sent by the WebSocket peer.\n\n \"\"\"\n if self.nextHeartbeatCall:\n self.nextHeartbeatCall.cancel()\n self.disconnect(reason)\n if code >= 4000:\n logger.log_err(f\"Discord connection closed: {reason}\")\n else:\n logger.log_info(f\"Discord disconnected: {reason}\")\n\n def _send_json(self, data):\n \"\"\"\n Post JSON data to the websocket\n\n Args:\n data (dict): content to send.\n\n \"\"\"\n return self.sendMessage(json.dumps(data).encode(\"utf-8\"))\n\n def _post_json(self, url, data, **kwargs):\n \"\"\"\n Post JSON data to a REST API endpoint\n\n Args:\n url (str) - The API path which is being posted to\n data (dict) - Content to be sent\n \"\"\"\n url = f\"{DISCORD_API_BASE_URL}/{url}\"\n body = FileBodyProducer(BytesIO(json.dumps(data).encode(\"utf-8\")))\n d = _AGENT.request(\n b\"POST\",\n url.encode(\"utf-8\"),\n Headers(\n {\n \"User-Agent\": [DISCORD_USER_AGENT],\n \"Authorization\": [f\"Bot {DISCORD_BOT_TOKEN}\"],\n \"Content-Type\": [\"application/json\"],\n }\n ),\n body,\n )\n\n def cbResponse(response):\n if response.code == 200:\n d = readBody(response)\n d.addCallback(self.post_response)\n return d\n elif should_retry(response.code):\n delay(300, self._post_json, url, data, **kwargs)\n\n d.addCallback(cbResponse)\n\n def post_response(self, body, **kwargs):\n \"\"\"\n Process the response from sending a POST request\n\n Args:\n body (bytes) - The post response body\n \"\"\"\n data = json.loads(body)\n if \"errors\" in data:\n self.handle_error(data)\n\n def handle_error(self, data, **kwargs):\n \"\"\"\n General hook for processing errors.\n\n Args:\n data (dict) - The received error data\n\n \"\"\"\n logger.log_err(str(data))\n\n def resume(self):\n \"\"\"\n Called after a reconnection to re-identify and replay missed events\n\n \"\"\"\n if not self.last_sequence or not self.session_id:\n # we have no known state to resume from, identify normally\n self.identify()\n\n # build a RESUME request for Discord and send it\n data = {\n \"op\": OP_RESUME,\n \"d\": {\n \"token\": DISCORD_BOT_TOKEN,\n \"session_id\": self.session_id,\n \"s\": self.sequence_id,\n },\n }\n self._send_json(data)\n\n def disconnect(self, reason=None):\n \"\"\"\n Generic hook for the engine to call in order to\n disconnect this protocol.\n\n Args:\n reason (str or None): Motivation for the disconnection.\n\n \"\"\"\n self.sessionhandler.disconnect(self)\n self.sendClose(self.CLOSE_STATUS_CODE_NORMAL, reason)\n\n def identify(self, *args, **kwargs):\n \"\"\"\n Send Discord authentication. This should be sent once heartbeats begin.\n\n \"\"\"\n data = {\n \"op\": 2,\n \"d\": {\n \"token\": DISCORD_BOT_TOKEN,\n \"intents\": DISCORD_BOT_INTENTS,\n \"properties\": {\n \"os\": os.name,\n \"browser\": DISCORD_USER_AGENT,\n \"device\": DISCORD_USER_AGENT,\n },\n },\n }\n self._send_json(data)\n\n def connection_ready(self, data):\n \"\"\"\n Process READY data for relevant bot info.\n \"\"\"\n self.factory.resume_url = data[\"resume_gateway_url\"]\n self.session_id = data[\"session_id\"]\n self.discord_id = data[\"user\"][\"id\"]\n\n def doHeartbeat(self, *args, **kwargs):\n \"\"\"\n Send heartbeat to Discord.\n\n \"\"\"\n if not self.pending_heartbeat or kwargs.get(\"force\"):\n if self.nextHeartbeatCall:\n self.nextHeartbeatCall.cancel()\n # send the heartbeat\n data = {\"op\": 1, \"d\": self.last_sequence}\n self._send_json(data)\n # track that we sent a heartbeat, in case we don't receive an ACK\n self.pending_heartbeat = True\n self.nextHeartbeatCall = self.factory._batched_timer.call_later(\n self.interval,\n self.doHeartbeat,\n )\n else:\n # we didn't get a response since the last heartbeat; reconnect\n self.factory.reconnect()\n\n def send_channel(self, text, channel_id, **kwargs):\n \"\"\"\n Send a message from an Evennia channel to a Discord channel.\n\n Use with session.msg(channel=(message, channel, sender))\n\n \"\"\"\n\n data = {\"content\": text}\n data.update(kwargs)\n self._post_json(f\"channels/{channel_id}/messages\", data)\n\n def send_default(self, *args, **kwargs):\n \"\"\"\n Ignore other outputfuncs\n\n \"\"\"\n pass\n\n def data_in(self, data, **kwargs):\n \"\"\"\n Process incoming data from Discord and sent to the Evennia server\n\n Args:\n data (dict): Converted json data.\n\n \"\"\"\n action_type = data.get(\"t\", \"UNKNOWN\")\n\n if action_type == \"MESSAGE_CREATE\":\n # someone posted a message on Discord that the bot can see\n data = data[\"d\"]\n if data[\"author\"][\"id\"] == self.discord_id:\n # it's by the bot itself! disregard\n return\n message = data[\"content\"]\n channel_id = data[\"channel_id\"]\n keywords = {\"channel_id\": channel_id}\n if \"guild_id\" in data:\n # message received to a Discord channel\n keywords[\"type\"] = \"channel\"\n author = data[\"member\"][\"nick\"] or data[\"author\"][\"username\"]\n author_id = data[\"author\"][\"id\"]\n keywords[\"sender\"] = (author_id, author)\n keywords[\"guild_id\"] = data[\"guild_id\"]\n\n else:\n # message sent directly to the bot account via DM\n keywords[\"type\"] = \"direct\"\n author = data[\"author\"][\"username\"]\n author_id = data[\"author\"][\"id\"]\n keywords[\"sender\"] = (author_id, author)\n\n # pass the processed data to the server\n self.sessionhandler.data_in(self, bot_data_in=(message, keywords))\n\n elif action_type in (\"GUILD_CREATE\", \"GUILD_UPDATE\"):\n # we received the current status of a guild the bot is on; process relevant info\n data = data[\"d\"]\n keywords = {\"type\": \"guild\", \"guild_id\": data[\"id\"], \"guild_name\": data[\"name\"]}\n keywords[\"channels\"] = {\n chan[\"id\"]: {\"name\": chan[\"name\"], \"guild\": data[\"name\"]}\n for chan in data[\"channels\"]\n if chan[\"type\"] == 0\n }\n # send the possibly-updated guild and channel data to the server\n self.sessionhandler.data_in(self, bot_data_in=(\"\", keywords))\n\n elif \"DELETE\" in action_type:\n # deletes should possibly be handled separately to check for channel removal\n # for now, just ignore\n pass\n\n else:\n # send the data for any other action types on to the bot as-is for optional server-side handling\n keywords = {\"type\": action_type}\n keywords.update(data[\"d\"])\n self.sessionhandler.data_in(self, bot_data_in=(\"\", keywords))\n", "path": "evennia/server/portal/discord.py" } ]
diff --git a/evennia/server/portal/discord.py b/evennia/server/portal/discord.py index d1cb6c34c72..eab9ca1a926 100644 --- a/evennia/server/portal/discord.py +++ b/evennia/server/portal/discord.py @@ -201,7 +201,7 @@ def clientConnectionLost(self, connector, reason): reason (str): The reason for the failure. """ - if self.do_retry or not self.bot: + if self.do_retry and self.bot: self.retry(connector) def reconnect(self):
zulip__zulip-29412
Go to newly created stream (with first-time modal) Even after #29154, users find it hard to navigate to a newly created stream. To address this, we should: 1. Take the user directly to the stream they just created. To avoid a potentially confusing interleaved view, we should go to the most recent topic in the stream (currently "stream events", but might be "general chat" in the future). 2. The first time that a user creates a stream, show an explanatory modal (wording to be finalized when working on the PR): ---- ## Stream **#{stream name}** created! You will now see the stream you created. If you'd like to go back to stream settings, click on the name of the stream at the top of your Zulip window, or use the **back** button in your browser or desktop app. [Continue] --- Since we are changing the behavior, it's fine to show this once to existing users. [CZO thread](https://chat.zulip.org/#narrow/stream/137-feedback/topic/user.20research.3A.20going.20to.20a.20new.20stream/near/1744305)
[ { "content": "# See https://zulip.readthedocs.io/en/latest/subsystems/hotspots.html\n# for documentation on this subsystem.\nfrom dataclasses import dataclass\nfrom typing import Any, Dict, List, Optional, Union\n\nfrom django.conf import settings\nfrom django.utils.translation import gettext_lazy\nfrom django_stubs_ext import StrPromise\n\nfrom zerver.models import OnboardingStep, UserProfile\n\n\n@dataclass\nclass Hotspot:\n name: str\n title: Optional[StrPromise]\n description: Optional[StrPromise]\n has_trigger: bool = False\n\n def to_dict(self, delay: float = 0) -> Dict[str, Union[str, float, bool]]:\n return {\n \"type\": \"hotspot\",\n \"name\": self.name,\n \"title\": str(self.title),\n \"description\": str(self.description),\n \"delay\": delay,\n \"has_trigger\": self.has_trigger,\n }\n\n\nINTRO_HOTSPOTS: List[Hotspot] = [\n Hotspot(\n name=\"intro_streams\",\n title=gettext_lazy(\"Catch up on a stream\"),\n description=gettext_lazy(\n \"Messages sent to a stream are seen by everyone subscribed \"\n \"to that stream. Try clicking on one of the stream links below.\"\n ),\n ),\n Hotspot(\n name=\"intro_topics\",\n title=gettext_lazy(\"Topics\"),\n description=gettext_lazy(\n \"Every message has a topic. Topics keep conversations \"\n \"easy to follow, and make it easy to reply to conversations that start \"\n \"while you are offline.\"\n ),\n ),\n Hotspot(\n # In theory, this should be renamed to intro_personal, since\n # it's no longer attached to the gear menu, but renaming these\n # requires a migration that is not worth doing at this time.\n name=\"intro_gear\",\n title=gettext_lazy(\"Settings\"),\n description=gettext_lazy(\"Go to Settings to configure your notifications and preferences.\"),\n ),\n Hotspot(\n name=\"intro_compose\",\n title=gettext_lazy(\"Compose\"),\n description=gettext_lazy(\n \"Click here to start a new conversation. Pick a topic \"\n \"(2-3 words is best), and give it a go!\"\n ),\n ),\n]\n\n\nNON_INTRO_HOTSPOTS: List[Hotspot] = []\n\n\n@dataclass\nclass OneTimeNotice:\n name: str\n\n def to_dict(self) -> Dict[str, str]:\n return {\n \"type\": \"one_time_notice\",\n \"name\": self.name,\n }\n\n\nONE_TIME_NOTICES: List[OneTimeNotice] = [\n OneTimeNotice(\n name=\"visibility_policy_banner\",\n ),\n OneTimeNotice(\n name=\"intro_inbox_view_modal\",\n ),\n OneTimeNotice(\n name=\"intro_recent_view_modal\",\n ),\n]\n\n# We would most likely implement new hotspots in the future that aren't\n# a part of the initial tutorial. To that end, classifying them into\n# categories which are aggregated in ALL_HOTSPOTS, seems like a good start.\nALL_HOTSPOTS = [*INTRO_HOTSPOTS, *NON_INTRO_HOTSPOTS]\nALL_ONBOARDING_STEPS: List[Union[Hotspot, OneTimeNotice]] = [*ALL_HOTSPOTS, *ONE_TIME_NOTICES]\n\n\ndef get_next_onboarding_steps(user: UserProfile) -> List[Dict[str, Any]]:\n # For manual testing, it can be convenient to set\n # ALWAYS_SEND_ALL_HOTSPOTS=True in `zproject/dev_settings.py` to\n # make it easy to click on all of the hotspots.\n #\n # Since this is just for development purposes, it's convenient for us to send\n # all the hotspots rather than any specific category.\n if settings.ALWAYS_SEND_ALL_HOTSPOTS:\n return [hotspot.to_dict() for hotspot in ALL_HOTSPOTS]\n\n # If a Zulip server has disabled the tutorial, never send hotspots.\n if not settings.TUTORIAL_ENABLED:\n return []\n\n seen_onboarding_steps = frozenset(\n OnboardingStep.objects.filter(user=user).values_list(\"onboarding_step\", flat=True)\n )\n\n onboarding_steps: List[Dict[str, Any]] = [hotspot.to_dict() for hotspot in NON_INTRO_HOTSPOTS]\n\n for one_time_notice in ONE_TIME_NOTICES:\n if one_time_notice.name in seen_onboarding_steps:\n continue\n onboarding_steps.append(one_time_notice.to_dict())\n\n if user.tutorial_status == UserProfile.TUTORIAL_FINISHED:\n return onboarding_steps\n\n for hotspot in INTRO_HOTSPOTS:\n if hotspot.name in seen_onboarding_steps:\n continue\n\n onboarding_steps.append(hotspot.to_dict(delay=0.5))\n return onboarding_steps\n\n user.tutorial_status = UserProfile.TUTORIAL_FINISHED\n user.save(update_fields=[\"tutorial_status\"])\n return onboarding_steps\n\n\ndef copy_hotspots(source_profile: UserProfile, target_profile: UserProfile) -> None:\n for userhotspot in frozenset(OnboardingStep.objects.filter(user=source_profile)):\n OnboardingStep.objects.create(\n user=target_profile,\n onboarding_step=userhotspot.onboarding_step,\n timestamp=userhotspot.timestamp,\n )\n\n target_profile.tutorial_status = source_profile.tutorial_status\n target_profile.onboarding_steps = source_profile.onboarding_steps\n target_profile.save(update_fields=[\"tutorial_status\", \"onboarding_steps\"])\n", "path": "zerver/lib/hotspots.py" } ]
[ { "content": "# See https://zulip.readthedocs.io/en/latest/subsystems/hotspots.html\n# for documentation on this subsystem.\nfrom dataclasses import dataclass\nfrom typing import Any, Dict, List, Optional, Union\n\nfrom django.conf import settings\nfrom django.utils.translation import gettext_lazy\nfrom django_stubs_ext import StrPromise\n\nfrom zerver.models import OnboardingStep, UserProfile\n\n\n@dataclass\nclass Hotspot:\n name: str\n title: Optional[StrPromise]\n description: Optional[StrPromise]\n has_trigger: bool = False\n\n def to_dict(self, delay: float = 0) -> Dict[str, Union[str, float, bool]]:\n return {\n \"type\": \"hotspot\",\n \"name\": self.name,\n \"title\": str(self.title),\n \"description\": str(self.description),\n \"delay\": delay,\n \"has_trigger\": self.has_trigger,\n }\n\n\nINTRO_HOTSPOTS: List[Hotspot] = [\n Hotspot(\n name=\"intro_streams\",\n title=gettext_lazy(\"Catch up on a stream\"),\n description=gettext_lazy(\n \"Messages sent to a stream are seen by everyone subscribed \"\n \"to that stream. Try clicking on one of the stream links below.\"\n ),\n ),\n Hotspot(\n name=\"intro_topics\",\n title=gettext_lazy(\"Topics\"),\n description=gettext_lazy(\n \"Every message has a topic. Topics keep conversations \"\n \"easy to follow, and make it easy to reply to conversations that start \"\n \"while you are offline.\"\n ),\n ),\n Hotspot(\n # In theory, this should be renamed to intro_personal, since\n # it's no longer attached to the gear menu, but renaming these\n # requires a migration that is not worth doing at this time.\n name=\"intro_gear\",\n title=gettext_lazy(\"Settings\"),\n description=gettext_lazy(\"Go to Settings to configure your notifications and preferences.\"),\n ),\n Hotspot(\n name=\"intro_compose\",\n title=gettext_lazy(\"Compose\"),\n description=gettext_lazy(\n \"Click here to start a new conversation. Pick a topic \"\n \"(2-3 words is best), and give it a go!\"\n ),\n ),\n]\n\n\nNON_INTRO_HOTSPOTS: List[Hotspot] = []\n\n\n@dataclass\nclass OneTimeNotice:\n name: str\n\n def to_dict(self) -> Dict[str, str]:\n return {\n \"type\": \"one_time_notice\",\n \"name\": self.name,\n }\n\n\nONE_TIME_NOTICES: List[OneTimeNotice] = [\n OneTimeNotice(\n name=\"visibility_policy_banner\",\n ),\n OneTimeNotice(\n name=\"intro_inbox_view_modal\",\n ),\n OneTimeNotice(\n name=\"intro_recent_view_modal\",\n ),\n OneTimeNotice(\n name=\"first_stream_created_banner\",\n ),\n]\n\n# We would most likely implement new hotspots in the future that aren't\n# a part of the initial tutorial. To that end, classifying them into\n# categories which are aggregated in ALL_HOTSPOTS, seems like a good start.\nALL_HOTSPOTS = [*INTRO_HOTSPOTS, *NON_INTRO_HOTSPOTS]\nALL_ONBOARDING_STEPS: List[Union[Hotspot, OneTimeNotice]] = [*ALL_HOTSPOTS, *ONE_TIME_NOTICES]\n\n\ndef get_next_onboarding_steps(user: UserProfile) -> List[Dict[str, Any]]:\n # For manual testing, it can be convenient to set\n # ALWAYS_SEND_ALL_HOTSPOTS=True in `zproject/dev_settings.py` to\n # make it easy to click on all of the hotspots.\n #\n # Since this is just for development purposes, it's convenient for us to send\n # all the hotspots rather than any specific category.\n if settings.ALWAYS_SEND_ALL_HOTSPOTS:\n return [hotspot.to_dict() for hotspot in ALL_HOTSPOTS]\n\n # If a Zulip server has disabled the tutorial, never send hotspots.\n if not settings.TUTORIAL_ENABLED:\n return []\n\n seen_onboarding_steps = frozenset(\n OnboardingStep.objects.filter(user=user).values_list(\"onboarding_step\", flat=True)\n )\n\n onboarding_steps: List[Dict[str, Any]] = [hotspot.to_dict() for hotspot in NON_INTRO_HOTSPOTS]\n\n for one_time_notice in ONE_TIME_NOTICES:\n if one_time_notice.name in seen_onboarding_steps:\n continue\n onboarding_steps.append(one_time_notice.to_dict())\n\n if user.tutorial_status == UserProfile.TUTORIAL_FINISHED:\n return onboarding_steps\n\n for hotspot in INTRO_HOTSPOTS:\n if hotspot.name in seen_onboarding_steps:\n continue\n\n onboarding_steps.append(hotspot.to_dict(delay=0.5))\n return onboarding_steps\n\n user.tutorial_status = UserProfile.TUTORIAL_FINISHED\n user.save(update_fields=[\"tutorial_status\"])\n return onboarding_steps\n\n\ndef copy_hotspots(source_profile: UserProfile, target_profile: UserProfile) -> None:\n for userhotspot in frozenset(OnboardingStep.objects.filter(user=source_profile)):\n OnboardingStep.objects.create(\n user=target_profile,\n onboarding_step=userhotspot.onboarding_step,\n timestamp=userhotspot.timestamp,\n )\n\n target_profile.tutorial_status = source_profile.tutorial_status\n target_profile.onboarding_steps = source_profile.onboarding_steps\n target_profile.save(update_fields=[\"tutorial_status\", \"onboarding_steps\"])\n", "path": "zerver/lib/hotspots.py" } ]
diff --git a/web/e2e-tests/stream_create.test.ts b/web/e2e-tests/stream_create.test.ts index 7c2cc401080ff..41b3a6c76ce5c 100644 --- a/web/e2e-tests/stream_create.test.ts +++ b/web/e2e-tests/stream_create.test.ts @@ -81,6 +81,13 @@ async function create_stream(page: Page): Promise<void> { stream_description: "Everything Puppeteer", }); await page.click("form#stream_creation_form .finalize_create_stream"); + // an explanatory modal is shown for the first stream created + await common.wait_for_micromodal_to_open(page); + await page.click(".dialog_submit_button"); + await common.wait_for_micromodal_to_close(page); + await page.waitForSelector(".message-header-stream-settings-button"); + await page.click(".message-header-stream-settings-button"); + await page.waitForSelector(".stream_section"); await page.waitForSelector( `xpath///*[${common.has_class_x("stream-name")} and text()="Puppeteer"]`, ); diff --git a/web/src/stream_create.js b/web/src/stream_create.js index af8dfec580c00..b2b9d3f68e396 100644 --- a/web/src/stream_create.js +++ b/web/src/stream_create.js @@ -9,6 +9,7 @@ import * as confirm_dialog from "./confirm_dialog"; import {$t, $t_html} from "./i18n"; import * as keydown_util from "./keydown_util"; import * as loading from "./loading"; +import * as onboarding_steps from "./onboarding_steps"; import * as people from "./people"; import * as settings_data from "./settings_data"; import {current_user, realm} from "./state_data"; @@ -33,6 +34,14 @@ export function get_name() { return created_stream; } +export function set_first_stream_created_modal_shown() { + onboarding_steps.post_onboarding_step_as_read("first_stream_created_banner"); +} + +export function should_show_first_stream_created_modal() { + return onboarding_steps.ONE_TIME_NOTICES_TO_DISPLAY.has("first_stream_created_banner"); +} + class StreamSubscriptionError { report_no_subs_to_stream() { $("#stream_subscription_error").text( diff --git a/web/src/stream_settings_ui.js b/web/src/stream_settings_ui.js index 1845bf664e08e..5bf5cf742cf92 100644 --- a/web/src/stream_settings_ui.js +++ b/web/src/stream_settings_ui.js @@ -1,9 +1,11 @@ import $ from "jquery"; import _ from "lodash"; +import render_inline_decorated_stream_name from "../templates/inline_decorated_stream_name.hbs"; import render_stream_creation_confirmation_banner from "../templates/modal_banner/stream_creation_confirmation_banner.hbs"; import render_browse_streams_list from "../templates/stream_settings/browse_streams_list.hbs"; import render_browse_streams_list_item from "../templates/stream_settings/browse_streams_list_item.hbs"; +import render_first_stream_created_modal from "../templates/stream_settings/first_stream_created_modal.hbs"; import render_stream_settings from "../templates/stream_settings/stream_settings.hbs"; import render_stream_settings_overlay from "../templates/stream_settings/stream_settings_overlay.hbs"; @@ -13,9 +15,10 @@ import * as components from "./components"; import * as compose_banner from "./compose_banner"; import * as compose_recipient from "./compose_recipient"; import * as compose_state from "./compose_state"; +import * as dialog_widget from "./dialog_widget"; import * as hash_parser from "./hash_parser"; import * as hash_util from "./hash_util"; -import {$t} from "./i18n"; +import {$t, $t_html} from "./i18n"; import * as keydown_util from "./keydown_util"; import * as message_lists from "./message_lists"; import * as message_live_update from "./message_live_update"; @@ -244,9 +247,33 @@ export function add_sub_to_table(sub) { render_stream_creation_confirmation_banner(context), ); stream_create.reset_created_stream(); + // goto topic `stream events` of the newly created stream + browser_history.go_to_location( + hash_util.by_stream_topic_url(sub.stream_id, "stream events"), + ); + if (stream_create.should_show_first_stream_created_modal()) { + stream_create.set_first_stream_created_modal_shown(); + show_first_stream_created_modal(sub); + } } update_empty_left_panel_message(); } +function show_first_stream_created_modal(stream) { + dialog_widget.launch({ + html_heading: $t_html( + {defaultMessage: "Stream <b><z-stream></z-stream></b> created!"}, + { + "z-stream": () => render_inline_decorated_stream_name({stream}), + }, + ), + html_body: render_first_stream_created_modal({stream}), + id: "first_stream_created_modal", + on_click() {}, + html_submit_button: $t({defaultMessage: "Continue"}), + close_on_submit: true, + single_footer_button: true, + }); +} export function remove_stream(stream_id) { // It is possible that row is empty when we deactivate a diff --git a/web/templates/stream_settings/first_stream_created_modal.hbs b/web/templates/stream_settings/first_stream_created_modal.hbs new file mode 100644 index 0000000000000..394eb4f5ccab9 --- /dev/null +++ b/web/templates/stream_settings/first_stream_created_modal.hbs @@ -0,0 +1,14 @@ +{{t 'You will now see the stream you created. To go back to stream settings, you can:' }} +<ul> + <li> + {{#tr}} + Click on <z-stream></z-stream> at the top of your Zulip window. + {{#*inline "z-stream"}}<b>{{> ../inline_decorated_stream_name stream=stream}}</b>{{/inline}} + {{/tr}} + </li> + <li> + {{#tr}} + Use the <b>back</b> button in your browser or desktop app. + {{/tr}} + </li> +</ul> diff --git a/zerver/lib/hotspots.py b/zerver/lib/hotspots.py index ea2302a1629e3..95882fce495c9 100644 --- a/zerver/lib/hotspots.py +++ b/zerver/lib/hotspots.py @@ -89,6 +89,9 @@ def to_dict(self) -> Dict[str, str]: OneTimeNotice( name="intro_recent_view_modal", ), + OneTimeNotice( + name="first_stream_created_banner", + ), ] # We would most likely implement new hotspots in the future that aren't diff --git a/zerver/tests/test_hotspots.py b/zerver/tests/test_hotspots.py index accb02ada46a7..0da8ec6e37a40 100644 --- a/zerver/tests/test_hotspots.py +++ b/zerver/tests/test_hotspots.py @@ -39,11 +39,12 @@ def test_some_done_some_not(self) -> None: do_mark_onboarding_step_as_read(self.user, "intro_streams") do_mark_onboarding_step_as_read(self.user, "intro_compose") onboarding_steps = get_next_onboarding_steps(self.user) - self.assert_length(onboarding_steps, 4) + self.assert_length(onboarding_steps, 5) self.assertEqual(onboarding_steps[0]["name"], "visibility_policy_banner") self.assertEqual(onboarding_steps[1]["name"], "intro_inbox_view_modal") self.assertEqual(onboarding_steps[2]["name"], "intro_recent_view_modal") - self.assertEqual(onboarding_steps[3]["name"], "intro_topics") + self.assertEqual(onboarding_steps[3]["name"], "first_stream_created_banner") + self.assertEqual(onboarding_steps[4]["name"], "intro_topics") def test_all_onboarding_steps_done(self) -> None: with self.settings(TUTORIAL_ENABLED=True):
alltheplaces__alltheplaces-4633
Dunelm spider output is missing 41 branches (dunelm_gb) The Dunelm spider dunelm_gb is consistently returning 138 branches for the last few weeks. However, Dunelm's own online store-finder at https://www.dunelm.com/stores/a-z lists 179 branches. All of the 138 are included in the 179, meaning the spider is missing 41. For example, the following branches appear on Dunelm's website, but aren't returned by the spider: - https://www.dunelm.com/stores/altrincham - https://www.dunelm.com/stores/basildon - https://www.dunelm.com/stores/beckton - https://www.dunelm.com/stores/beverley I'm afraid I can't figure out how to manually replicate the spider's request, to check whether the missing branches are missing from the API return, or are just not being picked up by the spider for some reason. I don't know if there's any connection between the missing stores. The Basildon one only opened recently in April 2022 ([source](https://www.echo-news.co.uk/news/20100489.dunelm-opens-mayflower-retail-park-basildon/)) but the Altrincham store has been around since 2017 ([source](https://www.messengernewspapers.co.uk/news/whereyoulive/15122706.customers-attend-opening-of-dunelms-new-altrincham-store/)). I've checked a few of the missing branches and found facebook supprt groupswith recent posts, suggesting that the stores are indeed still open. If the API isn't returning all the stores, then perhaps the online list at https://www.dunelm.com/stores/a-z could be used by the spider instead, or maybe https://www.dunelm.com/sitemap/static-sitemap.xml (which also seems to include all 179).
[ { "content": "from scrapy.http import JsonRequest\nfrom scrapy.spiders import Spider\n\nfrom locations.dict_parser import DictParser\nfrom locations.hours import OpeningHours\n\n\nclass DunelmGB(Spider):\n name = \"dunelm_gb\"\n item_attributes = {\"brand\": \"Dunelm\", \"brand_wikidata\": \"Q5315020\"}\n\n def start_requests(self):\n yield JsonRequest(\n url=\"https://fy8plebn34-dsn.algolia.net/1/indexes/*/queries?x-algolia-application-id=FY8PLEBN34&x-algolia-api-key=ae9bc9ca475f6c3d7579016da0305a33\",\n data={\n \"requests\": [\n {\n \"indexName\": \"stores_prod\",\n \"params\": \"hitsPerPage=300\",\n }\n ]\n },\n )\n\n def parse(self, response, **kwargs):\n for store in response.json()[\"results\"][0][\"hits\"]:\n store[\"location\"] = store[\"_geoloc\"]\n\n item = DictParser.parse(store)\n\n item[\"ref\"] = store[\"sapStoreId\"]\n item[\"website\"] = \"https://www.dunelm.com/stores/\" + store[\"uri\"]\n\n oh = OpeningHours()\n for rule in store[\"openingHours\"]:\n oh.add_range(rule[\"day\"], rule[\"open\"], rule[\"close\"])\n\n item[\"opening_hours\"] = oh.as_opening_hours()\n\n item[\"email\"] = store[\"email\"]\n item[\"extras\"] = {\"storeType\": store.get(\"storeType\")}\n\n yield item\n", "path": "locations/spiders/dunelm_gb.py" } ]
[ { "content": "from scrapy.http import JsonRequest\nfrom scrapy.spiders import Spider\n\nfrom locations.dict_parser import DictParser\nfrom locations.hours import OpeningHours\n\n\nclass DunelmGB(Spider):\n name = \"dunelm_gb\"\n item_attributes = {\"brand\": \"Dunelm\", \"brand_wikidata\": \"Q5315020\"}\n\n def start_requests(self):\n yield JsonRequest(\n url=\"https://fy8plebn34-dsn.algolia.net/1/indexes/*/queries?x-algolia-application-id=FY8PLEBN34&x-algolia-api-key=ae9bc9ca475f6c3d7579016da0305a33\",\n data={\n \"requests\": [\n {\n \"indexName\": \"stores_prod\",\n \"params\": \"hitsPerPage=300\",\n }\n ]\n },\n )\n\n def parse(self, response, **kwargs):\n for store in response.json()[\"results\"][0][\"hits\"]:\n store[\"location\"] = store[\"_geoloc\"]\n\n item = DictParser.parse(store)\n\n item[\"ref\"] = store[\"sapStoreId\"]\n item[\"website\"] = \"https://www.dunelm.com/stores/\" + store[\"uri\"]\n\n oh = OpeningHours()\n for rule in store[\"openingHours\"]:\n oh.add_range(rule[\"day\"], rule[\"open\"], rule[\"close\"])\n\n item[\"opening_hours\"] = oh.as_opening_hours()\n\n item[\"extras\"] = {\"storeType\": store.get(\"storeType\")}\n\n yield item\n", "path": "locations/spiders/dunelm_gb.py" } ]
diff --git a/locations/spiders/dunelm_gb.py b/locations/spiders/dunelm_gb.py index 4dc315bf3db..55b6e6218c2 100644 --- a/locations/spiders/dunelm_gb.py +++ b/locations/spiders/dunelm_gb.py @@ -37,7 +37,6 @@ def parse(self, response, **kwargs): item["opening_hours"] = oh.as_opening_hours() - item["email"] = store["email"] item["extras"] = {"storeType": store.get("storeType")} yield item
pyca__cryptography-1398
Loading private numbers fails on an assertion for bad numbers (I assume they're bad numbers, either way we should never fail assertions). ``` pycon >>>> from cryptography.hazmat.primitives.asymmetric import ec >>>> from cryptography.hazmat.backends import default_backend >>>> numbers = ec.EllipticCurvePrivateNumbers( .... 35764650566032008086366661818264207095808177403860908948280156930896899025506, .... ec.EllipticCurvePublicNumbers( .... 47250808410553270231315736020083458949276863817723245770432653745561185532964, .... 112025329241792435454837567787427195373731133352317839821028450450273536789915, .... ec.SECP256R1(), .... ) .... ) >>>> numbers.private_key(default_backend()) Traceback (most recent call last): File "<stdin>", line 1, in <module> File "cryptography/hazmat/primitives/asymmetric/ec.py", line 275, in private_key return backend.load_elliptic_curve_private_numbers(self) File "cryptography/hazmat/backends/multibackend.py", line 277, in load_elliptic_curve_private_numbers return b.load_elliptic_curve_private_numbers(numbers) File "cryptography/hazmat/backends/openssl/backend.py", line 881, in load_elliptic_curve_private_numbers ec_cdata, public.x, public.y) File "cryptography/hazmat/backends/openssl/backend.py", line 1010, in _ec_key_set_public_key_affine_coordinates assert res == 1 AssertionError ```
[ { "content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or\n# implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom __future__ import absolute_import, division, print_function\n\nimport collections\nimport itertools\nimport warnings\nfrom contextlib import contextmanager\n\nimport six\n\nfrom cryptography import utils\nfrom cryptography.exceptions import (\n InternalError, UnsupportedAlgorithm, _Reasons\n)\nfrom cryptography.hazmat.backends.interfaces import (\n CMACBackend, CipherBackend, DSABackend, EllipticCurveBackend, HMACBackend,\n HashBackend, PBKDF2HMACBackend, PEMSerializationBackend,\n PKCS8SerializationBackend, RSABackend,\n TraditionalOpenSSLSerializationBackend\n)\nfrom cryptography.hazmat.backends.openssl.ciphers import (\n _AESCTRCipherContext, _CipherContext\n)\nfrom cryptography.hazmat.backends.openssl.cmac import _CMACContext\nfrom cryptography.hazmat.backends.openssl.dsa import (\n _DSAParameters, _DSAPrivateKey, _DSAPublicKey\n)\nfrom cryptography.hazmat.backends.openssl.ec import (\n _EllipticCurvePrivateKey, _EllipticCurvePublicKey\n)\nfrom cryptography.hazmat.backends.openssl.hashes import _HashContext\nfrom cryptography.hazmat.backends.openssl.hmac import _HMACContext\nfrom cryptography.hazmat.backends.openssl.rsa import (\n _RSAPrivateKey, _RSAPublicKey\n)\nfrom cryptography.hazmat.bindings.openssl.binding import Binding\nfrom cryptography.hazmat.primitives import hashes\nfrom cryptography.hazmat.primitives.asymmetric import dsa, ec, rsa\nfrom cryptography.hazmat.primitives.asymmetric.padding import (\n MGF1, OAEP, PKCS1v15, PSS\n)\nfrom cryptography.hazmat.primitives.ciphers.algorithms import (\n AES, ARC4, Blowfish, CAST5, Camellia, IDEA, SEED, TripleDES\n)\nfrom cryptography.hazmat.primitives.ciphers.modes import (\n CBC, CFB, CFB8, CTR, ECB, GCM, OFB\n)\n\n\n_MemoryBIO = collections.namedtuple(\"_MemoryBIO\", [\"bio\", \"char_ptr\"])\n_OpenSSLError = collections.namedtuple(\"_OpenSSLError\",\n [\"code\", \"lib\", \"func\", \"reason\"])\n\n\[email protected]_interface(CipherBackend)\[email protected]_interface(CMACBackend)\[email protected]_interface(DSABackend)\[email protected]_interface(EllipticCurveBackend)\[email protected]_interface(HashBackend)\[email protected]_interface(HMACBackend)\[email protected]_interface(PBKDF2HMACBackend)\[email protected]_interface(PKCS8SerializationBackend)\[email protected]_interface(RSABackend)\[email protected]_interface(TraditionalOpenSSLSerializationBackend)\[email protected]_interface(PEMSerializationBackend)\nclass Backend(object):\n \"\"\"\n OpenSSL API binding interfaces.\n \"\"\"\n name = \"openssl\"\n\n def __init__(self):\n self._binding = Binding()\n self._ffi = self._binding.ffi\n self._lib = self._binding.lib\n\n self._binding.init_static_locks()\n\n # adds all ciphers/digests for EVP\n self._lib.OpenSSL_add_all_algorithms()\n # registers available SSL/TLS ciphers and digests\n self._lib.SSL_library_init()\n # loads error strings for libcrypto and libssl functions\n self._lib.SSL_load_error_strings()\n\n self._cipher_registry = {}\n self._register_default_ciphers()\n self.activate_osrandom_engine()\n\n def activate_builtin_random(self):\n # Obtain a new structural reference.\n e = self._lib.ENGINE_get_default_RAND()\n if e != self._ffi.NULL:\n self._lib.ENGINE_unregister_RAND(e)\n # Reset the RNG to use the new engine.\n self._lib.RAND_cleanup()\n # decrement the structural reference from get_default_RAND\n res = self._lib.ENGINE_finish(e)\n assert res == 1\n\n def activate_osrandom_engine(self):\n # Unregister and free the current engine.\n self.activate_builtin_random()\n # Fetches an engine by id and returns it. This creates a structural\n # reference.\n e = self._lib.ENGINE_by_id(self._lib.Cryptography_osrandom_engine_id)\n assert e != self._ffi.NULL\n # Initialize the engine for use. This adds a functional reference.\n res = self._lib.ENGINE_init(e)\n assert res == 1\n # Set the engine as the default RAND provider.\n res = self._lib.ENGINE_set_default_RAND(e)\n assert res == 1\n # Decrement the structural ref incremented by ENGINE_by_id.\n res = self._lib.ENGINE_free(e)\n assert res == 1\n # Decrement the functional ref incremented by ENGINE_init.\n res = self._lib.ENGINE_finish(e)\n assert res == 1\n # Reset the RNG to use the new engine.\n self._lib.RAND_cleanup()\n\n def openssl_version_text(self):\n \"\"\"\n Friendly string name of the loaded OpenSSL library. This is not\n necessarily the same version as it was compiled against.\n\n Example: OpenSSL 1.0.1e 11 Feb 2013\n \"\"\"\n return self._ffi.string(\n self._lib.SSLeay_version(self._lib.SSLEAY_VERSION)\n ).decode(\"ascii\")\n\n def create_hmac_ctx(self, key, algorithm):\n return _HMACContext(self, key, algorithm)\n\n def hash_supported(self, algorithm):\n digest = self._lib.EVP_get_digestbyname(algorithm.name.encode(\"ascii\"))\n return digest != self._ffi.NULL\n\n def hmac_supported(self, algorithm):\n return self.hash_supported(algorithm)\n\n def create_hash_ctx(self, algorithm):\n return _HashContext(self, algorithm)\n\n def cipher_supported(self, cipher, mode):\n if self._evp_cipher_supported(cipher, mode):\n return True\n elif isinstance(mode, CTR) and isinstance(cipher, AES):\n return True\n else:\n return False\n\n def _evp_cipher_supported(self, cipher, mode):\n try:\n adapter = self._cipher_registry[type(cipher), type(mode)]\n except KeyError:\n return False\n evp_cipher = adapter(self, cipher, mode)\n return self._ffi.NULL != evp_cipher\n\n def register_cipher_adapter(self, cipher_cls, mode_cls, adapter):\n if (cipher_cls, mode_cls) in self._cipher_registry:\n raise ValueError(\"Duplicate registration for: {0} {1}.\".format(\n cipher_cls, mode_cls)\n )\n self._cipher_registry[cipher_cls, mode_cls] = adapter\n\n def _register_default_ciphers(self):\n for mode_cls in [CBC, CTR, ECB, OFB, CFB, CFB8]:\n self.register_cipher_adapter(\n AES,\n mode_cls,\n GetCipherByName(\"{cipher.name}-{cipher.key_size}-{mode.name}\")\n )\n for mode_cls in [CBC, CTR, ECB, OFB, CFB]:\n self.register_cipher_adapter(\n Camellia,\n mode_cls,\n GetCipherByName(\"{cipher.name}-{cipher.key_size}-{mode.name}\")\n )\n for mode_cls in [CBC, CFB, CFB8, OFB]:\n self.register_cipher_adapter(\n TripleDES,\n mode_cls,\n GetCipherByName(\"des-ede3-{mode.name}\")\n )\n self.register_cipher_adapter(\n TripleDES,\n ECB,\n GetCipherByName(\"des-ede3\")\n )\n for mode_cls in [CBC, CFB, OFB, ECB]:\n self.register_cipher_adapter(\n Blowfish,\n mode_cls,\n GetCipherByName(\"bf-{mode.name}\")\n )\n for mode_cls in [CBC, CFB, OFB, ECB]:\n self.register_cipher_adapter(\n SEED,\n mode_cls,\n GetCipherByName(\"seed-{mode.name}\")\n )\n for cipher_cls, mode_cls in itertools.product(\n [CAST5, IDEA],\n [CBC, OFB, CFB, ECB],\n ):\n self.register_cipher_adapter(\n cipher_cls,\n mode_cls,\n GetCipherByName(\"{cipher.name}-{mode.name}\")\n )\n self.register_cipher_adapter(\n ARC4,\n type(None),\n GetCipherByName(\"rc4\")\n )\n self.register_cipher_adapter(\n AES,\n GCM,\n GetCipherByName(\"{cipher.name}-{cipher.key_size}-{mode.name}\")\n )\n\n def create_symmetric_encryption_ctx(self, cipher, mode):\n if (isinstance(mode, CTR) and isinstance(cipher, AES)\n and not self._evp_cipher_supported(cipher, mode)):\n # This is needed to provide support for AES CTR mode in OpenSSL\n # 0.9.8. It can be removed when we drop 0.9.8 support (RHEL 5\n # extended life ends 2020).\n return _AESCTRCipherContext(self, cipher, mode)\n else:\n return _CipherContext(self, cipher, mode, _CipherContext._ENCRYPT)\n\n def create_symmetric_decryption_ctx(self, cipher, mode):\n if (isinstance(mode, CTR) and isinstance(cipher, AES)\n and not self._evp_cipher_supported(cipher, mode)):\n # This is needed to provide support for AES CTR mode in OpenSSL\n # 0.9.8. It can be removed when we drop 0.9.8 support (RHEL 5\n # extended life ends 2020).\n return _AESCTRCipherContext(self, cipher, mode)\n else:\n return _CipherContext(self, cipher, mode, _CipherContext._DECRYPT)\n\n def pbkdf2_hmac_supported(self, algorithm):\n if self._lib.Cryptography_HAS_PBKDF2_HMAC:\n return self.hmac_supported(algorithm)\n else:\n # OpenSSL < 1.0.0 has an explicit PBKDF2-HMAC-SHA1 function,\n # so if the PBKDF2_HMAC function is missing we only support\n # SHA1 via PBKDF2_HMAC_SHA1.\n return isinstance(algorithm, hashes.SHA1)\n\n def derive_pbkdf2_hmac(self, algorithm, length, salt, iterations,\n key_material):\n buf = self._ffi.new(\"char[]\", length)\n if self._lib.Cryptography_HAS_PBKDF2_HMAC:\n evp_md = self._lib.EVP_get_digestbyname(\n algorithm.name.encode(\"ascii\"))\n assert evp_md != self._ffi.NULL\n res = self._lib.PKCS5_PBKDF2_HMAC(\n key_material,\n len(key_material),\n salt,\n len(salt),\n iterations,\n evp_md,\n length,\n buf\n )\n assert res == 1\n else:\n if not isinstance(algorithm, hashes.SHA1):\n raise UnsupportedAlgorithm(\n \"This version of OpenSSL only supports PBKDF2HMAC with \"\n \"SHA1.\",\n _Reasons.UNSUPPORTED_HASH\n )\n res = self._lib.PKCS5_PBKDF2_HMAC_SHA1(\n key_material,\n len(key_material),\n salt,\n len(salt),\n iterations,\n length,\n buf\n )\n assert res == 1\n\n return self._ffi.buffer(buf)[:]\n\n def _err_string(self, code):\n err_buf = self._ffi.new(\"char[]\", 256)\n self._lib.ERR_error_string_n(code, err_buf, 256)\n return self._ffi.string(err_buf, 256)[:]\n\n def _consume_errors(self):\n errors = []\n while True:\n code = self._lib.ERR_get_error()\n if code == 0:\n break\n\n lib = self._lib.ERR_GET_LIB(code)\n func = self._lib.ERR_GET_FUNC(code)\n reason = self._lib.ERR_GET_REASON(code)\n\n errors.append(_OpenSSLError(code, lib, func, reason))\n return errors\n\n def _unknown_error(self, error):\n return InternalError(\n \"Unknown error code {0} from OpenSSL, \"\n \"you should probably file a bug. {1}.\".format(\n error.code, self._err_string(error.code)\n )\n )\n\n def _bn_to_int(self, bn):\n if six.PY3:\n # Python 3 has constant time from_bytes, so use that.\n\n bn_num_bytes = (self._lib.BN_num_bits(bn) + 7) // 8\n bin_ptr = self._ffi.new(\"unsigned char[]\", bn_num_bytes)\n bin_len = self._lib.BN_bn2bin(bn, bin_ptr)\n assert bin_len > 0\n assert bin_ptr != self._ffi.NULL\n return int.from_bytes(self._ffi.buffer(bin_ptr)[:bin_len], \"big\")\n\n else:\n # Under Python 2 the best we can do is hex()\n\n hex_cdata = self._lib.BN_bn2hex(bn)\n assert hex_cdata != self._ffi.NULL\n hex_str = self._ffi.string(hex_cdata)\n self._lib.OPENSSL_free(hex_cdata)\n return int(hex_str, 16)\n\n def _int_to_bn(self, num, bn=None):\n \"\"\"\n Converts a python integer to a BIGNUM. The returned BIGNUM will not\n be garbage collected (to support adding them to structs that take\n ownership of the object). Be sure to register it for GC if it will\n be discarded after use.\n \"\"\"\n\n if bn is None:\n bn = self._ffi.NULL\n\n if six.PY3:\n # Python 3 has constant time to_bytes, so use that.\n\n binary = num.to_bytes(int(num.bit_length() / 8.0 + 1), \"big\")\n bn_ptr = self._lib.BN_bin2bn(binary, len(binary), bn)\n assert bn_ptr != self._ffi.NULL\n return bn_ptr\n\n else:\n # Under Python 2 the best we can do is hex()\n\n hex_num = hex(num).rstrip(\"L\").lstrip(\"0x\").encode(\"ascii\") or b\"0\"\n bn_ptr = self._ffi.new(\"BIGNUM **\")\n bn_ptr[0] = bn\n res = self._lib.BN_hex2bn(bn_ptr, hex_num)\n assert res != 0\n assert bn_ptr[0] != self._ffi.NULL\n return bn_ptr[0]\n\n def generate_rsa_private_key(self, public_exponent, key_size):\n rsa._verify_rsa_parameters(public_exponent, key_size)\n\n rsa_cdata = self._lib.RSA_new()\n assert rsa_cdata != self._ffi.NULL\n rsa_cdata = self._ffi.gc(rsa_cdata, self._lib.RSA_free)\n\n bn = self._int_to_bn(public_exponent)\n bn = self._ffi.gc(bn, self._lib.BN_free)\n\n res = self._lib.RSA_generate_key_ex(\n rsa_cdata, key_size, bn, self._ffi.NULL\n )\n assert res == 1\n\n return _RSAPrivateKey(self, rsa_cdata)\n\n def generate_rsa_parameters_supported(self, public_exponent, key_size):\n return (public_exponent >= 3 and public_exponent & 1 != 0 and\n key_size >= 512)\n\n def load_rsa_private_numbers(self, numbers):\n rsa._check_private_key_components(\n numbers.p,\n numbers.q,\n numbers.d,\n numbers.dmp1,\n numbers.dmq1,\n numbers.iqmp,\n numbers.public_numbers.e,\n numbers.public_numbers.n\n )\n rsa_cdata = self._lib.RSA_new()\n assert rsa_cdata != self._ffi.NULL\n rsa_cdata = self._ffi.gc(rsa_cdata, self._lib.RSA_free)\n rsa_cdata.p = self._int_to_bn(numbers.p)\n rsa_cdata.q = self._int_to_bn(numbers.q)\n rsa_cdata.d = self._int_to_bn(numbers.d)\n rsa_cdata.dmp1 = self._int_to_bn(numbers.dmp1)\n rsa_cdata.dmq1 = self._int_to_bn(numbers.dmq1)\n rsa_cdata.iqmp = self._int_to_bn(numbers.iqmp)\n rsa_cdata.e = self._int_to_bn(numbers.public_numbers.e)\n rsa_cdata.n = self._int_to_bn(numbers.public_numbers.n)\n res = self._lib.RSA_blinding_on(rsa_cdata, self._ffi.NULL)\n assert res == 1\n\n return _RSAPrivateKey(self, rsa_cdata)\n\n def load_rsa_public_numbers(self, numbers):\n rsa._check_public_key_components(numbers.e, numbers.n)\n rsa_cdata = self._lib.RSA_new()\n assert rsa_cdata != self._ffi.NULL\n rsa_cdata = self._ffi.gc(rsa_cdata, self._lib.RSA_free)\n rsa_cdata.e = self._int_to_bn(numbers.e)\n rsa_cdata.n = self._int_to_bn(numbers.n)\n res = self._lib.RSA_blinding_on(rsa_cdata, self._ffi.NULL)\n assert res == 1\n\n return _RSAPublicKey(self, rsa_cdata)\n\n def _bytes_to_bio(self, data):\n \"\"\"\n Return a _MemoryBIO namedtuple of (BIO, char*).\n\n The char* is the storage for the BIO and it must stay alive until the\n BIO is finished with.\n \"\"\"\n data_char_p = self._ffi.new(\"char[]\", data)\n bio = self._lib.BIO_new_mem_buf(\n data_char_p, len(data)\n )\n assert bio != self._ffi.NULL\n\n return _MemoryBIO(self._ffi.gc(bio, self._lib.BIO_free), data_char_p)\n\n def _evp_pkey_to_private_key(self, evp_pkey):\n \"\"\"\n Return the appropriate type of PrivateKey given an evp_pkey cdata\n pointer.\n \"\"\"\n\n type = evp_pkey.type\n\n if type == self._lib.EVP_PKEY_RSA:\n rsa_cdata = self._lib.EVP_PKEY_get1_RSA(evp_pkey)\n assert rsa_cdata != self._ffi.NULL\n rsa_cdata = self._ffi.gc(rsa_cdata, self._lib.RSA_free)\n return _RSAPrivateKey(self, rsa_cdata)\n elif type == self._lib.EVP_PKEY_DSA:\n dsa_cdata = self._lib.EVP_PKEY_get1_DSA(evp_pkey)\n assert dsa_cdata != self._ffi.NULL\n dsa_cdata = self._ffi.gc(dsa_cdata, self._lib.DSA_free)\n return _DSAPrivateKey(self, dsa_cdata)\n elif (self._lib.Cryptography_HAS_EC == 1 and\n type == self._lib.EVP_PKEY_EC):\n ec_cdata = self._lib.EVP_PKEY_get1_EC_KEY(evp_pkey)\n assert ec_cdata != self._ffi.NULL\n ec_cdata = self._ffi.gc(ec_cdata, self._lib.EC_KEY_free)\n return _EllipticCurvePrivateKey(self, ec_cdata)\n else:\n raise UnsupportedAlgorithm(\"Unsupported key type.\")\n\n def _evp_pkey_to_public_key(self, evp_pkey):\n \"\"\"\n Return the appropriate type of PublicKey given an evp_pkey cdata\n pointer.\n \"\"\"\n\n type = evp_pkey.type\n\n if type == self._lib.EVP_PKEY_RSA:\n rsa_cdata = self._lib.EVP_PKEY_get1_RSA(evp_pkey)\n assert rsa_cdata != self._ffi.NULL\n rsa_cdata = self._ffi.gc(rsa_cdata, self._lib.RSA_free)\n return _RSAPublicKey(self, rsa_cdata)\n elif type == self._lib.EVP_PKEY_DSA:\n dsa_cdata = self._lib.EVP_PKEY_get1_DSA(evp_pkey)\n assert dsa_cdata != self._ffi.NULL\n dsa_cdata = self._ffi.gc(dsa_cdata, self._lib.DSA_free)\n return _DSAPublicKey(self, dsa_cdata)\n elif (self._lib.Cryptography_HAS_EC == 1 and\n type == self._lib.EVP_PKEY_EC):\n ec_cdata = self._lib.EVP_PKEY_get1_EC_KEY(evp_pkey)\n assert ec_cdata != self._ffi.NULL\n ec_cdata = self._ffi.gc(ec_cdata, self._lib.EC_KEY_free)\n return _EllipticCurvePublicKey(self, ec_cdata)\n else:\n raise UnsupportedAlgorithm(\"Unsupported key type.\")\n\n def _pem_password_cb(self, password):\n \"\"\"\n Generate a pem_password_cb function pointer that copied the password to\n OpenSSL as required and returns the number of bytes copied.\n\n typedef int pem_password_cb(char *buf, int size,\n int rwflag, void *userdata);\n\n Useful for decrypting PKCS8 files and so on.\n\n Returns a tuple of (cdata function pointer, callback function).\n \"\"\"\n\n def pem_password_cb(buf, size, writing, userdata):\n pem_password_cb.called += 1\n\n if not password:\n pem_password_cb.exception = TypeError(\n \"Password was not given but private key is encrypted.\"\n )\n return 0\n elif len(password) < size:\n pw_buf = self._ffi.buffer(buf, size)\n pw_buf[:len(password)] = password\n return len(password)\n else:\n pem_password_cb.exception = ValueError(\n \"Passwords longer than {0} bytes are not supported \"\n \"by this backend.\".format(size - 1)\n )\n return 0\n\n pem_password_cb.called = 0\n pem_password_cb.exception = None\n\n return (\n self._ffi.callback(\"int (char *, int, int, void *)\",\n pem_password_cb),\n pem_password_cb\n )\n\n def _mgf1_hash_supported(self, algorithm):\n if self._lib.Cryptography_HAS_MGF1_MD:\n return self.hash_supported(algorithm)\n else:\n return isinstance(algorithm, hashes.SHA1)\n\n def rsa_padding_supported(self, padding):\n if isinstance(padding, PKCS1v15):\n return True\n elif isinstance(padding, PSS) and isinstance(padding._mgf, MGF1):\n return self._mgf1_hash_supported(padding._mgf._algorithm)\n elif isinstance(padding, OAEP) and isinstance(padding._mgf, MGF1):\n return isinstance(padding._mgf._algorithm, hashes.SHA1)\n else:\n return False\n\n def generate_dsa_parameters(self, key_size):\n if key_size not in (1024, 2048, 3072):\n raise ValueError(\n \"Key size must be 1024 or 2048 or 3072 bits.\")\n\n if (self._lib.OPENSSL_VERSION_NUMBER < 0x1000000f and\n key_size > 1024):\n raise ValueError(\n \"Key size must be 1024 because OpenSSL < 1.0.0 doesn't \"\n \"support larger key sizes.\")\n\n ctx = self._lib.DSA_new()\n assert ctx != self._ffi.NULL\n ctx = self._ffi.gc(ctx, self._lib.DSA_free)\n\n res = self._lib.DSA_generate_parameters_ex(\n ctx, key_size, self._ffi.NULL, 0,\n self._ffi.NULL, self._ffi.NULL, self._ffi.NULL\n )\n\n assert res == 1\n\n return _DSAParameters(self, ctx)\n\n def generate_dsa_private_key(self, parameters):\n ctx = self._lib.DSA_new()\n assert ctx != self._ffi.NULL\n ctx = self._ffi.gc(ctx, self._lib.DSA_free)\n ctx.p = self._lib.BN_dup(parameters._dsa_cdata.p)\n ctx.q = self._lib.BN_dup(parameters._dsa_cdata.q)\n ctx.g = self._lib.BN_dup(parameters._dsa_cdata.g)\n\n self._lib.DSA_generate_key(ctx)\n\n return _DSAPrivateKey(self, ctx)\n\n def generate_dsa_private_key_and_parameters(self, key_size):\n parameters = self.generate_dsa_parameters(key_size)\n return self.generate_dsa_private_key(parameters)\n\n def load_dsa_private_numbers(self, numbers):\n dsa._check_dsa_private_numbers(numbers)\n parameter_numbers = numbers.public_numbers.parameter_numbers\n\n dsa_cdata = self._lib.DSA_new()\n assert dsa_cdata != self._ffi.NULL\n dsa_cdata = self._ffi.gc(dsa_cdata, self._lib.DSA_free)\n\n dsa_cdata.p = self._int_to_bn(parameter_numbers.p)\n dsa_cdata.q = self._int_to_bn(parameter_numbers.q)\n dsa_cdata.g = self._int_to_bn(parameter_numbers.g)\n dsa_cdata.pub_key = self._int_to_bn(numbers.public_numbers.y)\n dsa_cdata.priv_key = self._int_to_bn(numbers.x)\n\n return _DSAPrivateKey(self, dsa_cdata)\n\n def load_dsa_public_numbers(self, numbers):\n dsa._check_dsa_parameters(numbers.parameter_numbers)\n dsa_cdata = self._lib.DSA_new()\n assert dsa_cdata != self._ffi.NULL\n dsa_cdata = self._ffi.gc(dsa_cdata, self._lib.DSA_free)\n\n dsa_cdata.p = self._int_to_bn(numbers.parameter_numbers.p)\n dsa_cdata.q = self._int_to_bn(numbers.parameter_numbers.q)\n dsa_cdata.g = self._int_to_bn(numbers.parameter_numbers.g)\n dsa_cdata.pub_key = self._int_to_bn(numbers.y)\n\n return _DSAPublicKey(self, dsa_cdata)\n\n def load_dsa_parameter_numbers(self, numbers):\n dsa._check_dsa_parameters(numbers)\n dsa_cdata = self._lib.DSA_new()\n assert dsa_cdata != self._ffi.NULL\n dsa_cdata = self._ffi.gc(dsa_cdata, self._lib.DSA_free)\n\n dsa_cdata.p = self._int_to_bn(numbers.p)\n dsa_cdata.q = self._int_to_bn(numbers.q)\n dsa_cdata.g = self._int_to_bn(numbers.g)\n\n return _DSAParameters(self, dsa_cdata)\n\n def dsa_hash_supported(self, algorithm):\n if self._lib.OPENSSL_VERSION_NUMBER < 0x1000000f:\n return isinstance(algorithm, hashes.SHA1)\n else:\n return self.hash_supported(algorithm)\n\n def dsa_parameters_supported(self, p, q, g):\n if self._lib.OPENSSL_VERSION_NUMBER < 0x1000000f:\n return (utils.bit_length(p) <= 1024 and utils.bit_length(q) <= 160)\n else:\n return True\n\n def cmac_algorithm_supported(self, algorithm):\n return (\n self._lib.Cryptography_HAS_CMAC == 1\n and self.cipher_supported(algorithm, CBC(\n b\"\\x00\" * algorithm.block_size))\n )\n\n def create_cmac_ctx(self, algorithm):\n return _CMACContext(self, algorithm)\n\n def load_pem_private_key(self, data, password):\n return self._load_key(\n self._lib.PEM_read_bio_PrivateKey,\n self._evp_pkey_to_private_key,\n data,\n password,\n )\n\n def load_pem_public_key(self, data):\n return self._load_key(\n self._lib.PEM_read_bio_PUBKEY,\n self._evp_pkey_to_public_key,\n data,\n None,\n )\n\n def load_traditional_openssl_pem_private_key(self, data, password):\n warnings.warn(\n \"load_traditional_openssl_pem_private_key is deprecated and will \"\n \"be removed in a future version, use load_pem_private_key \"\n \"instead.\",\n utils.DeprecatedIn06,\n stacklevel=2\n )\n return self.load_pem_private_key(data, password)\n\n def load_pkcs8_pem_private_key(self, data, password):\n warnings.warn(\n \"load_pkcs8_pem_private_key is deprecated and will be removed in a\"\n \" future version, use load_pem_private_key instead.\",\n utils.DeprecatedIn06,\n stacklevel=2\n )\n return self.load_pem_private_key(data, password)\n\n def _load_key(self, openssl_read_func, convert_func, data, password):\n mem_bio = self._bytes_to_bio(data)\n\n password_callback, password_func = self._pem_password_cb(password)\n\n evp_pkey = openssl_read_func(\n mem_bio.bio,\n self._ffi.NULL,\n password_callback,\n self._ffi.NULL\n )\n\n if evp_pkey == self._ffi.NULL:\n if password_func.exception is not None:\n errors = self._consume_errors()\n assert errors\n raise password_func.exception\n else:\n self._handle_key_loading_error()\n\n evp_pkey = self._ffi.gc(evp_pkey, self._lib.EVP_PKEY_free)\n\n if password is not None and password_func.called == 0:\n raise TypeError(\n \"Password was given but private key is not encrypted.\")\n\n assert (\n (password is not None and password_func.called == 1) or\n password is None\n )\n\n return convert_func(evp_pkey)\n\n def _handle_key_loading_error(self):\n errors = self._consume_errors()\n\n if not errors:\n raise ValueError(\"Could not unserialize key data.\")\n\n elif errors[0][1:] == (\n self._lib.ERR_LIB_EVP,\n self._lib.EVP_F_EVP_DECRYPTFINAL_EX,\n self._lib.EVP_R_BAD_DECRYPT\n ):\n raise ValueError(\"Bad decrypt. Incorrect password?\")\n\n elif errors[0][1:] in (\n (\n self._lib.ERR_LIB_PEM,\n self._lib.PEM_F_PEM_GET_EVP_CIPHER_INFO,\n self._lib.PEM_R_UNSUPPORTED_ENCRYPTION\n ),\n\n (\n self._lib.ERR_LIB_EVP,\n self._lib.EVP_F_EVP_PBE_CIPHERINIT,\n self._lib.EVP_R_UNKNOWN_PBE_ALGORITHM\n )\n ):\n raise UnsupportedAlgorithm(\n \"PEM data is encrypted with an unsupported cipher\",\n _Reasons.UNSUPPORTED_CIPHER\n )\n\n elif any(\n error[1:] == (\n self._lib.ERR_LIB_EVP,\n self._lib.EVP_F_EVP_PKCS82PKEY,\n self._lib.EVP_R_UNSUPPORTED_PRIVATE_KEY_ALGORITHM\n )\n for error in errors\n ):\n raise UnsupportedAlgorithm(\n \"Unsupported public key algorithm.\",\n _Reasons.UNSUPPORTED_PUBLIC_KEY_ALGORITHM\n )\n\n else:\n assert errors[0][1] in (\n self._lib.ERR_LIB_EVP,\n self._lib.ERR_LIB_PEM,\n self._lib.ERR_LIB_ASN1,\n )\n raise ValueError(\"Could not unserialize key data.\")\n\n def elliptic_curve_supported(self, curve):\n if self._lib.Cryptography_HAS_EC != 1:\n return False\n\n try:\n curve_nid = self._elliptic_curve_to_nid(curve)\n except UnsupportedAlgorithm:\n curve_nid = self._lib.NID_undef\n\n ctx = self._lib.EC_GROUP_new_by_curve_name(curve_nid)\n\n if ctx == self._ffi.NULL:\n errors = self._consume_errors()\n assert (\n curve_nid == self._lib.NID_undef or\n errors[0][1:] == (\n self._lib.ERR_LIB_EC,\n self._lib.EC_F_EC_GROUP_NEW_BY_CURVE_NAME,\n self._lib.EC_R_UNKNOWN_GROUP\n )\n )\n return False\n else:\n assert curve_nid != self._lib.NID_undef\n self._lib.EC_GROUP_free(ctx)\n return True\n\n def elliptic_curve_signature_algorithm_supported(\n self, signature_algorithm, curve\n ):\n if self._lib.Cryptography_HAS_EC != 1:\n return False\n\n # We only support ECDSA right now.\n if not isinstance(signature_algorithm, ec.ECDSA):\n return False\n\n # Before 0.9.8m OpenSSL can't cope with digests longer than the curve.\n if (\n self._lib.OPENSSL_VERSION_NUMBER < 0x009080df and\n curve.key_size < signature_algorithm.algorithm.digest_size * 8\n ):\n return False\n\n return self.elliptic_curve_supported(curve)\n\n def generate_elliptic_curve_private_key(self, curve):\n \"\"\"\n Generate a new private key on the named curve.\n \"\"\"\n\n if self.elliptic_curve_supported(curve):\n curve_nid = self._elliptic_curve_to_nid(curve)\n\n ec_cdata = self._lib.EC_KEY_new_by_curve_name(curve_nid)\n assert ec_cdata != self._ffi.NULL\n ec_cdata = self._ffi.gc(ec_cdata, self._lib.EC_KEY_free)\n\n res = self._lib.EC_KEY_generate_key(ec_cdata)\n assert res == 1\n\n res = self._lib.EC_KEY_check_key(ec_cdata)\n assert res == 1\n\n return _EllipticCurvePrivateKey(self, ec_cdata)\n else:\n raise UnsupportedAlgorithm(\n \"Backend object does not support {0}.\".format(curve.name),\n _Reasons.UNSUPPORTED_ELLIPTIC_CURVE\n )\n\n def elliptic_curve_private_key_from_numbers(self, numbers):\n warnings.warn(\n \"elliptic_curve_private_key_from_numbers is deprecated and will \"\n \"be removed in a future version.\",\n utils.DeprecatedIn06,\n stacklevel=2\n )\n return self.load_elliptic_curve_private_numbers(numbers)\n\n def load_elliptic_curve_private_numbers(self, numbers):\n public = numbers.public_numbers\n\n curve_nid = self._elliptic_curve_to_nid(public.curve)\n\n ec_cdata = self._lib.EC_KEY_new_by_curve_name(curve_nid)\n assert ec_cdata != self._ffi.NULL\n ec_cdata = self._ffi.gc(ec_cdata, self._lib.EC_KEY_free)\n\n ec_cdata = self._ec_key_set_public_key_affine_coordinates(\n ec_cdata, public.x, public.y)\n\n res = self._lib.EC_KEY_set_private_key(\n ec_cdata, self._int_to_bn(numbers.private_value))\n assert res == 1\n\n return _EllipticCurvePrivateKey(self, ec_cdata)\n\n def elliptic_curve_public_key_from_numbers(self, numbers):\n warnings.warn(\n \"elliptic_curve_public_key_from_numbers is deprecated and will be \"\n \"removed in a future version.\",\n utils.DeprecatedIn06,\n stacklevel=2\n )\n return self.load_elliptic_curve_public_numbers(numbers)\n\n def load_elliptic_curve_public_numbers(self, numbers):\n curve_nid = self._elliptic_curve_to_nid(numbers.curve)\n\n ec_cdata = self._lib.EC_KEY_new_by_curve_name(curve_nid)\n assert ec_cdata != self._ffi.NULL\n ec_cdata = self._ffi.gc(ec_cdata, self._lib.EC_KEY_free)\n\n ec_cdata = self._ec_key_set_public_key_affine_coordinates(\n ec_cdata, numbers.x, numbers.y)\n\n return _EllipticCurvePublicKey(self, ec_cdata)\n\n def _elliptic_curve_to_nid(self, curve):\n \"\"\"\n Get the NID for a curve name.\n \"\"\"\n\n curve_aliases = {\n \"secp192r1\": \"prime192v1\",\n \"secp256r1\": \"prime256v1\"\n }\n\n curve_name = curve_aliases.get(curve.name, curve.name)\n\n curve_nid = self._lib.OBJ_sn2nid(curve_name.encode())\n if curve_nid == self._lib.NID_undef:\n raise UnsupportedAlgorithm(\n \"{0} is not a supported elliptic curve\".format(curve.name),\n _Reasons.UNSUPPORTED_ELLIPTIC_CURVE\n )\n return curve_nid\n\n @contextmanager\n def _tmp_bn_ctx(self):\n bn_ctx = self._lib.BN_CTX_new()\n assert bn_ctx != self._ffi.NULL\n bn_ctx = self._ffi.gc(bn_ctx, self._lib.BN_CTX_free)\n self._lib.BN_CTX_start(bn_ctx)\n try:\n yield bn_ctx\n finally:\n self._lib.BN_CTX_end(bn_ctx)\n\n def _ec_key_determine_group_get_set_funcs(self, ctx):\n \"\"\"\n Given an EC_KEY determine the group and what methods are required to\n get/set point coordinates.\n \"\"\"\n assert ctx != self._ffi.NULL\n\n nid_two_field = self._lib.OBJ_sn2nid(b\"characteristic-two-field\")\n assert nid_two_field != self._lib.NID_undef\n\n group = self._lib.EC_KEY_get0_group(ctx)\n assert group != self._ffi.NULL\n\n method = self._lib.EC_GROUP_method_of(group)\n assert method != self._ffi.NULL\n\n nid = self._lib.EC_METHOD_get_field_type(method)\n assert nid != self._lib.NID_undef\n\n if nid == nid_two_field and self._lib.Cryptography_HAS_EC2M:\n set_func = self._lib.EC_POINT_set_affine_coordinates_GF2m\n get_func = self._lib.EC_POINT_get_affine_coordinates_GF2m\n else:\n set_func = self._lib.EC_POINT_set_affine_coordinates_GFp\n get_func = self._lib.EC_POINT_get_affine_coordinates_GFp\n\n assert set_func and get_func\n\n return set_func, get_func, group\n\n def _ec_key_set_public_key_affine_coordinates(self, ctx, x, y):\n \"\"\"\n This is a port of EC_KEY_set_public_key_affine_coordinates that was\n added in 1.0.1.\n\n Sets the public key point in the EC_KEY context to the affine x and y\n values.\n \"\"\"\n\n bn_x = self._int_to_bn(x)\n bn_y = self._int_to_bn(y)\n\n set_func, get_func, group = (\n self._ec_key_determine_group_get_set_funcs(ctx)\n )\n\n point = self._lib.EC_POINT_new(group)\n assert point != self._ffi.NULL\n point = self._ffi.gc(point, self._lib.EC_POINT_free)\n\n with self._tmp_bn_ctx() as bn_ctx:\n check_x = self._lib.BN_CTX_get(bn_ctx)\n check_y = self._lib.BN_CTX_get(bn_ctx)\n\n res = set_func(group, point, bn_x, bn_y, bn_ctx)\n assert res == 1\n\n res = get_func(group, point, check_x, check_y, bn_ctx)\n assert res == 1\n\n assert (\n self._lib.BN_cmp(bn_x, check_x) == 0 and\n self._lib.BN_cmp(bn_y, check_y) == 0\n )\n\n res = self._lib.EC_KEY_set_public_key(ctx, point)\n assert res == 1\n\n res = self._lib.EC_KEY_check_key(ctx)\n assert res == 1\n\n return ctx\n\n\nclass GetCipherByName(object):\n def __init__(self, fmt):\n self._fmt = fmt\n\n def __call__(self, backend, cipher, mode):\n cipher_name = self._fmt.format(cipher=cipher, mode=mode).lower()\n return backend._lib.EVP_get_cipherbyname(cipher_name.encode(\"ascii\"))\n\n\nbackend = Backend()\n", "path": "cryptography/hazmat/backends/openssl/backend.py" } ]
[ { "content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or\n# implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom __future__ import absolute_import, division, print_function\n\nimport collections\nimport itertools\nimport warnings\nfrom contextlib import contextmanager\n\nimport six\n\nfrom cryptography import utils\nfrom cryptography.exceptions import (\n InternalError, UnsupportedAlgorithm, _Reasons\n)\nfrom cryptography.hazmat.backends.interfaces import (\n CMACBackend, CipherBackend, DSABackend, EllipticCurveBackend, HMACBackend,\n HashBackend, PBKDF2HMACBackend, PEMSerializationBackend,\n PKCS8SerializationBackend, RSABackend,\n TraditionalOpenSSLSerializationBackend\n)\nfrom cryptography.hazmat.backends.openssl.ciphers import (\n _AESCTRCipherContext, _CipherContext\n)\nfrom cryptography.hazmat.backends.openssl.cmac import _CMACContext\nfrom cryptography.hazmat.backends.openssl.dsa import (\n _DSAParameters, _DSAPrivateKey, _DSAPublicKey\n)\nfrom cryptography.hazmat.backends.openssl.ec import (\n _EllipticCurvePrivateKey, _EllipticCurvePublicKey\n)\nfrom cryptography.hazmat.backends.openssl.hashes import _HashContext\nfrom cryptography.hazmat.backends.openssl.hmac import _HMACContext\nfrom cryptography.hazmat.backends.openssl.rsa import (\n _RSAPrivateKey, _RSAPublicKey\n)\nfrom cryptography.hazmat.bindings.openssl.binding import Binding\nfrom cryptography.hazmat.primitives import hashes\nfrom cryptography.hazmat.primitives.asymmetric import dsa, ec, rsa\nfrom cryptography.hazmat.primitives.asymmetric.padding import (\n MGF1, OAEP, PKCS1v15, PSS\n)\nfrom cryptography.hazmat.primitives.ciphers.algorithms import (\n AES, ARC4, Blowfish, CAST5, Camellia, IDEA, SEED, TripleDES\n)\nfrom cryptography.hazmat.primitives.ciphers.modes import (\n CBC, CFB, CFB8, CTR, ECB, GCM, OFB\n)\n\n\n_MemoryBIO = collections.namedtuple(\"_MemoryBIO\", [\"bio\", \"char_ptr\"])\n_OpenSSLError = collections.namedtuple(\"_OpenSSLError\",\n [\"code\", \"lib\", \"func\", \"reason\"])\n\n\[email protected]_interface(CipherBackend)\[email protected]_interface(CMACBackend)\[email protected]_interface(DSABackend)\[email protected]_interface(EllipticCurveBackend)\[email protected]_interface(HashBackend)\[email protected]_interface(HMACBackend)\[email protected]_interface(PBKDF2HMACBackend)\[email protected]_interface(PKCS8SerializationBackend)\[email protected]_interface(RSABackend)\[email protected]_interface(TraditionalOpenSSLSerializationBackend)\[email protected]_interface(PEMSerializationBackend)\nclass Backend(object):\n \"\"\"\n OpenSSL API binding interfaces.\n \"\"\"\n name = \"openssl\"\n\n def __init__(self):\n self._binding = Binding()\n self._ffi = self._binding.ffi\n self._lib = self._binding.lib\n\n self._binding.init_static_locks()\n\n # adds all ciphers/digests for EVP\n self._lib.OpenSSL_add_all_algorithms()\n # registers available SSL/TLS ciphers and digests\n self._lib.SSL_library_init()\n # loads error strings for libcrypto and libssl functions\n self._lib.SSL_load_error_strings()\n\n self._cipher_registry = {}\n self._register_default_ciphers()\n self.activate_osrandom_engine()\n\n def activate_builtin_random(self):\n # Obtain a new structural reference.\n e = self._lib.ENGINE_get_default_RAND()\n if e != self._ffi.NULL:\n self._lib.ENGINE_unregister_RAND(e)\n # Reset the RNG to use the new engine.\n self._lib.RAND_cleanup()\n # decrement the structural reference from get_default_RAND\n res = self._lib.ENGINE_finish(e)\n assert res == 1\n\n def activate_osrandom_engine(self):\n # Unregister and free the current engine.\n self.activate_builtin_random()\n # Fetches an engine by id and returns it. This creates a structural\n # reference.\n e = self._lib.ENGINE_by_id(self._lib.Cryptography_osrandom_engine_id)\n assert e != self._ffi.NULL\n # Initialize the engine for use. This adds a functional reference.\n res = self._lib.ENGINE_init(e)\n assert res == 1\n # Set the engine as the default RAND provider.\n res = self._lib.ENGINE_set_default_RAND(e)\n assert res == 1\n # Decrement the structural ref incremented by ENGINE_by_id.\n res = self._lib.ENGINE_free(e)\n assert res == 1\n # Decrement the functional ref incremented by ENGINE_init.\n res = self._lib.ENGINE_finish(e)\n assert res == 1\n # Reset the RNG to use the new engine.\n self._lib.RAND_cleanup()\n\n def openssl_version_text(self):\n \"\"\"\n Friendly string name of the loaded OpenSSL library. This is not\n necessarily the same version as it was compiled against.\n\n Example: OpenSSL 1.0.1e 11 Feb 2013\n \"\"\"\n return self._ffi.string(\n self._lib.SSLeay_version(self._lib.SSLEAY_VERSION)\n ).decode(\"ascii\")\n\n def create_hmac_ctx(self, key, algorithm):\n return _HMACContext(self, key, algorithm)\n\n def hash_supported(self, algorithm):\n digest = self._lib.EVP_get_digestbyname(algorithm.name.encode(\"ascii\"))\n return digest != self._ffi.NULL\n\n def hmac_supported(self, algorithm):\n return self.hash_supported(algorithm)\n\n def create_hash_ctx(self, algorithm):\n return _HashContext(self, algorithm)\n\n def cipher_supported(self, cipher, mode):\n if self._evp_cipher_supported(cipher, mode):\n return True\n elif isinstance(mode, CTR) and isinstance(cipher, AES):\n return True\n else:\n return False\n\n def _evp_cipher_supported(self, cipher, mode):\n try:\n adapter = self._cipher_registry[type(cipher), type(mode)]\n except KeyError:\n return False\n evp_cipher = adapter(self, cipher, mode)\n return self._ffi.NULL != evp_cipher\n\n def register_cipher_adapter(self, cipher_cls, mode_cls, adapter):\n if (cipher_cls, mode_cls) in self._cipher_registry:\n raise ValueError(\"Duplicate registration for: {0} {1}.\".format(\n cipher_cls, mode_cls)\n )\n self._cipher_registry[cipher_cls, mode_cls] = adapter\n\n def _register_default_ciphers(self):\n for mode_cls in [CBC, CTR, ECB, OFB, CFB, CFB8]:\n self.register_cipher_adapter(\n AES,\n mode_cls,\n GetCipherByName(\"{cipher.name}-{cipher.key_size}-{mode.name}\")\n )\n for mode_cls in [CBC, CTR, ECB, OFB, CFB]:\n self.register_cipher_adapter(\n Camellia,\n mode_cls,\n GetCipherByName(\"{cipher.name}-{cipher.key_size}-{mode.name}\")\n )\n for mode_cls in [CBC, CFB, CFB8, OFB]:\n self.register_cipher_adapter(\n TripleDES,\n mode_cls,\n GetCipherByName(\"des-ede3-{mode.name}\")\n )\n self.register_cipher_adapter(\n TripleDES,\n ECB,\n GetCipherByName(\"des-ede3\")\n )\n for mode_cls in [CBC, CFB, OFB, ECB]:\n self.register_cipher_adapter(\n Blowfish,\n mode_cls,\n GetCipherByName(\"bf-{mode.name}\")\n )\n for mode_cls in [CBC, CFB, OFB, ECB]:\n self.register_cipher_adapter(\n SEED,\n mode_cls,\n GetCipherByName(\"seed-{mode.name}\")\n )\n for cipher_cls, mode_cls in itertools.product(\n [CAST5, IDEA],\n [CBC, OFB, CFB, ECB],\n ):\n self.register_cipher_adapter(\n cipher_cls,\n mode_cls,\n GetCipherByName(\"{cipher.name}-{mode.name}\")\n )\n self.register_cipher_adapter(\n ARC4,\n type(None),\n GetCipherByName(\"rc4\")\n )\n self.register_cipher_adapter(\n AES,\n GCM,\n GetCipherByName(\"{cipher.name}-{cipher.key_size}-{mode.name}\")\n )\n\n def create_symmetric_encryption_ctx(self, cipher, mode):\n if (isinstance(mode, CTR) and isinstance(cipher, AES)\n and not self._evp_cipher_supported(cipher, mode)):\n # This is needed to provide support for AES CTR mode in OpenSSL\n # 0.9.8. It can be removed when we drop 0.9.8 support (RHEL 5\n # extended life ends 2020).\n return _AESCTRCipherContext(self, cipher, mode)\n else:\n return _CipherContext(self, cipher, mode, _CipherContext._ENCRYPT)\n\n def create_symmetric_decryption_ctx(self, cipher, mode):\n if (isinstance(mode, CTR) and isinstance(cipher, AES)\n and not self._evp_cipher_supported(cipher, mode)):\n # This is needed to provide support for AES CTR mode in OpenSSL\n # 0.9.8. It can be removed when we drop 0.9.8 support (RHEL 5\n # extended life ends 2020).\n return _AESCTRCipherContext(self, cipher, mode)\n else:\n return _CipherContext(self, cipher, mode, _CipherContext._DECRYPT)\n\n def pbkdf2_hmac_supported(self, algorithm):\n if self._lib.Cryptography_HAS_PBKDF2_HMAC:\n return self.hmac_supported(algorithm)\n else:\n # OpenSSL < 1.0.0 has an explicit PBKDF2-HMAC-SHA1 function,\n # so if the PBKDF2_HMAC function is missing we only support\n # SHA1 via PBKDF2_HMAC_SHA1.\n return isinstance(algorithm, hashes.SHA1)\n\n def derive_pbkdf2_hmac(self, algorithm, length, salt, iterations,\n key_material):\n buf = self._ffi.new(\"char[]\", length)\n if self._lib.Cryptography_HAS_PBKDF2_HMAC:\n evp_md = self._lib.EVP_get_digestbyname(\n algorithm.name.encode(\"ascii\"))\n assert evp_md != self._ffi.NULL\n res = self._lib.PKCS5_PBKDF2_HMAC(\n key_material,\n len(key_material),\n salt,\n len(salt),\n iterations,\n evp_md,\n length,\n buf\n )\n assert res == 1\n else:\n if not isinstance(algorithm, hashes.SHA1):\n raise UnsupportedAlgorithm(\n \"This version of OpenSSL only supports PBKDF2HMAC with \"\n \"SHA1.\",\n _Reasons.UNSUPPORTED_HASH\n )\n res = self._lib.PKCS5_PBKDF2_HMAC_SHA1(\n key_material,\n len(key_material),\n salt,\n len(salt),\n iterations,\n length,\n buf\n )\n assert res == 1\n\n return self._ffi.buffer(buf)[:]\n\n def _err_string(self, code):\n err_buf = self._ffi.new(\"char[]\", 256)\n self._lib.ERR_error_string_n(code, err_buf, 256)\n return self._ffi.string(err_buf, 256)[:]\n\n def _consume_errors(self):\n errors = []\n while True:\n code = self._lib.ERR_get_error()\n if code == 0:\n break\n\n lib = self._lib.ERR_GET_LIB(code)\n func = self._lib.ERR_GET_FUNC(code)\n reason = self._lib.ERR_GET_REASON(code)\n\n errors.append(_OpenSSLError(code, lib, func, reason))\n return errors\n\n def _unknown_error(self, error):\n return InternalError(\n \"Unknown error code {0} from OpenSSL, \"\n \"you should probably file a bug. {1}.\".format(\n error.code, self._err_string(error.code)\n )\n )\n\n def _bn_to_int(self, bn):\n if six.PY3:\n # Python 3 has constant time from_bytes, so use that.\n\n bn_num_bytes = (self._lib.BN_num_bits(bn) + 7) // 8\n bin_ptr = self._ffi.new(\"unsigned char[]\", bn_num_bytes)\n bin_len = self._lib.BN_bn2bin(bn, bin_ptr)\n assert bin_len > 0\n assert bin_ptr != self._ffi.NULL\n return int.from_bytes(self._ffi.buffer(bin_ptr)[:bin_len], \"big\")\n\n else:\n # Under Python 2 the best we can do is hex()\n\n hex_cdata = self._lib.BN_bn2hex(bn)\n assert hex_cdata != self._ffi.NULL\n hex_str = self._ffi.string(hex_cdata)\n self._lib.OPENSSL_free(hex_cdata)\n return int(hex_str, 16)\n\n def _int_to_bn(self, num, bn=None):\n \"\"\"\n Converts a python integer to a BIGNUM. The returned BIGNUM will not\n be garbage collected (to support adding them to structs that take\n ownership of the object). Be sure to register it for GC if it will\n be discarded after use.\n \"\"\"\n\n if bn is None:\n bn = self._ffi.NULL\n\n if six.PY3:\n # Python 3 has constant time to_bytes, so use that.\n\n binary = num.to_bytes(int(num.bit_length() / 8.0 + 1), \"big\")\n bn_ptr = self._lib.BN_bin2bn(binary, len(binary), bn)\n assert bn_ptr != self._ffi.NULL\n return bn_ptr\n\n else:\n # Under Python 2 the best we can do is hex()\n\n hex_num = hex(num).rstrip(\"L\").lstrip(\"0x\").encode(\"ascii\") or b\"0\"\n bn_ptr = self._ffi.new(\"BIGNUM **\")\n bn_ptr[0] = bn\n res = self._lib.BN_hex2bn(bn_ptr, hex_num)\n assert res != 0\n assert bn_ptr[0] != self._ffi.NULL\n return bn_ptr[0]\n\n def generate_rsa_private_key(self, public_exponent, key_size):\n rsa._verify_rsa_parameters(public_exponent, key_size)\n\n rsa_cdata = self._lib.RSA_new()\n assert rsa_cdata != self._ffi.NULL\n rsa_cdata = self._ffi.gc(rsa_cdata, self._lib.RSA_free)\n\n bn = self._int_to_bn(public_exponent)\n bn = self._ffi.gc(bn, self._lib.BN_free)\n\n res = self._lib.RSA_generate_key_ex(\n rsa_cdata, key_size, bn, self._ffi.NULL\n )\n assert res == 1\n\n return _RSAPrivateKey(self, rsa_cdata)\n\n def generate_rsa_parameters_supported(self, public_exponent, key_size):\n return (public_exponent >= 3 and public_exponent & 1 != 0 and\n key_size >= 512)\n\n def load_rsa_private_numbers(self, numbers):\n rsa._check_private_key_components(\n numbers.p,\n numbers.q,\n numbers.d,\n numbers.dmp1,\n numbers.dmq1,\n numbers.iqmp,\n numbers.public_numbers.e,\n numbers.public_numbers.n\n )\n rsa_cdata = self._lib.RSA_new()\n assert rsa_cdata != self._ffi.NULL\n rsa_cdata = self._ffi.gc(rsa_cdata, self._lib.RSA_free)\n rsa_cdata.p = self._int_to_bn(numbers.p)\n rsa_cdata.q = self._int_to_bn(numbers.q)\n rsa_cdata.d = self._int_to_bn(numbers.d)\n rsa_cdata.dmp1 = self._int_to_bn(numbers.dmp1)\n rsa_cdata.dmq1 = self._int_to_bn(numbers.dmq1)\n rsa_cdata.iqmp = self._int_to_bn(numbers.iqmp)\n rsa_cdata.e = self._int_to_bn(numbers.public_numbers.e)\n rsa_cdata.n = self._int_to_bn(numbers.public_numbers.n)\n res = self._lib.RSA_blinding_on(rsa_cdata, self._ffi.NULL)\n assert res == 1\n\n return _RSAPrivateKey(self, rsa_cdata)\n\n def load_rsa_public_numbers(self, numbers):\n rsa._check_public_key_components(numbers.e, numbers.n)\n rsa_cdata = self._lib.RSA_new()\n assert rsa_cdata != self._ffi.NULL\n rsa_cdata = self._ffi.gc(rsa_cdata, self._lib.RSA_free)\n rsa_cdata.e = self._int_to_bn(numbers.e)\n rsa_cdata.n = self._int_to_bn(numbers.n)\n res = self._lib.RSA_blinding_on(rsa_cdata, self._ffi.NULL)\n assert res == 1\n\n return _RSAPublicKey(self, rsa_cdata)\n\n def _bytes_to_bio(self, data):\n \"\"\"\n Return a _MemoryBIO namedtuple of (BIO, char*).\n\n The char* is the storage for the BIO and it must stay alive until the\n BIO is finished with.\n \"\"\"\n data_char_p = self._ffi.new(\"char[]\", data)\n bio = self._lib.BIO_new_mem_buf(\n data_char_p, len(data)\n )\n assert bio != self._ffi.NULL\n\n return _MemoryBIO(self._ffi.gc(bio, self._lib.BIO_free), data_char_p)\n\n def _evp_pkey_to_private_key(self, evp_pkey):\n \"\"\"\n Return the appropriate type of PrivateKey given an evp_pkey cdata\n pointer.\n \"\"\"\n\n type = evp_pkey.type\n\n if type == self._lib.EVP_PKEY_RSA:\n rsa_cdata = self._lib.EVP_PKEY_get1_RSA(evp_pkey)\n assert rsa_cdata != self._ffi.NULL\n rsa_cdata = self._ffi.gc(rsa_cdata, self._lib.RSA_free)\n return _RSAPrivateKey(self, rsa_cdata)\n elif type == self._lib.EVP_PKEY_DSA:\n dsa_cdata = self._lib.EVP_PKEY_get1_DSA(evp_pkey)\n assert dsa_cdata != self._ffi.NULL\n dsa_cdata = self._ffi.gc(dsa_cdata, self._lib.DSA_free)\n return _DSAPrivateKey(self, dsa_cdata)\n elif (self._lib.Cryptography_HAS_EC == 1 and\n type == self._lib.EVP_PKEY_EC):\n ec_cdata = self._lib.EVP_PKEY_get1_EC_KEY(evp_pkey)\n assert ec_cdata != self._ffi.NULL\n ec_cdata = self._ffi.gc(ec_cdata, self._lib.EC_KEY_free)\n return _EllipticCurvePrivateKey(self, ec_cdata)\n else:\n raise UnsupportedAlgorithm(\"Unsupported key type.\")\n\n def _evp_pkey_to_public_key(self, evp_pkey):\n \"\"\"\n Return the appropriate type of PublicKey given an evp_pkey cdata\n pointer.\n \"\"\"\n\n type = evp_pkey.type\n\n if type == self._lib.EVP_PKEY_RSA:\n rsa_cdata = self._lib.EVP_PKEY_get1_RSA(evp_pkey)\n assert rsa_cdata != self._ffi.NULL\n rsa_cdata = self._ffi.gc(rsa_cdata, self._lib.RSA_free)\n return _RSAPublicKey(self, rsa_cdata)\n elif type == self._lib.EVP_PKEY_DSA:\n dsa_cdata = self._lib.EVP_PKEY_get1_DSA(evp_pkey)\n assert dsa_cdata != self._ffi.NULL\n dsa_cdata = self._ffi.gc(dsa_cdata, self._lib.DSA_free)\n return _DSAPublicKey(self, dsa_cdata)\n elif (self._lib.Cryptography_HAS_EC == 1 and\n type == self._lib.EVP_PKEY_EC):\n ec_cdata = self._lib.EVP_PKEY_get1_EC_KEY(evp_pkey)\n assert ec_cdata != self._ffi.NULL\n ec_cdata = self._ffi.gc(ec_cdata, self._lib.EC_KEY_free)\n return _EllipticCurvePublicKey(self, ec_cdata)\n else:\n raise UnsupportedAlgorithm(\"Unsupported key type.\")\n\n def _pem_password_cb(self, password):\n \"\"\"\n Generate a pem_password_cb function pointer that copied the password to\n OpenSSL as required and returns the number of bytes copied.\n\n typedef int pem_password_cb(char *buf, int size,\n int rwflag, void *userdata);\n\n Useful for decrypting PKCS8 files and so on.\n\n Returns a tuple of (cdata function pointer, callback function).\n \"\"\"\n\n def pem_password_cb(buf, size, writing, userdata):\n pem_password_cb.called += 1\n\n if not password:\n pem_password_cb.exception = TypeError(\n \"Password was not given but private key is encrypted.\"\n )\n return 0\n elif len(password) < size:\n pw_buf = self._ffi.buffer(buf, size)\n pw_buf[:len(password)] = password\n return len(password)\n else:\n pem_password_cb.exception = ValueError(\n \"Passwords longer than {0} bytes are not supported \"\n \"by this backend.\".format(size - 1)\n )\n return 0\n\n pem_password_cb.called = 0\n pem_password_cb.exception = None\n\n return (\n self._ffi.callback(\"int (char *, int, int, void *)\",\n pem_password_cb),\n pem_password_cb\n )\n\n def _mgf1_hash_supported(self, algorithm):\n if self._lib.Cryptography_HAS_MGF1_MD:\n return self.hash_supported(algorithm)\n else:\n return isinstance(algorithm, hashes.SHA1)\n\n def rsa_padding_supported(self, padding):\n if isinstance(padding, PKCS1v15):\n return True\n elif isinstance(padding, PSS) and isinstance(padding._mgf, MGF1):\n return self._mgf1_hash_supported(padding._mgf._algorithm)\n elif isinstance(padding, OAEP) and isinstance(padding._mgf, MGF1):\n return isinstance(padding._mgf._algorithm, hashes.SHA1)\n else:\n return False\n\n def generate_dsa_parameters(self, key_size):\n if key_size not in (1024, 2048, 3072):\n raise ValueError(\n \"Key size must be 1024 or 2048 or 3072 bits.\")\n\n if (self._lib.OPENSSL_VERSION_NUMBER < 0x1000000f and\n key_size > 1024):\n raise ValueError(\n \"Key size must be 1024 because OpenSSL < 1.0.0 doesn't \"\n \"support larger key sizes.\")\n\n ctx = self._lib.DSA_new()\n assert ctx != self._ffi.NULL\n ctx = self._ffi.gc(ctx, self._lib.DSA_free)\n\n res = self._lib.DSA_generate_parameters_ex(\n ctx, key_size, self._ffi.NULL, 0,\n self._ffi.NULL, self._ffi.NULL, self._ffi.NULL\n )\n\n assert res == 1\n\n return _DSAParameters(self, ctx)\n\n def generate_dsa_private_key(self, parameters):\n ctx = self._lib.DSA_new()\n assert ctx != self._ffi.NULL\n ctx = self._ffi.gc(ctx, self._lib.DSA_free)\n ctx.p = self._lib.BN_dup(parameters._dsa_cdata.p)\n ctx.q = self._lib.BN_dup(parameters._dsa_cdata.q)\n ctx.g = self._lib.BN_dup(parameters._dsa_cdata.g)\n\n self._lib.DSA_generate_key(ctx)\n\n return _DSAPrivateKey(self, ctx)\n\n def generate_dsa_private_key_and_parameters(self, key_size):\n parameters = self.generate_dsa_parameters(key_size)\n return self.generate_dsa_private_key(parameters)\n\n def load_dsa_private_numbers(self, numbers):\n dsa._check_dsa_private_numbers(numbers)\n parameter_numbers = numbers.public_numbers.parameter_numbers\n\n dsa_cdata = self._lib.DSA_new()\n assert dsa_cdata != self._ffi.NULL\n dsa_cdata = self._ffi.gc(dsa_cdata, self._lib.DSA_free)\n\n dsa_cdata.p = self._int_to_bn(parameter_numbers.p)\n dsa_cdata.q = self._int_to_bn(parameter_numbers.q)\n dsa_cdata.g = self._int_to_bn(parameter_numbers.g)\n dsa_cdata.pub_key = self._int_to_bn(numbers.public_numbers.y)\n dsa_cdata.priv_key = self._int_to_bn(numbers.x)\n\n return _DSAPrivateKey(self, dsa_cdata)\n\n def load_dsa_public_numbers(self, numbers):\n dsa._check_dsa_parameters(numbers.parameter_numbers)\n dsa_cdata = self._lib.DSA_new()\n assert dsa_cdata != self._ffi.NULL\n dsa_cdata = self._ffi.gc(dsa_cdata, self._lib.DSA_free)\n\n dsa_cdata.p = self._int_to_bn(numbers.parameter_numbers.p)\n dsa_cdata.q = self._int_to_bn(numbers.parameter_numbers.q)\n dsa_cdata.g = self._int_to_bn(numbers.parameter_numbers.g)\n dsa_cdata.pub_key = self._int_to_bn(numbers.y)\n\n return _DSAPublicKey(self, dsa_cdata)\n\n def load_dsa_parameter_numbers(self, numbers):\n dsa._check_dsa_parameters(numbers)\n dsa_cdata = self._lib.DSA_new()\n assert dsa_cdata != self._ffi.NULL\n dsa_cdata = self._ffi.gc(dsa_cdata, self._lib.DSA_free)\n\n dsa_cdata.p = self._int_to_bn(numbers.p)\n dsa_cdata.q = self._int_to_bn(numbers.q)\n dsa_cdata.g = self._int_to_bn(numbers.g)\n\n return _DSAParameters(self, dsa_cdata)\n\n def dsa_hash_supported(self, algorithm):\n if self._lib.OPENSSL_VERSION_NUMBER < 0x1000000f:\n return isinstance(algorithm, hashes.SHA1)\n else:\n return self.hash_supported(algorithm)\n\n def dsa_parameters_supported(self, p, q, g):\n if self._lib.OPENSSL_VERSION_NUMBER < 0x1000000f:\n return (utils.bit_length(p) <= 1024 and utils.bit_length(q) <= 160)\n else:\n return True\n\n def cmac_algorithm_supported(self, algorithm):\n return (\n self._lib.Cryptography_HAS_CMAC == 1\n and self.cipher_supported(algorithm, CBC(\n b\"\\x00\" * algorithm.block_size))\n )\n\n def create_cmac_ctx(self, algorithm):\n return _CMACContext(self, algorithm)\n\n def load_pem_private_key(self, data, password):\n return self._load_key(\n self._lib.PEM_read_bio_PrivateKey,\n self._evp_pkey_to_private_key,\n data,\n password,\n )\n\n def load_pem_public_key(self, data):\n return self._load_key(\n self._lib.PEM_read_bio_PUBKEY,\n self._evp_pkey_to_public_key,\n data,\n None,\n )\n\n def load_traditional_openssl_pem_private_key(self, data, password):\n warnings.warn(\n \"load_traditional_openssl_pem_private_key is deprecated and will \"\n \"be removed in a future version, use load_pem_private_key \"\n \"instead.\",\n utils.DeprecatedIn06,\n stacklevel=2\n )\n return self.load_pem_private_key(data, password)\n\n def load_pkcs8_pem_private_key(self, data, password):\n warnings.warn(\n \"load_pkcs8_pem_private_key is deprecated and will be removed in a\"\n \" future version, use load_pem_private_key instead.\",\n utils.DeprecatedIn06,\n stacklevel=2\n )\n return self.load_pem_private_key(data, password)\n\n def _load_key(self, openssl_read_func, convert_func, data, password):\n mem_bio = self._bytes_to_bio(data)\n\n password_callback, password_func = self._pem_password_cb(password)\n\n evp_pkey = openssl_read_func(\n mem_bio.bio,\n self._ffi.NULL,\n password_callback,\n self._ffi.NULL\n )\n\n if evp_pkey == self._ffi.NULL:\n if password_func.exception is not None:\n errors = self._consume_errors()\n assert errors\n raise password_func.exception\n else:\n self._handle_key_loading_error()\n\n evp_pkey = self._ffi.gc(evp_pkey, self._lib.EVP_PKEY_free)\n\n if password is not None and password_func.called == 0:\n raise TypeError(\n \"Password was given but private key is not encrypted.\")\n\n assert (\n (password is not None and password_func.called == 1) or\n password is None\n )\n\n return convert_func(evp_pkey)\n\n def _handle_key_loading_error(self):\n errors = self._consume_errors()\n\n if not errors:\n raise ValueError(\"Could not unserialize key data.\")\n\n elif errors[0][1:] == (\n self._lib.ERR_LIB_EVP,\n self._lib.EVP_F_EVP_DECRYPTFINAL_EX,\n self._lib.EVP_R_BAD_DECRYPT\n ):\n raise ValueError(\"Bad decrypt. Incorrect password?\")\n\n elif errors[0][1:] in (\n (\n self._lib.ERR_LIB_PEM,\n self._lib.PEM_F_PEM_GET_EVP_CIPHER_INFO,\n self._lib.PEM_R_UNSUPPORTED_ENCRYPTION\n ),\n\n (\n self._lib.ERR_LIB_EVP,\n self._lib.EVP_F_EVP_PBE_CIPHERINIT,\n self._lib.EVP_R_UNKNOWN_PBE_ALGORITHM\n )\n ):\n raise UnsupportedAlgorithm(\n \"PEM data is encrypted with an unsupported cipher\",\n _Reasons.UNSUPPORTED_CIPHER\n )\n\n elif any(\n error[1:] == (\n self._lib.ERR_LIB_EVP,\n self._lib.EVP_F_EVP_PKCS82PKEY,\n self._lib.EVP_R_UNSUPPORTED_PRIVATE_KEY_ALGORITHM\n )\n for error in errors\n ):\n raise UnsupportedAlgorithm(\n \"Unsupported public key algorithm.\",\n _Reasons.UNSUPPORTED_PUBLIC_KEY_ALGORITHM\n )\n\n else:\n assert errors[0][1] in (\n self._lib.ERR_LIB_EVP,\n self._lib.ERR_LIB_PEM,\n self._lib.ERR_LIB_ASN1,\n )\n raise ValueError(\"Could not unserialize key data.\")\n\n def elliptic_curve_supported(self, curve):\n if self._lib.Cryptography_HAS_EC != 1:\n return False\n\n try:\n curve_nid = self._elliptic_curve_to_nid(curve)\n except UnsupportedAlgorithm:\n curve_nid = self._lib.NID_undef\n\n ctx = self._lib.EC_GROUP_new_by_curve_name(curve_nid)\n\n if ctx == self._ffi.NULL:\n errors = self._consume_errors()\n assert (\n curve_nid == self._lib.NID_undef or\n errors[0][1:] == (\n self._lib.ERR_LIB_EC,\n self._lib.EC_F_EC_GROUP_NEW_BY_CURVE_NAME,\n self._lib.EC_R_UNKNOWN_GROUP\n )\n )\n return False\n else:\n assert curve_nid != self._lib.NID_undef\n self._lib.EC_GROUP_free(ctx)\n return True\n\n def elliptic_curve_signature_algorithm_supported(\n self, signature_algorithm, curve\n ):\n if self._lib.Cryptography_HAS_EC != 1:\n return False\n\n # We only support ECDSA right now.\n if not isinstance(signature_algorithm, ec.ECDSA):\n return False\n\n # Before 0.9.8m OpenSSL can't cope with digests longer than the curve.\n if (\n self._lib.OPENSSL_VERSION_NUMBER < 0x009080df and\n curve.key_size < signature_algorithm.algorithm.digest_size * 8\n ):\n return False\n\n return self.elliptic_curve_supported(curve)\n\n def generate_elliptic_curve_private_key(self, curve):\n \"\"\"\n Generate a new private key on the named curve.\n \"\"\"\n\n if self.elliptic_curve_supported(curve):\n curve_nid = self._elliptic_curve_to_nid(curve)\n\n ec_cdata = self._lib.EC_KEY_new_by_curve_name(curve_nid)\n assert ec_cdata != self._ffi.NULL\n ec_cdata = self._ffi.gc(ec_cdata, self._lib.EC_KEY_free)\n\n res = self._lib.EC_KEY_generate_key(ec_cdata)\n assert res == 1\n\n res = self._lib.EC_KEY_check_key(ec_cdata)\n assert res == 1\n\n return _EllipticCurvePrivateKey(self, ec_cdata)\n else:\n raise UnsupportedAlgorithm(\n \"Backend object does not support {0}.\".format(curve.name),\n _Reasons.UNSUPPORTED_ELLIPTIC_CURVE\n )\n\n def elliptic_curve_private_key_from_numbers(self, numbers):\n warnings.warn(\n \"elliptic_curve_private_key_from_numbers is deprecated and will \"\n \"be removed in a future version.\",\n utils.DeprecatedIn06,\n stacklevel=2\n )\n return self.load_elliptic_curve_private_numbers(numbers)\n\n def load_elliptic_curve_private_numbers(self, numbers):\n public = numbers.public_numbers\n\n curve_nid = self._elliptic_curve_to_nid(public.curve)\n\n ec_cdata = self._lib.EC_KEY_new_by_curve_name(curve_nid)\n assert ec_cdata != self._ffi.NULL\n ec_cdata = self._ffi.gc(ec_cdata, self._lib.EC_KEY_free)\n\n ec_cdata = self._ec_key_set_public_key_affine_coordinates(\n ec_cdata, public.x, public.y)\n\n res = self._lib.EC_KEY_set_private_key(\n ec_cdata, self._int_to_bn(numbers.private_value))\n assert res == 1\n\n return _EllipticCurvePrivateKey(self, ec_cdata)\n\n def elliptic_curve_public_key_from_numbers(self, numbers):\n warnings.warn(\n \"elliptic_curve_public_key_from_numbers is deprecated and will be \"\n \"removed in a future version.\",\n utils.DeprecatedIn06,\n stacklevel=2\n )\n return self.load_elliptic_curve_public_numbers(numbers)\n\n def load_elliptic_curve_public_numbers(self, numbers):\n curve_nid = self._elliptic_curve_to_nid(numbers.curve)\n\n ec_cdata = self._lib.EC_KEY_new_by_curve_name(curve_nid)\n assert ec_cdata != self._ffi.NULL\n ec_cdata = self._ffi.gc(ec_cdata, self._lib.EC_KEY_free)\n\n ec_cdata = self._ec_key_set_public_key_affine_coordinates(\n ec_cdata, numbers.x, numbers.y)\n\n return _EllipticCurvePublicKey(self, ec_cdata)\n\n def _elliptic_curve_to_nid(self, curve):\n \"\"\"\n Get the NID for a curve name.\n \"\"\"\n\n curve_aliases = {\n \"secp192r1\": \"prime192v1\",\n \"secp256r1\": \"prime256v1\"\n }\n\n curve_name = curve_aliases.get(curve.name, curve.name)\n\n curve_nid = self._lib.OBJ_sn2nid(curve_name.encode())\n if curve_nid == self._lib.NID_undef:\n raise UnsupportedAlgorithm(\n \"{0} is not a supported elliptic curve\".format(curve.name),\n _Reasons.UNSUPPORTED_ELLIPTIC_CURVE\n )\n return curve_nid\n\n @contextmanager\n def _tmp_bn_ctx(self):\n bn_ctx = self._lib.BN_CTX_new()\n assert bn_ctx != self._ffi.NULL\n bn_ctx = self._ffi.gc(bn_ctx, self._lib.BN_CTX_free)\n self._lib.BN_CTX_start(bn_ctx)\n try:\n yield bn_ctx\n finally:\n self._lib.BN_CTX_end(bn_ctx)\n\n def _ec_key_determine_group_get_set_funcs(self, ctx):\n \"\"\"\n Given an EC_KEY determine the group and what methods are required to\n get/set point coordinates.\n \"\"\"\n assert ctx != self._ffi.NULL\n\n nid_two_field = self._lib.OBJ_sn2nid(b\"characteristic-two-field\")\n assert nid_two_field != self._lib.NID_undef\n\n group = self._lib.EC_KEY_get0_group(ctx)\n assert group != self._ffi.NULL\n\n method = self._lib.EC_GROUP_method_of(group)\n assert method != self._ffi.NULL\n\n nid = self._lib.EC_METHOD_get_field_type(method)\n assert nid != self._lib.NID_undef\n\n if nid == nid_two_field and self._lib.Cryptography_HAS_EC2M:\n set_func = self._lib.EC_POINT_set_affine_coordinates_GF2m\n get_func = self._lib.EC_POINT_get_affine_coordinates_GF2m\n else:\n set_func = self._lib.EC_POINT_set_affine_coordinates_GFp\n get_func = self._lib.EC_POINT_get_affine_coordinates_GFp\n\n assert set_func and get_func\n\n return set_func, get_func, group\n\n def _ec_key_set_public_key_affine_coordinates(self, ctx, x, y):\n \"\"\"\n This is a port of EC_KEY_set_public_key_affine_coordinates that was\n added in 1.0.1.\n\n Sets the public key point in the EC_KEY context to the affine x and y\n values.\n \"\"\"\n\n bn_x = self._int_to_bn(x)\n bn_y = self._int_to_bn(y)\n\n set_func, get_func, group = (\n self._ec_key_determine_group_get_set_funcs(ctx)\n )\n\n point = self._lib.EC_POINT_new(group)\n assert point != self._ffi.NULL\n point = self._ffi.gc(point, self._lib.EC_POINT_free)\n\n with self._tmp_bn_ctx() as bn_ctx:\n check_x = self._lib.BN_CTX_get(bn_ctx)\n check_y = self._lib.BN_CTX_get(bn_ctx)\n\n res = set_func(group, point, bn_x, bn_y, bn_ctx)\n assert res == 1\n\n res = get_func(group, point, check_x, check_y, bn_ctx)\n assert res == 1\n\n assert (\n self._lib.BN_cmp(bn_x, check_x) == 0 and\n self._lib.BN_cmp(bn_y, check_y) == 0\n )\n\n res = self._lib.EC_KEY_set_public_key(ctx, point)\n assert res == 1\n\n res = self._lib.EC_KEY_check_key(ctx)\n if res != 1:\n self._consume_errors()\n raise ValueError(\"Invalid EC key.\")\n\n return ctx\n\n\nclass GetCipherByName(object):\n def __init__(self, fmt):\n self._fmt = fmt\n\n def __call__(self, backend, cipher, mode):\n cipher_name = self._fmt.format(cipher=cipher, mode=mode).lower()\n return backend._lib.EVP_get_cipherbyname(cipher_name.encode(\"ascii\"))\n\n\nbackend = Backend()\n", "path": "cryptography/hazmat/backends/openssl/backend.py" } ]
diff --git a/cryptography/hazmat/backends/openssl/backend.py b/cryptography/hazmat/backends/openssl/backend.py index eadea50ebd78..a449a55ed28e 100644 --- a/cryptography/hazmat/backends/openssl/backend.py +++ b/cryptography/hazmat/backends/openssl/backend.py @@ -1007,7 +1007,9 @@ def _ec_key_set_public_key_affine_coordinates(self, ctx, x, y): assert res == 1 res = self._lib.EC_KEY_check_key(ctx) - assert res == 1 + if res != 1: + self._consume_errors() + raise ValueError("Invalid EC key.") return ctx diff --git a/tests/hazmat/primitives/test_ec.py b/tests/hazmat/primitives/test_ec.py index c53a0cb66c27..887520decf5e 100644 --- a/tests/hazmat/primitives/test_ec.py +++ b/tests/hazmat/primitives/test_ec.py @@ -260,6 +260,20 @@ def test_unknown_signature_algoritm(self, backend): ec.SECP192R1() ) is False + def test_load_invalid_ec_key_from_numbers(self, backend): + _skip_curve_unsupported(backend, ec.SECP256R1()) + + numbers = ec.EllipticCurvePrivateNumbers( + 357646505660320080863666618182642070958081774038609089496899025506, + ec.EllipticCurvePublicNumbers( + 47250808410327023131573602008345894927686381772325561185532964, + 1120253292479243545483756778742719537373113335231773536789915, + ec.SECP256R1(), + ) + ) + with pytest.raises(ValueError): + numbers.private_key(backend) + @pytest.mark.parametrize( "vector", load_vectors_from_file(
aws-cloudformation__cfn-lint-2168
Unknown warning about unpublished metrics *cfn-lint version: 0.55.0* *Description of issue.* `cfn-lint template.yaml` is outputting `There are unpublished metrics. Please make sure you call publish after you record all metrics.` where previous versions of `cfn-lint` did not. This is causing the Atom plugin to display a really intrusive error message on every save event. Frustratingly, I can't find any information on what this message means
[ { "content": "\"\"\"\nCopyright Amazon.com, Inc. or its affiliates. All Rights Reserved.\nSPDX-License-Identifier: MIT-0\n\"\"\"\nimport os\nimport logging\nimport six\nimport samtranslator\nfrom samtranslator.parser import parser\nfrom samtranslator.translator.translator import Translator\nfrom samtranslator.public.exceptions import InvalidDocumentException\n\nfrom cfnlint.helpers import load_resource, convert_dict, format_json_string\nfrom cfnlint.data import Serverless\nfrom cfnlint.rules import Match, TransformError\nLOGGER = logging.getLogger('cfnlint')\n\n\nclass Transform(object):\n \"\"\"\n Application Serverless Module tranform Wrapper.\n Based on code from AWS SAM CLI:\n https://github.com/awslabs/aws-sam-cli/blob/develop/samcli/commands/validate/lib/sam_template_validator.py\n \"\"\"\n\n def __init__(self, filename, template, region):\n \"\"\"\n Initialize Transform class\n \"\"\"\n self._filename = filename\n self._template = template\n self._region = region\n self._parameters = {}\n\n self._managed_policy_map = self.load_managed_policies()\n self._sam_parser = parser.Parser()\n\n def template(self):\n \"\"\"Get the template\"\"\"\n return self._template\n\n def load_managed_policies(self):\n \"\"\"\n Load the ManagedPolicies locally, based on the AWS-CLI:\n https://github.com/awslabs/aws-sam-cli/blob/develop/samcli/lib/samlib/default_managed_policies.json\n \"\"\"\n return load_resource(Serverless, 'ManagedPolicies.json')\n\n def _replace_local_codeuri(self):\n \"\"\"\n Replaces the CodeUri in AWS::Serverless::Function and DefinitionUri in\n AWS::Serverless::Api to a fake S3 Uri. This is to support running the\n SAM Translator with valid values for these fields. If this is not done,\n the template is invalid in the eyes of SAM Translator (the translator\n does not support local paths)\n \"\"\"\n\n all_resources = self._template.get('Resources', {})\n\n template_globals = self._template.get('Globals', {})\n auto_publish_alias = template_globals.get('Function', {}).get('AutoPublishAlias')\n if isinstance(auto_publish_alias, dict):\n if len(auto_publish_alias) == 1:\n for k, v in auto_publish_alias.items():\n if k == 'Ref':\n if v in self._template.get('Parameters'):\n self._parameters[v] = 'Alias'\n\n\n for _, resource in all_resources.items():\n\n resource_type = resource.get('Type')\n resource_dict = resource.get('Properties')\n\n if resource_type == 'AWS::Serverless::Function':\n\n Transform._update_to_s3_uri('CodeUri', resource_dict)\n auto_publish_alias = resource_dict.get('AutoPublishAlias')\n if isinstance(auto_publish_alias, dict):\n if len(auto_publish_alias) == 1:\n for k, v in auto_publish_alias.items():\n if k == 'Ref':\n if v in self._template.get('Parameters'):\n self._parameters[v] = 'Alias'\n if resource_type in ['AWS::Serverless::LayerVersion']:\n if resource_dict.get('ContentUri'):\n Transform._update_to_s3_uri('ContentUri', resource_dict)\n if resource_type == 'AWS::Serverless::Application':\n if resource_dict.get('Location'):\n resource_dict['Location'] = ''\n Transform._update_to_s3_uri('Location', resource_dict)\n if resource_type == 'AWS::Serverless::Api':\n if ('DefinitionBody' not in resource_dict and\n 'Auth' not in resource_dict and 'Cors' not in resource_dict):\n Transform._update_to_s3_uri('DefinitionUri', resource_dict)\n else:\n resource_dict['DefinitionBody'] = ''\n if resource_type == 'AWS::Serverless::StateMachine' and resource_dict.get('DefinitionUri'):\n Transform._update_to_s3_uri('DefinitionUri', resource_dict)\n\n def transform_template(self):\n \"\"\"\n Transform the Template using the Serverless Application Model.\n \"\"\"\n matches = []\n\n try:\n # Output the SAM Translator version in debug mode\n LOGGER.info('SAM Translator: %s', samtranslator.__version__)\n\n sam_translator = Translator(\n managed_policy_map=self._managed_policy_map,\n sam_parser=self._sam_parser)\n\n self._replace_local_codeuri()\n\n # Tell SAM to use the region we're linting in, this has to be\n # controlled using the default AWS mechanisms, see also:\n # https://github.com/awslabs/serverless-application-model/blob/master/samtranslator/translator/arn_generator.py\n LOGGER.info('Setting AWS_DEFAULT_REGION to %s', self._region)\n os.environ['AWS_DEFAULT_REGION'] = self._region\n\n self._template = convert_dict(\n sam_translator.translate(sam_template=self._template,\n parameter_values=self._parameters))\n\n LOGGER.info('Transformed template: \\n%s',\n format_json_string(self._template))\n except InvalidDocumentException as e:\n message = 'Error transforming template: {0}'\n for cause in e.causes:\n matches.append(Match(\n 1, 1,\n 1, 1,\n self._filename,\n TransformError(), message.format(cause.message)))\n except Exception as e: # pylint: disable=W0703\n LOGGER.debug('Error transforming template: %s', str(e))\n LOGGER.debug('Stack trace: %s', e, exc_info=True)\n message = 'Error transforming template: {0}'\n matches.append(Match(\n 1, 1,\n 1, 1,\n self._filename,\n TransformError(), message.format(str(e))))\n\n return matches\n\n @staticmethod\n def is_s3_uri(uri):\n \"\"\"\n Checks the uri and determines if it is a valid S3 Uri\n Parameters\n ----------\n uri str, required\n Uri to check\n Returns\n -------\n bool\n Returns True if the uri given is an S3 uri, otherwise False\n \"\"\"\n return isinstance(uri, six.string_types) and uri.startswith('s3://')\n\n @staticmethod\n def _update_to_s3_uri(\n property_key, resource_property_dict,\n s3_uri_value='s3://bucket/value'):\n \"\"\"\n Updates the 'property_key' in the 'resource_property_dict' to the\n value of 's3_uri_value'\n Note: The function will mutate the resource_property_dict that is pass\n in Parameters\n ----------\n property_key str, required\n Key in the resource_property_dict\n resource_property_dict dict, required\n Property dictionary of a Resource in the template to replace\n s3_uri_value str, optional\n Value to update the value of the property_key to\n \"\"\"\n uri_property = resource_property_dict.get(property_key, '.')\n\n # ignore if dict or already an S3 Uri\n if isinstance(uri_property, dict):\n if len(uri_property) == 1:\n for k in uri_property.keys():\n if k == 'Ref':\n resource_property_dict[property_key] = s3_uri_value\n return\n if Transform.is_s3_uri(uri_property):\n return\n\n resource_property_dict[property_key] = s3_uri_value\n", "path": "src/cfnlint/transform.py" } ]
[ { "content": "\"\"\"\nCopyright Amazon.com, Inc. or its affiliates. All Rights Reserved.\nSPDX-License-Identifier: MIT-0\n\"\"\"\nimport os\nimport logging\nimport six\nimport samtranslator\nfrom samtranslator.parser import parser\nfrom samtranslator.translator.translator import Translator\nfrom samtranslator.public.exceptions import InvalidDocumentException\n\nfrom cfnlint.helpers import load_resource, convert_dict, format_json_string\nfrom cfnlint.data import Serverless\nfrom cfnlint.rules import Match, TransformError\nLOGGER = logging.getLogger('cfnlint')\n\nsamtranslator_logger = logging.getLogger('samtranslator')\nsamtranslator_logger.setLevel(logging.CRITICAL)\n\nclass Transform(object):\n \"\"\"\n Application Serverless Module tranform Wrapper.\n Based on code from AWS SAM CLI:\n https://github.com/awslabs/aws-sam-cli/blob/develop/samcli/commands/validate/lib/sam_template_validator.py\n \"\"\"\n\n def __init__(self, filename, template, region):\n \"\"\"\n Initialize Transform class\n \"\"\"\n self._filename = filename\n self._template = template\n self._region = region\n self._parameters = {}\n\n self._managed_policy_map = self.load_managed_policies()\n self._sam_parser = parser.Parser()\n\n def template(self):\n \"\"\"Get the template\"\"\"\n return self._template\n\n def load_managed_policies(self):\n \"\"\"\n Load the ManagedPolicies locally, based on the AWS-CLI:\n https://github.com/awslabs/aws-sam-cli/blob/develop/samcli/lib/samlib/default_managed_policies.json\n \"\"\"\n return load_resource(Serverless, 'ManagedPolicies.json')\n\n def _replace_local_codeuri(self):\n \"\"\"\n Replaces the CodeUri in AWS::Serverless::Function and DefinitionUri in\n AWS::Serverless::Api to a fake S3 Uri. This is to support running the\n SAM Translator with valid values for these fields. If this is not done,\n the template is invalid in the eyes of SAM Translator (the translator\n does not support local paths)\n \"\"\"\n\n all_resources = self._template.get('Resources', {})\n\n template_globals = self._template.get('Globals', {})\n auto_publish_alias = template_globals.get('Function', {}).get('AutoPublishAlias')\n if isinstance(auto_publish_alias, dict):\n if len(auto_publish_alias) == 1:\n for k, v in auto_publish_alias.items():\n if k == 'Ref':\n if v in self._template.get('Parameters'):\n self._parameters[v] = 'Alias'\n\n\n for _, resource in all_resources.items():\n\n resource_type = resource.get('Type')\n resource_dict = resource.get('Properties')\n\n if resource_type == 'AWS::Serverless::Function':\n\n Transform._update_to_s3_uri('CodeUri', resource_dict)\n auto_publish_alias = resource_dict.get('AutoPublishAlias')\n if isinstance(auto_publish_alias, dict):\n if len(auto_publish_alias) == 1:\n for k, v in auto_publish_alias.items():\n if k == 'Ref':\n if v in self._template.get('Parameters'):\n self._parameters[v] = 'Alias'\n if resource_type in ['AWS::Serverless::LayerVersion']:\n if resource_dict.get('ContentUri'):\n Transform._update_to_s3_uri('ContentUri', resource_dict)\n if resource_type == 'AWS::Serverless::Application':\n if resource_dict.get('Location'):\n resource_dict['Location'] = ''\n Transform._update_to_s3_uri('Location', resource_dict)\n if resource_type == 'AWS::Serverless::Api':\n if ('DefinitionBody' not in resource_dict and\n 'Auth' not in resource_dict and 'Cors' not in resource_dict):\n Transform._update_to_s3_uri('DefinitionUri', resource_dict)\n else:\n resource_dict['DefinitionBody'] = ''\n if resource_type == 'AWS::Serverless::StateMachine' and resource_dict.get('DefinitionUri'):\n Transform._update_to_s3_uri('DefinitionUri', resource_dict)\n\n def transform_template(self):\n \"\"\"\n Transform the Template using the Serverless Application Model.\n \"\"\"\n matches = []\n\n try:\n # Output the SAM Translator version in debug mode\n LOGGER.info('SAM Translator: %s', samtranslator.__version__)\n\n sam_translator = Translator(\n managed_policy_map=self._managed_policy_map,\n sam_parser=self._sam_parser)\n\n self._replace_local_codeuri()\n\n # Tell SAM to use the region we're linting in, this has to be\n # controlled using the default AWS mechanisms, see also:\n # https://github.com/awslabs/serverless-application-model/blob/master/samtranslator/translator/arn_generator.py\n LOGGER.info('Setting AWS_DEFAULT_REGION to %s', self._region)\n os.environ['AWS_DEFAULT_REGION'] = self._region\n\n self._template = convert_dict(\n sam_translator.translate(sam_template=self._template,\n parameter_values=self._parameters))\n\n LOGGER.info('Transformed template: \\n%s',\n format_json_string(self._template))\n except InvalidDocumentException as e:\n message = 'Error transforming template: {0}'\n for cause in e.causes:\n matches.append(Match(\n 1, 1,\n 1, 1,\n self._filename,\n TransformError(), message.format(cause.message)))\n except Exception as e: # pylint: disable=W0703\n LOGGER.debug('Error transforming template: %s', str(e))\n LOGGER.debug('Stack trace: %s', e, exc_info=True)\n message = 'Error transforming template: {0}'\n matches.append(Match(\n 1, 1,\n 1, 1,\n self._filename,\n TransformError(), message.format(str(e))))\n\n return matches\n\n @staticmethod\n def is_s3_uri(uri):\n \"\"\"\n Checks the uri and determines if it is a valid S3 Uri\n Parameters\n ----------\n uri str, required\n Uri to check\n Returns\n -------\n bool\n Returns True if the uri given is an S3 uri, otherwise False\n \"\"\"\n return isinstance(uri, six.string_types) and uri.startswith('s3://')\n\n @staticmethod\n def _update_to_s3_uri(\n property_key, resource_property_dict,\n s3_uri_value='s3://bucket/value'):\n \"\"\"\n Updates the 'property_key' in the 'resource_property_dict' to the\n value of 's3_uri_value'\n Note: The function will mutate the resource_property_dict that is pass\n in Parameters\n ----------\n property_key str, required\n Key in the resource_property_dict\n resource_property_dict dict, required\n Property dictionary of a Resource in the template to replace\n s3_uri_value str, optional\n Value to update the value of the property_key to\n \"\"\"\n uri_property = resource_property_dict.get(property_key, '.')\n\n # ignore if dict or already an S3 Uri\n if isinstance(uri_property, dict):\n if len(uri_property) == 1:\n for k in uri_property.keys():\n if k == 'Ref':\n resource_property_dict[property_key] = s3_uri_value\n return\n if Transform.is_s3_uri(uri_property):\n return\n\n resource_property_dict[property_key] = s3_uri_value\n", "path": "src/cfnlint/transform.py" } ]
diff --git a/src/cfnlint/transform.py b/src/cfnlint/transform.py index 2d0b546eb6..f57f18f5dc 100644 --- a/src/cfnlint/transform.py +++ b/src/cfnlint/transform.py @@ -15,6 +15,8 @@ from cfnlint.rules import Match, TransformError LOGGER = logging.getLogger('cfnlint') +samtranslator_logger = logging.getLogger('samtranslator') +samtranslator_logger.setLevel(logging.CRITICAL) class Transform(object): """
vega__altair-150
to_dict() not in Chart.__dir__ All in title.
[ { "content": "import pandas as pd\nimport traitlets as T\n\nfrom ..utils._py3k_compat import string_types\n\n_attr_template = \"Attribute not found: {0}. Valid keyword arguments for this class: {1}\"\n\n\nclass BaseObject(T.HasTraits):\n\n skip = []\n\n def __init__(self, **kwargs):\n all_traits = list(self.traits())\n for k in kwargs:\n if k not in all_traits:\n raise KeyError(_attr_template.format(k, all_traits))\n super(BaseObject, self).__init__(**kwargs)\n\n @classmethod\n def infer_keywords(cls, *args, **kwargs):\n \"\"\"Utility to initialize object from args and kwargs\n\n Arguments are converted to keyword arguments by inferring the keyword\n from their type.\n Keyword arguments are converted to the correct Instance class\n if required.\n \"\"\"\n def get_class(trait):\n # TODO: what do do with lists?\n if isinstance(trait, T.Union):\n for klass in map(get_class, trait.trait_types):\n if klass:\n return klass\n elif isinstance(trait, T.Instance):\n return trait.klass\n\n traits = cls.class_traits()\n classes = {n: get_class(t) for n, t in traits.items()}\n\n # Turn all keyword arguments to the appropriate class\n for name, arg in kwargs.items():\n Trait = classes.get(name, None)\n if Trait is not None and not isinstance(arg, Trait):\n try:\n kwargs[name] = Trait(arg)\n except (TypeError, T.TraitError):\n pass # errors will handled by traitlets below\n\n # find forward/backward mapping among unique classes\n name_to_trait = {}\n while classes:\n name, trait = classes.popitem()\n if trait is None:\n continue\n if trait not in set.union(set(classes.values()),\n set(name_to_trait.values())):\n name_to_trait[name] = trait\n trait_to_name = {t: n for n, t in name_to_trait.items()}\n\n # Update all arguments\n for arg in args:\n name = trait_to_name.get(type(arg), None)\n if name is None:\n raise ValueError(\"{0}: Unable to infer argument name for {1}\".format(cls, arg))\n elif name in kwargs:\n raise ValueError(\"{0}: {1} specified both by arg and kwarg\".format(cls, name))\n else:\n kwargs[name] = arg\n return kwargs\n\n def update_traits(self, **kwargs):\n for key, val in kwargs.items():\n self.set_trait(key, val)\n return self\n\n def update_inferred_traits(self, *args, **kwargs):\n kwargs = self.infer_keywords(*args, **kwargs)\n return self.update_traits(**kwargs)\n\n def update_subtraits(self, attrs, *args, **kwargs):\n \"\"\"Update sub-traits without overwriting other traits\"\"\"\n if not (args or kwargs):\n return self\n if isinstance(attrs, string_types):\n attrs = (attrs,)\n if len(attrs) == 0:\n self.update_inferred_traits(*args, **kwargs)\n else:\n attr = attrs[0]\n if attr not in self.traits():\n raise ValueError('{0} has no trait {1}'.format(self, attr))\n trait = getattr(self, attr)\n if trait is None:\n trait = self.traits()[attr].klass()\n setattr(self, attr, trait.update_subtraits(attrs[1:], *args, **kwargs))\n return self\n\n def __contains__(self, key):\n try:\n value = getattr(self, key)\n except AttributeError:\n return False\n\n # comparison to None will break, so check DataFrame specifically\n if isinstance(value, pd.DataFrame):\n return True\n elif value is not None:\n if isinstance(value, (int, float, bool)):\n return True\n else:\n return bool(value)\n else:\n return False\n\n def __dir__(self):\n \"\"\"Customize tab completed attributes.\"\"\"\n return list(self.traits())\n\n @classmethod\n def from_dict(cls, dct):\n \"\"\"Instantiate the object from a valid JSON dictionary\"\"\"\n from ..utils.visitors import FromDict\n return FromDict().clsvisit(cls, dct)\n\n def to_dict(self, data=True):\n \"\"\"Emit the JSON representation for this object as as dict.\"\"\"\n from ..utils.visitors import ToDict\n self._finalize()\n return ToDict().visit(self, data)\n\n def _finalize(self, **kwargs):\n \"\"\"Finalize the object, and all contained objects, for export.\"\"\"\n def finalize_obj(obj):\n if isinstance(obj, BaseObject):\n obj._finalize(**kwargs)\n elif isinstance(obj, list):\n for item in obj:\n finalize_obj(item)\n\n for name in self.traits():\n value = getattr(self, name)\n finalize_obj(value)\n", "path": "altair/schema/baseobject.py" } ]
[ { "content": "import pandas as pd\nimport traitlets as T\n\nfrom ..utils._py3k_compat import string_types\n\n_attr_template = \"Attribute not found: {0}. Valid keyword arguments for this class: {1}\"\n\n\nclass BaseObject(T.HasTraits):\n\n skip = []\n\n def __init__(self, **kwargs):\n all_traits = list(self.traits())\n for k in kwargs:\n if k not in all_traits:\n raise KeyError(_attr_template.format(k, all_traits))\n super(BaseObject, self).__init__(**kwargs)\n\n @classmethod\n def infer_keywords(cls, *args, **kwargs):\n \"\"\"Utility to initialize object from args and kwargs\n\n Arguments are converted to keyword arguments by inferring the keyword\n from their type.\n Keyword arguments are converted to the correct Instance class\n if required.\n \"\"\"\n def get_class(trait):\n # TODO: what do do with lists?\n if isinstance(trait, T.Union):\n for klass in map(get_class, trait.trait_types):\n if klass:\n return klass\n elif isinstance(trait, T.Instance):\n return trait.klass\n\n traits = cls.class_traits()\n classes = {n: get_class(t) for n, t in traits.items()}\n\n # Turn all keyword arguments to the appropriate class\n for name, arg in kwargs.items():\n Trait = classes.get(name, None)\n if Trait is not None and not isinstance(arg, Trait):\n try:\n kwargs[name] = Trait(arg)\n except (TypeError, T.TraitError):\n pass # errors will handled by traitlets below\n\n # find forward/backward mapping among unique classes\n name_to_trait = {}\n while classes:\n name, trait = classes.popitem()\n if trait is None:\n continue\n if trait not in set.union(set(classes.values()),\n set(name_to_trait.values())):\n name_to_trait[name] = trait\n trait_to_name = {t: n for n, t in name_to_trait.items()}\n\n # Update all arguments\n for arg in args:\n name = trait_to_name.get(type(arg), None)\n if name is None:\n raise ValueError(\"{0}: Unable to infer argument name for {1}\".format(cls, arg))\n elif name in kwargs:\n raise ValueError(\"{0}: {1} specified both by arg and kwarg\".format(cls, name))\n else:\n kwargs[name] = arg\n return kwargs\n\n def update_traits(self, **kwargs):\n for key, val in kwargs.items():\n self.set_trait(key, val)\n return self\n\n def update_inferred_traits(self, *args, **kwargs):\n kwargs = self.infer_keywords(*args, **kwargs)\n return self.update_traits(**kwargs)\n\n def update_subtraits(self, attrs, *args, **kwargs):\n \"\"\"Update sub-traits without overwriting other traits\"\"\"\n if not (args or kwargs):\n return self\n if isinstance(attrs, string_types):\n attrs = (attrs,)\n if len(attrs) == 0:\n self.update_inferred_traits(*args, **kwargs)\n else:\n attr = attrs[0]\n if attr not in self.traits():\n raise ValueError('{0} has no trait {1}'.format(self, attr))\n trait = getattr(self, attr)\n if trait is None:\n trait = self.traits()[attr].klass()\n setattr(self, attr, trait.update_subtraits(attrs[1:], *args, **kwargs))\n return self\n\n def __contains__(self, key):\n try:\n value = getattr(self, key)\n except AttributeError:\n return False\n\n # comparison to None will break, so check DataFrame specifically\n if isinstance(value, pd.DataFrame):\n return True\n elif value is not None:\n if isinstance(value, (int, float, bool)):\n return True\n else:\n return bool(value)\n else:\n return False\n\n def __dir__(self):\n \"\"\"Customize tab completed attributes.\"\"\"\n return list(self.traits())+['to_dict', 'from_dict']\n\n @classmethod\n def from_dict(cls, dct):\n \"\"\"Instantiate the object from a valid JSON dictionary\"\"\"\n from ..utils.visitors import FromDict\n return FromDict().clsvisit(cls, dct)\n\n def to_dict(self, data=True):\n \"\"\"Emit the JSON representation for this object as as dict.\"\"\"\n from ..utils.visitors import ToDict\n self._finalize()\n return ToDict().visit(self, data)\n\n def _finalize(self, **kwargs):\n \"\"\"Finalize the object, and all contained objects, for export.\"\"\"\n def finalize_obj(obj):\n if isinstance(obj, BaseObject):\n obj._finalize(**kwargs)\n elif isinstance(obj, list):\n for item in obj:\n finalize_obj(item)\n\n for name in self.traits():\n value = getattr(self, name)\n finalize_obj(value)\n", "path": "altair/schema/baseobject.py" } ]
diff --git a/altair/schema/baseobject.py b/altair/schema/baseobject.py index 91511d8a2..23179f66f 100644 --- a/altair/schema/baseobject.py +++ b/altair/schema/baseobject.py @@ -115,7 +115,7 @@ def __contains__(self, key): def __dir__(self): """Customize tab completed attributes.""" - return list(self.traits()) + return list(self.traits())+['to_dict', 'from_dict'] @classmethod def from_dict(cls, dct):
django-cms__django-cms-4163
Ensure that ToolbarPool.get_watch_models is a list Cast `toolbar.watch_models` to a list at https://github.com/divio/django-cms/blob/develop/cms/toolbar_pool.py#L58 Without this change, you'll get a `*** TypeError: can only concatenate list (not "tuple") to list` when `watch_models` is a tuple.
[ { "content": "# -*- coding: utf-8 -*-\nfrom cms.exceptions import ToolbarAlreadyRegistered, ToolbarNotRegistered\nfrom cms.utils.conf import get_cms_setting\nfrom cms.utils.django_load import load, iterload_objects\nfrom django.core.exceptions import ImproperlyConfigured\nfrom django.utils.datastructures import SortedDict\n\n\nclass ToolbarPool(object):\n def __init__(self):\n self.toolbars = SortedDict()\n self._discovered = False\n self.force_register = False\n\n def discover_toolbars(self):\n if self._discovered:\n return\n #import all the modules\n toolbars = get_cms_setting('TOOLBARS')\n if toolbars:\n for cls in iterload_objects(toolbars):\n self.force_register = True\n self.register(cls)\n self.force_register = False\n else:\n load('cms_toolbar')\n self._discovered = True\n\n def clear(self):\n self.toolbars = SortedDict()\n self._discovered = False\n\n def register(self, toolbar):\n if not self.force_register and get_cms_setting('TOOLBARS'):\n return toolbar\n from cms.toolbar_base import CMSToolbar\n # validate the app\n if not issubclass(toolbar, CMSToolbar):\n raise ImproperlyConfigured('CMS Toolbar must inherit '\n 'cms.toolbar_base.CMSToolbar, %r does not' % toolbar)\n name = \"%s.%s\" % (toolbar.__module__, toolbar.__name__)\n if name in self.toolbars.keys():\n raise ToolbarAlreadyRegistered(\"[%s] a toolbar with this name is already registered\" % name)\n self.toolbars[name] = toolbar\n return toolbar\n\n def unregister(self, toolbar):\n name = '%s.%s' % (toolbar.__module__, toolbar.__name__)\n if name not in self.toolbars:\n raise ToolbarNotRegistered('The toolbar %s is not registered' % name)\n del self.toolbars[name]\n\n def get_toolbars(self):\n self.discover_toolbars()\n return self.toolbars\n\n def get_watch_models(self):\n return sum((getattr(tb, 'watch_models', [])\n for tb in self.toolbars.values()), [])\n\n\ntoolbar_pool = ToolbarPool()\n", "path": "cms/toolbar_pool.py" } ]
[ { "content": "# -*- coding: utf-8 -*-\nfrom cms.exceptions import ToolbarAlreadyRegistered, ToolbarNotRegistered\nfrom cms.utils.conf import get_cms_setting\nfrom cms.utils.django_load import load, iterload_objects\nfrom django.core.exceptions import ImproperlyConfigured\nfrom django.utils.datastructures import SortedDict\n\n\nclass ToolbarPool(object):\n def __init__(self):\n self.toolbars = SortedDict()\n self._discovered = False\n self.force_register = False\n\n def discover_toolbars(self):\n if self._discovered:\n return\n #import all the modules\n toolbars = get_cms_setting('TOOLBARS')\n if toolbars:\n for cls in iterload_objects(toolbars):\n self.force_register = True\n self.register(cls)\n self.force_register = False\n else:\n load('cms_toolbar')\n self._discovered = True\n\n def clear(self):\n self.toolbars = SortedDict()\n self._discovered = False\n\n def register(self, toolbar):\n if not self.force_register and get_cms_setting('TOOLBARS'):\n return toolbar\n from cms.toolbar_base import CMSToolbar\n # validate the app\n if not issubclass(toolbar, CMSToolbar):\n raise ImproperlyConfigured('CMS Toolbar must inherit '\n 'cms.toolbar_base.CMSToolbar, %r does not' % toolbar)\n name = \"%s.%s\" % (toolbar.__module__, toolbar.__name__)\n if name in self.toolbars.keys():\n raise ToolbarAlreadyRegistered(\"[%s] a toolbar with this name is already registered\" % name)\n self.toolbars[name] = toolbar\n return toolbar\n\n def unregister(self, toolbar):\n name = '%s.%s' % (toolbar.__module__, toolbar.__name__)\n if name not in self.toolbars:\n raise ToolbarNotRegistered('The toolbar %s is not registered' % name)\n del self.toolbars[name]\n\n def get_toolbars(self):\n self.discover_toolbars()\n return self.toolbars\n\n def get_watch_models(self):\n return sum((list(getattr(tb, 'watch_models', []))\n for tb in self.toolbars.values()), [])\n\n\ntoolbar_pool = ToolbarPool()\n", "path": "cms/toolbar_pool.py" } ]
diff --git a/cms/test_utils/project/placeholderapp/cms_toolbar.py b/cms/test_utils/project/placeholderapp/cms_toolbar.py index 7a680aa6a25..daee6a9aa19 100644 --- a/cms/test_utils/project/placeholderapp/cms_toolbar.py +++ b/cms/test_utils/project/placeholderapp/cms_toolbar.py @@ -12,7 +12,7 @@ @toolbar_pool.register class Example1Toolbar(CMSToolbar): - watch_models = [Example1, CharPksExample] + watch_models = (Example1, CharPksExample) def populate(self): admin_menu = self.toolbar.get_or_create_menu(ADMIN_MENU_IDENTIFIER) diff --git a/cms/tests/toolbar_pool.py b/cms/tests/toolbar_pool.py index 2ef0de21ea6..83a3a589726 100644 --- a/cms/tests/toolbar_pool.py +++ b/cms/tests/toolbar_pool.py @@ -60,3 +60,7 @@ def test_settings(self): response = self.client.get("/en/?%s" % get_cms_setting('CMS_TOOLBAR_URL__EDIT_ON')) self.assertEqual(response.status_code, 200) toolbar_pool.toolbars = toolbars + + def test_watch_models(self): + toolbar_pool.discover_toolbars() + self.assertEqual(type(toolbar_pool.get_watch_models()), list) diff --git a/cms/toolbar_pool.py b/cms/toolbar_pool.py index 55b669bd50a..93957938933 100644 --- a/cms/toolbar_pool.py +++ b/cms/toolbar_pool.py @@ -55,7 +55,7 @@ def get_toolbars(self): return self.toolbars def get_watch_models(self): - return sum((getattr(tb, 'watch_models', []) + return sum((list(getattr(tb, 'watch_models', [])) for tb in self.toolbars.values()), [])
huggingface__text-generation-inference-1182
Update Docker to torch 2.1? ### Feature request H100s have trouble with gptq quants due to not having latest pytorch, can in the next TGI Docker we update torch to this, or have one special for this for use on h100s? ### Motivation Cant get tgi + gptq quant to work on h100s ### Your contribution Sorry I dont have any contribution ^_^
[ { "content": "import sys\nimport subprocess\nimport contextlib\nimport pytest\nimport asyncio\nimport os\nimport docker\nimport json\nimport math\nimport time\nimport random\n\nfrom docker.errors import NotFound\nfrom typing import Optional, List, Dict\nfrom syrupy.extensions.json import JSONSnapshotExtension\nfrom aiohttp import ClientConnectorError, ClientOSError, ServerDisconnectedError\n\nfrom text_generation import AsyncClient\nfrom text_generation.types import Response, Details, InputToken, Token, BestOfSequence\n\nDOCKER_IMAGE = os.getenv(\"DOCKER_IMAGE\", None)\nHUGGING_FACE_HUB_TOKEN = os.getenv(\"HUGGING_FACE_HUB_TOKEN\", None)\nDOCKER_VOLUME = os.getenv(\"DOCKER_VOLUME\", \"/data\")\n\n\nclass ResponseComparator(JSONSnapshotExtension):\n def serialize(\n self,\n data,\n *,\n exclude=None,\n matcher=None,\n ):\n if isinstance(data, List):\n data = [d.dict() for d in data]\n\n data = self._filter(\n data=data, depth=0, path=(), exclude=exclude, matcher=matcher\n )\n return json.dumps(data, indent=2, ensure_ascii=False, sort_keys=False) + \"\\n\"\n\n def matches(\n self,\n *,\n serialized_data,\n snapshot_data,\n ) -> bool:\n def convert_data(data):\n data = json.loads(data)\n\n if isinstance(data, Dict):\n return Response(**data)\n if isinstance(data, List):\n return [Response(**d) for d in data]\n raise NotImplementedError\n\n def eq_token(token: Token, other: Token) -> bool:\n return (\n token.id == other.id\n and token.text == other.text\n and math.isclose(token.logprob, other.logprob, rel_tol=0.2)\n and token.special == other.special\n )\n\n def eq_prefill_token(prefill_token: InputToken, other: InputToken) -> bool:\n try:\n return (\n prefill_token.id == other.id\n and prefill_token.text == other.text\n and (\n math.isclose(prefill_token.logprob, other.logprob, rel_tol=0.2)\n if prefill_token.logprob is not None\n else prefill_token.logprob == other.logprob\n )\n )\n except TypeError:\n return False\n\n def eq_best_of(details: BestOfSequence, other: BestOfSequence) -> bool:\n return (\n details.finish_reason == other.finish_reason\n and details.generated_tokens == other.generated_tokens\n and details.seed == other.seed\n and len(details.prefill) == len(other.prefill)\n and all(\n [\n eq_prefill_token(d, o)\n for d, o in zip(details.prefill, other.prefill)\n ]\n )\n and len(details.tokens) == len(other.tokens)\n and all([eq_token(d, o) for d, o in zip(details.tokens, other.tokens)])\n )\n\n def eq_details(details: Details, other: Details) -> bool:\n return (\n details.finish_reason == other.finish_reason\n and details.generated_tokens == other.generated_tokens\n and details.seed == other.seed\n and len(details.prefill) == len(other.prefill)\n and all(\n [\n eq_prefill_token(d, o)\n for d, o in zip(details.prefill, other.prefill)\n ]\n )\n and len(details.tokens) == len(other.tokens)\n and all([eq_token(d, o) for d, o in zip(details.tokens, other.tokens)])\n and (\n len(details.best_of_sequences)\n if details.best_of_sequences is not None\n else 0\n )\n == (\n len(other.best_of_sequences)\n if other.best_of_sequences is not None\n else 0\n )\n and (\n all(\n [\n eq_best_of(d, o)\n for d, o in zip(\n details.best_of_sequences, other.best_of_sequences\n )\n ]\n )\n if details.best_of_sequences is not None\n else details.best_of_sequences == other.best_of_sequences\n )\n )\n\n def eq_response(response: Response, other: Response) -> bool:\n return response.generated_text == other.generated_text and eq_details(\n response.details, other.details\n )\n\n serialized_data = convert_data(serialized_data)\n snapshot_data = convert_data(snapshot_data)\n\n if not isinstance(serialized_data, List):\n serialized_data = [serialized_data]\n if not isinstance(snapshot_data, List):\n snapshot_data = [snapshot_data]\n\n return len(snapshot_data) == len(serialized_data) and all(\n [eq_response(r, o) for r, o in zip(serialized_data, snapshot_data)]\n )\n\n\nclass LauncherHandle:\n def __init__(self, port: int):\n self.client = AsyncClient(f\"http://localhost:{port}\")\n\n def _inner_health(self):\n raise NotImplementedError\n\n async def health(self, timeout: int = 60):\n assert timeout > 0\n for _ in range(timeout):\n if not self._inner_health():\n raise RuntimeError(\"Launcher crashed\")\n\n try:\n await self.client.generate(\"test\")\n return\n except (ClientConnectorError, ClientOSError, ServerDisconnectedError) as e:\n time.sleep(1)\n raise RuntimeError(\"Health check failed\")\n\n\nclass ContainerLauncherHandle(LauncherHandle):\n def __init__(self, docker_client, container_name, port: int):\n super(ContainerLauncherHandle, self).__init__(port)\n self.docker_client = docker_client\n self.container_name = container_name\n\n def _inner_health(self) -> bool:\n container = self.docker_client.containers.get(self.container_name)\n return container.status in [\"running\", \"created\"]\n\n\nclass ProcessLauncherHandle(LauncherHandle):\n def __init__(self, process, port: int):\n super(ProcessLauncherHandle, self).__init__(port)\n self.process = process\n\n def _inner_health(self) -> bool:\n return self.process.poll() is None\n\n\[email protected]\ndef response_snapshot(snapshot):\n return snapshot.use_extension(ResponseComparator)\n\n\[email protected](scope=\"module\")\ndef event_loop():\n loop = asyncio.get_event_loop()\n yield loop\n loop.close()\n\n\[email protected](scope=\"module\")\ndef launcher(event_loop):\n @contextlib.contextmanager\n def local_launcher(\n model_id: str,\n num_shard: Optional[int] = None,\n quantize: Optional[str] = None,\n trust_remote_code: bool = False,\n use_flash_attention: bool = True,\n ):\n port = random.randint(8000, 10_000)\n master_port = random.randint(10_000, 20_000)\n\n shard_uds_path = (\n f\"/tmp/tgi-tests-{model_id.split('/')[-1]}-{num_shard}-{quantize}-server\"\n )\n\n args = [\n \"text-generation-launcher\",\n \"--model-id\",\n model_id,\n \"--port\",\n str(port),\n \"--master-port\",\n str(master_port),\n \"--shard-uds-path\",\n shard_uds_path,\n ]\n\n env = os.environ\n\n if num_shard is not None:\n args.extend([\"--num-shard\", str(num_shard)])\n if quantize is not None:\n args.append(\"--quantize\")\n args.append(quantize)\n if trust_remote_code:\n args.append(\"--trust-remote-code\")\n\n env[\"LOG_LEVEL\"] = \"info,text_generation_router=debug\"\n\n if not use_flash_attention:\n env[\"USE_FLASH_ATTENTION\"] = \"false\"\n\n with subprocess.Popen(\n args, stdout=subprocess.PIPE, stderr=subprocess.PIPE, env=env\n ) as process:\n yield ProcessLauncherHandle(process, port)\n\n process.terminate()\n process.wait(60)\n\n launcher_output = process.stdout.read().decode(\"utf-8\")\n print(launcher_output, file=sys.stderr)\n\n process.stdout.close()\n process.stderr.close()\n\n if not use_flash_attention:\n del env[\"USE_FLASH_ATTENTION\"]\n\n @contextlib.contextmanager\n def docker_launcher(\n model_id: str,\n num_shard: Optional[int] = None,\n quantize: Optional[str] = None,\n trust_remote_code: bool = False,\n use_flash_attention: bool = True,\n ):\n port = random.randint(8000, 10_000)\n\n args = [\"--model-id\", model_id, \"--env\"]\n\n if num_shard is not None:\n args.extend([\"--num-shard\", str(num_shard)])\n if quantize is not None:\n args.append(\"--quantize\")\n args.append(quantize)\n if trust_remote_code:\n args.append(\"--trust-remote-code\")\n\n client = docker.from_env()\n\n container_name = f\"tgi-tests-{model_id.split('/')[-1]}-{num_shard}-{quantize}\"\n\n try:\n container = client.containers.get(container_name)\n container.stop()\n container.wait()\n except NotFound:\n pass\n\n gpu_count = num_shard if num_shard is not None else 1\n\n env = {\"LOG_LEVEL\": \"info,text_generation_router=debug\"}\n if not use_flash_attention:\n env[\"USE_FLASH_ATTENTION\"] = \"false\"\n\n if HUGGING_FACE_HUB_TOKEN is not None:\n env[\"HUGGING_FACE_HUB_TOKEN\"] = HUGGING_FACE_HUB_TOKEN\n\n volumes = []\n if DOCKER_VOLUME:\n volumes = [f\"{DOCKER_VOLUME}:/data\"]\n\n container = client.containers.run(\n DOCKER_IMAGE,\n command=args,\n name=container_name,\n environment=env,\n auto_remove=False,\n detach=True,\n device_requests=[\n docker.types.DeviceRequest(count=gpu_count, capabilities=[[\"gpu\"]])\n ],\n volumes=volumes,\n ports={\"80/tcp\": port},\n )\n\n yield ContainerLauncherHandle(client, container.name, port)\n\n if not use_flash_attention:\n del env[\"USE_FLASH_ATTENTION\"]\n\n try:\n container.stop()\n container.wait()\n except NotFound:\n pass\n\n container_output = container.logs().decode(\"utf-8\")\n print(container_output, file=sys.stderr)\n\n container.remove()\n\n if DOCKER_IMAGE is not None:\n return docker_launcher\n return local_launcher\n\n\[email protected](scope=\"module\")\ndef generate_load():\n async def generate_load_inner(\n client: AsyncClient, prompt: str, max_new_tokens: int, n: int\n ) -> List[Response]:\n futures = [\n client.generate(\n prompt, max_new_tokens=max_new_tokens, decoder_input_details=True\n )\n for _ in range(n)\n ]\n\n return await asyncio.gather(*futures)\n\n return generate_load_inner\n", "path": "integration-tests/conftest.py" } ]
[ { "content": "import sys\nimport subprocess\nimport contextlib\nimport pytest\nimport asyncio\nimport os\nimport docker\nimport json\nimport math\nimport time\nimport random\n\nfrom docker.errors import NotFound\nfrom typing import Optional, List, Dict\nfrom syrupy.extensions.json import JSONSnapshotExtension\nfrom aiohttp import ClientConnectorError, ClientOSError, ServerDisconnectedError\n\nfrom text_generation import AsyncClient\nfrom text_generation.types import Response, Details, InputToken, Token, BestOfSequence\n\nDOCKER_IMAGE = os.getenv(\"DOCKER_IMAGE\", None)\nHUGGING_FACE_HUB_TOKEN = os.getenv(\"HUGGING_FACE_HUB_TOKEN\", None)\nDOCKER_VOLUME = os.getenv(\"DOCKER_VOLUME\", \"/data\")\n\n\nclass ResponseComparator(JSONSnapshotExtension):\n def serialize(\n self,\n data,\n *,\n exclude=None,\n matcher=None,\n ):\n if isinstance(data, List):\n data = [d.dict() for d in data]\n\n data = self._filter(\n data=data, depth=0, path=(), exclude=exclude, matcher=matcher\n )\n return json.dumps(data, indent=2, ensure_ascii=False, sort_keys=False) + \"\\n\"\n\n def matches(\n self,\n *,\n serialized_data,\n snapshot_data,\n ) -> bool:\n def convert_data(data):\n data = json.loads(data)\n\n if isinstance(data, Dict):\n return Response(**data)\n if isinstance(data, List):\n return [Response(**d) for d in data]\n raise NotImplementedError\n\n def eq_token(token: Token, other: Token) -> bool:\n return (\n token.id == other.id\n and token.text == other.text\n and math.isclose(token.logprob, other.logprob, rel_tol=0.2)\n and token.special == other.special\n )\n\n def eq_prefill_token(prefill_token: InputToken, other: InputToken) -> bool:\n try:\n return (\n prefill_token.id == other.id\n and prefill_token.text == other.text\n and (\n math.isclose(prefill_token.logprob, other.logprob, rel_tol=0.2)\n if prefill_token.logprob is not None\n else prefill_token.logprob == other.logprob\n )\n )\n except TypeError:\n return False\n\n def eq_best_of(details: BestOfSequence, other: BestOfSequence) -> bool:\n return (\n details.finish_reason == other.finish_reason\n and details.generated_tokens == other.generated_tokens\n and details.seed == other.seed\n and len(details.prefill) == len(other.prefill)\n and all(\n [\n eq_prefill_token(d, o)\n for d, o in zip(details.prefill, other.prefill)\n ]\n )\n and len(details.tokens) == len(other.tokens)\n and all([eq_token(d, o) for d, o in zip(details.tokens, other.tokens)])\n )\n\n def eq_details(details: Details, other: Details) -> bool:\n return (\n details.finish_reason == other.finish_reason\n and details.generated_tokens == other.generated_tokens\n and details.seed == other.seed\n and len(details.prefill) == len(other.prefill)\n and all(\n [\n eq_prefill_token(d, o)\n for d, o in zip(details.prefill, other.prefill)\n ]\n )\n and len(details.tokens) == len(other.tokens)\n and all([eq_token(d, o) for d, o in zip(details.tokens, other.tokens)])\n and (\n len(details.best_of_sequences)\n if details.best_of_sequences is not None\n else 0\n )\n == (\n len(other.best_of_sequences)\n if other.best_of_sequences is not None\n else 0\n )\n and (\n all(\n [\n eq_best_of(d, o)\n for d, o in zip(\n details.best_of_sequences, other.best_of_sequences\n )\n ]\n )\n if details.best_of_sequences is not None\n else details.best_of_sequences == other.best_of_sequences\n )\n )\n\n def eq_response(response: Response, other: Response) -> bool:\n return response.generated_text == other.generated_text and eq_details(\n response.details, other.details\n )\n\n serialized_data = convert_data(serialized_data)\n snapshot_data = convert_data(snapshot_data)\n\n if not isinstance(serialized_data, List):\n serialized_data = [serialized_data]\n if not isinstance(snapshot_data, List):\n snapshot_data = [snapshot_data]\n\n return len(snapshot_data) == len(serialized_data) and all(\n [eq_response(r, o) for r, o in zip(serialized_data, snapshot_data)]\n )\n\n\nclass LauncherHandle:\n def __init__(self, port: int):\n self.client = AsyncClient(f\"http://localhost:{port}\")\n\n def _inner_health(self):\n raise NotImplementedError\n\n async def health(self, timeout: int = 60):\n assert timeout > 0\n for _ in range(timeout):\n if not self._inner_health():\n raise RuntimeError(\"Launcher crashed\")\n\n try:\n await self.client.generate(\"test\")\n return\n except (ClientConnectorError, ClientOSError, ServerDisconnectedError) as e:\n time.sleep(1)\n raise RuntimeError(\"Health check failed\")\n\n\nclass ContainerLauncherHandle(LauncherHandle):\n def __init__(self, docker_client, container_name, port: int):\n super(ContainerLauncherHandle, self).__init__(port)\n self.docker_client = docker_client\n self.container_name = container_name\n\n def _inner_health(self) -> bool:\n container = self.docker_client.containers.get(self.container_name)\n return container.status in [\"running\", \"created\"]\n\n\nclass ProcessLauncherHandle(LauncherHandle):\n def __init__(self, process, port: int):\n super(ProcessLauncherHandle, self).__init__(port)\n self.process = process\n\n def _inner_health(self) -> bool:\n return self.process.poll() is None\n\n\[email protected]\ndef response_snapshot(snapshot):\n return snapshot.use_extension(ResponseComparator)\n\n\[email protected](scope=\"module\")\ndef event_loop():\n loop = asyncio.get_event_loop()\n yield loop\n loop.close()\n\n\[email protected](scope=\"module\")\ndef launcher(event_loop):\n @contextlib.contextmanager\n def local_launcher(\n model_id: str,\n num_shard: Optional[int] = None,\n quantize: Optional[str] = None,\n trust_remote_code: bool = False,\n use_flash_attention: bool = True,\n ):\n port = random.randint(8000, 10_000)\n master_port = random.randint(10_000, 20_000)\n\n shard_uds_path = (\n f\"/tmp/tgi-tests-{model_id.split('/')[-1]}-{num_shard}-{quantize}-server\"\n )\n\n args = [\n \"text-generation-launcher\",\n \"--model-id\",\n model_id,\n \"--port\",\n str(port),\n \"--master-port\",\n str(master_port),\n \"--shard-uds-path\",\n shard_uds_path,\n ]\n\n env = os.environ\n\n if num_shard is not None:\n args.extend([\"--num-shard\", str(num_shard)])\n if quantize is not None:\n args.append(\"--quantize\")\n args.append(quantize)\n if trust_remote_code:\n args.append(\"--trust-remote-code\")\n\n env[\"LOG_LEVEL\"] = \"info,text_generation_router=debug\"\n\n if not use_flash_attention:\n env[\"USE_FLASH_ATTENTION\"] = \"false\"\n\n with subprocess.Popen(\n args, stdout=subprocess.PIPE, stderr=subprocess.PIPE, env=env\n ) as process:\n yield ProcessLauncherHandle(process, port)\n\n process.terminate()\n process.wait(60)\n\n launcher_output = process.stdout.read().decode(\"utf-8\")\n print(launcher_output, file=sys.stderr)\n\n process.stdout.close()\n process.stderr.close()\n\n if not use_flash_attention:\n del env[\"USE_FLASH_ATTENTION\"]\n\n @contextlib.contextmanager\n def docker_launcher(\n model_id: str,\n num_shard: Optional[int] = None,\n quantize: Optional[str] = None,\n trust_remote_code: bool = False,\n use_flash_attention: bool = True,\n ):\n port = random.randint(8000, 10_000)\n\n args = [\"--model-id\", model_id, \"--env\"]\n\n if num_shard is not None:\n args.extend([\"--num-shard\", str(num_shard)])\n if quantize is not None:\n args.append(\"--quantize\")\n args.append(quantize)\n if trust_remote_code:\n args.append(\"--trust-remote-code\")\n\n client = docker.from_env()\n\n container_name = f\"tgi-tests-{model_id.split('/')[-1]}-{num_shard}-{quantize}\"\n\n try:\n container = client.containers.get(container_name)\n container.stop()\n container.wait()\n except NotFound:\n pass\n\n gpu_count = num_shard if num_shard is not None else 1\n\n env = {\"LOG_LEVEL\": \"info,text_generation_router=debug\"}\n if not use_flash_attention:\n env[\"USE_FLASH_ATTENTION\"] = \"false\"\n\n if HUGGING_FACE_HUB_TOKEN is not None:\n env[\"HUGGING_FACE_HUB_TOKEN\"] = HUGGING_FACE_HUB_TOKEN\n\n volumes = []\n if DOCKER_VOLUME:\n volumes = [f\"{DOCKER_VOLUME}:/data\"]\n\n container = client.containers.run(\n DOCKER_IMAGE,\n command=args,\n name=container_name,\n environment=env,\n auto_remove=False,\n detach=True,\n device_requests=[\n docker.types.DeviceRequest(count=gpu_count, capabilities=[[\"gpu\"]])\n ],\n volumes=volumes,\n ports={\"80/tcp\": port},\n shm_size=\"1G\"\n )\n\n yield ContainerLauncherHandle(client, container.name, port)\n\n if not use_flash_attention:\n del env[\"USE_FLASH_ATTENTION\"]\n\n try:\n container.stop()\n container.wait()\n except NotFound:\n pass\n\n container_output = container.logs().decode(\"utf-8\")\n print(container_output, file=sys.stderr)\n\n container.remove()\n\n if DOCKER_IMAGE is not None:\n return docker_launcher\n return local_launcher\n\n\[email protected](scope=\"module\")\ndef generate_load():\n async def generate_load_inner(\n client: AsyncClient, prompt: str, max_new_tokens: int, n: int\n ) -> List[Response]:\n futures = [\n client.generate(\n prompt, max_new_tokens=max_new_tokens, decoder_input_details=True\n )\n for _ in range(n)\n ]\n\n return await asyncio.gather(*futures)\n\n return generate_load_inner\n", "path": "integration-tests/conftest.py" } ]
diff --git a/Dockerfile b/Dockerfile index 9c15f023e3d..cf5e0ed612c 100644 --- a/Dockerfile +++ b/Dockerfile @@ -37,13 +37,13 @@ RUN cargo build --release # Python builder # Adapted from: https://github.com/pytorch/pytorch/blob/master/Dockerfile -FROM debian:bullseye-slim as pytorch-install +FROM nvidia/cuda:12.1.0-devel-ubuntu20.04 as pytorch-install -ARG PYTORCH_VERSION=2.0.1 -ARG PYTHON_VERSION=3.9 +ARG PYTORCH_VERSION=2.1.1 +ARG PYTHON_VERSION=3.10 # Keep in sync with `server/pyproject.toml -ARG CUDA_VERSION=11.8 -ARG MAMBA_VERSION=23.1.0-1 +ARG CUDA_VERSION=12.1 +ARG MAMBA_VERSION=23.3.1-1 ARG CUDA_CHANNEL=nvidia ARG INSTALL_CHANNEL=pytorch # Automatically set by buildx @@ -75,20 +75,19 @@ RUN chmod +x ~/mambaforge.sh && \ RUN case ${TARGETPLATFORM} in \ "linux/arm64") exit 1 ;; \ *) /opt/conda/bin/conda update -y conda && \ - /opt/conda/bin/conda install -c "${INSTALL_CHANNEL}" -c "${CUDA_CHANNEL}" -y "python=${PYTHON_VERSION}" pytorch==$PYTORCH_VERSION "pytorch-cuda=$(echo $CUDA_VERSION | cut -d'.' -f 1-2)" ;; \ + /opt/conda/bin/conda install -c "${INSTALL_CHANNEL}" -c "${CUDA_CHANNEL}" -y "python=${PYTHON_VERSION}" "pytorch=$PYTORCH_VERSION" "pytorch-cuda=$(echo $CUDA_VERSION | cut -d'.' -f 1-2)" ;; \ esac && \ /opt/conda/bin/conda clean -ya # CUDA kernels builder image FROM pytorch-install as kernel-builder +ARG MAX_JOBS=8 + RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install -y --no-install-recommends \ ninja-build \ && rm -rf /var/lib/apt/lists/* -RUN /opt/conda/bin/conda install -c "nvidia/label/cuda-11.8.0" cuda==11.8 && \ - /opt/conda/bin/conda clean -ya - # Build Flash Attention CUDA kernels FROM kernel-builder as flash-att-builder @@ -148,7 +147,7 @@ COPY server/Makefile-vllm Makefile RUN make build-vllm # Text Generation Inference base image -FROM nvidia/cuda:11.8.0-base-ubuntu20.04 as base +FROM nvidia/cuda:12.1.0-base-ubuntu20.04 as base # Conda env ENV PATH=/opt/conda/bin:$PATH \ @@ -172,24 +171,24 @@ RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install -y --no-ins COPY --from=pytorch-install /opt/conda /opt/conda # Copy build artifacts from flash attention builder -COPY --from=flash-att-builder /usr/src/flash-attention/build/lib.linux-x86_64-cpython-39 /opt/conda/lib/python3.9/site-packages -COPY --from=flash-att-builder /usr/src/flash-attention/csrc/layer_norm/build/lib.linux-x86_64-cpython-39 /opt/conda/lib/python3.9/site-packages -COPY --from=flash-att-builder /usr/src/flash-attention/csrc/rotary/build/lib.linux-x86_64-cpython-39 /opt/conda/lib/python3.9/site-packages +COPY --from=flash-att-builder /usr/src/flash-attention/build/lib.linux-x86_64-cpython-310 /opt/conda/lib/python3.10/site-packages +COPY --from=flash-att-builder /usr/src/flash-attention/csrc/layer_norm/build/lib.linux-x86_64-cpython-310 /opt/conda/lib/python3.10/site-packages +COPY --from=flash-att-builder /usr/src/flash-attention/csrc/rotary/build/lib.linux-x86_64-cpython-310 /opt/conda/lib/python3.10/site-packages # Copy build artifacts from flash attention v2 builder -COPY --from=flash-att-v2-builder /usr/src/flash-attention-v2/build/lib.linux-x86_64-cpython-39 /opt/conda/lib/python3.9/site-packages +COPY --from=flash-att-v2-builder /usr/src/flash-attention-v2/build/lib.linux-x86_64-cpython-310 /opt/conda/lib/python3.10/site-packages # Copy build artifacts from custom kernels builder -COPY --from=custom-kernels-builder /usr/src/build/lib.linux-x86_64-cpython-39 /opt/conda/lib/python3.9/site-packages +COPY --from=custom-kernels-builder /usr/src/build/lib.linux-x86_64-cpython-310 /opt/conda/lib/python3.10/site-packages # Copy build artifacts from exllama kernels builder -COPY --from=exllama-kernels-builder /usr/src/build/lib.linux-x86_64-cpython-39 /opt/conda/lib/python3.9/site-packages +COPY --from=exllama-kernels-builder /usr/src/build/lib.linux-x86_64-cpython-310 /opt/conda/lib/python3.10/site-packages # Copy build artifacts from awq kernels builder -COPY --from=awq-kernels-builder /usr/src/llm-awq/awq/kernels/build/lib.linux-x86_64-cpython-39 /opt/conda/lib/python3.9/site-packages +COPY --from=awq-kernels-builder /usr/src/llm-awq/awq/kernels/build/lib.linux-x86_64-cpython-310 /opt/conda/lib/python3.10/site-packages # Copy build artifacts from eetq kernels builder -COPY --from=eetq-kernels-builder /usr/src/eetq/build/lib.linux-x86_64-cpython-39 /opt/conda/lib/python3.9/site-packages +COPY --from=eetq-kernels-builder /usr/src/eetq/build/lib.linux-x86_64-cpython-310 /opt/conda/lib/python3.10/site-packages # Copy builds artifacts from vllm builder -COPY --from=vllm-builder /usr/src/vllm/build/lib.linux-x86_64-cpython-39 /opt/conda/lib/python3.9/site-packages +COPY --from=vllm-builder /usr/src/vllm/build/lib.linux-x86_64-cpython-310 /opt/conda/lib/python3.10/site-packages # Install flash-attention dependencies RUN pip install einops --no-cache-dir @@ -201,7 +200,7 @@ COPY server/Makefile server/Makefile RUN cd server && \ make gen-server && \ pip install -r requirements.txt && \ - pip install ".[bnb, accelerate, quantize]" --no-cache-dir + pip install ".[bnb, accelerate, quantize, peft]" --no-cache-dir # Install benchmarker COPY --from=builder /usr/src/target/release/text-generation-benchmark /usr/local/bin/text-generation-benchmark diff --git a/integration-tests/conftest.py b/integration-tests/conftest.py index 3f7a24dd35a..c1cbe7f3e96 100644 --- a/integration-tests/conftest.py +++ b/integration-tests/conftest.py @@ -318,6 +318,7 @@ def docker_launcher( ], volumes=volumes, ports={"80/tcp": port}, + shm_size="1G" ) yield ContainerLauncherHandle(client, container.name, port) diff --git a/server/Makefile b/server/Makefile index 52543e3d215..92958d02034 100644 --- a/server/Makefile +++ b/server/Makefile @@ -16,17 +16,13 @@ gen-server: find text_generation_server/pb/ -type f -name "*.py" -print0 -exec sed -i -e 's/^\(import.*pb2\)/from . \1/g' {} \; touch text_generation_server/pb/__init__.py -install-torch: - # Install specific version of torch - pip install torch --extra-index-url https://download.pytorch.org/whl/cu118 --no-cache-dir - -install: gen-server install-torch +install: gen-server pip install pip --upgrade pip install -r requirements.txt - pip install -e ".[bnb, accelerate]" + pip install -e ".[bnb, accelerate, quantize, peft]" run-dev: SAFETENSORS_FAST_GPU=1 python -m torch.distributed.run --nproc_per_node=2 text_generation_server/cli.py serve bigscience/bloom-560m --sharded export-requirements: - poetry export -o requirements.txt -E bnb -E quantize --without-hashes + poetry export -o requirements.txt -E bnb --without-hashes diff --git a/server/poetry.lock b/server/poetry.lock index 0caa1d34146..48ae40fe0d1 100644 --- a/server/poetry.lock +++ b/server/poetry.lock @@ -1,10 +1,10 @@ -# This file is automatically @generated by Poetry 1.5.1 and should not be changed by hand. +# This file is automatically @generated by Poetry 1.6.1 and should not be changed by hand. [[package]] name = "accelerate" version = "0.20.3" description = "Accelerate" -optional = false +optional = true python-versions = ">=3.7.0" files = [ {file = "accelerate-0.20.3-py3-none-any.whl", hash = "sha256:147183e7a2215f7bd45a7af3b986a963daa8a61fa58b0912b9473049e011ad15"}, @@ -30,111 +30,99 @@ testing = ["datasets", "deepspeed", "evaluate", "parameterized", "pytest", "pyte [[package]] name = "aiohttp" -version = "3.8.5" +version = "3.9.0" description = "Async http client/server framework (asyncio)" optional = true -python-versions = ">=3.6" +python-versions = ">=3.8" files = [ - {file = "aiohttp-3.8.5-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:a94159871304770da4dd371f4291b20cac04e8c94f11bdea1c3478e557fbe0d8"}, - {file = "aiohttp-3.8.5-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:13bf85afc99ce6f9ee3567b04501f18f9f8dbbb2ea11ed1a2e079670403a7c84"}, - {file = "aiohttp-3.8.5-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:2ce2ac5708501afc4847221a521f7e4b245abf5178cf5ddae9d5b3856ddb2f3a"}, - {file = "aiohttp-3.8.5-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:96943e5dcc37a6529d18766597c491798b7eb7a61d48878611298afc1fca946c"}, - {file = "aiohttp-3.8.5-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:2ad5c3c4590bb3cc28b4382f031f3783f25ec223557124c68754a2231d989e2b"}, - {file = "aiohttp-3.8.5-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:0c413c633d0512df4dc7fd2373ec06cc6a815b7b6d6c2f208ada7e9e93a5061d"}, - {file = "aiohttp-3.8.5-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:df72ac063b97837a80d80dec8d54c241af059cc9bb42c4de68bd5b61ceb37caa"}, - {file = "aiohttp-3.8.5-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c48c5c0271149cfe467c0ff8eb941279fd6e3f65c9a388c984e0e6cf57538e14"}, - {file = "aiohttp-3.8.5-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:368a42363c4d70ab52c2c6420a57f190ed3dfaca6a1b19afda8165ee16416a82"}, - {file = "aiohttp-3.8.5-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:7607ec3ce4993464368505888af5beb446845a014bc676d349efec0e05085905"}, - {file = "aiohttp-3.8.5-cp310-cp310-musllinux_1_1_ppc64le.whl", hash = "sha256:0d21c684808288a98914e5aaf2a7c6a3179d4df11d249799c32d1808e79503b5"}, - {file = "aiohttp-3.8.5-cp310-cp310-musllinux_1_1_s390x.whl", hash = "sha256:312fcfbacc7880a8da0ae8b6abc6cc7d752e9caa0051a53d217a650b25e9a691"}, - {file = "aiohttp-3.8.5-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:ad093e823df03bb3fd37e7dec9d4670c34f9e24aeace76808fc20a507cace825"}, - {file = "aiohttp-3.8.5-cp310-cp310-win32.whl", hash = "sha256:33279701c04351a2914e1100b62b2a7fdb9a25995c4a104259f9a5ead7ed4802"}, - {file = "aiohttp-3.8.5-cp310-cp310-win_amd64.whl", hash = "sha256:6e4a280e4b975a2e7745573e3fc9c9ba0d1194a3738ce1cbaa80626cc9b4f4df"}, - {file = "aiohttp-3.8.5-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:ae871a964e1987a943d83d6709d20ec6103ca1eaf52f7e0d36ee1b5bebb8b9b9"}, - {file = "aiohttp-3.8.5-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:461908b2578955045efde733719d62f2b649c404189a09a632d245b445c9c975"}, - {file = "aiohttp-3.8.5-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:72a860c215e26192379f57cae5ab12b168b75db8271f111019509a1196dfc780"}, - {file = "aiohttp-3.8.5-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:cc14be025665dba6202b6a71cfcdb53210cc498e50068bc088076624471f8bb9"}, - {file = "aiohttp-3.8.5-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:8af740fc2711ad85f1a5c034a435782fbd5b5f8314c9a3ef071424a8158d7f6b"}, - {file = "aiohttp-3.8.5-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:841cd8233cbd2111a0ef0a522ce016357c5e3aff8a8ce92bcfa14cef890d698f"}, - {file = "aiohttp-3.8.5-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5ed1c46fb119f1b59304b5ec89f834f07124cd23ae5b74288e364477641060ff"}, - {file = "aiohttp-3.8.5-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:84f8ae3e09a34f35c18fa57f015cc394bd1389bce02503fb30c394d04ee6b938"}, - {file = "aiohttp-3.8.5-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:62360cb771707cb70a6fd114b9871d20d7dd2163a0feafe43fd115cfe4fe845e"}, - {file = "aiohttp-3.8.5-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:23fb25a9f0a1ca1f24c0a371523546366bb642397c94ab45ad3aedf2941cec6a"}, - {file = "aiohttp-3.8.5-cp311-cp311-musllinux_1_1_ppc64le.whl", hash = "sha256:b0ba0d15164eae3d878260d4c4df859bbdc6466e9e6689c344a13334f988bb53"}, - {file = "aiohttp-3.8.5-cp311-cp311-musllinux_1_1_s390x.whl", hash = "sha256:5d20003b635fc6ae3f96d7260281dfaf1894fc3aa24d1888a9b2628e97c241e5"}, - {file = "aiohttp-3.8.5-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:0175d745d9e85c40dcc51c8f88c74bfbaef9e7afeeeb9d03c37977270303064c"}, - {file = "aiohttp-3.8.5-cp311-cp311-win32.whl", hash = "sha256:2e1b1e51b0774408f091d268648e3d57f7260c1682e7d3a63cb00d22d71bb945"}, - {file = "aiohttp-3.8.5-cp311-cp311-win_amd64.whl", hash = "sha256:043d2299f6dfdc92f0ac5e995dfc56668e1587cea7f9aa9d8a78a1b6554e5755"}, - {file = "aiohttp-3.8.5-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:cae533195e8122584ec87531d6df000ad07737eaa3c81209e85c928854d2195c"}, - {file = "aiohttp-3.8.5-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4f21e83f355643c345177a5d1d8079f9f28b5133bcd154193b799d380331d5d3"}, - {file = "aiohttp-3.8.5-cp36-cp36m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:a7a75ef35f2df54ad55dbf4b73fe1da96f370e51b10c91f08b19603c64004acc"}, - {file = "aiohttp-3.8.5-cp36-cp36m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2e2e9839e14dd5308ee773c97115f1e0a1cb1d75cbeeee9f33824fa5144c7634"}, - {file = "aiohttp-3.8.5-cp36-cp36m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c44e65da1de4403d0576473e2344828ef9c4c6244d65cf4b75549bb46d40b8dd"}, - {file = "aiohttp-3.8.5-cp36-cp36m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:78d847e4cde6ecc19125ccbc9bfac4a7ab37c234dd88fbb3c5c524e8e14da543"}, - {file = "aiohttp-3.8.5-cp36-cp36m-musllinux_1_1_aarch64.whl", hash = "sha256:c7a815258e5895d8900aec4454f38dca9aed71085f227537208057853f9d13f2"}, - {file = "aiohttp-3.8.5-cp36-cp36m-musllinux_1_1_i686.whl", hash = "sha256:8b929b9bd7cd7c3939f8bcfffa92fae7480bd1aa425279d51a89327d600c704d"}, - {file = "aiohttp-3.8.5-cp36-cp36m-musllinux_1_1_ppc64le.whl", hash = "sha256:5db3a5b833764280ed7618393832e0853e40f3d3e9aa128ac0ba0f8278d08649"}, - {file = "aiohttp-3.8.5-cp36-cp36m-musllinux_1_1_s390x.whl", hash = "sha256:a0215ce6041d501f3155dc219712bc41252d0ab76474615b9700d63d4d9292af"}, - {file = "aiohttp-3.8.5-cp36-cp36m-musllinux_1_1_x86_64.whl", hash = "sha256:fd1ed388ea7fbed22c4968dd64bab0198de60750a25fe8c0c9d4bef5abe13824"}, - {file = "aiohttp-3.8.5-cp36-cp36m-win32.whl", hash = "sha256:6e6783bcc45f397fdebc118d772103d751b54cddf5b60fbcc958382d7dd64f3e"}, - {file = "aiohttp-3.8.5-cp36-cp36m-win_amd64.whl", hash = "sha256:b5411d82cddd212644cf9360879eb5080f0d5f7d809d03262c50dad02f01421a"}, - {file = "aiohttp-3.8.5-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:01d4c0c874aa4ddfb8098e85d10b5e875a70adc63db91f1ae65a4b04d3344cda"}, - {file = "aiohttp-3.8.5-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e5980a746d547a6ba173fd5ee85ce9077e72d118758db05d229044b469d9029a"}, - {file = "aiohttp-3.8.5-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:2a482e6da906d5e6e653be079b29bc173a48e381600161c9932d89dfae5942ef"}, - {file = "aiohttp-3.8.5-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:80bd372b8d0715c66c974cf57fe363621a02f359f1ec81cba97366948c7fc873"}, - {file = "aiohttp-3.8.5-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c1161b345c0a444ebcf46bf0a740ba5dcf50612fd3d0528883fdc0eff578006a"}, - {file = "aiohttp-3.8.5-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:cd56db019015b6acfaaf92e1ac40eb8434847d9bf88b4be4efe5bfd260aee692"}, - {file = "aiohttp-3.8.5-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:153c2549f6c004d2754cc60603d4668899c9895b8a89397444a9c4efa282aaf4"}, - {file = "aiohttp-3.8.5-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:4a01951fabc4ce26ab791da5f3f24dca6d9a6f24121746eb19756416ff2d881b"}, - {file = "aiohttp-3.8.5-cp37-cp37m-musllinux_1_1_ppc64le.whl", hash = "sha256:bfb9162dcf01f615462b995a516ba03e769de0789de1cadc0f916265c257e5d8"}, - {file = "aiohttp-3.8.5-cp37-cp37m-musllinux_1_1_s390x.whl", hash = "sha256:7dde0009408969a43b04c16cbbe252c4f5ef4574ac226bc8815cd7342d2028b6"}, - {file = "aiohttp-3.8.5-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:4149d34c32f9638f38f544b3977a4c24052042affa895352d3636fa8bffd030a"}, - {file = "aiohttp-3.8.5-cp37-cp37m-win32.whl", hash = "sha256:68c5a82c8779bdfc6367c967a4a1b2aa52cd3595388bf5961a62158ee8a59e22"}, - {file = "aiohttp-3.8.5-cp37-cp37m-win_amd64.whl", hash = "sha256:2cf57fb50be5f52bda004b8893e63b48530ed9f0d6c96c84620dc92fe3cd9b9d"}, - {file = "aiohttp-3.8.5-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:eca4bf3734c541dc4f374ad6010a68ff6c6748f00451707f39857f429ca36ced"}, - {file = "aiohttp-3.8.5-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:1274477e4c71ce8cfe6c1ec2f806d57c015ebf84d83373676036e256bc55d690"}, - {file = "aiohttp-3.8.5-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:28c543e54710d6158fc6f439296c7865b29e0b616629767e685a7185fab4a6b9"}, - {file = "aiohttp-3.8.5-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:910bec0c49637d213f5d9877105d26e0c4a4de2f8b1b29405ff37e9fc0ad52b8"}, - {file = "aiohttp-3.8.5-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:5443910d662db951b2e58eb70b0fbe6b6e2ae613477129a5805d0b66c54b6cb7"}, - {file = "aiohttp-3.8.5-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2e460be6978fc24e3df83193dc0cc4de46c9909ed92dd47d349a452ef49325b7"}, - {file = "aiohttp-3.8.5-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fb1558def481d84f03b45888473fc5a1f35747b5f334ef4e7a571bc0dfcb11f8"}, - {file = "aiohttp-3.8.5-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:34dd0c107799dcbbf7d48b53be761a013c0adf5571bf50c4ecad5643fe9cfcd0"}, - {file = "aiohttp-3.8.5-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:aa1990247f02a54185dc0dff92a6904521172a22664c863a03ff64c42f9b5410"}, - {file = "aiohttp-3.8.5-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:0e584a10f204a617d71d359fe383406305a4b595b333721fa50b867b4a0a1548"}, - {file = "aiohttp-3.8.5-cp38-cp38-musllinux_1_1_ppc64le.whl", hash = "sha256:a3cf433f127efa43fee6b90ea4c6edf6c4a17109d1d037d1a52abec84d8f2e42"}, - {file = "aiohttp-3.8.5-cp38-cp38-musllinux_1_1_s390x.whl", hash = "sha256:c11f5b099adafb18e65c2c997d57108b5bbeaa9eeee64a84302c0978b1ec948b"}, - {file = "aiohttp-3.8.5-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:84de26ddf621d7ac4c975dbea4c945860e08cccde492269db4e1538a6a6f3c35"}, - {file = "aiohttp-3.8.5-cp38-cp38-win32.whl", hash = "sha256:ab88bafedc57dd0aab55fa728ea10c1911f7e4d8b43e1d838a1739f33712921c"}, - {file = "aiohttp-3.8.5-cp38-cp38-win_amd64.whl", hash = "sha256:5798a9aad1879f626589f3df0f8b79b3608a92e9beab10e5fda02c8a2c60db2e"}, - {file = "aiohttp-3.8.5-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:a6ce61195c6a19c785df04e71a4537e29eaa2c50fe745b732aa937c0c77169f3"}, - {file = "aiohttp-3.8.5-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:773dd01706d4db536335fcfae6ea2440a70ceb03dd3e7378f3e815b03c97ab51"}, - {file = "aiohttp-3.8.5-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:f83a552443a526ea38d064588613aca983d0ee0038801bc93c0c916428310c28"}, - {file = "aiohttp-3.8.5-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1f7372f7341fcc16f57b2caded43e81ddd18df53320b6f9f042acad41f8e049a"}, - {file = "aiohttp-3.8.5-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ea353162f249c8097ea63c2169dd1aa55de1e8fecbe63412a9bc50816e87b761"}, - {file = "aiohttp-3.8.5-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:e5d47ae48db0b2dcf70bc8a3bc72b3de86e2a590fc299fdbbb15af320d2659de"}, - {file = "aiohttp-3.8.5-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d827176898a2b0b09694fbd1088c7a31836d1a505c243811c87ae53a3f6273c1"}, - {file = "aiohttp-3.8.5-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:3562b06567c06439d8b447037bb655ef69786c590b1de86c7ab81efe1c9c15d8"}, - {file = "aiohttp-3.8.5-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:4e874cbf8caf8959d2adf572a78bba17cb0e9d7e51bb83d86a3697b686a0ab4d"}, - {file = "aiohttp-3.8.5-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:6809a00deaf3810e38c628e9a33271892f815b853605a936e2e9e5129762356c"}, - {file = "aiohttp-3.8.5-cp39-cp39-musllinux_1_1_ppc64le.whl", hash = "sha256:33776e945d89b29251b33a7e7d006ce86447b2cfd66db5e5ded4e5cd0340585c"}, - {file = "aiohttp-3.8.5-cp39-cp39-musllinux_1_1_s390x.whl", hash = "sha256:eaeed7abfb5d64c539e2db173f63631455f1196c37d9d8d873fc316470dfbacd"}, - {file = "aiohttp-3.8.5-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:e91d635961bec2d8f19dfeb41a539eb94bd073f075ca6dae6c8dc0ee89ad6f91"}, - {file = "aiohttp-3.8.5-cp39-cp39-win32.whl", hash = "sha256:00ad4b6f185ec67f3e6562e8a1d2b69660be43070bd0ef6fcec5211154c7df67"}, - {file = "aiohttp-3.8.5-cp39-cp39-win_amd64.whl", hash = "sha256:c0a9034379a37ae42dea7ac1e048352d96286626251862e448933c0f59cbd79c"}, - {file = "aiohttp-3.8.5.tar.gz", hash = "sha256:b9552ec52cc147dbf1944ac7ac98af7602e51ea2dcd076ed194ca3c0d1c7d0bc"}, + {file = "aiohttp-3.9.0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:6896b8416be9ada4d22cd359d7cb98955576ce863eadad5596b7cdfbf3e17c6c"}, + {file = "aiohttp-3.9.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:1736d87dad8ef46a8ec9cddd349fa9f7bd3a064c47dd6469c0d6763d3d49a4fc"}, + {file = "aiohttp-3.9.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:8c9e5f4d7208cda1a2bb600e29069eecf857e6980d0ccc922ccf9d1372c16f4b"}, + {file = "aiohttp-3.9.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8488519aa05e636c5997719fe543c8daf19f538f4fa044f3ce94bee608817cff"}, + {file = "aiohttp-3.9.0-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:5ab16c254e2312efeb799bc3c06897f65a133b38b69682bf75d1f1ee1a9c43a9"}, + {file = "aiohttp-3.9.0-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:7a94bde005a8f926d0fa38b88092a03dea4b4875a61fbcd9ac6f4351df1b57cd"}, + {file = "aiohttp-3.9.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4b777c9286b6c6a94f50ddb3a6e730deec327e9e2256cb08b5530db0f7d40fd8"}, + {file = "aiohttp-3.9.0-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:571760ad7736b34d05597a1fd38cbc7d47f7b65deb722cb8e86fd827404d1f6b"}, + {file = "aiohttp-3.9.0-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:deac0a32aec29608eb25d730f4bc5a261a65b6c48ded1ed861d2a1852577c932"}, + {file = "aiohttp-3.9.0-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:4ee1b4152bc3190cc40ddd6a14715e3004944263ea208229ab4c297712aa3075"}, + {file = "aiohttp-3.9.0-cp310-cp310-musllinux_1_1_ppc64le.whl", hash = "sha256:3607375053df58ed6f23903aa10cf3112b1240e8c799d243bbad0f7be0666986"}, + {file = "aiohttp-3.9.0-cp310-cp310-musllinux_1_1_s390x.whl", hash = "sha256:65b0a70a25456d329a5e1426702dde67be0fb7a4ead718005ba2ca582d023a94"}, + {file = "aiohttp-3.9.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:5a2eb5311a37fe105aa35f62f75a078537e1a9e4e1d78c86ec9893a3c97d7a30"}, + {file = "aiohttp-3.9.0-cp310-cp310-win32.whl", hash = "sha256:2cbc14a13fb6b42d344e4f27746a4b03a2cb0c1c3c5b932b0d6ad8881aa390e3"}, + {file = "aiohttp-3.9.0-cp310-cp310-win_amd64.whl", hash = "sha256:ac9669990e2016d644ba8ae4758688534aabde8dbbc81f9af129c3f5f01ca9cd"}, + {file = "aiohttp-3.9.0-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:f8e05f5163528962ce1d1806fce763ab893b1c5b7ace0a3538cd81a90622f844"}, + {file = "aiohttp-3.9.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:4afa8f71dba3a5a2e1e1282a51cba7341ae76585345c43d8f0e624882b622218"}, + {file = "aiohttp-3.9.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:f929f4c9b9a00f3e6cc0587abb95ab9c05681f8b14e0fe1daecfa83ea90f8318"}, + {file = "aiohttp-3.9.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:28185e36a78d247c55e9fbea2332d16aefa14c5276a582ce7a896231c6b1c208"}, + {file = "aiohttp-3.9.0-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:a486ddf57ab98b6d19ad36458b9f09e6022de0381674fe00228ca7b741aacb2f"}, + {file = "aiohttp-3.9.0-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:70e851f596c00f40a2f00a46126c95c2e04e146015af05a9da3e4867cfc55911"}, + {file = "aiohttp-3.9.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c5b7bf8fe4d39886adc34311a233a2e01bc10eb4e842220235ed1de57541a896"}, + {file = "aiohttp-3.9.0-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c67a51ea415192c2e53e4e048c78bab82d21955b4281d297f517707dc836bf3d"}, + {file = "aiohttp-3.9.0-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:694df243f394629bcae2d8ed94c589a181e8ba8604159e6e45e7b22e58291113"}, + {file = "aiohttp-3.9.0-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:3dd8119752dd30dd7bca7d4bc2a92a59be6a003e4e5c2cf7e248b89751b8f4b7"}, + {file = "aiohttp-3.9.0-cp311-cp311-musllinux_1_1_ppc64le.whl", hash = "sha256:eb6dfd52063186ac97b4caa25764cdbcdb4b10d97f5c5f66b0fa95052e744eb7"}, + {file = "aiohttp-3.9.0-cp311-cp311-musllinux_1_1_s390x.whl", hash = "sha256:d97c3e286d0ac9af6223bc132dc4bad6540b37c8d6c0a15fe1e70fb34f9ec411"}, + {file = "aiohttp-3.9.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:816f4db40555026e4cdda604a1088577c1fb957d02f3f1292e0221353403f192"}, + {file = "aiohttp-3.9.0-cp311-cp311-win32.whl", hash = "sha256:3abf0551874fecf95f93b58f25ef4fc9a250669a2257753f38f8f592db85ddea"}, + {file = "aiohttp-3.9.0-cp311-cp311-win_amd64.whl", hash = "sha256:e18d92c3e9e22553a73e33784fcb0ed484c9874e9a3e96c16a8d6a1e74a0217b"}, + {file = "aiohttp-3.9.0-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:99ae01fb13a618b9942376df77a1f50c20a281390dad3c56a6ec2942e266220d"}, + {file = "aiohttp-3.9.0-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:05857848da443c8c12110d99285d499b4e84d59918a21132e45c3f0804876994"}, + {file = "aiohttp-3.9.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:317719d7f824eba55857fe0729363af58e27c066c731bc62cd97bc9c3d9c7ea4"}, + {file = "aiohttp-3.9.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a1e3b3c107ccb0e537f309f719994a55621acd2c8fdf6d5ce5152aed788fb940"}, + {file = "aiohttp-3.9.0-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:45820ddbb276113ead8d4907a7802adb77548087ff5465d5c554f9aa3928ae7d"}, + {file = "aiohttp-3.9.0-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:05a183f1978802588711aed0dea31e697d760ce9055292db9dc1604daa9a8ded"}, + {file = "aiohttp-3.9.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:51a4cd44788ea0b5e6bb8fa704597af3a30be75503a7ed1098bc5b8ffdf6c982"}, + {file = "aiohttp-3.9.0-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:673343fbc0c1ac44d0d2640addc56e97a052504beacd7ade0dc5e76d3a4c16e8"}, + {file = "aiohttp-3.9.0-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:7e8a3b79b6d186a9c99761fd4a5e8dd575a48d96021f220ac5b5fa856e5dd029"}, + {file = "aiohttp-3.9.0-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:6777a390e41e78e7c45dab43a4a0196c55c3b8c30eebe017b152939372a83253"}, + {file = "aiohttp-3.9.0-cp312-cp312-musllinux_1_1_ppc64le.whl", hash = "sha256:7ae5f99a32c53731c93ac3075abd3e1e5cfbe72fc3eaac4c27c9dd64ba3b19fe"}, + {file = "aiohttp-3.9.0-cp312-cp312-musllinux_1_1_s390x.whl", hash = "sha256:f1e4f254e9c35d8965d377e065c4a8a55d396fe87c8e7e8429bcfdeeb229bfb3"}, + {file = "aiohttp-3.9.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:11ca808f9a6b63485059f5f6e164ef7ec826483c1212a44f268b3653c91237d8"}, + {file = "aiohttp-3.9.0-cp312-cp312-win32.whl", hash = "sha256:de3cc86f4ea8b4c34a6e43a7306c40c1275e52bfa9748d869c6b7d54aa6dad80"}, + {file = "aiohttp-3.9.0-cp312-cp312-win_amd64.whl", hash = "sha256:ca4fddf84ac7d8a7d0866664936f93318ff01ee33e32381a115b19fb5a4d1202"}, + {file = "aiohttp-3.9.0-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:f09960b5bb1017d16c0f9e9f7fc42160a5a49fa1e87a175fd4a2b1a1833ea0af"}, + {file = "aiohttp-3.9.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:8303531e2c17b1a494ffaeba48f2da655fe932c4e9a2626c8718403c83e5dd2b"}, + {file = "aiohttp-3.9.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:4790e44f46a4aa07b64504089def5744d3b6780468c4ec3a1a36eb7f2cae9814"}, + {file = "aiohttp-3.9.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a1d7edf74a36de0e5ca50787e83a77cf352f5504eb0ffa3f07000a911ba353fb"}, + {file = "aiohttp-3.9.0-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:94697c7293199c2a2551e3e3e18438b4cba293e79c6bc2319f5fd652fccb7456"}, + {file = "aiohttp-3.9.0-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:a1b66dbb8a7d5f50e9e2ea3804b01e766308331d0cac76eb30c563ac89c95985"}, + {file = "aiohttp-3.9.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9623cfd9e85b76b83ef88519d98326d4731f8d71869867e47a0b979ffec61c73"}, + {file = "aiohttp-3.9.0-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f32c86dc967ab8c719fd229ce71917caad13cc1e8356ee997bf02c5b368799bf"}, + {file = "aiohttp-3.9.0-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:f50b4663c3e0262c3a361faf440761fbef60ccdde5fe8545689a4b3a3c149fb4"}, + {file = "aiohttp-3.9.0-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:dcf71c55ec853826cd70eadb2b6ac62ec577416442ca1e0a97ad875a1b3a0305"}, + {file = "aiohttp-3.9.0-cp38-cp38-musllinux_1_1_ppc64le.whl", hash = "sha256:42fe4fd9f0dfcc7be4248c162d8056f1d51a04c60e53366b0098d1267c4c9da8"}, + {file = "aiohttp-3.9.0-cp38-cp38-musllinux_1_1_s390x.whl", hash = "sha256:76a86a9989ebf82ee61e06e2bab408aec4ea367dc6da35145c3352b60a112d11"}, + {file = "aiohttp-3.9.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:f9e09a1c83521d770d170b3801eea19b89f41ccaa61d53026ed111cb6f088887"}, + {file = "aiohttp-3.9.0-cp38-cp38-win32.whl", hash = "sha256:a00ce44c21612d185c5275c5cba4bab8d7c1590f248638b667ed8a782fa8cd6f"}, + {file = "aiohttp-3.9.0-cp38-cp38-win_amd64.whl", hash = "sha256:d5b9345ab92ebe6003ae11d8092ce822a0242146e6fa270889b9ba965457ca40"}, + {file = "aiohttp-3.9.0-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:98d21092bf2637c5fa724a428a69e8f5955f2182bff61f8036827cf6ce1157bf"}, + {file = "aiohttp-3.9.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:35a68cd63ca6aaef5707888f17a70c36efe62b099a4e853d33dc2e9872125be8"}, + {file = "aiohttp-3.9.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:3d7f6235c7475658acfc1769d968e07ab585c79f6ca438ddfecaa9a08006aee2"}, + {file = "aiohttp-3.9.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:db04d1de548f7a62d1dd7e7cdf7c22893ee168e22701895067a28a8ed51b3735"}, + {file = "aiohttp-3.9.0-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:536b01513d67d10baf6f71c72decdf492fb7433c5f2f133e9a9087379d4b6f31"}, + {file = "aiohttp-3.9.0-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:87c8b0a6487e8109427ccf638580865b54e2e3db4a6e0e11c02639231b41fc0f"}, + {file = "aiohttp-3.9.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7276fe0017664414fdc3618fca411630405f1aaf0cc3be69def650eb50441787"}, + {file = "aiohttp-3.9.0-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:23170247ef89ffa842a02bbfdc425028574d9e010611659abeb24d890bc53bb8"}, + {file = "aiohttp-3.9.0-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:b1a2ea8252cacc7fd51df5a56d7a2bb1986ed39be9397b51a08015727dfb69bd"}, + {file = "aiohttp-3.9.0-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:2d71abc15ff7047412ef26bf812dfc8d0d1020d664617f4913df2df469f26b76"}, + {file = "aiohttp-3.9.0-cp39-cp39-musllinux_1_1_ppc64le.whl", hash = "sha256:2d820162c8c2bdbe97d328cd4f417c955ca370027dce593345e437b2e9ffdc4d"}, + {file = "aiohttp-3.9.0-cp39-cp39-musllinux_1_1_s390x.whl", hash = "sha256:2779f5e7c70f7b421915fd47db332c81de365678180a9f3ab404088f87ba5ff9"}, + {file = "aiohttp-3.9.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:366bc870d7ac61726f32a489fbe3d1d8876e87506870be66b01aeb84389e967e"}, + {file = "aiohttp-3.9.0-cp39-cp39-win32.whl", hash = "sha256:1df43596b826022b14998f0460926ce261544fedefe0d2f653e1b20f49e96454"}, + {file = "aiohttp-3.9.0-cp39-cp39-win_amd64.whl", hash = "sha256:9c196b30f1b1aa3363a69dd69079ae9bec96c2965c4707eaa6914ba099fb7d4f"}, + {file = "aiohttp-3.9.0.tar.gz", hash = "sha256:09f23292d29135025e19e8ff4f0a68df078fe4ee013bca0105b2e803989de92d"}, ] [package.dependencies] aiosignal = ">=1.1.2" -async-timeout = ">=4.0.0a3,<5.0" +async-timeout = {version = ">=4.0,<5.0", markers = "python_version < \"3.11\""} attrs = ">=17.3.0" -charset-normalizer = ">=2.0,<4.0" frozenlist = ">=1.1.1" multidict = ">=4.5,<7.0" yarl = ">=1.0,<2.0" [package.extras] -speedups = ["Brotli", "aiodns", "cchardet"] +speedups = ["Brotli", "aiodns", "brotlicffi"] [[package]] name = "aiosignal" @@ -192,123 +180,123 @@ files = [ [[package]] name = "bitsandbytes" -version = "0.41.1" +version = "0.41.2.post2" description = "k-bit optimizers and matrix multiplication routines." optional = true python-versions = "*" files = [ - {file = "bitsandbytes-0.41.1-py3-none-any.whl", hash = "sha256:b25228c27636367f222232ed4d1e1502eedd2064be215633734fb8ea0c1c65f4"}, - {file = "bitsandbytes-0.41.1.tar.gz", hash = "sha256:b3f8e7e1e5f88d4813d10ebd4072034ba6a18eca7f0e255376f8320e5499032c"}, + {file = "bitsandbytes-0.41.2.post2-py3-none-any.whl", hash = "sha256:98e5e1979aea3d481ed06181c689f3a154d7f5dc1af770c5173485bc54cf7b72"}, + {file = "bitsandbytes-0.41.2.post2.tar.gz", hash = "sha256:d374da4700651f36a285ed53e012ee527736109614e3f5c0249985d41027136d"}, ] [[package]] name = "certifi" -version = "2023.7.22" +version = "2023.11.17" description = "Python package for providing Mozilla's CA Bundle." optional = false python-versions = ">=3.6" files = [ - {file = "certifi-2023.7.22-py3-none-any.whl", hash = "sha256:92d6037539857d8206b8f6ae472e8b77db8058fec5937a1ef3f54304089edbb9"}, - {file = "certifi-2023.7.22.tar.gz", hash = "sha256:539cc1d13202e33ca466e88b2807e29f4c13049d6d87031a3c110744495cb082"}, + {file = "certifi-2023.11.17-py3-none-any.whl", hash = "sha256:e036ab49d5b79556f99cfc2d9320b34cfbe5be05c5871b51de9329f0603b0474"}, + {file = "certifi-2023.11.17.tar.gz", hash = "sha256:9b469f3a900bf28dc19b8cfbf8019bf47f7fdd1a65a1d4ffb98fc14166beb4d1"}, ] [[package]] name = "charset-normalizer" -version = "3.3.0" +version = "3.3.2" description = "The Real First Universal Charset Detector. Open, modern and actively maintained alternative to Chardet." optional = false python-versions = ">=3.7.0" files = [ - {file = "charset-normalizer-3.3.0.tar.gz", hash = "sha256:63563193aec44bce707e0c5ca64ff69fa72ed7cf34ce6e11d5127555756fd2f6"}, - {file = "charset_normalizer-3.3.0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:effe5406c9bd748a871dbcaf3ac69167c38d72db8c9baf3ff954c344f31c4cbe"}, - {file = "charset_normalizer-3.3.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:4162918ef3098851fcd8a628bf9b6a98d10c380725df9e04caf5ca6dd48c847a"}, - {file = "charset_normalizer-3.3.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:0570d21da019941634a531444364f2482e8db0b3425fcd5ac0c36565a64142c8"}, - {file = "charset_normalizer-3.3.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5707a746c6083a3a74b46b3a631d78d129edab06195a92a8ece755aac25a3f3d"}, - {file = "charset_normalizer-3.3.0-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:278c296c6f96fa686d74eb449ea1697f3c03dc28b75f873b65b5201806346a69"}, - {file = "charset_normalizer-3.3.0-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:a4b71f4d1765639372a3b32d2638197f5cd5221b19531f9245fcc9ee62d38f56"}, - {file = "charset_normalizer-3.3.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f5969baeaea61c97efa706b9b107dcba02784b1601c74ac84f2a532ea079403e"}, - {file = "charset_normalizer-3.3.0-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a3f93dab657839dfa61025056606600a11d0b696d79386f974e459a3fbc568ec"}, - {file = "charset_normalizer-3.3.0-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:db756e48f9c5c607b5e33dd36b1d5872d0422e960145b08ab0ec7fd420e9d649"}, - {file = "charset_normalizer-3.3.0-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:232ac332403e37e4a03d209a3f92ed9071f7d3dbda70e2a5e9cff1c4ba9f0678"}, - {file = "charset_normalizer-3.3.0-cp310-cp310-musllinux_1_1_ppc64le.whl", hash = "sha256:e5c1502d4ace69a179305abb3f0bb6141cbe4714bc9b31d427329a95acfc8bdd"}, - {file = "charset_normalizer-3.3.0-cp310-cp310-musllinux_1_1_s390x.whl", hash = "sha256:2502dd2a736c879c0f0d3e2161e74d9907231e25d35794584b1ca5284e43f596"}, - {file = "charset_normalizer-3.3.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:23e8565ab7ff33218530bc817922fae827420f143479b753104ab801145b1d5b"}, - {file = "charset_normalizer-3.3.0-cp310-cp310-win32.whl", hash = "sha256:1872d01ac8c618a8da634e232f24793883d6e456a66593135aeafe3784b0848d"}, - {file = "charset_normalizer-3.3.0-cp310-cp310-win_amd64.whl", hash = "sha256:557b21a44ceac6c6b9773bc65aa1b4cc3e248a5ad2f5b914b91579a32e22204d"}, - {file = "charset_normalizer-3.3.0-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:d7eff0f27edc5afa9e405f7165f85a6d782d308f3b6b9d96016c010597958e63"}, - {file = "charset_normalizer-3.3.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:6a685067d05e46641d5d1623d7c7fdf15a357546cbb2f71b0ebde91b175ffc3e"}, - {file = "charset_normalizer-3.3.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:0d3d5b7db9ed8a2b11a774db2bbea7ba1884430a205dbd54a32d61d7c2a190fa"}, - {file = "charset_normalizer-3.3.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2935ffc78db9645cb2086c2f8f4cfd23d9b73cc0dc80334bc30aac6f03f68f8c"}, - {file = "charset_normalizer-3.3.0-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:9fe359b2e3a7729010060fbca442ca225280c16e923b37db0e955ac2a2b72a05"}, - {file = "charset_normalizer-3.3.0-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:380c4bde80bce25c6e4f77b19386f5ec9db230df9f2f2ac1e5ad7af2caa70459"}, - {file = "charset_normalizer-3.3.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f0d1e3732768fecb052d90d62b220af62ead5748ac51ef61e7b32c266cac9293"}, - {file = "charset_normalizer-3.3.0-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1b2919306936ac6efb3aed1fbf81039f7087ddadb3160882a57ee2ff74fd2382"}, - {file = "charset_normalizer-3.3.0-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:f8888e31e3a85943743f8fc15e71536bda1c81d5aa36d014a3c0c44481d7db6e"}, - {file = "charset_normalizer-3.3.0-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:82eb849f085624f6a607538ee7b83a6d8126df6d2f7d3b319cb837b289123078"}, - {file = "charset_normalizer-3.3.0-cp311-cp311-musllinux_1_1_ppc64le.whl", hash = "sha256:7b8b8bf1189b3ba9b8de5c8db4d541b406611a71a955bbbd7385bbc45fcb786c"}, - {file = "charset_normalizer-3.3.0-cp311-cp311-musllinux_1_1_s390x.whl", hash = "sha256:5adf257bd58c1b8632046bbe43ee38c04e1038e9d37de9c57a94d6bd6ce5da34"}, - {file = "charset_normalizer-3.3.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:c350354efb159b8767a6244c166f66e67506e06c8924ed74669b2c70bc8735b1"}, - {file = "charset_normalizer-3.3.0-cp311-cp311-win32.whl", hash = "sha256:02af06682e3590ab952599fbadac535ede5d60d78848e555aa58d0c0abbde786"}, - {file = "charset_normalizer-3.3.0-cp311-cp311-win_amd64.whl", hash = "sha256:86d1f65ac145e2c9ed71d8ffb1905e9bba3a91ae29ba55b4c46ae6fc31d7c0d4"}, - {file = "charset_normalizer-3.3.0-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:3b447982ad46348c02cb90d230b75ac34e9886273df3a93eec0539308a6296d7"}, - {file = "charset_normalizer-3.3.0-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:abf0d9f45ea5fb95051c8bfe43cb40cda383772f7e5023a83cc481ca2604d74e"}, - {file = "charset_normalizer-3.3.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:b09719a17a2301178fac4470d54b1680b18a5048b481cb8890e1ef820cb80455"}, - {file = "charset_normalizer-3.3.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b3d9b48ee6e3967b7901c052b670c7dda6deb812c309439adaffdec55c6d7b78"}, - {file = "charset_normalizer-3.3.0-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:edfe077ab09442d4ef3c52cb1f9dab89bff02f4524afc0acf2d46be17dc479f5"}, - {file = "charset_normalizer-3.3.0-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:3debd1150027933210c2fc321527c2299118aa929c2f5a0a80ab6953e3bd1908"}, - {file = "charset_normalizer-3.3.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:86f63face3a527284f7bb8a9d4f78988e3c06823f7bea2bd6f0e0e9298ca0403"}, - {file = "charset_normalizer-3.3.0-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:24817cb02cbef7cd499f7c9a2735286b4782bd47a5b3516a0e84c50eab44b98e"}, - {file = "charset_normalizer-3.3.0-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:c71f16da1ed8949774ef79f4a0260d28b83b3a50c6576f8f4f0288d109777989"}, - {file = "charset_normalizer-3.3.0-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:9cf3126b85822c4e53aa28c7ec9869b924d6fcfb76e77a45c44b83d91afd74f9"}, - {file = "charset_normalizer-3.3.0-cp312-cp312-musllinux_1_1_ppc64le.whl", hash = "sha256:b3b2316b25644b23b54a6f6401074cebcecd1244c0b8e80111c9a3f1c8e83d65"}, - {file = "charset_normalizer-3.3.0-cp312-cp312-musllinux_1_1_s390x.whl", hash = "sha256:03680bb39035fbcffe828eae9c3f8afc0428c91d38e7d61aa992ef7a59fb120e"}, - {file = "charset_normalizer-3.3.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:4cc152c5dd831641e995764f9f0b6589519f6f5123258ccaca8c6d34572fefa8"}, - {file = "charset_normalizer-3.3.0-cp312-cp312-win32.whl", hash = "sha256:b8f3307af845803fb0b060ab76cf6dd3a13adc15b6b451f54281d25911eb92df"}, - {file = "charset_normalizer-3.3.0-cp312-cp312-win_amd64.whl", hash = "sha256:8eaf82f0eccd1505cf39a45a6bd0a8cf1c70dcfc30dba338207a969d91b965c0"}, - {file = "charset_normalizer-3.3.0-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:dc45229747b67ffc441b3de2f3ae5e62877a282ea828a5bdb67883c4ee4a8810"}, - {file = "charset_normalizer-3.3.0-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2f4a0033ce9a76e391542c182f0d48d084855b5fcba5010f707c8e8c34663d77"}, - {file = "charset_normalizer-3.3.0-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ada214c6fa40f8d800e575de6b91a40d0548139e5dc457d2ebb61470abf50186"}, - {file = "charset_normalizer-3.3.0-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b1121de0e9d6e6ca08289583d7491e7fcb18a439305b34a30b20d8215922d43c"}, - {file = "charset_normalizer-3.3.0-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1063da2c85b95f2d1a430f1c33b55c9c17ffaf5e612e10aeaad641c55a9e2b9d"}, - {file = "charset_normalizer-3.3.0-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:70f1d09c0d7748b73290b29219e854b3207aea922f839437870d8cc2168e31cc"}, - {file = "charset_normalizer-3.3.0-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:250c9eb0f4600361dd80d46112213dff2286231d92d3e52af1e5a6083d10cad9"}, - {file = "charset_normalizer-3.3.0-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:750b446b2ffce1739e8578576092179160f6d26bd5e23eb1789c4d64d5af7dc7"}, - {file = "charset_normalizer-3.3.0-cp37-cp37m-musllinux_1_1_ppc64le.whl", hash = "sha256:fc52b79d83a3fe3a360902d3f5d79073a993597d48114c29485e9431092905d8"}, - {file = "charset_normalizer-3.3.0-cp37-cp37m-musllinux_1_1_s390x.whl", hash = "sha256:588245972aca710b5b68802c8cad9edaa98589b1b42ad2b53accd6910dad3545"}, - {file = "charset_normalizer-3.3.0-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:e39c7eb31e3f5b1f88caff88bcff1b7f8334975b46f6ac6e9fc725d829bc35d4"}, - {file = "charset_normalizer-3.3.0-cp37-cp37m-win32.whl", hash = "sha256:abecce40dfebbfa6abf8e324e1860092eeca6f7375c8c4e655a8afb61af58f2c"}, - {file = "charset_normalizer-3.3.0-cp37-cp37m-win_amd64.whl", hash = "sha256:24a91a981f185721542a0b7c92e9054b7ab4fea0508a795846bc5b0abf8118d4"}, - {file = "charset_normalizer-3.3.0-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:67b8cc9574bb518ec76dc8e705d4c39ae78bb96237cb533edac149352c1f39fe"}, - {file = "charset_normalizer-3.3.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:ac71b2977fb90c35d41c9453116e283fac47bb9096ad917b8819ca8b943abecd"}, - {file = "charset_normalizer-3.3.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:3ae38d325b512f63f8da31f826e6cb6c367336f95e418137286ba362925c877e"}, - {file = "charset_normalizer-3.3.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:542da1178c1c6af8873e143910e2269add130a299c9106eef2594e15dae5e482"}, - {file = "charset_normalizer-3.3.0-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:30a85aed0b864ac88309b7d94be09f6046c834ef60762a8833b660139cfbad13"}, - {file = "charset_normalizer-3.3.0-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:aae32c93e0f64469f74ccc730a7cb21c7610af3a775157e50bbd38f816536b38"}, - {file = "charset_normalizer-3.3.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:15b26ddf78d57f1d143bdf32e820fd8935d36abe8a25eb9ec0b5a71c82eb3895"}, - {file = "charset_normalizer-3.3.0-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:7f5d10bae5d78e4551b7be7a9b29643a95aded9d0f602aa2ba584f0388e7a557"}, - {file = "charset_normalizer-3.3.0-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:249c6470a2b60935bafd1d1d13cd613f8cd8388d53461c67397ee6a0f5dce741"}, - {file = "charset_normalizer-3.3.0-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:c5a74c359b2d47d26cdbbc7845e9662d6b08a1e915eb015d044729e92e7050b7"}, - {file = "charset_normalizer-3.3.0-cp38-cp38-musllinux_1_1_ppc64le.whl", hash = "sha256:b5bcf60a228acae568e9911f410f9d9e0d43197d030ae5799e20dca8df588287"}, - {file = "charset_normalizer-3.3.0-cp38-cp38-musllinux_1_1_s390x.whl", hash = "sha256:187d18082694a29005ba2944c882344b6748d5be69e3a89bf3cc9d878e548d5a"}, - {file = "charset_normalizer-3.3.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:81bf654678e575403736b85ba3a7867e31c2c30a69bc57fe88e3ace52fb17b89"}, - {file = "charset_normalizer-3.3.0-cp38-cp38-win32.whl", hash = "sha256:85a32721ddde63c9df9ebb0d2045b9691d9750cb139c161c80e500d210f5e26e"}, - {file = "charset_normalizer-3.3.0-cp38-cp38-win_amd64.whl", hash = "sha256:468d2a840567b13a590e67dd276c570f8de00ed767ecc611994c301d0f8c014f"}, - {file = "charset_normalizer-3.3.0-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:e0fc42822278451bc13a2e8626cf2218ba570f27856b536e00cfa53099724828"}, - {file = "charset_normalizer-3.3.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:09c77f964f351a7369cc343911e0df63e762e42bac24cd7d18525961c81754f4"}, - {file = "charset_normalizer-3.3.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:12ebea541c44fdc88ccb794a13fe861cc5e35d64ed689513a5c03d05b53b7c82"}, - {file = "charset_normalizer-3.3.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:805dfea4ca10411a5296bcc75638017215a93ffb584c9e344731eef0dcfb026a"}, - {file = "charset_normalizer-3.3.0-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:96c2b49eb6a72c0e4991d62406e365d87067ca14c1a729a870d22354e6f68115"}, - {file = "charset_normalizer-3.3.0-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:aaf7b34c5bc56b38c931a54f7952f1ff0ae77a2e82496583b247f7c969eb1479"}, - {file = "charset_normalizer-3.3.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:619d1c96099be5823db34fe89e2582b336b5b074a7f47f819d6b3a57ff7bdb86"}, - {file = "charset_normalizer-3.3.0-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a0ac5e7015a5920cfce654c06618ec40c33e12801711da6b4258af59a8eff00a"}, - {file = "charset_normalizer-3.3.0-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:93aa7eef6ee71c629b51ef873991d6911b906d7312c6e8e99790c0f33c576f89"}, - {file = "charset_normalizer-3.3.0-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:7966951325782121e67c81299a031f4c115615e68046f79b85856b86ebffc4cd"}, - {file = "charset_normalizer-3.3.0-cp39-cp39-musllinux_1_1_ppc64le.whl", hash = "sha256:02673e456dc5ab13659f85196c534dc596d4ef260e4d86e856c3b2773ce09843"}, - {file = "charset_normalizer-3.3.0-cp39-cp39-musllinux_1_1_s390x.whl", hash = "sha256:c2af80fb58f0f24b3f3adcb9148e6203fa67dd3f61c4af146ecad033024dde43"}, - {file = "charset_normalizer-3.3.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:153e7b6e724761741e0974fc4dcd406d35ba70b92bfe3fedcb497226c93b9da7"}, - {file = "charset_normalizer-3.3.0-cp39-cp39-win32.whl", hash = "sha256:d47ecf253780c90ee181d4d871cd655a789da937454045b17b5798da9393901a"}, - {file = "charset_normalizer-3.3.0-cp39-cp39-win_amd64.whl", hash = "sha256:d97d85fa63f315a8bdaba2af9a6a686e0eceab77b3089af45133252618e70884"}, - {file = "charset_normalizer-3.3.0-py3-none-any.whl", hash = "sha256:e46cd37076971c1040fc8c41273a8b3e2c624ce4f2be3f5dfcb7a430c1d3acc2"}, + {file = "charset-normalizer-3.3.2.tar.gz", hash = "sha256:f30c3cb33b24454a82faecaf01b19c18562b1e89558fb6c56de4d9118a032fd5"}, + {file = "charset_normalizer-3.3.2-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:25baf083bf6f6b341f4121c2f3c548875ee6f5339300e08be3f2b2ba1721cdd3"}, + {file = "charset_normalizer-3.3.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:06435b539f889b1f6f4ac1758871aae42dc3a8c0e24ac9e60c2384973ad73027"}, + {file = "charset_normalizer-3.3.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:9063e24fdb1e498ab71cb7419e24622516c4a04476b17a2dab57e8baa30d6e03"}, + {file = "charset_normalizer-3.3.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6897af51655e3691ff853668779c7bad41579facacf5fd7253b0133308cf000d"}, + {file = "charset_normalizer-3.3.2-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1d3193f4a680c64b4b6a9115943538edb896edc190f0b222e73761716519268e"}, + {file = "charset_normalizer-3.3.2-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:cd70574b12bb8a4d2aaa0094515df2463cb429d8536cfb6c7ce983246983e5a6"}, + {file = "charset_normalizer-3.3.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8465322196c8b4d7ab6d1e049e4c5cb460d0394da4a27d23cc242fbf0034b6b5"}, + {file = "charset_normalizer-3.3.2-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a9a8e9031d613fd2009c182b69c7b2c1ef8239a0efb1df3f7c8da66d5dd3d537"}, + {file = "charset_normalizer-3.3.2-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:beb58fe5cdb101e3a055192ac291b7a21e3b7ef4f67fa1d74e331a7f2124341c"}, + {file = "charset_normalizer-3.3.2-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:e06ed3eb3218bc64786f7db41917d4e686cc4856944f53d5bdf83a6884432e12"}, + {file = "charset_normalizer-3.3.2-cp310-cp310-musllinux_1_1_ppc64le.whl", hash = "sha256:2e81c7b9c8979ce92ed306c249d46894776a909505d8f5a4ba55b14206e3222f"}, + {file = "charset_normalizer-3.3.2-cp310-cp310-musllinux_1_1_s390x.whl", hash = "sha256:572c3763a264ba47b3cf708a44ce965d98555f618ca42c926a9c1616d8f34269"}, + {file = "charset_normalizer-3.3.2-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:fd1abc0d89e30cc4e02e4064dc67fcc51bd941eb395c502aac3ec19fab46b519"}, + {file = "charset_normalizer-3.3.2-cp310-cp310-win32.whl", hash = "sha256:3d47fa203a7bd9c5b6cee4736ee84ca03b8ef23193c0d1ca99b5089f72645c73"}, + {file = "charset_normalizer-3.3.2-cp310-cp310-win_amd64.whl", hash = "sha256:10955842570876604d404661fbccbc9c7e684caf432c09c715ec38fbae45ae09"}, + {file = "charset_normalizer-3.3.2-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:802fe99cca7457642125a8a88a084cef28ff0cf9407060f7b93dca5aa25480db"}, + {file = "charset_normalizer-3.3.2-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:573f6eac48f4769d667c4442081b1794f52919e7edada77495aaed9236d13a96"}, + {file = "charset_normalizer-3.3.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:549a3a73da901d5bc3ce8d24e0600d1fa85524c10287f6004fbab87672bf3e1e"}, + {file = "charset_normalizer-3.3.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f27273b60488abe721a075bcca6d7f3964f9f6f067c8c4c605743023d7d3944f"}, + {file = "charset_normalizer-3.3.2-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1ceae2f17a9c33cb48e3263960dc5fc8005351ee19db217e9b1bb15d28c02574"}, + {file = "charset_normalizer-3.3.2-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:65f6f63034100ead094b8744b3b97965785388f308a64cf8d7c34f2f2e5be0c4"}, + {file = "charset_normalizer-3.3.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:753f10e867343b4511128c6ed8c82f7bec3bd026875576dfd88483c5c73b2fd8"}, + {file = "charset_normalizer-3.3.2-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:4a78b2b446bd7c934f5dcedc588903fb2f5eec172f3d29e52a9096a43722adfc"}, + {file = "charset_normalizer-3.3.2-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:e537484df0d8f426ce2afb2d0f8e1c3d0b114b83f8850e5f2fbea0e797bd82ae"}, + {file = "charset_normalizer-3.3.2-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:eb6904c354526e758fda7167b33005998fb68c46fbc10e013ca97f21ca5c8887"}, + {file = "charset_normalizer-3.3.2-cp311-cp311-musllinux_1_1_ppc64le.whl", hash = "sha256:deb6be0ac38ece9ba87dea880e438f25ca3eddfac8b002a2ec3d9183a454e8ae"}, + {file = "charset_normalizer-3.3.2-cp311-cp311-musllinux_1_1_s390x.whl", hash = "sha256:4ab2fe47fae9e0f9dee8c04187ce5d09f48eabe611be8259444906793ab7cbce"}, + {file = "charset_normalizer-3.3.2-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:80402cd6ee291dcb72644d6eac93785fe2c8b9cb30893c1af5b8fdd753b9d40f"}, + {file = "charset_normalizer-3.3.2-cp311-cp311-win32.whl", hash = "sha256:7cd13a2e3ddeed6913a65e66e94b51d80a041145a026c27e6bb76c31a853c6ab"}, + {file = "charset_normalizer-3.3.2-cp311-cp311-win_amd64.whl", hash = "sha256:663946639d296df6a2bb2aa51b60a2454ca1cb29835324c640dafb5ff2131a77"}, + {file = "charset_normalizer-3.3.2-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:0b2b64d2bb6d3fb9112bafa732def486049e63de9618b5843bcdd081d8144cd8"}, + {file = "charset_normalizer-3.3.2-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:ddbb2551d7e0102e7252db79ba445cdab71b26640817ab1e3e3648dad515003b"}, + {file = "charset_normalizer-3.3.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:55086ee1064215781fff39a1af09518bc9255b50d6333f2e4c74ca09fac6a8f6"}, + {file = "charset_normalizer-3.3.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8f4a014bc36d3c57402e2977dada34f9c12300af536839dc38c0beab8878f38a"}, + {file = "charset_normalizer-3.3.2-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:a10af20b82360ab00827f916a6058451b723b4e65030c5a18577c8b2de5b3389"}, + {file = "charset_normalizer-3.3.2-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:8d756e44e94489e49571086ef83b2bb8ce311e730092d2c34ca8f7d925cb20aa"}, + {file = "charset_normalizer-3.3.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:90d558489962fd4918143277a773316e56c72da56ec7aa3dc3dbbe20fdfed15b"}, + {file = "charset_normalizer-3.3.2-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:6ac7ffc7ad6d040517be39eb591cac5ff87416c2537df6ba3cba3bae290c0fed"}, + {file = "charset_normalizer-3.3.2-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:7ed9e526742851e8d5cc9e6cf41427dfc6068d4f5a3bb03659444b4cabf6bc26"}, + {file = "charset_normalizer-3.3.2-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:8bdb58ff7ba23002a4c5808d608e4e6c687175724f54a5dade5fa8c67b604e4d"}, + {file = "charset_normalizer-3.3.2-cp312-cp312-musllinux_1_1_ppc64le.whl", hash = "sha256:6b3251890fff30ee142c44144871185dbe13b11bab478a88887a639655be1068"}, + {file = "charset_normalizer-3.3.2-cp312-cp312-musllinux_1_1_s390x.whl", hash = "sha256:b4a23f61ce87adf89be746c8a8974fe1c823c891d8f86eb218bb957c924bb143"}, + {file = "charset_normalizer-3.3.2-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:efcb3f6676480691518c177e3b465bcddf57cea040302f9f4e6e191af91174d4"}, + {file = "charset_normalizer-3.3.2-cp312-cp312-win32.whl", hash = "sha256:d965bba47ddeec8cd560687584e88cf699fd28f192ceb452d1d7ee807c5597b7"}, + {file = "charset_normalizer-3.3.2-cp312-cp312-win_amd64.whl", hash = "sha256:96b02a3dc4381e5494fad39be677abcb5e6634bf7b4fa83a6dd3112607547001"}, + {file = "charset_normalizer-3.3.2-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:95f2a5796329323b8f0512e09dbb7a1860c46a39da62ecb2324f116fa8fdc85c"}, + {file = "charset_normalizer-3.3.2-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c002b4ffc0be611f0d9da932eb0f704fe2602a9a949d1f738e4c34c75b0863d5"}, + {file = "charset_normalizer-3.3.2-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:a981a536974bbc7a512cf44ed14938cf01030a99e9b3a06dd59578882f06f985"}, + {file = "charset_normalizer-3.3.2-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:3287761bc4ee9e33561a7e058c72ac0938c4f57fe49a09eae428fd88aafe7bb6"}, + {file = "charset_normalizer-3.3.2-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:42cb296636fcc8b0644486d15c12376cb9fa75443e00fb25de0b8602e64c1714"}, + {file = "charset_normalizer-3.3.2-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:0a55554a2fa0d408816b3b5cedf0045f4b8e1a6065aec45849de2d6f3f8e9786"}, + {file = "charset_normalizer-3.3.2-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:c083af607d2515612056a31f0a8d9e0fcb5876b7bfc0abad3ecd275bc4ebc2d5"}, + {file = "charset_normalizer-3.3.2-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:87d1351268731db79e0f8e745d92493ee2841c974128ef629dc518b937d9194c"}, + {file = "charset_normalizer-3.3.2-cp37-cp37m-musllinux_1_1_ppc64le.whl", hash = "sha256:bd8f7df7d12c2db9fab40bdd87a7c09b1530128315d047a086fa3ae3435cb3a8"}, + {file = "charset_normalizer-3.3.2-cp37-cp37m-musllinux_1_1_s390x.whl", hash = "sha256:c180f51afb394e165eafe4ac2936a14bee3eb10debc9d9e4db8958fe36afe711"}, + {file = "charset_normalizer-3.3.2-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:8c622a5fe39a48f78944a87d4fb8a53ee07344641b0562c540d840748571b811"}, + {file = "charset_normalizer-3.3.2-cp37-cp37m-win32.whl", hash = "sha256:db364eca23f876da6f9e16c9da0df51aa4f104a972735574842618b8c6d999d4"}, + {file = "charset_normalizer-3.3.2-cp37-cp37m-win_amd64.whl", hash = "sha256:86216b5cee4b06df986d214f664305142d9c76df9b6512be2738aa72a2048f99"}, + {file = "charset_normalizer-3.3.2-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:6463effa3186ea09411d50efc7d85360b38d5f09b870c48e4600f63af490e56a"}, + {file = "charset_normalizer-3.3.2-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:6c4caeef8fa63d06bd437cd4bdcf3ffefe6738fb1b25951440d80dc7df8c03ac"}, + {file = "charset_normalizer-3.3.2-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:37e55c8e51c236f95b033f6fb391d7d7970ba5fe7ff453dad675e88cf303377a"}, + {file = "charset_normalizer-3.3.2-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:fb69256e180cb6c8a894fee62b3afebae785babc1ee98b81cdf68bbca1987f33"}, + {file = "charset_normalizer-3.3.2-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ae5f4161f18c61806f411a13b0310bea87f987c7d2ecdbdaad0e94eb2e404238"}, + {file = "charset_normalizer-3.3.2-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b2b0a0c0517616b6869869f8c581d4eb2dd83a4d79e0ebcb7d373ef9956aeb0a"}, + {file = "charset_normalizer-3.3.2-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:45485e01ff4d3630ec0d9617310448a8702f70e9c01906b0d0118bdf9d124cf2"}, + {file = "charset_normalizer-3.3.2-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:eb00ed941194665c332bf8e078baf037d6c35d7c4f3102ea2d4f16ca94a26dc8"}, + {file = "charset_normalizer-3.3.2-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:2127566c664442652f024c837091890cb1942c30937add288223dc895793f898"}, + {file = "charset_normalizer-3.3.2-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:a50aebfa173e157099939b17f18600f72f84eed3049e743b68ad15bd69b6bf99"}, + {file = "charset_normalizer-3.3.2-cp38-cp38-musllinux_1_1_ppc64le.whl", hash = "sha256:4d0d1650369165a14e14e1e47b372cfcb31d6ab44e6e33cb2d4e57265290044d"}, + {file = "charset_normalizer-3.3.2-cp38-cp38-musllinux_1_1_s390x.whl", hash = "sha256:923c0c831b7cfcb071580d3f46c4baf50f174be571576556269530f4bbd79d04"}, + {file = "charset_normalizer-3.3.2-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:06a81e93cd441c56a9b65d8e1d043daeb97a3d0856d177d5c90ba85acb3db087"}, + {file = "charset_normalizer-3.3.2-cp38-cp38-win32.whl", hash = "sha256:6ef1d82a3af9d3eecdba2321dc1b3c238245d890843e040e41e470ffa64c3e25"}, + {file = "charset_normalizer-3.3.2-cp38-cp38-win_amd64.whl", hash = "sha256:eb8821e09e916165e160797a6c17edda0679379a4be5c716c260e836e122f54b"}, + {file = "charset_normalizer-3.3.2-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:c235ebd9baae02f1b77bcea61bce332cb4331dc3617d254df3323aa01ab47bd4"}, + {file = "charset_normalizer-3.3.2-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:5b4c145409bef602a690e7cfad0a15a55c13320ff7a3ad7ca59c13bb8ba4d45d"}, + {file = "charset_normalizer-3.3.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:68d1f8a9e9e37c1223b656399be5d6b448dea850bed7d0f87a8311f1ff3dabb0"}, + {file = "charset_normalizer-3.3.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:22afcb9f253dac0696b5a4be4a1c0f8762f8239e21b99680099abd9b2b1b2269"}, + {file = "charset_normalizer-3.3.2-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:e27ad930a842b4c5eb8ac0016b0a54f5aebbe679340c26101df33424142c143c"}, + {file = "charset_normalizer-3.3.2-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:1f79682fbe303db92bc2b1136016a38a42e835d932bab5b3b1bfcfbf0640e519"}, + {file = "charset_normalizer-3.3.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b261ccdec7821281dade748d088bb6e9b69e6d15b30652b74cbbac25e280b796"}, + {file = "charset_normalizer-3.3.2-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:122c7fa62b130ed55f8f285bfd56d5f4b4a5b503609d181f9ad85e55c89f4185"}, + {file = "charset_normalizer-3.3.2-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:d0eccceffcb53201b5bfebb52600a5fb483a20b61da9dbc885f8b103cbe7598c"}, + {file = "charset_normalizer-3.3.2-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:9f96df6923e21816da7e0ad3fd47dd8f94b2a5ce594e00677c0013018b813458"}, + {file = "charset_normalizer-3.3.2-cp39-cp39-musllinux_1_1_ppc64le.whl", hash = "sha256:7f04c839ed0b6b98b1a7501a002144b76c18fb1c1850c8b98d458ac269e26ed2"}, + {file = "charset_normalizer-3.3.2-cp39-cp39-musllinux_1_1_s390x.whl", hash = "sha256:34d1c8da1e78d2e001f363791c98a272bb734000fcef47a491c1e3b0505657a8"}, + {file = "charset_normalizer-3.3.2-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:ff8fa367d09b717b2a17a052544193ad76cd49979c805768879cb63d9ca50561"}, + {file = "charset_normalizer-3.3.2-cp39-cp39-win32.whl", hash = "sha256:aed38f6e4fb3f5d6bf81bfa990a07806be9d83cf7bacef998ab1a9bd660a581f"}, + {file = "charset_normalizer-3.3.2-cp39-cp39-win_amd64.whl", hash = "sha256:b01b88d45a6fcb69667cd6d2f7a9aeb4bf53760d7fc536bf679ec94fe9f3ff3d"}, + {file = "charset_normalizer-3.3.2-py3-none-any.whl", hash = "sha256:3e4d1f6587322d2788836a99c69062fbb091331ec940e02d12d179c1d53e25fc"}, ] [[package]] @@ -338,25 +326,26 @@ files = [ [[package]] name = "datasets" -version = "2.14.5" +version = "2.14.7" description = "HuggingFace community-driven open-source library of datasets" optional = true python-versions = ">=3.8.0" files = [ - {file = "datasets-2.14.5-py3-none-any.whl", hash = "sha256:dd4155091034cba04d5a28711f2ed3944275ed15c5d0c5a2d0b6b9ea34a2bdfe"}, - {file = "datasets-2.14.5.tar.gz", hash = "sha256:b738a86540ab8e1a7806c8a3790b67be0056318d0c5d5a58a1b0dbdd76c0f568"}, + {file = "datasets-2.14.7-py3-none-any.whl", hash = "sha256:1a64041a7da4f4130f736fc371c1f528b8ddd208cebe156400f65719bdbba79d"}, + {file = "datasets-2.14.7.tar.gz", hash = "sha256:394cf9b4ec0694b25945977b16ad5d18d5c15fb0e94141713eb8ead7452caf9e"}, ] [package.dependencies] aiohttp = "*" dill = ">=0.3.0,<0.3.8" -fsspec = {version = ">=2023.1.0,<2023.9.0", extras = ["http"]} +fsspec = {version = ">=2023.1.0,<=2023.10.0", extras = ["http"]} huggingface-hub = ">=0.14.0,<1.0.0" multiprocess = "*" numpy = ">=1.17" packaging = "*" pandas = "*" pyarrow = ">=8.0.0" +pyarrow-hotfix = "*" pyyaml = ">=5.1" requests = ">=2.19.0" tqdm = ">=4.62.1" @@ -422,13 +411,13 @@ files = [ [[package]] name = "exceptiongroup" -version = "1.1.3" +version = "1.2.0" description = "Backport of PEP 654 (exception groups)" optional = false python-versions = ">=3.7" files = [ - {file = "exceptiongroup-1.1.3-py3-none-any.whl", hash = "sha256:343280667a4585d195ca1cf9cef84a4e178c4b6cf2274caef9859782b567d5e3"}, - {file = "exceptiongroup-1.1.3.tar.gz", hash = "sha256:097acd85d473d75af5bb98e41b61ff7fe35efe6675e4f9370ec6ec5126d160e9"}, + {file = "exceptiongroup-1.2.0-py3-none-any.whl", hash = "sha256:4bfd3996ac73b41e9b9628b04e079f193850720ea5945fc96a08633c66912f14"}, + {file = "exceptiongroup-1.2.0.tar.gz", hash = "sha256:91f5c769735f051a4290d52edd0858999b57e5876e9f85937691bd4c9fa3ed68"}, ] [package.extras] @@ -436,19 +425,19 @@ test = ["pytest (>=6)"] [[package]] name = "filelock" -version = "3.12.4" +version = "3.13.1" description = "A platform independent file lock." optional = false python-versions = ">=3.8" files = [ - {file = "filelock-3.12.4-py3-none-any.whl", hash = "sha256:08c21d87ded6e2b9da6728c3dff51baf1dcecf973b768ef35bcbc3447edb9ad4"}, - {file = "filelock-3.12.4.tar.gz", hash = "sha256:2e6f249f1f3654291606e046b09f1fd5eac39b360664c27f5aad072012f8bcbd"}, + {file = "filelock-3.13.1-py3-none-any.whl", hash = "sha256:57dbda9b35157b05fb3e58ee91448612eb674172fab98ee235ccb0b5bee19a1c"}, + {file = "filelock-3.13.1.tar.gz", hash = "sha256:521f5f56c50f8426f5e03ad3b281b490a87ef15bc6c526f168290f0c7148d44e"}, ] [package.extras] -docs = ["furo (>=2023.7.26)", "sphinx (>=7.1.2)", "sphinx-autodoc-typehints (>=1.24)"] -testing = ["covdefaults (>=2.3)", "coverage (>=7.3)", "diff-cover (>=7.7)", "pytest (>=7.4)", "pytest-cov (>=4.1)", "pytest-mock (>=3.11.1)", "pytest-timeout (>=2.1)"] -typing = ["typing-extensions (>=4.7.1)"] +docs = ["furo (>=2023.9.10)", "sphinx (>=7.2.6)", "sphinx-autodoc-typehints (>=1.24)"] +testing = ["covdefaults (>=2.3)", "coverage (>=7.3.2)", "diff-cover (>=8)", "pytest (>=7.4.3)", "pytest-cov (>=4.1)", "pytest-mock (>=3.12)", "pytest-timeout (>=2.2)"] +typing = ["typing-extensions (>=4.8)"] [[package]] name = "frozenlist" @@ -522,13 +511,13 @@ files = [ [[package]] name = "fsspec" -version = "2023.6.0" +version = "2023.10.0" description = "File-system specification" optional = false python-versions = ">=3.8" files = [ - {file = "fsspec-2023.6.0-py3-none-any.whl", hash = "sha256:1cbad1faef3e391fba6dc005ae9b5bdcbf43005c9167ce78c915549c352c869a"}, - {file = "fsspec-2023.6.0.tar.gz", hash = "sha256:d0b2f935446169753e7a5c5c55681c54ea91996cc67be93c39a154fb3a2742af"}, + {file = "fsspec-2023.10.0-py3-none-any.whl", hash = "sha256:346a8f024efeb749d2a5fca7ba8854474b1ff9af7c3faaf636a4548781136529"}, + {file = "fsspec-2023.10.0.tar.gz", hash = "sha256:330c66757591df346ad3091a53bd907e15348c2ba17d63fd54f5c39c4457d2a5"}, ] [package.dependencies] @@ -561,13 +550,13 @@ tqdm = ["tqdm"] [[package]] name = "googleapis-common-protos" -version = "1.60.0" +version = "1.61.0" description = "Common protobufs used in Google APIs" optional = false python-versions = ">=3.7" files = [ - {file = "googleapis-common-protos-1.60.0.tar.gz", hash = "sha256:e73ebb404098db405ba95d1e1ae0aa91c3e15a71da031a2eeb6b2e23e7bc3708"}, - {file = "googleapis_common_protos-1.60.0-py2.py3-none-any.whl", hash = "sha256:69f9bbcc6acde92cab2db95ce30a70bd2b81d20b12eff3f1aabaffcbe8a93918"}, + {file = "googleapis-common-protos-1.61.0.tar.gz", hash = "sha256:8a64866a97f6304a7179873a465d6eee97b7a24ec6cfd78e0f575e96b821240b"}, + {file = "googleapis_common_protos-1.61.0-py2.py3-none-any.whl", hash = "sha256:22f1915393bb3245343f6efe87f6fe868532efc12aa26b391b15132e1279f1c0"}, ] [package.dependencies] @@ -578,13 +567,13 @@ grpc = ["grpcio (>=1.44.0,<2.0.0.dev0)"] [[package]] name = "grpc-interceptor" -version = "0.15.3" +version = "0.15.4" description = "Simplifies gRPC interceptors" optional = false python-versions = ">=3.7,<4.0" files = [ - {file = "grpc-interceptor-0.15.3.tar.gz", hash = "sha256:33592cb9d8c00fceed5755c71029f75aef55b273496dbced06f1d48f2571fcc3"}, - {file = "grpc_interceptor-0.15.3-py3-none-any.whl", hash = "sha256:96be2043b7e49f9deb444f18b61c373ea28d22d81c90cd3b82127a4744eb9247"}, + {file = "grpc-interceptor-0.15.4.tar.gz", hash = "sha256:1f45c0bcb58b6f332f37c637632247c9b02bc6af0fdceb7ba7ce8d2ebbfb0926"}, + {file = "grpc_interceptor-0.15.4-py3-none-any.whl", hash = "sha256:0035f33228693ed3767ee49d937bac424318db173fef4d2d0170b3215f254d9d"}, ] [package.dependencies] @@ -595,195 +584,199 @@ testing = ["protobuf (>=4.21.9)"] [[package]] name = "grpcio" -version = "1.59.0" +version = "1.59.3" description = "HTTP/2-based RPC framework" optional = false python-versions = ">=3.7" files = [ - {file = "grpcio-1.59.0-cp310-cp310-linux_armv7l.whl", hash = "sha256:225e5fa61c35eeaebb4e7491cd2d768cd8eb6ed00f2664fa83a58f29418b39fd"}, - {file = "grpcio-1.59.0-cp310-cp310-macosx_12_0_universal2.whl", hash = "sha256:b95ec8ecc4f703f5caaa8d96e93e40c7f589bad299a2617bdb8becbcce525539"}, - {file = "grpcio-1.59.0-cp310-cp310-manylinux_2_17_aarch64.whl", hash = "sha256:1a839ba86764cc48226f50b924216000c79779c563a301586a107bda9cbe9dcf"}, - {file = "grpcio-1.59.0-cp310-cp310-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f6cfe44a5d7c7d5f1017a7da1c8160304091ca5dc64a0f85bca0d63008c3137a"}, - {file = "grpcio-1.59.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d0fcf53df684fcc0154b1e61f6b4a8c4cf5f49d98a63511e3f30966feff39cd0"}, - {file = "grpcio-1.59.0-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:fa66cac32861500f280bb60fe7d5b3e22d68c51e18e65367e38f8669b78cea3b"}, - {file = "grpcio-1.59.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:8cd2d38c2d52f607d75a74143113174c36d8a416d9472415eab834f837580cf7"}, - {file = "grpcio-1.59.0-cp310-cp310-win32.whl", hash = "sha256:228b91ce454876d7eed74041aff24a8f04c0306b7250a2da99d35dd25e2a1211"}, - {file = "grpcio-1.59.0-cp310-cp310-win_amd64.whl", hash = "sha256:ca87ee6183421b7cea3544190061f6c1c3dfc959e0b57a5286b108511fd34ff4"}, - {file = "grpcio-1.59.0-cp311-cp311-linux_armv7l.whl", hash = "sha256:c173a87d622ea074ce79be33b952f0b424fa92182063c3bda8625c11d3585d09"}, - {file = "grpcio-1.59.0-cp311-cp311-macosx_10_10_universal2.whl", hash = "sha256:ec78aebb9b6771d6a1de7b6ca2f779a2f6113b9108d486e904bde323d51f5589"}, - {file = "grpcio-1.59.0-cp311-cp311-manylinux_2_17_aarch64.whl", hash = "sha256:0b84445fa94d59e6806c10266b977f92fa997db3585f125d6b751af02ff8b9fe"}, - {file = "grpcio-1.59.0-cp311-cp311-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c251d22de8f9f5cca9ee47e4bade7c5c853e6e40743f47f5cc02288ee7a87252"}, - {file = "grpcio-1.59.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:956f0b7cb465a65de1bd90d5a7475b4dc55089b25042fe0f6c870707e9aabb1d"}, - {file = "grpcio-1.59.0-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:38da5310ef84e16d638ad89550b5b9424df508fd5c7b968b90eb9629ca9be4b9"}, - {file = "grpcio-1.59.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:63982150a7d598281fa1d7ffead6096e543ff8be189d3235dd2b5604f2c553e5"}, - {file = "grpcio-1.59.0-cp311-cp311-win32.whl", hash = "sha256:50eff97397e29eeee5df106ea1afce3ee134d567aa2c8e04fabab05c79d791a7"}, - {file = "grpcio-1.59.0-cp311-cp311-win_amd64.whl", hash = "sha256:15f03bd714f987d48ae57fe092cf81960ae36da4e520e729392a59a75cda4f29"}, - {file = "grpcio-1.59.0-cp312-cp312-linux_armv7l.whl", hash = "sha256:f1feb034321ae2f718172d86b8276c03599846dc7bb1792ae370af02718f91c5"}, - {file = "grpcio-1.59.0-cp312-cp312-macosx_10_10_universal2.whl", hash = "sha256:d09bd2a4e9f5a44d36bb8684f284835c14d30c22d8ec92ce796655af12163588"}, - {file = "grpcio-1.59.0-cp312-cp312-manylinux_2_17_aarch64.whl", hash = "sha256:2f120d27051e4c59db2f267b71b833796770d3ea36ca712befa8c5fff5da6ebd"}, - {file = "grpcio-1.59.0-cp312-cp312-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ba0ca727a173ee093f49ead932c051af463258b4b493b956a2c099696f38aa66"}, - {file = "grpcio-1.59.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5711c51e204dc52065f4a3327dca46e69636a0b76d3e98c2c28c4ccef9b04c52"}, - {file = "grpcio-1.59.0-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:d74f7d2d7c242a6af9d4d069552ec3669965b74fed6b92946e0e13b4168374f9"}, - {file = "grpcio-1.59.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:3859917de234a0a2a52132489c4425a73669de9c458b01c9a83687f1f31b5b10"}, - {file = "grpcio-1.59.0-cp312-cp312-win32.whl", hash = "sha256:de2599985b7c1b4ce7526e15c969d66b93687571aa008ca749d6235d056b7205"}, - {file = "grpcio-1.59.0-cp312-cp312-win_amd64.whl", hash = "sha256:598f3530231cf10ae03f4ab92d48c3be1fee0c52213a1d5958df1a90957e6a88"}, - {file = "grpcio-1.59.0-cp37-cp37m-linux_armv7l.whl", hash = "sha256:b34c7a4c31841a2ea27246a05eed8a80c319bfc0d3e644412ec9ce437105ff6c"}, - {file = "grpcio-1.59.0-cp37-cp37m-macosx_10_10_universal2.whl", hash = "sha256:c4dfdb49f4997dc664f30116af2d34751b91aa031f8c8ee251ce4dcfc11277b0"}, - {file = "grpcio-1.59.0-cp37-cp37m-manylinux_2_17_aarch64.whl", hash = "sha256:61bc72a00ecc2b79d9695220b4d02e8ba53b702b42411397e831c9b0589f08a3"}, - {file = "grpcio-1.59.0-cp37-cp37m-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f367e4b524cb319e50acbdea57bb63c3b717c5d561974ace0b065a648bb3bad3"}, - {file = "grpcio-1.59.0-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:849c47ef42424c86af069a9c5e691a765e304079755d5c29eff511263fad9c2a"}, - {file = "grpcio-1.59.0-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:c0488c2b0528e6072010182075615620071371701733c63ab5be49140ed8f7f0"}, - {file = "grpcio-1.59.0-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:611d9aa0017fa386809bddcb76653a5ab18c264faf4d9ff35cb904d44745f575"}, - {file = "grpcio-1.59.0-cp37-cp37m-win_amd64.whl", hash = "sha256:e5378785dce2b91eb2e5b857ec7602305a3b5cf78311767146464bfa365fc897"}, - {file = "grpcio-1.59.0-cp38-cp38-linux_armv7l.whl", hash = "sha256:fe976910de34d21057bcb53b2c5e667843588b48bf11339da2a75f5c4c5b4055"}, - {file = "grpcio-1.59.0-cp38-cp38-macosx_10_10_universal2.whl", hash = "sha256:c041a91712bf23b2a910f61e16565a05869e505dc5a5c025d429ca6de5de842c"}, - {file = "grpcio-1.59.0-cp38-cp38-manylinux_2_17_aarch64.whl", hash = "sha256:0ae444221b2c16d8211b55326f8ba173ba8f8c76349bfc1768198ba592b58f74"}, - {file = "grpcio-1.59.0-cp38-cp38-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ceb1e68135788c3fce2211de86a7597591f0b9a0d2bb80e8401fd1d915991bac"}, - {file = "grpcio-1.59.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6c4b1cc3a9dc1924d2eb26eec8792fedd4b3fcd10111e26c1d551f2e4eda79ce"}, - {file = "grpcio-1.59.0-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:871371ce0c0055d3db2a86fdebd1e1d647cf21a8912acc30052660297a5a6901"}, - {file = "grpcio-1.59.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:93e9cb546e610829e462147ce724a9cb108e61647a3454500438a6deef610be1"}, - {file = "grpcio-1.59.0-cp38-cp38-win32.whl", hash = "sha256:f21917aa50b40842b51aff2de6ebf9e2f6af3fe0971c31960ad6a3a2b24988f4"}, - {file = "grpcio-1.59.0-cp38-cp38-win_amd64.whl", hash = "sha256:14890da86a0c0e9dc1ea8e90101d7a3e0e7b1e71f4487fab36e2bfd2ecadd13c"}, - {file = "grpcio-1.59.0-cp39-cp39-linux_armv7l.whl", hash = "sha256:34341d9e81a4b669a5f5dca3b2a760b6798e95cdda2b173e65d29d0b16692857"}, - {file = "grpcio-1.59.0-cp39-cp39-macosx_10_10_universal2.whl", hash = "sha256:986de4aa75646e963466b386a8c5055c8b23a26a36a6c99052385d6fe8aaf180"}, - {file = "grpcio-1.59.0-cp39-cp39-manylinux_2_17_aarch64.whl", hash = "sha256:aca8a24fef80bef73f83eb8153f5f5a0134d9539b4c436a716256b311dda90a6"}, - {file = "grpcio-1.59.0-cp39-cp39-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:936b2e04663660c600d5173bc2cc84e15adbad9c8f71946eb833b0afc205b996"}, - {file = "grpcio-1.59.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fc8bf2e7bc725e76c0c11e474634a08c8f24bcf7426c0c6d60c8f9c6e70e4d4a"}, - {file = "grpcio-1.59.0-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:81d86a096ccd24a57fa5772a544c9e566218bc4de49e8c909882dae9d73392df"}, - {file = "grpcio-1.59.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:2ea95cd6abbe20138b8df965b4a8674ec312aaef3147c0f46a0bac661f09e8d0"}, - {file = "grpcio-1.59.0-cp39-cp39-win32.whl", hash = "sha256:3b8ff795d35a93d1df6531f31c1502673d1cebeeba93d0f9bd74617381507e3f"}, - {file = "grpcio-1.59.0-cp39-cp39-win_amd64.whl", hash = "sha256:38823bd088c69f59966f594d087d3a929d1ef310506bee9e3648317660d65b81"}, - {file = "grpcio-1.59.0.tar.gz", hash = "sha256:acf70a63cf09dd494000007b798aff88a436e1c03b394995ce450be437b8e54f"}, + {file = "grpcio-1.59.3-cp310-cp310-linux_armv7l.whl", hash = "sha256:aca028a6c7806e5b61e5f9f4232432c52856f7fcb98e330b20b6bc95d657bdcc"}, + {file = "grpcio-1.59.3-cp310-cp310-macosx_12_0_universal2.whl", hash = "sha256:19ad26a7967f7999c8960d2b9fe382dae74c55b0c508c613a6c2ba21cddf2354"}, + {file = "grpcio-1.59.3-cp310-cp310-manylinux_2_17_aarch64.whl", hash = "sha256:72b71dad2a3d1650e69ad42a5c4edbc59ee017f08c32c95694172bc501def23c"}, + {file = "grpcio-1.59.3-cp310-cp310-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c0f0a11d82d0253656cc42e04b6a149521e02e755fe2e4edd21123de610fd1d4"}, + {file = "grpcio-1.59.3-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:60cddafb70f9a2c81ba251b53b4007e07cca7389e704f86266e22c4bffd8bf1d"}, + {file = "grpcio-1.59.3-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:6c75a1fa0e677c1d2b6d4196ad395a5c381dfb8385f07ed034ef667cdcdbcc25"}, + {file = "grpcio-1.59.3-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:e1d8e01438d5964a11167eec1edb5f85ed8e475648f36c834ed5db4ffba24ac8"}, + {file = "grpcio-1.59.3-cp310-cp310-win32.whl", hash = "sha256:c4b0076f0bf29ee62335b055a9599f52000b7941f577daa001c7ef961a1fbeab"}, + {file = "grpcio-1.59.3-cp310-cp310-win_amd64.whl", hash = "sha256:b1f00a3e6e0c3dccccffb5579fc76ebfe4eb40405ba308505b41ef92f747746a"}, + {file = "grpcio-1.59.3-cp311-cp311-linux_armv7l.whl", hash = "sha256:3996aaa21231451161dc29df6a43fcaa8b332042b6150482c119a678d007dd86"}, + {file = "grpcio-1.59.3-cp311-cp311-macosx_10_10_universal2.whl", hash = "sha256:cb4e9cbd9b7388fcb06412da9f188c7803742d06d6f626304eb838d1707ec7e3"}, + {file = "grpcio-1.59.3-cp311-cp311-manylinux_2_17_aarch64.whl", hash = "sha256:8022ca303d6c694a0d7acfb2b472add920217618d3a99eb4b14edc7c6a7e8fcf"}, + {file = "grpcio-1.59.3-cp311-cp311-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:b36683fad5664283755a7f4e2e804e243633634e93cd798a46247b8e54e3cb0d"}, + {file = "grpcio-1.59.3-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8239b853226e4824e769517e1b5232e7c4dda3815b200534500338960fcc6118"}, + {file = "grpcio-1.59.3-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:0511af8653fbda489ff11d542a08505d56023e63cafbda60e6e00d4e0bae86ea"}, + {file = "grpcio-1.59.3-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:e78dc982bda74cef2ddfce1c91d29b96864c4c680c634e279ed204d51e227473"}, + {file = "grpcio-1.59.3-cp311-cp311-win32.whl", hash = "sha256:6a5c3a96405966c023e139c3bcccb2c7c776a6f256ac6d70f8558c9041bdccc3"}, + {file = "grpcio-1.59.3-cp311-cp311-win_amd64.whl", hash = "sha256:ed26826ee423b11477297b187371cdf4fa1eca874eb1156422ef3c9a60590dd9"}, + {file = "grpcio-1.59.3-cp312-cp312-linux_armv7l.whl", hash = "sha256:45dddc5cb5227d30fa43652d8872dc87f086d81ab4b500be99413bad0ae198d7"}, + {file = "grpcio-1.59.3-cp312-cp312-macosx_10_10_universal2.whl", hash = "sha256:1736496d74682e53dd0907fd515f2694d8e6a96c9a359b4080b2504bf2b2d91b"}, + {file = "grpcio-1.59.3-cp312-cp312-manylinux_2_17_aarch64.whl", hash = "sha256:ddbd1a16138e52e66229047624de364f88a948a4d92ba20e4e25ad7d22eef025"}, + {file = "grpcio-1.59.3-cp312-cp312-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:fcfa56f8d031ffda902c258c84c4b88707f3a4be4827b4e3ab8ec7c24676320d"}, + {file = "grpcio-1.59.3-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f2eb8f0c7c0c62f7a547ad7a91ba627a5aa32a5ae8d930783f7ee61680d7eb8d"}, + {file = "grpcio-1.59.3-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:8d993399cc65e3a34f8fd48dd9ad7a376734564b822e0160dd18b3d00c1a33f9"}, + {file = "grpcio-1.59.3-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:c0bd141f4f41907eb90bda74d969c3cb21c1c62779419782a5b3f5e4b5835718"}, + {file = "grpcio-1.59.3-cp312-cp312-win32.whl", hash = "sha256:33b8fd65d4e97efa62baec6171ce51f9cf68f3a8ba9f866f4abc9d62b5c97b79"}, + {file = "grpcio-1.59.3-cp312-cp312-win_amd64.whl", hash = "sha256:0e735ed002f50d4f3cb9ecfe8ac82403f5d842d274c92d99db64cfc998515e07"}, + {file = "grpcio-1.59.3-cp37-cp37m-linux_armv7l.whl", hash = "sha256:ea40ce4404e7cca0724c91a7404da410f0144148fdd58402a5942971e3469b94"}, + {file = "grpcio-1.59.3-cp37-cp37m-macosx_10_10_universal2.whl", hash = "sha256:83113bcc393477b6f7342b9f48e8a054330c895205517edc66789ceea0796b53"}, + {file = "grpcio-1.59.3-cp37-cp37m-manylinux_2_17_aarch64.whl", hash = "sha256:73afbac602b8f1212a50088193601f869b5073efa9855b3e51aaaec97848fc8a"}, + {file = "grpcio-1.59.3-cp37-cp37m-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:575d61de1950b0b0699917b686b1ca108690702fcc2df127b8c9c9320f93e069"}, + {file = "grpcio-1.59.3-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8cd76057b5c9a4d68814610ef9226925f94c1231bbe533fdf96f6181f7d2ff9e"}, + {file = "grpcio-1.59.3-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:95d6fd804c81efe4879e38bfd84d2b26e339a0a9b797e7615e884ef4686eb47b"}, + {file = "grpcio-1.59.3-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:0d42048b8a3286ea4134faddf1f9a59cf98192b94aaa10d910a25613c5eb5bfb"}, + {file = "grpcio-1.59.3-cp37-cp37m-win_amd64.whl", hash = "sha256:4619fea15c64bcdd9d447cdbdde40e3d5f1da3a2e8ae84103d94a9c1df210d7e"}, + {file = "grpcio-1.59.3-cp38-cp38-linux_armv7l.whl", hash = "sha256:95b5506e70284ac03b2005dd9ffcb6708c9ae660669376f0192a710687a22556"}, + {file = "grpcio-1.59.3-cp38-cp38-macosx_10_10_universal2.whl", hash = "sha256:9e17660947660ccfce56c7869032910c179a5328a77b73b37305cd1ee9301c2e"}, + {file = "grpcio-1.59.3-cp38-cp38-manylinux_2_17_aarch64.whl", hash = "sha256:00912ce19914d038851be5cd380d94a03f9d195643c28e3ad03d355cc02ce7e8"}, + {file = "grpcio-1.59.3-cp38-cp38-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e58b3cadaa3c90f1efca26ba33e0d408b35b497307027d3d707e4bcd8de862a6"}, + {file = "grpcio-1.59.3-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d787ecadea865bdf78f6679f6f5bf4b984f18f659257ba612979df97a298b3c3"}, + {file = "grpcio-1.59.3-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:0814942ba1bba269db4e760a34388640c601dece525c6a01f3b4ff030cc0db69"}, + {file = "grpcio-1.59.3-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:fb111aa99d3180c361a35b5ae1e2c63750220c584a1344229abc139d5c891881"}, + {file = "grpcio-1.59.3-cp38-cp38-win32.whl", hash = "sha256:eb8ba504c726befe40a356ecbe63c6c3c64c9a439b3164f5a718ec53c9874da0"}, + {file = "grpcio-1.59.3-cp38-cp38-win_amd64.whl", hash = "sha256:cdbc6b32fadab9bebc6f49d3e7ec4c70983c71e965497adab7f87de218e84391"}, + {file = "grpcio-1.59.3-cp39-cp39-linux_armv7l.whl", hash = "sha256:c82ca1e4be24a98a253d6dbaa216542e4163f33f38163fc77964b0f0d255b552"}, + {file = "grpcio-1.59.3-cp39-cp39-macosx_10_10_universal2.whl", hash = "sha256:36636babfda14f9e9687f28d5b66d349cf88c1301154dc71c6513de2b6c88c59"}, + {file = "grpcio-1.59.3-cp39-cp39-manylinux_2_17_aarch64.whl", hash = "sha256:5f9b2e591da751ac7fdd316cc25afafb7a626dededa9b414f90faad7f3ccebdb"}, + {file = "grpcio-1.59.3-cp39-cp39-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a93a82876a4926bf451db82ceb725bd87f42292bacc94586045261f501a86994"}, + {file = "grpcio-1.59.3-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ce31fa0bfdd1f2bb15b657c16105c8652186eab304eb512e6ae3b99b2fdd7d13"}, + {file = "grpcio-1.59.3-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:16da0e40573962dab6cba16bec31f25a4f468e6d05b658e589090fe103b03e3d"}, + {file = "grpcio-1.59.3-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:d1d1a17372fd425addd5812049fa7374008ffe689585f27f802d0935522cf4b7"}, + {file = "grpcio-1.59.3-cp39-cp39-win32.whl", hash = "sha256:52cc38a7241b5f7b4a91aaf9000fdd38e26bb00d5e8a71665ce40cfcee716281"}, + {file = "grpcio-1.59.3-cp39-cp39-win_amd64.whl", hash = "sha256:b491e5bbcad3020a96842040421e508780cade35baba30f402df9d321d1c423e"}, + {file = "grpcio-1.59.3.tar.gz", hash = "sha256:7800f99568a74a06ebdccd419dd1b6e639b477dcaf6da77ea702f8fb14ce5f80"}, ] [package.extras] -protobuf = ["grpcio-tools (>=1.59.0)"] +protobuf = ["grpcio-tools (>=1.59.3)"] [[package]] name = "grpcio-reflection" -version = "1.59.0" +version = "1.59.3" description = "Standard Protobuf Reflection Service for gRPC" optional = false python-versions = ">=3.6" files = [ - {file = "grpcio-reflection-1.59.0.tar.gz", hash = "sha256:1fe8f0dd6c180fdcf4e12ced2a8f784d9c741ccbc0b198585b1df024b7f8f3f2"}, - {file = "grpcio_reflection-1.59.0-py3-none-any.whl", hash = "sha256:bf4efc7e2e8162e5be9736f4d0a0b324c9bf0c04ad597a9d78fcaf1fbdf818ec"}, + {file = "grpcio-reflection-1.59.3.tar.gz", hash = "sha256:5403c5a738c6eec4bb4080da77e450312dace41a86e292ac37cfd007b2657d4e"}, + {file = "grpcio_reflection-1.59.3-py3-none-any.whl", hash = "sha256:526064089d71cddce7244a83059f410fac6ccd30344a624c10f54420d417b3d2"}, ] [package.dependencies] -grpcio = ">=1.59.0" +grpcio = ">=1.59.3" protobuf = ">=4.21.6" [[package]] name = "grpcio-status" -version = "1.59.0" +version = "1.59.3" description = "Status proto mapping for gRPC" optional = false python-versions = ">=3.6" files = [ - {file = "grpcio-status-1.59.0.tar.gz", hash = "sha256:f93b9c33e0a26162ef8431bfcffcc3e1fb217ccd8d7b5b3061b6e9f813e698b5"}, - {file = "grpcio_status-1.59.0-py3-none-any.whl", hash = "sha256:cb5a222b14a80ee050bff9676623822e953bff0c50d2d29180de723652fdf10d"}, + {file = "grpcio-status-1.59.3.tar.gz", hash = "sha256:65c394ba43380d6bdf8c04c61efc493104b5535552aed35817a1b4dc66598a1f"}, + {file = "grpcio_status-1.59.3-py3-none-any.whl", hash = "sha256:2fd2eb39ca4e9afb3c874c0878ff75b258db0b7dcc25570fc521f16ae0ab942a"}, ] [package.dependencies] googleapis-common-protos = ">=1.5.5" -grpcio = ">=1.59.0" +grpcio = ">=1.59.3" protobuf = ">=4.21.6" [[package]] name = "grpcio-tools" -version = "1.59.0" +version = "1.59.3" description = "Protobuf code generator for gRPC" optional = false python-versions = ">=3.7" files = [ - {file = "grpcio-tools-1.59.0.tar.gz", hash = "sha256:aa4018f2d8662ac4d9830445d3d253a11b3e096e8afe20865547137aa1160e93"}, - {file = "grpcio_tools-1.59.0-cp310-cp310-linux_armv7l.whl", hash = "sha256:882b809b42b5464bee55288f4e60837297f9618e53e69ae3eea6d61b05ce48fa"}, - {file = "grpcio_tools-1.59.0-cp310-cp310-macosx_12_0_universal2.whl", hash = "sha256:4499d4bc5aa9c7b645018d8b0db4bebd663d427aabcd7bee7777046cb1bcbca7"}, - {file = "grpcio_tools-1.59.0-cp310-cp310-manylinux_2_17_aarch64.whl", hash = "sha256:f381ae3ad6a5eb27aad8d810438937d8228977067c54e0bd456fce7e11fdbf3d"}, - {file = "grpcio_tools-1.59.0-cp310-cp310-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f1c684c0d9226d04cadafced620a46ab38c346d0780eaac7448da96bf12066a3"}, - {file = "grpcio_tools-1.59.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:40cbf712769242c2ba237745285ef789114d7fcfe8865fc4817d87f20015e99a"}, - {file = "grpcio_tools-1.59.0-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:1df755951f204e65bf9232a9cac5afe7d6b8e4c87ac084d3ecd738fdc7aa4174"}, - {file = "grpcio_tools-1.59.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:de156c18b0c638aaee3be6ad650c8ba7dec94ed4bac26403aec3dce95ffe9407"}, - {file = "grpcio_tools-1.59.0-cp310-cp310-win32.whl", hash = "sha256:9af7e138baa9b2895cf1f3eb718ac96fc5ae2f8e31fca405e21e0e5cd1643c52"}, - {file = "grpcio_tools-1.59.0-cp310-cp310-win_amd64.whl", hash = "sha256:f14a6e4f700dfd30ff8f0e6695f944affc16ae5a1e738666b3fae4e44b65637e"}, - {file = "grpcio_tools-1.59.0-cp311-cp311-linux_armv7l.whl", hash = "sha256:db030140d0da2368319e2f23655df3baec278c7e0078ecbe051eaf609a69382c"}, - {file = "grpcio_tools-1.59.0-cp311-cp311-macosx_10_10_universal2.whl", hash = "sha256:eeed386971bb8afc3ec45593df6a1154d680d87be1209ef8e782e44f85f47e64"}, - {file = "grpcio_tools-1.59.0-cp311-cp311-manylinux_2_17_aarch64.whl", hash = "sha256:962d1a3067129152cee3e172213486cb218a6bad703836991f46f216caefcf00"}, - {file = "grpcio_tools-1.59.0-cp311-cp311-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:26eb2eebf150a33ebf088e67c1acf37eb2ac4133d9bfccbaa011ad2148c08b42"}, - {file = "grpcio_tools-1.59.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5b2d6da553980c590487f2e7fd3ec9c1ad8805ff2ec77977b92faa7e3ca14e1f"}, - {file = "grpcio_tools-1.59.0-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:335e2f355a0c544a88854e2c053aff8a3f398b84a263a96fa19d063ca1fe513a"}, - {file = "grpcio_tools-1.59.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:204e08f807b1d83f5f0efea30c4e680afe26a43dec8ba614a45fa698a7ef0a19"}, - {file = "grpcio_tools-1.59.0-cp311-cp311-win32.whl", hash = "sha256:05bf7b3ed01c8a562bb7e840f864c58acedbd6924eb616367c0bd0a760bdf483"}, - {file = "grpcio_tools-1.59.0-cp311-cp311-win_amd64.whl", hash = "sha256:df85096fcac7cea8aa5bd84b7a39c4cdbf556b93669bb4772eb96aacd3222a4e"}, - {file = "grpcio_tools-1.59.0-cp312-cp312-linux_armv7l.whl", hash = "sha256:240a7a3c2c54f77f1f66085a635bca72003d02f56a670e7db19aec531eda8f78"}, - {file = "grpcio_tools-1.59.0-cp312-cp312-macosx_10_10_universal2.whl", hash = "sha256:6119f62c462d119c63227b9534210f0f13506a888151b9bf586f71e7edf5088b"}, - {file = "grpcio_tools-1.59.0-cp312-cp312-manylinux_2_17_aarch64.whl", hash = "sha256:387662bee8e4c0b52cc0f61eaaca0ca583f5b227103f685b76083a3590a71a3e"}, - {file = "grpcio_tools-1.59.0-cp312-cp312-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:8f0da5861ee276ca68493b217daef358960e8527cc63c7cb292ca1c9c54939af"}, - {file = "grpcio_tools-1.59.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d0f0806de1161c7f248e4c183633ee7a58dfe45c2b77ddf0136e2e7ad0650b1b"}, - {file = "grpcio_tools-1.59.0-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:c683be38a9bf4024c223929b4cd2f0a0858c94e9dc8b36d7eaa5a48ce9323a6f"}, - {file = "grpcio_tools-1.59.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:f965707da2b48a33128615bcfebedd215a3a30e346447e885bb3da37a143177a"}, - {file = "grpcio_tools-1.59.0-cp312-cp312-win32.whl", hash = "sha256:2ee960904dde12a7fa48e1591a5b3eeae054bdce57bacf9fd26685a98138f5bf"}, - {file = "grpcio_tools-1.59.0-cp312-cp312-win_amd64.whl", hash = "sha256:71cc6db1d66da3bc3730d9937bddc320f7b1f1dfdff6342bcb5741515fe4110b"}, - {file = "grpcio_tools-1.59.0-cp37-cp37m-linux_armv7l.whl", hash = "sha256:f6263b85261b62471cb97b7505df72d72b8b62e5e22d8184924871a6155b4dbf"}, - {file = "grpcio_tools-1.59.0-cp37-cp37m-macosx_10_10_universal2.whl", hash = "sha256:b8e95d921cc2a1521d4750eedefec9f16031457920a6677edebe9d1b2ad6ae60"}, - {file = "grpcio_tools-1.59.0-cp37-cp37m-manylinux_2_17_aarch64.whl", hash = "sha256:cb63055739808144b541986291679d643bae58755d0eb082157c4d4c04443905"}, - {file = "grpcio_tools-1.59.0-cp37-cp37m-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:8c4634b3589efa156a8d5860c0a2547315bd5c9e52d14c960d716fe86e0927be"}, - {file = "grpcio_tools-1.59.0-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2d970aa26854f535ffb94ea098aa8b43de020d9a14682e4a15dcdaeac7801b27"}, - {file = "grpcio_tools-1.59.0-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:821dba464d84ebbcffd9d420302404db2fa7a40c7ff4c4c4c93726f72bfa2769"}, - {file = "grpcio_tools-1.59.0-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:0548e901894399886ff4a4cd808cb850b60c021feb4a8977a0751f14dd7e55d9"}, - {file = "grpcio_tools-1.59.0-cp37-cp37m-win_amd64.whl", hash = "sha256:bb87158dbbb9e5a79effe78d54837599caa16df52d8d35366e06a91723b587ae"}, - {file = "grpcio_tools-1.59.0-cp38-cp38-linux_armv7l.whl", hash = "sha256:1d551ff42962c7c333c3da5c70d5e617a87dee581fa2e2c5ae2d5137c8886779"}, - {file = "grpcio_tools-1.59.0-cp38-cp38-macosx_10_10_universal2.whl", hash = "sha256:4ee443abcd241a5befb05629013fbf2eac637faa94aaa3056351aded8a31c1bc"}, - {file = "grpcio_tools-1.59.0-cp38-cp38-manylinux_2_17_aarch64.whl", hash = "sha256:520c0c83ea79d14b0679ba43e19c64ca31d30926b26ad2ca7db37cbd89c167e2"}, - {file = "grpcio_tools-1.59.0-cp38-cp38-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:9fc02a6e517c34dcf885ff3b57260b646551083903e3d2c780b4971ce7d4ab7c"}, - {file = "grpcio_tools-1.59.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6aec8a4ed3808b7dfc1276fe51e3e24bec0eeaf610d395bcd42934647cf902a3"}, - {file = "grpcio_tools-1.59.0-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:99b3bde646720bbfb77f263f5ba3e1a0de50632d43c38d405a0ef9c7e94373cd"}, - {file = "grpcio_tools-1.59.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:51d9595629998d8b519126c5a610f15deb0327cd6325ed10796b47d1d292e70b"}, - {file = "grpcio_tools-1.59.0-cp38-cp38-win32.whl", hash = "sha256:bfa4b2b7d21c5634b62e5f03462243bd705adc1a21806b5356b8ce06d902e160"}, - {file = "grpcio_tools-1.59.0-cp38-cp38-win_amd64.whl", hash = "sha256:9ed05197c5ab071e91bcef28901e97ca168c4ae94510cb67a14cb4931b94255a"}, - {file = "grpcio_tools-1.59.0-cp39-cp39-linux_armv7l.whl", hash = "sha256:498e7be0b14385980efa681444ba481349c131fc5ec88003819f5d929646947c"}, - {file = "grpcio_tools-1.59.0-cp39-cp39-macosx_10_10_universal2.whl", hash = "sha256:b519f2ecde9a579cad2f4a7057d5bb4e040ad17caab8b5e691ed7a13b9db0be9"}, - {file = "grpcio_tools-1.59.0-cp39-cp39-manylinux_2_17_aarch64.whl", hash = "sha256:ef3e8aca2261f7f07436d4e2111556c1fb9bf1f9cfcdf35262743ccdee1b6ce9"}, - {file = "grpcio_tools-1.59.0-cp39-cp39-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:27a7f226b741b2ebf7e2d0779d2c9b17f446d1b839d59886c1619e62cc2ae472"}, - {file = "grpcio_tools-1.59.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:784aa52965916fec5afa1a28eeee6f0073bb43a2a1d7fedf963393898843077a"}, - {file = "grpcio_tools-1.59.0-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:e312ddc2d8bec1a23306a661ad52734f984c9aad5d8f126ebb222a778d95407d"}, - {file = "grpcio_tools-1.59.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:868892ad9e00651a38dace3e4924bae82fc4fd4df2c65d37b74381570ee8deb1"}, - {file = "grpcio_tools-1.59.0-cp39-cp39-win32.whl", hash = "sha256:a4f6cae381f21fee1ef0a5cbbbb146680164311157ae618edf3061742d844383"}, - {file = "grpcio_tools-1.59.0-cp39-cp39-win_amd64.whl", hash = "sha256:4a10e59cca462208b489478340b52a96d64e8b8b6f1ac097f3e8cb211d3f66c0"}, + {file = "grpcio-tools-1.59.3.tar.gz", hash = "sha256:cd160ac4281cd1ae77a2c880377a7728349340b4c91e24285037b57c18e9f651"}, + {file = "grpcio_tools-1.59.3-cp310-cp310-linux_armv7l.whl", hash = "sha256:17017fe74734c158e0f93817f1ff17aeda37d0f105ed6e63b12c26b66743a7a8"}, + {file = "grpcio_tools-1.59.3-cp310-cp310-macosx_12_0_universal2.whl", hash = "sha256:ac1013e4f84ffd15c45ead6d19c9d188b76c14466a799aa9c338ce3b9ebf6dcc"}, + {file = "grpcio_tools-1.59.3-cp310-cp310-manylinux_2_17_aarch64.whl", hash = "sha256:0a5d760619305eb51a8719ce9c081398f145a46bc7c86a6e2cebe0648a21f40c"}, + {file = "grpcio_tools-1.59.3-cp310-cp310-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:de3d9649b7a3091ec785a67d5bf006584440f03896ee52259c6d9ff412d08afb"}, + {file = "grpcio_tools-1.59.3-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:21868aa510317d3f39e5de40208ffb8ab1beb1cbcab333317939b59a9b5db055"}, + {file = "grpcio_tools-1.59.3-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:0b116a888580317e421358f26bfaeec47d6f73079e8a47bad60d1f9f7b30f2a5"}, + {file = "grpcio_tools-1.59.3-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:6bd4a72c27abda191e2360b2b720ada1880aba96a63604a6f9d7c37bb3bbf5c4"}, + {file = "grpcio_tools-1.59.3-cp310-cp310-win32.whl", hash = "sha256:d70cad744e92c7576c226a72998ae8dd59240c942f73798bbde40284eb9eb991"}, + {file = "grpcio_tools-1.59.3-cp310-cp310-win_amd64.whl", hash = "sha256:2b8a4aca0c11f2a8b3bfe103362984bdc427ab762428446ef2e12922fd48ee10"}, + {file = "grpcio_tools-1.59.3-cp311-cp311-linux_armv7l.whl", hash = "sha256:b4418b78408ff56ee70a0b14484c07f5e48c2e6f4fa7be390d486a686d0cd6e4"}, + {file = "grpcio_tools-1.59.3-cp311-cp311-macosx_10_10_universal2.whl", hash = "sha256:58de83ced4f86458f45288a5f76d9765dc245a9ce4e783a194decccc7e0674ea"}, + {file = "grpcio_tools-1.59.3-cp311-cp311-manylinux_2_17_aarch64.whl", hash = "sha256:76b0cdcbcb38722840d3eaff6439ddb4b8f0210c6718553d7b7c911834b10e60"}, + {file = "grpcio_tools-1.59.3-cp311-cp311-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a0cacf59513b100bfb3d8de51ba43db6140aa9bcb7bba872badb48acb430c002"}, + {file = "grpcio_tools-1.59.3-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:019fdd986c80b13187574c291df5054f241bdbe87dbc86e4cee73ffa28328647"}, + {file = "grpcio_tools-1.59.3-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:ff304b9d6c23d8e2ecc860bebac1ec6768a2d920985bcea9ce4a7aaeeea44f76"}, + {file = "grpcio_tools-1.59.3-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:ca286affe613beaf2d5a6b8bd83203dcf488917194b416da48aa849047b5f081"}, + {file = "grpcio_tools-1.59.3-cp311-cp311-win32.whl", hash = "sha256:8f69141ff370729ceaad0286b8c6e15352c9bb39aa8f18c0500ce3d0238c2981"}, + {file = "grpcio_tools-1.59.3-cp311-cp311-win_amd64.whl", hash = "sha256:05ec4ffe16b6eab12813476e6d7465a0027bee33999d4776ae1d9c0664d0fc54"}, + {file = "grpcio_tools-1.59.3-cp312-cp312-linux_armv7l.whl", hash = "sha256:21d976419630f72a7cefebe7dcfc451b33d70c805a43ff5a60c43367813f0527"}, + {file = "grpcio_tools-1.59.3-cp312-cp312-macosx_10_10_universal2.whl", hash = "sha256:396106f92ea6ab2157535e1a009bac99aa15680ca8addbc8e7c1a4d3f5b1fb2c"}, + {file = "grpcio_tools-1.59.3-cp312-cp312-manylinux_2_17_aarch64.whl", hash = "sha256:4f064483e0046a4a193d6c67b26ea0f61737e8232ff61636a7fa0bc5244458be"}, + {file = "grpcio_tools-1.59.3-cp312-cp312-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a6dc6da8e3780df25095c1952f45c334e1554f25b991ffe75dbf0408795d27a0"}, + {file = "grpcio_tools-1.59.3-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:87111be05c1a159ce3fffbfad86ff69fd4bf1702cde127eb698d8e8c3a018ab6"}, + {file = "grpcio_tools-1.59.3-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:83453a13c2120238eb7fb993b03b370496e76071a7b45c816aa682d9226d29c1"}, + {file = "grpcio_tools-1.59.3-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:4384b29d8e126bc6e24a5efd9d60a2a2015867c7109fa67ff2ed274b3f4a05c5"}, + {file = "grpcio_tools-1.59.3-cp312-cp312-win32.whl", hash = "sha256:ce1372c9acde9d74c7e54858598ac0c5203dd3ec24b9085f7a8b2f33cc156736"}, + {file = "grpcio_tools-1.59.3-cp312-cp312-win_amd64.whl", hash = "sha256:84179e3a7c9067e993700b3255f2adc10e9b16e8dd28525d1dd1a02b9ca603ee"}, + {file = "grpcio_tools-1.59.3-cp37-cp37m-linux_armv7l.whl", hash = "sha256:592208a9a02df75993cc4dba111d2b81f0e6a3f3837856be239e1aceb6651f31"}, + {file = "grpcio_tools-1.59.3-cp37-cp37m-macosx_10_10_universal2.whl", hash = "sha256:1abe30ce770ac4fed966d017771fa7f8ced6a279de7ce68023e2c07f07911e76"}, + {file = "grpcio_tools-1.59.3-cp37-cp37m-manylinux_2_17_aarch64.whl", hash = "sha256:4dce57668e2aa8c3bb0b2a0bb766a2654ee0f4d8d31e02a6001e98af18569285"}, + {file = "grpcio_tools-1.59.3-cp37-cp37m-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:acaefd3c362250a02cc93fc1b5440e3cb30ab8d7fd81594b2975ea19f123aae3"}, + {file = "grpcio_tools-1.59.3-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:76542e1c11e3f2c22c19cff6b3233caab35924fad1f685ce63184d31b4050fa8"}, + {file = "grpcio_tools-1.59.3-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:64fd1efb23da054f61aca2346c5139f317b7f4c545f6dbda5ec246f281af8e86"}, + {file = "grpcio_tools-1.59.3-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:ea6454acde508c9a62dab3f59e98b32e32b26aa60df20080982503bb7db51304"}, + {file = "grpcio_tools-1.59.3-cp37-cp37m-win_amd64.whl", hash = "sha256:a048d4bde526f3c6e364abea2c3a481f3bbebc4bfa7fdcfcc3e5ee4f8ab9c4c5"}, + {file = "grpcio_tools-1.59.3-cp38-cp38-linux_armv7l.whl", hash = "sha256:b4db59a62d31c98105af08b1bfb8878c239e4cf31088f2d9864756cdfec67746"}, + {file = "grpcio_tools-1.59.3-cp38-cp38-macosx_10_10_universal2.whl", hash = "sha256:05ac0f6683759e5508081c09af26cb6cc949c2c54d75ff8b76344709f78dda53"}, + {file = "grpcio_tools-1.59.3-cp38-cp38-manylinux_2_17_aarch64.whl", hash = "sha256:a031e1ced828a00f1eb59db5f5d4dd39d3bd6a7df8187f84830d4a32a1bbc686"}, + {file = "grpcio_tools-1.59.3-cp38-cp38-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f48b4409b306675b7673dad725c9cf3234bf05623bf8a193ad14af39d0368ba6"}, + {file = "grpcio_tools-1.59.3-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:36524d97cc936767a69815b90be76a1420b3218a7724ce69cde6ece794e72a17"}, + {file = "grpcio_tools-1.59.3-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:7cc7e8b893a6c37a8a28735fede1aa40275988a668d1e22c5f234938a77d811d"}, + {file = "grpcio_tools-1.59.3-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:917be645a71cf9592d2f5a3589c20c074a6539954017e8e2dca5e8ba13025625"}, + {file = "grpcio_tools-1.59.3-cp38-cp38-win32.whl", hash = "sha256:a1394b7a65d738ee0ce4eac1fc95158dd9c97b5c3f690d259e6ee0bf131697de"}, + {file = "grpcio_tools-1.59.3-cp38-cp38-win_amd64.whl", hash = "sha256:007745bd3c5a788dcb73eeb6cd773613a834bd2442e7d062dcafe46dbd4bb5f6"}, + {file = "grpcio_tools-1.59.3-cp39-cp39-linux_armv7l.whl", hash = "sha256:102b5f14a500dbb766f24a96884d9572a3ea7a56d69695461100fb71ec922ef6"}, + {file = "grpcio_tools-1.59.3-cp39-cp39-macosx_10_10_universal2.whl", hash = "sha256:46c384a0e30a8422a3e2c5530b3cd69b652dd659549907e2eaac7ca4e0ab614d"}, + {file = "grpcio_tools-1.59.3-cp39-cp39-manylinux_2_17_aarch64.whl", hash = "sha256:ee013da4f5a4ef50fdeca372470733bc402677a4dc0023ee94bf42478b5a620d"}, + {file = "grpcio_tools-1.59.3-cp39-cp39-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:4b7883ce3d532c09f29c016fdac107f9a3dc43f9e6b60faf8b91fcba21824269"}, + {file = "grpcio_tools-1.59.3-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2861e4814ebc147854c2246092c433931f4c15f3c8105ae8716b1e282313a5ae"}, + {file = "grpcio_tools-1.59.3-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:d93590a6a82469f3e58e39692230d99c43a39b215cb581e072dcd52286471152"}, + {file = "grpcio_tools-1.59.3-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:f8396183e6e0a16178773963dc21b58c0c532783476fda314198a9e42f57af7d"}, + {file = "grpcio_tools-1.59.3-cp39-cp39-win32.whl", hash = "sha256:6747b1d82d08e0f5e1a6438532343a1c5504147d1a199c5756e702e5f916de4c"}, + {file = "grpcio_tools-1.59.3-cp39-cp39-win_amd64.whl", hash = "sha256:3a560dcb176dd42c37af5d37299e318341a572547e32b942247daa834d2164c0"}, ] [package.dependencies] -grpcio = ">=1.59.0" +grpcio = ">=1.59.3" protobuf = ">=4.21.6,<5.0dev" setuptools = "*" [[package]] name = "hf-transfer" -version = "0.1.3" +version = "0.1.4" description = "" optional = false python-versions = ">=3.7" files = [ - {file = "hf_transfer-0.1.3-cp310-cp310-macosx_10_9_x86_64.macosx_11_0_arm64.macosx_10_9_universal2.whl", hash = "sha256:862b6ddba8e236bdc73408c20d020cfe5069cac3fd0b6de901c46f031df2b7d9"}, - {file = "hf_transfer-0.1.3-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:569ef1ec6fec182e706ade4ea0c63f8510fd618ed7ced7c772efaafac7245b07"}, - {file = "hf_transfer-0.1.3-cp310-none-win_amd64.whl", hash = "sha256:c9faa88b3491c50d4aa75faf18ae24040cd91aa0565c7f7ba2357dbcbf8372f6"}, - {file = "hf_transfer-0.1.3-cp311-cp311-macosx_10_9_x86_64.macosx_11_0_arm64.macosx_10_9_universal2.whl", hash = "sha256:d53954ecfaadc84c15481bf5d4c7282323196b4b6df1d1be54208d4fdedfb407"}, - {file = "hf_transfer-0.1.3-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:334862f4a82f8a09d6d3f550e67d7e498bb8882e678b7725638254fed3276801"}, - {file = "hf_transfer-0.1.3-cp311-none-win_amd64.whl", hash = "sha256:da92a1483a66cf2baa96de133c75e7d5d9a60e4a0e60d228f26c573c73a1feb6"}, - {file = "hf_transfer-0.1.3-cp37-cp37m-macosx_10_9_x86_64.macosx_11_0_arm64.macosx_10_9_universal2.whl", hash = "sha256:617692a70cf299576d82cfc860923f29ec5c834a3f5242bc0853d4f106670398"}, - {file = "hf_transfer-0.1.3-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ca218fb6efc5c22379d9e64989e84bff426fcf5664fdbbf7cd70aa8b79497652"}, - {file = "hf_transfer-0.1.3-cp37-none-win_amd64.whl", hash = "sha256:6e5201b648df6106c232fcdb507db734081fd6220dfb1c432bd27c6fa9453331"}, - {file = "hf_transfer-0.1.3-cp38-cp38-macosx_10_9_x86_64.macosx_11_0_arm64.macosx_10_9_universal2.whl", hash = "sha256:445edfcd9d59c9d2541957177a9c9225b1f0e8855f6311fb16e20f67c3426421"}, - {file = "hf_transfer-0.1.3-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0c1bdfa554f0b0936c1623b99058c6998a00fdcd86f75d9203f3f66572d2e30c"}, - {file = "hf_transfer-0.1.3-cp38-none-win_amd64.whl", hash = "sha256:606f2fe436e5be73f07987a56cd97c695805413d29203ae39ebd9fc596405435"}, - {file = "hf_transfer-0.1.3-cp39-cp39-macosx_10_9_x86_64.macosx_11_0_arm64.macosx_10_9_universal2.whl", hash = "sha256:9913f5ad2644a1f57c1b7755a7d959ca5e0189863bb0473817d0707af230bf6a"}, - {file = "hf_transfer-0.1.3-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d935946791e338f748e05a23df877d74fbcd39dc7b537f0aa2e5a5841cf7dde8"}, - {file = "hf_transfer-0.1.3-cp39-none-win_amd64.whl", hash = "sha256:79099ac043423b263a2843a24213418f309d5c8bc458776622bffe012ebced73"}, - {file = "hf_transfer-0.1.3-pp37-pypy37_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3ce6c5965a57d94db5e043aa488a4df929a32000db125d9c9a1d325e8c7006dc"}, - {file = "hf_transfer-0.1.3-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8a7934c8e491bb395731f677f66dd5f6641432f338a3a9efc9f0b6c186d37cf8"}, - {file = "hf_transfer-0.1.3-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:efb8b41360c7e3d7700c147b70688aed0a03e86fbe5bcfdee079b0e634f026f9"}, - {file = "hf_transfer-0.1.3.tar.gz", hash = "sha256:7afd7eb03efad7812a48591b639b2e3f3d1f93c1e9060c18cc63ebf08d7e193c"}, + {file = "hf_transfer-0.1.4-cp310-cp310-macosx_10_9_x86_64.macosx_11_0_arm64.macosx_10_9_universal2.whl", hash = "sha256:6ff5fbde30a5bed35ef8f0d4ba78bde9f6d60a233dbff78a0e4035d6e6f71e4c"}, + {file = "hf_transfer-0.1.4-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d1c5c20f76e7f3451cff476b85c55dcb8566ebc94a596cb9eb39c0bb75db8675"}, + {file = "hf_transfer-0.1.4-cp310-none-win_amd64.whl", hash = "sha256:84c3ce20c68863a7d998711b98726ba9ae8f2e3fc0d685bc2c9ac9833c0f4048"}, + {file = "hf_transfer-0.1.4-cp311-cp311-macosx_10_9_x86_64.macosx_11_0_arm64.macosx_10_9_universal2.whl", hash = "sha256:dab1cf4e2e6fcb963fe0e48e6b5e3a95cf65ee376c7b6618a05dbb2ef0dde183"}, + {file = "hf_transfer-0.1.4-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:63c9c7aef90facf45391c86131ed00e74333637735cfec52da4f5170004d0b3f"}, + {file = "hf_transfer-0.1.4-cp311-none-win_amd64.whl", hash = "sha256:eca1fe6ae145e88455d0a174248080498cea52ad45cee50702070b47dffa421f"}, + {file = "hf_transfer-0.1.4-cp312-cp312-macosx_10_9_x86_64.macosx_11_0_arm64.macosx_10_9_universal2.whl", hash = "sha256:d07c0d26b5c01ad50d22ddcff7d30c4e8cbb823565b7f61e0ddb35f7faeae415"}, + {file = "hf_transfer-0.1.4-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a9b9cf169c3c64883b07f7ded5e3f14ae1d437eb77448738b88c923fc5597c47"}, + {file = "hf_transfer-0.1.4-cp312-none-win_amd64.whl", hash = "sha256:6b8518b9ebb85b0238745be81f7b88383c7ea216dd8407d46444bcc7806dc0ef"}, + {file = "hf_transfer-0.1.4-cp37-cp37m-macosx_10_9_x86_64.macosx_11_0_arm64.macosx_10_9_universal2.whl", hash = "sha256:ea32e9f91de3f2dad3567577c293f2e81a9309e680def4712ec0c4ea49be6833"}, + {file = "hf_transfer-0.1.4-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e81a10dbf2ac534083da06c200456b5d10ba7a1e8c4c5c48f7ea1ca4cf6af474"}, + {file = "hf_transfer-0.1.4-cp37-none-win_amd64.whl", hash = "sha256:97555bbff69a0459712e5d25d659c0dc74cb8f9726562ca66241f1e1b081f6a9"}, + {file = "hf_transfer-0.1.4-cp38-cp38-macosx_10_9_x86_64.macosx_11_0_arm64.macosx_10_9_universal2.whl", hash = "sha256:38bce7a511952e1b804168e956cd3a3b1ff7e38828259c3cdae27614060b90c5"}, + {file = "hf_transfer-0.1.4-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6d1977e94e8c8fc8a0e9ce74a651d4694629e526da246a492855fcfb710aa489"}, + {file = "hf_transfer-0.1.4-cp38-none-win_amd64.whl", hash = "sha256:6ca2d2c40e5e94c5de7e502037ad23ac1d803a2a12760b15b3e3f88c616202bd"}, + {file = "hf_transfer-0.1.4-cp39-cp39-macosx_10_9_x86_64.macosx_11_0_arm64.macosx_10_9_universal2.whl", hash = "sha256:c04a93acb58e50b8da1e2258185e54f6bf48ba24bf95e470310178b7047c1017"}, + {file = "hf_transfer-0.1.4-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f3028a807363e0b2c64985c44732ba4ab187a569f013367d2115a6e09ae95031"}, + {file = "hf_transfer-0.1.4-cp39-none-win_amd64.whl", hash = "sha256:dc9c7c1d0d79fc06baf86d41620623bb6bb2736755329ea6b1ec5faf71e3e36b"}, + {file = "hf_transfer-0.1.4-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1a466ae2b11d72df9e0005eb8ff7f537d5460c98b64fb6e49f3076ee14040dcf"}, + {file = "hf_transfer-0.1.4-pp37-pypy37_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fb34a023276936d4716112e17daea4ff98afc35b6113dd0f0383710dc208c058"}, + {file = "hf_transfer-0.1.4-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ba0647b84d7ff0eee1de6479179a5d43d0695001733f17eecc00153f0f8ab1ac"}, + {file = "hf_transfer-0.1.4-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:27d0bc1f8b79a6d65751efbce7eb02d2c1bd7e4de1a46aac18995461590ce4dd"}, + {file = "hf_transfer-0.1.4.tar.gz", hash = "sha256:687e090639cd52a48dedbfaa9e455a2c99c5169ece3d911f95983b1d4d4c84ed"}, ] [[package]] @@ -844,7 +837,7 @@ files = [ name = "jinja2" version = "3.1.2" description = "A very fast and expressive template engine." -optional = false +optional = true python-versions = ">=3.7" files = [ {file = "Jinja2-3.1.2-py3-none-any.whl", hash = "sha256:6088930bfe239f0e6710546ab9c19c9ef35e29792895fed6e6e31a023a182a61"}, @@ -879,7 +872,7 @@ dev = ["Sphinx (>=4.1.1)", "black (>=19.10b0)", "colorama (>=0.3.4)", "docutils name = "markupsafe" version = "2.1.3" description = "Safely add untrusted strings to HTML/XML markup." -optional = false +optional = true python-versions = ">=3.7" files = [ {file = "MarkupSafe-2.1.3-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:cd0f502fe016460680cd20aaa5a76d241d6f35a1c3350c474bac1273803893fa"}, @@ -948,7 +941,7 @@ files = [ name = "mpmath" version = "1.3.0" description = "Python library for arbitrary-precision floating-point arithmetic" -optional = false +optional = true python-versions = "*" files = [ {file = "mpmath-1.3.0-py3-none-any.whl", hash = "sha256:a0b2b9fe80bbcd81a6647ff13108738cfb482d481d826cc0e02f5b35e5c88d2c"}, @@ -1074,61 +1067,206 @@ dill = ">=0.3.7" [[package]] name = "networkx" -version = "3.1" +version = "3.2.1" description = "Python package for creating and manipulating graphs and networks" -optional = false -python-versions = ">=3.8" +optional = true +python-versions = ">=3.9" files = [ - {file = "networkx-3.1-py3-none-any.whl", hash = "sha256:4f33f68cb2afcf86f28a45f43efc27a9386b535d567d2127f8f61d51dec58d36"}, - {file = "networkx-3.1.tar.gz", hash = "sha256:de346335408f84de0eada6ff9fafafff9bcda11f0a0dfaa931133debb146ab61"}, + {file = "networkx-3.2.1-py3-none-any.whl", hash = "sha256:f18c69adc97877c42332c170849c96cefa91881c99a7cb3e95b7c659ebdc1ec2"}, + {file = "networkx-3.2.1.tar.gz", hash = "sha256:9f1bb5cf3409bf324e0a722c20bdb4c20ee39bf1c30ce8ae499c8502b0b5e0c6"}, ] [package.extras] -default = ["matplotlib (>=3.4)", "numpy (>=1.20)", "pandas (>=1.3)", "scipy (>=1.8)"] -developer = ["mypy (>=1.1)", "pre-commit (>=3.2)"] -doc = ["nb2plots (>=0.6)", "numpydoc (>=1.5)", "pillow (>=9.4)", "pydata-sphinx-theme (>=0.13)", "sphinx (>=6.1)", "sphinx-gallery (>=0.12)", "texext (>=0.6.7)"] -extra = ["lxml (>=4.6)", "pydot (>=1.4.2)", "pygraphviz (>=1.10)", "sympy (>=1.10)"] -test = ["codecov (>=2.1)", "pytest (>=7.2)", "pytest-cov (>=4.0)"] +default = ["matplotlib (>=3.5)", "numpy (>=1.22)", "pandas (>=1.4)", "scipy (>=1.9,!=1.11.0,!=1.11.1)"] +developer = ["changelist (==0.4)", "mypy (>=1.1)", "pre-commit (>=3.2)", "rtoml"] +doc = ["nb2plots (>=0.7)", "nbconvert (<7.9)", "numpydoc (>=1.6)", "pillow (>=9.4)", "pydata-sphinx-theme (>=0.14)", "sphinx (>=7)", "sphinx-gallery (>=0.14)", "texext (>=0.6.7)"] +extra = ["lxml (>=4.6)", "pydot (>=1.4.2)", "pygraphviz (>=1.11)", "sympy (>=1.10)"] +test = ["pytest (>=7.2)", "pytest-cov (>=4.0)"] [[package]] name = "numpy" -version = "1.26.0" +version = "1.26.2" description = "Fundamental package for array computing in Python" optional = false -python-versions = "<3.13,>=3.9" -files = [ - {file = "numpy-1.26.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:f8db2f125746e44dce707dd44d4f4efeea8d7e2b43aace3f8d1f235cfa2733dd"}, - {file = "numpy-1.26.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:0621f7daf973d34d18b4e4bafb210bbaf1ef5e0100b5fa750bd9cde84c7ac292"}, - {file = "numpy-1.26.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:51be5f8c349fdd1a5568e72713a21f518e7d6707bcf8503b528b88d33b57dc68"}, - {file = "numpy-1.26.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:767254ad364991ccfc4d81b8152912e53e103ec192d1bb4ea6b1f5a7117040be"}, - {file = "numpy-1.26.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:436c8e9a4bdeeee84e3e59614d38c3dbd3235838a877af8c211cfcac8a80b8d3"}, - {file = "numpy-1.26.0-cp310-cp310-win32.whl", hash = "sha256:c2e698cb0c6dda9372ea98a0344245ee65bdc1c9dd939cceed6bb91256837896"}, - {file = "numpy-1.26.0-cp310-cp310-win_amd64.whl", hash = "sha256:09aaee96c2cbdea95de76ecb8a586cb687d281c881f5f17bfc0fb7f5890f6b91"}, - {file = "numpy-1.26.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:637c58b468a69869258b8ae26f4a4c6ff8abffd4a8334c830ffb63e0feefe99a"}, - {file = "numpy-1.26.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:306545e234503a24fe9ae95ebf84d25cba1fdc27db971aa2d9f1ab6bba19a9dd"}, - {file = "numpy-1.26.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8c6adc33561bd1d46f81131d5352348350fc23df4d742bb246cdfca606ea1208"}, - {file = "numpy-1.26.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e062aa24638bb5018b7841977c360d2f5917268d125c833a686b7cbabbec496c"}, - {file = "numpy-1.26.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:546b7dd7e22f3c6861463bebb000646fa730e55df5ee4a0224408b5694cc6148"}, - {file = "numpy-1.26.0-cp311-cp311-win32.whl", hash = "sha256:c0b45c8b65b79337dee5134d038346d30e109e9e2e9d43464a2970e5c0e93229"}, - {file = "numpy-1.26.0-cp311-cp311-win_amd64.whl", hash = "sha256:eae430ecf5794cb7ae7fa3808740b015aa80747e5266153128ef055975a72b99"}, - {file = "numpy-1.26.0-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:166b36197e9debc4e384e9c652ba60c0bacc216d0fc89e78f973a9760b503388"}, - {file = "numpy-1.26.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:f042f66d0b4ae6d48e70e28d487376204d3cbf43b84c03bac57e28dac6151581"}, - {file = "numpy-1.26.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e5e18e5b14a7560d8acf1c596688f4dfd19b4f2945b245a71e5af4ddb7422feb"}, - {file = "numpy-1.26.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7f6bad22a791226d0a5c7c27a80a20e11cfe09ad5ef9084d4d3fc4a299cca505"}, - {file = "numpy-1.26.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:4acc65dd65da28060e206c8f27a573455ed724e6179941edb19f97e58161bb69"}, - {file = "numpy-1.26.0-cp312-cp312-win32.whl", hash = "sha256:bb0d9a1aaf5f1cb7967320e80690a1d7ff69f1d47ebc5a9bea013e3a21faec95"}, - {file = "numpy-1.26.0-cp312-cp312-win_amd64.whl", hash = "sha256:ee84ca3c58fe48b8ddafdeb1db87388dce2c3c3f701bf447b05e4cfcc3679112"}, - {file = "numpy-1.26.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:4a873a8180479bc829313e8d9798d5234dfacfc2e8a7ac188418189bb8eafbd2"}, - {file = "numpy-1.26.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:914b28d3215e0c721dc75db3ad6d62f51f630cb0c277e6b3bcb39519bed10bd8"}, - {file = "numpy-1.26.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c78a22e95182fb2e7874712433eaa610478a3caf86f28c621708d35fa4fd6e7f"}, - {file = "numpy-1.26.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:86f737708b366c36b76e953c46ba5827d8c27b7a8c9d0f471810728e5a2fe57c"}, - {file = "numpy-1.26.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:b44e6a09afc12952a7d2a58ca0a2429ee0d49a4f89d83a0a11052da696440e49"}, - {file = "numpy-1.26.0-cp39-cp39-win32.whl", hash = "sha256:5671338034b820c8d58c81ad1dafc0ed5a00771a82fccc71d6438df00302094b"}, - {file = "numpy-1.26.0-cp39-cp39-win_amd64.whl", hash = "sha256:020cdbee66ed46b671429c7265cf00d8ac91c046901c55684954c3958525dab2"}, - {file = "numpy-1.26.0-pp39-pypy39_pp73-macosx_10_9_x86_64.whl", hash = "sha256:0792824ce2f7ea0c82ed2e4fecc29bb86bee0567a080dacaf2e0a01fe7654369"}, - {file = "numpy-1.26.0-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7d484292eaeb3e84a51432a94f53578689ffdea3f90e10c8b203a99be5af57d8"}, - {file = "numpy-1.26.0-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:186ba67fad3c60dbe8a3abff3b67a91351100f2661c8e2a80364ae6279720299"}, - {file = "numpy-1.26.0.tar.gz", hash = "sha256:f93fc78fe8bf15afe2b8d6b6499f1c73953169fad1e9a8dd086cdff3190e7fdf"}, +python-versions = ">=3.9" +files = [ + {file = "numpy-1.26.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:3703fc9258a4a122d17043e57b35e5ef1c5a5837c3db8be396c82e04c1cf9b0f"}, + {file = "numpy-1.26.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:cc392fdcbd21d4be6ae1bb4475a03ce3b025cd49a9be5345d76d7585aea69440"}, + {file = "numpy-1.26.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:36340109af8da8805d8851ef1d74761b3b88e81a9bd80b290bbfed61bd2b4f75"}, + {file = "numpy-1.26.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bcc008217145b3d77abd3e4d5ef586e3bdfba8fe17940769f8aa09b99e856c00"}, + {file = "numpy-1.26.2-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:3ced40d4e9e18242f70dd02d739e44698df3dcb010d31f495ff00a31ef6014fe"}, + {file = "numpy-1.26.2-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:b272d4cecc32c9e19911891446b72e986157e6a1809b7b56518b4f3755267523"}, + {file = "numpy-1.26.2-cp310-cp310-win32.whl", hash = "sha256:22f8fc02fdbc829e7a8c578dd8d2e15a9074b630d4da29cda483337e300e3ee9"}, + {file = "numpy-1.26.2-cp310-cp310-win_amd64.whl", hash = "sha256:26c9d33f8e8b846d5a65dd068c14e04018d05533b348d9eaeef6c1bd787f9919"}, + {file = "numpy-1.26.2-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:b96e7b9c624ef3ae2ae0e04fa9b460f6b9f17ad8b4bec6d7756510f1f6c0c841"}, + {file = "numpy-1.26.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:aa18428111fb9a591d7a9cc1b48150097ba6a7e8299fb56bdf574df650e7d1f1"}, + {file = "numpy-1.26.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:06fa1ed84aa60ea6ef9f91ba57b5ed963c3729534e6e54055fc151fad0423f0a"}, + {file = "numpy-1.26.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:96ca5482c3dbdd051bcd1fce8034603d6ebfc125a7bd59f55b40d8f5d246832b"}, + {file = "numpy-1.26.2-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:854ab91a2906ef29dc3925a064fcd365c7b4da743f84b123002f6139bcb3f8a7"}, + {file = "numpy-1.26.2-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:f43740ab089277d403aa07567be138fc2a89d4d9892d113b76153e0e412409f8"}, + {file = "numpy-1.26.2-cp311-cp311-win32.whl", hash = "sha256:a2bbc29fcb1771cd7b7425f98b05307776a6baf43035d3b80c4b0f29e9545186"}, + {file = "numpy-1.26.2-cp311-cp311-win_amd64.whl", hash = "sha256:2b3fca8a5b00184828d12b073af4d0fc5fdd94b1632c2477526f6bd7842d700d"}, + {file = "numpy-1.26.2-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:a4cd6ed4a339c21f1d1b0fdf13426cb3b284555c27ac2f156dfdaaa7e16bfab0"}, + {file = "numpy-1.26.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:5d5244aabd6ed7f312268b9247be47343a654ebea52a60f002dc70c769048e75"}, + {file = "numpy-1.26.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6a3cdb4d9c70e6b8c0814239ead47da00934666f668426fc6e94cce869e13fd7"}, + {file = "numpy-1.26.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:aa317b2325f7aa0a9471663e6093c210cb2ae9c0ad824732b307d2c51983d5b6"}, + {file = "numpy-1.26.2-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:174a8880739c16c925799c018f3f55b8130c1f7c8e75ab0a6fa9d41cab092fd6"}, + {file = "numpy-1.26.2-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:f79b231bf5c16b1f39c7f4875e1ded36abee1591e98742b05d8a0fb55d8a3eec"}, + {file = "numpy-1.26.2-cp312-cp312-win32.whl", hash = "sha256:4a06263321dfd3598cacb252f51e521a8cb4b6df471bb12a7ee5cbab20ea9167"}, + {file = "numpy-1.26.2-cp312-cp312-win_amd64.whl", hash = "sha256:b04f5dc6b3efdaab541f7857351aac359e6ae3c126e2edb376929bd3b7f92d7e"}, + {file = "numpy-1.26.2-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:4eb8df4bf8d3d90d091e0146f6c28492b0be84da3e409ebef54349f71ed271ef"}, + {file = "numpy-1.26.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:1a13860fdcd95de7cf58bd6f8bc5a5ef81c0b0625eb2c9a783948847abbef2c2"}, + {file = "numpy-1.26.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:64308ebc366a8ed63fd0bf426b6a9468060962f1a4339ab1074c228fa6ade8e3"}, + {file = "numpy-1.26.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:baf8aab04a2c0e859da118f0b38617e5ee65d75b83795055fb66c0d5e9e9b818"}, + {file = "numpy-1.26.2-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:d73a3abcac238250091b11caef9ad12413dab01669511779bc9b29261dd50210"}, + {file = "numpy-1.26.2-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:b361d369fc7e5e1714cf827b731ca32bff8d411212fccd29ad98ad622449cc36"}, + {file = "numpy-1.26.2-cp39-cp39-win32.whl", hash = "sha256:bd3f0091e845164a20bd5a326860c840fe2af79fa12e0469a12768a3ec578d80"}, + {file = "numpy-1.26.2-cp39-cp39-win_amd64.whl", hash = "sha256:2beef57fb031dcc0dc8fa4fe297a742027b954949cabb52a2a376c144e5e6060"}, + {file = "numpy-1.26.2-pp39-pypy39_pp73-macosx_10_9_x86_64.whl", hash = "sha256:1cc3d5029a30fb5f06704ad6b23b35e11309491c999838c31f124fee32107c79"}, + {file = "numpy-1.26.2-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:94cc3c222bb9fb5a12e334d0479b97bb2df446fbe622b470928f5284ffca3f8d"}, + {file = "numpy-1.26.2-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:fe6b44fb8fcdf7eda4ef4461b97b3f63c466b27ab151bec2366db8b197387841"}, + {file = "numpy-1.26.2.tar.gz", hash = "sha256:f65738447676ab5777f11e6bbbdb8ce11b785e105f690bc45966574816b6d3ea"}, +] + +[[package]] +name = "nvidia-cublas-cu12" +version = "12.1.3.1" +description = "CUBLAS native runtime libraries" +optional = true +python-versions = ">=3" +files = [ + {file = "nvidia_cublas_cu12-12.1.3.1-py3-none-manylinux1_x86_64.whl", hash = "sha256:ee53ccca76a6fc08fb9701aa95b6ceb242cdaab118c3bb152af4e579af792728"}, + {file = "nvidia_cublas_cu12-12.1.3.1-py3-none-win_amd64.whl", hash = "sha256:2b964d60e8cf11b5e1073d179d85fa340c120e99b3067558f3cf98dd69d02906"}, +] + +[[package]] +name = "nvidia-cuda-cupti-cu12" +version = "12.1.105" +description = "CUDA profiling tools runtime libs." +optional = true +python-versions = ">=3" +files = [ + {file = "nvidia_cuda_cupti_cu12-12.1.105-py3-none-manylinux1_x86_64.whl", hash = "sha256:e54fde3983165c624cb79254ae9818a456eb6e87a7fd4d56a2352c24ee542d7e"}, + {file = "nvidia_cuda_cupti_cu12-12.1.105-py3-none-win_amd64.whl", hash = "sha256:bea8236d13a0ac7190bd2919c3e8e6ce1e402104276e6f9694479e48bb0eb2a4"}, +] + +[[package]] +name = "nvidia-cuda-nvrtc-cu12" +version = "12.1.105" +description = "NVRTC native runtime libraries" +optional = true +python-versions = ">=3" +files = [ + {file = "nvidia_cuda_nvrtc_cu12-12.1.105-py3-none-manylinux1_x86_64.whl", hash = "sha256:339b385f50c309763ca65456ec75e17bbefcbbf2893f462cb8b90584cd27a1c2"}, + {file = "nvidia_cuda_nvrtc_cu12-12.1.105-py3-none-win_amd64.whl", hash = "sha256:0a98a522d9ff138b96c010a65e145dc1b4850e9ecb75a0172371793752fd46ed"}, +] + +[[package]] +name = "nvidia-cuda-runtime-cu12" +version = "12.1.105" +description = "CUDA Runtime native Libraries" +optional = true +python-versions = ">=3" +files = [ + {file = "nvidia_cuda_runtime_cu12-12.1.105-py3-none-manylinux1_x86_64.whl", hash = "sha256:6e258468ddf5796e25f1dc591a31029fa317d97a0a94ed93468fc86301d61e40"}, + {file = "nvidia_cuda_runtime_cu12-12.1.105-py3-none-win_amd64.whl", hash = "sha256:dfb46ef84d73fababab44cf03e3b83f80700d27ca300e537f85f636fac474344"}, +] + +[[package]] +name = "nvidia-cudnn-cu12" +version = "8.9.2.26" +description = "cuDNN runtime libraries" +optional = true +python-versions = ">=3" +files = [ + {file = "nvidia_cudnn_cu12-8.9.2.26-py3-none-manylinux1_x86_64.whl", hash = "sha256:5ccb288774fdfb07a7e7025ffec286971c06d8d7b4fb162525334616d7629ff9"}, +] + +[package.dependencies] +nvidia-cublas-cu12 = "*" + +[[package]] +name = "nvidia-cufft-cu12" +version = "11.0.2.54" +description = "CUFFT native runtime libraries" +optional = true +python-versions = ">=3" +files = [ + {file = "nvidia_cufft_cu12-11.0.2.54-py3-none-manylinux1_x86_64.whl", hash = "sha256:794e3948a1aa71fd817c3775866943936774d1c14e7628c74f6f7417224cdf56"}, + {file = "nvidia_cufft_cu12-11.0.2.54-py3-none-win_amd64.whl", hash = "sha256:d9ac353f78ff89951da4af698f80870b1534ed69993f10a4cf1d96f21357e253"}, +] + +[[package]] +name = "nvidia-curand-cu12" +version = "10.3.2.106" +description = "CURAND native runtime libraries" +optional = true +python-versions = ">=3" +files = [ + {file = "nvidia_curand_cu12-10.3.2.106-py3-none-manylinux1_x86_64.whl", hash = "sha256:9d264c5036dde4e64f1de8c50ae753237c12e0b1348738169cd0f8a536c0e1e0"}, + {file = "nvidia_curand_cu12-10.3.2.106-py3-none-win_amd64.whl", hash = "sha256:75b6b0c574c0037839121317e17fd01f8a69fd2ef8e25853d826fec30bdba74a"}, +] + +[[package]] +name = "nvidia-cusolver-cu12" +version = "11.4.5.107" +description = "CUDA solver native runtime libraries" +optional = true +python-versions = ">=3" +files = [ + {file = "nvidia_cusolver_cu12-11.4.5.107-py3-none-manylinux1_x86_64.whl", hash = "sha256:8a7ec542f0412294b15072fa7dab71d31334014a69f953004ea7a118206fe0dd"}, + {file = "nvidia_cusolver_cu12-11.4.5.107-py3-none-win_amd64.whl", hash = "sha256:74e0c3a24c78612192a74fcd90dd117f1cf21dea4822e66d89e8ea80e3cd2da5"}, +] + +[package.dependencies] +nvidia-cublas-cu12 = "*" +nvidia-cusparse-cu12 = "*" +nvidia-nvjitlink-cu12 = "*" + +[[package]] +name = "nvidia-cusparse-cu12" +version = "12.1.0.106" +description = "CUSPARSE native runtime libraries" +optional = true +python-versions = ">=3" +files = [ + {file = "nvidia_cusparse_cu12-12.1.0.106-py3-none-manylinux1_x86_64.whl", hash = "sha256:f3b50f42cf363f86ab21f720998517a659a48131e8d538dc02f8768237bd884c"}, + {file = "nvidia_cusparse_cu12-12.1.0.106-py3-none-win_amd64.whl", hash = "sha256:b798237e81b9719373e8fae8d4f091b70a0cf09d9d85c95a557e11df2d8e9a5a"}, +] + +[package.dependencies] +nvidia-nvjitlink-cu12 = "*" + +[[package]] +name = "nvidia-nccl-cu12" +version = "2.18.1" +description = "NVIDIA Collective Communication Library (NCCL) Runtime" +optional = true +python-versions = ">=3" +files = [ + {file = "nvidia_nccl_cu12-2.18.1-py3-none-manylinux1_x86_64.whl", hash = "sha256:1a6c4acefcbebfa6de320f412bf7866de856e786e0462326ba1bac40de0b5e71"}, +] + +[[package]] +name = "nvidia-nvjitlink-cu12" +version = "12.3.101" +description = "Nvidia JIT LTO Library" +optional = true +python-versions = ">=3" +files = [ + {file = "nvidia_nvjitlink_cu12-12.3.101-py3-none-manylinux1_x86_64.whl", hash = "sha256:64335a8088e2b9d196ae8665430bc6a2b7e6ef2eb877a9c735c804bd4ff6467c"}, + {file = "nvidia_nvjitlink_cu12-12.3.101-py3-none-win_amd64.whl", hash = "sha256:1b2e317e437433753530792f13eece58f0aec21a2b05903be7bffe58a606cbd1"}, +] + +[[package]] +name = "nvidia-nvtx-cu12" +version = "12.1.105" +description = "NVIDIA Tools Extension" +optional = true +python-versions = ">=3" +files = [ + {file = "nvidia_nvtx_cu12-12.1.105-py3-none-manylinux1_x86_64.whl", hash = "sha256:dc21cf308ca5691e7c04d962e213f8a4aa9bbfa23d95412f452254c2caeb09e5"}, + {file = "nvidia_nvtx_cu12-12.1.105-py3-none-win_amd64.whl", hash = "sha256:65f4d98982b31b60026e0e6de73fbdfc09d08a96f4656dd3665ca616a11e1e82"}, ] [[package]] @@ -1298,50 +1436,50 @@ files = [ [[package]] name = "pandas" -version = "2.1.1" +version = "2.1.3" description = "Powerful data structures for data analysis, time series, and statistics" optional = true python-versions = ">=3.9" files = [ - {file = "pandas-2.1.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:58d997dbee0d4b64f3cb881a24f918b5f25dd64ddf31f467bb9b67ae4c63a1e4"}, - {file = "pandas-2.1.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:02304e11582c5d090e5a52aec726f31fe3f42895d6bfc1f28738f9b64b6f0614"}, - {file = "pandas-2.1.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ffa8f0966de2c22de408d0e322db2faed6f6e74265aa0856f3824813cf124363"}, - {file = "pandas-2.1.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c1f84c144dee086fe4f04a472b5cd51e680f061adf75c1ae4fc3a9275560f8f4"}, - {file = "pandas-2.1.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:75ce97667d06d69396d72be074f0556698c7f662029322027c226fd7a26965cb"}, - {file = "pandas-2.1.1-cp310-cp310-win_amd64.whl", hash = "sha256:4c3f32fd7c4dccd035f71734df39231ac1a6ff95e8bdab8d891167197b7018d2"}, - {file = "pandas-2.1.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:9e2959720b70e106bb1d8b6eadd8ecd7c8e99ccdbe03ee03260877184bb2877d"}, - {file = "pandas-2.1.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:25e8474a8eb258e391e30c288eecec565bfed3e026f312b0cbd709a63906b6f8"}, - {file = "pandas-2.1.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b8bd1685556f3374520466998929bade3076aeae77c3e67ada5ed2b90b4de7f0"}, - {file = "pandas-2.1.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:dc3657869c7902810f32bd072f0740487f9e030c1a3ab03e0af093db35a9d14e"}, - {file = "pandas-2.1.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:05674536bd477af36aa2effd4ec8f71b92234ce0cc174de34fd21e2ee99adbc2"}, - {file = "pandas-2.1.1-cp311-cp311-win_amd64.whl", hash = "sha256:b407381258a667df49d58a1b637be33e514b07f9285feb27769cedb3ab3d0b3a"}, - {file = "pandas-2.1.1-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:c747793c4e9dcece7bb20156179529898abf505fe32cb40c4052107a3c620b49"}, - {file = "pandas-2.1.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:3bcad1e6fb34b727b016775bea407311f7721db87e5b409e6542f4546a4951ea"}, - {file = "pandas-2.1.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f5ec7740f9ccb90aec64edd71434711f58ee0ea7f5ed4ac48be11cfa9abf7317"}, - {file = "pandas-2.1.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:29deb61de5a8a93bdd033df328441a79fcf8dd3c12d5ed0b41a395eef9cd76f0"}, - {file = "pandas-2.1.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:4f99bebf19b7e03cf80a4e770a3e65eee9dd4e2679039f542d7c1ace7b7b1daa"}, - {file = "pandas-2.1.1-cp312-cp312-win_amd64.whl", hash = "sha256:84e7e910096416adec68075dc87b986ff202920fb8704e6d9c8c9897fe7332d6"}, - {file = "pandas-2.1.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:366da7b0e540d1b908886d4feb3d951f2f1e572e655c1160f5fde28ad4abb750"}, - {file = "pandas-2.1.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:9e50e72b667415a816ac27dfcfe686dc5a0b02202e06196b943d54c4f9c7693e"}, - {file = "pandas-2.1.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:cc1ab6a25da197f03ebe6d8fa17273126120874386b4ac11c1d687df288542dd"}, - {file = "pandas-2.1.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a0dbfea0dd3901ad4ce2306575c54348d98499c95be01b8d885a2737fe4d7a98"}, - {file = "pandas-2.1.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:0489b0e6aa3d907e909aef92975edae89b1ee1654db5eafb9be633b0124abe97"}, - {file = "pandas-2.1.1-cp39-cp39-win_amd64.whl", hash = "sha256:4cdb0fab0400c2cb46dafcf1a0fe084c8bb2480a1fa8d81e19d15e12e6d4ded2"}, - {file = "pandas-2.1.1.tar.gz", hash = "sha256:fecb198dc389429be557cde50a2d46da8434a17fe37d7d41ff102e3987fd947b"}, + {file = "pandas-2.1.3-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:acf08a73b5022b479c1be155d4988b72f3020f308f7a87c527702c5f8966d34f"}, + {file = "pandas-2.1.3-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:3cc4469ff0cf9aa3a005870cb49ab8969942b7156e0a46cc3f5abd6b11051dfb"}, + {file = "pandas-2.1.3-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:35172bff95f598cc5866c047f43c7f4df2c893acd8e10e6653a4b792ed7f19bb"}, + {file = "pandas-2.1.3-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:59dfe0e65a2f3988e940224e2a70932edc964df79f3356e5f2997c7d63e758b4"}, + {file = "pandas-2.1.3-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:0296a66200dee556850d99b24c54c7dfa53a3264b1ca6f440e42bad424caea03"}, + {file = "pandas-2.1.3-cp310-cp310-win_amd64.whl", hash = "sha256:465571472267a2d6e00657900afadbe6097c8e1dc43746917db4dfc862e8863e"}, + {file = "pandas-2.1.3-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:04d4c58e1f112a74689da707be31cf689db086949c71828ef5da86727cfe3f82"}, + {file = "pandas-2.1.3-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:7fa2ad4ff196768ae63a33f8062e6838efed3a319cf938fdf8b95e956c813042"}, + {file = "pandas-2.1.3-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4441ac94a2a2613e3982e502ccec3bdedefe871e8cea54b8775992485c5660ef"}, + {file = "pandas-2.1.3-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d5ded6ff28abbf0ea7689f251754d3789e1edb0c4d0d91028f0b980598418a58"}, + {file = "pandas-2.1.3-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:fca5680368a5139d4920ae3dc993eb5106d49f814ff24018b64d8850a52c6ed2"}, + {file = "pandas-2.1.3-cp311-cp311-win_amd64.whl", hash = "sha256:de21e12bf1511190fc1e9ebc067f14ca09fccfb189a813b38d63211d54832f5f"}, + {file = "pandas-2.1.3-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:a5d53c725832e5f1645e7674989f4c106e4b7249c1d57549023ed5462d73b140"}, + {file = "pandas-2.1.3-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:7cf4cf26042476e39394f1f86868d25b265ff787c9b2f0d367280f11afbdee6d"}, + {file = "pandas-2.1.3-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:72c84ec1b1d8e5efcbff5312abe92bfb9d5b558f11e0cf077f5496c4f4a3c99e"}, + {file = "pandas-2.1.3-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1f539e113739a3e0cc15176bf1231a553db0239bfa47a2c870283fd93ba4f683"}, + {file = "pandas-2.1.3-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:fc77309da3b55732059e484a1efc0897f6149183c522390772d3561f9bf96c00"}, + {file = "pandas-2.1.3-cp312-cp312-win_amd64.whl", hash = "sha256:08637041279b8981a062899da0ef47828df52a1838204d2b3761fbd3e9fcb549"}, + {file = "pandas-2.1.3-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:b99c4e51ef2ed98f69099c72c75ec904dd610eb41a32847c4fcbc1a975f2d2b8"}, + {file = "pandas-2.1.3-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:f7ea8ae8004de0381a2376662c0505bb0a4f679f4c61fbfd122aa3d1b0e5f09d"}, + {file = "pandas-2.1.3-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:fcd76d67ca2d48f56e2db45833cf9d58f548f97f61eecd3fdc74268417632b8a"}, + {file = "pandas-2.1.3-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1329dbe93a880a3d7893149979caa82d6ba64a25e471682637f846d9dbc10dd2"}, + {file = "pandas-2.1.3-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:321ecdb117bf0f16c339cc6d5c9a06063854f12d4d9bc422a84bb2ed3207380a"}, + {file = "pandas-2.1.3-cp39-cp39-win_amd64.whl", hash = "sha256:11a771450f36cebf2a4c9dbd3a19dfa8c46c4b905a3ea09dc8e556626060fe71"}, + {file = "pandas-2.1.3.tar.gz", hash = "sha256:22929f84bca106921917eb73c1521317ddd0a4c71b395bcf767a106e3494209f"}, ] [package.dependencies] numpy = [ - {version = ">=1.22.4", markers = "python_version < \"3.11\""}, - {version = ">=1.23.2", markers = "python_version == \"3.11\""}, - {version = ">=1.26.0", markers = "python_version >= \"3.12\""}, + {version = ">=1.22.4,<2", markers = "python_version < \"3.11\""}, + {version = ">=1.23.2,<2", markers = "python_version == \"3.11\""}, + {version = ">=1.26.0,<2", markers = "python_version >= \"3.12\""}, ] python-dateutil = ">=2.8.2" pytz = ">=2020.1" tzdata = ">=2022.1" [package.extras] -all = ["PyQt5 (>=5.15.6)", "SQLAlchemy (>=1.4.36)", "beautifulsoup4 (>=4.11.1)", "bottleneck (>=1.3.4)", "dataframe-api-compat (>=0.1.7)", "fastparquet (>=0.8.1)", "fsspec (>=2022.05.0)", "gcsfs (>=2022.05.0)", "html5lib (>=1.1)", "hypothesis (>=6.46.1)", "jinja2 (>=3.1.2)", "lxml (>=4.8.0)", "matplotlib (>=3.6.1)", "numba (>=0.55.2)", "numexpr (>=2.8.0)", "odfpy (>=1.4.1)", "openpyxl (>=3.0.10)", "pandas-gbq (>=0.17.5)", "psycopg2 (>=2.9.3)", "pyarrow (>=7.0.0)", "pymysql (>=1.0.2)", "pyreadstat (>=1.1.5)", "pytest (>=7.3.2)", "pytest-asyncio (>=0.17.0)", "pytest-xdist (>=2.2.0)", "pyxlsb (>=1.0.9)", "qtpy (>=2.2.0)", "s3fs (>=2022.05.0)", "scipy (>=1.8.1)", "tables (>=3.7.0)", "tabulate (>=0.8.10)", "xarray (>=2022.03.0)", "xlrd (>=2.0.1)", "xlsxwriter (>=3.0.3)", "zstandard (>=0.17.0)"] +all = ["PyQt5 (>=5.15.6)", "SQLAlchemy (>=1.4.36)", "beautifulsoup4 (>=4.11.1)", "bottleneck (>=1.3.4)", "dataframe-api-compat (>=0.1.7)", "fastparquet (>=0.8.1)", "fsspec (>=2022.05.0)", "gcsfs (>=2022.05.0)", "html5lib (>=1.1)", "hypothesis (>=6.46.1)", "jinja2 (>=3.1.2)", "lxml (>=4.8.0)", "matplotlib (>=3.6.1)", "numba (>=0.55.2)", "numexpr (>=2.8.0)", "odfpy (>=1.4.1)", "openpyxl (>=3.0.10)", "pandas-gbq (>=0.17.5)", "psycopg2 (>=2.9.3)", "pyarrow (>=7.0.0)", "pymysql (>=1.0.2)", "pyreadstat (>=1.1.5)", "pytest (>=7.3.2)", "pytest-xdist (>=2.2.0)", "pyxlsb (>=1.0.9)", "qtpy (>=2.2.0)", "s3fs (>=2022.05.0)", "scipy (>=1.8.1)", "tables (>=3.7.0)", "tabulate (>=0.8.10)", "xarray (>=2022.03.0)", "xlrd (>=2.0.1)", "xlsxwriter (>=3.0.3)", "zstandard (>=0.17.0)"] aws = ["s3fs (>=2022.05.0)"] clipboard = ["PyQt5 (>=5.15.6)", "qtpy (>=2.2.0)"] compression = ["zstandard (>=0.17.0)"] @@ -1361,14 +1499,14 @@ plot = ["matplotlib (>=3.6.1)"] postgresql = ["SQLAlchemy (>=1.4.36)", "psycopg2 (>=2.9.3)"] spss = ["pyreadstat (>=1.1.5)"] sql-other = ["SQLAlchemy (>=1.4.36)"] -test = ["hypothesis (>=6.46.1)", "pytest (>=7.3.2)", "pytest-asyncio (>=0.17.0)", "pytest-xdist (>=2.2.0)"] +test = ["hypothesis (>=6.46.1)", "pytest (>=7.3.2)", "pytest-xdist (>=2.2.0)"] xml = ["lxml (>=4.8.0)"] [[package]] name = "peft" version = "0.4.0" description = "Parameter-Efficient Fine-Tuning (PEFT)" -optional = false +optional = true python-versions = ">=3.8.0" files = [ {file = "peft-0.4.0-py3-none-any.whl", hash = "sha256:2cf992772a6d703814477e0bdcdadd68cb8ea388111ce2d793dd2ff0e438f357"}, @@ -1393,65 +1531,65 @@ test = ["black (>=22.0,<23.0)", "datasets", "diffusers", "hf-doc-builder", "para [[package]] name = "pillow" -version = "10.0.1" +version = "10.1.0" description = "Python Imaging Library (Fork)" optional = false python-versions = ">=3.8" files = [ - {file = "Pillow-10.0.1-cp310-cp310-macosx_10_10_x86_64.whl", hash = "sha256:8f06be50669087250f319b706decf69ca71fdecd829091a37cc89398ca4dc17a"}, - {file = "Pillow-10.0.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:50bd5f1ebafe9362ad622072a1d2f5850ecfa44303531ff14353a4059113b12d"}, - {file = "Pillow-10.0.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e6a90167bcca1216606223a05e2cf991bb25b14695c518bc65639463d7db722d"}, - {file = "Pillow-10.0.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f11c9102c56ffb9ca87134bd025a43d2aba3f1155f508eff88f694b33a9c6d19"}, - {file = "Pillow-10.0.1-cp310-cp310-manylinux_2_28_aarch64.whl", hash = "sha256:186f7e04248103482ea6354af6d5bcedb62941ee08f7f788a1c7707bc720c66f"}, - {file = "Pillow-10.0.1-cp310-cp310-manylinux_2_28_x86_64.whl", hash = "sha256:0462b1496505a3462d0f35dc1c4d7b54069747d65d00ef48e736acda2c8cbdff"}, - {file = "Pillow-10.0.1-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:d889b53ae2f030f756e61a7bff13684dcd77e9af8b10c6048fb2c559d6ed6eaf"}, - {file = "Pillow-10.0.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:552912dbca585b74d75279a7570dd29fa43b6d93594abb494ebb31ac19ace6bd"}, - {file = "Pillow-10.0.1-cp310-cp310-win_amd64.whl", hash = "sha256:787bb0169d2385a798888e1122c980c6eff26bf941a8ea79747d35d8f9210ca0"}, - {file = "Pillow-10.0.1-cp311-cp311-macosx_10_10_x86_64.whl", hash = "sha256:fd2a5403a75b54661182b75ec6132437a181209b901446ee5724b589af8edef1"}, - {file = "Pillow-10.0.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:2d7e91b4379f7a76b31c2dda84ab9e20c6220488e50f7822e59dac36b0cd92b1"}, - {file = "Pillow-10.0.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:19e9adb3f22d4c416e7cd79b01375b17159d6990003633ff1d8377e21b7f1b21"}, - {file = "Pillow-10.0.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:93139acd8109edcdeffd85e3af8ae7d88b258b3a1e13a038f542b79b6d255c54"}, - {file = "Pillow-10.0.1-cp311-cp311-manylinux_2_28_aarch64.whl", hash = "sha256:92a23b0431941a33242b1f0ce6c88a952e09feeea9af4e8be48236a68ffe2205"}, - {file = "Pillow-10.0.1-cp311-cp311-manylinux_2_28_x86_64.whl", hash = "sha256:cbe68deb8580462ca0d9eb56a81912f59eb4542e1ef8f987405e35a0179f4ea2"}, - {file = "Pillow-10.0.1-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:522ff4ac3aaf839242c6f4e5b406634bfea002469656ae8358644fc6c4856a3b"}, - {file = "Pillow-10.0.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:84efb46e8d881bb06b35d1d541aa87f574b58e87f781cbba8d200daa835b42e1"}, - {file = "Pillow-10.0.1-cp311-cp311-win_amd64.whl", hash = "sha256:898f1d306298ff40dc1b9ca24824f0488f6f039bc0e25cfb549d3195ffa17088"}, - {file = "Pillow-10.0.1-cp312-cp312-macosx_10_10_x86_64.whl", hash = "sha256:bcf1207e2f2385a576832af02702de104be71301c2696d0012b1b93fe34aaa5b"}, - {file = "Pillow-10.0.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:5d6c9049c6274c1bb565021367431ad04481ebb54872edecfcd6088d27edd6ed"}, - {file = "Pillow-10.0.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:28444cb6ad49726127d6b340217f0627abc8732f1194fd5352dec5e6a0105635"}, - {file = "Pillow-10.0.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:de596695a75496deb3b499c8c4f8e60376e0516e1a774e7bc046f0f48cd620ad"}, - {file = "Pillow-10.0.1-cp312-cp312-manylinux_2_28_aarch64.whl", hash = "sha256:2872f2d7846cf39b3dbff64bc1104cc48c76145854256451d33c5faa55c04d1a"}, - {file = "Pillow-10.0.1-cp312-cp312-manylinux_2_28_x86_64.whl", hash = "sha256:4ce90f8a24e1c15465048959f1e94309dfef93af272633e8f37361b824532e91"}, - {file = "Pillow-10.0.1-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:ee7810cf7c83fa227ba9125de6084e5e8b08c59038a7b2c9045ef4dde61663b4"}, - {file = "Pillow-10.0.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:b1be1c872b9b5fcc229adeadbeb51422a9633abd847c0ff87dc4ef9bb184ae08"}, - {file = "Pillow-10.0.1-cp312-cp312-win_amd64.whl", hash = "sha256:98533fd7fa764e5f85eebe56c8e4094db912ccbe6fbf3a58778d543cadd0db08"}, - {file = "Pillow-10.0.1-cp38-cp38-macosx_10_10_x86_64.whl", hash = "sha256:764d2c0daf9c4d40ad12fbc0abd5da3af7f8aa11daf87e4fa1b834000f4b6b0a"}, - {file = "Pillow-10.0.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:fcb59711009b0168d6ee0bd8fb5eb259c4ab1717b2f538bbf36bacf207ef7a68"}, - {file = "Pillow-10.0.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:697a06bdcedd473b35e50a7e7506b1d8ceb832dc238a336bd6f4f5aa91a4b500"}, - {file = "Pillow-10.0.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9f665d1e6474af9f9da5e86c2a3a2d2d6204e04d5af9c06b9d42afa6ebde3f21"}, - {file = "Pillow-10.0.1-cp38-cp38-manylinux_2_28_aarch64.whl", hash = "sha256:2fa6dd2661838c66f1a5473f3b49ab610c98a128fc08afbe81b91a1f0bf8c51d"}, - {file = "Pillow-10.0.1-cp38-cp38-manylinux_2_28_x86_64.whl", hash = "sha256:3a04359f308ebee571a3127fdb1bd01f88ba6f6fb6d087f8dd2e0d9bff43f2a7"}, - {file = "Pillow-10.0.1-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:723bd25051454cea9990203405fa6b74e043ea76d4968166dfd2569b0210886a"}, - {file = "Pillow-10.0.1-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:71671503e3015da1b50bd18951e2f9daf5b6ffe36d16f1eb2c45711a301521a7"}, - {file = "Pillow-10.0.1-cp38-cp38-win_amd64.whl", hash = "sha256:44e7e4587392953e5e251190a964675f61e4dae88d1e6edbe9f36d6243547ff3"}, - {file = "Pillow-10.0.1-cp39-cp39-macosx_10_10_x86_64.whl", hash = "sha256:3855447d98cced8670aaa63683808df905e956f00348732448b5a6df67ee5849"}, - {file = "Pillow-10.0.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:ed2d9c0704f2dc4fa980b99d565c0c9a543fe5101c25b3d60488b8ba80f0cce1"}, - {file = "Pillow-10.0.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f5bb289bb835f9fe1a1e9300d011eef4d69661bb9b34d5e196e5e82c4cb09b37"}, - {file = "Pillow-10.0.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3a0d3e54ab1df9df51b914b2233cf779a5a10dfd1ce339d0421748232cea9876"}, - {file = "Pillow-10.0.1-cp39-cp39-manylinux_2_28_aarch64.whl", hash = "sha256:2cc6b86ece42a11f16f55fe8903595eff2b25e0358dec635d0a701ac9586588f"}, - {file = "Pillow-10.0.1-cp39-cp39-manylinux_2_28_x86_64.whl", hash = "sha256:ca26ba5767888c84bf5a0c1a32f069e8204ce8c21d00a49c90dabeba00ce0145"}, - {file = "Pillow-10.0.1-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:f0b4b06da13275bc02adfeb82643c4a6385bd08d26f03068c2796f60d125f6f2"}, - {file = "Pillow-10.0.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:bc2e3069569ea9dbe88d6b8ea38f439a6aad8f6e7a6283a38edf61ddefb3a9bf"}, - {file = "Pillow-10.0.1-cp39-cp39-win_amd64.whl", hash = "sha256:8b451d6ead6e3500b6ce5c7916a43d8d8d25ad74b9102a629baccc0808c54971"}, - {file = "Pillow-10.0.1-pp310-pypy310_pp73-macosx_10_10_x86_64.whl", hash = "sha256:32bec7423cdf25c9038fef614a853c9d25c07590e1a870ed471f47fb80b244db"}, - {file = "Pillow-10.0.1-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b7cf63d2c6928b51d35dfdbda6f2c1fddbe51a6bc4a9d4ee6ea0e11670dd981e"}, - {file = "Pillow-10.0.1-pp310-pypy310_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:f6d3d4c905e26354e8f9d82548475c46d8e0889538cb0657aa9c6f0872a37aa4"}, - {file = "Pillow-10.0.1-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:847e8d1017c741c735d3cd1883fa7b03ded4f825a6e5fcb9378fd813edee995f"}, - {file = "Pillow-10.0.1-pp39-pypy39_pp73-macosx_10_10_x86_64.whl", hash = "sha256:7f771e7219ff04b79e231d099c0a28ed83aa82af91fd5fa9fdb28f5b8d5addaf"}, - {file = "Pillow-10.0.1-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:459307cacdd4138edee3875bbe22a2492519e060660eaf378ba3b405d1c66317"}, - {file = "Pillow-10.0.1-pp39-pypy39_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:b059ac2c4c7a97daafa7dc850b43b2d3667def858a4f112d1aa082e5c3d6cf7d"}, - {file = "Pillow-10.0.1-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:d6caf3cd38449ec3cd8a68b375e0c6fe4b6fd04edb6c9766b55ef84a6e8ddf2d"}, - {file = "Pillow-10.0.1.tar.gz", hash = "sha256:d72967b06be9300fed5cfbc8b5bafceec48bf7cdc7dab66b1d2549035287191d"}, + {file = "Pillow-10.1.0-cp310-cp310-macosx_10_10_x86_64.whl", hash = "sha256:1ab05f3db77e98f93964697c8efc49c7954b08dd61cff526b7f2531a22410106"}, + {file = "Pillow-10.1.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:6932a7652464746fcb484f7fc3618e6503d2066d853f68a4bd97193a3996e273"}, + {file = "Pillow-10.1.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a5f63b5a68daedc54c7c3464508d8c12075e56dcfbd42f8c1bf40169061ae666"}, + {file = "Pillow-10.1.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c0949b55eb607898e28eaccb525ab104b2d86542a85c74baf3a6dc24002edec2"}, + {file = "Pillow-10.1.0-cp310-cp310-manylinux_2_28_aarch64.whl", hash = "sha256:ae88931f93214777c7a3aa0a8f92a683f83ecde27f65a45f95f22d289a69e593"}, + {file = "Pillow-10.1.0-cp310-cp310-manylinux_2_28_x86_64.whl", hash = "sha256:b0eb01ca85b2361b09480784a7931fc648ed8b7836f01fb9241141b968feb1db"}, + {file = "Pillow-10.1.0-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:d27b5997bdd2eb9fb199982bb7eb6164db0426904020dc38c10203187ae2ff2f"}, + {file = "Pillow-10.1.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:7df5608bc38bd37ef585ae9c38c9cd46d7c81498f086915b0f97255ea60c2818"}, + {file = "Pillow-10.1.0-cp310-cp310-win_amd64.whl", hash = "sha256:41f67248d92a5e0a2076d3517d8d4b1e41a97e2df10eb8f93106c89107f38b57"}, + {file = "Pillow-10.1.0-cp311-cp311-macosx_10_10_x86_64.whl", hash = "sha256:1fb29c07478e6c06a46b867e43b0bcdb241b44cc52be9bc25ce5944eed4648e7"}, + {file = "Pillow-10.1.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:2cdc65a46e74514ce742c2013cd4a2d12e8553e3a2563c64879f7c7e4d28bce7"}, + {file = "Pillow-10.1.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:50d08cd0a2ecd2a8657bd3d82c71efd5a58edb04d9308185d66c3a5a5bed9610"}, + {file = "Pillow-10.1.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:062a1610e3bc258bff2328ec43f34244fcec972ee0717200cb1425214fe5b839"}, + {file = "Pillow-10.1.0-cp311-cp311-manylinux_2_28_aarch64.whl", hash = "sha256:61f1a9d247317fa08a308daaa8ee7b3f760ab1809ca2da14ecc88ae4257d6172"}, + {file = "Pillow-10.1.0-cp311-cp311-manylinux_2_28_x86_64.whl", hash = "sha256:a646e48de237d860c36e0db37ecaecaa3619e6f3e9d5319e527ccbc8151df061"}, + {file = "Pillow-10.1.0-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:47e5bf85b80abc03be7455c95b6d6e4896a62f6541c1f2ce77a7d2bb832af262"}, + {file = "Pillow-10.1.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:a92386125e9ee90381c3369f57a2a50fa9e6aa8b1cf1d9c4b200d41a7dd8e992"}, + {file = "Pillow-10.1.0-cp311-cp311-win_amd64.whl", hash = "sha256:0f7c276c05a9767e877a0b4c5050c8bee6a6d960d7f0c11ebda6b99746068c2a"}, + {file = "Pillow-10.1.0-cp312-cp312-macosx_10_10_x86_64.whl", hash = "sha256:a89b8312d51715b510a4fe9fc13686283f376cfd5abca8cd1c65e4c76e21081b"}, + {file = "Pillow-10.1.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:00f438bb841382b15d7deb9a05cc946ee0f2c352653c7aa659e75e592f6fa17d"}, + {file = "Pillow-10.1.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3d929a19f5469b3f4df33a3df2983db070ebb2088a1e145e18facbc28cae5b27"}, + {file = "Pillow-10.1.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9a92109192b360634a4489c0c756364c0c3a2992906752165ecb50544c251312"}, + {file = "Pillow-10.1.0-cp312-cp312-manylinux_2_28_aarch64.whl", hash = "sha256:0248f86b3ea061e67817c47ecbe82c23f9dd5d5226200eb9090b3873d3ca32de"}, + {file = "Pillow-10.1.0-cp312-cp312-manylinux_2_28_x86_64.whl", hash = "sha256:9882a7451c680c12f232a422730f986a1fcd808da0fd428f08b671237237d651"}, + {file = "Pillow-10.1.0-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:1c3ac5423c8c1da5928aa12c6e258921956757d976405e9467c5f39d1d577a4b"}, + {file = "Pillow-10.1.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:806abdd8249ba3953c33742506fe414880bad78ac25cc9a9b1c6ae97bedd573f"}, + {file = "Pillow-10.1.0-cp312-cp312-win_amd64.whl", hash = "sha256:eaed6977fa73408b7b8a24e8b14e59e1668cfc0f4c40193ea7ced8e210adf996"}, + {file = "Pillow-10.1.0-cp38-cp38-macosx_10_10_x86_64.whl", hash = "sha256:fe1e26e1ffc38be097f0ba1d0d07fcade2bcfd1d023cda5b29935ae8052bd793"}, + {file = "Pillow-10.1.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:7a7e3daa202beb61821c06d2517428e8e7c1aab08943e92ec9e5755c2fc9ba5e"}, + {file = "Pillow-10.1.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:24fadc71218ad2b8ffe437b54876c9382b4a29e030a05a9879f615091f42ffc2"}, + {file = "Pillow-10.1.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fa1d323703cfdac2036af05191b969b910d8f115cf53093125e4058f62012c9a"}, + {file = "Pillow-10.1.0-cp38-cp38-manylinux_2_28_aarch64.whl", hash = "sha256:912e3812a1dbbc834da2b32299b124b5ddcb664ed354916fd1ed6f193f0e2d01"}, + {file = "Pillow-10.1.0-cp38-cp38-manylinux_2_28_x86_64.whl", hash = "sha256:7dbaa3c7de82ef37e7708521be41db5565004258ca76945ad74a8e998c30af8d"}, + {file = "Pillow-10.1.0-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:9d7bc666bd8c5a4225e7ac71f2f9d12466ec555e89092728ea0f5c0c2422ea80"}, + {file = "Pillow-10.1.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:baada14941c83079bf84c037e2d8b7506ce201e92e3d2fa0d1303507a8538212"}, + {file = "Pillow-10.1.0-cp38-cp38-win_amd64.whl", hash = "sha256:2ef6721c97894a7aa77723740a09547197533146fba8355e86d6d9a4a1056b14"}, + {file = "Pillow-10.1.0-cp39-cp39-macosx_10_10_x86_64.whl", hash = "sha256:0a026c188be3b443916179f5d04548092e253beb0c3e2ee0a4e2cdad72f66099"}, + {file = "Pillow-10.1.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:04f6f6149f266a100374ca3cc368b67fb27c4af9f1cc8cb6306d849dcdf12616"}, + {file = "Pillow-10.1.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:bb40c011447712d2e19cc261c82655f75f32cb724788df315ed992a4d65696bb"}, + {file = "Pillow-10.1.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1a8413794b4ad9719346cd9306118450b7b00d9a15846451549314a58ac42219"}, + {file = "Pillow-10.1.0-cp39-cp39-manylinux_2_28_aarch64.whl", hash = "sha256:c9aeea7b63edb7884b031a35305629a7593272b54f429a9869a4f63a1bf04c34"}, + {file = "Pillow-10.1.0-cp39-cp39-manylinux_2_28_x86_64.whl", hash = "sha256:b4005fee46ed9be0b8fb42be0c20e79411533d1fd58edabebc0dd24626882cfd"}, + {file = "Pillow-10.1.0-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:4d0152565c6aa6ebbfb1e5d8624140a440f2b99bf7afaafbdbf6430426497f28"}, + {file = "Pillow-10.1.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:d921bc90b1defa55c9917ca6b6b71430e4286fc9e44c55ead78ca1a9f9eba5f2"}, + {file = "Pillow-10.1.0-cp39-cp39-win_amd64.whl", hash = "sha256:cfe96560c6ce2f4c07d6647af2d0f3c54cc33289894ebd88cfbb3bcd5391e256"}, + {file = "Pillow-10.1.0-pp310-pypy310_pp73-macosx_10_10_x86_64.whl", hash = "sha256:937bdc5a7f5343d1c97dc98149a0be7eb9704e937fe3dc7140e229ae4fc572a7"}, + {file = "Pillow-10.1.0-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b1c25762197144e211efb5f4e8ad656f36c8d214d390585d1d21281f46d556ba"}, + {file = "Pillow-10.1.0-pp310-pypy310_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:afc8eef765d948543a4775f00b7b8c079b3321d6b675dde0d02afa2ee23000b4"}, + {file = "Pillow-10.1.0-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:883f216eac8712b83a63f41b76ddfb7b2afab1b74abbb413c5df6680f071a6b9"}, + {file = "Pillow-10.1.0-pp39-pypy39_pp73-macosx_10_10_x86_64.whl", hash = "sha256:b920e4d028f6442bea9a75b7491c063f0b9a3972520731ed26c83e254302eb1e"}, + {file = "Pillow-10.1.0-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1c41d960babf951e01a49c9746f92c5a7e0d939d1652d7ba30f6b3090f27e412"}, + {file = "Pillow-10.1.0-pp39-pypy39_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:1fafabe50a6977ac70dfe829b2d5735fd54e190ab55259ec8aea4aaea412fa0b"}, + {file = "Pillow-10.1.0-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:3b834f4b16173e5b92ab6566f0473bfb09f939ba14b23b8da1f54fa63e4b623f"}, + {file = "Pillow-10.1.0.tar.gz", hash = "sha256:e6bf8de6c36ed96c86ea3b6e1d5273c53f46ef518a062464cd7ef5dd2cf92e38"}, ] [package.extras] @@ -1475,47 +1613,47 @@ testing = ["pytest", "pytest-benchmark"] [[package]] name = "protobuf" -version = "4.24.3" +version = "4.25.1" description = "" optional = false -python-versions = ">=3.7" +python-versions = ">=3.8" files = [ - {file = "protobuf-4.24.3-cp310-abi3-win32.whl", hash = "sha256:20651f11b6adc70c0f29efbe8f4a94a74caf61b6200472a9aea6e19898f9fcf4"}, - {file = "protobuf-4.24.3-cp310-abi3-win_amd64.whl", hash = "sha256:3d42e9e4796a811478c783ef63dc85b5a104b44aaaca85d4864d5b886e4b05e3"}, - {file = "protobuf-4.24.3-cp37-abi3-macosx_10_9_universal2.whl", hash = "sha256:6e514e8af0045be2b56e56ae1bb14f43ce7ffa0f68b1c793670ccbe2c4fc7d2b"}, - {file = "protobuf-4.24.3-cp37-abi3-manylinux2014_aarch64.whl", hash = "sha256:ba53c2f04798a326774f0e53b9c759eaef4f6a568ea7072ec6629851c8435959"}, - {file = "protobuf-4.24.3-cp37-abi3-manylinux2014_x86_64.whl", hash = "sha256:f6ccbcf027761a2978c1406070c3788f6de4a4b2cc20800cc03d52df716ad675"}, - {file = "protobuf-4.24.3-cp37-cp37m-win32.whl", hash = "sha256:1b182c7181a2891e8f7f3a1b5242e4ec54d1f42582485a896e4de81aa17540c2"}, - {file = "protobuf-4.24.3-cp37-cp37m-win_amd64.whl", hash = "sha256:b0271a701e6782880d65a308ba42bc43874dabd1a0a0f41f72d2dac3b57f8e76"}, - {file = "protobuf-4.24.3-cp38-cp38-win32.whl", hash = "sha256:e29d79c913f17a60cf17c626f1041e5288e9885c8579832580209de8b75f2a52"}, - {file = "protobuf-4.24.3-cp38-cp38-win_amd64.whl", hash = "sha256:067f750169bc644da2e1ef18c785e85071b7c296f14ac53e0900e605da588719"}, - {file = "protobuf-4.24.3-cp39-cp39-win32.whl", hash = "sha256:2da777d34b4f4f7613cdf85c70eb9a90b1fbef9d36ae4a0ccfe014b0b07906f1"}, - {file = "protobuf-4.24.3-cp39-cp39-win_amd64.whl", hash = "sha256:f631bb982c5478e0c1c70eab383af74a84be66945ebf5dd6b06fc90079668d0b"}, - {file = "protobuf-4.24.3-py3-none-any.whl", hash = "sha256:f6f8dc65625dadaad0c8545319c2e2f0424fede988368893ca3844261342c11a"}, - {file = "protobuf-4.24.3.tar.gz", hash = "sha256:12e9ad2ec079b833176d2921be2cb24281fa591f0b119b208b788adc48c2561d"}, + {file = "protobuf-4.25.1-cp310-abi3-win32.whl", hash = "sha256:193f50a6ab78a970c9b4f148e7c750cfde64f59815e86f686c22e26b4fe01ce7"}, + {file = "protobuf-4.25.1-cp310-abi3-win_amd64.whl", hash = "sha256:3497c1af9f2526962f09329fd61a36566305e6c72da2590ae0d7d1322818843b"}, + {file = "protobuf-4.25.1-cp37-abi3-macosx_10_9_universal2.whl", hash = "sha256:0bf384e75b92c42830c0a679b0cd4d6e2b36ae0cf3dbb1e1dfdda48a244f4bcd"}, + {file = "protobuf-4.25.1-cp37-abi3-manylinux2014_aarch64.whl", hash = "sha256:0f881b589ff449bf0b931a711926e9ddaad3b35089cc039ce1af50b21a4ae8cb"}, + {file = "protobuf-4.25.1-cp37-abi3-manylinux2014_x86_64.whl", hash = "sha256:ca37bf6a6d0046272c152eea90d2e4ef34593aaa32e8873fc14c16440f22d4b7"}, + {file = "protobuf-4.25.1-cp38-cp38-win32.whl", hash = "sha256:abc0525ae2689a8000837729eef7883b9391cd6aa7950249dcf5a4ede230d5dd"}, + {file = "protobuf-4.25.1-cp38-cp38-win_amd64.whl", hash = "sha256:1484f9e692091450e7edf418c939e15bfc8fc68856e36ce399aed6889dae8bb0"}, + {file = "protobuf-4.25.1-cp39-cp39-win32.whl", hash = "sha256:8bdbeaddaac52d15c6dce38c71b03038ef7772b977847eb6d374fc86636fa510"}, + {file = "protobuf-4.25.1-cp39-cp39-win_amd64.whl", hash = "sha256:becc576b7e6b553d22cbdf418686ee4daa443d7217999125c045ad56322dda10"}, + {file = "protobuf-4.25.1-py3-none-any.whl", hash = "sha256:a19731d5e83ae4737bb2a089605e636077ac001d18781b3cf489b9546c7c80d6"}, + {file = "protobuf-4.25.1.tar.gz", hash = "sha256:57d65074b4f5baa4ab5da1605c02be90ac20c8b40fb137d6a8df9f416b0d0ce2"}, ] [[package]] name = "psutil" -version = "5.9.5" +version = "5.9.6" description = "Cross-platform lib for process and system monitoring in Python." -optional = false -python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*" -files = [ - {file = "psutil-5.9.5-cp27-cp27m-macosx_10_9_x86_64.whl", hash = "sha256:be8929ce4313f9f8146caad4272f6abb8bf99fc6cf59344a3167ecd74f4f203f"}, - {file = "psutil-5.9.5-cp27-cp27m-manylinux2010_i686.whl", hash = "sha256:ab8ed1a1d77c95453db1ae00a3f9c50227ebd955437bcf2a574ba8adbf6a74d5"}, - {file = "psutil-5.9.5-cp27-cp27m-manylinux2010_x86_64.whl", hash = "sha256:4aef137f3345082a3d3232187aeb4ac4ef959ba3d7c10c33dd73763fbc063da4"}, - {file = "psutil-5.9.5-cp27-cp27mu-manylinux2010_i686.whl", hash = "sha256:ea8518d152174e1249c4f2a1c89e3e6065941df2fa13a1ab45327716a23c2b48"}, - {file = "psutil-5.9.5-cp27-cp27mu-manylinux2010_x86_64.whl", hash = "sha256:acf2aef9391710afded549ff602b5887d7a2349831ae4c26be7c807c0a39fac4"}, - {file = "psutil-5.9.5-cp27-none-win32.whl", hash = "sha256:5b9b8cb93f507e8dbaf22af6a2fd0ccbe8244bf30b1baad6b3954e935157ae3f"}, - {file = "psutil-5.9.5-cp27-none-win_amd64.whl", hash = "sha256:8c5f7c5a052d1d567db4ddd231a9d27a74e8e4a9c3f44b1032762bd7b9fdcd42"}, - {file = "psutil-5.9.5-cp36-abi3-macosx_10_9_x86_64.whl", hash = "sha256:3c6f686f4225553615612f6d9bc21f1c0e305f75d7d8454f9b46e901778e7217"}, - {file = "psutil-5.9.5-cp36-abi3-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:7a7dd9997128a0d928ed4fb2c2d57e5102bb6089027939f3b722f3a210f9a8da"}, - {file = "psutil-5.9.5-cp36-abi3-manylinux_2_12_x86_64.manylinux2010_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:89518112647f1276b03ca97b65cc7f64ca587b1eb0278383017c2a0dcc26cbe4"}, - {file = "psutil-5.9.5-cp36-abi3-win32.whl", hash = "sha256:104a5cc0e31baa2bcf67900be36acde157756b9c44017b86b2c049f11957887d"}, - {file = "psutil-5.9.5-cp36-abi3-win_amd64.whl", hash = "sha256:b258c0c1c9d145a1d5ceffab1134441c4c5113b2417fafff7315a917a026c3c9"}, - {file = "psutil-5.9.5-cp38-abi3-macosx_11_0_arm64.whl", hash = "sha256:c607bb3b57dc779d55e1554846352b4e358c10fff3abf3514a7a6601beebdb30"}, - {file = "psutil-5.9.5.tar.gz", hash = "sha256:5410638e4df39c54d957fc51ce03048acd8e6d60abc0f5107af51e5fb566eb3c"}, +optional = true +python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*, !=3.5.*" +files = [ + {file = "psutil-5.9.6-cp27-cp27m-macosx_10_9_x86_64.whl", hash = "sha256:fb8a697f11b0f5994550555fcfe3e69799e5b060c8ecf9e2f75c69302cc35c0d"}, + {file = "psutil-5.9.6-cp27-cp27m-manylinux2010_i686.whl", hash = "sha256:91ecd2d9c00db9817a4b4192107cf6954addb5d9d67a969a4f436dbc9200f88c"}, + {file = "psutil-5.9.6-cp27-cp27m-manylinux2010_x86_64.whl", hash = "sha256:10e8c17b4f898d64b121149afb136c53ea8b68c7531155147867b7b1ac9e7e28"}, + {file = "psutil-5.9.6-cp27-cp27mu-manylinux2010_i686.whl", hash = "sha256:18cd22c5db486f33998f37e2bb054cc62fd06646995285e02a51b1e08da97017"}, + {file = "psutil-5.9.6-cp27-cp27mu-manylinux2010_x86_64.whl", hash = "sha256:ca2780f5e038379e520281e4c032dddd086906ddff9ef0d1b9dcf00710e5071c"}, + {file = "psutil-5.9.6-cp27-none-win32.whl", hash = "sha256:70cb3beb98bc3fd5ac9ac617a327af7e7f826373ee64c80efd4eb2856e5051e9"}, + {file = "psutil-5.9.6-cp27-none-win_amd64.whl", hash = "sha256:51dc3d54607c73148f63732c727856f5febec1c7c336f8f41fcbd6315cce76ac"}, + {file = "psutil-5.9.6-cp36-abi3-macosx_10_9_x86_64.whl", hash = "sha256:c69596f9fc2f8acd574a12d5f8b7b1ba3765a641ea5d60fb4736bf3c08a8214a"}, + {file = "psutil-5.9.6-cp36-abi3-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:92e0cc43c524834af53e9d3369245e6cc3b130e78e26100d1f63cdb0abeb3d3c"}, + {file = "psutil-5.9.6-cp36-abi3-manylinux_2_12_x86_64.manylinux2010_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:748c9dd2583ed86347ed65d0035f45fa8c851e8d90354c122ab72319b5f366f4"}, + {file = "psutil-5.9.6-cp36-cp36m-win32.whl", hash = "sha256:3ebf2158c16cc69db777e3c7decb3c0f43a7af94a60d72e87b2823aebac3d602"}, + {file = "psutil-5.9.6-cp36-cp36m-win_amd64.whl", hash = "sha256:ff18b8d1a784b810df0b0fff3bcb50ab941c3b8e2c8de5726f9c71c601c611aa"}, + {file = "psutil-5.9.6-cp37-abi3-win32.whl", hash = "sha256:a6f01f03bf1843280f4ad16f4bde26b817847b4c1a0db59bf6419807bc5ce05c"}, + {file = "psutil-5.9.6-cp37-abi3-win_amd64.whl", hash = "sha256:6e5fb8dc711a514da83098bc5234264e551ad980cec5f85dabf4d38ed6f15e9a"}, + {file = "psutil-5.9.6-cp38-abi3-macosx_11_0_arm64.whl", hash = "sha256:daecbcbd29b289aac14ece28eca6a3e60aa361754cf6da3dfb20d4d32b6c7f57"}, + {file = "psutil-5.9.6.tar.gz", hash = "sha256:e4b92ddcd7dd4cdd3f900180ea1e104932c7bce234fb88976e2a3b296441225a"}, ] [package.extras] @@ -1523,54 +1661,72 @@ test = ["enum34", "ipaddress", "mock", "pywin32", "wmi"] [[package]] name = "pyarrow" -version = "13.0.0" +version = "14.0.1" description = "Python library for Apache Arrow" optional = true python-versions = ">=3.8" files = [ - {file = "pyarrow-13.0.0-cp310-cp310-macosx_10_14_x86_64.whl", hash = "sha256:1afcc2c33f31f6fb25c92d50a86b7a9f076d38acbcb6f9e74349636109550148"}, - {file = "pyarrow-13.0.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:70fa38cdc66b2fc1349a082987f2b499d51d072faaa6b600f71931150de2e0e3"}, - {file = "pyarrow-13.0.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:cd57b13a6466822498238877892a9b287b0a58c2e81e4bdb0b596dbb151cbb73"}, - {file = "pyarrow-13.0.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f8ce69f7bf01de2e2764e14df45b8404fc6f1a5ed9871e8e08a12169f87b7a26"}, - {file = "pyarrow-13.0.0-cp310-cp310-manylinux_2_28_aarch64.whl", hash = "sha256:588f0d2da6cf1b1680974d63be09a6530fd1bd825dc87f76e162404779a157dc"}, - {file = "pyarrow-13.0.0-cp310-cp310-manylinux_2_28_x86_64.whl", hash = "sha256:6241afd72b628787b4abea39e238e3ff9f34165273fad306c7acf780dd850956"}, - {file = "pyarrow-13.0.0-cp310-cp310-win_amd64.whl", hash = "sha256:fda7857e35993673fcda603c07d43889fca60a5b254052a462653f8656c64f44"}, - {file = "pyarrow-13.0.0-cp311-cp311-macosx_10_14_x86_64.whl", hash = "sha256:aac0ae0146a9bfa5e12d87dda89d9ef7c57a96210b899459fc2f785303dcbb67"}, - {file = "pyarrow-13.0.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:d7759994217c86c161c6a8060509cfdf782b952163569606bb373828afdd82e8"}, - {file = "pyarrow-13.0.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:868a073fd0ff6468ae7d869b5fc1f54de5c4255b37f44fb890385eb68b68f95d"}, - {file = "pyarrow-13.0.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:51be67e29f3cfcde263a113c28e96aa04362ed8229cb7c6e5f5c719003659d33"}, - {file = "pyarrow-13.0.0-cp311-cp311-manylinux_2_28_aarch64.whl", hash = "sha256:d1b4e7176443d12610874bb84d0060bf080f000ea9ed7c84b2801df851320295"}, - {file = "pyarrow-13.0.0-cp311-cp311-manylinux_2_28_x86_64.whl", hash = "sha256:69b6f9a089d116a82c3ed819eea8fe67dae6105f0d81eaf0fdd5e60d0c6e0944"}, - {file = "pyarrow-13.0.0-cp311-cp311-win_amd64.whl", hash = "sha256:ab1268db81aeb241200e321e220e7cd769762f386f92f61b898352dd27e402ce"}, - {file = "pyarrow-13.0.0-cp38-cp38-macosx_10_14_x86_64.whl", hash = "sha256:ee7490f0f3f16a6c38f8c680949551053c8194e68de5046e6c288e396dccee80"}, - {file = "pyarrow-13.0.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:e3ad79455c197a36eefbd90ad4aa832bece7f830a64396c15c61a0985e337287"}, - {file = "pyarrow-13.0.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:68fcd2dc1b7d9310b29a15949cdd0cb9bc34b6de767aff979ebf546020bf0ba0"}, - {file = "pyarrow-13.0.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:dc6fd330fd574c51d10638e63c0d00ab456498fc804c9d01f2a61b9264f2c5b2"}, - {file = "pyarrow-13.0.0-cp38-cp38-manylinux_2_28_aarch64.whl", hash = "sha256:e66442e084979a97bb66939e18f7b8709e4ac5f887e636aba29486ffbf373763"}, - {file = "pyarrow-13.0.0-cp38-cp38-manylinux_2_28_x86_64.whl", hash = "sha256:0f6eff839a9e40e9c5610d3ff8c5bdd2f10303408312caf4c8003285d0b49565"}, - {file = "pyarrow-13.0.0-cp38-cp38-win_amd64.whl", hash = "sha256:8b30a27f1cddf5c6efcb67e598d7823a1e253d743d92ac32ec1eb4b6a1417867"}, - {file = "pyarrow-13.0.0-cp39-cp39-macosx_10_14_x86_64.whl", hash = "sha256:09552dad5cf3de2dc0aba1c7c4b470754c69bd821f5faafc3d774bedc3b04bb7"}, - {file = "pyarrow-13.0.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:3896ae6c205d73ad192d2fc1489cd0edfab9f12867c85b4c277af4d37383c18c"}, - {file = "pyarrow-13.0.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6647444b21cb5e68b593b970b2a9a07748dd74ea457c7dadaa15fd469c48ada1"}, - {file = "pyarrow-13.0.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:47663efc9c395e31d09c6aacfa860f4473815ad6804311c5433f7085415d62a7"}, - {file = "pyarrow-13.0.0-cp39-cp39-manylinux_2_28_aarch64.whl", hash = "sha256:b9ba6b6d34bd2563345488cf444510588ea42ad5613df3b3509f48eb80250afd"}, - {file = "pyarrow-13.0.0-cp39-cp39-manylinux_2_28_x86_64.whl", hash = "sha256:d00d374a5625beeb448a7fa23060df79adb596074beb3ddc1838adb647b6ef09"}, - {file = "pyarrow-13.0.0-cp39-cp39-win_amd64.whl", hash = "sha256:c51afd87c35c8331b56f796eff954b9c7f8d4b7fef5903daf4e05fcf017d23a8"}, - {file = "pyarrow-13.0.0.tar.gz", hash = "sha256:83333726e83ed44b0ac94d8d7a21bbdee4a05029c3b1e8db58a863eec8fd8a33"}, + {file = "pyarrow-14.0.1-cp310-cp310-macosx_10_14_x86_64.whl", hash = "sha256:96d64e5ba7dceb519a955e5eeb5c9adcfd63f73a56aea4722e2cc81364fc567a"}, + {file = "pyarrow-14.0.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:1a8ae88c0038d1bc362a682320112ee6774f006134cd5afc291591ee4bc06505"}, + {file = "pyarrow-14.0.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0f6f053cb66dc24091f5511e5920e45c83107f954a21032feadc7b9e3a8e7851"}, + {file = "pyarrow-14.0.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:906b0dc25f2be12e95975722f1e60e162437023f490dbd80d0deb7375baf3171"}, + {file = "pyarrow-14.0.1-cp310-cp310-manylinux_2_28_aarch64.whl", hash = "sha256:78d4a77a46a7de9388b653af1c4ce539350726cd9af62e0831e4f2bd0c95a2f4"}, + {file = "pyarrow-14.0.1-cp310-cp310-manylinux_2_28_x86_64.whl", hash = "sha256:06ca79080ef89d6529bb8e5074d4b4f6086143b2520494fcb7cf8a99079cde93"}, + {file = "pyarrow-14.0.1-cp310-cp310-win_amd64.whl", hash = "sha256:32542164d905002c42dff896efdac79b3bdd7291b1b74aa292fac8450d0e4dcd"}, + {file = "pyarrow-14.0.1-cp311-cp311-macosx_10_14_x86_64.whl", hash = "sha256:c7331b4ed3401b7ee56f22c980608cf273f0380f77d0f73dd3c185f78f5a6220"}, + {file = "pyarrow-14.0.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:922e8b49b88da8633d6cac0e1b5a690311b6758d6f5d7c2be71acb0f1e14cd61"}, + {file = "pyarrow-14.0.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:58c889851ca33f992ea916b48b8540735055201b177cb0dcf0596a495a667b00"}, + {file = "pyarrow-14.0.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:30d8494870d9916bb53b2a4384948491444741cb9a38253c590e21f836b01222"}, + {file = "pyarrow-14.0.1-cp311-cp311-manylinux_2_28_aarch64.whl", hash = "sha256:be28e1a07f20391bb0b15ea03dcac3aade29fc773c5eb4bee2838e9b2cdde0cb"}, + {file = "pyarrow-14.0.1-cp311-cp311-manylinux_2_28_x86_64.whl", hash = "sha256:981670b4ce0110d8dcb3246410a4aabf5714db5d8ea63b15686bce1c914b1f83"}, + {file = "pyarrow-14.0.1-cp311-cp311-win_amd64.whl", hash = "sha256:4756a2b373a28f6166c42711240643fb8bd6322467e9aacabd26b488fa41ec23"}, + {file = "pyarrow-14.0.1-cp312-cp312-macosx_10_14_x86_64.whl", hash = "sha256:cf87e2cec65dd5cf1aa4aba918d523ef56ef95597b545bbaad01e6433851aa10"}, + {file = "pyarrow-14.0.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:470ae0194fbfdfbf4a6b65b4f9e0f6e1fa0ea5b90c1ee6b65b38aecee53508c8"}, + {file = "pyarrow-14.0.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6263cffd0c3721c1e348062997babdf0151301f7353010c9c9a8ed47448f82ab"}, + {file = "pyarrow-14.0.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7a8089d7e77d1455d529dbd7cff08898bbb2666ee48bc4085203af1d826a33cc"}, + {file = "pyarrow-14.0.1-cp312-cp312-manylinux_2_28_aarch64.whl", hash = "sha256:fada8396bc739d958d0b81d291cfd201126ed5e7913cb73de6bc606befc30226"}, + {file = "pyarrow-14.0.1-cp312-cp312-manylinux_2_28_x86_64.whl", hash = "sha256:2a145dab9ed7849fc1101bf03bcdc69913547f10513fdf70fc3ab6c0a50c7eee"}, + {file = "pyarrow-14.0.1-cp312-cp312-win_amd64.whl", hash = "sha256:05fe7994745b634c5fb16ce5717e39a1ac1fac3e2b0795232841660aa76647cd"}, + {file = "pyarrow-14.0.1-cp38-cp38-macosx_10_14_x86_64.whl", hash = "sha256:a8eeef015ae69d104c4c3117a6011e7e3ecd1abec79dc87fd2fac6e442f666ee"}, + {file = "pyarrow-14.0.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:3c76807540989fe8fcd02285dd15e4f2a3da0b09d27781abec3adc265ddbeba1"}, + {file = "pyarrow-14.0.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:450e4605e3c20e558485f9161a79280a61c55efe585d51513c014de9ae8d393f"}, + {file = "pyarrow-14.0.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:323cbe60210173ffd7db78bfd50b80bdd792c4c9daca8843ef3cd70b186649db"}, + {file = "pyarrow-14.0.1-cp38-cp38-manylinux_2_28_aarch64.whl", hash = "sha256:0140c7e2b740e08c5a459439d87acd26b747fc408bde0a8806096ee0baaa0c15"}, + {file = "pyarrow-14.0.1-cp38-cp38-manylinux_2_28_x86_64.whl", hash = "sha256:e592e482edd9f1ab32f18cd6a716c45b2c0f2403dc2af782f4e9674952e6dd27"}, + {file = "pyarrow-14.0.1-cp38-cp38-win_amd64.whl", hash = "sha256:d264ad13605b61959f2ae7c1d25b1a5b8505b112715c961418c8396433f213ad"}, + {file = "pyarrow-14.0.1-cp39-cp39-macosx_10_14_x86_64.whl", hash = "sha256:01e44de9749cddc486169cb632f3c99962318e9dacac7778315a110f4bf8a450"}, + {file = "pyarrow-14.0.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:d0351fecf0e26e152542bc164c22ea2a8e8c682726fce160ce4d459ea802d69c"}, + {file = "pyarrow-14.0.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:33c1f6110c386464fd2e5e4ea3624466055bbe681ff185fd6c9daa98f30a3f9a"}, + {file = "pyarrow-14.0.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:11e045dfa09855b6d3e7705a37c42e2dc2c71d608fab34d3c23df2e02df9aec3"}, + {file = "pyarrow-14.0.1-cp39-cp39-manylinux_2_28_aarch64.whl", hash = "sha256:097828b55321897db0e1dbfc606e3ff8101ae5725673498cbfa7754ee0da80e4"}, + {file = "pyarrow-14.0.1-cp39-cp39-manylinux_2_28_x86_64.whl", hash = "sha256:1daab52050a1c48506c029e6fa0944a7b2436334d7e44221c16f6f1b2cc9c510"}, + {file = "pyarrow-14.0.1-cp39-cp39-win_amd64.whl", hash = "sha256:3f6d5faf4f1b0d5a7f97be987cf9e9f8cd39902611e818fe134588ee99bf0283"}, + {file = "pyarrow-14.0.1.tar.gz", hash = "sha256:b8b3f4fe8d4ec15e1ef9b599b94683c5216adaed78d5cb4c606180546d1e2ee1"}, ] [package.dependencies] numpy = ">=1.16.6" +[[package]] +name = "pyarrow-hotfix" +version = "0.6" +description = "" +optional = true +python-versions = ">=3.5" +files = [ + {file = "pyarrow_hotfix-0.6-py3-none-any.whl", hash = "sha256:dcc9ae2d220dff0083be6a9aa8e0cdee5182ad358d4931fce825c545e5c89178"}, + {file = "pyarrow_hotfix-0.6.tar.gz", hash = "sha256:79d3e030f7ff890d408a100ac16d6f00b14d44a502d7897cd9fc3e3a534e9945"}, +] + [[package]] name = "pytest" -version = "7.4.2" +version = "7.4.3" description = "pytest: simple powerful testing with Python" optional = false python-versions = ">=3.7" files = [ - {file = "pytest-7.4.2-py3-none-any.whl", hash = "sha256:1d881c6124e08ff0a1bb75ba3ec0bfd8b5354a01c194ddd5a0a870a48d99b002"}, - {file = "pytest-7.4.2.tar.gz", hash = "sha256:a766259cfab564a2ad52cb1aae1b881a75c3eb7e34ca3779697c23ed47c47069"}, + {file = "pytest-7.4.3-py3-none-any.whl", hash = "sha256:0d009c083ea859a71b76adf7c1d502e4bc170b80a8ef002da5806527b9591fac"}, + {file = "pytest-7.4.3.tar.gz", hash = "sha256:d989d136982de4e3b29dabcc838ad581c64e8ed52c11fbe86ddebd9da0818cd5"}, ] [package.dependencies] @@ -1867,36 +2023,36 @@ torch = ["numpy (>=1.21.6)", "torch (>=1.10)"] [[package]] name = "scipy" -version = "1.11.3" +version = "1.11.4" description = "Fundamental algorithms for scientific computing in Python" optional = false -python-versions = "<3.13,>=3.9" -files = [ - {file = "scipy-1.11.3-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:370f569c57e1d888304052c18e58f4a927338eafdaef78613c685ca2ea0d1fa0"}, - {file = "scipy-1.11.3-cp310-cp310-macosx_12_0_arm64.whl", hash = "sha256:9885e3e4f13b2bd44aaf2a1a6390a11add9f48d5295f7a592393ceb8991577a3"}, - {file = "scipy-1.11.3-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e04aa19acc324a1a076abb4035dabe9b64badb19f76ad9c798bde39d41025cdc"}, - {file = "scipy-1.11.3-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3e1a8a4657673bfae1e05e1e1d6e94b0cabe5ed0c7c144c8aa7b7dbb774ce5c1"}, - {file = "scipy-1.11.3-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:7abda0e62ef00cde826d441485e2e32fe737bdddee3324e35c0e01dee65e2a88"}, - {file = "scipy-1.11.3-cp310-cp310-win_amd64.whl", hash = "sha256:033c3fd95d55012dd1148b201b72ae854d5086d25e7c316ec9850de4fe776929"}, - {file = "scipy-1.11.3-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:925c6f09d0053b1c0f90b2d92d03b261e889b20d1c9b08a3a51f61afc5f58165"}, - {file = "scipy-1.11.3-cp311-cp311-macosx_12_0_arm64.whl", hash = "sha256:5664e364f90be8219283eeb844323ff8cd79d7acbd64e15eb9c46b9bc7f6a42a"}, - {file = "scipy-1.11.3-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:00f325434b6424952fbb636506f0567898dca7b0f7654d48f1c382ea338ce9a3"}, - {file = "scipy-1.11.3-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5f290cf561a4b4edfe8d1001ee4be6da60c1c4ea712985b58bf6bc62badee221"}, - {file = "scipy-1.11.3-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:91770cb3b1e81ae19463b3c235bf1e0e330767dca9eb4cd73ba3ded6c4151e4d"}, - {file = "scipy-1.11.3-cp311-cp311-win_amd64.whl", hash = "sha256:e1f97cd89c0fe1a0685f8f89d85fa305deb3067d0668151571ba50913e445820"}, - {file = "scipy-1.11.3-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:dfcc1552add7cb7c13fb70efcb2389d0624d571aaf2c80b04117e2755a0c5d15"}, - {file = "scipy-1.11.3-cp312-cp312-macosx_12_0_arm64.whl", hash = "sha256:0d3a136ae1ff0883fffbb1b05b0b2fea251cb1046a5077d0b435a1839b3e52b7"}, - {file = "scipy-1.11.3-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:bae66a2d7d5768eaa33008fa5a974389f167183c87bf39160d3fefe6664f8ddc"}, - {file = "scipy-1.11.3-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d2f6dee6cbb0e263b8142ed587bc93e3ed5e777f1f75448d24fb923d9fd4dce6"}, - {file = "scipy-1.11.3-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:74e89dc5e00201e71dd94f5f382ab1c6a9f3ff806c7d24e4e90928bb1aafb280"}, - {file = "scipy-1.11.3-cp312-cp312-win_amd64.whl", hash = "sha256:90271dbde4be191522b3903fc97334e3956d7cfb9cce3f0718d0ab4fd7d8bfd6"}, - {file = "scipy-1.11.3-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:a63d1ec9cadecce838467ce0631c17c15c7197ae61e49429434ba01d618caa83"}, - {file = "scipy-1.11.3-cp39-cp39-macosx_12_0_arm64.whl", hash = "sha256:5305792c7110e32ff155aed0df46aa60a60fc6e52cd4ee02cdeb67eaccd5356e"}, - {file = "scipy-1.11.3-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9ea7f579182d83d00fed0e5c11a4aa5ffe01460444219dedc448a36adf0c3917"}, - {file = "scipy-1.11.3-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c77da50c9a91e23beb63c2a711ef9e9ca9a2060442757dffee34ea41847d8156"}, - {file = "scipy-1.11.3-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:15f237e890c24aef6891c7d008f9ff7e758c6ef39a2b5df264650eb7900403c0"}, - {file = "scipy-1.11.3-cp39-cp39-win_amd64.whl", hash = "sha256:4b4bb134c7aa457e26cc6ea482b016fef45db71417d55cc6d8f43d799cdf9ef2"}, - {file = "scipy-1.11.3.tar.gz", hash = "sha256:bba4d955f54edd61899776bad459bf7326e14b9fa1c552181f0479cc60a568cd"}, +python-versions = ">=3.9" +files = [ + {file = "scipy-1.11.4-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:bc9a714581f561af0848e6b69947fda0614915f072dfd14142ed1bfe1b806710"}, + {file = "scipy-1.11.4-cp310-cp310-macosx_12_0_arm64.whl", hash = "sha256:cf00bd2b1b0211888d4dc75656c0412213a8b25e80d73898083f402b50f47e41"}, + {file = "scipy-1.11.4-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b9999c008ccf00e8fbcce1236f85ade5c569d13144f77a1946bef8863e8f6eb4"}, + {file = "scipy-1.11.4-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:933baf588daa8dc9a92c20a0be32f56d43faf3d1a60ab11b3f08c356430f6e56"}, + {file = "scipy-1.11.4-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:8fce70f39076a5aa62e92e69a7f62349f9574d8405c0a5de6ed3ef72de07f446"}, + {file = "scipy-1.11.4-cp310-cp310-win_amd64.whl", hash = "sha256:6550466fbeec7453d7465e74d4f4b19f905642c89a7525571ee91dd7adabb5a3"}, + {file = "scipy-1.11.4-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:f313b39a7e94f296025e3cffc2c567618174c0b1dde173960cf23808f9fae4be"}, + {file = "scipy-1.11.4-cp311-cp311-macosx_12_0_arm64.whl", hash = "sha256:1b7c3dca977f30a739e0409fb001056484661cb2541a01aba0bb0029f7b68db8"}, + {file = "scipy-1.11.4-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:00150c5eae7b610c32589dda259eacc7c4f1665aedf25d921907f4d08a951b1c"}, + {file = "scipy-1.11.4-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:530f9ad26440e85766509dbf78edcfe13ffd0ab7fec2560ee5c36ff74d6269ff"}, + {file = "scipy-1.11.4-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:5e347b14fe01003d3b78e196e84bd3f48ffe4c8a7b8a1afbcb8f5505cb710993"}, + {file = "scipy-1.11.4-cp311-cp311-win_amd64.whl", hash = "sha256:acf8ed278cc03f5aff035e69cb511741e0418681d25fbbb86ca65429c4f4d9cd"}, + {file = "scipy-1.11.4-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:028eccd22e654b3ea01ee63705681ee79933652b2d8f873e7949898dda6d11b6"}, + {file = "scipy-1.11.4-cp312-cp312-macosx_12_0_arm64.whl", hash = "sha256:2c6ff6ef9cc27f9b3db93a6f8b38f97387e6e0591600369a297a50a8e96e835d"}, + {file = "scipy-1.11.4-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b030c6674b9230d37c5c60ab456e2cf12f6784596d15ce8da9365e70896effc4"}, + {file = "scipy-1.11.4-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ad669df80528aeca5f557712102538f4f37e503f0c5b9541655016dd0932ca79"}, + {file = "scipy-1.11.4-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:ce7fff2e23ab2cc81ff452a9444c215c28e6305f396b2ba88343a567feec9660"}, + {file = "scipy-1.11.4-cp312-cp312-win_amd64.whl", hash = "sha256:36750b7733d960d7994888f0d148d31ea3017ac15eef664194b4ef68d36a4a97"}, + {file = "scipy-1.11.4-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:6e619aba2df228a9b34718efb023966da781e89dd3d21637b27f2e54db0410d7"}, + {file = "scipy-1.11.4-cp39-cp39-macosx_12_0_arm64.whl", hash = "sha256:f3cd9e7b3c2c1ec26364856f9fbe78695fe631150f94cd1c22228456404cf1ec"}, + {file = "scipy-1.11.4-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d10e45a6c50211fe256da61a11c34927c68f277e03138777bdebedd933712fea"}, + {file = "scipy-1.11.4-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:91af76a68eeae0064887a48e25c4e616fa519fa0d38602eda7e0f97d65d57937"}, + {file = "scipy-1.11.4-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:6df1468153a31cf55ed5ed39647279beb9cfb5d3f84369453b49e4b8502394fd"}, + {file = "scipy-1.11.4-cp39-cp39-win_amd64.whl", hash = "sha256:ee410e6de8f88fd5cf6eadd73c135020bfbbbdfcd0f6162c36a7638a1ea8cc65"}, + {file = "scipy-1.11.4.tar.gz", hash = "sha256:90a2b78e7f5733b9de748f589f09225013685f9b218275257f8a8168ededaeaa"}, ] [package.dependencies] @@ -1963,17 +2119,17 @@ files = [ [[package]] name = "setuptools" -version = "68.2.2" +version = "69.0.2" description = "Easily download, build, install, upgrade, and uninstall Python packages" optional = false python-versions = ">=3.8" files = [ - {file = "setuptools-68.2.2-py3-none-any.whl", hash = "sha256:b454a35605876da60632df1a60f736524eb73cc47bbc9f3f1ef1b644de74fd2a"}, - {file = "setuptools-68.2.2.tar.gz", hash = "sha256:4ac1475276d2f1c48684874089fefcd83bd7162ddaafb81fac866ba0db282a87"}, + {file = "setuptools-69.0.2-py3-none-any.whl", hash = "sha256:1e8fdff6797d3865f37397be788a4e3cba233608e9b509382a2777d25ebde7f2"}, + {file = "setuptools-69.0.2.tar.gz", hash = "sha256:735896e78a4742605974de002ac60562d286fa8051a7e2299445e8e8fbb01aa6"}, ] [package.extras] -docs = ["furo", "jaraco.packaging (>=9.3)", "jaraco.tidelift (>=1.4)", "pygments-github-lexers (==0.0.5)", "rst.linker (>=1.9)", "sphinx (>=3.5)", "sphinx-favicon", "sphinx-hoverxref (<2)", "sphinx-inline-tabs", "sphinx-lint", "sphinx-notfound-page (>=1,<2)", "sphinx-reredirects", "sphinxcontrib-towncrier"] +docs = ["furo", "jaraco.packaging (>=9.3)", "jaraco.tidelift (>=1.4)", "pygments-github-lexers (==0.0.5)", "rst.linker (>=1.9)", "sphinx (<7.2.5)", "sphinx (>=3.5)", "sphinx-favicon", "sphinx-inline-tabs", "sphinx-lint", "sphinx-notfound-page (>=1,<2)", "sphinx-reredirects", "sphinxcontrib-towncrier"] testing = ["build[virtualenv]", "filelock (>=3.4.0)", "flake8-2020", "ini2toml[lite] (>=0.9)", "jaraco.develop (>=7.21)", "jaraco.envs (>=2.2)", "jaraco.path (>=3.2.0)", "pip (>=19.1)", "pytest (>=6)", "pytest-black (>=0.3.7)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=2.2)", "pytest-mypy (>=0.9.1)", "pytest-perf", "pytest-ruff", "pytest-timeout", "pytest-xdist", "tomli-w (>=1.0.0)", "virtualenv (>=13.0.0)", "wheel"] testing-integration = ["build[virtualenv] (>=1.0.3)", "filelock (>=3.4.0)", "jaraco.envs (>=2.2)", "jaraco.path (>=3.2.0)", "packaging (>=23.1)", "pytest", "pytest-enabler", "pytest-xdist", "tomli", "virtualenv (>=13.0.0)", "wheel"] @@ -1992,7 +2148,7 @@ files = [ name = "sympy" version = "1.12" description = "Computer algebra system (CAS) in Python" -optional = false +optional = true python-versions = ">=3.8" files = [ {file = "sympy-1.12-py3-none-any.whl", hash = "sha256:c3588cd4295d0c0f603d0f2ae780587e64e2efeedb3521e46b9bb1d08d184fa5"}, @@ -2080,41 +2236,55 @@ files = [ [[package]] name = "torch" -version = "2.0.1" +version = "2.1.1" description = "Tensors and Dynamic neural networks in Python with strong GPU acceleration" -optional = false +optional = true python-versions = ">=3.8.0" files = [ - {file = "torch-2.0.1-cp310-cp310-manylinux1_x86_64.whl", hash = "sha256:8ced00b3ba471856b993822508f77c98f48a458623596a4c43136158781e306a"}, - {file = "torch-2.0.1-cp310-cp310-manylinux2014_aarch64.whl", hash = "sha256:359bfaad94d1cda02ab775dc1cc386d585712329bb47b8741607ef6ef4950747"}, - {file = "torch-2.0.1-cp310-cp310-win_amd64.whl", hash = "sha256:7c84e44d9002182edd859f3400deaa7410f5ec948a519cc7ef512c2f9b34d2c4"}, - {file = "torch-2.0.1-cp310-none-macosx_10_9_x86_64.whl", hash = "sha256:567f84d657edc5582d716900543e6e62353dbe275e61cdc36eda4929e46df9e7"}, - {file = "torch-2.0.1-cp310-none-macosx_11_0_arm64.whl", hash = "sha256:787b5a78aa7917465e9b96399b883920c88a08f4eb63b5a5d2d1a16e27d2f89b"}, - {file = "torch-2.0.1-cp311-cp311-manylinux1_x86_64.whl", hash = "sha256:e617b1d0abaf6ced02dbb9486803abfef0d581609b09641b34fa315c9c40766d"}, - {file = "torch-2.0.1-cp311-cp311-manylinux2014_aarch64.whl", hash = "sha256:b6019b1de4978e96daa21d6a3ebb41e88a0b474898fe251fd96189587408873e"}, - {file = "torch-2.0.1-cp311-cp311-win_amd64.whl", hash = "sha256:dbd68cbd1cd9da32fe5d294dd3411509b3d841baecb780b38b3b7b06c7754434"}, - {file = "torch-2.0.1-cp311-none-macosx_10_9_x86_64.whl", hash = "sha256:ef654427d91600129864644e35deea761fb1fe131710180b952a6f2e2207075e"}, - {file = "torch-2.0.1-cp311-none-macosx_11_0_arm64.whl", hash = "sha256:25aa43ca80dcdf32f13da04c503ec7afdf8e77e3a0183dd85cd3e53b2842e527"}, - {file = "torch-2.0.1-cp38-cp38-manylinux1_x86_64.whl", hash = "sha256:5ef3ea3d25441d3957348f7e99c7824d33798258a2bf5f0f0277cbcadad2e20d"}, - {file = "torch-2.0.1-cp38-cp38-manylinux2014_aarch64.whl", hash = "sha256:0882243755ff28895e8e6dc6bc26ebcf5aa0911ed81b2a12f241fc4b09075b13"}, - {file = "torch-2.0.1-cp38-cp38-win_amd64.whl", hash = "sha256:f66aa6b9580a22b04d0af54fcd042f52406a8479e2b6a550e3d9f95963e168c8"}, - {file = "torch-2.0.1-cp38-none-macosx_10_9_x86_64.whl", hash = "sha256:1adb60d369f2650cac8e9a95b1d5758e25d526a34808f7448d0bd599e4ae9072"}, - {file = "torch-2.0.1-cp38-none-macosx_11_0_arm64.whl", hash = "sha256:1bcffc16b89e296826b33b98db5166f990e3b72654a2b90673e817b16c50e32b"}, - {file = "torch-2.0.1-cp39-cp39-manylinux1_x86_64.whl", hash = "sha256:e10e1597f2175365285db1b24019eb6f04d53dcd626c735fc502f1e8b6be9875"}, - {file = "torch-2.0.1-cp39-cp39-manylinux2014_aarch64.whl", hash = "sha256:423e0ae257b756bb45a4b49072046772d1ad0c592265c5080070e0767da4e490"}, - {file = "torch-2.0.1-cp39-cp39-win_amd64.whl", hash = "sha256:8742bdc62946c93f75ff92da00e3803216c6cce9b132fbca69664ca38cfb3e18"}, - {file = "torch-2.0.1-cp39-none-macosx_10_9_x86_64.whl", hash = "sha256:c62df99352bd6ee5a5a8d1832452110435d178b5164de450831a3a8cc14dc680"}, - {file = "torch-2.0.1-cp39-none-macosx_11_0_arm64.whl", hash = "sha256:671a2565e3f63b8fe8e42ae3e36ad249fe5e567435ea27b94edaa672a7d0c416"}, + {file = "torch-2.1.1-cp310-cp310-manylinux1_x86_64.whl", hash = "sha256:5ebc43f5355a9b7be813392b3fb0133991f0380f6f0fcc8218d5468dc45d1071"}, + {file = "torch-2.1.1-cp310-cp310-manylinux2014_aarch64.whl", hash = "sha256:84fefd63356416c0cd20578637ccdbb82164993400ed17b57c951dd6376dcee8"}, + {file = "torch-2.1.1-cp310-cp310-win_amd64.whl", hash = "sha256:0a7a9da0c324409bcb5a7bdad1b4e94e936d21c2590aaa7ac2f63968da8c62f7"}, + {file = "torch-2.1.1-cp310-none-macosx_10_9_x86_64.whl", hash = "sha256:1e1e5faddd43a8f2c0e0e22beacd1e235a2e447794d807483c94a9e31b54a758"}, + {file = "torch-2.1.1-cp310-none-macosx_11_0_arm64.whl", hash = "sha256:e76bf3c5c354874f1da465c852a2fb60ee6cbce306e935337885760f080f9baa"}, + {file = "torch-2.1.1-cp311-cp311-manylinux1_x86_64.whl", hash = "sha256:98fea993639b0bb432dfceb7b538f07c0f1c33386d63f635219f49254968c80f"}, + {file = "torch-2.1.1-cp311-cp311-manylinux2014_aarch64.whl", hash = "sha256:61b51b33c61737c287058b0c3061e6a9d3c363863e4a094f804bc486888a188a"}, + {file = "torch-2.1.1-cp311-cp311-win_amd64.whl", hash = "sha256:1d70920da827e2276bf07f7ec46958621cad18d228c97da8f9c19638474dbd52"}, + {file = "torch-2.1.1-cp311-none-macosx_10_9_x86_64.whl", hash = "sha256:a70593806f1d7e6b53657d96810518da0f88ef2608c98a402955765b8c79d52c"}, + {file = "torch-2.1.1-cp311-none-macosx_11_0_arm64.whl", hash = "sha256:e312f7e82e49565f7667b0bbf9559ab0c597063d93044740781c02acd5a87978"}, + {file = "torch-2.1.1-cp38-cp38-manylinux1_x86_64.whl", hash = "sha256:1e3cbecfa5a7314d828f4a37b0c286714dc9aa2e69beb7a22f7aca76567ed9f4"}, + {file = "torch-2.1.1-cp38-cp38-manylinux2014_aarch64.whl", hash = "sha256:9ca0fcbf3d5ba644d6a8572c83a9abbdf5f7ff575bc38529ef6c185a3a71bde9"}, + {file = "torch-2.1.1-cp38-cp38-win_amd64.whl", hash = "sha256:2dc9f312fc1fa0d61a565a0292ad73119d4b74c9f8b5031b55f8b4722abca079"}, + {file = "torch-2.1.1-cp38-none-macosx_10_9_x86_64.whl", hash = "sha256:d56b032176458e2af4709627bbd2c20fe2917eff8cd087a7fe313acccf5ce2f1"}, + {file = "torch-2.1.1-cp38-none-macosx_11_0_arm64.whl", hash = "sha256:29e3b90a8c281f6660804a939d1f4218604c80162e521e1e6d8c8557325902a0"}, + {file = "torch-2.1.1-cp39-cp39-manylinux1_x86_64.whl", hash = "sha256:bd95cee8511584b67ddc0ba465c3f1edeb5708d833ee02af1206b4486f1d9096"}, + {file = "torch-2.1.1-cp39-cp39-manylinux2014_aarch64.whl", hash = "sha256:b31230bd058424e56dba7f899280dbc6ac8b9948e43902e0c84a44666b1ec151"}, + {file = "torch-2.1.1-cp39-cp39-win_amd64.whl", hash = "sha256:403f1095e665e4f35971b43797a920725b8b205723aa68254a4050c6beca29b6"}, + {file = "torch-2.1.1-cp39-none-macosx_10_9_x86_64.whl", hash = "sha256:715b50d8c1de5da5524a68287eb000f73e026e74d5f6b12bc450ef6995fcf5f9"}, + {file = "torch-2.1.1-cp39-none-macosx_11_0_arm64.whl", hash = "sha256:db67e8725c76f4c7f4f02e7551bb16e81ba1a1912867bc35d7bb96d2be8c78b4"}, ] [package.dependencies] filelock = "*" +fsspec = "*" jinja2 = "*" networkx = "*" +nvidia-cublas-cu12 = {version = "12.1.3.1", markers = "platform_system == \"Linux\" and platform_machine == \"x86_64\""} +nvidia-cuda-cupti-cu12 = {version = "12.1.105", markers = "platform_system == \"Linux\" and platform_machine == \"x86_64\""} +nvidia-cuda-nvrtc-cu12 = {version = "12.1.105", markers = "platform_system == \"Linux\" and platform_machine == \"x86_64\""} +nvidia-cuda-runtime-cu12 = {version = "12.1.105", markers = "platform_system == \"Linux\" and platform_machine == \"x86_64\""} +nvidia-cudnn-cu12 = {version = "8.9.2.26", markers = "platform_system == \"Linux\" and platform_machine == \"x86_64\""} +nvidia-cufft-cu12 = {version = "11.0.2.54", markers = "platform_system == \"Linux\" and platform_machine == \"x86_64\""} +nvidia-curand-cu12 = {version = "10.3.2.106", markers = "platform_system == \"Linux\" and platform_machine == \"x86_64\""} +nvidia-cusolver-cu12 = {version = "11.4.5.107", markers = "platform_system == \"Linux\" and platform_machine == \"x86_64\""} +nvidia-cusparse-cu12 = {version = "12.1.0.106", markers = "platform_system == \"Linux\" and platform_machine == \"x86_64\""} +nvidia-nccl-cu12 = {version = "2.18.1", markers = "platform_system == \"Linux\" and platform_machine == \"x86_64\""} +nvidia-nvtx-cu12 = {version = "12.1.105", markers = "platform_system == \"Linux\" and platform_machine == \"x86_64\""} sympy = "*" +triton = {version = "2.1.0", markers = "platform_system == \"Linux\" and platform_machine == \"x86_64\""} typing-extensions = "*" [package.extras] +dynamo = ["jinja2"] opt-einsum = ["opt-einsum (>=3.3)"] [[package]] @@ -2206,6 +2376,31 @@ torchhub = ["filelock", "huggingface-hub (>=0.15.1,<1.0)", "importlib-metadata", video = ["av (==9.2.0)", "decord (==0.6.0)"] vision = ["Pillow (<10.0.0)"] +[[package]] +name = "triton" +version = "2.1.0" +description = "A language and compiler for custom Deep Learning operations" +optional = true +python-versions = "*" +files = [ + {file = "triton-2.1.0-0-cp310-cp310-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:66439923a30d5d48399b08a9eae10370f6c261a5ec864a64983bae63152d39d7"}, + {file = "triton-2.1.0-0-cp311-cp311-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:919b06453f0033ea52c13eaf7833de0e57db3178d23d4e04f9fc71c4f2c32bf8"}, + {file = "triton-2.1.0-0-cp37-cp37m-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:ae4bb8a91de790e1866405211c4d618379781188f40d5c4c399766914e84cd94"}, + {file = "triton-2.1.0-0-cp38-cp38-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:39f6fb6bdccb3e98f3152e3fbea724f1aeae7d749412bbb1fa9c441d474eba26"}, + {file = "triton-2.1.0-0-cp39-cp39-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:21544e522c02005a626c8ad63d39bdff2f31d41069592919ef281e964ed26446"}, + {file = "triton-2.1.0-0-pp37-pypy37_pp73-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:143582ca31dd89cd982bd3bf53666bab1c7527d41e185f9e3d8a3051ce1b663b"}, + {file = "triton-2.1.0-0-pp38-pypy38_pp73-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:82fc5aeeedf6e36be4e4530cbdcba81a09d65c18e02f52dc298696d45721f3bd"}, + {file = "triton-2.1.0-0-pp39-pypy39_pp73-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:81a96d110a738ff63339fc892ded095b31bd0d205e3aace262af8400d40b6fa8"}, +] + +[package.dependencies] +filelock = "*" + +[package.extras] +build = ["cmake (>=3.18)", "lit"] +tests = ["autopep8", "flake8", "isort", "numpy", "pytest", "scipy (>=1.7.1)"] +tutorials = ["matplotlib", "pandas", "tabulate"] + [[package]] name = "typer" version = "0.6.1" @@ -2250,18 +2445,17 @@ files = [ [[package]] name = "urllib3" -version = "2.0.6" +version = "2.1.0" description = "HTTP library with thread-safe connection pooling, file post, and more." optional = false -python-versions = ">=3.7" +python-versions = ">=3.8" files = [ - {file = "urllib3-2.0.6-py3-none-any.whl", hash = "sha256:7a7c7003b000adf9e7ca2a377c9688bbc54ed41b985789ed576570342a375cd2"}, - {file = "urllib3-2.0.6.tar.gz", hash = "sha256:b19e1a85d206b56d7df1d5e683df4a7725252a964e3993648dd0fb5a1c157564"}, + {file = "urllib3-2.1.0-py3-none-any.whl", hash = "sha256:55901e917a5896a349ff771be919f8bd99aff50b79fe58fec595eb37bbc56bb3"}, + {file = "urllib3-2.1.0.tar.gz", hash = "sha256:df7aa8afb0148fa78488e7899b2c59b5f4ffcfa82e6c54ccb9dd37c1d7b52d54"}, ] [package.extras] brotli = ["brotli (>=1.0.9)", "brotlicffi (>=0.8.0)"] -secure = ["certifi", "cryptography (>=1.9)", "idna (>=2.0.0)", "pyopenssl (>=17.1.0)", "urllib3-secure-extra"] socks = ["pysocks (>=1.5.6,!=1.5.7,<2.0)"] zstd = ["zstandard (>=0.18.0)"] @@ -2281,263 +2475,297 @@ dev = ["black (>=19.3b0)", "pytest (>=4.6.2)"] [[package]] name = "wrapt" -version = "1.15.0" +version = "1.16.0" description = "Module for decorators, wrappers and monkey patching." optional = false -python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,>=2.7" -files = [ - {file = "wrapt-1.15.0-cp27-cp27m-macosx_10_9_x86_64.whl", hash = "sha256:ca1cccf838cd28d5a0883b342474c630ac48cac5df0ee6eacc9c7290f76b11c1"}, - {file = "wrapt-1.15.0-cp27-cp27m-manylinux1_i686.whl", hash = "sha256:e826aadda3cae59295b95343db8f3d965fb31059da7de01ee8d1c40a60398b29"}, - {file = "wrapt-1.15.0-cp27-cp27m-manylinux1_x86_64.whl", hash = "sha256:5fc8e02f5984a55d2c653f5fea93531e9836abbd84342c1d1e17abc4a15084c2"}, - {file = "wrapt-1.15.0-cp27-cp27m-manylinux2010_i686.whl", hash = "sha256:96e25c8603a155559231c19c0349245eeb4ac0096fe3c1d0be5c47e075bd4f46"}, - {file = "wrapt-1.15.0-cp27-cp27m-manylinux2010_x86_64.whl", hash = "sha256:40737a081d7497efea35ab9304b829b857f21558acfc7b3272f908d33b0d9d4c"}, - {file = "wrapt-1.15.0-cp27-cp27mu-manylinux1_i686.whl", hash = "sha256:f87ec75864c37c4c6cb908d282e1969e79763e0d9becdfe9fe5473b7bb1e5f09"}, - {file = "wrapt-1.15.0-cp27-cp27mu-manylinux1_x86_64.whl", hash = "sha256:1286eb30261894e4c70d124d44b7fd07825340869945c79d05bda53a40caa079"}, - {file = "wrapt-1.15.0-cp27-cp27mu-manylinux2010_i686.whl", hash = "sha256:493d389a2b63c88ad56cdc35d0fa5752daac56ca755805b1b0c530f785767d5e"}, - {file = "wrapt-1.15.0-cp27-cp27mu-manylinux2010_x86_64.whl", hash = "sha256:58d7a75d731e8c63614222bcb21dd992b4ab01a399f1f09dd82af17bbfc2368a"}, - {file = "wrapt-1.15.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:21f6d9a0d5b3a207cdf7acf8e58d7d13d463e639f0c7e01d82cdb671e6cb7923"}, - {file = "wrapt-1.15.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:ce42618f67741d4697684e501ef02f29e758a123aa2d669e2d964ff734ee00ee"}, - {file = "wrapt-1.15.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:41d07d029dd4157ae27beab04d22b8e261eddfc6ecd64ff7000b10dc8b3a5727"}, - {file = "wrapt-1.15.0-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:54accd4b8bc202966bafafd16e69da9d5640ff92389d33d28555c5fd4f25ccb7"}, - {file = "wrapt-1.15.0-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2fbfbca668dd15b744418265a9607baa970c347eefd0db6a518aaf0cfbd153c0"}, - {file = "wrapt-1.15.0-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:76e9c727a874b4856d11a32fb0b389afc61ce8aaf281ada613713ddeadd1cfec"}, - {file = "wrapt-1.15.0-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:e20076a211cd6f9b44a6be58f7eeafa7ab5720eb796975d0c03f05b47d89eb90"}, - {file = "wrapt-1.15.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:a74d56552ddbde46c246b5b89199cb3fd182f9c346c784e1a93e4dc3f5ec9975"}, - {file = "wrapt-1.15.0-cp310-cp310-win32.whl", hash = "sha256:26458da5653aa5b3d8dc8b24192f574a58984c749401f98fff994d41d3f08da1"}, - {file = "wrapt-1.15.0-cp310-cp310-win_amd64.whl", hash = "sha256:75760a47c06b5974aa5e01949bf7e66d2af4d08cb8c1d6516af5e39595397f5e"}, - {file = "wrapt-1.15.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:ba1711cda2d30634a7e452fc79eabcadaffedf241ff206db2ee93dd2c89a60e7"}, - {file = "wrapt-1.15.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:56374914b132c702aa9aa9959c550004b8847148f95e1b824772d453ac204a72"}, - {file = "wrapt-1.15.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a89ce3fd220ff144bd9d54da333ec0de0399b52c9ac3d2ce34b569cf1a5748fb"}, - {file = "wrapt-1.15.0-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:3bbe623731d03b186b3d6b0d6f51865bf598587c38d6f7b0be2e27414f7f214e"}, - {file = "wrapt-1.15.0-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3abbe948c3cbde2689370a262a8d04e32ec2dd4f27103669a45c6929bcdbfe7c"}, - {file = "wrapt-1.15.0-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:b67b819628e3b748fd3c2192c15fb951f549d0f47c0449af0764d7647302fda3"}, - {file = "wrapt-1.15.0-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:7eebcdbe3677e58dd4c0e03b4f2cfa346ed4049687d839adad68cc38bb559c92"}, - {file = "wrapt-1.15.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:74934ebd71950e3db69960a7da29204f89624dde411afbfb3b4858c1409b1e98"}, - {file = "wrapt-1.15.0-cp311-cp311-win32.whl", hash = "sha256:bd84395aab8e4d36263cd1b9308cd504f6cf713b7d6d3ce25ea55670baec5416"}, - {file = "wrapt-1.15.0-cp311-cp311-win_amd64.whl", hash = "sha256:a487f72a25904e2b4bbc0817ce7a8de94363bd7e79890510174da9d901c38705"}, - {file = "wrapt-1.15.0-cp35-cp35m-manylinux1_i686.whl", hash = "sha256:4ff0d20f2e670800d3ed2b220d40984162089a6e2c9646fdb09b85e6f9a8fc29"}, - {file = "wrapt-1.15.0-cp35-cp35m-manylinux1_x86_64.whl", hash = "sha256:9ed6aa0726b9b60911f4aed8ec5b8dd7bf3491476015819f56473ffaef8959bd"}, - {file = "wrapt-1.15.0-cp35-cp35m-manylinux2010_i686.whl", hash = "sha256:896689fddba4f23ef7c718279e42f8834041a21342d95e56922e1c10c0cc7afb"}, - {file = "wrapt-1.15.0-cp35-cp35m-manylinux2010_x86_64.whl", hash = "sha256:75669d77bb2c071333417617a235324a1618dba66f82a750362eccbe5b61d248"}, - {file = "wrapt-1.15.0-cp35-cp35m-win32.whl", hash = "sha256:fbec11614dba0424ca72f4e8ba3c420dba07b4a7c206c8c8e4e73f2e98f4c559"}, - {file = "wrapt-1.15.0-cp35-cp35m-win_amd64.whl", hash = "sha256:fd69666217b62fa5d7c6aa88e507493a34dec4fa20c5bd925e4bc12fce586639"}, - {file = "wrapt-1.15.0-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:b0724f05c396b0a4c36a3226c31648385deb6a65d8992644c12a4963c70326ba"}, - {file = "wrapt-1.15.0-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:bbeccb1aa40ab88cd29e6c7d8585582c99548f55f9b2581dfc5ba68c59a85752"}, - {file = "wrapt-1.15.0-cp36-cp36m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:38adf7198f8f154502883242f9fe7333ab05a5b02de7d83aa2d88ea621f13364"}, - {file = "wrapt-1.15.0-cp36-cp36m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:578383d740457fa790fdf85e6d346fda1416a40549fe8db08e5e9bd281c6a475"}, - {file = "wrapt-1.15.0-cp36-cp36m-musllinux_1_1_aarch64.whl", hash = "sha256:a4cbb9ff5795cd66f0066bdf5947f170f5d63a9274f99bdbca02fd973adcf2a8"}, - {file = "wrapt-1.15.0-cp36-cp36m-musllinux_1_1_i686.whl", hash = "sha256:af5bd9ccb188f6a5fdda9f1f09d9f4c86cc8a539bd48a0bfdc97723970348418"}, - {file = "wrapt-1.15.0-cp36-cp36m-musllinux_1_1_x86_64.whl", hash = "sha256:b56d5519e470d3f2fe4aa7585f0632b060d532d0696c5bdfb5e8319e1d0f69a2"}, - {file = "wrapt-1.15.0-cp36-cp36m-win32.whl", hash = "sha256:77d4c1b881076c3ba173484dfa53d3582c1c8ff1f914c6461ab70c8428b796c1"}, - {file = "wrapt-1.15.0-cp36-cp36m-win_amd64.whl", hash = "sha256:077ff0d1f9d9e4ce6476c1a924a3332452c1406e59d90a2cf24aeb29eeac9420"}, - {file = "wrapt-1.15.0-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:5c5aa28df055697d7c37d2099a7bc09f559d5053c3349b1ad0c39000e611d317"}, - {file = "wrapt-1.15.0-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3a8564f283394634a7a7054b7983e47dbf39c07712d7b177b37e03f2467a024e"}, - {file = "wrapt-1.15.0-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:780c82a41dc493b62fc5884fb1d3a3b81106642c5c5c78d6a0d4cbe96d62ba7e"}, - {file = "wrapt-1.15.0-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e169e957c33576f47e21864cf3fc9ff47c223a4ebca8960079b8bd36cb014fd0"}, - {file = "wrapt-1.15.0-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:b02f21c1e2074943312d03d243ac4388319f2456576b2c6023041c4d57cd7019"}, - {file = "wrapt-1.15.0-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:f2e69b3ed24544b0d3dbe2c5c0ba5153ce50dcebb576fdc4696d52aa22db6034"}, - {file = "wrapt-1.15.0-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:d787272ed958a05b2c86311d3a4135d3c2aeea4fc655705f074130aa57d71653"}, - {file = "wrapt-1.15.0-cp37-cp37m-win32.whl", hash = "sha256:02fce1852f755f44f95af51f69d22e45080102e9d00258053b79367d07af39c0"}, - {file = "wrapt-1.15.0-cp37-cp37m-win_amd64.whl", hash = "sha256:abd52a09d03adf9c763d706df707c343293d5d106aea53483e0ec8d9e310ad5e"}, - {file = "wrapt-1.15.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:cdb4f085756c96a3af04e6eca7f08b1345e94b53af8921b25c72f096e704e145"}, - {file = "wrapt-1.15.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:230ae493696a371f1dbffaad3dafbb742a4d27a0afd2b1aecebe52b740167e7f"}, - {file = "wrapt-1.15.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:63424c681923b9f3bfbc5e3205aafe790904053d42ddcc08542181a30a7a51bd"}, - {file = "wrapt-1.15.0-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d6bcbfc99f55655c3d93feb7ef3800bd5bbe963a755687cbf1f490a71fb7794b"}, - {file = "wrapt-1.15.0-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c99f4309f5145b93eca6e35ac1a988f0dc0a7ccf9ccdcd78d3c0adf57224e62f"}, - {file = "wrapt-1.15.0-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:b130fe77361d6771ecf5a219d8e0817d61b236b7d8b37cc045172e574ed219e6"}, - {file = "wrapt-1.15.0-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:96177eb5645b1c6985f5c11d03fc2dbda9ad24ec0f3a46dcce91445747e15094"}, - {file = "wrapt-1.15.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:d5fe3e099cf07d0fb5a1e23d399e5d4d1ca3e6dfcbe5c8570ccff3e9208274f7"}, - {file = "wrapt-1.15.0-cp38-cp38-win32.whl", hash = "sha256:abd8f36c99512755b8456047b7be10372fca271bf1467a1caa88db991e7c421b"}, - {file = "wrapt-1.15.0-cp38-cp38-win_amd64.whl", hash = "sha256:b06fa97478a5f478fb05e1980980a7cdf2712015493b44d0c87606c1513ed5b1"}, - {file = "wrapt-1.15.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:2e51de54d4fb8fb50d6ee8327f9828306a959ae394d3e01a1ba8b2f937747d86"}, - {file = "wrapt-1.15.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:0970ddb69bba00670e58955f8019bec4a42d1785db3faa043c33d81de2bf843c"}, - {file = "wrapt-1.15.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:76407ab327158c510f44ded207e2f76b657303e17cb7a572ffe2f5a8a48aa04d"}, - {file = "wrapt-1.15.0-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:cd525e0e52a5ff16653a3fc9e3dd827981917d34996600bbc34c05d048ca35cc"}, - {file = "wrapt-1.15.0-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9d37ac69edc5614b90516807de32d08cb8e7b12260a285ee330955604ed9dd29"}, - {file = "wrapt-1.15.0-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:078e2a1a86544e644a68422f881c48b84fef6d18f8c7a957ffd3f2e0a74a0d4a"}, - {file = "wrapt-1.15.0-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:2cf56d0e237280baed46f0b5316661da892565ff58309d4d2ed7dba763d984b8"}, - {file = "wrapt-1.15.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:7dc0713bf81287a00516ef43137273b23ee414fe41a3c14be10dd95ed98a2df9"}, - {file = "wrapt-1.15.0-cp39-cp39-win32.whl", hash = "sha256:46ed616d5fb42f98630ed70c3529541408166c22cdfd4540b88d5f21006b0eff"}, - {file = "wrapt-1.15.0-cp39-cp39-win_amd64.whl", hash = "sha256:eef4d64c650f33347c1f9266fa5ae001440b232ad9b98f1f43dfe7a79435c0a6"}, - {file = "wrapt-1.15.0-py3-none-any.whl", hash = "sha256:64b1df0f83706b4ef4cfb4fb0e4c2669100fd7ecacfb59e091fad300d4e04640"}, - {file = "wrapt-1.15.0.tar.gz", hash = "sha256:d06730c6aed78cee4126234cf2d071e01b44b915e725a6cb439a879ec9754a3a"}, +python-versions = ">=3.6" +files = [ + {file = "wrapt-1.16.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:ffa565331890b90056c01db69c0fe634a776f8019c143a5ae265f9c6bc4bd6d4"}, + {file = "wrapt-1.16.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:e4fdb9275308292e880dcbeb12546df7f3e0f96c6b41197e0cf37d2826359020"}, + {file = "wrapt-1.16.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:bb2dee3874a500de01c93d5c71415fcaef1d858370d405824783e7a8ef5db440"}, + {file = "wrapt-1.16.0-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:2a88e6010048489cda82b1326889ec075a8c856c2e6a256072b28eaee3ccf487"}, + {file = "wrapt-1.16.0-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ac83a914ebaf589b69f7d0a1277602ff494e21f4c2f743313414378f8f50a4cf"}, + {file = "wrapt-1.16.0-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:73aa7d98215d39b8455f103de64391cb79dfcad601701a3aa0dddacf74911d72"}, + {file = "wrapt-1.16.0-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:807cc8543a477ab7422f1120a217054f958a66ef7314f76dd9e77d3f02cdccd0"}, + {file = "wrapt-1.16.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:bf5703fdeb350e36885f2875d853ce13172ae281c56e509f4e6eca049bdfb136"}, + {file = "wrapt-1.16.0-cp310-cp310-win32.whl", hash = "sha256:f6b2d0c6703c988d334f297aa5df18c45e97b0af3679bb75059e0e0bd8b1069d"}, + {file = "wrapt-1.16.0-cp310-cp310-win_amd64.whl", hash = "sha256:decbfa2f618fa8ed81c95ee18a387ff973143c656ef800c9f24fb7e9c16054e2"}, + {file = "wrapt-1.16.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:1a5db485fe2de4403f13fafdc231b0dbae5eca4359232d2efc79025527375b09"}, + {file = "wrapt-1.16.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:75ea7d0ee2a15733684badb16de6794894ed9c55aa5e9903260922f0482e687d"}, + {file = "wrapt-1.16.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a452f9ca3e3267cd4d0fcf2edd0d035b1934ac2bd7e0e57ac91ad6b95c0c6389"}, + {file = "wrapt-1.16.0-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:43aa59eadec7890d9958748db829df269f0368521ba6dc68cc172d5d03ed8060"}, + {file = "wrapt-1.16.0-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:72554a23c78a8e7aa02abbd699d129eead8b147a23c56e08d08dfc29cfdddca1"}, + {file = "wrapt-1.16.0-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:d2efee35b4b0a347e0d99d28e884dfd82797852d62fcd7ebdeee26f3ceb72cf3"}, + {file = "wrapt-1.16.0-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:6dcfcffe73710be01d90cae08c3e548d90932d37b39ef83969ae135d36ef3956"}, + {file = "wrapt-1.16.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:eb6e651000a19c96f452c85132811d25e9264d836951022d6e81df2fff38337d"}, + {file = "wrapt-1.16.0-cp311-cp311-win32.whl", hash = "sha256:66027d667efe95cc4fa945af59f92c5a02c6f5bb6012bff9e60542c74c75c362"}, + {file = "wrapt-1.16.0-cp311-cp311-win_amd64.whl", hash = "sha256:aefbc4cb0a54f91af643660a0a150ce2c090d3652cf4052a5397fb2de549cd89"}, + {file = "wrapt-1.16.0-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:5eb404d89131ec9b4f748fa5cfb5346802e5ee8836f57d516576e61f304f3b7b"}, + {file = "wrapt-1.16.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:9090c9e676d5236a6948330e83cb89969f433b1943a558968f659ead07cb3b36"}, + {file = "wrapt-1.16.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:94265b00870aa407bd0cbcfd536f17ecde43b94fb8d228560a1e9d3041462d73"}, + {file = "wrapt-1.16.0-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f2058f813d4f2b5e3a9eb2eb3faf8f1d99b81c3e51aeda4b168406443e8ba809"}, + {file = "wrapt-1.16.0-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:98b5e1f498a8ca1858a1cdbffb023bfd954da4e3fa2c0cb5853d40014557248b"}, + {file = "wrapt-1.16.0-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:14d7dc606219cdd7405133c713f2c218d4252f2a469003f8c46bb92d5d095d81"}, + {file = "wrapt-1.16.0-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:49aac49dc4782cb04f58986e81ea0b4768e4ff197b57324dcbd7699c5dfb40b9"}, + {file = "wrapt-1.16.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:418abb18146475c310d7a6dc71143d6f7adec5b004ac9ce08dc7a34e2babdc5c"}, + {file = "wrapt-1.16.0-cp312-cp312-win32.whl", hash = "sha256:685f568fa5e627e93f3b52fda002c7ed2fa1800b50ce51f6ed1d572d8ab3e7fc"}, + {file = "wrapt-1.16.0-cp312-cp312-win_amd64.whl", hash = "sha256:dcdba5c86e368442528f7060039eda390cc4091bfd1dca41e8046af7c910dda8"}, + {file = "wrapt-1.16.0-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:d462f28826f4657968ae51d2181a074dfe03c200d6131690b7d65d55b0f360f8"}, + {file = "wrapt-1.16.0-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a33a747400b94b6d6b8a165e4480264a64a78c8a4c734b62136062e9a248dd39"}, + {file = "wrapt-1.16.0-cp36-cp36m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:b3646eefa23daeba62643a58aac816945cadc0afaf21800a1421eeba5f6cfb9c"}, + {file = "wrapt-1.16.0-cp36-cp36m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3ebf019be5c09d400cf7b024aa52b1f3aeebeff51550d007e92c3c1c4afc2a40"}, + {file = "wrapt-1.16.0-cp36-cp36m-musllinux_1_1_aarch64.whl", hash = "sha256:0d2691979e93d06a95a26257adb7bfd0c93818e89b1406f5a28f36e0d8c1e1fc"}, + {file = "wrapt-1.16.0-cp36-cp36m-musllinux_1_1_i686.whl", hash = "sha256:1acd723ee2a8826f3d53910255643e33673e1d11db84ce5880675954183ec47e"}, + {file = "wrapt-1.16.0-cp36-cp36m-musllinux_1_1_x86_64.whl", hash = "sha256:bc57efac2da352a51cc4658878a68d2b1b67dbe9d33c36cb826ca449d80a8465"}, + {file = "wrapt-1.16.0-cp36-cp36m-win32.whl", hash = "sha256:da4813f751142436b075ed7aa012a8778aa43a99f7b36afe9b742d3ed8bdc95e"}, + {file = "wrapt-1.16.0-cp36-cp36m-win_amd64.whl", hash = "sha256:6f6eac2360f2d543cc875a0e5efd413b6cbd483cb3ad7ebf888884a6e0d2e966"}, + {file = "wrapt-1.16.0-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:a0ea261ce52b5952bf669684a251a66df239ec6d441ccb59ec7afa882265d593"}, + {file = "wrapt-1.16.0-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7bd2d7ff69a2cac767fbf7a2b206add2e9a210e57947dd7ce03e25d03d2de292"}, + {file = "wrapt-1.16.0-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:9159485323798c8dc530a224bd3ffcf76659319ccc7bbd52e01e73bd0241a0c5"}, + {file = "wrapt-1.16.0-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a86373cf37cd7764f2201b76496aba58a52e76dedfaa698ef9e9688bfd9e41cf"}, + {file = "wrapt-1.16.0-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:73870c364c11f03ed072dda68ff7aea6d2a3a5c3fe250d917a429c7432e15228"}, + {file = "wrapt-1.16.0-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:b935ae30c6e7400022b50f8d359c03ed233d45b725cfdd299462f41ee5ffba6f"}, + {file = "wrapt-1.16.0-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:db98ad84a55eb09b3c32a96c576476777e87c520a34e2519d3e59c44710c002c"}, + {file = "wrapt-1.16.0-cp37-cp37m-win32.whl", hash = "sha256:9153ed35fc5e4fa3b2fe97bddaa7cbec0ed22412b85bcdaf54aeba92ea37428c"}, + {file = "wrapt-1.16.0-cp37-cp37m-win_amd64.whl", hash = "sha256:66dfbaa7cfa3eb707bbfcd46dab2bc6207b005cbc9caa2199bcbc81d95071a00"}, + {file = "wrapt-1.16.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:1dd50a2696ff89f57bd8847647a1c363b687d3d796dc30d4dd4a9d1689a706f0"}, + {file = "wrapt-1.16.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:44a2754372e32ab315734c6c73b24351d06e77ffff6ae27d2ecf14cf3d229202"}, + {file = "wrapt-1.16.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8e9723528b9f787dc59168369e42ae1c3b0d3fadb2f1a71de14531d321ee05b0"}, + {file = "wrapt-1.16.0-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:dbed418ba5c3dce92619656802cc5355cb679e58d0d89b50f116e4a9d5a9603e"}, + {file = "wrapt-1.16.0-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:941988b89b4fd6b41c3f0bfb20e92bd23746579736b7343283297c4c8cbae68f"}, + {file = "wrapt-1.16.0-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:6a42cd0cfa8ffc1915aef79cb4284f6383d8a3e9dcca70c445dcfdd639d51267"}, + {file = "wrapt-1.16.0-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:1ca9b6085e4f866bd584fb135a041bfc32cab916e69f714a7d1d397f8c4891ca"}, + {file = "wrapt-1.16.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:d5e49454f19ef621089e204f862388d29e6e8d8b162efce05208913dde5b9ad6"}, + {file = "wrapt-1.16.0-cp38-cp38-win32.whl", hash = "sha256:c31f72b1b6624c9d863fc095da460802f43a7c6868c5dda140f51da24fd47d7b"}, + {file = "wrapt-1.16.0-cp38-cp38-win_amd64.whl", hash = "sha256:490b0ee15c1a55be9c1bd8609b8cecd60e325f0575fc98f50058eae366e01f41"}, + {file = "wrapt-1.16.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:9b201ae332c3637a42f02d1045e1d0cccfdc41f1f2f801dafbaa7e9b4797bfc2"}, + {file = "wrapt-1.16.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:2076fad65c6736184e77d7d4729b63a6d1ae0b70da4868adeec40989858eb3fb"}, + {file = "wrapt-1.16.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c5cd603b575ebceca7da5a3a251e69561bec509e0b46e4993e1cac402b7247b8"}, + {file = "wrapt-1.16.0-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:b47cfad9e9bbbed2339081f4e346c93ecd7ab504299403320bf85f7f85c7d46c"}, + {file = "wrapt-1.16.0-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f8212564d49c50eb4565e502814f694e240c55551a5f1bc841d4fcaabb0a9b8a"}, + {file = "wrapt-1.16.0-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:5f15814a33e42b04e3de432e573aa557f9f0f56458745c2074952f564c50e664"}, + {file = "wrapt-1.16.0-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:db2e408d983b0e61e238cf579c09ef7020560441906ca990fe8412153e3b291f"}, + {file = "wrapt-1.16.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:edfad1d29c73f9b863ebe7082ae9321374ccb10879eeabc84ba3b69f2579d537"}, + {file = "wrapt-1.16.0-cp39-cp39-win32.whl", hash = "sha256:ed867c42c268f876097248e05b6117a65bcd1e63b779e916fe2e33cd6fd0d3c3"}, + {file = "wrapt-1.16.0-cp39-cp39-win_amd64.whl", hash = "sha256:eb1b046be06b0fce7249f1d025cd359b4b80fc1c3e24ad9eca33e0dcdb2e4a35"}, + {file = "wrapt-1.16.0-py3-none-any.whl", hash = "sha256:6906c4100a8fcbf2fa735f6059214bb13b97f75b1a61777fcf6432121ef12ef1"}, + {file = "wrapt-1.16.0.tar.gz", hash = "sha256:5f370f952971e7d17c7d1ead40e49f32345a7f7a5373571ef44d800d06b1899d"}, ] [[package]] name = "xxhash" -version = "3.3.0" +version = "3.4.1" description = "Python binding for xxHash" optional = true python-versions = ">=3.7" files = [ - {file = "xxhash-3.3.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:70ef7288d1cb1ad16e02d101ea43bb0e392d985d60b9b0035aee80663530960d"}, - {file = "xxhash-3.3.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:44ff8c673cab50be46784e0aec62aa6f0ca9ea765e2b0690e8945d0cd950dcaf"}, - {file = "xxhash-3.3.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:dfebc90273ae2beb813d8118a2bfffb5a5a81ac054fbfd061ea18fd0a81db0ac"}, - {file = "xxhash-3.3.0-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:9084e68bedbd665c7e9241a7b597c28f4775edeb3941bf608ecb38732a5f8fb5"}, - {file = "xxhash-3.3.0-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d72493a14a3e89564b1a6c7400b9b40621e8f4692410706ef27c66aeadc7b431"}, - {file = "xxhash-3.3.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:98779cbe9068dd7734cc3210693894d5cc9b156920e9c336f10fb99f46bebbd8"}, - {file = "xxhash-3.3.0-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:499f8a12767dd28b98ab6b7c7da7d294564e4c9024a2aaa5d0b0b98a8bef2f92"}, - {file = "xxhash-3.3.0-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:4dabda7f42c548f98d8e07e390bda2953fc58302c0e07ded7b3fe0637e7ecd2f"}, - {file = "xxhash-3.3.0-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:c416409646c793c46370f0f1859253302ee70aeda5278c2a0ca41462f8ec1244"}, - {file = "xxhash-3.3.0-cp310-cp310-musllinux_1_1_ppc64le.whl", hash = "sha256:b8bd31aaad8a80a7302730676cec26bea3ef1fd9835875aa47fea073aca9fe05"}, - {file = "xxhash-3.3.0-cp310-cp310-musllinux_1_1_s390x.whl", hash = "sha256:3af8e3bcd630f905efbdfe7a51b51fc1ca3c9dca8b155f841925f3ad41685d41"}, - {file = "xxhash-3.3.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:d86b79c707fc7025d967af71db652429a06a8179175e45bd2e9f17b8af6f5949"}, - {file = "xxhash-3.3.0-cp310-cp310-win32.whl", hash = "sha256:98fe771f36ee9d3a1f5741424a956a2ba9651d9508a9f64a024b57f2cf796414"}, - {file = "xxhash-3.3.0-cp310-cp310-win_amd64.whl", hash = "sha256:0a65131f7f731ecf7e3dd27f09d877aff3000a79a446caaa2c0d8d0ec0bc7186"}, - {file = "xxhash-3.3.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:a9761e425e79d23797fa0bec2d781dbadb9fe5dcc2bf69030855f5e393c3bec8"}, - {file = "xxhash-3.3.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:d28c7ef1deb3c3ac5f5290176ca3d501daa97c2e1f7443bf5d8b61ac651794b2"}, - {file = "xxhash-3.3.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:701b7cefffc25de1b7ddfae6505da70a3b3a11e312c2e2b33b09e180bbceb43d"}, - {file = "xxhash-3.3.0-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:b1644f8b8e19a242c3047a089541067248a651038cabb9fcab3c13eb1dfcd757"}, - {file = "xxhash-3.3.0-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:20e7d0e3488cc0f0dbe360731b7fe32e1f2df46bf2de2db3317d301efb93084c"}, - {file = "xxhash-3.3.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:156c52eca2b20f9839723bef0b929a290f6c2f1c98ccb24e82f58f96f3c16007"}, - {file = "xxhash-3.3.0-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:2d6ce4d3828d79044ed08994e196c20f69c18133ed8a4286afe3e98989adeeac"}, - {file = "xxhash-3.3.0-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:b85b63757ade2439c8d7d71842c40d42c0ab3b69279ed02afbd3b1635f7d2b4b"}, - {file = "xxhash-3.3.0-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:b2b9051e40b7b649a9a2a38fb223ca6a593d332012df885746b81968948f9435"}, - {file = "xxhash-3.3.0-cp311-cp311-musllinux_1_1_ppc64le.whl", hash = "sha256:81b7ce050f26fc1daaaa0d24e320815306736d14608e1ba31920e693a7ca9afb"}, - {file = "xxhash-3.3.0-cp311-cp311-musllinux_1_1_s390x.whl", hash = "sha256:7442500fcce71669953ca959682dcd47452bc3f9c95c8d88315874aeabec9f82"}, - {file = "xxhash-3.3.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:36a05bf59a515cfb07f3f83373c527fff2ecaa77eaf30c968c788aea582070a1"}, - {file = "xxhash-3.3.0-cp311-cp311-win32.whl", hash = "sha256:da16f9cd62c6fde74683be1b28c28ef865e706da13e3bee4ba836fcc520de0cc"}, - {file = "xxhash-3.3.0-cp311-cp311-win_amd64.whl", hash = "sha256:40fd49ef6964b1c90c0bea63cd184f6d0b36e59144a080e8b3ac2c4c06bf6bf2"}, - {file = "xxhash-3.3.0-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:672c60cce1f8026ae32c651f877aa64f342876083a36a4b1ff91bc876aaf0e34"}, - {file = "xxhash-3.3.0-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6bb6c83d7a65dd3065566c77425ba72df96982174e8ef613d809052d68ae77ab"}, - {file = "xxhash-3.3.0-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:a4170f3016b621e3200ebfcc18de6f50eb8e8fc1303e16324b1f5625afd51b57"}, - {file = "xxhash-3.3.0-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:bfb9c45d502ab38c0f4edf98a678694ae0f345613ef4900ade98c71f64db4d78"}, - {file = "xxhash-3.3.0-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:48af026a2b1569666da42a478248a1f03f4e2350a34eb661afe3cb45429ca1d7"}, - {file = "xxhash-3.3.0-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:fe627de8fe8ddfa8b6477bda4ae5d5843ad1a0c83601dcff72247039465cc901"}, - {file = "xxhash-3.3.0-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:427fc60a188e345534f35b0aa76f7640c5ddf0354f1c9ad826a2bc086282982d"}, - {file = "xxhash-3.3.0-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:d80acb20c7f268fe3150ac0be6a6b798062af56a1795eef855b26c9eae11a99c"}, - {file = "xxhash-3.3.0-cp37-cp37m-musllinux_1_1_ppc64le.whl", hash = "sha256:e71100818943422d1fbbe460e7be7fc4f2d2ba9371b2a745eb09e29ef0493f4a"}, - {file = "xxhash-3.3.0-cp37-cp37m-musllinux_1_1_s390x.whl", hash = "sha256:e3b9bb5fdbe284c7b61c5d82c76688e52bbaf48ab1e53de98c072cc696fa331f"}, - {file = "xxhash-3.3.0-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:1e25f6c8c46cf1ed8237f610abb231093a748c97d6c2c092789a7cad7e7ef290"}, - {file = "xxhash-3.3.0-cp37-cp37m-win32.whl", hash = "sha256:928208dfecc563be59ae91868d1658e78809cb1e6a0bd74960a96c915db6390c"}, - {file = "xxhash-3.3.0-cp37-cp37m-win_amd64.whl", hash = "sha256:bd1b4531a66da6dde1974662c1fd6fb1a2f27e40542e3df5e5e5dbab8ea4aee7"}, - {file = "xxhash-3.3.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:deebb296df92e082b6d0171a7d6227b503e2897cea4f8bdd3d708094974d4cf6"}, - {file = "xxhash-3.3.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:cd96e9cb0e2baa294e6d572207d9731c3bb8e2511f1ff70f2bf17266b4488bd9"}, - {file = "xxhash-3.3.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3756b44bf247e422a2e47a38f25d03cf4a5ed539fdc2be3c60043e872e6ff13d"}, - {file = "xxhash-3.3.0-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:69550c3c053b8f135ceac97b85dc1b2bc54b7613a966f550f32b43bed81c788a"}, - {file = "xxhash-3.3.0-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:9fc8736fc3e0c5aad435520873b9d2e27ddcc5a830b07e00e9c4d3a61ded9675"}, - {file = "xxhash-3.3.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:80ead7774392efbd95f9f701155048f9ca26cf55133db6f5bb5a0ec69376bda5"}, - {file = "xxhash-3.3.0-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:b8737c9b3fd944d856faafa92c95f6198649ad57987935b6d965d086938be917"}, - {file = "xxhash-3.3.0-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:2c8e078d0b9f85212801c41bd9eec8122003929686b0ee33360ffbfdf1a189ab"}, - {file = "xxhash-3.3.0-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:f399269d20ef1dd910331f9ad49e8510c3ba2aa657b623293b536038f266a5c5"}, - {file = "xxhash-3.3.0-cp38-cp38-musllinux_1_1_ppc64le.whl", hash = "sha256:f3661decef5f9ff7ab50edbef463bf7dc717621b56755dbae5458a946a033b10"}, - {file = "xxhash-3.3.0-cp38-cp38-musllinux_1_1_s390x.whl", hash = "sha256:5ec374d0f1e7d43ef48a4ff643600833d7a325ecc6933b4d6ad9282f55751cf7"}, - {file = "xxhash-3.3.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:39a947ff02d9a85673f5ce1f6f34059e24c714a797440485bd81b2c3cb69a7ff"}, - {file = "xxhash-3.3.0-cp38-cp38-win32.whl", hash = "sha256:4a4f0645a0ec03b229fb04f2e66bdbcb1ffd341a70d6c86c3ee015ffdcd70fad"}, - {file = "xxhash-3.3.0-cp38-cp38-win_amd64.whl", hash = "sha256:8af5a687c0fb4357c230eec8a57ca07d3172faa3cb69beb0cbad40672ae6fa4b"}, - {file = "xxhash-3.3.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:e5bfafda019ecc6202af6f3cf08220fa66af9612ba16ef831033ae3ac7bd1f89"}, - {file = "xxhash-3.3.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:3d113b433bc817adf845689a051363777835577858263ec4325d1934fcb7e394"}, - {file = "xxhash-3.3.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:56aacf4bf65f575c0392be958aceff719d850950bb6af7d804b32d4bc293159c"}, - {file = "xxhash-3.3.0-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:0f5d3e4e0937dad05585e9bd772bbdf0ca40cd8b2f54789d7a1f3091b608118c"}, - {file = "xxhash-3.3.0-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:23605d7fc67bc7daa0d263b3a26de3375cfcc0b51ab7de5026625415c05b6fed"}, - {file = "xxhash-3.3.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fe525be0392d493558a2b10d764bcaae9850cc262b417176a8b001f16e085fc6"}, - {file = "xxhash-3.3.0-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:b234d08786884f5c8d55dfebb839cfbd846d812e3a052c39ca7e8ce7055fed68"}, - {file = "xxhash-3.3.0-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:b031395b4b9c3085d9ea1ce89896ab01a65fc63172b2bfda5dd318fefe5e2f93"}, - {file = "xxhash-3.3.0-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:5afe44da46b48c75169e622a532dca3fe585343c0577cfd7c18ecd3f1200305d"}, - {file = "xxhash-3.3.0-cp39-cp39-musllinux_1_1_ppc64le.whl", hash = "sha256:c59f233f38b6a49d5e4ddf16be910a5bbf36a2989b6b2c8591853fb9f5a5e691"}, - {file = "xxhash-3.3.0-cp39-cp39-musllinux_1_1_s390x.whl", hash = "sha256:ed016e278c5c4633270903c7cf3b9dfb0bd293b7335e43fe695cb95541da53c9"}, - {file = "xxhash-3.3.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:7a8bd6612fb35487e9ab329bb37b3df44f58baf752010dde9282593edbfed7e7"}, - {file = "xxhash-3.3.0-cp39-cp39-win32.whl", hash = "sha256:015a0498bde85364abc53fcc713af962dd4555391929736d9c0ff2c555436a03"}, - {file = "xxhash-3.3.0-cp39-cp39-win_amd64.whl", hash = "sha256:06a484097af32caf1cfffadd60c3ca140c9e52b40a551fb1f6f0fdfd6f7f8977"}, - {file = "xxhash-3.3.0-pp37-pypy37_pp73-macosx_10_9_x86_64.whl", hash = "sha256:6c3809740124bbc777d29e3ae53de24f4c13fd5e62878086a8feadf0dcb654a5"}, - {file = "xxhash-3.3.0-pp37-pypy37_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ae092f0daaeece2acdd6ec46e2ab307d8d6f22b01ecca14dc6078844dbd88339"}, - {file = "xxhash-3.3.0-pp37-pypy37_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3498e72ff2610b049b97bb81d1ea6e7bfa5b7a45efb3f255d77ec2fa2bc91653"}, - {file = "xxhash-3.3.0-pp37-pypy37_pp73-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:b0004dded9d86f129961326e980420187640fb7ba65a184009429861c1d09df7"}, - {file = "xxhash-3.3.0-pp37-pypy37_pp73-win_amd64.whl", hash = "sha256:41c8bfd27191928bae6fd2b66872965532267785094a03c0ee5f358d9dba51c2"}, - {file = "xxhash-3.3.0-pp38-pypy38_pp73-macosx_10_9_x86_64.whl", hash = "sha256:71db8498e329cef3588b0617f762a3fe31d899872e76a68ce2840e35a1318a5b"}, - {file = "xxhash-3.3.0-pp38-pypy38_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4d1d24d71b6209bc0124286932c4f0660c1103cb996fe34cb374bc12ac251940"}, - {file = "xxhash-3.3.0-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:61004587a09b5b385e43d95ffe3a76c9d934dfd79ea38272d5c20ddfba8eab8f"}, - {file = "xxhash-3.3.0-pp38-pypy38_pp73-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:3f0c92e3fa826425c73acafb31e022a719c85423847a9433d3a9e61e4ac97543"}, - {file = "xxhash-3.3.0-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:367e03f1484ce471c94e731b98f5e4a05b43e7188b16692998e1cc89fd1159a5"}, - {file = "xxhash-3.3.0-pp39-pypy39_pp73-macosx_10_9_x86_64.whl", hash = "sha256:ed04c47dfaab98fcda0b748af9ee6fe8c888a0a0fbd13720e0f0221671e387e1"}, - {file = "xxhash-3.3.0-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7cbfde62516435ca198220aff048a8793383cb7047c7b88714a061968bca786d"}, - {file = "xxhash-3.3.0-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:73682225faa973ee56743f0fcd36bfcbfec503be258e0e420fb34313f52f1e7b"}, - {file = "xxhash-3.3.0-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d49efdce2086c2c506af20ed18a1115b40af7aad6d4ee27cb31d7c810585a3f2"}, - {file = "xxhash-3.3.0-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:546a0bb8e5a657cadf0da290b30ccd561cb89c256a5421ab8d5eb12eaf087349"}, - {file = "xxhash-3.3.0.tar.gz", hash = "sha256:c3f9e322b1ebeebd44e3d9d2d9b124e0c550c1ef41bd552afdcdd719516ee41a"}, + {file = "xxhash-3.4.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:91dbfa55346ad3e18e738742236554531a621042e419b70ad8f3c1d9c7a16e7f"}, + {file = "xxhash-3.4.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:665a65c2a48a72068fcc4d21721510df5f51f1142541c890491afc80451636d2"}, + {file = "xxhash-3.4.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:bb11628470a6004dc71a09fe90c2f459ff03d611376c1debeec2d648f44cb693"}, + {file = "xxhash-3.4.1-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:5bef2a7dc7b4f4beb45a1edbba9b9194c60a43a89598a87f1a0226d183764189"}, + {file = "xxhash-3.4.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:9c0f7b2d547d72c7eda7aa817acf8791f0146b12b9eba1d4432c531fb0352228"}, + {file = "xxhash-3.4.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:00f2fdef6b41c9db3d2fc0e7f94cb3db86693e5c45d6de09625caad9a469635b"}, + {file = "xxhash-3.4.1-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:23cfd9ca09acaf07a43e5a695143d9a21bf00f5b49b15c07d5388cadf1f9ce11"}, + {file = "xxhash-3.4.1-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:6a9ff50a3cf88355ca4731682c168049af1ca222d1d2925ef7119c1a78e95b3b"}, + {file = "xxhash-3.4.1-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:f1d7c69a1e9ca5faa75546fdd267f214f63f52f12692f9b3a2f6467c9e67d5e7"}, + {file = "xxhash-3.4.1-cp310-cp310-musllinux_1_1_ppc64le.whl", hash = "sha256:672b273040d5d5a6864a36287f3514efcd1d4b1b6a7480f294c4b1d1ee1b8de0"}, + {file = "xxhash-3.4.1-cp310-cp310-musllinux_1_1_s390x.whl", hash = "sha256:4178f78d70e88f1c4a89ff1ffe9f43147185930bb962ee3979dba15f2b1cc799"}, + {file = "xxhash-3.4.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:9804b9eb254d4b8cc83ab5a2002128f7d631dd427aa873c8727dba7f1f0d1c2b"}, + {file = "xxhash-3.4.1-cp310-cp310-win32.whl", hash = "sha256:c09c49473212d9c87261d22c74370457cfff5db2ddfc7fd1e35c80c31a8c14ce"}, + {file = "xxhash-3.4.1-cp310-cp310-win_amd64.whl", hash = "sha256:ebbb1616435b4a194ce3466d7247df23499475c7ed4eb2681a1fa42ff766aff6"}, + {file = "xxhash-3.4.1-cp310-cp310-win_arm64.whl", hash = "sha256:25dc66be3db54f8a2d136f695b00cfe88018e59ccff0f3b8f545869f376a8a46"}, + {file = "xxhash-3.4.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:58c49083801885273e262c0f5bbeac23e520564b8357fbb18fb94ff09d3d3ea5"}, + {file = "xxhash-3.4.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:b526015a973bfbe81e804a586b703f163861da36d186627e27524f5427b0d520"}, + {file = "xxhash-3.4.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:36ad4457644c91a966f6fe137d7467636bdc51a6ce10a1d04f365c70d6a16d7e"}, + {file = "xxhash-3.4.1-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:248d3e83d119770f96003271fe41e049dd4ae52da2feb8f832b7a20e791d2920"}, + {file = "xxhash-3.4.1-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2070b6d5bbef5ee031666cf21d4953c16e92c2f8a24a94b5c240f8995ba3b1d0"}, + {file = "xxhash-3.4.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b2746035f518f0410915e247877f7df43ef3372bf36cfa52cc4bc33e85242641"}, + {file = "xxhash-3.4.1-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:2a8ba6181514681c2591840d5632fcf7356ab287d4aff1c8dea20f3c78097088"}, + {file = "xxhash-3.4.1-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:0aac5010869240e95f740de43cd6a05eae180c59edd182ad93bf12ee289484fa"}, + {file = "xxhash-3.4.1-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:4cb11d8debab1626181633d184b2372aaa09825bde709bf927704ed72765bed1"}, + {file = "xxhash-3.4.1-cp311-cp311-musllinux_1_1_ppc64le.whl", hash = "sha256:b29728cff2c12f3d9f1d940528ee83918d803c0567866e062683f300d1d2eff3"}, + {file = "xxhash-3.4.1-cp311-cp311-musllinux_1_1_s390x.whl", hash = "sha256:a15cbf3a9c40672523bdb6ea97ff74b443406ba0ab9bca10ceccd9546414bd84"}, + {file = "xxhash-3.4.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:6e66df260fed01ed8ea790c2913271641c58481e807790d9fca8bfd5a3c13844"}, + {file = "xxhash-3.4.1-cp311-cp311-win32.whl", hash = "sha256:e867f68a8f381ea12858e6d67378c05359d3a53a888913b5f7d35fbf68939d5f"}, + {file = "xxhash-3.4.1-cp311-cp311-win_amd64.whl", hash = "sha256:200a5a3ad9c7c0c02ed1484a1d838b63edcf92ff538770ea07456a3732c577f4"}, + {file = "xxhash-3.4.1-cp311-cp311-win_arm64.whl", hash = "sha256:1d03f1c0d16d24ea032e99f61c552cb2b77d502e545187338bea461fde253583"}, + {file = "xxhash-3.4.1-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:c4bbba9b182697a52bc0c9f8ec0ba1acb914b4937cd4a877ad78a3b3eeabefb3"}, + {file = "xxhash-3.4.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:9fd28a9da300e64e434cfc96567a8387d9a96e824a9be1452a1e7248b7763b78"}, + {file = "xxhash-3.4.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6066d88c9329ab230e18998daec53d819daeee99d003955c8db6fc4971b45ca3"}, + {file = "xxhash-3.4.1-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:93805bc3233ad89abf51772f2ed3355097a5dc74e6080de19706fc447da99cd3"}, + {file = "xxhash-3.4.1-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:64da57d5ed586ebb2ecdde1e997fa37c27fe32fe61a656b77fabbc58e6fbff6e"}, + {file = "xxhash-3.4.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7a97322e9a7440bf3c9805cbaac090358b43f650516486746f7fa482672593df"}, + {file = "xxhash-3.4.1-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:bbe750d512982ee7d831838a5dee9e9848f3fb440e4734cca3f298228cc957a6"}, + {file = "xxhash-3.4.1-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:fd79d4087727daf4d5b8afe594b37d611ab95dc8e29fe1a7517320794837eb7d"}, + {file = "xxhash-3.4.1-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:743612da4071ff9aa4d055f3f111ae5247342931dedb955268954ef7201a71ff"}, + {file = "xxhash-3.4.1-cp312-cp312-musllinux_1_1_ppc64le.whl", hash = "sha256:b41edaf05734092f24f48c0958b3c6cbaaa5b7e024880692078c6b1f8247e2fc"}, + {file = "xxhash-3.4.1-cp312-cp312-musllinux_1_1_s390x.whl", hash = "sha256:a90356ead70d715fe64c30cd0969072de1860e56b78adf7c69d954b43e29d9fa"}, + {file = "xxhash-3.4.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:ac56eebb364e44c85e1d9e9cc5f6031d78a34f0092fea7fc80478139369a8b4a"}, + {file = "xxhash-3.4.1-cp312-cp312-win32.whl", hash = "sha256:911035345932a153c427107397c1518f8ce456f93c618dd1c5b54ebb22e73747"}, + {file = "xxhash-3.4.1-cp312-cp312-win_amd64.whl", hash = "sha256:f31ce76489f8601cc7b8713201ce94b4bd7b7ce90ba3353dccce7e9e1fee71fa"}, + {file = "xxhash-3.4.1-cp312-cp312-win_arm64.whl", hash = "sha256:b5beb1c6a72fdc7584102f42c4d9df232ee018ddf806e8c90906547dfb43b2da"}, + {file = "xxhash-3.4.1-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:6d42b24d1496deb05dee5a24ed510b16de1d6c866c626c2beb11aebf3be278b9"}, + {file = "xxhash-3.4.1-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3b685fab18876b14a8f94813fa2ca80cfb5ab6a85d31d5539b7cd749ce9e3624"}, + {file = "xxhash-3.4.1-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:419ffe34c17ae2df019a4685e8d3934d46b2e0bbe46221ab40b7e04ed9f11137"}, + {file = "xxhash-3.4.1-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:0e041ce5714f95251a88670c114b748bca3bf80cc72400e9f23e6d0d59cf2681"}, + {file = "xxhash-3.4.1-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fc860d887c5cb2f524899fb8338e1bb3d5789f75fac179101920d9afddef284b"}, + {file = "xxhash-3.4.1-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:312eba88ffe0a05e332e3a6f9788b73883752be63f8588a6dc1261a3eaaaf2b2"}, + {file = "xxhash-3.4.1-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:e01226b6b6a1ffe4e6bd6d08cfcb3ca708b16f02eb06dd44f3c6e53285f03e4f"}, + {file = "xxhash-3.4.1-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:9f3025a0d5d8cf406a9313cd0d5789c77433ba2004b1c75439b67678e5136537"}, + {file = "xxhash-3.4.1-cp37-cp37m-musllinux_1_1_ppc64le.whl", hash = "sha256:6d3472fd4afef2a567d5f14411d94060099901cd8ce9788b22b8c6f13c606a93"}, + {file = "xxhash-3.4.1-cp37-cp37m-musllinux_1_1_s390x.whl", hash = "sha256:43984c0a92f06cac434ad181f329a1445017c33807b7ae4f033878d860a4b0f2"}, + {file = "xxhash-3.4.1-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:a55e0506fdb09640a82ec4f44171273eeabf6f371a4ec605633adb2837b5d9d5"}, + {file = "xxhash-3.4.1-cp37-cp37m-win32.whl", hash = "sha256:faec30437919555b039a8bdbaba49c013043e8f76c999670aef146d33e05b3a0"}, + {file = "xxhash-3.4.1-cp37-cp37m-win_amd64.whl", hash = "sha256:c9e1b646af61f1fc7083bb7b40536be944f1ac67ef5e360bca2d73430186971a"}, + {file = "xxhash-3.4.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:961d948b7b1c1b6c08484bbce3d489cdf153e4122c3dfb07c2039621243d8795"}, + {file = "xxhash-3.4.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:719a378930504ab159f7b8e20fa2aa1896cde050011af838af7e7e3518dd82de"}, + {file = "xxhash-3.4.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:74fb5cb9406ccd7c4dd917f16630d2e5e8cbbb02fc2fca4e559b2a47a64f4940"}, + {file = "xxhash-3.4.1-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:5dab508ac39e0ab988039bc7f962c6ad021acd81fd29145962b068df4148c476"}, + {file = "xxhash-3.4.1-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:8c59f3e46e7daf4c589e8e853d700ef6607afa037bfad32c390175da28127e8c"}, + {file = "xxhash-3.4.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8cc07256eff0795e0f642df74ad096f8c5d23fe66bc138b83970b50fc7f7f6c5"}, + {file = "xxhash-3.4.1-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e9f749999ed80f3955a4af0eb18bb43993f04939350b07b8dd2f44edc98ffee9"}, + {file = "xxhash-3.4.1-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:7688d7c02149a90a3d46d55b341ab7ad1b4a3f767be2357e211b4e893efbaaf6"}, + {file = "xxhash-3.4.1-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:a8b4977963926f60b0d4f830941c864bed16aa151206c01ad5c531636da5708e"}, + {file = "xxhash-3.4.1-cp38-cp38-musllinux_1_1_ppc64le.whl", hash = "sha256:8106d88da330f6535a58a8195aa463ef5281a9aa23b04af1848ff715c4398fb4"}, + {file = "xxhash-3.4.1-cp38-cp38-musllinux_1_1_s390x.whl", hash = "sha256:4c76a77dbd169450b61c06fd2d5d436189fc8ab7c1571d39265d4822da16df22"}, + {file = "xxhash-3.4.1-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:11f11357c86d83e53719c592021fd524efa9cf024dc7cb1dfb57bbbd0d8713f2"}, + {file = "xxhash-3.4.1-cp38-cp38-win32.whl", hash = "sha256:0c786a6cd74e8765c6809892a0d45886e7c3dc54de4985b4a5eb8b630f3b8e3b"}, + {file = "xxhash-3.4.1-cp38-cp38-win_amd64.whl", hash = "sha256:aabf37fb8fa27430d50507deeab2ee7b1bcce89910dd10657c38e71fee835594"}, + {file = "xxhash-3.4.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:6127813abc1477f3a83529b6bbcfeddc23162cece76fa69aee8f6a8a97720562"}, + {file = "xxhash-3.4.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:ef2e194262f5db16075caea7b3f7f49392242c688412f386d3c7b07c7733a70a"}, + {file = "xxhash-3.4.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:71be94265b6c6590f0018bbf73759d21a41c6bda20409782d8117e76cd0dfa8b"}, + {file = "xxhash-3.4.1-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:10e0a619cdd1c0980e25eb04e30fe96cf8f4324758fa497080af9c21a6de573f"}, + {file = "xxhash-3.4.1-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:fa122124d2e3bd36581dd78c0efa5f429f5220313479fb1072858188bc2d5ff1"}, + {file = "xxhash-3.4.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e17032f5a4fea0a074717fe33477cb5ee723a5f428de7563e75af64bfc1b1e10"}, + {file = "xxhash-3.4.1-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ca7783b20e3e4f3f52f093538895863f21d18598f9a48211ad757680c3bd006f"}, + {file = "xxhash-3.4.1-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:d77d09a1113899fad5f354a1eb4f0a9afcf58cefff51082c8ad643ff890e30cf"}, + {file = "xxhash-3.4.1-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:21287bcdd299fdc3328cc0fbbdeaa46838a1c05391264e51ddb38a3f5b09611f"}, + {file = "xxhash-3.4.1-cp39-cp39-musllinux_1_1_ppc64le.whl", hash = "sha256:dfd7a6cc483e20b4ad90224aeb589e64ec0f31e5610ab9957ff4314270b2bf31"}, + {file = "xxhash-3.4.1-cp39-cp39-musllinux_1_1_s390x.whl", hash = "sha256:543c7fcbc02bbb4840ea9915134e14dc3dc15cbd5a30873a7a5bf66039db97ec"}, + {file = "xxhash-3.4.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:fe0a98d990e433013f41827b62be9ab43e3cf18e08b1483fcc343bda0d691182"}, + {file = "xxhash-3.4.1-cp39-cp39-win32.whl", hash = "sha256:b9097af00ebf429cc7c0e7d2fdf28384e4e2e91008130ccda8d5ae653db71e54"}, + {file = "xxhash-3.4.1-cp39-cp39-win_amd64.whl", hash = "sha256:d699b921af0dcde50ab18be76c0d832f803034d80470703700cb7df0fbec2832"}, + {file = "xxhash-3.4.1-cp39-cp39-win_arm64.whl", hash = "sha256:2be491723405e15cc099ade1280133ccfbf6322d2ef568494fb7d07d280e7eee"}, + {file = "xxhash-3.4.1-pp310-pypy310_pp73-macosx_10_9_x86_64.whl", hash = "sha256:431625fad7ab5649368c4849d2b49a83dc711b1f20e1f7f04955aab86cd307bc"}, + {file = "xxhash-3.4.1-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:fc6dbd5fc3c9886a9e041848508b7fb65fd82f94cc793253990f81617b61fe49"}, + {file = "xxhash-3.4.1-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f3ff8dbd0ec97aec842476cb8ccc3e17dd288cd6ce3c8ef38bff83d6eb927817"}, + {file = "xxhash-3.4.1-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ef73a53fe90558a4096e3256752268a8bdc0322f4692ed928b6cd7ce06ad4fe3"}, + {file = "xxhash-3.4.1-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:450401f42bbd274b519d3d8dcf3c57166913381a3d2664d6609004685039f9d3"}, + {file = "xxhash-3.4.1-pp37-pypy37_pp73-macosx_10_9_x86_64.whl", hash = "sha256:a162840cf4de8a7cd8720ff3b4417fbc10001eefdd2d21541a8226bb5556e3bb"}, + {file = "xxhash-3.4.1-pp37-pypy37_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b736a2a2728ba45017cb67785e03125a79d246462dfa892d023b827007412c52"}, + {file = "xxhash-3.4.1-pp37-pypy37_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1d0ae4c2e7698adef58710d6e7a32ff518b66b98854b1c68e70eee504ad061d8"}, + {file = "xxhash-3.4.1-pp37-pypy37_pp73-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d6322c4291c3ff174dcd104fae41500e75dad12be6f3085d119c2c8a80956c51"}, + {file = "xxhash-3.4.1-pp37-pypy37_pp73-win_amd64.whl", hash = "sha256:dd59ed668801c3fae282f8f4edadf6dc7784db6d18139b584b6d9677ddde1b6b"}, + {file = "xxhash-3.4.1-pp38-pypy38_pp73-macosx_10_9_x86_64.whl", hash = "sha256:92693c487e39523a80474b0394645b393f0ae781d8db3474ccdcead0559ccf45"}, + {file = "xxhash-3.4.1-pp38-pypy38_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4603a0f642a1e8d7f3ba5c4c25509aca6a9c1cc16f85091004a7028607ead663"}, + {file = "xxhash-3.4.1-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6fa45e8cbfbadb40a920fe9ca40c34b393e0b067082d94006f7f64e70c7490a6"}, + {file = "xxhash-3.4.1-pp38-pypy38_pp73-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:595b252943b3552de491ff51e5bb79660f84f033977f88f6ca1605846637b7c6"}, + {file = "xxhash-3.4.1-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:562d8b8f783c6af969806aaacf95b6c7b776929ae26c0cd941d54644ea7ef51e"}, + {file = "xxhash-3.4.1-pp39-pypy39_pp73-macosx_10_9_x86_64.whl", hash = "sha256:41ddeae47cf2828335d8d991f2d2b03b0bdc89289dc64349d712ff8ce59d0647"}, + {file = "xxhash-3.4.1-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c44d584afdf3c4dbb3277e32321d1a7b01d6071c1992524b6543025fb8f4206f"}, + {file = "xxhash-3.4.1-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fd7bddb3a5b86213cc3f2c61500c16945a1b80ecd572f3078ddbbe68f9dabdfb"}, + {file = "xxhash-3.4.1-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:9ecb6c987b62437c2f99c01e97caf8d25660bf541fe79a481d05732e5236719c"}, + {file = "xxhash-3.4.1-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:696b4e18b7023527d5c50ed0626ac0520edac45a50ec7cf3fc265cd08b1f4c03"}, + {file = "xxhash-3.4.1.tar.gz", hash = "sha256:0379d6cf1ff987cd421609a264ce025e74f346e3e145dd106c0cc2e3ec3f99a9"}, ] [[package]] name = "yarl" -version = "1.9.2" +version = "1.9.3" description = "Yet another URL library" optional = true python-versions = ">=3.7" files = [ - {file = "yarl-1.9.2-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:8c2ad583743d16ddbdf6bb14b5cd76bf43b0d0006e918809d5d4ddf7bde8dd82"}, - {file = "yarl-1.9.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:82aa6264b36c50acfb2424ad5ca537a2060ab6de158a5bd2a72a032cc75b9eb8"}, - {file = "yarl-1.9.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:c0c77533b5ed4bcc38e943178ccae29b9bcf48ffd1063f5821192f23a1bd27b9"}, - {file = "yarl-1.9.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ee4afac41415d52d53a9833ebae7e32b344be72835bbb589018c9e938045a560"}, - {file = "yarl-1.9.2-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:9bf345c3a4f5ba7f766430f97f9cc1320786f19584acc7086491f45524a551ac"}, - {file = "yarl-1.9.2-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2a96c19c52ff442a808c105901d0bdfd2e28575b3d5f82e2f5fd67e20dc5f4ea"}, - {file = "yarl-1.9.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:891c0e3ec5ec881541f6c5113d8df0315ce5440e244a716b95f2525b7b9f3608"}, - {file = "yarl-1.9.2-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c3a53ba34a636a256d767c086ceb111358876e1fb6b50dfc4d3f4951d40133d5"}, - {file = "yarl-1.9.2-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:566185e8ebc0898b11f8026447eacd02e46226716229cea8db37496c8cdd26e0"}, - {file = "yarl-1.9.2-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:2b0738fb871812722a0ac2154be1f049c6223b9f6f22eec352996b69775b36d4"}, - {file = "yarl-1.9.2-cp310-cp310-musllinux_1_1_ppc64le.whl", hash = "sha256:32f1d071b3f362c80f1a7d322bfd7b2d11e33d2adf395cc1dd4df36c9c243095"}, - {file = "yarl-1.9.2-cp310-cp310-musllinux_1_1_s390x.whl", hash = "sha256:e9fdc7ac0d42bc3ea78818557fab03af6181e076a2944f43c38684b4b6bed8e3"}, - {file = "yarl-1.9.2-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:56ff08ab5df8429901ebdc5d15941b59f6253393cb5da07b4170beefcf1b2528"}, - {file = "yarl-1.9.2-cp310-cp310-win32.whl", hash = "sha256:8ea48e0a2f931064469bdabca50c2f578b565fc446f302a79ba6cc0ee7f384d3"}, - {file = "yarl-1.9.2-cp310-cp310-win_amd64.whl", hash = "sha256:50f33040f3836e912ed16d212f6cc1efb3231a8a60526a407aeb66c1c1956dde"}, - {file = "yarl-1.9.2-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:646d663eb2232d7909e6601f1a9107e66f9791f290a1b3dc7057818fe44fc2b6"}, - {file = "yarl-1.9.2-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:aff634b15beff8902d1f918012fc2a42e0dbae6f469fce134c8a0dc51ca423bb"}, - {file = "yarl-1.9.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:a83503934c6273806aed765035716216cc9ab4e0364f7f066227e1aaea90b8d0"}, - {file = "yarl-1.9.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b25322201585c69abc7b0e89e72790469f7dad90d26754717f3310bfe30331c2"}, - {file = "yarl-1.9.2-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:22a94666751778629f1ec4280b08eb11815783c63f52092a5953faf73be24191"}, - {file = "yarl-1.9.2-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:8ec53a0ea2a80c5cd1ab397925f94bff59222aa3cf9c6da938ce05c9ec20428d"}, - {file = "yarl-1.9.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:159d81f22d7a43e6eabc36d7194cb53f2f15f498dbbfa8edc8a3239350f59fe7"}, - {file = "yarl-1.9.2-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:832b7e711027c114d79dffb92576acd1bd2decc467dec60e1cac96912602d0e6"}, - {file = "yarl-1.9.2-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:95d2ecefbcf4e744ea952d073c6922e72ee650ffc79028eb1e320e732898d7e8"}, - {file = "yarl-1.9.2-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:d4e2c6d555e77b37288eaf45b8f60f0737c9efa3452c6c44626a5455aeb250b9"}, - {file = "yarl-1.9.2-cp311-cp311-musllinux_1_1_ppc64le.whl", hash = "sha256:783185c75c12a017cc345015ea359cc801c3b29a2966c2655cd12b233bf5a2be"}, - {file = "yarl-1.9.2-cp311-cp311-musllinux_1_1_s390x.whl", hash = "sha256:b8cc1863402472f16c600e3e93d542b7e7542a540f95c30afd472e8e549fc3f7"}, - {file = "yarl-1.9.2-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:822b30a0f22e588b32d3120f6d41e4ed021806418b4c9f0bc3048b8c8cb3f92a"}, - {file = "yarl-1.9.2-cp311-cp311-win32.whl", hash = "sha256:a60347f234c2212a9f0361955007fcf4033a75bf600a33c88a0a8e91af77c0e8"}, - {file = "yarl-1.9.2-cp311-cp311-win_amd64.whl", hash = "sha256:be6b3fdec5c62f2a67cb3f8c6dbf56bbf3f61c0f046f84645cd1ca73532ea051"}, - {file = "yarl-1.9.2-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:38a3928ae37558bc1b559f67410df446d1fbfa87318b124bf5032c31e3447b74"}, - {file = "yarl-1.9.2-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ac9bb4c5ce3975aeac288cfcb5061ce60e0d14d92209e780c93954076c7c4367"}, - {file = "yarl-1.9.2-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:3da8a678ca8b96c8606bbb8bfacd99a12ad5dd288bc6f7979baddd62f71c63ef"}, - {file = "yarl-1.9.2-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:13414591ff516e04fcdee8dc051c13fd3db13b673c7a4cb1350e6b2ad9639ad3"}, - {file = "yarl-1.9.2-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bf74d08542c3a9ea97bb8f343d4fcbd4d8f91bba5ec9d5d7f792dbe727f88938"}, - {file = "yarl-1.9.2-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:6e7221580dc1db478464cfeef9b03b95c5852cc22894e418562997df0d074ccc"}, - {file = "yarl-1.9.2-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:494053246b119b041960ddcd20fd76224149cfea8ed8777b687358727911dd33"}, - {file = "yarl-1.9.2-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:52a25809fcbecfc63ac9ba0c0fb586f90837f5425edfd1ec9f3372b119585e45"}, - {file = "yarl-1.9.2-cp37-cp37m-musllinux_1_1_ppc64le.whl", hash = "sha256:e65610c5792870d45d7b68c677681376fcf9cc1c289f23e8e8b39c1485384185"}, - {file = "yarl-1.9.2-cp37-cp37m-musllinux_1_1_s390x.whl", hash = "sha256:1b1bba902cba32cdec51fca038fd53f8beee88b77efc373968d1ed021024cc04"}, - {file = "yarl-1.9.2-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:662e6016409828ee910f5d9602a2729a8a57d74b163c89a837de3fea050c7582"}, - {file = "yarl-1.9.2-cp37-cp37m-win32.whl", hash = "sha256:f364d3480bffd3aa566e886587eaca7c8c04d74f6e8933f3f2c996b7f09bee1b"}, - {file = "yarl-1.9.2-cp37-cp37m-win_amd64.whl", hash = "sha256:6a5883464143ab3ae9ba68daae8e7c5c95b969462bbe42e2464d60e7e2698368"}, - {file = "yarl-1.9.2-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:5610f80cf43b6202e2c33ba3ec2ee0a2884f8f423c8f4f62906731d876ef4fac"}, - {file = "yarl-1.9.2-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:b9a4e67ad7b646cd6f0938c7ebfd60e481b7410f574c560e455e938d2da8e0f4"}, - {file = "yarl-1.9.2-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:83fcc480d7549ccebe9415d96d9263e2d4226798c37ebd18c930fce43dfb9574"}, - {file = "yarl-1.9.2-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5fcd436ea16fee7d4207c045b1e340020e58a2597301cfbcfdbe5abd2356c2fb"}, - {file = "yarl-1.9.2-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:84e0b1599334b1e1478db01b756e55937d4614f8654311eb26012091be109d59"}, - {file = "yarl-1.9.2-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:3458a24e4ea3fd8930e934c129b676c27452e4ebda80fbe47b56d8c6c7a63a9e"}, - {file = "yarl-1.9.2-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:838162460b3a08987546e881a2bfa573960bb559dfa739e7800ceeec92e64417"}, - {file = "yarl-1.9.2-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f4e2d08f07a3d7d3e12549052eb5ad3eab1c349c53ac51c209a0e5991bbada78"}, - {file = "yarl-1.9.2-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:de119f56f3c5f0e2fb4dee508531a32b069a5f2c6e827b272d1e0ff5ac040333"}, - {file = "yarl-1.9.2-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:149ddea5abf329752ea5051b61bd6c1d979e13fbf122d3a1f9f0c8be6cb6f63c"}, - {file = "yarl-1.9.2-cp38-cp38-musllinux_1_1_ppc64le.whl", hash = "sha256:674ca19cbee4a82c9f54e0d1eee28116e63bc6fd1e96c43031d11cbab8b2afd5"}, - {file = "yarl-1.9.2-cp38-cp38-musllinux_1_1_s390x.whl", hash = "sha256:9b3152f2f5677b997ae6c804b73da05a39daa6a9e85a512e0e6823d81cdad7cc"}, - {file = "yarl-1.9.2-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:5415d5a4b080dc9612b1b63cba008db84e908b95848369aa1da3686ae27b6d2b"}, - {file = "yarl-1.9.2-cp38-cp38-win32.whl", hash = "sha256:f7a3d8146575e08c29ed1cd287068e6d02f1c7bdff8970db96683b9591b86ee7"}, - {file = "yarl-1.9.2-cp38-cp38-win_amd64.whl", hash = "sha256:63c48f6cef34e6319a74c727376e95626f84ea091f92c0250a98e53e62c77c72"}, - {file = "yarl-1.9.2-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:75df5ef94c3fdc393c6b19d80e6ef1ecc9ae2f4263c09cacb178d871c02a5ba9"}, - {file = "yarl-1.9.2-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:c027a6e96ef77d401d8d5a5c8d6bc478e8042f1e448272e8d9752cb0aff8b5c8"}, - {file = "yarl-1.9.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:f3b078dbe227f79be488ffcfc7a9edb3409d018e0952cf13f15fd6512847f3f7"}, - {file = "yarl-1.9.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:59723a029760079b7d991a401386390c4be5bfec1e7dd83e25a6a0881859e716"}, - {file = "yarl-1.9.2-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:b03917871bf859a81ccb180c9a2e6c1e04d2f6a51d953e6a5cdd70c93d4e5a2a"}, - {file = "yarl-1.9.2-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:c1012fa63eb6c032f3ce5d2171c267992ae0c00b9e164efe4d73db818465fac3"}, - {file = "yarl-1.9.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a74dcbfe780e62f4b5a062714576f16c2f3493a0394e555ab141bf0d746bb955"}, - {file = "yarl-1.9.2-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:8c56986609b057b4839968ba901944af91b8e92f1725d1a2d77cbac6972b9ed1"}, - {file = "yarl-1.9.2-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:2c315df3293cd521033533d242d15eab26583360b58f7ee5d9565f15fee1bef4"}, - {file = "yarl-1.9.2-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:b7232f8dfbd225d57340e441d8caf8652a6acd06b389ea2d3222b8bc89cbfca6"}, - {file = "yarl-1.9.2-cp39-cp39-musllinux_1_1_ppc64le.whl", hash = "sha256:53338749febd28935d55b41bf0bcc79d634881195a39f6b2f767870b72514caf"}, - {file = "yarl-1.9.2-cp39-cp39-musllinux_1_1_s390x.whl", hash = "sha256:066c163aec9d3d073dc9ffe5dd3ad05069bcb03fcaab8d221290ba99f9f69ee3"}, - {file = "yarl-1.9.2-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:8288d7cd28f8119b07dd49b7230d6b4562f9b61ee9a4ab02221060d21136be80"}, - {file = "yarl-1.9.2-cp39-cp39-win32.whl", hash = "sha256:b124e2a6d223b65ba8768d5706d103280914d61f5cae3afbc50fc3dfcc016623"}, - {file = "yarl-1.9.2-cp39-cp39-win_amd64.whl", hash = "sha256:61016e7d582bc46a5378ffdd02cd0314fb8ba52f40f9cf4d9a5e7dbef88dee18"}, - {file = "yarl-1.9.2.tar.gz", hash = "sha256:04ab9d4b9f587c06d801c2abfe9317b77cdf996c65a90d5e84ecc45010823571"}, + {file = "yarl-1.9.3-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:32435d134414e01d937cd9d6cc56e8413a8d4741dea36af5840c7750f04d16ab"}, + {file = "yarl-1.9.3-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:9a5211de242754b5e612557bca701f39f8b1a9408dff73c6db623f22d20f470e"}, + {file = "yarl-1.9.3-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:525cd69eff44833b01f8ef39aa33a9cc53a99ff7f9d76a6ef6a9fb758f54d0ff"}, + {file = "yarl-1.9.3-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:fc94441bcf9cb8c59f51f23193316afefbf3ff858460cb47b5758bf66a14d130"}, + {file = "yarl-1.9.3-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:e36021db54b8a0475805acc1d6c4bca5d9f52c3825ad29ae2d398a9d530ddb88"}, + {file = "yarl-1.9.3-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:e0f17d1df951336a02afc8270c03c0c6e60d1f9996fcbd43a4ce6be81de0bd9d"}, + {file = "yarl-1.9.3-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c5f3faeb8100a43adf3e7925d556801d14b5816a0ac9e75e22948e787feec642"}, + {file = "yarl-1.9.3-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:aed37db837ecb5962469fad448aaae0f0ee94ffce2062cf2eb9aed13328b5196"}, + {file = "yarl-1.9.3-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:721ee3fc292f0d069a04016ef2c3a25595d48c5b8ddc6029be46f6158d129c92"}, + {file = "yarl-1.9.3-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:b8bc5b87a65a4e64bc83385c05145ea901b613d0d3a434d434b55511b6ab0067"}, + {file = "yarl-1.9.3-cp310-cp310-musllinux_1_1_ppc64le.whl", hash = "sha256:dd952b9c64f3b21aedd09b8fe958e4931864dba69926d8a90c90d36ac4e28c9a"}, + {file = "yarl-1.9.3-cp310-cp310-musllinux_1_1_s390x.whl", hash = "sha256:c405d482c320a88ab53dcbd98d6d6f32ada074f2d965d6e9bf2d823158fa97de"}, + {file = "yarl-1.9.3-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:9df9a0d4c5624790a0dea2e02e3b1b3c69aed14bcb8650e19606d9df3719e87d"}, + {file = "yarl-1.9.3-cp310-cp310-win32.whl", hash = "sha256:d34c4f80956227f2686ddea5b3585e109c2733e2d4ef12eb1b8b4e84f09a2ab6"}, + {file = "yarl-1.9.3-cp310-cp310-win_amd64.whl", hash = "sha256:cf7a4e8de7f1092829caef66fd90eaf3710bc5efd322a816d5677b7664893c93"}, + {file = "yarl-1.9.3-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:d61a0ca95503867d4d627517bcfdc28a8468c3f1b0b06c626f30dd759d3999fd"}, + {file = "yarl-1.9.3-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:73cc83f918b69110813a7d95024266072d987b903a623ecae673d1e71579d566"}, + {file = "yarl-1.9.3-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:d81657b23e0edb84b37167e98aefb04ae16cbc5352770057893bd222cdc6e45f"}, + {file = "yarl-1.9.3-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:26a1a8443091c7fbc17b84a0d9f38de34b8423b459fb853e6c8cdfab0eacf613"}, + {file = "yarl-1.9.3-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:fe34befb8c765b8ce562f0200afda3578f8abb159c76de3ab354c80b72244c41"}, + {file = "yarl-1.9.3-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2c757f64afe53a422e45e3e399e1e3cf82b7a2f244796ce80d8ca53e16a49b9f"}, + {file = "yarl-1.9.3-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:72a57b41a0920b9a220125081c1e191b88a4cdec13bf9d0649e382a822705c65"}, + {file = "yarl-1.9.3-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:632c7aeb99df718765adf58eacb9acb9cbc555e075da849c1378ef4d18bf536a"}, + {file = "yarl-1.9.3-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:b0b8c06afcf2bac5a50b37f64efbde978b7f9dc88842ce9729c020dc71fae4ce"}, + {file = "yarl-1.9.3-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:1d93461e2cf76c4796355494f15ffcb50a3c198cc2d601ad8d6a96219a10c363"}, + {file = "yarl-1.9.3-cp311-cp311-musllinux_1_1_ppc64le.whl", hash = "sha256:4003f380dac50328c85e85416aca6985536812c082387255c35292cb4b41707e"}, + {file = "yarl-1.9.3-cp311-cp311-musllinux_1_1_s390x.whl", hash = "sha256:4d6d74a97e898c1c2df80339aa423234ad9ea2052f66366cef1e80448798c13d"}, + {file = "yarl-1.9.3-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:b61e64b06c3640feab73fa4ff9cb64bd8182de52e5dc13038e01cfe674ebc321"}, + {file = "yarl-1.9.3-cp311-cp311-win32.whl", hash = "sha256:29beac86f33d6c7ab1d79bd0213aa7aed2d2f555386856bb3056d5fdd9dab279"}, + {file = "yarl-1.9.3-cp311-cp311-win_amd64.whl", hash = "sha256:f7271d6bd8838c49ba8ae647fc06469137e1c161a7ef97d778b72904d9b68696"}, + {file = "yarl-1.9.3-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:dd318e6b75ca80bff0b22b302f83a8ee41c62b8ac662ddb49f67ec97e799885d"}, + {file = "yarl-1.9.3-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:c4b1efb11a8acd13246ffb0bee888dd0e8eb057f8bf30112e3e21e421eb82d4a"}, + {file = "yarl-1.9.3-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:c6f034386e5550b5dc8ded90b5e2ff7db21f0f5c7de37b6efc5dac046eb19c10"}, + {file = "yarl-1.9.3-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:cd49a908cb6d387fc26acee8b7d9fcc9bbf8e1aca890c0b2fdfd706057546080"}, + {file = "yarl-1.9.3-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:aa4643635f26052401750bd54db911b6342eb1a9ac3e74f0f8b58a25d61dfe41"}, + {file = "yarl-1.9.3-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:e741bd48e6a417bdfbae02e088f60018286d6c141639359fb8df017a3b69415a"}, + {file = "yarl-1.9.3-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7c86d0d0919952d05df880a1889a4f0aeb6868e98961c090e335671dea5c0361"}, + {file = "yarl-1.9.3-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:3d5434b34100b504aabae75f0622ebb85defffe7b64ad8f52b8b30ec6ef6e4b9"}, + {file = "yarl-1.9.3-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:79e1df60f7c2b148722fb6cafebffe1acd95fd8b5fd77795f56247edaf326752"}, + {file = "yarl-1.9.3-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:44e91a669c43f03964f672c5a234ae0d7a4d49c9b85d1baa93dec28afa28ffbd"}, + {file = "yarl-1.9.3-cp312-cp312-musllinux_1_1_ppc64le.whl", hash = "sha256:3cfa4dbe17b2e6fca1414e9c3bcc216f6930cb18ea7646e7d0d52792ac196808"}, + {file = "yarl-1.9.3-cp312-cp312-musllinux_1_1_s390x.whl", hash = "sha256:88d2c3cc4b2f46d1ba73d81c51ec0e486f59cc51165ea4f789677f91a303a9a7"}, + {file = "yarl-1.9.3-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:cccdc02e46d2bd7cb5f38f8cc3d9db0d24951abd082b2f242c9e9f59c0ab2af3"}, + {file = "yarl-1.9.3-cp312-cp312-win32.whl", hash = "sha256:96758e56dceb8a70f8a5cff1e452daaeff07d1cc9f11e9b0c951330f0a2396a7"}, + {file = "yarl-1.9.3-cp312-cp312-win_amd64.whl", hash = "sha256:c4472fe53ebf541113e533971bd8c32728debc4c6d8cc177f2bff31d011ec17e"}, + {file = "yarl-1.9.3-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:126638ab961633f0940a06e1c9d59919003ef212a15869708dcb7305f91a6732"}, + {file = "yarl-1.9.3-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c99ddaddb2fbe04953b84d1651149a0d85214780e4d0ee824e610ab549d98d92"}, + {file = "yarl-1.9.3-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:8dab30b21bd6fb17c3f4684868c7e6a9e8468078db00f599fb1c14e324b10fca"}, + {file = "yarl-1.9.3-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:828235a2a169160ee73a2fcfb8a000709edf09d7511fccf203465c3d5acc59e4"}, + {file = "yarl-1.9.3-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fc391e3941045fd0987c77484b2799adffd08e4b6735c4ee5f054366a2e1551d"}, + {file = "yarl-1.9.3-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:51382c72dd5377861b573bd55dcf680df54cea84147c8648b15ac507fbef984d"}, + {file = "yarl-1.9.3-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:28a108cb92ce6cf867690a962372996ca332d8cda0210c5ad487fe996e76b8bb"}, + {file = "yarl-1.9.3-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:8f18a7832ff85dfcd77871fe677b169b1bc60c021978c90c3bb14f727596e0ae"}, + {file = "yarl-1.9.3-cp37-cp37m-musllinux_1_1_ppc64le.whl", hash = "sha256:7eaf13af79950142ab2bbb8362f8d8d935be9aaf8df1df89c86c3231e4ff238a"}, + {file = "yarl-1.9.3-cp37-cp37m-musllinux_1_1_s390x.whl", hash = "sha256:66a6dbf6ca7d2db03cc61cafe1ee6be838ce0fbc97781881a22a58a7c5efef42"}, + {file = "yarl-1.9.3-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:1a0a4f3aaa18580038cfa52a7183c8ffbbe7d727fe581300817efc1e96d1b0e9"}, + {file = "yarl-1.9.3-cp37-cp37m-win32.whl", hash = "sha256:946db4511b2d815979d733ac6a961f47e20a29c297be0d55b6d4b77ee4b298f6"}, + {file = "yarl-1.9.3-cp37-cp37m-win_amd64.whl", hash = "sha256:2dad8166d41ebd1f76ce107cf6a31e39801aee3844a54a90af23278b072f1ccf"}, + {file = "yarl-1.9.3-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:bb72d2a94481e7dc7a0c522673db288f31849800d6ce2435317376a345728225"}, + {file = "yarl-1.9.3-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:9a172c3d5447b7da1680a1a2d6ecdf6f87a319d21d52729f45ec938a7006d5d8"}, + {file = "yarl-1.9.3-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:2dc72e891672343b99db6d497024bf8b985537ad6c393359dc5227ef653b2f17"}, + {file = "yarl-1.9.3-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b8d51817cf4b8d545963ec65ff06c1b92e5765aa98831678d0e2240b6e9fd281"}, + {file = "yarl-1.9.3-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:53ec65f7eee8655bebb1f6f1607760d123c3c115a324b443df4f916383482a67"}, + {file = "yarl-1.9.3-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:cfd77e8e5cafba3fb584e0f4b935a59216f352b73d4987be3af51f43a862c403"}, + {file = "yarl-1.9.3-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e73db54c967eb75037c178a54445c5a4e7461b5203b27c45ef656a81787c0c1b"}, + {file = "yarl-1.9.3-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:09c19e5f4404574fcfb736efecf75844ffe8610606f3fccc35a1515b8b6712c4"}, + {file = "yarl-1.9.3-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:6280353940f7e5e2efaaabd686193e61351e966cc02f401761c4d87f48c89ea4"}, + {file = "yarl-1.9.3-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:c25ec06e4241e162f5d1f57c370f4078797ade95c9208bd0c60f484834f09c96"}, + {file = "yarl-1.9.3-cp38-cp38-musllinux_1_1_ppc64le.whl", hash = "sha256:7217234b10c64b52cc39a8d82550342ae2e45be34f5bff02b890b8c452eb48d7"}, + {file = "yarl-1.9.3-cp38-cp38-musllinux_1_1_s390x.whl", hash = "sha256:4ce77d289f8d40905c054b63f29851ecbfd026ef4ba5c371a158cfe6f623663e"}, + {file = "yarl-1.9.3-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:5f74b015c99a5eac5ae589de27a1201418a5d9d460e89ccb3366015c6153e60a"}, + {file = "yarl-1.9.3-cp38-cp38-win32.whl", hash = "sha256:8a2538806be846ea25e90c28786136932ec385c7ff3bc1148e45125984783dc6"}, + {file = "yarl-1.9.3-cp38-cp38-win_amd64.whl", hash = "sha256:6465d36381af057d0fab4e0f24ef0e80ba61f03fe43e6eeccbe0056e74aadc70"}, + {file = "yarl-1.9.3-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:2f3c8822bc8fb4a347a192dd6a28a25d7f0ea3262e826d7d4ef9cc99cd06d07e"}, + {file = "yarl-1.9.3-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:b7831566595fe88ba17ea80e4b61c0eb599f84c85acaa14bf04dd90319a45b90"}, + {file = "yarl-1.9.3-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:ff34cb09a332832d1cf38acd0f604c068665192c6107a439a92abfd8acf90fe2"}, + {file = "yarl-1.9.3-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:fe8080b4f25dfc44a86bedd14bc4f9d469dfc6456e6f3c5d9077e81a5fedfba7"}, + {file = "yarl-1.9.3-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:8535e111a064f3bdd94c0ed443105934d6f005adad68dd13ce50a488a0ad1bf3"}, + {file = "yarl-1.9.3-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:0d155a092bf0ebf4a9f6f3b7a650dc5d9a5bbb585ef83a52ed36ba46f55cc39d"}, + {file = "yarl-1.9.3-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:778df71c8d0c8c9f1b378624b26431ca80041660d7be7c3f724b2c7a6e65d0d6"}, + {file = "yarl-1.9.3-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:b9f9cafaf031c34d95c1528c16b2fa07b710e6056b3c4e2e34e9317072da5d1a"}, + {file = "yarl-1.9.3-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:ca6b66f69e30f6e180d52f14d91ac854b8119553b524e0e28d5291a724f0f423"}, + {file = "yarl-1.9.3-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:e0e7e83f31e23c5d00ff618045ddc5e916f9e613d33c5a5823bc0b0a0feb522f"}, + {file = "yarl-1.9.3-cp39-cp39-musllinux_1_1_ppc64le.whl", hash = "sha256:af52725c7c39b0ee655befbbab5b9a1b209e01bb39128dce0db226a10014aacc"}, + {file = "yarl-1.9.3-cp39-cp39-musllinux_1_1_s390x.whl", hash = "sha256:0ab5baaea8450f4a3e241ef17e3d129b2143e38a685036b075976b9c415ea3eb"}, + {file = "yarl-1.9.3-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:6d350388ba1129bc867c6af1cd17da2b197dff0d2801036d2d7d83c2d771a682"}, + {file = "yarl-1.9.3-cp39-cp39-win32.whl", hash = "sha256:e2a16ef5fa2382af83bef4a18c1b3bcb4284c4732906aa69422cf09df9c59f1f"}, + {file = "yarl-1.9.3-cp39-cp39-win_amd64.whl", hash = "sha256:d92d897cb4b4bf915fbeb5e604c7911021a8456f0964f3b8ebbe7f9188b9eabb"}, + {file = "yarl-1.9.3-py3-none-any.whl", hash = "sha256:271d63396460b6607b588555ea27a1a02b717ca2e3f2cf53bdde4013d7790929"}, + {file = "yarl-1.9.3.tar.gz", hash = "sha256:4a14907b597ec55740f63e52d7fee0e9ee09d5b9d57a4f399a7423268e457b57"}, ] [package.dependencies] @@ -2547,9 +2775,11 @@ multidict = ">=4.0" [extras] accelerate = ["accelerate"] bnb = ["bitsandbytes"] +peft = ["peft"] quantize = ["accelerate", "datasets", "texttable"] +torch = ["torch"] [metadata] lock-version = "2.0" python-versions = ">=3.9,<3.13" -content-hash = "f2ef5f41a172d14985367a385ad6ce844c8c05b2d68d9ddcc11b41f581921c96" +content-hash = "cd3fb4b4e4aaf100f6015afa8c9adc28c22e6c0b48752452892dc3d004c1562a" diff --git a/server/pyproject.toml b/server/pyproject.toml index 673968be7da..52431eea831 100644 --- a/server/pyproject.toml +++ b/server/pyproject.toml @@ -30,14 +30,16 @@ transformers = "^4.32.1" einops = "^0.6.1" texttable = { version = "^1.6.7", optional = true } datasets = { version = "^2.14.0", optional = true } -peft = "^0.4.0" -torch = { version = "^2.0.1" } +peft = { version = "^0.4.0", optional = true } +torch = { version = "^2.1.1", optional = true } scipy = "^1.11.1" pillow = "^10.0.0" [tool.poetry.extras] +torch = ["torch"] accelerate = ["accelerate"] bnb = ["bitsandbytes"] +peft = ["peft"] quantize = ["texttable", "datasets", "accelerate"] [tool.poetry.group.dev.dependencies] @@ -47,7 +49,7 @@ pytest = "^7.3.0" [[tool.poetry.source]] name = "pytorch-gpu-src" -url = "https://download.pytorch.org/whl/cu118" +url = "https://download.pytorch.org/whl/cu121" priority = "explicit" [tool.pytest.ini_options] diff --git a/server/requirements.txt b/server/requirements.txt index 7c81c5f964a..bc1b8891216 100644 --- a/server/requirements.txt +++ b/server/requirements.txt @@ -1,38 +1,23 @@ -accelerate==0.20.3 ; python_version >= "3.9" and python_version < "3.13" -aiohttp==3.8.5 ; python_version >= "3.9" and python_version < "3.13" -aiosignal==1.3.1 ; python_version >= "3.9" and python_version < "3.13" -async-timeout==4.0.3 ; python_version >= "3.9" and python_version < "3.13" -attrs==23.1.0 ; python_version >= "3.9" and python_version < "3.13" backoff==2.2.1 ; python_version >= "3.9" and python_version < "3.13" -bitsandbytes==0.41.1 ; python_version >= "3.9" and python_version < "3.13" -certifi==2023.7.22 ; python_version >= "3.9" and python_version < "3.13" -charset-normalizer==3.2.0 ; python_version >= "3.9" and python_version < "3.13" +bitsandbytes==0.41.2.post2 ; python_version >= "3.9" and python_version < "3.13" +certifi==2023.11.17 ; python_version >= "3.9" and python_version < "3.13" +charset-normalizer==3.3.2 ; python_version >= "3.9" and python_version < "3.13" click==8.1.7 ; python_version >= "3.9" and python_version < "3.13" colorama==0.4.6 ; python_version >= "3.9" and python_version < "3.13" and (sys_platform == "win32" or platform_system == "Windows") -datasets==2.14.5 ; python_version >= "3.9" and python_version < "3.13" deprecated==1.2.14 ; python_version >= "3.9" and python_version < "3.13" -dill==0.3.7 ; python_version >= "3.9" and python_version < "3.13" einops==0.6.1 ; python_version >= "3.9" and python_version < "3.13" -filelock==3.12.4 ; python_version >= "3.9" and python_version < "3.13" -frozenlist==1.4.0 ; python_version >= "3.9" and python_version < "3.13" -fsspec==2023.6.0 ; python_version >= "3.9" and python_version < "3.13" -fsspec[http]==2023.6.0 ; python_version >= "3.9" and python_version < "3.13" -googleapis-common-protos==1.60.0 ; python_version >= "3.9" and python_version < "3.13" -grpc-interceptor==0.15.3 ; python_version >= "3.9" and python_version < "3.13" -grpcio-reflection==1.58.0 ; python_version >= "3.9" and python_version < "3.13" -grpcio-status==1.58.0 ; python_version >= "3.9" and python_version < "3.13" -grpcio==1.58.0 ; python_version >= "3.9" and python_version < "3.13" -hf-transfer==0.1.3 ; python_version >= "3.9" and python_version < "3.13" +filelock==3.13.1 ; python_version >= "3.9" and python_version < "3.13" +fsspec==2023.10.0 ; python_version >= "3.9" and python_version < "3.13" +googleapis-common-protos==1.61.0 ; python_version >= "3.9" and python_version < "3.13" +grpc-interceptor==0.15.4 ; python_version >= "3.9" and python_version < "3.13" +grpcio-reflection==1.59.3 ; python_version >= "3.9" and python_version < "3.13" +grpcio-status==1.59.3 ; python_version >= "3.9" and python_version < "3.13" +grpcio==1.59.3 ; python_version >= "3.9" and python_version < "3.13" +hf-transfer==0.1.4 ; python_version >= "3.9" and python_version < "3.13" huggingface-hub==0.16.4 ; python_version >= "3.9" and python_version < "3.13" idna==3.4 ; python_version >= "3.9" and python_version < "3.13" -jinja2==3.1.2 ; python_version >= "3.9" and python_version < "3.13" loguru==0.6.0 ; python_version >= "3.9" and python_version < "3.13" -markupsafe==2.1.3 ; python_version >= "3.9" and python_version < "3.13" -mpmath==1.3.0 ; python_version >= "3.9" and python_version < "3.13" -multidict==6.0.4 ; python_version >= "3.9" and python_version < "3.13" -multiprocess==0.70.15 ; python_version >= "3.9" and python_version < "3.13" -networkx==3.1 ; python_version >= "3.9" and python_version < "3.13" -numpy==1.26.0 ; python_version >= "3.9" and python_version < "3.13" +numpy==1.26.2 ; python_version >= "3.9" and python_version < "3.13" opentelemetry-api==1.15.0 ; python_version >= "3.9" and python_version < "3.13" opentelemetry-exporter-otlp-proto-grpc==1.15.0 ; python_version >= "3.9" and python_version < "3.13" opentelemetry-exporter-otlp-proto-http==1.15.0 ; python_version >= "3.9" and python_version < "3.13" @@ -42,34 +27,21 @@ opentelemetry-instrumentation==0.36b0 ; python_version >= "3.9" and python_versi opentelemetry-proto==1.15.0 ; python_version >= "3.9" and python_version < "3.13" opentelemetry-sdk==1.15.0 ; python_version >= "3.9" and python_version < "3.13" opentelemetry-semantic-conventions==0.36b0 ; python_version >= "3.9" and python_version < "3.13" -packaging==23.1 ; python_version >= "3.9" and python_version < "3.13" -pandas==2.1.1 ; python_version >= "3.9" and python_version < "3.13" -peft==0.4.0 ; python_version >= "3.9" and python_version < "3.13" -pillow==10.0.1 ; python_version >= "3.9" and python_version < "3.13" -protobuf==4.24.3 ; python_version >= "3.9" and python_version < "3.13" -psutil==5.9.5 ; python_version >= "3.9" and python_version < "3.13" -pyarrow==13.0.0 ; python_version >= "3.9" and python_version < "3.13" -python-dateutil==2.8.2 ; python_version >= "3.9" and python_version < "3.13" -pytz==2023.3.post1 ; python_version >= "3.9" and python_version < "3.13" +packaging==23.2 ; python_version >= "3.9" and python_version < "3.13" +pillow==10.1.0 ; python_version >= "3.9" and python_version < "3.13" +protobuf==4.25.1 ; python_version >= "3.9" and python_version < "3.13" pyyaml==6.0.1 ; python_version >= "3.9" and python_version < "3.13" -regex==2023.8.8 ; python_version >= "3.9" and python_version < "3.13" +regex==2023.10.3 ; python_version >= "3.9" and python_version < "3.13" requests==2.31.0 ; python_version >= "3.9" and python_version < "3.13" safetensors==0.3.3 ; python_version >= "3.9" and python_version < "3.13" -scipy==1.11.2 ; python_version >= "3.9" and python_version < "3.13" +scipy==1.11.4 ; python_version >= "3.9" and python_version < "3.13" sentencepiece==0.1.99 ; python_version >= "3.9" and python_version < "3.13" -setuptools==68.2.2 ; python_version >= "3.9" and python_version < "3.13" -six==1.16.0 ; python_version >= "3.9" and python_version < "3.13" -sympy==1.12 ; python_version >= "3.9" and python_version < "3.13" -texttable==1.6.7 ; python_version >= "3.9" and python_version < "3.13" +setuptools==69.0.2 ; python_version >= "3.9" and python_version < "3.13" tokenizers==0.13.3 ; python_version >= "3.9" and python_version < "3.13" -torch==2.0.1 ; python_version >= "3.9" and python_version < "3.13" tqdm==4.66.1 ; python_version >= "3.9" and python_version < "3.13" -transformers==4.33.2 ; python_version >= "3.9" and python_version < "3.13" +transformers==4.33.3 ; python_version >= "3.9" and python_version < "3.13" typer==0.6.1 ; python_version >= "3.9" and python_version < "3.13" typing-extensions==4.8.0 ; python_version >= "3.9" and python_version < "3.13" -tzdata==2023.3 ; python_version >= "3.9" and python_version < "3.13" -urllib3==2.0.5 ; python_version >= "3.9" and python_version < "3.13" +urllib3==2.1.0 ; python_version >= "3.9" and python_version < "3.13" win32-setctime==1.1.0 ; python_version >= "3.9" and python_version < "3.13" and sys_platform == "win32" -wrapt==1.15.0 ; python_version >= "3.9" and python_version < "3.13" -xxhash==3.3.0 ; python_version >= "3.9" and python_version < "3.13" -yarl==1.9.2 ; python_version >= "3.9" and python_version < "3.13" +wrapt==1.16.0 ; python_version >= "3.9" and python_version < "3.13"
Textualize__rich-3257
Export `rich.text.TextType` so it shows up in the reference Exporting `TextType` and making it visible in the docs means we'll be able to link to it from the Textual docs, where it shows up _a lot_.
[ { "content": "import re\nfrom functools import partial, reduce\nfrom math import gcd\nfrom operator import itemgetter\nfrom typing import (\n TYPE_CHECKING,\n Any,\n Callable,\n Dict,\n Iterable,\n List,\n NamedTuple,\n Optional,\n Tuple,\n Union,\n)\n\nfrom ._loop import loop_last\nfrom ._pick import pick_bool\nfrom ._wrap import divide_line\nfrom .align import AlignMethod\nfrom .cells import cell_len, set_cell_size\nfrom .containers import Lines\nfrom .control import strip_control_codes\nfrom .emoji import EmojiVariant\nfrom .jupyter import JupyterMixin\nfrom .measure import Measurement\nfrom .segment import Segment\nfrom .style import Style, StyleType\n\nif TYPE_CHECKING: # pragma: no cover\n from .console import Console, ConsoleOptions, JustifyMethod, OverflowMethod\n\nDEFAULT_JUSTIFY: \"JustifyMethod\" = \"default\"\nDEFAULT_OVERFLOW: \"OverflowMethod\" = \"fold\"\n\n\n_re_whitespace = re.compile(r\"\\s+$\")\n\nTextType = Union[str, \"Text\"]\n\"\"\"A plain string or a [Text][rich.text.Text] instance.\"\"\"\n\nGetStyleCallable = Callable[[str], Optional[StyleType]]\n\n\nclass Span(NamedTuple):\n \"\"\"A marked up region in some text.\"\"\"\n\n start: int\n \"\"\"Span start index.\"\"\"\n end: int\n \"\"\"Span end index.\"\"\"\n style: Union[str, Style]\n \"\"\"Style associated with the span.\"\"\"\n\n def __repr__(self) -> str:\n return f\"Span({self.start}, {self.end}, {self.style!r})\"\n\n def __bool__(self) -> bool:\n return self.end > self.start\n\n def split(self, offset: int) -> Tuple[\"Span\", Optional[\"Span\"]]:\n \"\"\"Split a span in to 2 from a given offset.\"\"\"\n\n if offset < self.start:\n return self, None\n if offset >= self.end:\n return self, None\n\n start, end, style = self\n span1 = Span(start, min(end, offset), style)\n span2 = Span(span1.end, end, style)\n return span1, span2\n\n def move(self, offset: int) -> \"Span\":\n \"\"\"Move start and end by a given offset.\n\n Args:\n offset (int): Number of characters to add to start and end.\n\n Returns:\n TextSpan: A new TextSpan with adjusted position.\n \"\"\"\n start, end, style = self\n return Span(start + offset, end + offset, style)\n\n def right_crop(self, offset: int) -> \"Span\":\n \"\"\"Crop the span at the given offset.\n\n Args:\n offset (int): A value between start and end.\n\n Returns:\n Span: A new (possibly smaller) span.\n \"\"\"\n start, end, style = self\n if offset >= end:\n return self\n return Span(start, min(offset, end), style)\n\n def extend(self, cells: int) -> \"Span\":\n \"\"\"Extend the span by the given number of cells.\n\n Args:\n cells (int): Additional space to add to end of span.\n\n Returns:\n Span: A span.\n \"\"\"\n if cells:\n start, end, style = self\n return Span(start, end + cells, style)\n else:\n return self\n\n\nclass Text(JupyterMixin):\n \"\"\"Text with color / style.\n\n Args:\n text (str, optional): Default unstyled text. Defaults to \"\".\n style (Union[str, Style], optional): Base style for text. Defaults to \"\".\n justify (str, optional): Justify method: \"left\", \"center\", \"full\", \"right\". Defaults to None.\n overflow (str, optional): Overflow method: \"crop\", \"fold\", \"ellipsis\". Defaults to None.\n no_wrap (bool, optional): Disable text wrapping, or None for default. Defaults to None.\n end (str, optional): Character to end text with. Defaults to \"\\\\\\\\n\".\n tab_size (int): Number of spaces per tab, or ``None`` to use ``console.tab_size``. Defaults to None.\n spans (List[Span], optional). A list of predefined style spans. Defaults to None.\n \"\"\"\n\n __slots__ = [\n \"_text\",\n \"style\",\n \"justify\",\n \"overflow\",\n \"no_wrap\",\n \"end\",\n \"tab_size\",\n \"_spans\",\n \"_length\",\n ]\n\n def __init__(\n self,\n text: str = \"\",\n style: Union[str, Style] = \"\",\n *,\n justify: Optional[\"JustifyMethod\"] = None,\n overflow: Optional[\"OverflowMethod\"] = None,\n no_wrap: Optional[bool] = None,\n end: str = \"\\n\",\n tab_size: Optional[int] = None,\n spans: Optional[List[Span]] = None,\n ) -> None:\n sanitized_text = strip_control_codes(text)\n self._text = [sanitized_text]\n self.style = style\n self.justify: Optional[\"JustifyMethod\"] = justify\n self.overflow: Optional[\"OverflowMethod\"] = overflow\n self.no_wrap = no_wrap\n self.end = end\n self.tab_size = tab_size\n self._spans: List[Span] = spans or []\n self._length: int = len(sanitized_text)\n\n def __len__(self) -> int:\n return self._length\n\n def __bool__(self) -> bool:\n return bool(self._length)\n\n def __str__(self) -> str:\n return self.plain\n\n def __repr__(self) -> str:\n return f\"<text {self.plain!r} {self._spans!r}>\"\n\n def __add__(self, other: Any) -> \"Text\":\n if isinstance(other, (str, Text)):\n result = self.copy()\n result.append(other)\n return result\n return NotImplemented\n\n def __eq__(self, other: object) -> bool:\n if not isinstance(other, Text):\n return NotImplemented\n return self.plain == other.plain and self._spans == other._spans\n\n def __contains__(self, other: object) -> bool:\n if isinstance(other, str):\n return other in self.plain\n elif isinstance(other, Text):\n return other.plain in self.plain\n return False\n\n def __getitem__(self, slice: Union[int, slice]) -> \"Text\":\n def get_text_at(offset: int) -> \"Text\":\n _Span = Span\n text = Text(\n self.plain[offset],\n spans=[\n _Span(0, 1, style)\n for start, end, style in self._spans\n if end > offset >= start\n ],\n end=\"\",\n )\n return text\n\n if isinstance(slice, int):\n return get_text_at(slice)\n else:\n start, stop, step = slice.indices(len(self.plain))\n if step == 1:\n lines = self.divide([start, stop])\n return lines[1]\n else:\n # This would be a bit of work to implement efficiently\n # For now, its not required\n raise TypeError(\"slices with step!=1 are not supported\")\n\n @property\n def cell_len(self) -> int:\n \"\"\"Get the number of cells required to render this text.\"\"\"\n return cell_len(self.plain)\n\n @property\n def markup(self) -> str:\n \"\"\"Get console markup to render this Text.\n\n Returns:\n str: A string potentially creating markup tags.\n \"\"\"\n from .markup import escape\n\n output: List[str] = []\n\n plain = self.plain\n markup_spans = [\n (0, False, self.style),\n *((span.start, False, span.style) for span in self._spans),\n *((span.end, True, span.style) for span in self._spans),\n (len(plain), True, self.style),\n ]\n markup_spans.sort(key=itemgetter(0, 1))\n position = 0\n append = output.append\n for offset, closing, style in markup_spans:\n if offset > position:\n append(escape(plain[position:offset]))\n position = offset\n if style:\n append(f\"[/{style}]\" if closing else f\"[{style}]\")\n markup = \"\".join(output)\n return markup\n\n @classmethod\n def from_markup(\n cls,\n text: str,\n *,\n style: Union[str, Style] = \"\",\n emoji: bool = True,\n emoji_variant: Optional[EmojiVariant] = None,\n justify: Optional[\"JustifyMethod\"] = None,\n overflow: Optional[\"OverflowMethod\"] = None,\n end: str = \"\\n\",\n ) -> \"Text\":\n \"\"\"Create Text instance from markup.\n\n Args:\n text (str): A string containing console markup.\n style (Union[str, Style], optional): Base style for text. Defaults to \"\".\n emoji (bool, optional): Also render emoji code. Defaults to True.\n emoji_variant (str, optional): Optional emoji variant, either \"text\" or \"emoji\". Defaults to None.\n justify (str, optional): Justify method: \"left\", \"center\", \"full\", \"right\". Defaults to None.\n overflow (str, optional): Overflow method: \"crop\", \"fold\", \"ellipsis\". Defaults to None.\n end (str, optional): Character to end text with. Defaults to \"\\\\\\\\n\".\n\n Returns:\n Text: A Text instance with markup rendered.\n \"\"\"\n from .markup import render\n\n rendered_text = render(text, style, emoji=emoji, emoji_variant=emoji_variant)\n rendered_text.justify = justify\n rendered_text.overflow = overflow\n rendered_text.end = end\n return rendered_text\n\n @classmethod\n def from_ansi(\n cls,\n text: str,\n *,\n style: Union[str, Style] = \"\",\n justify: Optional[\"JustifyMethod\"] = None,\n overflow: Optional[\"OverflowMethod\"] = None,\n no_wrap: Optional[bool] = None,\n end: str = \"\\n\",\n tab_size: Optional[int] = 8,\n ) -> \"Text\":\n \"\"\"Create a Text object from a string containing ANSI escape codes.\n\n Args:\n text (str): A string containing escape codes.\n style (Union[str, Style], optional): Base style for text. Defaults to \"\".\n justify (str, optional): Justify method: \"left\", \"center\", \"full\", \"right\". Defaults to None.\n overflow (str, optional): Overflow method: \"crop\", \"fold\", \"ellipsis\". Defaults to None.\n no_wrap (bool, optional): Disable text wrapping, or None for default. Defaults to None.\n end (str, optional): Character to end text with. Defaults to \"\\\\\\\\n\".\n tab_size (int): Number of spaces per tab, or ``None`` to use ``console.tab_size``. Defaults to None.\n \"\"\"\n from .ansi import AnsiDecoder\n\n joiner = Text(\n \"\\n\",\n justify=justify,\n overflow=overflow,\n no_wrap=no_wrap,\n end=end,\n tab_size=tab_size,\n style=style,\n )\n decoder = AnsiDecoder()\n result = joiner.join(line for line in decoder.decode(text))\n return result\n\n @classmethod\n def styled(\n cls,\n text: str,\n style: StyleType = \"\",\n *,\n justify: Optional[\"JustifyMethod\"] = None,\n overflow: Optional[\"OverflowMethod\"] = None,\n ) -> \"Text\":\n \"\"\"Construct a Text instance with a pre-applied styled. A style applied in this way won't be used\n to pad the text when it is justified.\n\n Args:\n text (str): A string containing console markup.\n style (Union[str, Style]): Style to apply to the text. Defaults to \"\".\n justify (str, optional): Justify method: \"left\", \"center\", \"full\", \"right\". Defaults to None.\n overflow (str, optional): Overflow method: \"crop\", \"fold\", \"ellipsis\". Defaults to None.\n\n Returns:\n Text: A text instance with a style applied to the entire string.\n \"\"\"\n styled_text = cls(text, justify=justify, overflow=overflow)\n styled_text.stylize(style)\n return styled_text\n\n @classmethod\n def assemble(\n cls,\n *parts: Union[str, \"Text\", Tuple[str, StyleType]],\n style: Union[str, Style] = \"\",\n justify: Optional[\"JustifyMethod\"] = None,\n overflow: Optional[\"OverflowMethod\"] = None,\n no_wrap: Optional[bool] = None,\n end: str = \"\\n\",\n tab_size: int = 8,\n meta: Optional[Dict[str, Any]] = None,\n ) -> \"Text\":\n \"\"\"Construct a text instance by combining a sequence of strings with optional styles.\n The positional arguments should be either strings, or a tuple of string + style.\n\n Args:\n style (Union[str, Style], optional): Base style for text. Defaults to \"\".\n justify (str, optional): Justify method: \"left\", \"center\", \"full\", \"right\". Defaults to None.\n overflow (str, optional): Overflow method: \"crop\", \"fold\", \"ellipsis\". Defaults to None.\n no_wrap (bool, optional): Disable text wrapping, or None for default. Defaults to None.\n end (str, optional): Character to end text with. Defaults to \"\\\\\\\\n\".\n tab_size (int): Number of spaces per tab, or ``None`` to use ``console.tab_size``. Defaults to None.\n meta (Dict[str, Any], optional). Meta data to apply to text, or None for no meta data. Default to None\n\n Returns:\n Text: A new text instance.\n \"\"\"\n text = cls(\n style=style,\n justify=justify,\n overflow=overflow,\n no_wrap=no_wrap,\n end=end,\n tab_size=tab_size,\n )\n append = text.append\n _Text = Text\n for part in parts:\n if isinstance(part, (_Text, str)):\n append(part)\n else:\n append(*part)\n if meta:\n text.apply_meta(meta)\n return text\n\n @property\n def plain(self) -> str:\n \"\"\"Get the text as a single string.\"\"\"\n if len(self._text) != 1:\n self._text[:] = [\"\".join(self._text)]\n return self._text[0]\n\n @plain.setter\n def plain(self, new_text: str) -> None:\n \"\"\"Set the text to a new value.\"\"\"\n if new_text != self.plain:\n sanitized_text = strip_control_codes(new_text)\n self._text[:] = [sanitized_text]\n old_length = self._length\n self._length = len(sanitized_text)\n if old_length > self._length:\n self._trim_spans()\n\n @property\n def spans(self) -> List[Span]:\n \"\"\"Get a reference to the internal list of spans.\"\"\"\n return self._spans\n\n @spans.setter\n def spans(self, spans: List[Span]) -> None:\n \"\"\"Set spans.\"\"\"\n self._spans = spans[:]\n\n def blank_copy(self, plain: str = \"\") -> \"Text\":\n \"\"\"Return a new Text instance with copied metadata (but not the string or spans).\"\"\"\n copy_self = Text(\n plain,\n style=self.style,\n justify=self.justify,\n overflow=self.overflow,\n no_wrap=self.no_wrap,\n end=self.end,\n tab_size=self.tab_size,\n )\n return copy_self\n\n def copy(self) -> \"Text\":\n \"\"\"Return a copy of this instance.\"\"\"\n copy_self = Text(\n self.plain,\n style=self.style,\n justify=self.justify,\n overflow=self.overflow,\n no_wrap=self.no_wrap,\n end=self.end,\n tab_size=self.tab_size,\n )\n copy_self._spans[:] = self._spans\n return copy_self\n\n def stylize(\n self,\n style: Union[str, Style],\n start: int = 0,\n end: Optional[int] = None,\n ) -> None:\n \"\"\"Apply a style to the text, or a portion of the text.\n\n Args:\n style (Union[str, Style]): Style instance or style definition to apply.\n start (int): Start offset (negative indexing is supported). Defaults to 0.\n end (Optional[int], optional): End offset (negative indexing is supported), or None for end of text. Defaults to None.\n \"\"\"\n if style:\n length = len(self)\n if start < 0:\n start = length + start\n if end is None:\n end = length\n if end < 0:\n end = length + end\n if start >= length or end <= start:\n # Span not in text or not valid\n return\n self._spans.append(Span(start, min(length, end), style))\n\n def stylize_before(\n self,\n style: Union[str, Style],\n start: int = 0,\n end: Optional[int] = None,\n ) -> None:\n \"\"\"Apply a style to the text, or a portion of the text. Styles will be applied before other styles already present.\n\n Args:\n style (Union[str, Style]): Style instance or style definition to apply.\n start (int): Start offset (negative indexing is supported). Defaults to 0.\n end (Optional[int], optional): End offset (negative indexing is supported), or None for end of text. Defaults to None.\n \"\"\"\n if style:\n length = len(self)\n if start < 0:\n start = length + start\n if end is None:\n end = length\n if end < 0:\n end = length + end\n if start >= length or end <= start:\n # Span not in text or not valid\n return\n self._spans.insert(0, Span(start, min(length, end), style))\n\n def apply_meta(\n self, meta: Dict[str, Any], start: int = 0, end: Optional[int] = None\n ) -> None:\n \"\"\"Apply metadata to the text, or a portion of the text.\n\n Args:\n meta (Dict[str, Any]): A dict of meta information.\n start (int): Start offset (negative indexing is supported). Defaults to 0.\n end (Optional[int], optional): End offset (negative indexing is supported), or None for end of text. Defaults to None.\n\n \"\"\"\n style = Style.from_meta(meta)\n self.stylize(style, start=start, end=end)\n\n def on(self, meta: Optional[Dict[str, Any]] = None, **handlers: Any) -> \"Text\":\n \"\"\"Apply event handlers (used by Textual project).\n\n Example:\n >>> from rich.text import Text\n >>> text = Text(\"hello world\")\n >>> text.on(click=\"view.toggle('world')\")\n\n Args:\n meta (Dict[str, Any]): Mapping of meta information.\n **handlers: Keyword args are prefixed with \"@\" to defined handlers.\n\n Returns:\n Text: Self is returned to method may be chained.\n \"\"\"\n meta = {} if meta is None else meta\n meta.update({f\"@{key}\": value for key, value in handlers.items()})\n self.stylize(Style.from_meta(meta))\n return self\n\n def remove_suffix(self, suffix: str) -> None:\n \"\"\"Remove a suffix if it exists.\n\n Args:\n suffix (str): Suffix to remove.\n \"\"\"\n if self.plain.endswith(suffix):\n self.right_crop(len(suffix))\n\n def get_style_at_offset(self, console: \"Console\", offset: int) -> Style:\n \"\"\"Get the style of a character at give offset.\n\n Args:\n console (~Console): Console where text will be rendered.\n offset (int): Offset in to text (negative indexing supported)\n\n Returns:\n Style: A Style instance.\n \"\"\"\n # TODO: This is a little inefficient, it is only used by full justify\n if offset < 0:\n offset = len(self) + offset\n get_style = console.get_style\n style = get_style(self.style).copy()\n for start, end, span_style in self._spans:\n if end > offset >= start:\n style += get_style(span_style, default=\"\")\n return style\n\n def extend_style(self, spaces: int) -> None:\n \"\"\"Extend the Text given number of spaces where the spaces have the same style as the last character.\n\n Args:\n spaces (int): Number of spaces to add to the Text.\n \"\"\"\n if spaces <= 0:\n return\n spans = self.spans\n new_spaces = \" \" * spaces\n if spans:\n end_offset = len(self)\n self._spans[:] = [\n span.extend(spaces) if span.end >= end_offset else span\n for span in spans\n ]\n self._text.append(new_spaces)\n self._length += spaces\n else:\n self.plain += new_spaces\n\n def highlight_regex(\n self,\n re_highlight: str,\n style: Optional[Union[GetStyleCallable, StyleType]] = None,\n *,\n style_prefix: str = \"\",\n ) -> int:\n \"\"\"Highlight text with a regular expression, where group names are\n translated to styles.\n\n Args:\n re_highlight (str): A regular expression.\n style (Union[GetStyleCallable, StyleType]): Optional style to apply to whole match, or a callable\n which accepts the matched text and returns a style. Defaults to None.\n style_prefix (str, optional): Optional prefix to add to style group names.\n\n Returns:\n int: Number of regex matches\n \"\"\"\n count = 0\n append_span = self._spans.append\n _Span = Span\n plain = self.plain\n for match in re.finditer(re_highlight, plain):\n get_span = match.span\n if style:\n start, end = get_span()\n match_style = style(plain[start:end]) if callable(style) else style\n if match_style is not None and end > start:\n append_span(_Span(start, end, match_style))\n\n count += 1\n for name in match.groupdict().keys():\n start, end = get_span(name)\n if start != -1 and end > start:\n append_span(_Span(start, end, f\"{style_prefix}{name}\"))\n return count\n\n def highlight_words(\n self,\n words: Iterable[str],\n style: Union[str, Style],\n *,\n case_sensitive: bool = True,\n ) -> int:\n \"\"\"Highlight words with a style.\n\n Args:\n words (Iterable[str]): Words to highlight.\n style (Union[str, Style]): Style to apply.\n case_sensitive (bool, optional): Enable case sensitive matching. Defaults to True.\n\n Returns:\n int: Number of words highlighted.\n \"\"\"\n re_words = \"|\".join(re.escape(word) for word in words)\n add_span = self._spans.append\n count = 0\n _Span = Span\n for match in re.finditer(\n re_words, self.plain, flags=0 if case_sensitive else re.IGNORECASE\n ):\n start, end = match.span(0)\n add_span(_Span(start, end, style))\n count += 1\n return count\n\n def rstrip(self) -> None:\n \"\"\"Strip whitespace from end of text.\"\"\"\n self.plain = self.plain.rstrip()\n\n def rstrip_end(self, size: int) -> None:\n \"\"\"Remove whitespace beyond a certain width at the end of the text.\n\n Args:\n size (int): The desired size of the text.\n \"\"\"\n text_length = len(self)\n if text_length > size:\n excess = text_length - size\n whitespace_match = _re_whitespace.search(self.plain)\n if whitespace_match is not None:\n whitespace_count = len(whitespace_match.group(0))\n self.right_crop(min(whitespace_count, excess))\n\n def set_length(self, new_length: int) -> None:\n \"\"\"Set new length of the text, clipping or padding is required.\"\"\"\n length = len(self)\n if length != new_length:\n if length < new_length:\n self.pad_right(new_length - length)\n else:\n self.right_crop(length - new_length)\n\n def __rich_console__(\n self, console: \"Console\", options: \"ConsoleOptions\"\n ) -> Iterable[Segment]:\n tab_size: int = console.tab_size if self.tab_size is None else self.tab_size\n justify = self.justify or options.justify or DEFAULT_JUSTIFY\n\n overflow = self.overflow or options.overflow or DEFAULT_OVERFLOW\n\n lines = self.wrap(\n console,\n options.max_width,\n justify=justify,\n overflow=overflow,\n tab_size=tab_size or 8,\n no_wrap=pick_bool(self.no_wrap, options.no_wrap, False),\n )\n all_lines = Text(\"\\n\").join(lines)\n yield from all_lines.render(console, end=self.end)\n\n def __rich_measure__(\n self, console: \"Console\", options: \"ConsoleOptions\"\n ) -> Measurement:\n text = self.plain\n lines = text.splitlines()\n max_text_width = max(cell_len(line) for line in lines) if lines else 0\n words = text.split()\n min_text_width = (\n max(cell_len(word) for word in words) if words else max_text_width\n )\n return Measurement(min_text_width, max_text_width)\n\n def render(self, console: \"Console\", end: str = \"\") -> Iterable[\"Segment\"]:\n \"\"\"Render the text as Segments.\n\n Args:\n console (Console): Console instance.\n end (Optional[str], optional): Optional end character.\n\n Returns:\n Iterable[Segment]: Result of render that may be written to the console.\n \"\"\"\n _Segment = Segment\n text = self.plain\n if not self._spans:\n yield Segment(text)\n if end:\n yield _Segment(end)\n return\n get_style = partial(console.get_style, default=Style.null())\n\n enumerated_spans = list(enumerate(self._spans, 1))\n style_map = {index: get_style(span.style) for index, span in enumerated_spans}\n style_map[0] = get_style(self.style)\n\n spans = [\n (0, False, 0),\n *((span.start, False, index) for index, span in enumerated_spans),\n *((span.end, True, index) for index, span in enumerated_spans),\n (len(text), True, 0),\n ]\n spans.sort(key=itemgetter(0, 1))\n\n stack: List[int] = []\n stack_append = stack.append\n stack_pop = stack.remove\n\n style_cache: Dict[Tuple[Style, ...], Style] = {}\n style_cache_get = style_cache.get\n combine = Style.combine\n\n def get_current_style() -> Style:\n \"\"\"Construct current style from stack.\"\"\"\n styles = tuple(style_map[_style_id] for _style_id in sorted(stack))\n cached_style = style_cache_get(styles)\n if cached_style is not None:\n return cached_style\n current_style = combine(styles)\n style_cache[styles] = current_style\n return current_style\n\n for (offset, leaving, style_id), (next_offset, _, _) in zip(spans, spans[1:]):\n if leaving:\n stack_pop(style_id)\n else:\n stack_append(style_id)\n if next_offset > offset:\n yield _Segment(text[offset:next_offset], get_current_style())\n if end:\n yield _Segment(end)\n\n def join(self, lines: Iterable[\"Text\"]) -> \"Text\":\n \"\"\"Join text together with this instance as the separator.\n\n Args:\n lines (Iterable[Text]): An iterable of Text instances to join.\n\n Returns:\n Text: A new text instance containing join text.\n \"\"\"\n\n new_text = self.blank_copy()\n\n def iter_text() -> Iterable[\"Text\"]:\n if self.plain:\n for last, line in loop_last(lines):\n yield line\n if not last:\n yield self\n else:\n yield from lines\n\n extend_text = new_text._text.extend\n append_span = new_text._spans.append\n extend_spans = new_text._spans.extend\n offset = 0\n _Span = Span\n\n for text in iter_text():\n extend_text(text._text)\n if text.style:\n append_span(_Span(offset, offset + len(text), text.style))\n extend_spans(\n _Span(offset + start, offset + end, style)\n for start, end, style in text._spans\n )\n offset += len(text)\n new_text._length = offset\n return new_text\n\n def expand_tabs(self, tab_size: Optional[int] = None) -> None:\n \"\"\"Converts tabs to spaces.\n\n Args:\n tab_size (int, optional): Size of tabs. Defaults to 8.\n\n \"\"\"\n if \"\\t\" not in self.plain:\n return\n if tab_size is None:\n tab_size = self.tab_size\n if tab_size is None:\n tab_size = 8\n\n new_text: List[Text] = []\n append = new_text.append\n\n for line in self.split(\"\\n\", include_separator=True):\n if \"\\t\" not in line.plain:\n append(line)\n else:\n cell_position = 0\n parts = line.split(\"\\t\", include_separator=True)\n for part in parts:\n if part.plain.endswith(\"\\t\"):\n part._text[-1] = part._text[-1][:-1] + \" \"\n cell_position += part.cell_len\n tab_remainder = cell_position % tab_size\n if tab_remainder:\n spaces = tab_size - tab_remainder\n part.extend_style(spaces)\n cell_position += spaces\n else:\n cell_position += part.cell_len\n append(part)\n\n result = Text(\"\").join(new_text)\n\n self._text = [result.plain]\n self._length = len(self.plain)\n self._spans[:] = result._spans\n\n def truncate(\n self,\n max_width: int,\n *,\n overflow: Optional[\"OverflowMethod\"] = None,\n pad: bool = False,\n ) -> None:\n \"\"\"Truncate text if it is longer that a given width.\n\n Args:\n max_width (int): Maximum number of characters in text.\n overflow (str, optional): Overflow method: \"crop\", \"fold\", or \"ellipsis\". Defaults to None, to use self.overflow.\n pad (bool, optional): Pad with spaces if the length is less than max_width. Defaults to False.\n \"\"\"\n _overflow = overflow or self.overflow or DEFAULT_OVERFLOW\n if _overflow != \"ignore\":\n length = cell_len(self.plain)\n if length > max_width:\n if _overflow == \"ellipsis\":\n self.plain = set_cell_size(self.plain, max_width - 1) + \"…\"\n else:\n self.plain = set_cell_size(self.plain, max_width)\n if pad and length < max_width:\n spaces = max_width - length\n self._text = [f\"{self.plain}{' ' * spaces}\"]\n self._length = len(self.plain)\n\n def _trim_spans(self) -> None:\n \"\"\"Remove or modify any spans that are over the end of the text.\"\"\"\n max_offset = len(self.plain)\n _Span = Span\n self._spans[:] = [\n (\n span\n if span.end < max_offset\n else _Span(span.start, min(max_offset, span.end), span.style)\n )\n for span in self._spans\n if span.start < max_offset\n ]\n\n def pad(self, count: int, character: str = \" \") -> None:\n \"\"\"Pad left and right with a given number of characters.\n\n Args:\n count (int): Width of padding.\n character (str): The character to pad with. Must be a string of length 1.\n \"\"\"\n assert len(character) == 1, \"Character must be a string of length 1\"\n if count:\n pad_characters = character * count\n self.plain = f\"{pad_characters}{self.plain}{pad_characters}\"\n _Span = Span\n self._spans[:] = [\n _Span(start + count, end + count, style)\n for start, end, style in self._spans\n ]\n\n def pad_left(self, count: int, character: str = \" \") -> None:\n \"\"\"Pad the left with a given character.\n\n Args:\n count (int): Number of characters to pad.\n character (str, optional): Character to pad with. Defaults to \" \".\n \"\"\"\n assert len(character) == 1, \"Character must be a string of length 1\"\n if count:\n self.plain = f\"{character * count}{self.plain}\"\n _Span = Span\n self._spans[:] = [\n _Span(start + count, end + count, style)\n for start, end, style in self._spans\n ]\n\n def pad_right(self, count: int, character: str = \" \") -> None:\n \"\"\"Pad the right with a given character.\n\n Args:\n count (int): Number of characters to pad.\n character (str, optional): Character to pad with. Defaults to \" \".\n \"\"\"\n assert len(character) == 1, \"Character must be a string of length 1\"\n if count:\n self.plain = f\"{self.plain}{character * count}\"\n\n def align(self, align: AlignMethod, width: int, character: str = \" \") -> None:\n \"\"\"Align text to a given width.\n\n Args:\n align (AlignMethod): One of \"left\", \"center\", or \"right\".\n width (int): Desired width.\n character (str, optional): Character to pad with. Defaults to \" \".\n \"\"\"\n self.truncate(width)\n excess_space = width - cell_len(self.plain)\n if excess_space:\n if align == \"left\":\n self.pad_right(excess_space, character)\n elif align == \"center\":\n left = excess_space // 2\n self.pad_left(left, character)\n self.pad_right(excess_space - left, character)\n else:\n self.pad_left(excess_space, character)\n\n def append(\n self, text: Union[\"Text\", str], style: Optional[Union[str, \"Style\"]] = None\n ) -> \"Text\":\n \"\"\"Add text with an optional style.\n\n Args:\n text (Union[Text, str]): A str or Text to append.\n style (str, optional): A style name. Defaults to None.\n\n Returns:\n Text: Returns self for chaining.\n \"\"\"\n\n if not isinstance(text, (str, Text)):\n raise TypeError(\"Only str or Text can be appended to Text\")\n\n if len(text):\n if isinstance(text, str):\n sanitized_text = strip_control_codes(text)\n self._text.append(sanitized_text)\n offset = len(self)\n text_length = len(sanitized_text)\n if style:\n self._spans.append(Span(offset, offset + text_length, style))\n self._length += text_length\n elif isinstance(text, Text):\n _Span = Span\n if style is not None:\n raise ValueError(\n \"style must not be set when appending Text instance\"\n )\n text_length = self._length\n if text.style:\n self._spans.append(\n _Span(text_length, text_length + len(text), text.style)\n )\n self._text.append(text.plain)\n self._spans.extend(\n _Span(start + text_length, end + text_length, style)\n for start, end, style in text._spans\n )\n self._length += len(text)\n return self\n\n def append_text(self, text: \"Text\") -> \"Text\":\n \"\"\"Append another Text instance. This method is more performant that Text.append, but\n only works for Text.\n\n Args:\n text (Text): The Text instance to append to this instance.\n\n Returns:\n Text: Returns self for chaining.\n \"\"\"\n _Span = Span\n text_length = self._length\n if text.style:\n self._spans.append(_Span(text_length, text_length + len(text), text.style))\n self._text.append(text.plain)\n self._spans.extend(\n _Span(start + text_length, end + text_length, style)\n for start, end, style in text._spans\n )\n self._length += len(text)\n return self\n\n def append_tokens(\n self, tokens: Iterable[Tuple[str, Optional[StyleType]]]\n ) -> \"Text\":\n \"\"\"Append iterable of str and style. Style may be a Style instance or a str style definition.\n\n Args:\n tokens (Iterable[Tuple[str, Optional[StyleType]]]): An iterable of tuples containing str content and style.\n\n Returns:\n Text: Returns self for chaining.\n \"\"\"\n append_text = self._text.append\n append_span = self._spans.append\n _Span = Span\n offset = len(self)\n for content, style in tokens:\n append_text(content)\n if style:\n append_span(_Span(offset, offset + len(content), style))\n offset += len(content)\n self._length = offset\n return self\n\n def copy_styles(self, text: \"Text\") -> None:\n \"\"\"Copy styles from another Text instance.\n\n Args:\n text (Text): A Text instance to copy styles from, must be the same length.\n \"\"\"\n self._spans.extend(text._spans)\n\n def split(\n self,\n separator: str = \"\\n\",\n *,\n include_separator: bool = False,\n allow_blank: bool = False,\n ) -> Lines:\n \"\"\"Split rich text in to lines, preserving styles.\n\n Args:\n separator (str, optional): String to split on. Defaults to \"\\\\\\\\n\".\n include_separator (bool, optional): Include the separator in the lines. Defaults to False.\n allow_blank (bool, optional): Return a blank line if the text ends with a separator. Defaults to False.\n\n Returns:\n List[RichText]: A list of rich text, one per line of the original.\n \"\"\"\n assert separator, \"separator must not be empty\"\n\n text = self.plain\n if separator not in text:\n return Lines([self.copy()])\n\n if include_separator:\n lines = self.divide(\n match.end() for match in re.finditer(re.escape(separator), text)\n )\n else:\n\n def flatten_spans() -> Iterable[int]:\n for match in re.finditer(re.escape(separator), text):\n start, end = match.span()\n yield start\n yield end\n\n lines = Lines(\n line for line in self.divide(flatten_spans()) if line.plain != separator\n )\n\n if not allow_blank and text.endswith(separator):\n lines.pop()\n\n return lines\n\n def divide(self, offsets: Iterable[int]) -> Lines:\n \"\"\"Divide text in to a number of lines at given offsets.\n\n Args:\n offsets (Iterable[int]): Offsets used to divide text.\n\n Returns:\n Lines: New RichText instances between offsets.\n \"\"\"\n _offsets = list(offsets)\n\n if not _offsets:\n return Lines([self.copy()])\n\n text = self.plain\n text_length = len(text)\n divide_offsets = [0, *_offsets, text_length]\n line_ranges = list(zip(divide_offsets, divide_offsets[1:]))\n\n style = self.style\n justify = self.justify\n overflow = self.overflow\n _Text = Text\n new_lines = Lines(\n _Text(\n text[start:end],\n style=style,\n justify=justify,\n overflow=overflow,\n )\n for start, end in line_ranges\n )\n if not self._spans:\n return new_lines\n\n _line_appends = [line._spans.append for line in new_lines._lines]\n line_count = len(line_ranges)\n _Span = Span\n\n for span_start, span_end, style in self._spans:\n lower_bound = 0\n upper_bound = line_count\n start_line_no = (lower_bound + upper_bound) // 2\n\n while True:\n line_start, line_end = line_ranges[start_line_no]\n if span_start < line_start:\n upper_bound = start_line_no - 1\n elif span_start > line_end:\n lower_bound = start_line_no + 1\n else:\n break\n start_line_no = (lower_bound + upper_bound) // 2\n\n if span_end < line_end:\n end_line_no = start_line_no\n else:\n end_line_no = lower_bound = start_line_no\n upper_bound = line_count\n\n while True:\n line_start, line_end = line_ranges[end_line_no]\n if span_end < line_start:\n upper_bound = end_line_no - 1\n elif span_end > line_end:\n lower_bound = end_line_no + 1\n else:\n break\n end_line_no = (lower_bound + upper_bound) // 2\n\n for line_no in range(start_line_no, end_line_no + 1):\n line_start, line_end = line_ranges[line_no]\n new_start = max(0, span_start - line_start)\n new_end = min(span_end - line_start, line_end - line_start)\n if new_end > new_start:\n _line_appends[line_no](_Span(new_start, new_end, style))\n\n return new_lines\n\n def right_crop(self, amount: int = 1) -> None:\n \"\"\"Remove a number of characters from the end of the text.\"\"\"\n max_offset = len(self.plain) - amount\n _Span = Span\n self._spans[:] = [\n (\n span\n if span.end < max_offset\n else _Span(span.start, min(max_offset, span.end), span.style)\n )\n for span in self._spans\n if span.start < max_offset\n ]\n self._text = [self.plain[:-amount]]\n self._length -= amount\n\n def wrap(\n self,\n console: \"Console\",\n width: int,\n *,\n justify: Optional[\"JustifyMethod\"] = None,\n overflow: Optional[\"OverflowMethod\"] = None,\n tab_size: int = 8,\n no_wrap: Optional[bool] = None,\n ) -> Lines:\n \"\"\"Word wrap the text.\n\n Args:\n console (Console): Console instance.\n width (int): Number of cells available per line.\n justify (str, optional): Justify method: \"default\", \"left\", \"center\", \"full\", \"right\". Defaults to \"default\".\n overflow (str, optional): Overflow method: \"crop\", \"fold\", or \"ellipsis\". Defaults to None.\n tab_size (int, optional): Default tab size. Defaults to 8.\n no_wrap (bool, optional): Disable wrapping, Defaults to False.\n\n Returns:\n Lines: Number of lines.\n \"\"\"\n wrap_justify = justify or self.justify or DEFAULT_JUSTIFY\n wrap_overflow = overflow or self.overflow or DEFAULT_OVERFLOW\n\n no_wrap = pick_bool(no_wrap, self.no_wrap, False) or overflow == \"ignore\"\n\n lines = Lines()\n for line in self.split(allow_blank=True):\n if \"\\t\" in line:\n line.expand_tabs(tab_size)\n if no_wrap:\n new_lines = Lines([line])\n else:\n offsets = divide_line(str(line), width, fold=wrap_overflow == \"fold\")\n new_lines = line.divide(offsets)\n for line in new_lines:\n line.rstrip_end(width)\n if wrap_justify:\n new_lines.justify(\n console, width, justify=wrap_justify, overflow=wrap_overflow\n )\n for line in new_lines:\n line.truncate(width, overflow=wrap_overflow)\n lines.extend(new_lines)\n return lines\n\n def fit(self, width: int) -> Lines:\n \"\"\"Fit the text in to given width by chopping in to lines.\n\n Args:\n width (int): Maximum characters in a line.\n\n Returns:\n Lines: Lines container.\n \"\"\"\n lines: Lines = Lines()\n append = lines.append\n for line in self.split():\n line.set_length(width)\n append(line)\n return lines\n\n def detect_indentation(self) -> int:\n \"\"\"Auto-detect indentation of code.\n\n Returns:\n int: Number of spaces used to indent code.\n \"\"\"\n\n _indentations = {\n len(match.group(1))\n for match in re.finditer(r\"^( *)(.*)$\", self.plain, flags=re.MULTILINE)\n }\n\n try:\n indentation = (\n reduce(gcd, [indent for indent in _indentations if not indent % 2]) or 1\n )\n except TypeError:\n indentation = 1\n\n return indentation\n\n def with_indent_guides(\n self,\n indent_size: Optional[int] = None,\n *,\n character: str = \"│\",\n style: StyleType = \"dim green\",\n ) -> \"Text\":\n \"\"\"Adds indent guide lines to text.\n\n Args:\n indent_size (Optional[int]): Size of indentation, or None to auto detect. Defaults to None.\n character (str, optional): Character to use for indentation. Defaults to \"│\".\n style (Union[Style, str], optional): Style of indent guides.\n\n Returns:\n Text: New text with indentation guides.\n \"\"\"\n\n _indent_size = self.detect_indentation() if indent_size is None else indent_size\n\n text = self.copy()\n text.expand_tabs()\n indent_line = f\"{character}{' ' * (_indent_size - 1)}\"\n\n re_indent = re.compile(r\"^( *)(.*)$\")\n new_lines: List[Text] = []\n add_line = new_lines.append\n blank_lines = 0\n for line in text.split(allow_blank=True):\n match = re_indent.match(line.plain)\n if not match or not match.group(2):\n blank_lines += 1\n continue\n indent = match.group(1)\n full_indents, remaining_space = divmod(len(indent), _indent_size)\n new_indent = f\"{indent_line * full_indents}{' ' * remaining_space}\"\n line.plain = new_indent + line.plain[len(new_indent) :]\n line.stylize(style, 0, len(new_indent))\n if blank_lines:\n new_lines.extend([Text(new_indent, style=style)] * blank_lines)\n blank_lines = 0\n add_line(line)\n if blank_lines:\n new_lines.extend([Text(\"\", style=style)] * blank_lines)\n\n new_text = text.blank_copy(\"\\n\").join(new_lines)\n return new_text\n\n\nif __name__ == \"__main__\": # pragma: no cover\n from rich.console import Console\n\n text = Text(\n \"\"\"\\nLorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat. Duis aute irure dolor in reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla pariatur. Excepteur sint occaecat cupidatat non proident, sunt in culpa qui officia deserunt mollit anim id est laborum.\\n\"\"\"\n )\n text.highlight_words([\"Lorem\"], \"bold\")\n text.highlight_words([\"ipsum\"], \"italic\")\n\n console = Console()\n\n console.rule(\"justify='left'\")\n console.print(text, style=\"red\")\n console.print()\n\n console.rule(\"justify='center'\")\n console.print(text, style=\"green\", justify=\"center\")\n console.print()\n\n console.rule(\"justify='right'\")\n console.print(text, style=\"blue\", justify=\"right\")\n console.print()\n\n console.rule(\"justify='full'\")\n console.print(text, style=\"magenta\", justify=\"full\")\n console.print()\n", "path": "rich/text.py" } ]
[ { "content": "import re\nfrom functools import partial, reduce\nfrom math import gcd\nfrom operator import itemgetter\nfrom typing import (\n TYPE_CHECKING,\n Any,\n Callable,\n Dict,\n Iterable,\n List,\n NamedTuple,\n Optional,\n Tuple,\n Union,\n)\n\nfrom ._loop import loop_last\nfrom ._pick import pick_bool\nfrom ._wrap import divide_line\nfrom .align import AlignMethod\nfrom .cells import cell_len, set_cell_size\nfrom .containers import Lines\nfrom .control import strip_control_codes\nfrom .emoji import EmojiVariant\nfrom .jupyter import JupyterMixin\nfrom .measure import Measurement\nfrom .segment import Segment\nfrom .style import Style, StyleType\n\nif TYPE_CHECKING: # pragma: no cover\n from .console import Console, ConsoleOptions, JustifyMethod, OverflowMethod\n\nDEFAULT_JUSTIFY: \"JustifyMethod\" = \"default\"\nDEFAULT_OVERFLOW: \"OverflowMethod\" = \"fold\"\n\n\n_re_whitespace = re.compile(r\"\\s+$\")\n\nTextType = Union[str, \"Text\"]\n\"\"\"A plain string or a :class:`Text` instance.\"\"\"\n\nGetStyleCallable = Callable[[str], Optional[StyleType]]\n\n\nclass Span(NamedTuple):\n \"\"\"A marked up region in some text.\"\"\"\n\n start: int\n \"\"\"Span start index.\"\"\"\n end: int\n \"\"\"Span end index.\"\"\"\n style: Union[str, Style]\n \"\"\"Style associated with the span.\"\"\"\n\n def __repr__(self) -> str:\n return f\"Span({self.start}, {self.end}, {self.style!r})\"\n\n def __bool__(self) -> bool:\n return self.end > self.start\n\n def split(self, offset: int) -> Tuple[\"Span\", Optional[\"Span\"]]:\n \"\"\"Split a span in to 2 from a given offset.\"\"\"\n\n if offset < self.start:\n return self, None\n if offset >= self.end:\n return self, None\n\n start, end, style = self\n span1 = Span(start, min(end, offset), style)\n span2 = Span(span1.end, end, style)\n return span1, span2\n\n def move(self, offset: int) -> \"Span\":\n \"\"\"Move start and end by a given offset.\n\n Args:\n offset (int): Number of characters to add to start and end.\n\n Returns:\n TextSpan: A new TextSpan with adjusted position.\n \"\"\"\n start, end, style = self\n return Span(start + offset, end + offset, style)\n\n def right_crop(self, offset: int) -> \"Span\":\n \"\"\"Crop the span at the given offset.\n\n Args:\n offset (int): A value between start and end.\n\n Returns:\n Span: A new (possibly smaller) span.\n \"\"\"\n start, end, style = self\n if offset >= end:\n return self\n return Span(start, min(offset, end), style)\n\n def extend(self, cells: int) -> \"Span\":\n \"\"\"Extend the span by the given number of cells.\n\n Args:\n cells (int): Additional space to add to end of span.\n\n Returns:\n Span: A span.\n \"\"\"\n if cells:\n start, end, style = self\n return Span(start, end + cells, style)\n else:\n return self\n\n\nclass Text(JupyterMixin):\n \"\"\"Text with color / style.\n\n Args:\n text (str, optional): Default unstyled text. Defaults to \"\".\n style (Union[str, Style], optional): Base style for text. Defaults to \"\".\n justify (str, optional): Justify method: \"left\", \"center\", \"full\", \"right\". Defaults to None.\n overflow (str, optional): Overflow method: \"crop\", \"fold\", \"ellipsis\". Defaults to None.\n no_wrap (bool, optional): Disable text wrapping, or None for default. Defaults to None.\n end (str, optional): Character to end text with. Defaults to \"\\\\\\\\n\".\n tab_size (int): Number of spaces per tab, or ``None`` to use ``console.tab_size``. Defaults to None.\n spans (List[Span], optional). A list of predefined style spans. Defaults to None.\n \"\"\"\n\n __slots__ = [\n \"_text\",\n \"style\",\n \"justify\",\n \"overflow\",\n \"no_wrap\",\n \"end\",\n \"tab_size\",\n \"_spans\",\n \"_length\",\n ]\n\n def __init__(\n self,\n text: str = \"\",\n style: Union[str, Style] = \"\",\n *,\n justify: Optional[\"JustifyMethod\"] = None,\n overflow: Optional[\"OverflowMethod\"] = None,\n no_wrap: Optional[bool] = None,\n end: str = \"\\n\",\n tab_size: Optional[int] = None,\n spans: Optional[List[Span]] = None,\n ) -> None:\n sanitized_text = strip_control_codes(text)\n self._text = [sanitized_text]\n self.style = style\n self.justify: Optional[\"JustifyMethod\"] = justify\n self.overflow: Optional[\"OverflowMethod\"] = overflow\n self.no_wrap = no_wrap\n self.end = end\n self.tab_size = tab_size\n self._spans: List[Span] = spans or []\n self._length: int = len(sanitized_text)\n\n def __len__(self) -> int:\n return self._length\n\n def __bool__(self) -> bool:\n return bool(self._length)\n\n def __str__(self) -> str:\n return self.plain\n\n def __repr__(self) -> str:\n return f\"<text {self.plain!r} {self._spans!r}>\"\n\n def __add__(self, other: Any) -> \"Text\":\n if isinstance(other, (str, Text)):\n result = self.copy()\n result.append(other)\n return result\n return NotImplemented\n\n def __eq__(self, other: object) -> bool:\n if not isinstance(other, Text):\n return NotImplemented\n return self.plain == other.plain and self._spans == other._spans\n\n def __contains__(self, other: object) -> bool:\n if isinstance(other, str):\n return other in self.plain\n elif isinstance(other, Text):\n return other.plain in self.plain\n return False\n\n def __getitem__(self, slice: Union[int, slice]) -> \"Text\":\n def get_text_at(offset: int) -> \"Text\":\n _Span = Span\n text = Text(\n self.plain[offset],\n spans=[\n _Span(0, 1, style)\n for start, end, style in self._spans\n if end > offset >= start\n ],\n end=\"\",\n )\n return text\n\n if isinstance(slice, int):\n return get_text_at(slice)\n else:\n start, stop, step = slice.indices(len(self.plain))\n if step == 1:\n lines = self.divide([start, stop])\n return lines[1]\n else:\n # This would be a bit of work to implement efficiently\n # For now, its not required\n raise TypeError(\"slices with step!=1 are not supported\")\n\n @property\n def cell_len(self) -> int:\n \"\"\"Get the number of cells required to render this text.\"\"\"\n return cell_len(self.plain)\n\n @property\n def markup(self) -> str:\n \"\"\"Get console markup to render this Text.\n\n Returns:\n str: A string potentially creating markup tags.\n \"\"\"\n from .markup import escape\n\n output: List[str] = []\n\n plain = self.plain\n markup_spans = [\n (0, False, self.style),\n *((span.start, False, span.style) for span in self._spans),\n *((span.end, True, span.style) for span in self._spans),\n (len(plain), True, self.style),\n ]\n markup_spans.sort(key=itemgetter(0, 1))\n position = 0\n append = output.append\n for offset, closing, style in markup_spans:\n if offset > position:\n append(escape(plain[position:offset]))\n position = offset\n if style:\n append(f\"[/{style}]\" if closing else f\"[{style}]\")\n markup = \"\".join(output)\n return markup\n\n @classmethod\n def from_markup(\n cls,\n text: str,\n *,\n style: Union[str, Style] = \"\",\n emoji: bool = True,\n emoji_variant: Optional[EmojiVariant] = None,\n justify: Optional[\"JustifyMethod\"] = None,\n overflow: Optional[\"OverflowMethod\"] = None,\n end: str = \"\\n\",\n ) -> \"Text\":\n \"\"\"Create Text instance from markup.\n\n Args:\n text (str): A string containing console markup.\n style (Union[str, Style], optional): Base style for text. Defaults to \"\".\n emoji (bool, optional): Also render emoji code. Defaults to True.\n emoji_variant (str, optional): Optional emoji variant, either \"text\" or \"emoji\". Defaults to None.\n justify (str, optional): Justify method: \"left\", \"center\", \"full\", \"right\". Defaults to None.\n overflow (str, optional): Overflow method: \"crop\", \"fold\", \"ellipsis\". Defaults to None.\n end (str, optional): Character to end text with. Defaults to \"\\\\\\\\n\".\n\n Returns:\n Text: A Text instance with markup rendered.\n \"\"\"\n from .markup import render\n\n rendered_text = render(text, style, emoji=emoji, emoji_variant=emoji_variant)\n rendered_text.justify = justify\n rendered_text.overflow = overflow\n rendered_text.end = end\n return rendered_text\n\n @classmethod\n def from_ansi(\n cls,\n text: str,\n *,\n style: Union[str, Style] = \"\",\n justify: Optional[\"JustifyMethod\"] = None,\n overflow: Optional[\"OverflowMethod\"] = None,\n no_wrap: Optional[bool] = None,\n end: str = \"\\n\",\n tab_size: Optional[int] = 8,\n ) -> \"Text\":\n \"\"\"Create a Text object from a string containing ANSI escape codes.\n\n Args:\n text (str): A string containing escape codes.\n style (Union[str, Style], optional): Base style for text. Defaults to \"\".\n justify (str, optional): Justify method: \"left\", \"center\", \"full\", \"right\". Defaults to None.\n overflow (str, optional): Overflow method: \"crop\", \"fold\", \"ellipsis\". Defaults to None.\n no_wrap (bool, optional): Disable text wrapping, or None for default. Defaults to None.\n end (str, optional): Character to end text with. Defaults to \"\\\\\\\\n\".\n tab_size (int): Number of spaces per tab, or ``None`` to use ``console.tab_size``. Defaults to None.\n \"\"\"\n from .ansi import AnsiDecoder\n\n joiner = Text(\n \"\\n\",\n justify=justify,\n overflow=overflow,\n no_wrap=no_wrap,\n end=end,\n tab_size=tab_size,\n style=style,\n )\n decoder = AnsiDecoder()\n result = joiner.join(line for line in decoder.decode(text))\n return result\n\n @classmethod\n def styled(\n cls,\n text: str,\n style: StyleType = \"\",\n *,\n justify: Optional[\"JustifyMethod\"] = None,\n overflow: Optional[\"OverflowMethod\"] = None,\n ) -> \"Text\":\n \"\"\"Construct a Text instance with a pre-applied styled. A style applied in this way won't be used\n to pad the text when it is justified.\n\n Args:\n text (str): A string containing console markup.\n style (Union[str, Style]): Style to apply to the text. Defaults to \"\".\n justify (str, optional): Justify method: \"left\", \"center\", \"full\", \"right\". Defaults to None.\n overflow (str, optional): Overflow method: \"crop\", \"fold\", \"ellipsis\". Defaults to None.\n\n Returns:\n Text: A text instance with a style applied to the entire string.\n \"\"\"\n styled_text = cls(text, justify=justify, overflow=overflow)\n styled_text.stylize(style)\n return styled_text\n\n @classmethod\n def assemble(\n cls,\n *parts: Union[str, \"Text\", Tuple[str, StyleType]],\n style: Union[str, Style] = \"\",\n justify: Optional[\"JustifyMethod\"] = None,\n overflow: Optional[\"OverflowMethod\"] = None,\n no_wrap: Optional[bool] = None,\n end: str = \"\\n\",\n tab_size: int = 8,\n meta: Optional[Dict[str, Any]] = None,\n ) -> \"Text\":\n \"\"\"Construct a text instance by combining a sequence of strings with optional styles.\n The positional arguments should be either strings, or a tuple of string + style.\n\n Args:\n style (Union[str, Style], optional): Base style for text. Defaults to \"\".\n justify (str, optional): Justify method: \"left\", \"center\", \"full\", \"right\". Defaults to None.\n overflow (str, optional): Overflow method: \"crop\", \"fold\", \"ellipsis\". Defaults to None.\n no_wrap (bool, optional): Disable text wrapping, or None for default. Defaults to None.\n end (str, optional): Character to end text with. Defaults to \"\\\\\\\\n\".\n tab_size (int): Number of spaces per tab, or ``None`` to use ``console.tab_size``. Defaults to None.\n meta (Dict[str, Any], optional). Meta data to apply to text, or None for no meta data. Default to None\n\n Returns:\n Text: A new text instance.\n \"\"\"\n text = cls(\n style=style,\n justify=justify,\n overflow=overflow,\n no_wrap=no_wrap,\n end=end,\n tab_size=tab_size,\n )\n append = text.append\n _Text = Text\n for part in parts:\n if isinstance(part, (_Text, str)):\n append(part)\n else:\n append(*part)\n if meta:\n text.apply_meta(meta)\n return text\n\n @property\n def plain(self) -> str:\n \"\"\"Get the text as a single string.\"\"\"\n if len(self._text) != 1:\n self._text[:] = [\"\".join(self._text)]\n return self._text[0]\n\n @plain.setter\n def plain(self, new_text: str) -> None:\n \"\"\"Set the text to a new value.\"\"\"\n if new_text != self.plain:\n sanitized_text = strip_control_codes(new_text)\n self._text[:] = [sanitized_text]\n old_length = self._length\n self._length = len(sanitized_text)\n if old_length > self._length:\n self._trim_spans()\n\n @property\n def spans(self) -> List[Span]:\n \"\"\"Get a reference to the internal list of spans.\"\"\"\n return self._spans\n\n @spans.setter\n def spans(self, spans: List[Span]) -> None:\n \"\"\"Set spans.\"\"\"\n self._spans = spans[:]\n\n def blank_copy(self, plain: str = \"\") -> \"Text\":\n \"\"\"Return a new Text instance with copied metadata (but not the string or spans).\"\"\"\n copy_self = Text(\n plain,\n style=self.style,\n justify=self.justify,\n overflow=self.overflow,\n no_wrap=self.no_wrap,\n end=self.end,\n tab_size=self.tab_size,\n )\n return copy_self\n\n def copy(self) -> \"Text\":\n \"\"\"Return a copy of this instance.\"\"\"\n copy_self = Text(\n self.plain,\n style=self.style,\n justify=self.justify,\n overflow=self.overflow,\n no_wrap=self.no_wrap,\n end=self.end,\n tab_size=self.tab_size,\n )\n copy_self._spans[:] = self._spans\n return copy_self\n\n def stylize(\n self,\n style: Union[str, Style],\n start: int = 0,\n end: Optional[int] = None,\n ) -> None:\n \"\"\"Apply a style to the text, or a portion of the text.\n\n Args:\n style (Union[str, Style]): Style instance or style definition to apply.\n start (int): Start offset (negative indexing is supported). Defaults to 0.\n end (Optional[int], optional): End offset (negative indexing is supported), or None for end of text. Defaults to None.\n \"\"\"\n if style:\n length = len(self)\n if start < 0:\n start = length + start\n if end is None:\n end = length\n if end < 0:\n end = length + end\n if start >= length or end <= start:\n # Span not in text or not valid\n return\n self._spans.append(Span(start, min(length, end), style))\n\n def stylize_before(\n self,\n style: Union[str, Style],\n start: int = 0,\n end: Optional[int] = None,\n ) -> None:\n \"\"\"Apply a style to the text, or a portion of the text. Styles will be applied before other styles already present.\n\n Args:\n style (Union[str, Style]): Style instance or style definition to apply.\n start (int): Start offset (negative indexing is supported). Defaults to 0.\n end (Optional[int], optional): End offset (negative indexing is supported), or None for end of text. Defaults to None.\n \"\"\"\n if style:\n length = len(self)\n if start < 0:\n start = length + start\n if end is None:\n end = length\n if end < 0:\n end = length + end\n if start >= length or end <= start:\n # Span not in text or not valid\n return\n self._spans.insert(0, Span(start, min(length, end), style))\n\n def apply_meta(\n self, meta: Dict[str, Any], start: int = 0, end: Optional[int] = None\n ) -> None:\n \"\"\"Apply metadata to the text, or a portion of the text.\n\n Args:\n meta (Dict[str, Any]): A dict of meta information.\n start (int): Start offset (negative indexing is supported). Defaults to 0.\n end (Optional[int], optional): End offset (negative indexing is supported), or None for end of text. Defaults to None.\n\n \"\"\"\n style = Style.from_meta(meta)\n self.stylize(style, start=start, end=end)\n\n def on(self, meta: Optional[Dict[str, Any]] = None, **handlers: Any) -> \"Text\":\n \"\"\"Apply event handlers (used by Textual project).\n\n Example:\n >>> from rich.text import Text\n >>> text = Text(\"hello world\")\n >>> text.on(click=\"view.toggle('world')\")\n\n Args:\n meta (Dict[str, Any]): Mapping of meta information.\n **handlers: Keyword args are prefixed with \"@\" to defined handlers.\n\n Returns:\n Text: Self is returned to method may be chained.\n \"\"\"\n meta = {} if meta is None else meta\n meta.update({f\"@{key}\": value for key, value in handlers.items()})\n self.stylize(Style.from_meta(meta))\n return self\n\n def remove_suffix(self, suffix: str) -> None:\n \"\"\"Remove a suffix if it exists.\n\n Args:\n suffix (str): Suffix to remove.\n \"\"\"\n if self.plain.endswith(suffix):\n self.right_crop(len(suffix))\n\n def get_style_at_offset(self, console: \"Console\", offset: int) -> Style:\n \"\"\"Get the style of a character at give offset.\n\n Args:\n console (~Console): Console where text will be rendered.\n offset (int): Offset in to text (negative indexing supported)\n\n Returns:\n Style: A Style instance.\n \"\"\"\n # TODO: This is a little inefficient, it is only used by full justify\n if offset < 0:\n offset = len(self) + offset\n get_style = console.get_style\n style = get_style(self.style).copy()\n for start, end, span_style in self._spans:\n if end > offset >= start:\n style += get_style(span_style, default=\"\")\n return style\n\n def extend_style(self, spaces: int) -> None:\n \"\"\"Extend the Text given number of spaces where the spaces have the same style as the last character.\n\n Args:\n spaces (int): Number of spaces to add to the Text.\n \"\"\"\n if spaces <= 0:\n return\n spans = self.spans\n new_spaces = \" \" * spaces\n if spans:\n end_offset = len(self)\n self._spans[:] = [\n span.extend(spaces) if span.end >= end_offset else span\n for span in spans\n ]\n self._text.append(new_spaces)\n self._length += spaces\n else:\n self.plain += new_spaces\n\n def highlight_regex(\n self,\n re_highlight: str,\n style: Optional[Union[GetStyleCallable, StyleType]] = None,\n *,\n style_prefix: str = \"\",\n ) -> int:\n \"\"\"Highlight text with a regular expression, where group names are\n translated to styles.\n\n Args:\n re_highlight (str): A regular expression.\n style (Union[GetStyleCallable, StyleType]): Optional style to apply to whole match, or a callable\n which accepts the matched text and returns a style. Defaults to None.\n style_prefix (str, optional): Optional prefix to add to style group names.\n\n Returns:\n int: Number of regex matches\n \"\"\"\n count = 0\n append_span = self._spans.append\n _Span = Span\n plain = self.plain\n for match in re.finditer(re_highlight, plain):\n get_span = match.span\n if style:\n start, end = get_span()\n match_style = style(plain[start:end]) if callable(style) else style\n if match_style is not None and end > start:\n append_span(_Span(start, end, match_style))\n\n count += 1\n for name in match.groupdict().keys():\n start, end = get_span(name)\n if start != -1 and end > start:\n append_span(_Span(start, end, f\"{style_prefix}{name}\"))\n return count\n\n def highlight_words(\n self,\n words: Iterable[str],\n style: Union[str, Style],\n *,\n case_sensitive: bool = True,\n ) -> int:\n \"\"\"Highlight words with a style.\n\n Args:\n words (Iterable[str]): Words to highlight.\n style (Union[str, Style]): Style to apply.\n case_sensitive (bool, optional): Enable case sensitive matching. Defaults to True.\n\n Returns:\n int: Number of words highlighted.\n \"\"\"\n re_words = \"|\".join(re.escape(word) for word in words)\n add_span = self._spans.append\n count = 0\n _Span = Span\n for match in re.finditer(\n re_words, self.plain, flags=0 if case_sensitive else re.IGNORECASE\n ):\n start, end = match.span(0)\n add_span(_Span(start, end, style))\n count += 1\n return count\n\n def rstrip(self) -> None:\n \"\"\"Strip whitespace from end of text.\"\"\"\n self.plain = self.plain.rstrip()\n\n def rstrip_end(self, size: int) -> None:\n \"\"\"Remove whitespace beyond a certain width at the end of the text.\n\n Args:\n size (int): The desired size of the text.\n \"\"\"\n text_length = len(self)\n if text_length > size:\n excess = text_length - size\n whitespace_match = _re_whitespace.search(self.plain)\n if whitespace_match is not None:\n whitespace_count = len(whitespace_match.group(0))\n self.right_crop(min(whitespace_count, excess))\n\n def set_length(self, new_length: int) -> None:\n \"\"\"Set new length of the text, clipping or padding is required.\"\"\"\n length = len(self)\n if length != new_length:\n if length < new_length:\n self.pad_right(new_length - length)\n else:\n self.right_crop(length - new_length)\n\n def __rich_console__(\n self, console: \"Console\", options: \"ConsoleOptions\"\n ) -> Iterable[Segment]:\n tab_size: int = console.tab_size if self.tab_size is None else self.tab_size\n justify = self.justify or options.justify or DEFAULT_JUSTIFY\n\n overflow = self.overflow or options.overflow or DEFAULT_OVERFLOW\n\n lines = self.wrap(\n console,\n options.max_width,\n justify=justify,\n overflow=overflow,\n tab_size=tab_size or 8,\n no_wrap=pick_bool(self.no_wrap, options.no_wrap, False),\n )\n all_lines = Text(\"\\n\").join(lines)\n yield from all_lines.render(console, end=self.end)\n\n def __rich_measure__(\n self, console: \"Console\", options: \"ConsoleOptions\"\n ) -> Measurement:\n text = self.plain\n lines = text.splitlines()\n max_text_width = max(cell_len(line) for line in lines) if lines else 0\n words = text.split()\n min_text_width = (\n max(cell_len(word) for word in words) if words else max_text_width\n )\n return Measurement(min_text_width, max_text_width)\n\n def render(self, console: \"Console\", end: str = \"\") -> Iterable[\"Segment\"]:\n \"\"\"Render the text as Segments.\n\n Args:\n console (Console): Console instance.\n end (Optional[str], optional): Optional end character.\n\n Returns:\n Iterable[Segment]: Result of render that may be written to the console.\n \"\"\"\n _Segment = Segment\n text = self.plain\n if not self._spans:\n yield Segment(text)\n if end:\n yield _Segment(end)\n return\n get_style = partial(console.get_style, default=Style.null())\n\n enumerated_spans = list(enumerate(self._spans, 1))\n style_map = {index: get_style(span.style) for index, span in enumerated_spans}\n style_map[0] = get_style(self.style)\n\n spans = [\n (0, False, 0),\n *((span.start, False, index) for index, span in enumerated_spans),\n *((span.end, True, index) for index, span in enumerated_spans),\n (len(text), True, 0),\n ]\n spans.sort(key=itemgetter(0, 1))\n\n stack: List[int] = []\n stack_append = stack.append\n stack_pop = stack.remove\n\n style_cache: Dict[Tuple[Style, ...], Style] = {}\n style_cache_get = style_cache.get\n combine = Style.combine\n\n def get_current_style() -> Style:\n \"\"\"Construct current style from stack.\"\"\"\n styles = tuple(style_map[_style_id] for _style_id in sorted(stack))\n cached_style = style_cache_get(styles)\n if cached_style is not None:\n return cached_style\n current_style = combine(styles)\n style_cache[styles] = current_style\n return current_style\n\n for (offset, leaving, style_id), (next_offset, _, _) in zip(spans, spans[1:]):\n if leaving:\n stack_pop(style_id)\n else:\n stack_append(style_id)\n if next_offset > offset:\n yield _Segment(text[offset:next_offset], get_current_style())\n if end:\n yield _Segment(end)\n\n def join(self, lines: Iterable[\"Text\"]) -> \"Text\":\n \"\"\"Join text together with this instance as the separator.\n\n Args:\n lines (Iterable[Text]): An iterable of Text instances to join.\n\n Returns:\n Text: A new text instance containing join text.\n \"\"\"\n\n new_text = self.blank_copy()\n\n def iter_text() -> Iterable[\"Text\"]:\n if self.plain:\n for last, line in loop_last(lines):\n yield line\n if not last:\n yield self\n else:\n yield from lines\n\n extend_text = new_text._text.extend\n append_span = new_text._spans.append\n extend_spans = new_text._spans.extend\n offset = 0\n _Span = Span\n\n for text in iter_text():\n extend_text(text._text)\n if text.style:\n append_span(_Span(offset, offset + len(text), text.style))\n extend_spans(\n _Span(offset + start, offset + end, style)\n for start, end, style in text._spans\n )\n offset += len(text)\n new_text._length = offset\n return new_text\n\n def expand_tabs(self, tab_size: Optional[int] = None) -> None:\n \"\"\"Converts tabs to spaces.\n\n Args:\n tab_size (int, optional): Size of tabs. Defaults to 8.\n\n \"\"\"\n if \"\\t\" not in self.plain:\n return\n if tab_size is None:\n tab_size = self.tab_size\n if tab_size is None:\n tab_size = 8\n\n new_text: List[Text] = []\n append = new_text.append\n\n for line in self.split(\"\\n\", include_separator=True):\n if \"\\t\" not in line.plain:\n append(line)\n else:\n cell_position = 0\n parts = line.split(\"\\t\", include_separator=True)\n for part in parts:\n if part.plain.endswith(\"\\t\"):\n part._text[-1] = part._text[-1][:-1] + \" \"\n cell_position += part.cell_len\n tab_remainder = cell_position % tab_size\n if tab_remainder:\n spaces = tab_size - tab_remainder\n part.extend_style(spaces)\n cell_position += spaces\n else:\n cell_position += part.cell_len\n append(part)\n\n result = Text(\"\").join(new_text)\n\n self._text = [result.plain]\n self._length = len(self.plain)\n self._spans[:] = result._spans\n\n def truncate(\n self,\n max_width: int,\n *,\n overflow: Optional[\"OverflowMethod\"] = None,\n pad: bool = False,\n ) -> None:\n \"\"\"Truncate text if it is longer that a given width.\n\n Args:\n max_width (int): Maximum number of characters in text.\n overflow (str, optional): Overflow method: \"crop\", \"fold\", or \"ellipsis\". Defaults to None, to use self.overflow.\n pad (bool, optional): Pad with spaces if the length is less than max_width. Defaults to False.\n \"\"\"\n _overflow = overflow or self.overflow or DEFAULT_OVERFLOW\n if _overflow != \"ignore\":\n length = cell_len(self.plain)\n if length > max_width:\n if _overflow == \"ellipsis\":\n self.plain = set_cell_size(self.plain, max_width - 1) + \"…\"\n else:\n self.plain = set_cell_size(self.plain, max_width)\n if pad and length < max_width:\n spaces = max_width - length\n self._text = [f\"{self.plain}{' ' * spaces}\"]\n self._length = len(self.plain)\n\n def _trim_spans(self) -> None:\n \"\"\"Remove or modify any spans that are over the end of the text.\"\"\"\n max_offset = len(self.plain)\n _Span = Span\n self._spans[:] = [\n (\n span\n if span.end < max_offset\n else _Span(span.start, min(max_offset, span.end), span.style)\n )\n for span in self._spans\n if span.start < max_offset\n ]\n\n def pad(self, count: int, character: str = \" \") -> None:\n \"\"\"Pad left and right with a given number of characters.\n\n Args:\n count (int): Width of padding.\n character (str): The character to pad with. Must be a string of length 1.\n \"\"\"\n assert len(character) == 1, \"Character must be a string of length 1\"\n if count:\n pad_characters = character * count\n self.plain = f\"{pad_characters}{self.plain}{pad_characters}\"\n _Span = Span\n self._spans[:] = [\n _Span(start + count, end + count, style)\n for start, end, style in self._spans\n ]\n\n def pad_left(self, count: int, character: str = \" \") -> None:\n \"\"\"Pad the left with a given character.\n\n Args:\n count (int): Number of characters to pad.\n character (str, optional): Character to pad with. Defaults to \" \".\n \"\"\"\n assert len(character) == 1, \"Character must be a string of length 1\"\n if count:\n self.plain = f\"{character * count}{self.plain}\"\n _Span = Span\n self._spans[:] = [\n _Span(start + count, end + count, style)\n for start, end, style in self._spans\n ]\n\n def pad_right(self, count: int, character: str = \" \") -> None:\n \"\"\"Pad the right with a given character.\n\n Args:\n count (int): Number of characters to pad.\n character (str, optional): Character to pad with. Defaults to \" \".\n \"\"\"\n assert len(character) == 1, \"Character must be a string of length 1\"\n if count:\n self.plain = f\"{self.plain}{character * count}\"\n\n def align(self, align: AlignMethod, width: int, character: str = \" \") -> None:\n \"\"\"Align text to a given width.\n\n Args:\n align (AlignMethod): One of \"left\", \"center\", or \"right\".\n width (int): Desired width.\n character (str, optional): Character to pad with. Defaults to \" \".\n \"\"\"\n self.truncate(width)\n excess_space = width - cell_len(self.plain)\n if excess_space:\n if align == \"left\":\n self.pad_right(excess_space, character)\n elif align == \"center\":\n left = excess_space // 2\n self.pad_left(left, character)\n self.pad_right(excess_space - left, character)\n else:\n self.pad_left(excess_space, character)\n\n def append(\n self, text: Union[\"Text\", str], style: Optional[Union[str, \"Style\"]] = None\n ) -> \"Text\":\n \"\"\"Add text with an optional style.\n\n Args:\n text (Union[Text, str]): A str or Text to append.\n style (str, optional): A style name. Defaults to None.\n\n Returns:\n Text: Returns self for chaining.\n \"\"\"\n\n if not isinstance(text, (str, Text)):\n raise TypeError(\"Only str or Text can be appended to Text\")\n\n if len(text):\n if isinstance(text, str):\n sanitized_text = strip_control_codes(text)\n self._text.append(sanitized_text)\n offset = len(self)\n text_length = len(sanitized_text)\n if style:\n self._spans.append(Span(offset, offset + text_length, style))\n self._length += text_length\n elif isinstance(text, Text):\n _Span = Span\n if style is not None:\n raise ValueError(\n \"style must not be set when appending Text instance\"\n )\n text_length = self._length\n if text.style:\n self._spans.append(\n _Span(text_length, text_length + len(text), text.style)\n )\n self._text.append(text.plain)\n self._spans.extend(\n _Span(start + text_length, end + text_length, style)\n for start, end, style in text._spans\n )\n self._length += len(text)\n return self\n\n def append_text(self, text: \"Text\") -> \"Text\":\n \"\"\"Append another Text instance. This method is more performant that Text.append, but\n only works for Text.\n\n Args:\n text (Text): The Text instance to append to this instance.\n\n Returns:\n Text: Returns self for chaining.\n \"\"\"\n _Span = Span\n text_length = self._length\n if text.style:\n self._spans.append(_Span(text_length, text_length + len(text), text.style))\n self._text.append(text.plain)\n self._spans.extend(\n _Span(start + text_length, end + text_length, style)\n for start, end, style in text._spans\n )\n self._length += len(text)\n return self\n\n def append_tokens(\n self, tokens: Iterable[Tuple[str, Optional[StyleType]]]\n ) -> \"Text\":\n \"\"\"Append iterable of str and style. Style may be a Style instance or a str style definition.\n\n Args:\n tokens (Iterable[Tuple[str, Optional[StyleType]]]): An iterable of tuples containing str content and style.\n\n Returns:\n Text: Returns self for chaining.\n \"\"\"\n append_text = self._text.append\n append_span = self._spans.append\n _Span = Span\n offset = len(self)\n for content, style in tokens:\n append_text(content)\n if style:\n append_span(_Span(offset, offset + len(content), style))\n offset += len(content)\n self._length = offset\n return self\n\n def copy_styles(self, text: \"Text\") -> None:\n \"\"\"Copy styles from another Text instance.\n\n Args:\n text (Text): A Text instance to copy styles from, must be the same length.\n \"\"\"\n self._spans.extend(text._spans)\n\n def split(\n self,\n separator: str = \"\\n\",\n *,\n include_separator: bool = False,\n allow_blank: bool = False,\n ) -> Lines:\n \"\"\"Split rich text in to lines, preserving styles.\n\n Args:\n separator (str, optional): String to split on. Defaults to \"\\\\\\\\n\".\n include_separator (bool, optional): Include the separator in the lines. Defaults to False.\n allow_blank (bool, optional): Return a blank line if the text ends with a separator. Defaults to False.\n\n Returns:\n List[RichText]: A list of rich text, one per line of the original.\n \"\"\"\n assert separator, \"separator must not be empty\"\n\n text = self.plain\n if separator not in text:\n return Lines([self.copy()])\n\n if include_separator:\n lines = self.divide(\n match.end() for match in re.finditer(re.escape(separator), text)\n )\n else:\n\n def flatten_spans() -> Iterable[int]:\n for match in re.finditer(re.escape(separator), text):\n start, end = match.span()\n yield start\n yield end\n\n lines = Lines(\n line for line in self.divide(flatten_spans()) if line.plain != separator\n )\n\n if not allow_blank and text.endswith(separator):\n lines.pop()\n\n return lines\n\n def divide(self, offsets: Iterable[int]) -> Lines:\n \"\"\"Divide text in to a number of lines at given offsets.\n\n Args:\n offsets (Iterable[int]): Offsets used to divide text.\n\n Returns:\n Lines: New RichText instances between offsets.\n \"\"\"\n _offsets = list(offsets)\n\n if not _offsets:\n return Lines([self.copy()])\n\n text = self.plain\n text_length = len(text)\n divide_offsets = [0, *_offsets, text_length]\n line_ranges = list(zip(divide_offsets, divide_offsets[1:]))\n\n style = self.style\n justify = self.justify\n overflow = self.overflow\n _Text = Text\n new_lines = Lines(\n _Text(\n text[start:end],\n style=style,\n justify=justify,\n overflow=overflow,\n )\n for start, end in line_ranges\n )\n if not self._spans:\n return new_lines\n\n _line_appends = [line._spans.append for line in new_lines._lines]\n line_count = len(line_ranges)\n _Span = Span\n\n for span_start, span_end, style in self._spans:\n lower_bound = 0\n upper_bound = line_count\n start_line_no = (lower_bound + upper_bound) // 2\n\n while True:\n line_start, line_end = line_ranges[start_line_no]\n if span_start < line_start:\n upper_bound = start_line_no - 1\n elif span_start > line_end:\n lower_bound = start_line_no + 1\n else:\n break\n start_line_no = (lower_bound + upper_bound) // 2\n\n if span_end < line_end:\n end_line_no = start_line_no\n else:\n end_line_no = lower_bound = start_line_no\n upper_bound = line_count\n\n while True:\n line_start, line_end = line_ranges[end_line_no]\n if span_end < line_start:\n upper_bound = end_line_no - 1\n elif span_end > line_end:\n lower_bound = end_line_no + 1\n else:\n break\n end_line_no = (lower_bound + upper_bound) // 2\n\n for line_no in range(start_line_no, end_line_no + 1):\n line_start, line_end = line_ranges[line_no]\n new_start = max(0, span_start - line_start)\n new_end = min(span_end - line_start, line_end - line_start)\n if new_end > new_start:\n _line_appends[line_no](_Span(new_start, new_end, style))\n\n return new_lines\n\n def right_crop(self, amount: int = 1) -> None:\n \"\"\"Remove a number of characters from the end of the text.\"\"\"\n max_offset = len(self.plain) - amount\n _Span = Span\n self._spans[:] = [\n (\n span\n if span.end < max_offset\n else _Span(span.start, min(max_offset, span.end), span.style)\n )\n for span in self._spans\n if span.start < max_offset\n ]\n self._text = [self.plain[:-amount]]\n self._length -= amount\n\n def wrap(\n self,\n console: \"Console\",\n width: int,\n *,\n justify: Optional[\"JustifyMethod\"] = None,\n overflow: Optional[\"OverflowMethod\"] = None,\n tab_size: int = 8,\n no_wrap: Optional[bool] = None,\n ) -> Lines:\n \"\"\"Word wrap the text.\n\n Args:\n console (Console): Console instance.\n width (int): Number of cells available per line.\n justify (str, optional): Justify method: \"default\", \"left\", \"center\", \"full\", \"right\". Defaults to \"default\".\n overflow (str, optional): Overflow method: \"crop\", \"fold\", or \"ellipsis\". Defaults to None.\n tab_size (int, optional): Default tab size. Defaults to 8.\n no_wrap (bool, optional): Disable wrapping, Defaults to False.\n\n Returns:\n Lines: Number of lines.\n \"\"\"\n wrap_justify = justify or self.justify or DEFAULT_JUSTIFY\n wrap_overflow = overflow or self.overflow or DEFAULT_OVERFLOW\n\n no_wrap = pick_bool(no_wrap, self.no_wrap, False) or overflow == \"ignore\"\n\n lines = Lines()\n for line in self.split(allow_blank=True):\n if \"\\t\" in line:\n line.expand_tabs(tab_size)\n if no_wrap:\n new_lines = Lines([line])\n else:\n offsets = divide_line(str(line), width, fold=wrap_overflow == \"fold\")\n new_lines = line.divide(offsets)\n for line in new_lines:\n line.rstrip_end(width)\n if wrap_justify:\n new_lines.justify(\n console, width, justify=wrap_justify, overflow=wrap_overflow\n )\n for line in new_lines:\n line.truncate(width, overflow=wrap_overflow)\n lines.extend(new_lines)\n return lines\n\n def fit(self, width: int) -> Lines:\n \"\"\"Fit the text in to given width by chopping in to lines.\n\n Args:\n width (int): Maximum characters in a line.\n\n Returns:\n Lines: Lines container.\n \"\"\"\n lines: Lines = Lines()\n append = lines.append\n for line in self.split():\n line.set_length(width)\n append(line)\n return lines\n\n def detect_indentation(self) -> int:\n \"\"\"Auto-detect indentation of code.\n\n Returns:\n int: Number of spaces used to indent code.\n \"\"\"\n\n _indentations = {\n len(match.group(1))\n for match in re.finditer(r\"^( *)(.*)$\", self.plain, flags=re.MULTILINE)\n }\n\n try:\n indentation = (\n reduce(gcd, [indent for indent in _indentations if not indent % 2]) or 1\n )\n except TypeError:\n indentation = 1\n\n return indentation\n\n def with_indent_guides(\n self,\n indent_size: Optional[int] = None,\n *,\n character: str = \"│\",\n style: StyleType = \"dim green\",\n ) -> \"Text\":\n \"\"\"Adds indent guide lines to text.\n\n Args:\n indent_size (Optional[int]): Size of indentation, or None to auto detect. Defaults to None.\n character (str, optional): Character to use for indentation. Defaults to \"│\".\n style (Union[Style, str], optional): Style of indent guides.\n\n Returns:\n Text: New text with indentation guides.\n \"\"\"\n\n _indent_size = self.detect_indentation() if indent_size is None else indent_size\n\n text = self.copy()\n text.expand_tabs()\n indent_line = f\"{character}{' ' * (_indent_size - 1)}\"\n\n re_indent = re.compile(r\"^( *)(.*)$\")\n new_lines: List[Text] = []\n add_line = new_lines.append\n blank_lines = 0\n for line in text.split(allow_blank=True):\n match = re_indent.match(line.plain)\n if not match or not match.group(2):\n blank_lines += 1\n continue\n indent = match.group(1)\n full_indents, remaining_space = divmod(len(indent), _indent_size)\n new_indent = f\"{indent_line * full_indents}{' ' * remaining_space}\"\n line.plain = new_indent + line.plain[len(new_indent) :]\n line.stylize(style, 0, len(new_indent))\n if blank_lines:\n new_lines.extend([Text(new_indent, style=style)] * blank_lines)\n blank_lines = 0\n add_line(line)\n if blank_lines:\n new_lines.extend([Text(\"\", style=style)] * blank_lines)\n\n new_text = text.blank_copy(\"\\n\").join(new_lines)\n return new_text\n\n\nif __name__ == \"__main__\": # pragma: no cover\n from rich.console import Console\n\n text = Text(\n \"\"\"\\nLorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat. Duis aute irure dolor in reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla pariatur. Excepteur sint occaecat cupidatat non proident, sunt in culpa qui officia deserunt mollit anim id est laborum.\\n\"\"\"\n )\n text.highlight_words([\"Lorem\"], \"bold\")\n text.highlight_words([\"ipsum\"], \"italic\")\n\n console = Console()\n\n console.rule(\"justify='left'\")\n console.print(text, style=\"red\")\n console.print()\n\n console.rule(\"justify='center'\")\n console.print(text, style=\"green\", justify=\"center\")\n console.print()\n\n console.rule(\"justify='right'\")\n console.print(text, style=\"blue\", justify=\"right\")\n console.print()\n\n console.rule(\"justify='full'\")\n console.print(text, style=\"magenta\", justify=\"full\")\n console.print()\n", "path": "rich/text.py" } ]
diff --git a/docs/source/reference/text.rst b/docs/source/reference/text.rst index 76b41f4bb..0b929e44b 100644 --- a/docs/source/reference/text.rst +++ b/docs/source/reference/text.rst @@ -2,5 +2,5 @@ rich.text ========= .. automodule:: rich.text - :members: Text + :members: Text, TextType diff --git a/rich/text.py b/rich/text.py index 7091e4291..7b32967f7 100644 --- a/rich/text.py +++ b/rich/text.py @@ -38,7 +38,7 @@ _re_whitespace = re.compile(r"\s+$") TextType = Union[str, "Text"] -"""A plain string or a [Text][rich.text.Text] instance.""" +"""A plain string or a :class:`Text` instance.""" GetStyleCallable = Callable[[str], Optional[StyleType]]
geopandas__geopandas-1670
BUG: categories passed as a series are not properly sorted If you pass a series of categories to plot, those are used directly as an array, even though the series might have an index of a different order than df. See the example below. ```python from shapely.geometry import Point import pandas as pd pts = [Point(x, x) for x in range(10)] df = gpd.GeoDataFrame(geometry=pts) # this is correct, since gdf.index and colors.index are in the same order colors = pd.Series(['a', 'b', 'c', 'a', 'b', 'c', 'a', 'b', 'c', 'a']) df.plot(colors, legend=True) ``` ![image](https://user-images.githubusercontent.com/36797143/94190673-54dbbd00-fea4-11ea-865d-845bd77ad929.png) ```python # I would assume that this gets sorted based on the index first, but it does not - the index is ignored colors_ord = pd.Series(['a', 'a', 'a', 'a', 'b', 'b', 'b', 'c', 'c', 'c'], index=[0, 3, 6, 9, 1, 4, 7, 2, 5, 8]) df.plot(colors_ord, legend=True) ``` ![image](https://user-images.githubusercontent.com/36797143/94190771-789f0300-fea4-11ea-9fc3-b23a11caf568.png) ```python # if you assign the same series as a column before plotting, it works properly df['colors_ord'] = colors_ord df.plot('colors_ord', legend=True) ``` ![image](https://user-images.githubusercontent.com/36797143/94190841-90768700-fea4-11ea-9320-9c63fab23a63.png) I would say that in these cases, we should check the index first and sort it properly before using values.
[ { "content": "import warnings\n\nimport numpy as np\nimport pandas as pd\n\nimport geopandas\n\nfrom distutils.version import LooseVersion\n\n\ndef deprecated(new):\n \"\"\"Helper to provide deprecation warning.\"\"\"\n\n def old(*args, **kwargs):\n warnings.warn(\n \"{} is intended for internal \".format(new.__name__[1:])\n + \"use only, and will be deprecated.\",\n DeprecationWarning,\n stacklevel=2,\n )\n new(*args, **kwargs)\n\n return old\n\n\ndef _flatten_multi_geoms(geoms, prefix=\"Multi\"):\n \"\"\"\n Returns Series like geoms and index, except that any Multi geometries\n are split into their components and indices are repeated for all component\n in the same Multi geometry. Maintains 1:1 matching of geometry to value.\n\n Prefix specifies type of geometry to be flatten. 'Multi' for MultiPoint and similar,\n \"Geom\" for GeometryCollection.\n\n Returns\n -------\n components : list of geometry\n\n component_index : index array\n indices are repeated for all components in the same Multi geometry\n \"\"\"\n components, component_index = [], []\n\n if not geoms.geom_type.str.startswith(prefix).any():\n return geoms, np.arange(len(geoms))\n\n for ix, geom in enumerate(geoms):\n if geom.type.startswith(prefix):\n for poly in geom.geoms:\n components.append(poly)\n component_index.append(ix)\n else:\n components.append(geom)\n component_index.append(ix)\n\n return components, np.array(component_index)\n\n\ndef _expand_kwargs(kwargs, multiindex):\n \"\"\"\n Most arguments to the plot functions must be a (single) value, or a sequence\n of values. This function checks each key-value pair in 'kwargs' and expands\n it (in place) to the correct length/formats with help of 'multiindex', unless\n the value appears to already be a valid (single) value for the key.\n \"\"\"\n from matplotlib.colors import is_color_like\n from typing import Iterable\n\n for att, value in kwargs.items():\n if \"color\" in att: # color(s), edgecolor(s), facecolor(s)\n if is_color_like(value):\n continue\n elif \"linestyle\" in att: # linestyle(s)\n # A single linestyle can be 2-tuple of a number and an iterable.\n if (\n isinstance(value, tuple)\n and len(value) == 2\n and isinstance(value[1], Iterable)\n ):\n continue\n elif att in [\"marker\", \"alpha\"]:\n # For these attributes, only a single value is allowed, so never expand.\n continue\n\n if pd.api.types.is_list_like(value):\n kwargs[att] = np.take(value, multiindex, axis=0)\n\n\ndef _plot_polygon_collection(\n ax, geoms, values=None, color=None, cmap=None, vmin=None, vmax=None, **kwargs\n):\n \"\"\"\n Plots a collection of Polygon and MultiPolygon geometries to `ax`\n\n Parameters\n ----------\n ax : matplotlib.axes.Axes\n where shapes will be plotted\n geoms : a sequence of `N` Polygons and/or MultiPolygons (can be mixed)\n\n values : a sequence of `N` values, optional\n Values will be mapped to colors using vmin/vmax/cmap. They should\n have 1:1 correspondence with the geometries (not their components).\n Otherwise follows `color` / `facecolor` kwargs.\n edgecolor : single color or sequence of `N` colors\n Color for the edge of the polygons\n facecolor : single color or sequence of `N` colors\n Color to fill the polygons. Cannot be used together with `values`.\n color : single color or sequence of `N` colors\n Sets both `edgecolor` and `facecolor`\n **kwargs\n Additional keyword arguments passed to the collection\n\n Returns\n -------\n collection : matplotlib.collections.Collection that was plotted\n \"\"\"\n\n try:\n from descartes.patch import PolygonPatch\n except ImportError:\n raise ImportError(\n \"The descartes package is required for plotting polygons in geopandas. \"\n \"You can install it using 'conda install -c conda-forge descartes' or \"\n \"'pip install descartes'.\"\n )\n from matplotlib.collections import PatchCollection\n\n geoms, multiindex = _flatten_multi_geoms(geoms)\n if values is not None:\n values = np.take(values, multiindex, axis=0)\n\n # PatchCollection does not accept some kwargs.\n kwargs = {\n att: value\n for att, value in kwargs.items()\n if att not in [\"markersize\", \"marker\"]\n }\n\n # Add to kwargs for easier checking below.\n if color is not None:\n kwargs[\"color\"] = color\n\n _expand_kwargs(kwargs, multiindex)\n\n collection = PatchCollection([PolygonPatch(poly) for poly in geoms], **kwargs)\n\n if values is not None:\n collection.set_array(np.asarray(values))\n collection.set_cmap(cmap)\n if \"norm\" not in kwargs:\n collection.set_clim(vmin, vmax)\n\n ax.add_collection(collection, autolim=True)\n ax.autoscale_view()\n return collection\n\n\nplot_polygon_collection = deprecated(_plot_polygon_collection)\n\n\ndef _plot_linestring_collection(\n ax, geoms, values=None, color=None, cmap=None, vmin=None, vmax=None, **kwargs\n):\n \"\"\"\n Plots a collection of LineString and MultiLineString geometries to `ax`\n\n Parameters\n ----------\n ax : matplotlib.axes.Axes\n where shapes will be plotted\n geoms : a sequence of `N` LineStrings and/or MultiLineStrings (can be\n mixed)\n values : a sequence of `N` values, optional\n Values will be mapped to colors using vmin/vmax/cmap. They should\n have 1:1 correspondence with the geometries (not their components).\n color : single color or sequence of `N` colors\n Cannot be used together with `values`.\n\n Returns\n -------\n collection : matplotlib.collections.Collection that was plotted\n \"\"\"\n from matplotlib.collections import LineCollection\n\n geoms, multiindex = _flatten_multi_geoms(geoms)\n if values is not None:\n values = np.take(values, multiindex, axis=0)\n\n # LineCollection does not accept some kwargs.\n kwargs = {\n att: value\n for att, value in kwargs.items()\n if att not in [\"markersize\", \"marker\"]\n }\n\n # Add to kwargs for easier checking below.\n if color is not None:\n kwargs[\"color\"] = color\n\n _expand_kwargs(kwargs, multiindex)\n\n segments = [np.array(linestring.coords)[:, :2] for linestring in geoms]\n collection = LineCollection(segments, **kwargs)\n\n if values is not None:\n collection.set_array(np.asarray(values))\n collection.set_cmap(cmap)\n if \"norm\" not in kwargs:\n collection.set_clim(vmin, vmax)\n\n ax.add_collection(collection, autolim=True)\n ax.autoscale_view()\n return collection\n\n\nplot_linestring_collection = deprecated(_plot_linestring_collection)\n\n\ndef _plot_point_collection(\n ax,\n geoms,\n values=None,\n color=None,\n cmap=None,\n vmin=None,\n vmax=None,\n marker=\"o\",\n markersize=None,\n **kwargs\n):\n \"\"\"\n Plots a collection of Point and MultiPoint geometries to `ax`\n\n Parameters\n ----------\n ax : matplotlib.axes.Axes\n where shapes will be plotted\n geoms : sequence of `N` Points or MultiPoints\n\n values : a sequence of `N` values, optional\n Values mapped to colors using vmin, vmax, and cmap.\n Cannot be specified together with `color`.\n markersize : scalar or array-like, optional\n Size of the markers. Note that under the hood ``scatter`` is\n used, so the specified value will be proportional to the\n area of the marker (size in points^2).\n\n Returns\n -------\n collection : matplotlib.collections.Collection that was plotted\n \"\"\"\n if values is not None and color is not None:\n raise ValueError(\"Can only specify one of 'values' and 'color' kwargs\")\n\n geoms, multiindex = _flatten_multi_geoms(geoms)\n if values is not None:\n values = np.take(values, multiindex, axis=0)\n\n x = [p.x for p in geoms]\n y = [p.y for p in geoms]\n\n # matplotlib 1.4 does not support c=None, and < 2.0 does not support s=None\n if values is not None:\n kwargs[\"c\"] = values\n if markersize is not None:\n kwargs[\"s\"] = markersize\n\n # Add to kwargs for easier checking below.\n if color is not None:\n kwargs[\"color\"] = color\n if marker is not None:\n kwargs[\"marker\"] = marker\n _expand_kwargs(kwargs, multiindex)\n\n if \"norm\" not in kwargs:\n collection = ax.scatter(x, y, vmin=vmin, vmax=vmax, cmap=cmap, **kwargs)\n else:\n collection = ax.scatter(x, y, cmap=cmap, **kwargs)\n\n return collection\n\n\nplot_point_collection = deprecated(_plot_point_collection)\n\n\ndef plot_series(\n s, cmap=None, color=None, ax=None, figsize=None, aspect=\"auto\", **style_kwds\n):\n \"\"\"\n Plot a GeoSeries.\n\n Generate a plot of a GeoSeries geometry with matplotlib.\n\n Parameters\n ----------\n s : Series\n The GeoSeries to be plotted. Currently Polygon,\n MultiPolygon, LineString, MultiLineString and Point\n geometries can be plotted.\n cmap : str (default None)\n The name of a colormap recognized by matplotlib. Any\n colormap will work, but categorical colormaps are\n generally recommended. Examples of useful discrete\n colormaps include:\n\n tab10, tab20, Accent, Dark2, Paired, Pastel1, Set1, Set2\n\n color : str (default None)\n If specified, all objects will be colored uniformly.\n ax : matplotlib.pyplot.Artist (default None)\n axes on which to draw the plot\n figsize : pair of floats (default None)\n Size of the resulting matplotlib.figure.Figure. If the argument\n ax is given explicitly, figsize is ignored.\n aspect : 'auto', 'equal', None or float (default 'auto')\n Set aspect of axis. If 'auto', the default aspect for map plots is 'equal'; if\n however data are not projected (coordinates are long/lat), the aspect is by\n default set to 1/cos(s_y * pi/180) with s_y the y coordinate of the middle of\n the GeoSeries (the mean of the y range of bounding box) so that a long/lat\n square appears square in the middle of the plot. This implies an\n Equirectangular projection. If None, the aspect of `ax` won't be changed. It can\n also be set manually (float) as the ratio of y-unit to x-unit.\n **style_kwds : dict\n Color options to be passed on to the actual plot function, such\n as ``edgecolor``, ``facecolor``, ``linewidth``, ``markersize``,\n ``alpha``.\n\n Returns\n -------\n ax : matplotlib axes instance\n \"\"\"\n if \"colormap\" in style_kwds:\n warnings.warn(\n \"'colormap' is deprecated, please use 'cmap' instead \"\n \"(for consistency with matplotlib)\",\n FutureWarning,\n )\n cmap = style_kwds.pop(\"colormap\")\n if \"axes\" in style_kwds:\n warnings.warn(\n \"'axes' is deprecated, please use 'ax' instead \"\n \"(for consistency with pandas)\",\n FutureWarning,\n )\n ax = style_kwds.pop(\"axes\")\n\n try:\n import matplotlib.pyplot as plt\n except ImportError:\n raise ImportError(\n \"The matplotlib package is required for plotting in geopandas. \"\n \"You can install it using 'conda install -c conda-forge matplotlib' or \"\n \"'pip install matplotlib'.\"\n )\n\n if ax is None:\n fig, ax = plt.subplots(figsize=figsize)\n\n if aspect == \"auto\":\n if s.crs and s.crs.is_geographic:\n bounds = s.total_bounds\n y_coord = np.mean([bounds[1], bounds[3]])\n ax.set_aspect(1 / np.cos(y_coord * np.pi / 180))\n # formula ported from R package sp\n # https://github.com/edzer/sp/blob/master/R/mapasp.R\n else:\n ax.set_aspect(\"equal\")\n elif aspect is not None:\n ax.set_aspect(aspect)\n\n if s.empty:\n warnings.warn(\n \"The GeoSeries you are attempting to plot is \"\n \"empty. Nothing has been displayed.\",\n UserWarning,\n )\n return ax\n\n # if cmap is specified, create range of colors based on cmap\n values = None\n if cmap is not None:\n values = np.arange(len(s))\n if hasattr(cmap, \"N\"):\n values = values % cmap.N\n style_kwds[\"vmin\"] = style_kwds.get(\"vmin\", values.min())\n style_kwds[\"vmax\"] = style_kwds.get(\"vmax\", values.max())\n\n # decompose GeometryCollections\n geoms, multiindex = _flatten_multi_geoms(s.geometry, prefix=\"Geom\")\n values = np.take(values, multiindex, axis=0) if cmap else None\n expl_series = geopandas.GeoSeries(geoms)\n\n geom_types = expl_series.type\n poly_idx = np.asarray((geom_types == \"Polygon\") | (geom_types == \"MultiPolygon\"))\n line_idx = np.asarray(\n (geom_types == \"LineString\")\n | (geom_types == \"MultiLineString\")\n | (geom_types == \"LinearRing\")\n )\n point_idx = np.asarray((geom_types == \"Point\") | (geom_types == \"MultiPoint\"))\n\n # plot all Polygons and all MultiPolygon components in the same collection\n polys = expl_series[poly_idx]\n if not polys.empty:\n # color overrides both face and edgecolor. As we want people to be\n # able to use edgecolor as well, pass color to facecolor\n facecolor = style_kwds.pop(\"facecolor\", None)\n if color is not None:\n facecolor = color\n\n values_ = values[poly_idx] if cmap else None\n _plot_polygon_collection(\n ax, polys, values_, facecolor=facecolor, cmap=cmap, **style_kwds\n )\n\n # plot all LineStrings and MultiLineString components in same collection\n lines = expl_series[line_idx]\n if not lines.empty:\n values_ = values[line_idx] if cmap else None\n _plot_linestring_collection(\n ax, lines, values_, color=color, cmap=cmap, **style_kwds\n )\n\n # plot all Points in the same collection\n points = expl_series[point_idx]\n if not points.empty:\n values_ = values[point_idx] if cmap else None\n _plot_point_collection(\n ax, points, values_, color=color, cmap=cmap, **style_kwds\n )\n\n plt.draw()\n return ax\n\n\ndef plot_dataframe(\n df,\n column=None,\n cmap=None,\n color=None,\n ax=None,\n cax=None,\n categorical=False,\n legend=False,\n scheme=None,\n k=5,\n vmin=None,\n vmax=None,\n markersize=None,\n figsize=None,\n legend_kwds=None,\n categories=None,\n classification_kwds=None,\n missing_kwds=None,\n aspect=\"auto\",\n **style_kwds\n):\n \"\"\"\n Plot a GeoDataFrame.\n\n Generate a plot of a GeoDataFrame with matplotlib. If a\n column is specified, the plot coloring will be based on values\n in that column.\n\n Parameters\n ----------\n df : GeoDataFrame\n The GeoDataFrame to be plotted. Currently Polygon,\n MultiPolygon, LineString, MultiLineString and Point\n geometries can be plotted.\n column : str, np.array, pd.Series (default None)\n The name of the dataframe column, np.array, or pd.Series to be plotted.\n If np.array or pd.Series are used then it must have same length as\n dataframe. Values are used to color the plot. Ignored if `color` is\n also set.\n cmap : str (default None)\n The name of a colormap recognized by matplotlib.\n color : str (default None)\n If specified, all objects will be colored uniformly.\n ax : matplotlib.pyplot.Artist (default None)\n axes on which to draw the plot\n cax : matplotlib.pyplot Artist (default None)\n axes on which to draw the legend in case of color map.\n categorical : bool (default False)\n If False, cmap will reflect numerical values of the\n column being plotted. For non-numerical columns, this\n will be set to True.\n legend : bool (default False)\n Plot a legend. Ignored if no `column` is given, or if `color` is given.\n scheme : str (default None)\n Name of a choropleth classification scheme (requires mapclassify).\n A mapclassify.MapClassifier object will be used\n under the hood. Supported are all schemes provided by mapclassify (e.g.\n 'BoxPlot', 'EqualInterval', 'FisherJenks', 'FisherJenksSampled',\n 'HeadTailBreaks', 'JenksCaspall', 'JenksCaspallForced',\n 'JenksCaspallSampled', 'MaxP', 'MaximumBreaks',\n 'NaturalBreaks', 'Quantiles', 'Percentiles', 'StdMean',\n 'UserDefined'). Arguments can be passed in classification_kwds.\n k : int (default 5)\n Number of classes (ignored if scheme is None)\n vmin : None or float (default None)\n Minimum value of cmap. If None, the minimum data value\n in the column to be plotted is used.\n vmax : None or float (default None)\n Maximum value of cmap. If None, the maximum data value\n in the column to be plotted is used.\n markersize : str or float or sequence (default None)\n Only applies to point geometries within a frame.\n If a str, will use the values in the column of the frame specified\n by markersize to set the size of markers. Otherwise can be a value\n to apply to all points, or a sequence of the same length as the\n number of points.\n figsize : tuple of integers (default None)\n Size of the resulting matplotlib.figure.Figure. If the argument\n axes is given explicitly, figsize is ignored.\n legend_kwds : dict (default None)\n Keyword arguments to pass to matplotlib.pyplot.legend() or\n matplotlib.pyplot.colorbar().\n Additional accepted keywords when `scheme` is specified:\n\n fmt : string\n A formatting specification for the bin edges of the classes in the\n legend. For example, to have no decimals: ``{\"fmt\": \"{:.0f}\"}``.\n labels : list-like\n A list of legend labels to override the auto-generated labels.\n Needs to have the same number of elements as the number of\n classes (`k`).\n categories : list-like\n Ordered list-like object of categories to be used for categorical plot.\n classification_kwds : dict (default None)\n Keyword arguments to pass to mapclassify\n missing_kwds : dict (default None)\n Keyword arguments specifying color options (as style_kwds)\n to be passed on to geometries with missing values in addition to\n or overwriting other style kwds. If None, geometries with missing\n values are not plotted.\n aspect : 'auto', 'equal', None or float (default 'auto')\n Set aspect of axis. If 'auto', the default aspect for map plots is 'equal'; if\n however data are not projected (coordinates are long/lat), the aspect is by\n default set to 1/cos(df_y * pi/180) with df_y the y coordinate of the middle of\n the GeoDataFrame (the mean of the y range of bounding box) so that a long/lat\n square appears square in the middle of the plot. This implies an\n Equirectangular projection. If None, the aspect of `ax` won't be changed. It can\n also be set manually (float) as the ratio of y-unit to x-unit.\n\n **style_kwds : dict\n Style options to be passed on to the actual plot function, such\n as ``edgecolor``, ``facecolor``, ``linewidth``, ``markersize``,\n ``alpha``.\n\n Returns\n -------\n ax : matplotlib axes instance\n\n \"\"\"\n if \"colormap\" in style_kwds:\n warnings.warn(\n \"'colormap' is deprecated, please use 'cmap' instead \"\n \"(for consistency with matplotlib)\",\n FutureWarning,\n )\n cmap = style_kwds.pop(\"colormap\")\n if \"axes\" in style_kwds:\n warnings.warn(\n \"'axes' is deprecated, please use 'ax' instead \"\n \"(for consistency with pandas)\",\n FutureWarning,\n )\n ax = style_kwds.pop(\"axes\")\n if column is not None and color is not None:\n warnings.warn(\n \"Only specify one of 'column' or 'color'. Using 'color'.\", UserWarning\n )\n column = None\n\n try:\n import matplotlib.pyplot as plt\n except ImportError:\n raise ImportError(\n \"The matplotlib package is required for plotting in geopandas. \"\n \"You can install it using 'conda install -c conda-forge matplotlib' or \"\n \"'pip install matplotlib'.\"\n )\n\n if ax is None:\n if cax is not None:\n raise ValueError(\"'ax' can not be None if 'cax' is not.\")\n fig, ax = plt.subplots(figsize=figsize)\n\n if aspect == \"auto\":\n if df.crs and df.crs.is_geographic:\n bounds = df.total_bounds\n y_coord = np.mean([bounds[1], bounds[3]])\n ax.set_aspect(1 / np.cos(y_coord * np.pi / 180))\n # formula ported from R package sp\n # https://github.com/edzer/sp/blob/master/R/mapasp.R\n else:\n ax.set_aspect(\"equal\")\n elif aspect is not None:\n ax.set_aspect(aspect)\n\n # GH 1555\n # if legend_kwds set, copy so we don't update it in place\n if legend_kwds is not None:\n legend_kwds = legend_kwds.copy()\n\n if df.empty:\n warnings.warn(\n \"The GeoDataFrame you are attempting to plot is \"\n \"empty. Nothing has been displayed.\",\n UserWarning,\n )\n return ax\n\n if isinstance(markersize, str):\n markersize = df[markersize].values\n\n if column is None:\n return plot_series(\n df.geometry,\n cmap=cmap,\n color=color,\n ax=ax,\n figsize=figsize,\n markersize=markersize,\n aspect=aspect,\n **style_kwds\n )\n\n # To accept pd.Series and np.arrays as column\n if isinstance(column, (np.ndarray, pd.Series)):\n if column.shape[0] != df.shape[0]:\n raise ValueError(\n \"The dataframe and given column have different number of rows.\"\n )\n else:\n values = column\n else:\n values = df[column]\n\n if pd.api.types.is_categorical_dtype(values.dtype):\n if categories is not None:\n raise ValueError(\n \"Cannot specify 'categories' when column has categorical dtype\"\n )\n categorical = True\n elif values.dtype is np.dtype(\"O\") or categories:\n categorical = True\n\n nan_idx = np.asarray(pd.isna(values), dtype=\"bool\")\n\n # Define `values` as a Series\n if categorical:\n if cmap is None:\n cmap = \"tab10\"\n\n cat = pd.Categorical(values, categories=categories)\n categories = list(cat.categories)\n\n # values missing in the Categorical but not in original values\n missing = list(np.unique(values[~nan_idx & cat.isna()]))\n if missing:\n raise ValueError(\n \"Column contains values not listed in categories. \"\n \"Missing categories: {}.\".format(missing)\n )\n\n values = cat.codes[~nan_idx]\n vmin = 0 if vmin is None else vmin\n vmax = len(categories) - 1 if vmax is None else vmax\n\n if scheme is not None:\n if classification_kwds is None:\n classification_kwds = {}\n if \"k\" not in classification_kwds:\n classification_kwds[\"k\"] = k\n\n binning = _mapclassify_choro(values[~nan_idx], scheme, **classification_kwds)\n # set categorical to True for creating the legend\n categorical = True\n if legend_kwds is not None and \"labels\" in legend_kwds:\n if len(legend_kwds[\"labels\"]) != binning.k:\n raise ValueError(\n \"Number of labels must match number of bins, \"\n \"received {} labels for {} bins\".format(\n len(legend_kwds[\"labels\"]), binning.k\n )\n )\n else:\n categories = list(legend_kwds.pop(\"labels\"))\n else:\n fmt = \"{:.2f}\"\n if legend_kwds is not None and \"fmt\" in legend_kwds:\n fmt = legend_kwds.pop(\"fmt\")\n categories = binning.get_legend_classes(fmt)\n values = np.array(binning.yb)\n\n # fill values with placeholder where were NaNs originally to map them properly\n # (after removing them in categorical or scheme)\n if categorical:\n for n in np.where(nan_idx)[0]:\n values = np.insert(values, n, values[0])\n\n mn = values[~np.isnan(values)].min() if vmin is None else vmin\n mx = values[~np.isnan(values)].max() if vmax is None else vmax\n\n # decompose GeometryCollections\n geoms, multiindex = _flatten_multi_geoms(df.geometry, prefix=\"Geom\")\n values = np.take(values, multiindex, axis=0)\n nan_idx = np.take(nan_idx, multiindex, axis=0)\n expl_series = geopandas.GeoSeries(geoms)\n\n geom_types = expl_series.type\n poly_idx = np.asarray((geom_types == \"Polygon\") | (geom_types == \"MultiPolygon\"))\n line_idx = np.asarray(\n (geom_types == \"LineString\")\n | (geom_types == \"MultiLineString\")\n | (geom_types == \"LinearRing\")\n )\n point_idx = np.asarray((geom_types == \"Point\") | (geom_types == \"MultiPoint\"))\n\n # plot all Polygons and all MultiPolygon components in the same collection\n polys = expl_series[poly_idx & np.invert(nan_idx)]\n subset = values[poly_idx & np.invert(nan_idx)]\n if not polys.empty:\n _plot_polygon_collection(\n ax, polys, subset, vmin=mn, vmax=mx, cmap=cmap, **style_kwds\n )\n\n # plot all LineStrings and MultiLineString components in same collection\n lines = expl_series[line_idx & np.invert(nan_idx)]\n subset = values[line_idx & np.invert(nan_idx)]\n if not lines.empty:\n _plot_linestring_collection(\n ax, lines, subset, vmin=mn, vmax=mx, cmap=cmap, **style_kwds\n )\n\n # plot all Points in the same collection\n points = expl_series[point_idx & np.invert(nan_idx)]\n subset = values[point_idx & np.invert(nan_idx)]\n if not points.empty:\n if isinstance(markersize, np.ndarray):\n markersize = np.take(markersize, multiindex, axis=0)\n markersize = markersize[point_idx & np.invert(nan_idx)]\n _plot_point_collection(\n ax,\n points,\n subset,\n vmin=mn,\n vmax=mx,\n markersize=markersize,\n cmap=cmap,\n **style_kwds\n )\n\n if missing_kwds is not None and not expl_series[nan_idx].empty:\n if color:\n if \"color\" not in missing_kwds:\n missing_kwds[\"color\"] = color\n\n merged_kwds = style_kwds.copy()\n merged_kwds.update(missing_kwds)\n\n plot_series(expl_series[nan_idx], ax=ax, **merged_kwds)\n\n if legend and not color:\n\n if legend_kwds is None:\n legend_kwds = {}\n if \"fmt\" in legend_kwds:\n legend_kwds.pop(\"fmt\")\n\n from matplotlib.lines import Line2D\n from matplotlib.colors import Normalize\n from matplotlib import cm\n\n norm = style_kwds.get(\"norm\", None)\n if not norm:\n norm = Normalize(vmin=mn, vmax=mx)\n n_cmap = cm.ScalarMappable(norm=norm, cmap=cmap)\n if categorical:\n patches = []\n for value, cat in enumerate(categories):\n patches.append(\n Line2D(\n [0],\n [0],\n linestyle=\"none\",\n marker=\"o\",\n alpha=style_kwds.get(\"alpha\", 1),\n markersize=10,\n markerfacecolor=n_cmap.to_rgba(value),\n markeredgewidth=0,\n )\n )\n if missing_kwds is not None:\n if \"color\" in merged_kwds:\n merged_kwds[\"facecolor\"] = merged_kwds[\"color\"]\n patches.append(\n Line2D(\n [0],\n [0],\n linestyle=\"none\",\n marker=\"o\",\n alpha=merged_kwds.get(\"alpha\", 1),\n markersize=10,\n markerfacecolor=merged_kwds.get(\"facecolor\", None),\n markeredgecolor=merged_kwds.get(\"edgecolor\", None),\n markeredgewidth=merged_kwds.get(\n \"linewidth\", 1 if merged_kwds.get(\"edgecolor\", False) else 0\n ),\n )\n )\n categories.append(merged_kwds.get(\"label\", \"NaN\"))\n legend_kwds.setdefault(\"numpoints\", 1)\n legend_kwds.setdefault(\"loc\", \"best\")\n ax.legend(patches, categories, **legend_kwds)\n else:\n\n if cax is not None:\n legend_kwds.setdefault(\"cax\", cax)\n else:\n legend_kwds.setdefault(\"ax\", ax)\n\n n_cmap.set_array([])\n ax.get_figure().colorbar(n_cmap, **legend_kwds)\n\n plt.draw()\n return ax\n\n\ndef _mapclassify_choro(values, scheme, **classification_kwds):\n \"\"\"\n Wrapper for choropleth schemes from mapclassify for use with plot_dataframe\n\n Parameters\n ----------\n values\n Series to be plotted\n scheme : str\n One of mapclassify classification schemes\n Options are BoxPlot, EqualInterval, FisherJenks,\n FisherJenksSampled, HeadTailBreaks, JenksCaspall,\n JenksCaspallForced, JenksCaspallSampled, MaxP,\n MaximumBreaks, NaturalBreaks, Quantiles, Percentiles, StdMean,\n UserDefined\n\n **classification_kwds : dict\n Keyword arguments for classification scheme\n For details see mapclassify documentation:\n https://pysal.org/mapclassify/api.html\n\n Returns\n -------\n binning\n Binning objects that holds the Series with values replaced with\n class identifier and the bins.\n \"\"\"\n try:\n import mapclassify.classifiers as classifiers\n\n except ImportError:\n raise ImportError(\n \"The 'mapclassify' >= 2.2.0 package is required to use the 'scheme' keyword\"\n )\n from mapclassify import __version__ as mc_version\n\n if mc_version < LooseVersion(\"2.2.0\"):\n raise ImportError(\n \"The 'mapclassify' >= 2.2.0 package is required to \"\n \"use the 'scheme' keyword\"\n )\n schemes = {}\n for classifier in classifiers.CLASSIFIERS:\n schemes[classifier.lower()] = getattr(classifiers, classifier)\n\n scheme = scheme.lower()\n\n # mapclassify < 2.1 cleaned up the scheme names (removing underscores)\n # trying both to keep compatibility with older versions and provide\n # compatibility with newer versions of mapclassify\n oldnew = {\n \"Box_Plot\": \"BoxPlot\",\n \"Equal_Interval\": \"EqualInterval\",\n \"Fisher_Jenks\": \"FisherJenks\",\n \"Fisher_Jenks_Sampled\": \"FisherJenksSampled\",\n \"HeadTail_Breaks\": \"HeadTailBreaks\",\n \"Jenks_Caspall\": \"JenksCaspall\",\n \"Jenks_Caspall_Forced\": \"JenksCaspallForced\",\n \"Jenks_Caspall_Sampled\": \"JenksCaspallSampled\",\n \"Max_P_Plassifier\": \"MaxP\",\n \"Maximum_Breaks\": \"MaximumBreaks\",\n \"Natural_Breaks\": \"NaturalBreaks\",\n \"Std_Mean\": \"StdMean\",\n \"User_Defined\": \"UserDefined\",\n }\n scheme_names_mapping = {}\n scheme_names_mapping.update(\n {old.lower(): new.lower() for old, new in oldnew.items()}\n )\n scheme_names_mapping.update(\n {new.lower(): old.lower() for old, new in oldnew.items()}\n )\n\n try:\n scheme_class = schemes[scheme]\n except KeyError:\n scheme = scheme_names_mapping.get(scheme, scheme)\n try:\n scheme_class = schemes[scheme]\n except KeyError:\n raise ValueError(\n \"Invalid scheme. Scheme must be in the set: %r\" % schemes.keys()\n )\n\n if classification_kwds[\"k\"] is not None:\n from inspect import getfullargspec as getspec\n\n spec = getspec(scheme_class.__init__)\n if \"k\" not in spec.args:\n del classification_kwds[\"k\"]\n try:\n binning = scheme_class(np.asarray(values), **classification_kwds)\n except TypeError:\n raise TypeError(\"Invalid keyword argument for %r \" % scheme)\n return binning\n", "path": "geopandas/plotting.py" } ]
[ { "content": "import warnings\n\nimport numpy as np\nimport pandas as pd\n\nimport geopandas\n\nfrom distutils.version import LooseVersion\n\n\ndef deprecated(new):\n \"\"\"Helper to provide deprecation warning.\"\"\"\n\n def old(*args, **kwargs):\n warnings.warn(\n \"{} is intended for internal \".format(new.__name__[1:])\n + \"use only, and will be deprecated.\",\n DeprecationWarning,\n stacklevel=2,\n )\n new(*args, **kwargs)\n\n return old\n\n\ndef _flatten_multi_geoms(geoms, prefix=\"Multi\"):\n \"\"\"\n Returns Series like geoms and index, except that any Multi geometries\n are split into their components and indices are repeated for all component\n in the same Multi geometry. Maintains 1:1 matching of geometry to value.\n\n Prefix specifies type of geometry to be flatten. 'Multi' for MultiPoint and similar,\n \"Geom\" for GeometryCollection.\n\n Returns\n -------\n components : list of geometry\n\n component_index : index array\n indices are repeated for all components in the same Multi geometry\n \"\"\"\n components, component_index = [], []\n\n if not geoms.geom_type.str.startswith(prefix).any():\n return geoms, np.arange(len(geoms))\n\n for ix, geom in enumerate(geoms):\n if geom.type.startswith(prefix):\n for poly in geom.geoms:\n components.append(poly)\n component_index.append(ix)\n else:\n components.append(geom)\n component_index.append(ix)\n\n return components, np.array(component_index)\n\n\ndef _expand_kwargs(kwargs, multiindex):\n \"\"\"\n Most arguments to the plot functions must be a (single) value, or a sequence\n of values. This function checks each key-value pair in 'kwargs' and expands\n it (in place) to the correct length/formats with help of 'multiindex', unless\n the value appears to already be a valid (single) value for the key.\n \"\"\"\n from matplotlib.colors import is_color_like\n from typing import Iterable\n\n for att, value in kwargs.items():\n if \"color\" in att: # color(s), edgecolor(s), facecolor(s)\n if is_color_like(value):\n continue\n elif \"linestyle\" in att: # linestyle(s)\n # A single linestyle can be 2-tuple of a number and an iterable.\n if (\n isinstance(value, tuple)\n and len(value) == 2\n and isinstance(value[1], Iterable)\n ):\n continue\n elif att in [\"marker\", \"alpha\"]:\n # For these attributes, only a single value is allowed, so never expand.\n continue\n\n if pd.api.types.is_list_like(value):\n kwargs[att] = np.take(value, multiindex, axis=0)\n\n\ndef _plot_polygon_collection(\n ax, geoms, values=None, color=None, cmap=None, vmin=None, vmax=None, **kwargs\n):\n \"\"\"\n Plots a collection of Polygon and MultiPolygon geometries to `ax`\n\n Parameters\n ----------\n ax : matplotlib.axes.Axes\n where shapes will be plotted\n geoms : a sequence of `N` Polygons and/or MultiPolygons (can be mixed)\n\n values : a sequence of `N` values, optional\n Values will be mapped to colors using vmin/vmax/cmap. They should\n have 1:1 correspondence with the geometries (not their components).\n Otherwise follows `color` / `facecolor` kwargs.\n edgecolor : single color or sequence of `N` colors\n Color for the edge of the polygons\n facecolor : single color or sequence of `N` colors\n Color to fill the polygons. Cannot be used together with `values`.\n color : single color or sequence of `N` colors\n Sets both `edgecolor` and `facecolor`\n **kwargs\n Additional keyword arguments passed to the collection\n\n Returns\n -------\n collection : matplotlib.collections.Collection that was plotted\n \"\"\"\n\n try:\n from descartes.patch import PolygonPatch\n except ImportError:\n raise ImportError(\n \"The descartes package is required for plotting polygons in geopandas. \"\n \"You can install it using 'conda install -c conda-forge descartes' or \"\n \"'pip install descartes'.\"\n )\n from matplotlib.collections import PatchCollection\n\n geoms, multiindex = _flatten_multi_geoms(geoms)\n if values is not None:\n values = np.take(values, multiindex, axis=0)\n\n # PatchCollection does not accept some kwargs.\n kwargs = {\n att: value\n for att, value in kwargs.items()\n if att not in [\"markersize\", \"marker\"]\n }\n\n # Add to kwargs for easier checking below.\n if color is not None:\n kwargs[\"color\"] = color\n\n _expand_kwargs(kwargs, multiindex)\n\n collection = PatchCollection([PolygonPatch(poly) for poly in geoms], **kwargs)\n\n if values is not None:\n collection.set_array(np.asarray(values))\n collection.set_cmap(cmap)\n if \"norm\" not in kwargs:\n collection.set_clim(vmin, vmax)\n\n ax.add_collection(collection, autolim=True)\n ax.autoscale_view()\n return collection\n\n\nplot_polygon_collection = deprecated(_plot_polygon_collection)\n\n\ndef _plot_linestring_collection(\n ax, geoms, values=None, color=None, cmap=None, vmin=None, vmax=None, **kwargs\n):\n \"\"\"\n Plots a collection of LineString and MultiLineString geometries to `ax`\n\n Parameters\n ----------\n ax : matplotlib.axes.Axes\n where shapes will be plotted\n geoms : a sequence of `N` LineStrings and/or MultiLineStrings (can be\n mixed)\n values : a sequence of `N` values, optional\n Values will be mapped to colors using vmin/vmax/cmap. They should\n have 1:1 correspondence with the geometries (not their components).\n color : single color or sequence of `N` colors\n Cannot be used together with `values`.\n\n Returns\n -------\n collection : matplotlib.collections.Collection that was plotted\n \"\"\"\n from matplotlib.collections import LineCollection\n\n geoms, multiindex = _flatten_multi_geoms(geoms)\n if values is not None:\n values = np.take(values, multiindex, axis=0)\n\n # LineCollection does not accept some kwargs.\n kwargs = {\n att: value\n for att, value in kwargs.items()\n if att not in [\"markersize\", \"marker\"]\n }\n\n # Add to kwargs for easier checking below.\n if color is not None:\n kwargs[\"color\"] = color\n\n _expand_kwargs(kwargs, multiindex)\n\n segments = [np.array(linestring.coords)[:, :2] for linestring in geoms]\n collection = LineCollection(segments, **kwargs)\n\n if values is not None:\n collection.set_array(np.asarray(values))\n collection.set_cmap(cmap)\n if \"norm\" not in kwargs:\n collection.set_clim(vmin, vmax)\n\n ax.add_collection(collection, autolim=True)\n ax.autoscale_view()\n return collection\n\n\nplot_linestring_collection = deprecated(_plot_linestring_collection)\n\n\ndef _plot_point_collection(\n ax,\n geoms,\n values=None,\n color=None,\n cmap=None,\n vmin=None,\n vmax=None,\n marker=\"o\",\n markersize=None,\n **kwargs\n):\n \"\"\"\n Plots a collection of Point and MultiPoint geometries to `ax`\n\n Parameters\n ----------\n ax : matplotlib.axes.Axes\n where shapes will be plotted\n geoms : sequence of `N` Points or MultiPoints\n\n values : a sequence of `N` values, optional\n Values mapped to colors using vmin, vmax, and cmap.\n Cannot be specified together with `color`.\n markersize : scalar or array-like, optional\n Size of the markers. Note that under the hood ``scatter`` is\n used, so the specified value will be proportional to the\n area of the marker (size in points^2).\n\n Returns\n -------\n collection : matplotlib.collections.Collection that was plotted\n \"\"\"\n if values is not None and color is not None:\n raise ValueError(\"Can only specify one of 'values' and 'color' kwargs\")\n\n geoms, multiindex = _flatten_multi_geoms(geoms)\n if values is not None:\n values = np.take(values, multiindex, axis=0)\n\n x = [p.x for p in geoms]\n y = [p.y for p in geoms]\n\n # matplotlib 1.4 does not support c=None, and < 2.0 does not support s=None\n if values is not None:\n kwargs[\"c\"] = values\n if markersize is not None:\n kwargs[\"s\"] = markersize\n\n # Add to kwargs for easier checking below.\n if color is not None:\n kwargs[\"color\"] = color\n if marker is not None:\n kwargs[\"marker\"] = marker\n _expand_kwargs(kwargs, multiindex)\n\n if \"norm\" not in kwargs:\n collection = ax.scatter(x, y, vmin=vmin, vmax=vmax, cmap=cmap, **kwargs)\n else:\n collection = ax.scatter(x, y, cmap=cmap, **kwargs)\n\n return collection\n\n\nplot_point_collection = deprecated(_plot_point_collection)\n\n\ndef plot_series(\n s, cmap=None, color=None, ax=None, figsize=None, aspect=\"auto\", **style_kwds\n):\n \"\"\"\n Plot a GeoSeries.\n\n Generate a plot of a GeoSeries geometry with matplotlib.\n\n Parameters\n ----------\n s : Series\n The GeoSeries to be plotted. Currently Polygon,\n MultiPolygon, LineString, MultiLineString and Point\n geometries can be plotted.\n cmap : str (default None)\n The name of a colormap recognized by matplotlib. Any\n colormap will work, but categorical colormaps are\n generally recommended. Examples of useful discrete\n colormaps include:\n\n tab10, tab20, Accent, Dark2, Paired, Pastel1, Set1, Set2\n\n color : str (default None)\n If specified, all objects will be colored uniformly.\n ax : matplotlib.pyplot.Artist (default None)\n axes on which to draw the plot\n figsize : pair of floats (default None)\n Size of the resulting matplotlib.figure.Figure. If the argument\n ax is given explicitly, figsize is ignored.\n aspect : 'auto', 'equal', None or float (default 'auto')\n Set aspect of axis. If 'auto', the default aspect for map plots is 'equal'; if\n however data are not projected (coordinates are long/lat), the aspect is by\n default set to 1/cos(s_y * pi/180) with s_y the y coordinate of the middle of\n the GeoSeries (the mean of the y range of bounding box) so that a long/lat\n square appears square in the middle of the plot. This implies an\n Equirectangular projection. If None, the aspect of `ax` won't be changed. It can\n also be set manually (float) as the ratio of y-unit to x-unit.\n **style_kwds : dict\n Color options to be passed on to the actual plot function, such\n as ``edgecolor``, ``facecolor``, ``linewidth``, ``markersize``,\n ``alpha``.\n\n Returns\n -------\n ax : matplotlib axes instance\n \"\"\"\n if \"colormap\" in style_kwds:\n warnings.warn(\n \"'colormap' is deprecated, please use 'cmap' instead \"\n \"(for consistency with matplotlib)\",\n FutureWarning,\n )\n cmap = style_kwds.pop(\"colormap\")\n if \"axes\" in style_kwds:\n warnings.warn(\n \"'axes' is deprecated, please use 'ax' instead \"\n \"(for consistency with pandas)\",\n FutureWarning,\n )\n ax = style_kwds.pop(\"axes\")\n\n try:\n import matplotlib.pyplot as plt\n except ImportError:\n raise ImportError(\n \"The matplotlib package is required for plotting in geopandas. \"\n \"You can install it using 'conda install -c conda-forge matplotlib' or \"\n \"'pip install matplotlib'.\"\n )\n\n if ax is None:\n fig, ax = plt.subplots(figsize=figsize)\n\n if aspect == \"auto\":\n if s.crs and s.crs.is_geographic:\n bounds = s.total_bounds\n y_coord = np.mean([bounds[1], bounds[3]])\n ax.set_aspect(1 / np.cos(y_coord * np.pi / 180))\n # formula ported from R package sp\n # https://github.com/edzer/sp/blob/master/R/mapasp.R\n else:\n ax.set_aspect(\"equal\")\n elif aspect is not None:\n ax.set_aspect(aspect)\n\n if s.empty:\n warnings.warn(\n \"The GeoSeries you are attempting to plot is \"\n \"empty. Nothing has been displayed.\",\n UserWarning,\n )\n return ax\n\n # if cmap is specified, create range of colors based on cmap\n values = None\n if cmap is not None:\n values = np.arange(len(s))\n if hasattr(cmap, \"N\"):\n values = values % cmap.N\n style_kwds[\"vmin\"] = style_kwds.get(\"vmin\", values.min())\n style_kwds[\"vmax\"] = style_kwds.get(\"vmax\", values.max())\n\n # decompose GeometryCollections\n geoms, multiindex = _flatten_multi_geoms(s.geometry, prefix=\"Geom\")\n values = np.take(values, multiindex, axis=0) if cmap else None\n expl_series = geopandas.GeoSeries(geoms)\n\n geom_types = expl_series.type\n poly_idx = np.asarray((geom_types == \"Polygon\") | (geom_types == \"MultiPolygon\"))\n line_idx = np.asarray(\n (geom_types == \"LineString\")\n | (geom_types == \"MultiLineString\")\n | (geom_types == \"LinearRing\")\n )\n point_idx = np.asarray((geom_types == \"Point\") | (geom_types == \"MultiPoint\"))\n\n # plot all Polygons and all MultiPolygon components in the same collection\n polys = expl_series[poly_idx]\n if not polys.empty:\n # color overrides both face and edgecolor. As we want people to be\n # able to use edgecolor as well, pass color to facecolor\n facecolor = style_kwds.pop(\"facecolor\", None)\n if color is not None:\n facecolor = color\n\n values_ = values[poly_idx] if cmap else None\n _plot_polygon_collection(\n ax, polys, values_, facecolor=facecolor, cmap=cmap, **style_kwds\n )\n\n # plot all LineStrings and MultiLineString components in same collection\n lines = expl_series[line_idx]\n if not lines.empty:\n values_ = values[line_idx] if cmap else None\n _plot_linestring_collection(\n ax, lines, values_, color=color, cmap=cmap, **style_kwds\n )\n\n # plot all Points in the same collection\n points = expl_series[point_idx]\n if not points.empty:\n values_ = values[point_idx] if cmap else None\n _plot_point_collection(\n ax, points, values_, color=color, cmap=cmap, **style_kwds\n )\n\n plt.draw()\n return ax\n\n\ndef plot_dataframe(\n df,\n column=None,\n cmap=None,\n color=None,\n ax=None,\n cax=None,\n categorical=False,\n legend=False,\n scheme=None,\n k=5,\n vmin=None,\n vmax=None,\n markersize=None,\n figsize=None,\n legend_kwds=None,\n categories=None,\n classification_kwds=None,\n missing_kwds=None,\n aspect=\"auto\",\n **style_kwds\n):\n \"\"\"\n Plot a GeoDataFrame.\n\n Generate a plot of a GeoDataFrame with matplotlib. If a\n column is specified, the plot coloring will be based on values\n in that column.\n\n Parameters\n ----------\n df : GeoDataFrame\n The GeoDataFrame to be plotted. Currently Polygon,\n MultiPolygon, LineString, MultiLineString and Point\n geometries can be plotted.\n column : str, np.array, pd.Series (default None)\n The name of the dataframe column, np.array, or pd.Series to be plotted.\n If np.array or pd.Series are used then it must have same length as\n dataframe. Values are used to color the plot. Ignored if `color` is\n also set.\n cmap : str (default None)\n The name of a colormap recognized by matplotlib.\n color : str (default None)\n If specified, all objects will be colored uniformly.\n ax : matplotlib.pyplot.Artist (default None)\n axes on which to draw the plot\n cax : matplotlib.pyplot Artist (default None)\n axes on which to draw the legend in case of color map.\n categorical : bool (default False)\n If False, cmap will reflect numerical values of the\n column being plotted. For non-numerical columns, this\n will be set to True.\n legend : bool (default False)\n Plot a legend. Ignored if no `column` is given, or if `color` is given.\n scheme : str (default None)\n Name of a choropleth classification scheme (requires mapclassify).\n A mapclassify.MapClassifier object will be used\n under the hood. Supported are all schemes provided by mapclassify (e.g.\n 'BoxPlot', 'EqualInterval', 'FisherJenks', 'FisherJenksSampled',\n 'HeadTailBreaks', 'JenksCaspall', 'JenksCaspallForced',\n 'JenksCaspallSampled', 'MaxP', 'MaximumBreaks',\n 'NaturalBreaks', 'Quantiles', 'Percentiles', 'StdMean',\n 'UserDefined'). Arguments can be passed in classification_kwds.\n k : int (default 5)\n Number of classes (ignored if scheme is None)\n vmin : None or float (default None)\n Minimum value of cmap. If None, the minimum data value\n in the column to be plotted is used.\n vmax : None or float (default None)\n Maximum value of cmap. If None, the maximum data value\n in the column to be plotted is used.\n markersize : str or float or sequence (default None)\n Only applies to point geometries within a frame.\n If a str, will use the values in the column of the frame specified\n by markersize to set the size of markers. Otherwise can be a value\n to apply to all points, or a sequence of the same length as the\n number of points.\n figsize : tuple of integers (default None)\n Size of the resulting matplotlib.figure.Figure. If the argument\n axes is given explicitly, figsize is ignored.\n legend_kwds : dict (default None)\n Keyword arguments to pass to matplotlib.pyplot.legend() or\n matplotlib.pyplot.colorbar().\n Additional accepted keywords when `scheme` is specified:\n\n fmt : string\n A formatting specification for the bin edges of the classes in the\n legend. For example, to have no decimals: ``{\"fmt\": \"{:.0f}\"}``.\n labels : list-like\n A list of legend labels to override the auto-generated labels.\n Needs to have the same number of elements as the number of\n classes (`k`).\n categories : list-like\n Ordered list-like object of categories to be used for categorical plot.\n classification_kwds : dict (default None)\n Keyword arguments to pass to mapclassify\n missing_kwds : dict (default None)\n Keyword arguments specifying color options (as style_kwds)\n to be passed on to geometries with missing values in addition to\n or overwriting other style kwds. If None, geometries with missing\n values are not plotted.\n aspect : 'auto', 'equal', None or float (default 'auto')\n Set aspect of axis. If 'auto', the default aspect for map plots is 'equal'; if\n however data are not projected (coordinates are long/lat), the aspect is by\n default set to 1/cos(df_y * pi/180) with df_y the y coordinate of the middle of\n the GeoDataFrame (the mean of the y range of bounding box) so that a long/lat\n square appears square in the middle of the plot. This implies an\n Equirectangular projection. If None, the aspect of `ax` won't be changed. It can\n also be set manually (float) as the ratio of y-unit to x-unit.\n\n **style_kwds : dict\n Style options to be passed on to the actual plot function, such\n as ``edgecolor``, ``facecolor``, ``linewidth``, ``markersize``,\n ``alpha``.\n\n Returns\n -------\n ax : matplotlib axes instance\n\n \"\"\"\n if \"colormap\" in style_kwds:\n warnings.warn(\n \"'colormap' is deprecated, please use 'cmap' instead \"\n \"(for consistency with matplotlib)\",\n FutureWarning,\n )\n cmap = style_kwds.pop(\"colormap\")\n if \"axes\" in style_kwds:\n warnings.warn(\n \"'axes' is deprecated, please use 'ax' instead \"\n \"(for consistency with pandas)\",\n FutureWarning,\n )\n ax = style_kwds.pop(\"axes\")\n if column is not None and color is not None:\n warnings.warn(\n \"Only specify one of 'column' or 'color'. Using 'color'.\", UserWarning\n )\n column = None\n\n try:\n import matplotlib.pyplot as plt\n except ImportError:\n raise ImportError(\n \"The matplotlib package is required for plotting in geopandas. \"\n \"You can install it using 'conda install -c conda-forge matplotlib' or \"\n \"'pip install matplotlib'.\"\n )\n\n if ax is None:\n if cax is not None:\n raise ValueError(\"'ax' can not be None if 'cax' is not.\")\n fig, ax = plt.subplots(figsize=figsize)\n\n if aspect == \"auto\":\n if df.crs and df.crs.is_geographic:\n bounds = df.total_bounds\n y_coord = np.mean([bounds[1], bounds[3]])\n ax.set_aspect(1 / np.cos(y_coord * np.pi / 180))\n # formula ported from R package sp\n # https://github.com/edzer/sp/blob/master/R/mapasp.R\n else:\n ax.set_aspect(\"equal\")\n elif aspect is not None:\n ax.set_aspect(aspect)\n\n # GH 1555\n # if legend_kwds set, copy so we don't update it in place\n if legend_kwds is not None:\n legend_kwds = legend_kwds.copy()\n\n if df.empty:\n warnings.warn(\n \"The GeoDataFrame you are attempting to plot is \"\n \"empty. Nothing has been displayed.\",\n UserWarning,\n )\n return ax\n\n if isinstance(markersize, str):\n markersize = df[markersize].values\n\n if column is None:\n return plot_series(\n df.geometry,\n cmap=cmap,\n color=color,\n ax=ax,\n figsize=figsize,\n markersize=markersize,\n aspect=aspect,\n **style_kwds\n )\n\n # To accept pd.Series and np.arrays as column\n if isinstance(column, (np.ndarray, pd.Series)):\n if column.shape[0] != df.shape[0]:\n raise ValueError(\n \"The dataframe and given column have different number of rows.\"\n )\n else:\n values = column\n\n # Make sure index of a Series matches index of df\n if isinstance(values, pd.Series):\n values = values.reindex(df.index)\n else:\n values = df[column]\n\n if pd.api.types.is_categorical_dtype(values.dtype):\n if categories is not None:\n raise ValueError(\n \"Cannot specify 'categories' when column has categorical dtype\"\n )\n categorical = True\n elif values.dtype is np.dtype(\"O\") or categories:\n categorical = True\n\n nan_idx = np.asarray(pd.isna(values), dtype=\"bool\")\n\n # Define `values` as a Series\n if categorical:\n if cmap is None:\n cmap = \"tab10\"\n\n cat = pd.Categorical(values, categories=categories)\n categories = list(cat.categories)\n\n # values missing in the Categorical but not in original values\n missing = list(np.unique(values[~nan_idx & cat.isna()]))\n if missing:\n raise ValueError(\n \"Column contains values not listed in categories. \"\n \"Missing categories: {}.\".format(missing)\n )\n\n values = cat.codes[~nan_idx]\n vmin = 0 if vmin is None else vmin\n vmax = len(categories) - 1 if vmax is None else vmax\n\n if scheme is not None:\n if classification_kwds is None:\n classification_kwds = {}\n if \"k\" not in classification_kwds:\n classification_kwds[\"k\"] = k\n\n binning = _mapclassify_choro(values[~nan_idx], scheme, **classification_kwds)\n # set categorical to True for creating the legend\n categorical = True\n if legend_kwds is not None and \"labels\" in legend_kwds:\n if len(legend_kwds[\"labels\"]) != binning.k:\n raise ValueError(\n \"Number of labels must match number of bins, \"\n \"received {} labels for {} bins\".format(\n len(legend_kwds[\"labels\"]), binning.k\n )\n )\n else:\n categories = list(legend_kwds.pop(\"labels\"))\n else:\n fmt = \"{:.2f}\"\n if legend_kwds is not None and \"fmt\" in legend_kwds:\n fmt = legend_kwds.pop(\"fmt\")\n categories = binning.get_legend_classes(fmt)\n values = np.array(binning.yb)\n\n # fill values with placeholder where were NaNs originally to map them properly\n # (after removing them in categorical or scheme)\n if categorical:\n for n in np.where(nan_idx)[0]:\n values = np.insert(values, n, values[0])\n\n mn = values[~np.isnan(values)].min() if vmin is None else vmin\n mx = values[~np.isnan(values)].max() if vmax is None else vmax\n\n # decompose GeometryCollections\n geoms, multiindex = _flatten_multi_geoms(df.geometry, prefix=\"Geom\")\n values = np.take(values, multiindex, axis=0)\n nan_idx = np.take(nan_idx, multiindex, axis=0)\n expl_series = geopandas.GeoSeries(geoms)\n\n geom_types = expl_series.type\n poly_idx = np.asarray((geom_types == \"Polygon\") | (geom_types == \"MultiPolygon\"))\n line_idx = np.asarray(\n (geom_types == \"LineString\")\n | (geom_types == \"MultiLineString\")\n | (geom_types == \"LinearRing\")\n )\n point_idx = np.asarray((geom_types == \"Point\") | (geom_types == \"MultiPoint\"))\n\n # plot all Polygons and all MultiPolygon components in the same collection\n polys = expl_series[poly_idx & np.invert(nan_idx)]\n subset = values[poly_idx & np.invert(nan_idx)]\n if not polys.empty:\n _plot_polygon_collection(\n ax, polys, subset, vmin=mn, vmax=mx, cmap=cmap, **style_kwds\n )\n\n # plot all LineStrings and MultiLineString components in same collection\n lines = expl_series[line_idx & np.invert(nan_idx)]\n subset = values[line_idx & np.invert(nan_idx)]\n if not lines.empty:\n _plot_linestring_collection(\n ax, lines, subset, vmin=mn, vmax=mx, cmap=cmap, **style_kwds\n )\n\n # plot all Points in the same collection\n points = expl_series[point_idx & np.invert(nan_idx)]\n subset = values[point_idx & np.invert(nan_idx)]\n if not points.empty:\n if isinstance(markersize, np.ndarray):\n markersize = np.take(markersize, multiindex, axis=0)\n markersize = markersize[point_idx & np.invert(nan_idx)]\n _plot_point_collection(\n ax,\n points,\n subset,\n vmin=mn,\n vmax=mx,\n markersize=markersize,\n cmap=cmap,\n **style_kwds\n )\n\n if missing_kwds is not None and not expl_series[nan_idx].empty:\n if color:\n if \"color\" not in missing_kwds:\n missing_kwds[\"color\"] = color\n\n merged_kwds = style_kwds.copy()\n merged_kwds.update(missing_kwds)\n\n plot_series(expl_series[nan_idx], ax=ax, **merged_kwds)\n\n if legend and not color:\n\n if legend_kwds is None:\n legend_kwds = {}\n if \"fmt\" in legend_kwds:\n legend_kwds.pop(\"fmt\")\n\n from matplotlib.lines import Line2D\n from matplotlib.colors import Normalize\n from matplotlib import cm\n\n norm = style_kwds.get(\"norm\", None)\n if not norm:\n norm = Normalize(vmin=mn, vmax=mx)\n n_cmap = cm.ScalarMappable(norm=norm, cmap=cmap)\n if categorical:\n patches = []\n for value, cat in enumerate(categories):\n patches.append(\n Line2D(\n [0],\n [0],\n linestyle=\"none\",\n marker=\"o\",\n alpha=style_kwds.get(\"alpha\", 1),\n markersize=10,\n markerfacecolor=n_cmap.to_rgba(value),\n markeredgewidth=0,\n )\n )\n if missing_kwds is not None:\n if \"color\" in merged_kwds:\n merged_kwds[\"facecolor\"] = merged_kwds[\"color\"]\n patches.append(\n Line2D(\n [0],\n [0],\n linestyle=\"none\",\n marker=\"o\",\n alpha=merged_kwds.get(\"alpha\", 1),\n markersize=10,\n markerfacecolor=merged_kwds.get(\"facecolor\", None),\n markeredgecolor=merged_kwds.get(\"edgecolor\", None),\n markeredgewidth=merged_kwds.get(\n \"linewidth\", 1 if merged_kwds.get(\"edgecolor\", False) else 0\n ),\n )\n )\n categories.append(merged_kwds.get(\"label\", \"NaN\"))\n legend_kwds.setdefault(\"numpoints\", 1)\n legend_kwds.setdefault(\"loc\", \"best\")\n ax.legend(patches, categories, **legend_kwds)\n else:\n\n if cax is not None:\n legend_kwds.setdefault(\"cax\", cax)\n else:\n legend_kwds.setdefault(\"ax\", ax)\n\n n_cmap.set_array([])\n ax.get_figure().colorbar(n_cmap, **legend_kwds)\n\n plt.draw()\n return ax\n\n\ndef _mapclassify_choro(values, scheme, **classification_kwds):\n \"\"\"\n Wrapper for choropleth schemes from mapclassify for use with plot_dataframe\n\n Parameters\n ----------\n values\n Series to be plotted\n scheme : str\n One of mapclassify classification schemes\n Options are BoxPlot, EqualInterval, FisherJenks,\n FisherJenksSampled, HeadTailBreaks, JenksCaspall,\n JenksCaspallForced, JenksCaspallSampled, MaxP,\n MaximumBreaks, NaturalBreaks, Quantiles, Percentiles, StdMean,\n UserDefined\n\n **classification_kwds : dict\n Keyword arguments for classification scheme\n For details see mapclassify documentation:\n https://pysal.org/mapclassify/api.html\n\n Returns\n -------\n binning\n Binning objects that holds the Series with values replaced with\n class identifier and the bins.\n \"\"\"\n try:\n import mapclassify.classifiers as classifiers\n\n except ImportError:\n raise ImportError(\n \"The 'mapclassify' >= 2.2.0 package is required to use the 'scheme' keyword\"\n )\n from mapclassify import __version__ as mc_version\n\n if mc_version < LooseVersion(\"2.2.0\"):\n raise ImportError(\n \"The 'mapclassify' >= 2.2.0 package is required to \"\n \"use the 'scheme' keyword\"\n )\n schemes = {}\n for classifier in classifiers.CLASSIFIERS:\n schemes[classifier.lower()] = getattr(classifiers, classifier)\n\n scheme = scheme.lower()\n\n # mapclassify < 2.1 cleaned up the scheme names (removing underscores)\n # trying both to keep compatibility with older versions and provide\n # compatibility with newer versions of mapclassify\n oldnew = {\n \"Box_Plot\": \"BoxPlot\",\n \"Equal_Interval\": \"EqualInterval\",\n \"Fisher_Jenks\": \"FisherJenks\",\n \"Fisher_Jenks_Sampled\": \"FisherJenksSampled\",\n \"HeadTail_Breaks\": \"HeadTailBreaks\",\n \"Jenks_Caspall\": \"JenksCaspall\",\n \"Jenks_Caspall_Forced\": \"JenksCaspallForced\",\n \"Jenks_Caspall_Sampled\": \"JenksCaspallSampled\",\n \"Max_P_Plassifier\": \"MaxP\",\n \"Maximum_Breaks\": \"MaximumBreaks\",\n \"Natural_Breaks\": \"NaturalBreaks\",\n \"Std_Mean\": \"StdMean\",\n \"User_Defined\": \"UserDefined\",\n }\n scheme_names_mapping = {}\n scheme_names_mapping.update(\n {old.lower(): new.lower() for old, new in oldnew.items()}\n )\n scheme_names_mapping.update(\n {new.lower(): old.lower() for old, new in oldnew.items()}\n )\n\n try:\n scheme_class = schemes[scheme]\n except KeyError:\n scheme = scheme_names_mapping.get(scheme, scheme)\n try:\n scheme_class = schemes[scheme]\n except KeyError:\n raise ValueError(\n \"Invalid scheme. Scheme must be in the set: %r\" % schemes.keys()\n )\n\n if classification_kwds[\"k\"] is not None:\n from inspect import getfullargspec as getspec\n\n spec = getspec(scheme_class.__init__)\n if \"k\" not in spec.args:\n del classification_kwds[\"k\"]\n try:\n binning = scheme_class(np.asarray(values), **classification_kwds)\n except TypeError:\n raise TypeError(\"Invalid keyword argument for %r \" % scheme)\n return binning\n", "path": "geopandas/plotting.py" } ]
diff --git a/geopandas/plotting.py b/geopandas/plotting.py index 8edd02a8e3..41e5fede74 100644 --- a/geopandas/plotting.py +++ b/geopandas/plotting.py @@ -636,6 +636,10 @@ def plot_dataframe( ) else: values = column + + # Make sure index of a Series matches index of df + if isinstance(values, pd.Series): + values = values.reindex(df.index) else: values = df[column] diff --git a/geopandas/tests/test_plotting.py b/geopandas/tests/test_plotting.py index 7102dddc69..03b889a940 100644 --- a/geopandas/tests/test_plotting.py +++ b/geopandas/tests/test_plotting.py @@ -86,6 +86,43 @@ def test_default_colors(self): expected_colors = cmap(np.arange(self.N) / (self.N - 1)) _check_colors(self.N, ax.collections[0].get_facecolors(), expected_colors) + def test_series_color_no_index(self): + + # Color order with ordered index + colors_ord = pd.Series(["a", "b", "c", "a", "b", "c", "a", "b", "c", "a"]) + + # Plot using Series as color + ax1 = self.df.plot(colors_ord) + + # Correct answer: Add as column to df and plot + self.df["colors_ord"] = colors_ord + ax2 = self.df.plot("colors_ord") + + # Confirm out-of-order index re-sorted + point_colors1 = ax1.collections[0].get_facecolors() + point_colors2 = ax2.collections[0].get_facecolors() + np.testing.assert_array_equal(point_colors1[1], point_colors2[1]) + + def test_series_color_index(self): + + # Color order with out-of-order index + colors_ord = pd.Series( + ["a", "a", "a", "a", "b", "b", "b", "c", "c", "c"], + index=[0, 3, 6, 9, 1, 4, 7, 2, 5, 8], + ) + + # Plot using Series as color + ax1 = self.df.plot(colors_ord) + + # Correct answer: Add as column to df and plot + self.df["colors_ord"] = colors_ord + ax2 = self.df.plot("colors_ord") + + # Confirm out-of-order index re-sorted + point_colors1 = ax1.collections[0].get_facecolors() + point_colors2 = ax2.collections[0].get_facecolors() + np.testing.assert_array_equal(point_colors1[1], point_colors2[1]) + def test_colormap(self): # without specifying values but cmap specified -> no uniform color
qutip__qutip-684
Reverse Circuit doesn't work Whenever i try to reverse some Circuit it throws an exception telling that temp does not have append method implemented. I checked the source code and i think that instead o append the developers meant add_gate.
[ { "content": "# This file is part of QuTiP: Quantum Toolbox in Python.\n#\n# Copyright (c) 2011 and later, Paul D. Nation and Robert J. Johansson.\n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are\n# met:\n#\n# 1. Redistributions of source code must retain the above copyright notice,\n# this list of conditions and the following disclaimer.\n#\n# 2. Redistributions in binary form must reproduce the above copyright\n# notice, this list of conditions and the following disclaimer in the\n# documentation and/or other materials provided with the distribution.\n#\n# 3. Neither the name of the QuTiP: Quantum Toolbox in Python nor the names\n# of its contributors may be used to endorse or promote products derived\n# from this software without specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n# \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A\n# PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT\n# HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n###############################################################################\n\nimport numpy as np\nimport warnings\n\nfrom qutip.qip.circuit_latex import _latex_compile\nfrom qutip.qip.gates import *\nfrom qutip.qip.qubits import qubit_states\n\n__all__ = ['Gate', 'QubitCircuit']\n\n\nclass Gate(object):\n \"\"\"\n Representation of a quantum gate, with its required parametrs, and target\n and control qubits.\n \"\"\"\n\n def __init__(self, name, targets=None, controls=None, arg_value=None,\n arg_label=None):\n \"\"\"\n Creates a gate with specified parameters.\n\n Parameters\n ----------\n name : String\n Gate name.\n targets : List\n Gate targets.\n controls : List\n Gate controls.\n arg_value : Float\n Argument value(phi).\n arg_label : String\n Label for gate representation.\n \"\"\"\n self.name = name\n self.targets = None\n self.controls = None\n\n if not isinstance(targets, list) and targets is not None:\n self.targets = [targets]\n else:\n self.targets = targets\n\n if not isinstance(controls, list) and controls is not None:\n self.controls = [controls]\n else:\n self.controls = controls\n\n self.arg_value = arg_value\n self.arg_label = arg_label\n\n if name in [\"SWAP\", \"ISWAP\", \"SQRTISWAP\", \"SQRTSWAP\", \"BERKELEY\",\n \"SWAPalpha\"]:\n if len(self.targets) != 2:\n raise ValueError(\"Gate %s requires two target\" % name)\n if self.controls is not None:\n raise ValueError(\"Gate %s does not require a control\" % name)\n\n if name in [\"CNOT\", \"CSIGN\", \"CRX\", \"CRY\", \"CRZ\"]:\n if self.targets is None or len(self.targets) != 1:\n raise ValueError(\"Gate %s requires one target\" % name)\n if self.controls is None or len(self.controls) != 1:\n raise ValueError(\"Gate %s requires one control\" % name)\n\n if name in [\"SNOT\", \"RX\", \"RY\", \"RZ\", \"PHASEGATE\"]:\n if self.controls is not None:\n raise ValueError(\"Gate %s does not take controls\" % name)\n\n if name in [\"RX\", \"RY\", \"RZ\", \"CPHASE\", \"SWAPalpha\", \"PHASEGATE\",\n \"GLOBALPHASE\", \"CRX\", \"CRY\", \"CRZ\"]:\n if arg_value is None:\n raise ValueError(\"Gate %s requires an argument value\" % name)\n\n self.arg_value = arg_value\n self.arg_label = arg_label\n\n def __str__(self):\n s = \"Gate(%s, targets=%s, controls=%s)\" % (self.name,\n self.targets,\n self.controls)\n return s\n\n def __repr__(self):\n return str(self)\n\n def _repr_latex_(self):\n return str(self)\n\n\n_gate_name_to_label = {\n 'RX': r'R_x',\n 'RY': r'R_y',\n 'RZ': r'R_z',\n 'CRX': r'R_x',\n 'CRY': r'R_y',\n 'CRZ': r'R_z',\n 'SQRTNOT': r'\\sqrt{\\rm NOT}',\n 'SNOT': r'{\\rm H}',\n 'PHASEGATE': r'{\\rm PHASE}',\n 'CPHASE': r'{\\rm R}',\n 'CNOT': r'{\\rm CNOT}',\n 'CSIGN': r'{\\rm Z}',\n 'BERKELEY': r'{\\rm BERKELEY}',\n 'SWAPalpha': r'{\\rm SWAPalpha}',\n 'SWAP': r'{\\rm SWAP}',\n 'ISWAP': r'{i}{\\rm SWAP}',\n 'SQRTSWAP': r'\\sqrt{\\rm SWAP}',\n 'SQRTISWAP': r'\\sqrt{{i}\\rm SWAP}',\n 'FREDKIN': r'{\\rm FREDKIN}',\n 'TOFFOLI': r'{\\rm TOFFOLI}',\n 'GLOBALPHASE': r'{\\rm Ph}',\n}\n\n\ndef _gate_label(name, arg_label):\n\n if name in _gate_name_to_label:\n gate_label = _gate_name_to_label[name]\n else:\n warnings.warn(\"Unknown gate %s\" % name)\n gate_label = name\n\n if arg_label:\n return r'%s(%s)' % (gate_label, arg_label)\n else:\n return r'%s' % gate_label\n\n\nclass QubitCircuit(object):\n \"\"\"\n Representation of a quantum program/algorithm, maintaining a sequence\n of gates.\n \"\"\"\n\n def __init__(self, N, input_states=None, output_states=None,\n reverse_states=True):\n # number of qubits in the register\n self.N = N\n self.reverse_states = reverse_states\n self.gates = []\n self.U_list = []\n self.input_states = [None for i in range(N)]\n self.output_states = [None for i in range(N)]\n\n def add_state(self, state, targets=None, state_type=\"input\"):\n \"\"\"\n Add an input or ouput state to the circuit. By default all the input\n and output states will be initialized to `None`. A particular state can\n be added by specifying the state and the qubit where it has to be added\n along with the type as input or output.\n\n Parameters\n ----------\n state: str\n The state that has to be added. It can be any string such as `0`,\n '+', \"A\", \"Y\"\n targets: list\n A list of qubit positions where the given state has to be added.\n state_type: str\n One of either \"input\" or \"output\". This specifies whether the state\n to be added is an input or output.\n default: \"input\"\n\n \"\"\"\n if state_type == \"input\":\n for i in targets:\n self.input_states[i] = state\n if state_type == \"output\":\n for i in targets:\n self.output_states[i] = state\n\n def add_gate(self, gate, targets=None, controls=None, arg_value=None,\n arg_label=None):\n \"\"\"\n Adds a gate with specified parameters to the circuit.\n\n Parameters\n ----------\n gate: String or `Gate`\n Gate name. If gate is an instance of `Gate`, parameters are\n unpacked and added.\n targets: List\n Gate targets.\n controls: List\n Gate controls.\n arg_value: Float\n Argument value(phi).\n arg_label: String\n Label for gate representation.\n \"\"\"\n if isinstance(gate, Gate):\n name = gate.name\n targets = gate.targets\n controls = gate.controls\n arg_value = gate.arg_value\n arg_label = gate.arg_label\n\n else:\n name = gate\n self.gates.append(Gate(name, targets=targets, controls=controls,\n arg_value=arg_value, arg_label=arg_label))\n\n def add_1q_gate(self, name, start=0, end=None, qubits=None,\n arg_value=None, arg_label=None):\n \"\"\"\n Adds a single qubit gate with specified parameters on a variable\n number of qubits in the circuit. By default, it applies the given gate\n to all the qubits in the register.\n\n Parameters\n ----------\n name : String\n Gate name.\n start : Integer\n Starting location of qubits.\n end : Integer\n Last qubit for the gate.\n qubits : List\n Specific qubits for applying gates.\n arg_value : Float\n Argument value(phi).\n arg_label : String\n Label for gate representation.\n \"\"\"\n if name not in [\"RX\", \"RY\", \"RZ\", \"SNOT\", \"SQRTNOT\", \"PHASEGATE\"]:\n raise ValueError(\"%s is not a single qubit gate\" % name)\n\n if qubits is not None:\n for i in range(len(qubits)):\n self.gates.append(Gate(name, targets=qubits[i], controls=None,\n arg_value=arg_value,\n arg_label=arg_label))\n\n else:\n if end is None:\n end = self.N - 1\n for i in range(start, end):\n self.gates.append(Gate(name, targets=i, controls=None,\n arg_value=arg_value,\n arg_label=arg_label))\n\n def add_circuit(self, qc, start=0):\n \"\"\"\n Adds a block of a qubit circuit to the main circuit.\n Globalphase gates are not added.\n\n Parameters\n ----------\n qc : QubitCircuit\n The circuit block to be added to the main circuit.\n start : Integer\n The qubit on which the first gate is applied.\n \"\"\"\n\n if self.N - start < len(qc.gates):\n raise NotImplementedError(\"Targets exceed number of qubits.\")\n\n for gate in qc.gates:\n if gate.name in [\"RX\", \"RY\", \"RZ\", \"SNOT\", \"SQRTNOT\", \"PHASEGATE\"]:\n self.add_gate(gate.name, gate.targets[0] + start, None,\n gate.arg_value, gate.arg_label)\n elif gate.name in [\"CPHASE\", \"CNOT\", \"CSIGN\", \"CRX\", \"CRY\", \"CRZ\"]:\n self.add_gate(gate.name, gate.targets[0] + start,\n gate.controls[0] + start, gate.arg_value,\n gate.arg_label)\n elif gate.name in [\"BERKELEY\", \"SWAPalpha\", \"SWAP\", \"ISWAP\",\n \"SQRTSWAP\", \"SQRTISWAP\"]:\n self.add_gate(gate.name, None,\n [gate.controls[0] + start,\n gate.controls[1] + start], None, None)\n elif gate.name in [\"TOFFOLI\"]:\n self.add_gate(gate.name, gate.targets[0] + start,\n [gate.controls[0] + start,\n gate.controls[1] + start], None, None)\n elif gate.name in [\"FREDKIN\"]:\n self.add_gate(gate.name,\n [gate.targets[0] + start,\n gate.targets[1] + start],\n gate.controls + start, None, None)\n\n def remove_gate(self, index=None, end=None, name=None, remove=\"first\"):\n \"\"\"\n Removes a gate from a specific index or between two indexes or the\n first, last or all instances of a particular gate.\n\n Parameters\n ----------\n index : Integer\n Location of gate to be removed.\n name : String\n Gate name to be removed.\n remove : String\n If first or all gate are to be removed.\n \"\"\"\n if index is not None and index <= self.N:\n if end is not None and end <= self.N:\n for i in range(end - index):\n self.gates.pop(index + i)\n elif end is not None and end > self.N:\n raise ValueError(\"End target exceeds number of gates.\")\n else:\n self.gates.pop(index)\n\n elif name is not None and remove == \"first\":\n for gate in self.gates:\n if name == gate.name:\n self.gates.remove(gate)\n break\n\n elif name is not None and remove == \"last\":\n for i in range(self.N + 1):\n if name == self.gates[self.N - i].name:\n self.gates.remove(self.gates[self.N - i])\n break\n\n elif name is not None and remove == \"all\":\n for j in range(self.N + 1):\n if name == self.gates[self.N - j].name:\n self.gates.remove(self.gates[self.N - j])\n\n else:\n self.gates.pop()\n\n def reverse_circuit(self):\n \"\"\"\n Reverses an entire circuit of unitary gates.\n\n Returns\n ----------\n qc : QubitCircuit\n Returns QubitCircuit of resolved gates for the qubit circuit in the\n reverse order.\n\n \"\"\"\n temp = QubitCircuit(self.N, self.reverse_states)\n\n for i in range(self.N):\n temp.append(self.gates[self.N - i - 1])\n\n return temp\n\n def resolve_gates(self, basis=[\"CNOT\", \"RX\", \"RY\", \"RZ\"]):\n \"\"\"\n Unitary matrix calculator for N qubits returning the individual\n steps as unitary matrices operating from left to right in the specified\n basis.\n\n Parameters\n ----------\n basis : list.\n Basis of the resolved circuit.\n\n Returns\n -------\n qc : QubitCircuit\n Returns QubitCircuit of resolved gates for the qubit circuit in the\n desired basis.\n \"\"\"\n qc_temp = QubitCircuit(self.N, self.reverse_states)\n temp_resolved = []\n\n basis_1q = []\n basis_2q = None\n\n basis_1q_valid = [\"RX\", \"RY\", \"RZ\"]\n basis_2q_valid = [\"CNOT\", \"CSIGN\", \"ISWAP\", \"SQRTSWAP\", \"SQRTISWAP\"]\n\n if isinstance(basis, list):\n for gate in basis:\n if gate not in (basis_1q_valid + basis_2q_valid):\n raise ValueError(\"%s is not a valid basis gate\" % gate)\n\n if gate in basis_2q_valid:\n if basis_2q is not None:\n raise ValueError(\"At most one two-qubit gate allowed\")\n basis_2q = gate\n\n else:\n basis_1q.append(gate)\n\n if len(basis_1q) == 1:\n raise ValueError(\"Not sufficient single-qubit gates in basis\")\n elif len(basis_1q) == 0:\n basis_1q = [\"RX\", \"RY\", \"RZ\"]\n\n else:\n basis_1q = [\"RX\", \"RY\", \"RZ\"]\n if basis in basis_2q_valid:\n basis_2q = basis\n else:\n raise ValueError(\"%s is not a valid two-qubit basis gate\"\n % basis)\n\n for gate in self.gates:\n if gate.name == \"RX\":\n temp_resolved.append(gate)\n elif gate.name == \"RY\":\n temp_resolved.append(gate)\n elif gate.name == \"RZ\":\n temp_resolved.append(gate)\n elif gate.name == \"SQRTNOT\":\n temp_resolved.append(Gate(\"GLOBALPHASE\", None, None,\n arg_value=np.pi / 4,\n arg_label=r\"\\pi/4\"))\n temp_resolved.append(Gate(\"RX\", gate.targets, None,\n arg_value=np.pi / 2,\n arg_label=r\"\\pi/2\"))\n elif gate.name == \"SNOT\":\n temp_resolved.append(Gate(\"GLOBALPHASE\", None, None,\n arg_value=np.pi / 2,\n arg_label=r\"\\pi/2\"))\n temp_resolved.append(Gate(\"RX\", gate.targets, None,\n arg_value=np.pi, arg_label=r\"\\pi\"))\n temp_resolved.append(Gate(\"RY\", gate.targets, None,\n arg_value=np.pi / 2,\n arg_label=r\"\\pi/2\"))\n elif gate.name == \"PHASEGATE\":\n temp_resolved.append(Gate(\"GLOBALPHASE\", None, None,\n arg_value=gate.arg_value / 2,\n arg_label=gate.arg_label))\n temp_resolved.append(Gate(\"RZ\", gate.targets, None,\n gate.arg_value, gate.arg_label))\n elif gate.name == \"CPHASE\":\n raise NotImplementedError(\"Cannot be resolved in this basis\")\n elif gate.name == \"CNOT\":\n temp_resolved.append(gate)\n elif gate.name == \"CSIGN\" and basis_2q is not \"CSIGN\":\n temp_resolved.append(Gate(\"RY\", gate.targets, None,\n arg_value=np.pi / 2,\n arg_label=r\"\\pi/2\"))\n temp_resolved.append(Gate(\"RX\", gate.targets, None,\n arg_value=np.pi, arg_label=r\"\\pi\"))\n temp_resolved.append(Gate(\"CNOT\", gate.targets, gate.controls))\n temp_resolved.append(Gate(\"RY\", gate.targets, None,\n arg_value=np.pi / 2,\n arg_label=r\"\\pi/2\"))\n temp_resolved.append(Gate(\"RX\", gate.targets, None,\n arg_value=np.pi, arg_label=r\"\\pi\"))\n temp_resolved.append(Gate(\"GLOBALPHASE\", None, None,\n arg_value=np.pi, arg_label=r\"\\pi\"))\n elif gate.name == \"BERKELEY\":\n raise NotImplementedError(\"Cannot be resolved in this basis\")\n elif gate.name == \"SWAPalpha\":\n raise NotImplementedError(\"Cannot be resolved in this basis\")\n elif gate.name == \"SWAP\" and basis_2q is not \"ISWAP\":\n temp_resolved.append(Gate(\"CNOT\", gate.targets[0],\n gate.targets[1]))\n temp_resolved.append(Gate(\"CNOT\", gate.targets[1],\n gate.targets[0]))\n temp_resolved.append(Gate(\"CNOT\", gate.targets[0],\n gate.targets[1]))\n elif gate.name == \"ISWAP\" and basis_2q is not \"ISWAP\":\n temp_resolved.append(Gate(\"CNOT\", gate.targets[0],\n gate.targets[1]))\n temp_resolved.append(Gate(\"CNOT\", gate.targets[1],\n gate.targets[0]))\n temp_resolved.append(Gate(\"CNOT\", gate.targets[0],\n gate.targets[1]))\n temp_resolved.append(Gate(\"RZ\", gate.targets[0], None,\n arg_value=np.pi / 2,\n arg_label=r\"\\pi/2\"))\n temp_resolved.append(Gate(\"RZ\", gate.targets[1], None,\n arg_value=np.pi / 2,\n arg_label=r\"\\pi/2\"))\n temp_resolved.append(Gate(\"RY\", gate.targets[0], None,\n arg_value=np.pi / 2,\n arg_label=r\"\\pi/2\"))\n temp_resolved.append(Gate(\"RX\", gate.targets, None,\n arg_value=np.pi, arg_label=r\"\\pi\"))\n temp_resolved.append(Gate(\"CNOT\", gate.targets[0],\n gate.targets[1]))\n temp_resolved.append(Gate(\"RY\", gate.targets[0], None,\n arg_value=np.pi / 2,\n arg_label=r\"\\pi/2\"))\n temp_resolved.append(Gate(\"RX\", gate.targets, None,\n arg_value=np.pi, arg_label=r\"\\pi\"))\n temp_resolved.append(Gate(\"GLOBALPHASE\", None, None,\n arg_value=np.pi, arg_label=r\"\\pi\"))\n temp_resolved.append(Gate(\"GLOBALPHASE\", None, None,\n arg_value=np.pi / 2,\n arg_label=r\"\\pi/2\"))\n elif gate.name == \"SQRTSWAP\" and basis_2q not in [\"SQRTSWAP\",\n \"ISWAP\"]:\n raise NotImplementedError(\"Cannot be resolved in this basis\")\n elif gate.name == \"SQRTISWAP\" and basis_2q not in [\"SQRTISWAP\",\n \"ISWAP\"]:\n raise NotImplementedError(\"Cannot be resolved in this basis\")\n elif gate.name == \"FREDKIN\":\n temp_resolved.append(Gate(\"CNOT\", gate.targets[0],\n gate.targets[1]))\n temp_resolved.append(Gate(\"CNOT\", gate.targets[0],\n gate.controls))\n temp_resolved.append(Gate(\"RZ\", gate.controls, None,\n arg_value=np.pi / 8,\n arg_label=r\"\\pi/8\"))\n temp_resolved.append(Gate(\"RZ\", [gate.targets[0]], None,\n arg_value=-np.pi / 8,\n arg_label=r\"-\\pi/8\"))\n temp_resolved.append(Gate(\"CNOT\", gate.targets[0],\n gate.controls))\n temp_resolved.append(Gate(\"GLOBALPHASE\", None, None,\n arg_value=np.pi / 2,\n arg_label=r\"\\pi/2\"))\n temp_resolved.append(Gate(\"RY\", gate.targets[1], None,\n arg_value=np.pi / 2,\n arg_label=r\"\\pi/2\"))\n temp_resolved.append(Gate(\"RY\", gate.targets, None,\n arg_value=-np.pi / 2,\n arg_label=r\"-\\pi/2\"))\n temp_resolved.append(Gate(\"RZ\", gate.targets, None,\n arg_value=np.pi, arg_label=r\"\\pi\"))\n temp_resolved.append(Gate(\"RY\", gate.targets, None,\n arg_value=np.pi / 2,\n arg_label=r\"\\pi/2\"))\n temp_resolved.append(Gate(\"RZ\", gate.targets[0], None,\n arg_value=np.pi / 8,\n arg_label=r\"\\pi/8\"))\n temp_resolved.append(Gate(\"RZ\", gate.targets[1], None,\n arg_value=np.pi / 8,\n arg_label=r\"\\pi/8\"))\n temp_resolved.append(Gate(\"CNOT\", gate.targets[1],\n gate.controls))\n temp_resolved.append(Gate(\"RZ\", gate.targets[1], None,\n arg_value=-np.pi / 8,\n arg_label=r\"-\\pi/8\"))\n temp_resolved.append(Gate(\"CNOT\", gate.targets[1],\n gate.targets[0]))\n temp_resolved.append(Gate(\"RZ\", gate.targets[1], None,\n arg_value=np.pi / 8,\n arg_label=r\"\\pi/8\"))\n temp_resolved.append(Gate(\"CNOT\", gate.targets[1],\n gate.controls))\n temp_resolved.append(Gate(\"RZ\", gate.targets[1], None,\n arg_value=-np.pi / 8,\n arg_label=r\"-\\pi/8\"))\n temp_resolved.append(Gate(\"CNOT\", gate.targets[1],\n gate.targets[0]))\n temp_resolved.append(Gate(\"GLOBALPHASE\", None, None,\n arg_value=np.pi / 2,\n arg_label=r\"\\pi/2\"))\n temp_resolved.append(Gate(\"RY\", gate.targets[1], None,\n arg_value=np.pi / 2,\n arg_label=r\"\\pi/2\"))\n temp_resolved.append(Gate(\"RY\", gate.targets, None,\n arg_value=-np.pi / 2,\n arg_label=r\"-\\pi/2\"))\n temp_resolved.append(Gate(\"RZ\", gate.targets, None,\n arg_value=np.pi, arg_label=r\"\\pi\"))\n temp_resolved.append(Gate(\"RY\", gate.targets, None,\n arg_value=np.pi / 2,\n arg_label=r\"\\pi/2\"))\n temp_resolved.append(Gate(\"CNOT\", gate.targets[0],\n gate.targets[1]))\n\n elif gate.name == \"TOFFOLI\":\n temp_resolved.append(Gate(\"GLOBALPHASE\", None, None,\n arg_value=1 * np.pi / 8,\n arg_label=r\"\\pi/8\"))\n temp_resolved.append(Gate(\"RZ\", gate.controls[1], None,\n arg_value=np.pi/2,\n arg_label=r\"\\pi/2\"))\n temp_resolved.append(Gate(\"RZ\", gate.controls[0], None,\n arg_value=np.pi / 4,\n arg_label=r\"\\pi/4\"))\n temp_resolved.append(Gate(\"CNOT\", gate.controls[1],\n gate.controls[0]))\n temp_resolved.append(Gate(\"RZ\", gate.controls[1], None,\n arg_value=-np.pi / 4,\n arg_label=r\"-\\pi/4\"))\n temp_resolved.append(Gate(\"CNOT\", gate.controls[1],\n gate.controls[0]))\n temp_resolved.append(Gate(\"GLOBALPHASE\", None, None,\n arg_value=np.pi / 2,\n arg_label=r\"\\pi/2\"))\n temp_resolved.append(Gate(\"RY\", gate.targets, None,\n arg_value=np.pi / 2,\n arg_label=r\"\\pi/2\"))\n temp_resolved.append(Gate(\"RX\", gate.targets, None,\n arg_value=np.pi, arg_label=r\"\\pi\"))\n temp_resolved.append(Gate(\"RZ\", gate.controls[1], None,\n arg_value=-np.pi / 4,\n arg_label=r\"-\\pi/4\"))\n temp_resolved.append(Gate(\"RZ\", gate.targets, None,\n arg_value=np.pi / 4,\n arg_label=r\"\\pi/4\"))\n temp_resolved.append(Gate(\"CNOT\", gate.targets,\n gate.controls[0]))\n temp_resolved.append(Gate(\"RZ\", gate.targets, None,\n arg_value=-np.pi / 4,\n arg_label=r\"-\\pi/4\"))\n temp_resolved.append(Gate(\"CNOT\", gate.targets,\n gate.controls[1]))\n temp_resolved.append(Gate(\"RZ\", gate.targets, None,\n arg_value=np.pi / 4,\n arg_label=r\"\\pi/4\"))\n temp_resolved.append(Gate(\"CNOT\", gate.targets,\n gate.controls[0]))\n temp_resolved.append(Gate(\"RZ\", gate.targets, None,\n arg_value=-np.pi / 4,\n arg_label=r\"-\\pi/4\"))\n temp_resolved.append(Gate(\"CNOT\", gate.targets,\n gate.controls[1]))\n temp_resolved.append(Gate(\"GLOBALPHASE\", None, None,\n arg_value=np.pi / 2,\n arg_label=r\"\\pi/2\"))\n temp_resolved.append(Gate(\"RY\", gate.targets, None,\n arg_value=np.pi / 2,\n arg_label=r\"\\pi/2\"))\n temp_resolved.append(Gate(\"RX\", gate.targets, None,\n arg_value=np.pi, arg_label=r\"\\pi\"))\n\n elif gate.name == \"GLOBALPHASE\":\n temp_resolved.append(Gate(gate.name, gate.targets,\n gate.controls,\n gate.arg_value, gate.arg_label))\n else:\n temp_resolved.append(gate)\n\n if basis_2q == \"CSIGN\":\n for gate in temp_resolved:\n if gate.name == \"CNOT\":\n qc_temp.gates.append(Gate(\"RY\", gate.targets, None,\n arg_value=-np.pi / 2,\n arg_label=r\"-\\pi/2\"))\n qc_temp.gates.append(Gate(\"CSIGN\", gate.targets,\n gate.controls))\n qc_temp.gates.append(Gate(\"RY\", gate.targets, None,\n arg_value=np.pi / 2,\n arg_label=r\"\\pi/2\"))\n else:\n qc_temp.gates.append(gate)\n elif basis_2q == \"ISWAP\":\n for gate in temp_resolved:\n if gate.name == \"CNOT\":\n qc_temp.gates.append(Gate(\"GLOBALPHASE\", None, None,\n arg_value=np.pi / 4,\n arg_label=r\"\\pi/4\"))\n qc_temp.gates.append(Gate(\"ISWAP\", [gate.controls[0],\n gate.targets[0]],\n None))\n qc_temp.gates.append(Gate(\"RZ\", gate.targets, None,\n arg_value=-np.pi / 2,\n arg_label=r\"-\\pi/2\"))\n qc_temp.gates.append(Gate(\"RY\", gate.controls, None,\n arg_value=-np.pi / 2,\n arg_label=r\"-\\pi/2\"))\n qc_temp.gates.append(Gate(\"RZ\", gate.controls, None,\n arg_value=np.pi / 2,\n arg_label=r\"\\pi/2\"))\n qc_temp.gates.append(Gate(\"ISWAP\", [gate.controls[0],\n gate.targets[0]],\n None))\n qc_temp.gates.append(Gate(\"RY\", gate.targets, None,\n arg_value=-np.pi / 2,\n arg_label=r\"-\\pi/2\"))\n qc_temp.gates.append(Gate(\"RZ\", gate.targets, None,\n arg_value=np.pi / 2,\n arg_label=r\"\\pi/2\"))\n elif gate.name == \"SWAP\":\n qc_temp.gates.append(Gate(\"GLOBALPHASE\", None, None,\n arg_value=np.pi / 4,\n arg_label=r\"\\pi/4\"))\n qc_temp.gates.append(Gate(\"ISWAP\", gate.targets, None))\n qc_temp.gates.append(Gate(\"RX\", gate.targets[0], None,\n arg_value=-np.pi / 2,\n arg_label=r\"-\\pi/2\"))\n qc_temp.gates.append(Gate(\"ISWAP\", gate.targets, None))\n qc_temp.gates.append(Gate(\"RX\", gate.targets[1], None,\n arg_value=-np.pi / 2,\n arg_label=r\"-\\pi/2\"))\n qc_temp.gates.append(Gate(\"ISWAP\", [gate.targets[1],\n gate.targets[0]],\n None))\n qc_temp.gates.append(Gate(\"RX\", gate.targets[0], None,\n arg_value=-np.pi / 2,\n arg_label=r\"-\\pi/2\"))\n else:\n qc_temp.gates.append(gate)\n elif basis_2q == \"SQRTSWAP\":\n for gate in temp_resolved:\n if gate.name == \"CNOT\":\n qc_temp.gates.append(Gate(\"RY\", gate.targets, None,\n arg_value=np.pi / 2,\n arg_label=r\"\\pi/2\"))\n qc_temp.gates.append(Gate(\"SQRTSWAP\", [gate.controls[0],\n gate.targets[0]],\n None))\n qc_temp.gates.append(Gate(\"RZ\", gate.controls, None,\n arg_value=np.pi,\n arg_label=r\"\\pi\"))\n qc_temp.gates.append(Gate(\"SQRTSWAP\", [gate.controls[0],\n gate.targets[0]],\n None))\n qc_temp.gates.append(Gate(\"RZ\", gate.targets, None,\n arg_value=-np.pi / 2,\n arg_label=r\"-\\pi/2\"))\n qc_temp.gates.append(Gate(\"RY\", gate.targets, None,\n arg_value=-np.pi / 2,\n arg_label=r\"-\\pi/2\"))\n qc_temp.gates.append(Gate(\"RZ\", gate.controls, None,\n arg_value=-np.pi / 2,\n arg_label=r\"-\\pi/2\"))\n else:\n qc_temp.gates.append(gate)\n elif basis_2q == \"SQRTISWAP\":\n for gate in temp_resolved:\n if gate.name == \"CNOT\":\n qc_temp.gates.append(Gate(\"RY\", gate.controls, None,\n arg_value=-np.pi / 2,\n arg_label=r\"-\\pi/2\"))\n qc_temp.gates.append(Gate(\"RX\", gate.controls, None,\n arg_value=np.pi / 2,\n arg_label=r\"\\pi/2\"))\n qc_temp.gates.append(Gate(\"RX\", gate.targets, None,\n arg_value=-np.pi / 2,\n arg_label=r\"-\\pi/2\"))\n qc_temp.gates.append(Gate(\"SQRTISWAP\", [gate.controls[0],\n gate.targets[0]],\n None))\n qc_temp.gates.append(Gate(\"RX\", gate.controls, None,\n arg_value=np.pi,\n arg_label=r\"\\pi\"))\n qc_temp.gates.append(Gate(\"SQRTISWAP\", [gate.controls[0],\n gate.targets[0]],\n None))\n qc_temp.gates.append(Gate(\"RY\", gate.controls, None,\n arg_value=np.pi / 2,\n arg_label=r\"\\pi/2\"))\n qc_temp.gates.append(Gate(\"GLOBALPHASE\", None, None,\n arg_value=np.pi / 4,\n arg_label=r\"\\pi/4\"))\n qc_temp.gates.append(Gate(\"RZ\", gate.controls, None,\n arg_value=np.pi,\n arg_label=r\"\\pi\"))\n qc_temp.gates.append(Gate(\"GLOBALPHASE\", None, None,\n arg_value=3 * np.pi / 2,\n arg_label=r\"3\\pi/2\"))\n else:\n qc_temp.gates.append(gate)\n else:\n qc_temp.gates = temp_resolved\n\n if len(basis_1q) == 2:\n temp_resolved = qc_temp.gates\n qc_temp.gates = []\n for gate in temp_resolved:\n if gate.name == \"RX\" and \"RX\" not in basis_1q:\n qc_temp.gates.append(Gate(\"RY\", gate.targets, None,\n arg_value=-np.pi / 2,\n arg_label=r\"-\\pi/2\"))\n qc_temp.gates.append(Gate(\"RZ\", gate.targets, None,\n gate.arg_value, gate.arg_label))\n qc_temp.gates.append(Gate(\"RY\", gate.targets, None,\n arg_value=np.pi / 2,\n arg_label=r\"\\pi/2\"))\n elif gate.name == \"RY\" and \"RY\" not in basis_1q:\n qc_temp.gates.append(Gate(\"RZ\", gate.targets, None,\n arg_value=-np.pi / 2,\n arg_label=r\"-\\pi/2\"))\n qc_temp.gates.append(Gate(\"RX\", gate.targets, None,\n gate.arg_value, gate.arg_label))\n qc_temp.gates.append(Gate(\"RZ\", gate.targets, None,\n arg_value=np.pi / 2,\n arg_label=r\"\\pi/2\"))\n elif gate.name == \"RZ\" and \"RZ\" not in basis_1q:\n qc_temp.gates.append(Gate(\"RX\", gate.targets, None,\n arg_value=-np.pi / 2,\n arg_label=r\"-\\pi/2\"))\n qc_temp.gates.append(Gate(\"RY\", gate.targets, None,\n gate.arg_value, gate.arg_label))\n qc_temp.gates.append(Gate(\"RX\", gate.targets, None,\n arg_value=np.pi / 2,\n arg_label=r\"\\pi/2\"))\n else:\n qc_temp.gates.append(gate)\n\n return qc_temp\n\n def adjacent_gates(self):\n \"\"\"\n Method to resolve two qubit gates with non-adjacent control/s or\n target/s in terms of gates with adjacent interactions.\n\n Returns\n ----------\n qc : QubitCircuit\n Returns QubitCircuit of the gates for the qubit circuit with the\n resolved non-adjacent gates.\n\n \"\"\"\n temp = QubitCircuit(self.N, self.reverse_states)\n swap_gates = [\"SWAP\", \"ISWAP\", \"SQRTISWAP\", \"SQRTSWAP\", \"BERKELEY\",\n \"SWAPalpha\"]\n\n for gate in self.gates:\n if gate.name == \"CNOT\" or gate.name == \"CSIGN\":\n start = min([gate.targets[0], gate.controls[0]])\n end = max([gate.targets[0], gate.controls[0]])\n i = start\n while i < end:\n if start + end - i - i == 1 and (end - start + 1) % 2 == 0:\n # Apply required gate if control, target are adjacent\n # to each other, provided |control-target| is even.\n if end == gate.controls[0]:\n temp.gates.append(Gate(gate.name, targets=[i],\n controls=[i + 1]))\n else:\n temp.gates.append(Gate(gate.name, targets=[i + 1],\n controls=[i]))\n elif (start + end - i - i == 2 and\n (end - start + 1) % 2 == 1):\n # Apply a swap between i and its adjacent gate, then\n # the required gate if and then another swap if control\n # and target have one qubit between them, provided\n # |control-target| is odd.\n temp.gates.append(Gate(\"SWAP\", targets=[i, i + 1]))\n if end == gate.controls[0]:\n temp.gates.append(Gate(gate.name, targets=[i + 1],\n controls=[i + 2]))\n else:\n temp.gates.append(Gate(gate.name, targets=[i + 2],\n controls=[i + 1]))\n temp.gates.append(Gate(\"SWAP\", targets=[i, i + 1]))\n i += 1\n else:\n # Swap the target/s and/or control with their adjacent\n # qubit to bring them closer.\n temp.gates.append(Gate(\"SWAP\", targets=[i, i + 1]))\n temp.gates.append(Gate(\"SWAP\",\n targets=[start + end - i - 1,\n start + end - i]))\n i += 1\n\n elif gate.name in swap_gates:\n start = min([gate.targets[0], gate.targets[1]])\n end = max([gate.targets[0], gate.targets[1]])\n i = start\n while i < end:\n if start + end - i - i == 1 and (end - start + 1) % 2 == 0:\n temp.gates.append(Gate(gate.name, targets=[i, i + 1]))\n elif ((start + end - i - i) == 2 and\n (end - start + 1) % 2 == 1):\n temp.gates.append(Gate(\"SWAP\", targets=[i, i + 1]))\n temp.gates.append(\n Gate(gate.name, targets=[i + 1, i + 2]))\n temp.gates.append(Gate(\"SWAP\", targets=[i, i + 1]))\n i += 1\n else:\n temp.gates.append(Gate(\"SWAP\", targets=[i, i + 1]))\n temp.gates.append(Gate(\"SWAP\",\n targets=[start + end - i - 1,\n start + end - i]))\n i += 1\n\n else:\n temp.gates.append(gate)\n\n return temp\n\n def propagators(self):\n \"\"\"\n Propagator matrix calculator for N qubits returning the individual\n steps as unitary matrices operating from left to right.\n\n Returns\n -------\n U_list : list\n Returns list of unitary matrices for the qubit circuit.\n\n \"\"\"\n self.U_list = []\n\n for gate in self.gates:\n if gate.name == \"RX\":\n self.U_list.append(rx(gate.arg_value, self.N, gate.targets[0]))\n elif gate.name == \"RY\":\n self.U_list.append(ry(gate.arg_value, self.N, gate.targets[0]))\n elif gate.name == \"RZ\":\n self.U_list.append(rz(gate.arg_value, self.N, gate.targets[0]))\n elif gate.name == \"SQRTNOT\":\n self.U_list.append(sqrtnot(self.N, gate.targets[0]))\n elif gate.name == \"SNOT\":\n self.U_list.append(snot(self.N, gate.targets[0]))\n elif gate.name == \"PHASEGATE\":\n self.U_list.append(phasegate(gate.arg_value, self.N,\n gate.targets[0]))\n if gate.name == \"CRX\":\n self.U_list.append(controlled_gate(rx(gate.arg_value),\n N=self.N,\n control=gate.controls[0],\n target=gate.targets[0]))\n elif gate.name == \"CRY\":\n self.U_list.append(controlled_gate(ry(gate.arg_value),\n N=self.N,\n control=gate.controls[0],\n target=gate.targets[0]))\n elif gate.name == \"CRZ\":\n self.U_list.append(controlled_gate(rz(gate.arg_value),\n N=self.N,\n control=gate.controls[0],\n target=gate.targets[0]))\n elif gate.name == \"CPHASE\":\n self.U_list.append(cphase(gate.arg_value, self.N,\n gate.controls[0], gate.targets[0]))\n elif gate.name == \"CNOT\":\n self.U_list.append(cnot(self.N,\n gate.controls[0], gate.targets[0]))\n elif gate.name == \"CSIGN\":\n self.U_list.append(csign(self.N,\n gate.controls[0], gate.targets[0]))\n elif gate.name == \"BERKELEY\":\n self.U_list.append(berkeley(self.N, gate.targets))\n elif gate.name == \"SWAPalpha\":\n self.U_list.append(swapalpha(gate.arg_value, self.N,\n gate.targets))\n elif gate.name == \"SWAP\":\n self.U_list.append(swap(self.N, gate.targets))\n elif gate.name == \"ISWAP\":\n self.U_list.append(iswap(self.N, gate.targets))\n elif gate.name == \"SQRTSWAP\":\n self.U_list.append(sqrtswap(self.N, gate.targets))\n elif gate.name == \"SQRTISWAP\":\n self.U_list.append(sqrtiswap(self.N, gate.targets))\n elif gate.name == \"FREDKIN\":\n self.U_list.append(fredkin(self.N, gate.controls[0],\n gate.targets))\n elif gate.name == \"TOFFOLI\":\n self.U_list.append(toffoli(self.N, gate.controls,\n gate.targets[0]))\n elif gate.name == \"GLOBALPHASE\":\n self.U_list.append(globalphase(gate.arg_value, self.N))\n\n return self.U_list\n\n def latex_code(self):\n rows = []\n\n gates = self.gates\n\n for gate in gates:\n col = []\n for n in range(self.N):\n if gate.targets and n in gate.targets:\n\n if len(gate.targets) > 1:\n if ((self.reverse_states and n == max(gate.targets)) or\n (not self.reverse_states\n and n == min(gate.targets))):\n col.append(r\" \\multigate{%d}{%s} \" %\n (len(gate.targets) - 1,\n _gate_label(gate.name,\n gate.arg_label)))\n else:\n col.append(r\" \\ghost{%s} \" %\n (_gate_label(gate.name,\n gate.arg_label)))\n\n elif gate.name == \"CNOT\":\n col.append(r\" \\targ \")\n elif gate.name == \"SWAP\":\n col.append(r\" \\qswap \")\n else:\n col.append(r\" \\gate{%s} \" %\n _gate_label(gate.name, gate.arg_label))\n\n elif gate.controls and n in gate.controls:\n m = (gate.targets[0] - n) * (-1 if self.reverse_states\n else 1)\n if gate.name == \"SWAP\":\n col.append(r\" \\qswap \\ctrl{%d} \" % m)\n else:\n col.append(r\" \\ctrl{%d} \" % m)\n\n elif (not gate.controls and not gate.targets):\n # global gate\n if ((self.reverse_states and n == self.N - 1)\n or (not self.reverse_states and n == 0)):\n col.append(r\" \\multigate{%d}{%s} \" %\n (self.N - 1,\n _gate_label(gate.name, gate.arg_label)))\n else:\n col.append(r\" \\ghost{%s} \" %\n (_gate_label(gate.name, gate.arg_label)))\n\n else:\n col.append(r\" \\qw \")\n\n col.append(r\" \\qw \")\n rows.append(col)\n\n input_states = [\"\\lstick{\\ket{\" + x + \"}}\" if x is not None\n else \"\" for x in self.input_states]\n\n code = \"\"\n n_iter = (reversed(range(self.N)) if self.reverse_states\n else range(self.N))\n for n in n_iter:\n code += r\" & %s\" % input_states[n]\n for m in range(len(gates)):\n code += r\" & %s\" % rows[m][n]\n code += r\" & \\qw \\\\ \" + \"\\n\"\n\n return code\n\n def _repr_png_(self):\n return _latex_compile(self.latex_code(), format=\"png\")\n\n def _repr_svg_(self):\n return _latex_compile(self.latex_code(), format=\"svg\")\n\n @property\n def png(self):\n from IPython.display import Image\n return Image(self._repr_png_(), embed=True)\n\n @property\n def svg(self):\n from IPython.display import SVG\n return SVG(self._repr_svg_())\n\n def qasm(self):\n\n code = \"# qasm code generated by QuTiP\\n\\n\"\n\n for n in range(self.N):\n code += \"\\tqubit\\tq%d\\n\" % n\n\n code += \"\\n\"\n\n for gate in self.gates:\n code += \"\\t%s\\t\" % gate.name\n qtargets = [\"q%d\" %\n t for t in gate.targets] if gate.targets else []\n qcontrols = ([\"q%d\" % c for c in gate.controls] if gate.controls\n else [])\n code += \",\".join(qtargets + qcontrols)\n code += \"\\n\"\n\n return code\n", "path": "qutip/qip/circuit.py" } ]
[ { "content": "# This file is part of QuTiP: Quantum Toolbox in Python.\n#\n# Copyright (c) 2011 and later, Paul D. Nation and Robert J. Johansson.\n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are\n# met:\n#\n# 1. Redistributions of source code must retain the above copyright notice,\n# this list of conditions and the following disclaimer.\n#\n# 2. Redistributions in binary form must reproduce the above copyright\n# notice, this list of conditions and the following disclaimer in the\n# documentation and/or other materials provided with the distribution.\n#\n# 3. Neither the name of the QuTiP: Quantum Toolbox in Python nor the names\n# of its contributors may be used to endorse or promote products derived\n# from this software without specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n# \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A\n# PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT\n# HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n###############################################################################\n\nimport numpy as np\nimport warnings\n\nfrom qutip.qip.circuit_latex import _latex_compile\nfrom qutip.qip.gates import *\nfrom qutip.qip.qubits import qubit_states\n\n__all__ = ['Gate', 'QubitCircuit']\n\n\nclass Gate(object):\n \"\"\"\n Representation of a quantum gate, with its required parametrs, and target\n and control qubits.\n \"\"\"\n\n def __init__(self, name, targets=None, controls=None, arg_value=None,\n arg_label=None):\n \"\"\"\n Creates a gate with specified parameters.\n\n Parameters\n ----------\n name : String\n Gate name.\n targets : List\n Gate targets.\n controls : List\n Gate controls.\n arg_value : Float\n Argument value(phi).\n arg_label : String\n Label for gate representation.\n \"\"\"\n self.name = name\n self.targets = None\n self.controls = None\n\n if not isinstance(targets, list) and targets is not None:\n self.targets = [targets]\n else:\n self.targets = targets\n\n if not isinstance(controls, list) and controls is not None:\n self.controls = [controls]\n else:\n self.controls = controls\n\n self.arg_value = arg_value\n self.arg_label = arg_label\n\n if name in [\"SWAP\", \"ISWAP\", \"SQRTISWAP\", \"SQRTSWAP\", \"BERKELEY\",\n \"SWAPalpha\"]:\n if len(self.targets) != 2:\n raise ValueError(\"Gate %s requires two target\" % name)\n if self.controls is not None:\n raise ValueError(\"Gate %s does not require a control\" % name)\n\n if name in [\"CNOT\", \"CSIGN\", \"CRX\", \"CRY\", \"CRZ\"]:\n if self.targets is None or len(self.targets) != 1:\n raise ValueError(\"Gate %s requires one target\" % name)\n if self.controls is None or len(self.controls) != 1:\n raise ValueError(\"Gate %s requires one control\" % name)\n\n if name in [\"SNOT\", \"RX\", \"RY\", \"RZ\", \"PHASEGATE\"]:\n if self.controls is not None:\n raise ValueError(\"Gate %s does not take controls\" % name)\n\n if name in [\"RX\", \"RY\", \"RZ\", \"CPHASE\", \"SWAPalpha\", \"PHASEGATE\",\n \"GLOBALPHASE\", \"CRX\", \"CRY\", \"CRZ\"]:\n if arg_value is None:\n raise ValueError(\"Gate %s requires an argument value\" % name)\n\n self.arg_value = arg_value\n self.arg_label = arg_label\n\n def __str__(self):\n s = \"Gate(%s, targets=%s, controls=%s)\" % (self.name,\n self.targets,\n self.controls)\n return s\n\n def __repr__(self):\n return str(self)\n\n def _repr_latex_(self):\n return str(self)\n\n\n_gate_name_to_label = {\n 'RX': r'R_x',\n 'RY': r'R_y',\n 'RZ': r'R_z',\n 'CRX': r'R_x',\n 'CRY': r'R_y',\n 'CRZ': r'R_z',\n 'SQRTNOT': r'\\sqrt{\\rm NOT}',\n 'SNOT': r'{\\rm H}',\n 'PHASEGATE': r'{\\rm PHASE}',\n 'CPHASE': r'{\\rm R}',\n 'CNOT': r'{\\rm CNOT}',\n 'CSIGN': r'{\\rm Z}',\n 'BERKELEY': r'{\\rm BERKELEY}',\n 'SWAPalpha': r'{\\rm SWAPalpha}',\n 'SWAP': r'{\\rm SWAP}',\n 'ISWAP': r'{i}{\\rm SWAP}',\n 'SQRTSWAP': r'\\sqrt{\\rm SWAP}',\n 'SQRTISWAP': r'\\sqrt{{i}\\rm SWAP}',\n 'FREDKIN': r'{\\rm FREDKIN}',\n 'TOFFOLI': r'{\\rm TOFFOLI}',\n 'GLOBALPHASE': r'{\\rm Ph}',\n}\n\n\ndef _gate_label(name, arg_label):\n\n if name in _gate_name_to_label:\n gate_label = _gate_name_to_label[name]\n else:\n warnings.warn(\"Unknown gate %s\" % name)\n gate_label = name\n\n if arg_label:\n return r'%s(%s)' % (gate_label, arg_label)\n else:\n return r'%s' % gate_label\n\n\nclass QubitCircuit(object):\n \"\"\"\n Representation of a quantum program/algorithm, maintaining a sequence\n of gates.\n \"\"\"\n\n def __init__(self, N, input_states=None, output_states=None,\n reverse_states=True):\n # number of qubits in the register\n self.N = N\n self.reverse_states = reverse_states\n self.gates = []\n self.U_list = []\n self.input_states = [None for i in range(N)]\n self.output_states = [None for i in range(N)]\n\n def add_state(self, state, targets=None, state_type=\"input\"):\n \"\"\"\n Add an input or ouput state to the circuit. By default all the input\n and output states will be initialized to `None`. A particular state can\n be added by specifying the state and the qubit where it has to be added\n along with the type as input or output.\n\n Parameters\n ----------\n state: str\n The state that has to be added. It can be any string such as `0`,\n '+', \"A\", \"Y\"\n targets: list\n A list of qubit positions where the given state has to be added.\n state_type: str\n One of either \"input\" or \"output\". This specifies whether the state\n to be added is an input or output.\n default: \"input\"\n\n \"\"\"\n if state_type == \"input\":\n for i in targets:\n self.input_states[i] = state\n if state_type == \"output\":\n for i in targets:\n self.output_states[i] = state\n\n def add_gate(self, gate, targets=None, controls=None, arg_value=None,\n arg_label=None):\n \"\"\"\n Adds a gate with specified parameters to the circuit.\n\n Parameters\n ----------\n gate: String or `Gate`\n Gate name. If gate is an instance of `Gate`, parameters are\n unpacked and added.\n targets: List\n Gate targets.\n controls: List\n Gate controls.\n arg_value: Float\n Argument value(phi).\n arg_label: String\n Label for gate representation.\n \"\"\"\n if isinstance(gate, Gate):\n name = gate.name\n targets = gate.targets\n controls = gate.controls\n arg_value = gate.arg_value\n arg_label = gate.arg_label\n\n else:\n name = gate\n self.gates.append(Gate(name, targets=targets, controls=controls,\n arg_value=arg_value, arg_label=arg_label))\n\n def add_1q_gate(self, name, start=0, end=None, qubits=None,\n arg_value=None, arg_label=None):\n \"\"\"\n Adds a single qubit gate with specified parameters on a variable\n number of qubits in the circuit. By default, it applies the given gate\n to all the qubits in the register.\n\n Parameters\n ----------\n name : String\n Gate name.\n start : Integer\n Starting location of qubits.\n end : Integer\n Last qubit for the gate.\n qubits : List\n Specific qubits for applying gates.\n arg_value : Float\n Argument value(phi).\n arg_label : String\n Label for gate representation.\n \"\"\"\n if name not in [\"RX\", \"RY\", \"RZ\", \"SNOT\", \"SQRTNOT\", \"PHASEGATE\"]:\n raise ValueError(\"%s is not a single qubit gate\" % name)\n\n if qubits is not None:\n for i in range(len(qubits)):\n self.gates.append(Gate(name, targets=qubits[i], controls=None,\n arg_value=arg_value,\n arg_label=arg_label))\n\n else:\n if end is None:\n end = self.N - 1\n for i in range(start, end):\n self.gates.append(Gate(name, targets=i, controls=None,\n arg_value=arg_value,\n arg_label=arg_label))\n\n def add_circuit(self, qc, start=0):\n \"\"\"\n Adds a block of a qubit circuit to the main circuit.\n Globalphase gates are not added.\n\n Parameters\n ----------\n qc : QubitCircuit\n The circuit block to be added to the main circuit.\n start : Integer\n The qubit on which the first gate is applied.\n \"\"\"\n\n if self.N - start < len(qc.gates):\n raise NotImplementedError(\"Targets exceed number of qubits.\")\n\n for gate in qc.gates:\n if gate.name in [\"RX\", \"RY\", \"RZ\", \"SNOT\", \"SQRTNOT\", \"PHASEGATE\"]:\n self.add_gate(gate.name, gate.targets[0] + start, None,\n gate.arg_value, gate.arg_label)\n elif gate.name in [\"CPHASE\", \"CNOT\", \"CSIGN\", \"CRX\", \"CRY\", \"CRZ\"]:\n self.add_gate(gate.name, gate.targets[0] + start,\n gate.controls[0] + start, gate.arg_value,\n gate.arg_label)\n elif gate.name in [\"BERKELEY\", \"SWAPalpha\", \"SWAP\", \"ISWAP\",\n \"SQRTSWAP\", \"SQRTISWAP\"]:\n self.add_gate(gate.name, None,\n [gate.controls[0] + start,\n gate.controls[1] + start], None, None)\n elif gate.name in [\"TOFFOLI\"]:\n self.add_gate(gate.name, gate.targets[0] + start,\n [gate.controls[0] + start,\n gate.controls[1] + start], None, None)\n elif gate.name in [\"FREDKIN\"]:\n self.add_gate(gate.name,\n [gate.targets[0] + start,\n gate.targets[1] + start],\n gate.controls + start, None, None)\n\n def remove_gate(self, index=None, end=None, name=None, remove=\"first\"):\n \"\"\"\n Removes a gate from a specific index or between two indexes or the\n first, last or all instances of a particular gate.\n\n Parameters\n ----------\n index : Integer\n Location of gate to be removed.\n name : String\n Gate name to be removed.\n remove : String\n If first or all gate are to be removed.\n \"\"\"\n if index is not None and index <= self.N:\n if end is not None and end <= self.N:\n for i in range(end - index):\n self.gates.pop(index + i)\n elif end is not None and end > self.N:\n raise ValueError(\"End target exceeds number of gates.\")\n else:\n self.gates.pop(index)\n\n elif name is not None and remove == \"first\":\n for gate in self.gates:\n if name == gate.name:\n self.gates.remove(gate)\n break\n\n elif name is not None and remove == \"last\":\n for i in range(self.N + 1):\n if name == self.gates[self.N - i].name:\n self.gates.remove(self.gates[self.N - i])\n break\n\n elif name is not None and remove == \"all\":\n for j in range(self.N + 1):\n if name == self.gates[self.N - j].name:\n self.gates.remove(self.gates[self.N - j])\n\n else:\n self.gates.pop()\n\n def reverse_circuit(self):\n \"\"\"\n Reverses an entire circuit of unitary gates.\n\n Returns\n ----------\n qc : QubitCircuit\n Returns QubitCircuit of resolved gates for the qubit circuit in the\n reverse order.\n\n \"\"\"\n temp = QubitCircuit(self.N, self.reverse_states)\n\n for gate in reversed(self.gates):\n temp.add_gate(gate)\n\n return temp\n\n def resolve_gates(self, basis=[\"CNOT\", \"RX\", \"RY\", \"RZ\"]):\n \"\"\"\n Unitary matrix calculator for N qubits returning the individual\n steps as unitary matrices operating from left to right in the specified\n basis.\n\n Parameters\n ----------\n basis : list.\n Basis of the resolved circuit.\n\n Returns\n -------\n qc : QubitCircuit\n Returns QubitCircuit of resolved gates for the qubit circuit in the\n desired basis.\n \"\"\"\n qc_temp = QubitCircuit(self.N, self.reverse_states)\n temp_resolved = []\n\n basis_1q = []\n basis_2q = None\n\n basis_1q_valid = [\"RX\", \"RY\", \"RZ\"]\n basis_2q_valid = [\"CNOT\", \"CSIGN\", \"ISWAP\", \"SQRTSWAP\", \"SQRTISWAP\"]\n\n if isinstance(basis, list):\n for gate in basis:\n if gate not in (basis_1q_valid + basis_2q_valid):\n raise ValueError(\"%s is not a valid basis gate\" % gate)\n\n if gate in basis_2q_valid:\n if basis_2q is not None:\n raise ValueError(\"At most one two-qubit gate allowed\")\n basis_2q = gate\n\n else:\n basis_1q.append(gate)\n\n if len(basis_1q) == 1:\n raise ValueError(\"Not sufficient single-qubit gates in basis\")\n elif len(basis_1q) == 0:\n basis_1q = [\"RX\", \"RY\", \"RZ\"]\n\n else:\n basis_1q = [\"RX\", \"RY\", \"RZ\"]\n if basis in basis_2q_valid:\n basis_2q = basis\n else:\n raise ValueError(\"%s is not a valid two-qubit basis gate\"\n % basis)\n\n for gate in self.gates:\n if gate.name == \"RX\":\n temp_resolved.append(gate)\n elif gate.name == \"RY\":\n temp_resolved.append(gate)\n elif gate.name == \"RZ\":\n temp_resolved.append(gate)\n elif gate.name == \"SQRTNOT\":\n temp_resolved.append(Gate(\"GLOBALPHASE\", None, None,\n arg_value=np.pi / 4,\n arg_label=r\"\\pi/4\"))\n temp_resolved.append(Gate(\"RX\", gate.targets, None,\n arg_value=np.pi / 2,\n arg_label=r\"\\pi/2\"))\n elif gate.name == \"SNOT\":\n temp_resolved.append(Gate(\"GLOBALPHASE\", None, None,\n arg_value=np.pi / 2,\n arg_label=r\"\\pi/2\"))\n temp_resolved.append(Gate(\"RX\", gate.targets, None,\n arg_value=np.pi, arg_label=r\"\\pi\"))\n temp_resolved.append(Gate(\"RY\", gate.targets, None,\n arg_value=np.pi / 2,\n arg_label=r\"\\pi/2\"))\n elif gate.name == \"PHASEGATE\":\n temp_resolved.append(Gate(\"GLOBALPHASE\", None, None,\n arg_value=gate.arg_value / 2,\n arg_label=gate.arg_label))\n temp_resolved.append(Gate(\"RZ\", gate.targets, None,\n gate.arg_value, gate.arg_label))\n elif gate.name == \"CPHASE\":\n raise NotImplementedError(\"Cannot be resolved in this basis\")\n elif gate.name == \"CNOT\":\n temp_resolved.append(gate)\n elif gate.name == \"CSIGN\" and basis_2q is not \"CSIGN\":\n temp_resolved.append(Gate(\"RY\", gate.targets, None,\n arg_value=np.pi / 2,\n arg_label=r\"\\pi/2\"))\n temp_resolved.append(Gate(\"RX\", gate.targets, None,\n arg_value=np.pi, arg_label=r\"\\pi\"))\n temp_resolved.append(Gate(\"CNOT\", gate.targets, gate.controls))\n temp_resolved.append(Gate(\"RY\", gate.targets, None,\n arg_value=np.pi / 2,\n arg_label=r\"\\pi/2\"))\n temp_resolved.append(Gate(\"RX\", gate.targets, None,\n arg_value=np.pi, arg_label=r\"\\pi\"))\n temp_resolved.append(Gate(\"GLOBALPHASE\", None, None,\n arg_value=np.pi, arg_label=r\"\\pi\"))\n elif gate.name == \"BERKELEY\":\n raise NotImplementedError(\"Cannot be resolved in this basis\")\n elif gate.name == \"SWAPalpha\":\n raise NotImplementedError(\"Cannot be resolved in this basis\")\n elif gate.name == \"SWAP\" and basis_2q is not \"ISWAP\":\n temp_resolved.append(Gate(\"CNOT\", gate.targets[0],\n gate.targets[1]))\n temp_resolved.append(Gate(\"CNOT\", gate.targets[1],\n gate.targets[0]))\n temp_resolved.append(Gate(\"CNOT\", gate.targets[0],\n gate.targets[1]))\n elif gate.name == \"ISWAP\" and basis_2q is not \"ISWAP\":\n temp_resolved.append(Gate(\"CNOT\", gate.targets[0],\n gate.targets[1]))\n temp_resolved.append(Gate(\"CNOT\", gate.targets[1],\n gate.targets[0]))\n temp_resolved.append(Gate(\"CNOT\", gate.targets[0],\n gate.targets[1]))\n temp_resolved.append(Gate(\"RZ\", gate.targets[0], None,\n arg_value=np.pi / 2,\n arg_label=r\"\\pi/2\"))\n temp_resolved.append(Gate(\"RZ\", gate.targets[1], None,\n arg_value=np.pi / 2,\n arg_label=r\"\\pi/2\"))\n temp_resolved.append(Gate(\"RY\", gate.targets[0], None,\n arg_value=np.pi / 2,\n arg_label=r\"\\pi/2\"))\n temp_resolved.append(Gate(\"RX\", gate.targets, None,\n arg_value=np.pi, arg_label=r\"\\pi\"))\n temp_resolved.append(Gate(\"CNOT\", gate.targets[0],\n gate.targets[1]))\n temp_resolved.append(Gate(\"RY\", gate.targets[0], None,\n arg_value=np.pi / 2,\n arg_label=r\"\\pi/2\"))\n temp_resolved.append(Gate(\"RX\", gate.targets, None,\n arg_value=np.pi, arg_label=r\"\\pi\"))\n temp_resolved.append(Gate(\"GLOBALPHASE\", None, None,\n arg_value=np.pi, arg_label=r\"\\pi\"))\n temp_resolved.append(Gate(\"GLOBALPHASE\", None, None,\n arg_value=np.pi / 2,\n arg_label=r\"\\pi/2\"))\n elif gate.name == \"SQRTSWAP\" and basis_2q not in [\"SQRTSWAP\",\n \"ISWAP\"]:\n raise NotImplementedError(\"Cannot be resolved in this basis\")\n elif gate.name == \"SQRTISWAP\" and basis_2q not in [\"SQRTISWAP\",\n \"ISWAP\"]:\n raise NotImplementedError(\"Cannot be resolved in this basis\")\n elif gate.name == \"FREDKIN\":\n temp_resolved.append(Gate(\"CNOT\", gate.targets[0],\n gate.targets[1]))\n temp_resolved.append(Gate(\"CNOT\", gate.targets[0],\n gate.controls))\n temp_resolved.append(Gate(\"RZ\", gate.controls, None,\n arg_value=np.pi / 8,\n arg_label=r\"\\pi/8\"))\n temp_resolved.append(Gate(\"RZ\", [gate.targets[0]], None,\n arg_value=-np.pi / 8,\n arg_label=r\"-\\pi/8\"))\n temp_resolved.append(Gate(\"CNOT\", gate.targets[0],\n gate.controls))\n temp_resolved.append(Gate(\"GLOBALPHASE\", None, None,\n arg_value=np.pi / 2,\n arg_label=r\"\\pi/2\"))\n temp_resolved.append(Gate(\"RY\", gate.targets[1], None,\n arg_value=np.pi / 2,\n arg_label=r\"\\pi/2\"))\n temp_resolved.append(Gate(\"RY\", gate.targets, None,\n arg_value=-np.pi / 2,\n arg_label=r\"-\\pi/2\"))\n temp_resolved.append(Gate(\"RZ\", gate.targets, None,\n arg_value=np.pi, arg_label=r\"\\pi\"))\n temp_resolved.append(Gate(\"RY\", gate.targets, None,\n arg_value=np.pi / 2,\n arg_label=r\"\\pi/2\"))\n temp_resolved.append(Gate(\"RZ\", gate.targets[0], None,\n arg_value=np.pi / 8,\n arg_label=r\"\\pi/8\"))\n temp_resolved.append(Gate(\"RZ\", gate.targets[1], None,\n arg_value=np.pi / 8,\n arg_label=r\"\\pi/8\"))\n temp_resolved.append(Gate(\"CNOT\", gate.targets[1],\n gate.controls))\n temp_resolved.append(Gate(\"RZ\", gate.targets[1], None,\n arg_value=-np.pi / 8,\n arg_label=r\"-\\pi/8\"))\n temp_resolved.append(Gate(\"CNOT\", gate.targets[1],\n gate.targets[0]))\n temp_resolved.append(Gate(\"RZ\", gate.targets[1], None,\n arg_value=np.pi / 8,\n arg_label=r\"\\pi/8\"))\n temp_resolved.append(Gate(\"CNOT\", gate.targets[1],\n gate.controls))\n temp_resolved.append(Gate(\"RZ\", gate.targets[1], None,\n arg_value=-np.pi / 8,\n arg_label=r\"-\\pi/8\"))\n temp_resolved.append(Gate(\"CNOT\", gate.targets[1],\n gate.targets[0]))\n temp_resolved.append(Gate(\"GLOBALPHASE\", None, None,\n arg_value=np.pi / 2,\n arg_label=r\"\\pi/2\"))\n temp_resolved.append(Gate(\"RY\", gate.targets[1], None,\n arg_value=np.pi / 2,\n arg_label=r\"\\pi/2\"))\n temp_resolved.append(Gate(\"RY\", gate.targets, None,\n arg_value=-np.pi / 2,\n arg_label=r\"-\\pi/2\"))\n temp_resolved.append(Gate(\"RZ\", gate.targets, None,\n arg_value=np.pi, arg_label=r\"\\pi\"))\n temp_resolved.append(Gate(\"RY\", gate.targets, None,\n arg_value=np.pi / 2,\n arg_label=r\"\\pi/2\"))\n temp_resolved.append(Gate(\"CNOT\", gate.targets[0],\n gate.targets[1]))\n\n elif gate.name == \"TOFFOLI\":\n temp_resolved.append(Gate(\"GLOBALPHASE\", None, None,\n arg_value=1 * np.pi / 8,\n arg_label=r\"\\pi/8\"))\n temp_resolved.append(Gate(\"RZ\", gate.controls[1], None,\n arg_value=np.pi/2,\n arg_label=r\"\\pi/2\"))\n temp_resolved.append(Gate(\"RZ\", gate.controls[0], None,\n arg_value=np.pi / 4,\n arg_label=r\"\\pi/4\"))\n temp_resolved.append(Gate(\"CNOT\", gate.controls[1],\n gate.controls[0]))\n temp_resolved.append(Gate(\"RZ\", gate.controls[1], None,\n arg_value=-np.pi / 4,\n arg_label=r\"-\\pi/4\"))\n temp_resolved.append(Gate(\"CNOT\", gate.controls[1],\n gate.controls[0]))\n temp_resolved.append(Gate(\"GLOBALPHASE\", None, None,\n arg_value=np.pi / 2,\n arg_label=r\"\\pi/2\"))\n temp_resolved.append(Gate(\"RY\", gate.targets, None,\n arg_value=np.pi / 2,\n arg_label=r\"\\pi/2\"))\n temp_resolved.append(Gate(\"RX\", gate.targets, None,\n arg_value=np.pi, arg_label=r\"\\pi\"))\n temp_resolved.append(Gate(\"RZ\", gate.controls[1], None,\n arg_value=-np.pi / 4,\n arg_label=r\"-\\pi/4\"))\n temp_resolved.append(Gate(\"RZ\", gate.targets, None,\n arg_value=np.pi / 4,\n arg_label=r\"\\pi/4\"))\n temp_resolved.append(Gate(\"CNOT\", gate.targets,\n gate.controls[0]))\n temp_resolved.append(Gate(\"RZ\", gate.targets, None,\n arg_value=-np.pi / 4,\n arg_label=r\"-\\pi/4\"))\n temp_resolved.append(Gate(\"CNOT\", gate.targets,\n gate.controls[1]))\n temp_resolved.append(Gate(\"RZ\", gate.targets, None,\n arg_value=np.pi / 4,\n arg_label=r\"\\pi/4\"))\n temp_resolved.append(Gate(\"CNOT\", gate.targets,\n gate.controls[0]))\n temp_resolved.append(Gate(\"RZ\", gate.targets, None,\n arg_value=-np.pi / 4,\n arg_label=r\"-\\pi/4\"))\n temp_resolved.append(Gate(\"CNOT\", gate.targets,\n gate.controls[1]))\n temp_resolved.append(Gate(\"GLOBALPHASE\", None, None,\n arg_value=np.pi / 2,\n arg_label=r\"\\pi/2\"))\n temp_resolved.append(Gate(\"RY\", gate.targets, None,\n arg_value=np.pi / 2,\n arg_label=r\"\\pi/2\"))\n temp_resolved.append(Gate(\"RX\", gate.targets, None,\n arg_value=np.pi, arg_label=r\"\\pi\"))\n\n elif gate.name == \"GLOBALPHASE\":\n temp_resolved.append(Gate(gate.name, gate.targets,\n gate.controls,\n gate.arg_value, gate.arg_label))\n else:\n temp_resolved.append(gate)\n\n if basis_2q == \"CSIGN\":\n for gate in temp_resolved:\n if gate.name == \"CNOT\":\n qc_temp.gates.append(Gate(\"RY\", gate.targets, None,\n arg_value=-np.pi / 2,\n arg_label=r\"-\\pi/2\"))\n qc_temp.gates.append(Gate(\"CSIGN\", gate.targets,\n gate.controls))\n qc_temp.gates.append(Gate(\"RY\", gate.targets, None,\n arg_value=np.pi / 2,\n arg_label=r\"\\pi/2\"))\n else:\n qc_temp.gates.append(gate)\n elif basis_2q == \"ISWAP\":\n for gate in temp_resolved:\n if gate.name == \"CNOT\":\n qc_temp.gates.append(Gate(\"GLOBALPHASE\", None, None,\n arg_value=np.pi / 4,\n arg_label=r\"\\pi/4\"))\n qc_temp.gates.append(Gate(\"ISWAP\", [gate.controls[0],\n gate.targets[0]],\n None))\n qc_temp.gates.append(Gate(\"RZ\", gate.targets, None,\n arg_value=-np.pi / 2,\n arg_label=r\"-\\pi/2\"))\n qc_temp.gates.append(Gate(\"RY\", gate.controls, None,\n arg_value=-np.pi / 2,\n arg_label=r\"-\\pi/2\"))\n qc_temp.gates.append(Gate(\"RZ\", gate.controls, None,\n arg_value=np.pi / 2,\n arg_label=r\"\\pi/2\"))\n qc_temp.gates.append(Gate(\"ISWAP\", [gate.controls[0],\n gate.targets[0]],\n None))\n qc_temp.gates.append(Gate(\"RY\", gate.targets, None,\n arg_value=-np.pi / 2,\n arg_label=r\"-\\pi/2\"))\n qc_temp.gates.append(Gate(\"RZ\", gate.targets, None,\n arg_value=np.pi / 2,\n arg_label=r\"\\pi/2\"))\n elif gate.name == \"SWAP\":\n qc_temp.gates.append(Gate(\"GLOBALPHASE\", None, None,\n arg_value=np.pi / 4,\n arg_label=r\"\\pi/4\"))\n qc_temp.gates.append(Gate(\"ISWAP\", gate.targets, None))\n qc_temp.gates.append(Gate(\"RX\", gate.targets[0], None,\n arg_value=-np.pi / 2,\n arg_label=r\"-\\pi/2\"))\n qc_temp.gates.append(Gate(\"ISWAP\", gate.targets, None))\n qc_temp.gates.append(Gate(\"RX\", gate.targets[1], None,\n arg_value=-np.pi / 2,\n arg_label=r\"-\\pi/2\"))\n qc_temp.gates.append(Gate(\"ISWAP\", [gate.targets[1],\n gate.targets[0]],\n None))\n qc_temp.gates.append(Gate(\"RX\", gate.targets[0], None,\n arg_value=-np.pi / 2,\n arg_label=r\"-\\pi/2\"))\n else:\n qc_temp.gates.append(gate)\n elif basis_2q == \"SQRTSWAP\":\n for gate in temp_resolved:\n if gate.name == \"CNOT\":\n qc_temp.gates.append(Gate(\"RY\", gate.targets, None,\n arg_value=np.pi / 2,\n arg_label=r\"\\pi/2\"))\n qc_temp.gates.append(Gate(\"SQRTSWAP\", [gate.controls[0],\n gate.targets[0]],\n None))\n qc_temp.gates.append(Gate(\"RZ\", gate.controls, None,\n arg_value=np.pi,\n arg_label=r\"\\pi\"))\n qc_temp.gates.append(Gate(\"SQRTSWAP\", [gate.controls[0],\n gate.targets[0]],\n None))\n qc_temp.gates.append(Gate(\"RZ\", gate.targets, None,\n arg_value=-np.pi / 2,\n arg_label=r\"-\\pi/2\"))\n qc_temp.gates.append(Gate(\"RY\", gate.targets, None,\n arg_value=-np.pi / 2,\n arg_label=r\"-\\pi/2\"))\n qc_temp.gates.append(Gate(\"RZ\", gate.controls, None,\n arg_value=-np.pi / 2,\n arg_label=r\"-\\pi/2\"))\n else:\n qc_temp.gates.append(gate)\n elif basis_2q == \"SQRTISWAP\":\n for gate in temp_resolved:\n if gate.name == \"CNOT\":\n qc_temp.gates.append(Gate(\"RY\", gate.controls, None,\n arg_value=-np.pi / 2,\n arg_label=r\"-\\pi/2\"))\n qc_temp.gates.append(Gate(\"RX\", gate.controls, None,\n arg_value=np.pi / 2,\n arg_label=r\"\\pi/2\"))\n qc_temp.gates.append(Gate(\"RX\", gate.targets, None,\n arg_value=-np.pi / 2,\n arg_label=r\"-\\pi/2\"))\n qc_temp.gates.append(Gate(\"SQRTISWAP\", [gate.controls[0],\n gate.targets[0]],\n None))\n qc_temp.gates.append(Gate(\"RX\", gate.controls, None,\n arg_value=np.pi,\n arg_label=r\"\\pi\"))\n qc_temp.gates.append(Gate(\"SQRTISWAP\", [gate.controls[0],\n gate.targets[0]],\n None))\n qc_temp.gates.append(Gate(\"RY\", gate.controls, None,\n arg_value=np.pi / 2,\n arg_label=r\"\\pi/2\"))\n qc_temp.gates.append(Gate(\"GLOBALPHASE\", None, None,\n arg_value=np.pi / 4,\n arg_label=r\"\\pi/4\"))\n qc_temp.gates.append(Gate(\"RZ\", gate.controls, None,\n arg_value=np.pi,\n arg_label=r\"\\pi\"))\n qc_temp.gates.append(Gate(\"GLOBALPHASE\", None, None,\n arg_value=3 * np.pi / 2,\n arg_label=r\"3\\pi/2\"))\n else:\n qc_temp.gates.append(gate)\n else:\n qc_temp.gates = temp_resolved\n\n if len(basis_1q) == 2:\n temp_resolved = qc_temp.gates\n qc_temp.gates = []\n for gate in temp_resolved:\n if gate.name == \"RX\" and \"RX\" not in basis_1q:\n qc_temp.gates.append(Gate(\"RY\", gate.targets, None,\n arg_value=-np.pi / 2,\n arg_label=r\"-\\pi/2\"))\n qc_temp.gates.append(Gate(\"RZ\", gate.targets, None,\n gate.arg_value, gate.arg_label))\n qc_temp.gates.append(Gate(\"RY\", gate.targets, None,\n arg_value=np.pi / 2,\n arg_label=r\"\\pi/2\"))\n elif gate.name == \"RY\" and \"RY\" not in basis_1q:\n qc_temp.gates.append(Gate(\"RZ\", gate.targets, None,\n arg_value=-np.pi / 2,\n arg_label=r\"-\\pi/2\"))\n qc_temp.gates.append(Gate(\"RX\", gate.targets, None,\n gate.arg_value, gate.arg_label))\n qc_temp.gates.append(Gate(\"RZ\", gate.targets, None,\n arg_value=np.pi / 2,\n arg_label=r\"\\pi/2\"))\n elif gate.name == \"RZ\" and \"RZ\" not in basis_1q:\n qc_temp.gates.append(Gate(\"RX\", gate.targets, None,\n arg_value=-np.pi / 2,\n arg_label=r\"-\\pi/2\"))\n qc_temp.gates.append(Gate(\"RY\", gate.targets, None,\n gate.arg_value, gate.arg_label))\n qc_temp.gates.append(Gate(\"RX\", gate.targets, None,\n arg_value=np.pi / 2,\n arg_label=r\"\\pi/2\"))\n else:\n qc_temp.gates.append(gate)\n\n return qc_temp\n\n def adjacent_gates(self):\n \"\"\"\n Method to resolve two qubit gates with non-adjacent control/s or\n target/s in terms of gates with adjacent interactions.\n\n Returns\n ----------\n qc : QubitCircuit\n Returns QubitCircuit of the gates for the qubit circuit with the\n resolved non-adjacent gates.\n\n \"\"\"\n temp = QubitCircuit(self.N, self.reverse_states)\n swap_gates = [\"SWAP\", \"ISWAP\", \"SQRTISWAP\", \"SQRTSWAP\", \"BERKELEY\",\n \"SWAPalpha\"]\n\n for gate in self.gates:\n if gate.name == \"CNOT\" or gate.name == \"CSIGN\":\n start = min([gate.targets[0], gate.controls[0]])\n end = max([gate.targets[0], gate.controls[0]])\n i = start\n while i < end:\n if start + end - i - i == 1 and (end - start + 1) % 2 == 0:\n # Apply required gate if control, target are adjacent\n # to each other, provided |control-target| is even.\n if end == gate.controls[0]:\n temp.gates.append(Gate(gate.name, targets=[i],\n controls=[i + 1]))\n else:\n temp.gates.append(Gate(gate.name, targets=[i + 1],\n controls=[i]))\n elif (start + end - i - i == 2 and\n (end - start + 1) % 2 == 1):\n # Apply a swap between i and its adjacent gate, then\n # the required gate if and then another swap if control\n # and target have one qubit between them, provided\n # |control-target| is odd.\n temp.gates.append(Gate(\"SWAP\", targets=[i, i + 1]))\n if end == gate.controls[0]:\n temp.gates.append(Gate(gate.name, targets=[i + 1],\n controls=[i + 2]))\n else:\n temp.gates.append(Gate(gate.name, targets=[i + 2],\n controls=[i + 1]))\n temp.gates.append(Gate(\"SWAP\", targets=[i, i + 1]))\n i += 1\n else:\n # Swap the target/s and/or control with their adjacent\n # qubit to bring them closer.\n temp.gates.append(Gate(\"SWAP\", targets=[i, i + 1]))\n temp.gates.append(Gate(\"SWAP\",\n targets=[start + end - i - 1,\n start + end - i]))\n i += 1\n\n elif gate.name in swap_gates:\n start = min([gate.targets[0], gate.targets[1]])\n end = max([gate.targets[0], gate.targets[1]])\n i = start\n while i < end:\n if start + end - i - i == 1 and (end - start + 1) % 2 == 0:\n temp.gates.append(Gate(gate.name, targets=[i, i + 1]))\n elif ((start + end - i - i) == 2 and\n (end - start + 1) % 2 == 1):\n temp.gates.append(Gate(\"SWAP\", targets=[i, i + 1]))\n temp.gates.append(\n Gate(gate.name, targets=[i + 1, i + 2]))\n temp.gates.append(Gate(\"SWAP\", targets=[i, i + 1]))\n i += 1\n else:\n temp.gates.append(Gate(\"SWAP\", targets=[i, i + 1]))\n temp.gates.append(Gate(\"SWAP\",\n targets=[start + end - i - 1,\n start + end - i]))\n i += 1\n\n else:\n temp.gates.append(gate)\n\n return temp\n\n def propagators(self):\n \"\"\"\n Propagator matrix calculator for N qubits returning the individual\n steps as unitary matrices operating from left to right.\n\n Returns\n -------\n U_list : list\n Returns list of unitary matrices for the qubit circuit.\n\n \"\"\"\n self.U_list = []\n\n for gate in self.gates:\n if gate.name == \"RX\":\n self.U_list.append(rx(gate.arg_value, self.N, gate.targets[0]))\n elif gate.name == \"RY\":\n self.U_list.append(ry(gate.arg_value, self.N, gate.targets[0]))\n elif gate.name == \"RZ\":\n self.U_list.append(rz(gate.arg_value, self.N, gate.targets[0]))\n elif gate.name == \"SQRTNOT\":\n self.U_list.append(sqrtnot(self.N, gate.targets[0]))\n elif gate.name == \"SNOT\":\n self.U_list.append(snot(self.N, gate.targets[0]))\n elif gate.name == \"PHASEGATE\":\n self.U_list.append(phasegate(gate.arg_value, self.N,\n gate.targets[0]))\n if gate.name == \"CRX\":\n self.U_list.append(controlled_gate(rx(gate.arg_value),\n N=self.N,\n control=gate.controls[0],\n target=gate.targets[0]))\n elif gate.name == \"CRY\":\n self.U_list.append(controlled_gate(ry(gate.arg_value),\n N=self.N,\n control=gate.controls[0],\n target=gate.targets[0]))\n elif gate.name == \"CRZ\":\n self.U_list.append(controlled_gate(rz(gate.arg_value),\n N=self.N,\n control=gate.controls[0],\n target=gate.targets[0]))\n elif gate.name == \"CPHASE\":\n self.U_list.append(cphase(gate.arg_value, self.N,\n gate.controls[0], gate.targets[0]))\n elif gate.name == \"CNOT\":\n self.U_list.append(cnot(self.N,\n gate.controls[0], gate.targets[0]))\n elif gate.name == \"CSIGN\":\n self.U_list.append(csign(self.N,\n gate.controls[0], gate.targets[0]))\n elif gate.name == \"BERKELEY\":\n self.U_list.append(berkeley(self.N, gate.targets))\n elif gate.name == \"SWAPalpha\":\n self.U_list.append(swapalpha(gate.arg_value, self.N,\n gate.targets))\n elif gate.name == \"SWAP\":\n self.U_list.append(swap(self.N, gate.targets))\n elif gate.name == \"ISWAP\":\n self.U_list.append(iswap(self.N, gate.targets))\n elif gate.name == \"SQRTSWAP\":\n self.U_list.append(sqrtswap(self.N, gate.targets))\n elif gate.name == \"SQRTISWAP\":\n self.U_list.append(sqrtiswap(self.N, gate.targets))\n elif gate.name == \"FREDKIN\":\n self.U_list.append(fredkin(self.N, gate.controls[0],\n gate.targets))\n elif gate.name == \"TOFFOLI\":\n self.U_list.append(toffoli(self.N, gate.controls,\n gate.targets[0]))\n elif gate.name == \"GLOBALPHASE\":\n self.U_list.append(globalphase(gate.arg_value, self.N))\n\n return self.U_list\n\n def latex_code(self):\n rows = []\n\n gates = self.gates\n\n for gate in gates:\n col = []\n for n in range(self.N):\n if gate.targets and n in gate.targets:\n\n if len(gate.targets) > 1:\n if ((self.reverse_states and n == max(gate.targets)) or\n (not self.reverse_states\n and n == min(gate.targets))):\n col.append(r\" \\multigate{%d}{%s} \" %\n (len(gate.targets) - 1,\n _gate_label(gate.name,\n gate.arg_label)))\n else:\n col.append(r\" \\ghost{%s} \" %\n (_gate_label(gate.name,\n gate.arg_label)))\n\n elif gate.name == \"CNOT\":\n col.append(r\" \\targ \")\n elif gate.name == \"SWAP\":\n col.append(r\" \\qswap \")\n else:\n col.append(r\" \\gate{%s} \" %\n _gate_label(gate.name, gate.arg_label))\n\n elif gate.controls and n in gate.controls:\n m = (gate.targets[0] - n) * (-1 if self.reverse_states\n else 1)\n if gate.name == \"SWAP\":\n col.append(r\" \\qswap \\ctrl{%d} \" % m)\n else:\n col.append(r\" \\ctrl{%d} \" % m)\n\n elif (not gate.controls and not gate.targets):\n # global gate\n if ((self.reverse_states and n == self.N - 1)\n or (not self.reverse_states and n == 0)):\n col.append(r\" \\multigate{%d}{%s} \" %\n (self.N - 1,\n _gate_label(gate.name, gate.arg_label)))\n else:\n col.append(r\" \\ghost{%s} \" %\n (_gate_label(gate.name, gate.arg_label)))\n\n else:\n col.append(r\" \\qw \")\n\n col.append(r\" \\qw \")\n rows.append(col)\n\n input_states = [\"\\lstick{\\ket{\" + x + \"}}\" if x is not None\n else \"\" for x in self.input_states]\n\n code = \"\"\n n_iter = (reversed(range(self.N)) if self.reverse_states\n else range(self.N))\n for n in n_iter:\n code += r\" & %s\" % input_states[n]\n for m in range(len(gates)):\n code += r\" & %s\" % rows[m][n]\n code += r\" & \\qw \\\\ \" + \"\\n\"\n\n return code\n\n def _repr_png_(self):\n return _latex_compile(self.latex_code(), format=\"png\")\n\n def _repr_svg_(self):\n return _latex_compile(self.latex_code(), format=\"svg\")\n\n @property\n def png(self):\n from IPython.display import Image\n return Image(self._repr_png_(), embed=True)\n\n @property\n def svg(self):\n from IPython.display import SVG\n return SVG(self._repr_svg_())\n\n def qasm(self):\n\n code = \"# qasm code generated by QuTiP\\n\\n\"\n\n for n in range(self.N):\n code += \"\\tqubit\\tq%d\\n\" % n\n\n code += \"\\n\"\n\n for gate in self.gates:\n code += \"\\t%s\\t\" % gate.name\n qtargets = [\"q%d\" %\n t for t in gate.targets] if gate.targets else []\n qcontrols = ([\"q%d\" % c for c in gate.controls] if gate.controls\n else [])\n code += \",\".join(qtargets + qcontrols)\n code += \"\\n\"\n\n return code\n", "path": "qutip/qip/circuit.py" } ]
diff --git a/qutip/qip/circuit.py b/qutip/qip/circuit.py index 491bb093f3..0ba190a5c7 100644 --- a/qutip/qip/circuit.py +++ b/qutip/qip/circuit.py @@ -367,8 +367,8 @@ def reverse_circuit(self): """ temp = QubitCircuit(self.N, self.reverse_states) - for i in range(self.N): - temp.append(self.gates[self.N - i - 1]) + for gate in reversed(self.gates): + temp.add_gate(gate) return temp diff --git a/qutip/tests/test_qubitcircuit.py b/qutip/tests/test_qubitcircuit.py index e078e1adb7..90e131510f 100644 --- a/qutip/tests/test_qubitcircuit.py +++ b/qutip/tests/test_qubitcircuit.py @@ -171,8 +171,6 @@ def test_add_state(self): assert_(qc.input_states[0] == "0") assert_(qc.input_states[2] == None) assert_(qc.output_states[1] == "+") - assert_(qc.output_states[1] == "+") - assert_(qc.output_states[2] == None) qc1 = QubitCircuit(10) @@ -196,5 +194,31 @@ def test_add_state(self): assert_(qc1.output_states[0] == "beta") + def test_reverse(self): + """ + Reverse a quantum circuit + """ + qc = QubitCircuit(3) + + qc.add_gate("RX", targets=[0], arg_value=3.141, + arg_label=r"\pi/2") + qc.add_gate("CNOT", targets=[1], controls=[0]) + qc.add_gate("SNOT", targets=[2]) + # Keep input output same + + qc.add_state("0", targets=[0]) + qc.add_state("+", targets=[1], state_type="output") + qc.add_state("-", targets=[1]) + + qc.reverse_circuit() + + assert_(qc.gates[2].name == "SNOT") + assert_(qc.gates[1].name == "CNOT") + assert_(qc.gates[0].name == "RX") + + assert_(qc.input_states[0] == "0") + assert_(qc.input_states[2] == None) + assert_(qc.output_states[1] == "+") + if __name__ == "__main__": run_module_suite()
pytorch__pytorch-2048
Small mistake in nn.Threshold documentation Hello, In the [documentation](http://pytorch.org/docs/master/nn.html?highlight=threshold#torch.nn.Threshold) it says ``` y = x if x >= threshold value if x < threshold ``` So the following: `torch.nn.Threshold(1,0)(torch.Tensor([1]))` should evaluate to 1, but instead returns 0. Maybe it should be corrected to: ``` y = x if x > threshold value if x <= threshold ```
[ { "content": "import torch\nfrom torch.nn.parameter import Parameter\n\nfrom .module import Module\nfrom .. import functional as F\n\n\nclass Threshold(Module):\n \"\"\"Thresholds each element of the input Tensor\n\n Threshold is defined as::\n\n y = x if x >= threshold\n value if x < threshold\n\n Args:\n threshold: The value to threshold at\n value: The value to replace with\n inplace: can optionally do the operation in-place\n\n Shape:\n - Input: :math:`(N, *)` where `*` means, any number of additional\n dimensions\n - Output: :math:`(N, *)`, same shape as the input\n\n Examples::\n\n >>> m = nn.Threshold(0.1, 20)\n >>> input = Variable(torch.randn(2))\n >>> print(input)\n >>> print(m(input))\n \"\"\"\n\n def __init__(self, threshold, value, inplace=False):\n super(Threshold, self).__init__()\n self.threshold = threshold\n self.value = value\n self.inplace = inplace\n # TODO: check in THNN (if inplace == True, then assert value <= threshold)\n\n def forward(self, input):\n return F.threshold(input, self.threshold, self.value, self.inplace)\n\n def __repr__(self):\n inplace_str = ', inplace' if self.inplace else ''\n return self.__class__.__name__ + ' (' \\\n + str(self.threshold) \\\n + ', ' + str(self.value) \\\n + inplace_str + ')'\n\n\nclass ReLU(Threshold):\n \"\"\"Applies the rectified linear unit function element-wise\n :math:`{ReLU}(x)= max(0, x)`\n\n Args:\n inplace: can optionally do the operation in-place\n\n Shape:\n - Input: :math:`(N, *)` where `*` means, any number of additional\n dimensions\n - Output: :math:`(N, *)`, same shape as the input\n\n Examples::\n\n >>> m = nn.ReLU()\n >>> input = autograd.Variable(torch.randn(2))\n >>> print(input)\n >>> print(m(input))\n \"\"\"\n\n def __init__(self, inplace=False):\n super(ReLU, self).__init__(0, 0, inplace)\n\n def __repr__(self):\n inplace_str = 'inplace' if self.inplace else ''\n return self.__class__.__name__ + ' (' \\\n + inplace_str + ')'\n\n\nclass RReLU(Module):\n\n def __init__(self, lower=1. / 8, upper=1. / 3, inplace=False):\n super(RReLU, self).__init__()\n self.lower = lower\n self.upper = upper\n self.inplace = inplace\n\n def forward(self, input):\n return F.rrelu(input, self.lower, self.upper, self.training, self.inplace)\n\n def __repr__(self):\n inplace_str = ', inplace' if self.inplace else ''\n return self.__class__.__name__ + ' (' \\\n + str(self.lower) \\\n + ', ' + str(self.upper) \\\n + inplace_str + ')'\n\n\nclass Hardtanh(Module):\n \"\"\"Applies the HardTanh function element-wise\n\n HardTanh is defined as::\n\n f(x) = +1, if x > 1\n f(x) = -1, if x < -1\n f(x) = x, otherwise\n\n The range of the linear region :math:`[-1, 1]` can be adjusted\n\n Args:\n min_value: minimum value of the linear region range\n max_value: maximum value of the linear region range\n inplace: can optionally do the operation in-place\n\n Shape:\n - Input: :math:`(N, *)` where `*` means, any number of additional\n dimensions\n - Output: :math:`(N, *)`, same shape as the input\n\n Examples::\n\n >>> m = nn.HardTanh(-2, 2)\n >>> input = autograd.Variable(torch.randn(2))\n >>> print(input)\n >>> print(m(input))\n \"\"\"\n\n def __init__(self, min_value=-1, max_value=1, inplace=False):\n super(Hardtanh, self).__init__()\n self.min_val = min_value\n self.max_val = max_value\n self.inplace = inplace\n assert self.max_val > self.min_val\n\n def forward(self, input):\n return F.hardtanh(input, self.min_val, self.max_val, self.inplace)\n\n def __repr__(self):\n inplace_str = ', inplace' if self.inplace else ''\n return self.__class__.__name__ + ' (' \\\n + 'min_val=' + str(self.min_val) \\\n + ', max_val=' + str(self.max_val) \\\n + inplace_str + ')'\n\n\nclass ReLU6(Hardtanh):\n \"\"\"Applies the element-wise function :math:`{ReLU6}(x) = min(max(0,x), 6)`\n\n Args:\n inplace: can optionally do the operation in-place\n\n Shape:\n - Input: :math:`(N, *)` where `*` means, any number of additional\n dimensions\n - Output: :math:`(N, *)`, same shape as the input\n\n Examples::\n\n >>> m = nn.ReLU6()\n >>> input = autograd.Variable(torch.randn(2))\n >>> print(input)\n >>> print(m(input))\n \"\"\"\n\n def __init__(self, inplace=False):\n super(ReLU6, self).__init__(0, 6, inplace)\n\n def __repr__(self):\n inplace_str = 'inplace' if self.inplace else ''\n return self.__class__.__name__ + ' (' \\\n + inplace_str + ')'\n\n\nclass Sigmoid(Module):\n \"\"\"Applies the element-wise function :math:`f(x) = 1 / ( 1 + exp(-x))`\n\n Shape:\n - Input: :math:`(N, *)` where `*` means, any number of additional\n dimensions\n - Output: :math:`(N, *)`, same shape as the input\n\n Examples::\n\n >>> m = nn.Sigmoid()\n >>> input = autograd.Variable(torch.randn(2))\n >>> print(input)\n >>> print(m(input))\n \"\"\"\n\n def forward(self, input):\n return torch.sigmoid(input)\n\n def __repr__(self):\n return self.__class__.__name__ + ' ()'\n\n\nclass Tanh(Module):\n \"\"\"Applies element-wise,\n :math:`f(x) = (exp(x) - exp(-x)) / (exp(x) + exp(-x))`\n\n Shape:\n - Input: :math:`(N, *)` where `*` means, any number of additional\n dimensions\n - Output: :math:`(N, *)`, same shape as the input\n\n Examples::\n\n >>> m = nn.Tanh()\n >>> input = autograd.Variable(torch.randn(2))\n >>> print(input)\n >>> print(m(input))\n \"\"\"\n\n def forward(self, input):\n return torch.tanh(input)\n\n def __repr__(self):\n return self.__class__.__name__ + ' ()'\n\n\nclass ELU(Module):\n \"\"\"Applies element-wise,\n :math:`f(x) = max(0,x) + min(0, alpha * (exp(x) - 1))`\n\n Args:\n alpha: the alpha value for the ELU formulation\n inplace: can optionally do the operation in-place\n\n Shape:\n - Input: :math:`(N, *)` where `*` means, any number of additional\n dimensions\n - Output: :math:`(N, *)`, same shape as the input\n\n Examples::\n\n >>> m = nn.ELU()\n >>> input = autograd.Variable(torch.randn(2))\n >>> print(input)\n >>> print(m(input))\n \"\"\"\n\n def __init__(self, alpha=1., inplace=False):\n super(ELU, self).__init__()\n self.alpha = alpha\n self.inplace = inplace\n\n def forward(self, input):\n return F.elu(input, self.alpha, self.inplace)\n\n def __repr__(self):\n inplace_str = ', inplace' if self.inplace else ''\n return self.__class__.__name__ + ' (' \\\n + 'alpha=' + str(self.alpha) \\\n + inplace_str + ')'\n\n\nclass SELU(Module):\n \"\"\"Applies element-wise,\n :math:`f(x) = scale * (\\max(0,x) + \\min(0, alpha * (\\exp(x) - 1)))`,\n with ``alpha=1.6732632423543772848170429916717`` and\n ``scale=1.0507009873554804934193349852946``.\n\n More details can be found in the paper `Self-Normalizing Neural Networks`_ .\n\n Args:\n inplace (bool, optional): can optionally do the operation in-place\n\n Shape:\n - Input: :math:`(N, *)` where `*` means, any number of additional\n dimensions\n - Output: :math:`(N, *)`, same shape as the input\n\n Examples::\n\n >>> m = nn.SELU()\n >>> input = autograd.Variable(torch.randn(2))\n >>> print(input)\n >>> print(m(input))\n\n .. _Self-Normalizing Neural Networks: https://arxiv.org/abs/1706.02515\n \"\"\"\n\n def __init__(self, inplace=False):\n super(SELU, self).__init__()\n self.inplace = inplace\n\n def forward(self, input):\n return F.selu(input, self.inplace)\n\n def __repr__(self):\n inplace_str = ' (inplace)' if self.inplace else ''\n return self.__class__.__name__ + inplace_str\n\n\nclass GLU(Module):\n \"\"\"Applies the gated linear unit function\n :math:`{GLU}(a, b)= a \\otimes \\sigma(b)` where `a` is the first half of\n the input vector and `b` is the second half.\n\n Args:\n dim (int): the dimension on which to split the input\n\n Shape:\n - Input: :math:`(*, N, *)` where `*` means, any number of additional\n dimensions\n - Output: :math:`(*, N / 2, *)`\n\n Examples::\n\n >>> m = nn.GLU()\n >>> input = autograd.Variable(torch.randn(4, 2))\n >>> print(input)\n >>> print(m(input))\n \"\"\"\n\n def __init__(self, dim=-1):\n super(GLU, self).__init__()\n self.dim = dim\n\n def forward(self, input):\n return F.glu(input, self.dim)\n\n def __repr__(self):\n return '{} (dim={})'.format(self.__class__.__name__, self.dim)\n\n\nclass Hardshrink(Module):\n \"\"\"Applies the hard shrinkage function element-wise\n Hardshrink is defined as::\n f(x) = x, if x > lambda\n f(x) = x, if x < -lambda\n f(x) = 0, otherwise\n\n Args:\n lambd: the lambda value for the Hardshrink formulation. Default: 0.5\n\n Shape:\n - Input: :math:`(N, *)` where `*` means, any number of additional\n dimensions\n - Output: :math:`(N, *)`, same shape as the input\n\n Examples::\n\n >>> m = nn.Hardshrink()\n >>> input = autograd.Variable(torch.randn(2))\n >>> print(input)\n >>> print(m(input))\n \"\"\"\n\n def __init__(self, lambd=0.5):\n super(Hardshrink, self).__init__()\n self.lambd = lambd\n\n def forward(self, input):\n return F.hardshrink(input, self.lambd)\n\n def __repr__(self):\n return self.__class__.__name__ + ' (' \\\n + str(self.lambd) + ')'\n\n\nclass LeakyReLU(Module):\n \"\"\"Applies element-wise,\n :math:`f(x) = max(0, x) + {negative\\_slope} * min(0, x)`\n\n Args:\n negative_slope: Controls the angle of the negative slope. Default: 1e-2\n inplace: can optionally do the operation in-place\n\n Shape:\n - Input: :math:`(N, *)` where `*` means, any number of additional\n dimensions\n - Output: :math:`(N, *)`, same shape as the input\n\n Examples::\n\n >>> m = nn.LeakyReLU(0.1)\n >>> input = autograd.Variable(torch.randn(2))\n >>> print(input)\n >>> print(m(input))\n \"\"\"\n\n def __init__(self, negative_slope=1e-2, inplace=False):\n super(LeakyReLU, self).__init__()\n self.negative_slope = negative_slope\n self.inplace = inplace\n\n def forward(self, input):\n return F.leaky_relu(input, self.negative_slope, self.inplace)\n\n def __repr__(self):\n inplace_str = ', inplace' if self.inplace else ''\n return self.__class__.__name__ + ' (' \\\n + str(self.negative_slope) \\\n + inplace_str + ')'\n\n\nclass LogSigmoid(Module):\n \"\"\"Applies element-wise :math:`LogSigmoid(x) = log( 1 / (1 + exp(-x_i)))`\n\n Shape:\n - Input: :math:`(N, *)` where `*` means, any number of additional\n dimensions\n - Output: :math:`(N, *)`, same shape as the input\n\n Examples::\n\n >>> m = nn.LogSigmoid()\n >>> input = autograd.Variable(torch.randn(2))\n >>> print(input)\n >>> print(m(input))\n \"\"\"\n\n def forward(self, input):\n return F.logsigmoid(input)\n\n def __repr__(self):\n return self.__class__.__name__ + ' ()'\n\n\nclass Softplus(Module):\n \"\"\"Applies element-wise :math:`f(x) = 1/beta * log(1 + exp(beta * x_i))`\n\n SoftPlus is a smooth approximation to the ReLU function and can be used\n to constrain the output of a machine to always be positive.\n\n For numerical stability the implementation reverts to the linear function\n for inputs above a certain value.\n\n Args:\n beta: the beta value for the Softplus formulation. Default: 1\n threshold: values above this revert to a linear function. Default: 20\n\n Shape:\n - Input: :math:`(N, *)` where `*` means, any number of additional\n dimensions\n - Output: :math:`(N, *)`, same shape as the input\n\n Examples::\n\n >>> m = nn.Softplus()\n >>> input = autograd.Variable(torch.randn(2))\n >>> print(input)\n >>> print(m(input))\n \"\"\"\n\n def __init__(self, beta=1, threshold=20):\n super(Softplus, self).__init__()\n self.beta = beta\n self.threshold = threshold\n\n def forward(self, input):\n return F.softplus(input, self.beta, self.threshold)\n\n def __repr__(self):\n return self.__class__.__name__ + ' (' \\\n + 'beta=' + str(self.beta) \\\n + ', threshold=' + str(self.threshold) + ')'\n\n\nclass Softshrink(Module):\n \"\"\"Applies the soft shrinkage function elementwise\n\n SoftShrinkage operator is defined as::\n\n f(x) = x-lambda, if x > lambda > f(x) = x+lambda, if x < -lambda\n f(x) = 0, otherwise\n\n Args:\n lambd: the lambda value for the Softshrink formulation. Default: 0.5\n\n Shape:\n - Input: :math:`(N, *)` where `*` means, any number of additional\n dimensions\n - Output: :math:`(N, *)`, same shape as the input\n\n Examples::\n\n >>> m = nn.Softshrink()\n >>> input = autograd.Variable(torch.randn(2))\n >>> print(input)\n >>> print(m(input))\n \"\"\"\n\n def __init__(self, lambd=0.5):\n super(Softshrink, self).__init__()\n self.lambd = lambd\n\n def forward(self, input):\n return F.softshrink(input, self.lambd)\n\n def __repr__(self):\n return self.__class__.__name__ + ' (' \\\n + str(self.lambd) + ')'\n\n\nclass PReLU(Module):\n \"\"\"Applies element-wise the function\n :math:`PReLU(x) = max(0,x) + a * min(0,x)` Here \"a\" is a learnable\n parameter. When called without arguments, nn.PReLU() uses a single\n parameter \"a\" across all input channels. If called with nn.PReLU(nChannels),\n a separate \"a\" is used for each input channel.\n\n\n .. note::\n weight decay should not be used when learning \"a\" for good performance.\n\n Args:\n num_parameters: number of \"a\" to learn. Default: 1\n init: the initial value of \"a\". Default: 0.25\n\n Shape:\n - Input: :math:`(N, *)` where `*` means, any number of additional\n dimensions\n - Output: :math:`(N, *)`, same shape as the input\n\n Examples::\n\n >>> m = nn.PReLU()\n >>> input = autograd.Variable(torch.randn(2))\n >>> print(input)\n >>> print(m(input))\n \"\"\"\n\n def __init__(self, num_parameters=1, init=0.25):\n self.num_parameters = num_parameters\n super(PReLU, self).__init__()\n self.weight = Parameter(torch.Tensor(num_parameters).fill_(init))\n\n def forward(self, input):\n return F.prelu(input, self.weight)\n\n def __repr__(self):\n return self.__class__.__name__ + ' (' \\\n + str(self.num_parameters) + ')'\n\n\nclass Softsign(Module):\n \"\"\"Applies element-wise, the function :math:`f(x) = x / (1 + |x|)`\n\n Shape:\n - Input: :math:`(N, *)` where `*` means, any number of additional\n dimensions\n - Output: :math:`(N, *)`, same shape as the input\n\n Examples::\n\n >>> m = nn.Softsign()\n >>> input = autograd.Variable(torch.randn(2))\n >>> print(input)\n >>> print(m(input))\n \"\"\"\n\n def forward(self, input):\n return F.softsign(input)\n\n def __repr__(self):\n return self.__class__.__name__ + ' ()'\n\n\nclass Tanhshrink(Module):\n \"\"\"Applies element-wise, :math:`Tanhshrink(x) = x - Tanh(x)`\n\n Shape:\n - Input: :math:`(N, *)` where `*` means, any number of additional\n dimensions\n - Output: :math:`(N, *)`, same shape as the input\n\n Examples::\n\n >>> m = nn.Tanhshrink()\n >>> input = autograd.Variable(torch.randn(2))\n >>> print(input)\n >>> print(m(input))\n \"\"\"\n\n def forward(self, input):\n return F.tanhshrink(input)\n\n def __repr__(self):\n return self.__class__.__name__ + ' ()'\n\n\nclass Softmin(Module):\n \"\"\"Applies the Softmin function to an n-dimensional input Tensor\n rescaling them so that the elements of the n-dimensional output Tensor\n lie in the range `(0, 1)` and sum to 1\n\n :math:`f(x) = exp(-x_i - {shift}) / sum_j exp(-x_j - {shift})`\n\n where :math:`{shift} = max_i - x_i`\n\n Shape:\n - Input: :math:`(N, L)`\n - Output: :math:`(N, L)`\n\n Returns:\n a Tensor of the same dimension and shape as the input, with\n values in the range [0, 1]\n\n Examples::\n\n >>> m = nn.Softmin()\n >>> input = autograd.Variable(torch.randn(2, 3))\n >>> print(input)\n >>> print(m(input))\n \"\"\"\n\n def forward(self, input):\n return F.softmin(input)\n\n def __repr__(self):\n return self.__class__.__name__ + ' ()'\n\n\nclass Softmax(Module):\n \"\"\"Applies the Softmax function to an n-dimensional input Tensor\n rescaling them so that the elements of the n-dimensional output Tensor\n lie in the range (0,1) and sum to 1\n\n Softmax is defined as\n :math:`f_i(x) = exp(x_i - shift) / sum_j exp(x_j - shift)`\n where `shift = max_i x_i`\n\n Shape:\n - Input: :math:`(N, L)`\n - Output: :math:`(N, L)`\n\n Returns:\n a Tensor of the same dimension and shape as the input with\n values in the range [0, 1]\n\n .. note::\n This module doesn't work directly with NLLLoss,\n which expects the Log to be computed between the Softmax and itself.\n Use Logsoftmax instead (it's faster).\n\n Examples::\n\n >>> m = nn.Softmax()\n >>> input = autograd.Variable(torch.randn(2, 3))\n >>> print(input)\n >>> print(m(input))\n \"\"\"\n\n def forward(self, input):\n assert input.dim() == 2, 'Softmax requires a 2D tensor as input'\n return F.softmax(input)\n\n def __repr__(self):\n return self.__class__.__name__ + ' ()'\n\n\nclass Softmax2d(Module):\n \"\"\"Applies SoftMax over features to each spatial location\n\n When given an image of Channels x Height x Width, it will\n\n apply Softmax to each location :math:`(Channels, h_i, w_j)`\n\n Shape:\n - Input: :math:`(N, C, H, W)`\n - Output: :math:`(N, C, H, W)` (same shape as input)\n\n Returns:\n a Tensor of the same dimension and shape as the input with\n values in the range [0, 1]\n\n Examples::\n\n >>> m = nn.Softmax2d()\n >>> # you softmax over the 2nd dimension\n >>> input = autograd.Variable(torch.randn(2, 3, 12, 13))\n >>> print(input)\n >>> print(m(input))\n \"\"\"\n\n def forward(self, input):\n assert input.dim() == 4, 'Softmax2d requires a 4D tensor as input'\n return F.softmax(input)\n\n def __repr__(self):\n return self.__class__.__name__ + ' ()'\n\n\nclass LogSoftmax(Module):\n \"\"\"Applies the Log(Softmax(x)) function to an n-dimensional input Tensor.\n The LogSoftmax formulation can be simplified as\n\n :math:`f_i(x) = log(1 / a * exp(x_i))` where :math:`a = sum_j exp(x_j)`\n\n Shape:\n - Input: :math:`(N, L)`\n - Output: :math:`(N, L)`\n\n Returns:\n a Tensor of the same dimension and shape as the input with\n values in the range [-inf, 0)\n\n Examples::\n\n >>> m = nn.LogSoftmax()\n >>> input = autograd.Variable(torch.randn(2, 3))\n >>> print(input)\n >>> print(m(input))\n \"\"\"\n\n def forward(self, input):\n return F.log_softmax(input)\n\n def __repr__(self):\n return self.__class__.__name__ + ' ()'\n", "path": "torch/nn/modules/activation.py" } ]
[ { "content": "import torch\nfrom torch.nn.parameter import Parameter\n\nfrom .module import Module\nfrom .. import functional as F\n\n\nclass Threshold(Module):\n \"\"\"Thresholds each element of the input Tensor\n\n Threshold is defined as::\n\n y = x if x > threshold\n value if x <= threshold\n\n Args:\n threshold: The value to threshold at\n value: The value to replace with\n inplace: can optionally do the operation in-place\n\n Shape:\n - Input: :math:`(N, *)` where `*` means, any number of additional\n dimensions\n - Output: :math:`(N, *)`, same shape as the input\n\n Examples::\n\n >>> m = nn.Threshold(0.1, 20)\n >>> input = Variable(torch.randn(2))\n >>> print(input)\n >>> print(m(input))\n \"\"\"\n\n def __init__(self, threshold, value, inplace=False):\n super(Threshold, self).__init__()\n self.threshold = threshold\n self.value = value\n self.inplace = inplace\n # TODO: check in THNN (if inplace == True, then assert value <= threshold)\n\n def forward(self, input):\n return F.threshold(input, self.threshold, self.value, self.inplace)\n\n def __repr__(self):\n inplace_str = ', inplace' if self.inplace else ''\n return self.__class__.__name__ + ' (' \\\n + str(self.threshold) \\\n + ', ' + str(self.value) \\\n + inplace_str + ')'\n\n\nclass ReLU(Threshold):\n \"\"\"Applies the rectified linear unit function element-wise\n :math:`{ReLU}(x)= max(0, x)`\n\n Args:\n inplace: can optionally do the operation in-place\n\n Shape:\n - Input: :math:`(N, *)` where `*` means, any number of additional\n dimensions\n - Output: :math:`(N, *)`, same shape as the input\n\n Examples::\n\n >>> m = nn.ReLU()\n >>> input = autograd.Variable(torch.randn(2))\n >>> print(input)\n >>> print(m(input))\n \"\"\"\n\n def __init__(self, inplace=False):\n super(ReLU, self).__init__(0, 0, inplace)\n\n def __repr__(self):\n inplace_str = 'inplace' if self.inplace else ''\n return self.__class__.__name__ + ' (' \\\n + inplace_str + ')'\n\n\nclass RReLU(Module):\n\n def __init__(self, lower=1. / 8, upper=1. / 3, inplace=False):\n super(RReLU, self).__init__()\n self.lower = lower\n self.upper = upper\n self.inplace = inplace\n\n def forward(self, input):\n return F.rrelu(input, self.lower, self.upper, self.training, self.inplace)\n\n def __repr__(self):\n inplace_str = ', inplace' if self.inplace else ''\n return self.__class__.__name__ + ' (' \\\n + str(self.lower) \\\n + ', ' + str(self.upper) \\\n + inplace_str + ')'\n\n\nclass Hardtanh(Module):\n \"\"\"Applies the HardTanh function element-wise\n\n HardTanh is defined as::\n\n f(x) = +1, if x > 1\n f(x) = -1, if x < -1\n f(x) = x, otherwise\n\n The range of the linear region :math:`[-1, 1]` can be adjusted\n\n Args:\n min_value: minimum value of the linear region range\n max_value: maximum value of the linear region range\n inplace: can optionally do the operation in-place\n\n Shape:\n - Input: :math:`(N, *)` where `*` means, any number of additional\n dimensions\n - Output: :math:`(N, *)`, same shape as the input\n\n Examples::\n\n >>> m = nn.HardTanh(-2, 2)\n >>> input = autograd.Variable(torch.randn(2))\n >>> print(input)\n >>> print(m(input))\n \"\"\"\n\n def __init__(self, min_value=-1, max_value=1, inplace=False):\n super(Hardtanh, self).__init__()\n self.min_val = min_value\n self.max_val = max_value\n self.inplace = inplace\n assert self.max_val > self.min_val\n\n def forward(self, input):\n return F.hardtanh(input, self.min_val, self.max_val, self.inplace)\n\n def __repr__(self):\n inplace_str = ', inplace' if self.inplace else ''\n return self.__class__.__name__ + ' (' \\\n + 'min_val=' + str(self.min_val) \\\n + ', max_val=' + str(self.max_val) \\\n + inplace_str + ')'\n\n\nclass ReLU6(Hardtanh):\n \"\"\"Applies the element-wise function :math:`{ReLU6}(x) = min(max(0,x), 6)`\n\n Args:\n inplace: can optionally do the operation in-place\n\n Shape:\n - Input: :math:`(N, *)` where `*` means, any number of additional\n dimensions\n - Output: :math:`(N, *)`, same shape as the input\n\n Examples::\n\n >>> m = nn.ReLU6()\n >>> input = autograd.Variable(torch.randn(2))\n >>> print(input)\n >>> print(m(input))\n \"\"\"\n\n def __init__(self, inplace=False):\n super(ReLU6, self).__init__(0, 6, inplace)\n\n def __repr__(self):\n inplace_str = 'inplace' if self.inplace else ''\n return self.__class__.__name__ + ' (' \\\n + inplace_str + ')'\n\n\nclass Sigmoid(Module):\n \"\"\"Applies the element-wise function :math:`f(x) = 1 / ( 1 + exp(-x))`\n\n Shape:\n - Input: :math:`(N, *)` where `*` means, any number of additional\n dimensions\n - Output: :math:`(N, *)`, same shape as the input\n\n Examples::\n\n >>> m = nn.Sigmoid()\n >>> input = autograd.Variable(torch.randn(2))\n >>> print(input)\n >>> print(m(input))\n \"\"\"\n\n def forward(self, input):\n return torch.sigmoid(input)\n\n def __repr__(self):\n return self.__class__.__name__ + ' ()'\n\n\nclass Tanh(Module):\n \"\"\"Applies element-wise,\n :math:`f(x) = (exp(x) - exp(-x)) / (exp(x) + exp(-x))`\n\n Shape:\n - Input: :math:`(N, *)` where `*` means, any number of additional\n dimensions\n - Output: :math:`(N, *)`, same shape as the input\n\n Examples::\n\n >>> m = nn.Tanh()\n >>> input = autograd.Variable(torch.randn(2))\n >>> print(input)\n >>> print(m(input))\n \"\"\"\n\n def forward(self, input):\n return torch.tanh(input)\n\n def __repr__(self):\n return self.__class__.__name__ + ' ()'\n\n\nclass ELU(Module):\n \"\"\"Applies element-wise,\n :math:`f(x) = max(0,x) + min(0, alpha * (exp(x) - 1))`\n\n Args:\n alpha: the alpha value for the ELU formulation\n inplace: can optionally do the operation in-place\n\n Shape:\n - Input: :math:`(N, *)` where `*` means, any number of additional\n dimensions\n - Output: :math:`(N, *)`, same shape as the input\n\n Examples::\n\n >>> m = nn.ELU()\n >>> input = autograd.Variable(torch.randn(2))\n >>> print(input)\n >>> print(m(input))\n \"\"\"\n\n def __init__(self, alpha=1., inplace=False):\n super(ELU, self).__init__()\n self.alpha = alpha\n self.inplace = inplace\n\n def forward(self, input):\n return F.elu(input, self.alpha, self.inplace)\n\n def __repr__(self):\n inplace_str = ', inplace' if self.inplace else ''\n return self.__class__.__name__ + ' (' \\\n + 'alpha=' + str(self.alpha) \\\n + inplace_str + ')'\n\n\nclass SELU(Module):\n \"\"\"Applies element-wise,\n :math:`f(x) = scale * (\\max(0,x) + \\min(0, alpha * (\\exp(x) - 1)))`,\n with ``alpha=1.6732632423543772848170429916717`` and\n ``scale=1.0507009873554804934193349852946``.\n\n More details can be found in the paper `Self-Normalizing Neural Networks`_ .\n\n Args:\n inplace (bool, optional): can optionally do the operation in-place\n\n Shape:\n - Input: :math:`(N, *)` where `*` means, any number of additional\n dimensions\n - Output: :math:`(N, *)`, same shape as the input\n\n Examples::\n\n >>> m = nn.SELU()\n >>> input = autograd.Variable(torch.randn(2))\n >>> print(input)\n >>> print(m(input))\n\n .. _Self-Normalizing Neural Networks: https://arxiv.org/abs/1706.02515\n \"\"\"\n\n def __init__(self, inplace=False):\n super(SELU, self).__init__()\n self.inplace = inplace\n\n def forward(self, input):\n return F.selu(input, self.inplace)\n\n def __repr__(self):\n inplace_str = ' (inplace)' if self.inplace else ''\n return self.__class__.__name__ + inplace_str\n\n\nclass GLU(Module):\n \"\"\"Applies the gated linear unit function\n :math:`{GLU}(a, b)= a \\otimes \\sigma(b)` where `a` is the first half of\n the input vector and `b` is the second half.\n\n Args:\n dim (int): the dimension on which to split the input\n\n Shape:\n - Input: :math:`(*, N, *)` where `*` means, any number of additional\n dimensions\n - Output: :math:`(*, N / 2, *)`\n\n Examples::\n\n >>> m = nn.GLU()\n >>> input = autograd.Variable(torch.randn(4, 2))\n >>> print(input)\n >>> print(m(input))\n \"\"\"\n\n def __init__(self, dim=-1):\n super(GLU, self).__init__()\n self.dim = dim\n\n def forward(self, input):\n return F.glu(input, self.dim)\n\n def __repr__(self):\n return '{} (dim={})'.format(self.__class__.__name__, self.dim)\n\n\nclass Hardshrink(Module):\n \"\"\"Applies the hard shrinkage function element-wise\n Hardshrink is defined as::\n f(x) = x, if x > lambda\n f(x) = x, if x < -lambda\n f(x) = 0, otherwise\n\n Args:\n lambd: the lambda value for the Hardshrink formulation. Default: 0.5\n\n Shape:\n - Input: :math:`(N, *)` where `*` means, any number of additional\n dimensions\n - Output: :math:`(N, *)`, same shape as the input\n\n Examples::\n\n >>> m = nn.Hardshrink()\n >>> input = autograd.Variable(torch.randn(2))\n >>> print(input)\n >>> print(m(input))\n \"\"\"\n\n def __init__(self, lambd=0.5):\n super(Hardshrink, self).__init__()\n self.lambd = lambd\n\n def forward(self, input):\n return F.hardshrink(input, self.lambd)\n\n def __repr__(self):\n return self.__class__.__name__ + ' (' \\\n + str(self.lambd) + ')'\n\n\nclass LeakyReLU(Module):\n \"\"\"Applies element-wise,\n :math:`f(x) = max(0, x) + {negative\\_slope} * min(0, x)`\n\n Args:\n negative_slope: Controls the angle of the negative slope. Default: 1e-2\n inplace: can optionally do the operation in-place\n\n Shape:\n - Input: :math:`(N, *)` where `*` means, any number of additional\n dimensions\n - Output: :math:`(N, *)`, same shape as the input\n\n Examples::\n\n >>> m = nn.LeakyReLU(0.1)\n >>> input = autograd.Variable(torch.randn(2))\n >>> print(input)\n >>> print(m(input))\n \"\"\"\n\n def __init__(self, negative_slope=1e-2, inplace=False):\n super(LeakyReLU, self).__init__()\n self.negative_slope = negative_slope\n self.inplace = inplace\n\n def forward(self, input):\n return F.leaky_relu(input, self.negative_slope, self.inplace)\n\n def __repr__(self):\n inplace_str = ', inplace' if self.inplace else ''\n return self.__class__.__name__ + ' (' \\\n + str(self.negative_slope) \\\n + inplace_str + ')'\n\n\nclass LogSigmoid(Module):\n \"\"\"Applies element-wise :math:`LogSigmoid(x) = log( 1 / (1 + exp(-x_i)))`\n\n Shape:\n - Input: :math:`(N, *)` where `*` means, any number of additional\n dimensions\n - Output: :math:`(N, *)`, same shape as the input\n\n Examples::\n\n >>> m = nn.LogSigmoid()\n >>> input = autograd.Variable(torch.randn(2))\n >>> print(input)\n >>> print(m(input))\n \"\"\"\n\n def forward(self, input):\n return F.logsigmoid(input)\n\n def __repr__(self):\n return self.__class__.__name__ + ' ()'\n\n\nclass Softplus(Module):\n \"\"\"Applies element-wise :math:`f(x) = 1/beta * log(1 + exp(beta * x_i))`\n\n SoftPlus is a smooth approximation to the ReLU function and can be used\n to constrain the output of a machine to always be positive.\n\n For numerical stability the implementation reverts to the linear function\n for inputs above a certain value.\n\n Args:\n beta: the beta value for the Softplus formulation. Default: 1\n threshold: values above this revert to a linear function. Default: 20\n\n Shape:\n - Input: :math:`(N, *)` where `*` means, any number of additional\n dimensions\n - Output: :math:`(N, *)`, same shape as the input\n\n Examples::\n\n >>> m = nn.Softplus()\n >>> input = autograd.Variable(torch.randn(2))\n >>> print(input)\n >>> print(m(input))\n \"\"\"\n\n def __init__(self, beta=1, threshold=20):\n super(Softplus, self).__init__()\n self.beta = beta\n self.threshold = threshold\n\n def forward(self, input):\n return F.softplus(input, self.beta, self.threshold)\n\n def __repr__(self):\n return self.__class__.__name__ + ' (' \\\n + 'beta=' + str(self.beta) \\\n + ', threshold=' + str(self.threshold) + ')'\n\n\nclass Softshrink(Module):\n \"\"\"Applies the soft shrinkage function elementwise\n\n SoftShrinkage operator is defined as::\n\n f(x) = x-lambda, if x > lambda > f(x) = x+lambda, if x < -lambda\n f(x) = 0, otherwise\n\n Args:\n lambd: the lambda value for the Softshrink formulation. Default: 0.5\n\n Shape:\n - Input: :math:`(N, *)` where `*` means, any number of additional\n dimensions\n - Output: :math:`(N, *)`, same shape as the input\n\n Examples::\n\n >>> m = nn.Softshrink()\n >>> input = autograd.Variable(torch.randn(2))\n >>> print(input)\n >>> print(m(input))\n \"\"\"\n\n def __init__(self, lambd=0.5):\n super(Softshrink, self).__init__()\n self.lambd = lambd\n\n def forward(self, input):\n return F.softshrink(input, self.lambd)\n\n def __repr__(self):\n return self.__class__.__name__ + ' (' \\\n + str(self.lambd) + ')'\n\n\nclass PReLU(Module):\n \"\"\"Applies element-wise the function\n :math:`PReLU(x) = max(0,x) + a * min(0,x)` Here \"a\" is a learnable\n parameter. When called without arguments, nn.PReLU() uses a single\n parameter \"a\" across all input channels. If called with nn.PReLU(nChannels),\n a separate \"a\" is used for each input channel.\n\n\n .. note::\n weight decay should not be used when learning \"a\" for good performance.\n\n Args:\n num_parameters: number of \"a\" to learn. Default: 1\n init: the initial value of \"a\". Default: 0.25\n\n Shape:\n - Input: :math:`(N, *)` where `*` means, any number of additional\n dimensions\n - Output: :math:`(N, *)`, same shape as the input\n\n Examples::\n\n >>> m = nn.PReLU()\n >>> input = autograd.Variable(torch.randn(2))\n >>> print(input)\n >>> print(m(input))\n \"\"\"\n\n def __init__(self, num_parameters=1, init=0.25):\n self.num_parameters = num_parameters\n super(PReLU, self).__init__()\n self.weight = Parameter(torch.Tensor(num_parameters).fill_(init))\n\n def forward(self, input):\n return F.prelu(input, self.weight)\n\n def __repr__(self):\n return self.__class__.__name__ + ' (' \\\n + str(self.num_parameters) + ')'\n\n\nclass Softsign(Module):\n \"\"\"Applies element-wise, the function :math:`f(x) = x / (1 + |x|)`\n\n Shape:\n - Input: :math:`(N, *)` where `*` means, any number of additional\n dimensions\n - Output: :math:`(N, *)`, same shape as the input\n\n Examples::\n\n >>> m = nn.Softsign()\n >>> input = autograd.Variable(torch.randn(2))\n >>> print(input)\n >>> print(m(input))\n \"\"\"\n\n def forward(self, input):\n return F.softsign(input)\n\n def __repr__(self):\n return self.__class__.__name__ + ' ()'\n\n\nclass Tanhshrink(Module):\n \"\"\"Applies element-wise, :math:`Tanhshrink(x) = x - Tanh(x)`\n\n Shape:\n - Input: :math:`(N, *)` where `*` means, any number of additional\n dimensions\n - Output: :math:`(N, *)`, same shape as the input\n\n Examples::\n\n >>> m = nn.Tanhshrink()\n >>> input = autograd.Variable(torch.randn(2))\n >>> print(input)\n >>> print(m(input))\n \"\"\"\n\n def forward(self, input):\n return F.tanhshrink(input)\n\n def __repr__(self):\n return self.__class__.__name__ + ' ()'\n\n\nclass Softmin(Module):\n \"\"\"Applies the Softmin function to an n-dimensional input Tensor\n rescaling them so that the elements of the n-dimensional output Tensor\n lie in the range `(0, 1)` and sum to 1\n\n :math:`f(x) = exp(-x_i - {shift}) / sum_j exp(-x_j - {shift})`\n\n where :math:`{shift} = max_i - x_i`\n\n Shape:\n - Input: :math:`(N, L)`\n - Output: :math:`(N, L)`\n\n Returns:\n a Tensor of the same dimension and shape as the input, with\n values in the range [0, 1]\n\n Examples::\n\n >>> m = nn.Softmin()\n >>> input = autograd.Variable(torch.randn(2, 3))\n >>> print(input)\n >>> print(m(input))\n \"\"\"\n\n def forward(self, input):\n return F.softmin(input)\n\n def __repr__(self):\n return self.__class__.__name__ + ' ()'\n\n\nclass Softmax(Module):\n \"\"\"Applies the Softmax function to an n-dimensional input Tensor\n rescaling them so that the elements of the n-dimensional output Tensor\n lie in the range (0,1) and sum to 1\n\n Softmax is defined as\n :math:`f_i(x) = exp(x_i - shift) / sum_j exp(x_j - shift)`\n where `shift = max_i x_i`\n\n Shape:\n - Input: :math:`(N, L)`\n - Output: :math:`(N, L)`\n\n Returns:\n a Tensor of the same dimension and shape as the input with\n values in the range [0, 1]\n\n .. note::\n This module doesn't work directly with NLLLoss,\n which expects the Log to be computed between the Softmax and itself.\n Use Logsoftmax instead (it's faster).\n\n Examples::\n\n >>> m = nn.Softmax()\n >>> input = autograd.Variable(torch.randn(2, 3))\n >>> print(input)\n >>> print(m(input))\n \"\"\"\n\n def forward(self, input):\n assert input.dim() == 2, 'Softmax requires a 2D tensor as input'\n return F.softmax(input)\n\n def __repr__(self):\n return self.__class__.__name__ + ' ()'\n\n\nclass Softmax2d(Module):\n \"\"\"Applies SoftMax over features to each spatial location\n\n When given an image of Channels x Height x Width, it will\n\n apply Softmax to each location :math:`(Channels, h_i, w_j)`\n\n Shape:\n - Input: :math:`(N, C, H, W)`\n - Output: :math:`(N, C, H, W)` (same shape as input)\n\n Returns:\n a Tensor of the same dimension and shape as the input with\n values in the range [0, 1]\n\n Examples::\n\n >>> m = nn.Softmax2d()\n >>> # you softmax over the 2nd dimension\n >>> input = autograd.Variable(torch.randn(2, 3, 12, 13))\n >>> print(input)\n >>> print(m(input))\n \"\"\"\n\n def forward(self, input):\n assert input.dim() == 4, 'Softmax2d requires a 4D tensor as input'\n return F.softmax(input)\n\n def __repr__(self):\n return self.__class__.__name__ + ' ()'\n\n\nclass LogSoftmax(Module):\n \"\"\"Applies the Log(Softmax(x)) function to an n-dimensional input Tensor.\n The LogSoftmax formulation can be simplified as\n\n :math:`f_i(x) = log(1 / a * exp(x_i))` where :math:`a = sum_j exp(x_j)`\n\n Shape:\n - Input: :math:`(N, L)`\n - Output: :math:`(N, L)`\n\n Returns:\n a Tensor of the same dimension and shape as the input with\n values in the range [-inf, 0)\n\n Examples::\n\n >>> m = nn.LogSoftmax()\n >>> input = autograd.Variable(torch.randn(2, 3))\n >>> print(input)\n >>> print(m(input))\n \"\"\"\n\n def forward(self, input):\n return F.log_softmax(input)\n\n def __repr__(self):\n return self.__class__.__name__ + ' ()'\n", "path": "torch/nn/modules/activation.py" } ]
diff --git a/torch/nn/modules/activation.py b/torch/nn/modules/activation.py index eb062a7b5c95c3..faaf6d12eebb9d 100644 --- a/torch/nn/modules/activation.py +++ b/torch/nn/modules/activation.py @@ -10,8 +10,8 @@ class Threshold(Module): Threshold is defined as:: - y = x if x >= threshold - value if x < threshold + y = x if x > threshold + value if x <= threshold Args: threshold: The value to threshold at
fidals__shopelectro-456
Repeat stb's fix for front_build dir https://github.com/fidals/stroyprombeton/pull/270
[ { "content": "\"\"\"\nDjango settings for shopelectro project.\n\nGenerated by 'django-admin startproject' using Django 1.9.5.\n\nFor more information on this file, see\nhttps://docs.djangoproject.com/en/1.9/topics/settings/\n\nFor the full list of settings and their values, see\nhttps://docs.djangoproject.com/en/1.9/ref/settings/\n\"\"\"\n\nimport os\nfrom datetime import datetime\n\n# Build paths inside the project like this: os.path.join(BASE_DIR, ...)\nBASE_DIR = os.path.dirname(os.path.dirname(\n os.path.dirname(os.path.abspath(__file__))))\n\n# Quick-start development settings - unsuitable for production\n# See https://docs.djangoproject.com/en/1.9/howto/deployment/checklist/\n\n# SECURITY WARNING: keep the secret key used in production secret!\nSECRET_KEY = os.environ.get('SECRET_KEY', 'so_secret_key')\n\n# SECURITY WARNING: don't run with debug turned on in production!\nDEBUG = True\n\n# http://bit.ly/sorl-thumbnail-docs\nTHUMBNAIL_DEBUG = False\n\nALLOWED_HOSTS = ['*']\n\nif os.environ.get('TEST_ENV', False):\n # disable https in CI\n # https://docs.djangoproject.com/en/1.9/ref/settings/#secure-proxy-ssl-header\n SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'http')\n\n# Enable in frame loading for Ya.Metric\n# https://docs.djangoproject.com/es/1.10/ref/clickjacking/\n# https://yandex.ru/support/metrika/general/counter-webvisor.xml#download-page\nX_FRAME_OPTIONS = 'ALLOW-FROM http://webvisor.com'\n\n# Application definition\nINSTALLED_APPS = [\n # https://docs.djangoproject.com/en/1.9/ref/contrib/admin/#django.contrib.admin.autodiscover\n 'django.contrib.contenttypes',\n 'django.contrib.auth',\n 'django.contrib.messages',\n 'django.contrib.redirects',\n 'django.contrib.sessions',\n 'django.contrib.sitemaps',\n 'django.contrib.sites',\n 'django.contrib.staticfiles',\n 'django.contrib.humanize',\n 'django_user_agents',\n 'generic_admin',\n 'django.contrib.admin.apps.SimpleAdminConfig',\n 'debug_toolbar',\n 'mptt',\n 'widget_tweaks',\n 'sorl.thumbnail',\n 'django_select2',\n 'images',\n 'refarm_redirects',\n 'pages',\n 'catalog',\n 'search',\n 'ecommerce',\n 'shopelectro',\n]\n\nMIDDLEWARE = [\n 'django.middleware.security.SecurityMiddleware',\n 'django.contrib.sessions.middleware.SessionMiddleware',\n 'django.middleware.common.CommonMiddleware',\n 'django.middleware.csrf.CsrfViewMiddleware',\n 'django.contrib.auth.middleware.AuthenticationMiddleware',\n 'django.contrib.auth.middleware.SessionAuthenticationMiddleware',\n 'django.contrib.messages.middleware.MessageMiddleware',\n 'django.middleware.clickjacking.XFrameOptionsMiddleware',\n 'django.middleware.locale.LocaleMiddleware',\n 'django_user_agents.middleware.UserAgentMiddleware',\n 'debug_toolbar.middleware.DebugToolbarMiddleware',\n 'refarm_redirects.middleware.RedirectAllMiddleware',\n]\n\nROOT_URLCONF = 'shopelectro.urls'\n\nTEMPLATE_DIR = os.path.join(BASE_DIR, 'templates')\nTEMPLATES = [\n {\n 'BACKEND': 'django.template.backends.django.DjangoTemplates',\n 'DIRS': [TEMPLATE_DIR],\n 'APP_DIRS': True,\n 'OPTIONS': {\n 'context_processors': [\n 'django.template.context_processors.debug',\n 'django.template.context_processors.media',\n 'django.template.context_processors.request',\n 'django.template.context_processors.static',\n 'django.contrib.auth.context_processors.auth',\n 'django.contrib.messages.context_processors.messages',\n 'ecommerce.context_processors.cart',\n 'shopelectro.context_processors.shop',\n ],\n },\n },\n]\n\nWSGI_APPLICATION = 'shopelectro.wsgi.application'\n\n# Password validation\n# https://docs.djangoproject.com/en/1.9/ref/settings/#auth-password-validators\n\nAUTH_PASSWORD_VALIDATORS = [\n {\n 'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',\n },\n {\n 'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',\n },\n {\n 'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',\n },\n {\n 'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',\n },\n]\n\n# Internationalization\n# https://docs.djangoproject.com/en/1.9/topics/i18n/\n\nLOCALE_NAME = 'en_US'\nTIME_ZONE = 'UTC'\n\nUSE_I18N = True\nUSE_L10N = True\nUSE_TZ = True\n\nLOCALE_PATHS = [os.path.join(BASE_DIR, 'shopelectro/locale')]\nFORMAT_MODULE_PATH = [\n 'shopelectro.formats',\n]\n\n# Static files (CSS, JavaScript, Images)\n# https://docs.djangoproject.com/en/1.9/howto/static-files/\nSTATIC_URL = '/static/'\nSTATIC_ROOT = os.path.join(BASE_DIR, 'static')\nASSETS_DIR = os.path.join(BASE_DIR, 'assets')\n\nSTATICFILES_STORAGE = 'django.contrib.staticfiles.storage.ManifestStaticFilesStorage'\n\nSTATICFILES_DIRS = [\n os.path.join(BASE_DIR, 'front/build'),\n ASSETS_DIR,\n]\n\nMEDIA_URL = '/media/'\nMEDIA_ROOT = os.path.join(BASE_DIR, 'media')\n\nDATABASE_URL = os.environ[\"POSTGRES_URL\"]\n\n# to activate django connections pool for persistent connections.\n# https://docs.djangoproject.com/en/1.11/ref/databases/#persistent-connections\nCONN_MAX_AGE = None\n\nDATABASES = {\n 'default': {\n 'ENGINE': 'django.db.backends.postgresql',\n 'NAME': os.environ['POSTGRES_DB'],\n 'USER': os.environ['POSTGRES_USER'],\n 'PASSWORD': os.environ['POSTGRES_PASSWORD'],\n 'HOST': os.environ['POSTGRES_URL'],\n 'PORT': '5432',\n }\n}\n\nLOGGING = {\n 'version': 1,\n 'disable_existing_loggers': False,\n 'handlers': {\n 'console': {\n 'class': 'logging.StreamHandler',\n },\n },\n 'loggers': {\n 'django': {\n 'handlers': ['console'],\n 'level': os.getenv('DJANGO_LOG_LEVEL', 'INFO'),\n },\n 'pages': {\n 'handlers': ['console'],\n 'level': os.getenv('DJANGO_LOG_LEVEL', 'INFO'),\n },\n 'catalog': {\n 'handlers': ['console'],\n 'level': os.getenv('DJANGO_LOG_LEVEL', 'INFO'),\n },\n 'search': {\n 'handlers': ['console'],\n 'level': os.getenv('DJANGO_LOG_LEVEL', 'INFO'),\n },\n 'ecommerce': {\n 'handlers': ['console'],\n 'level': os.getenv('DJANGO_LOG_LEVEL', 'INFO'),\n },\n 'images': {\n 'handlers': ['console'],\n 'level': os.getenv('DJANGO_LOG_LEVEL', 'INFO'),\n },\n 'shopelectro': {\n 'handlers': ['console'],\n 'level': os.getenv('DJANGO_LOG_LEVEL', 'INFO'),\n },\n },\n}\n\nSELENIUM_URL = os.environ.get('SELENIUM_URL', 'http://selenium:4444/wd/hub')\nSELENIUM_WAIT_SECONDS = int(os.environ['SELENIUM_WAIT_SECONDS'])\nSELENIUM_TIMEOUT_SECONDS = int(os.environ['SELENIUM_TIMEOUT_SECONDS'])\n\nSITE_CREATED = datetime(2013, 1, 1)\n\nLOCALHOST = 'http://127.0.0.1:8000/'\nBASE_URL = 'https://www.shopelectro.ru'\n\nPLACEHOLDER_IMAGE = 'images/logo.png'\nPLACEHOLDER_ALT = 'Логотип компании Shopelectro'\n\n# Autocomplete and search settings\nSEARCH_SEE_ALL_LABEL = 'Смотреть все результаты'\n\n# For sitemaps and sites framework\nSITE_ID = 1\nSITE_DOMAIN_NAME = 'www.shopelectro.ru'\n\n# Used to retrieve instances in ecommerce.Cart\nCART_ID = 'cart'\n\n# Used to define choices attr in definition of Order.payment_type field\nPAYMENT_OPTIONS = (\n ('cash', 'Наличные'),\n ('cashless', 'Безналичные и денежные переводы'),\n ('AC', 'Банковская карта'),\n ('PC', 'Яндекс.Деньги'),\n ('GP', 'Связной (терминал)'),\n ('AB', 'Альфа-Клик'),\n)\n\n# It is fake-pass. Correct pass will be created on `docker-compose up` stage from `docker/.env`\nYANDEX_SHOP_PASS = os.environ.get('YANDEX_SHOP_PASS', 'so_secret_pass')\n\n# Used for order's email in ecommerce app\nFAKE_ORDER_NUMBER = 6000\n\n# Subjects for different types of emails sent from SE.\nEMAIL_SUBJECTS = {\n 'call': 'Обратный звонок',\n 'order': 'Заказ №{0.fake_order_number}',\n 'yandex_order': 'Заказ №{0.fake_order_number} | Яндекс.Касса',\n 'one_click': 'Заказ в один клик №{0.fake_order_number}',\n 'ya_feedback_request': 'Оцените нас на Яндекс.Маркете',\n}\n\n# Email configs\n# It is fake-pass. Correct pass will be created on `docker-compose up` stage from `docker/.env`\nEMAIL_HOST_PASSWORD = os.environ.get('EMAIL_HOST_PASSWORD', 'so_secret_pass')\nEMAIL_HOST_USER = '[email protected]'\nEMAIL_USE_TLS = True\nEMAIL_HOST = 'smtp.yandex.ru'\nEMAIL_PORT = 587\nEMAIL_SENDER = '[email protected]'\nEMAIL_RECIPIENTS = os.environ.get('EMAIL_RECIPIENTS', '[email protected]').split(',')\n\n# FTP configs\nFTP_USER = os.environ.get('FTP_USER', 'user')\nFTP_PASS = os.environ.get('FTP_PASS', 'pass')\nFTP_IP = os.environ.get('FTP_IP', '0.0.0.0')\n\nENV_TYPE = os.environ.get('ENV_TYPE', 'PROD') # LOCAL | CI | PROD\n\n# 'Prod' <-> 'Product #1 of Category #0 of Category #1' = 0.17\n# About trigram similarity: https://goo.gl/uYFcxN\nTRIGRAM_MIN_SIMILARITY = 0.15\n\n# Used in admin image uploads\nMODEL_TYPES = {\n 'Product': {\n 'app_name': 'shopelectro',\n 'dir_name': 'products',\n },\n 'Category': {\n 'app_name': 'shopelectro',\n 'dir_name': 'categories',\n }\n}\n\n# This need for using {% debug %} variable in templates.\nINTERNAL_IPS = (\n '127.0.0.1',\n)\n\nTOP_PRODUCTS = [291, 438, 1137, 2166, 2725, 2838, 3288, 3884, 3959, 2764]\nCATEGORY_STEP_MULTIPLIERS = [12, 15, 24, 25, 48, 50, 60, 100]\n\n# Reduce retail product prices by PRICE_REDUCER.\n# It is required to make prices on shopelectro.ru and se78.ru unique.\nPRICE_REDUCER = 1\n\nSHOP = {\n 'id': '69886',\n 'scid': '64788',\n 'success_url': BASE_URL + '/shop/order-success/',\n 'fail_url': BASE_URL + '/',\n 'cps_phone': '+78124163200',\n 'cps_email': '[email protected]',\n 'local_delivery_cost': 300,\n 'local_delivery_cost_threshold': 5000,\n}\n\n\ndef get_robots_content():\n with open(os.path.join(TEMPLATE_DIR, 'robots.txt')) as robots_file:\n return robots_file.read()\n\n# used in data-migrations and tests\nCUSTOM_PAGES = {\n 'index': {\n 'slug': '',\n 'name': 'Интернет-магазин элементов питания \"ShopElectro\"',\n 'menu_title': 'Главная',\n 'title': 'Интернет-магазин Элементов питания с доставкой по России',\n },\n 'sitemap': {\n 'slug': 'sitemap',\n 'h1': 'Карта сайта',\n 'name': 'Карта сайта',\n },\n 'order': {\n 'slug': 'order',\n 'name': 'Оформление заказа',\n 'title': 'Корзина Интернет-магазин shopelectro.ru Санкт-Петербург',\n },\n 'search': {\n 'slug': 'search',\n 'name': 'Результаты поиска',\n },\n 'catalog': {\n 'slug': 'catalog',\n 'name': 'Каталог товаров',\n 'menu_title': 'Каталог',\n },\n 'order_success': {\n 'slug': 'order-success',\n 'name': 'Заказ принят',\n },\n 'robots': {\n 'slug': 'robots.txt',\n 'content': get_robots_content(),\n },\n}\n\nTAGS_URL_DELIMITER = '-or-'\nTAG_GROUPS_URL_DELIMITER = '-and-'\n\nTAGS_TITLE_DELIMITER = ' или '\nTAG_GROUPS_TITLE_DELIMITER = ' и '\n\nTAGS_ORDER = ['group__position', 'group__name', 'position', 'name']\n\n# -- App business logic --\n# every product price will be multiplied on this value\n# during import from 1C.\n# Multipliers are related to prices in this order:\n# big/medium/small/retail. First three are wholesale prices.\nPRICE_MULTIPLIERS = 1.0, 1.0, 1.0, 1.0\n", "path": "shopelectro/settings/base.py" } ]
[ { "content": "\"\"\"\nDjango settings for shopelectro project.\n\nGenerated by 'django-admin startproject' using Django 1.9.5.\n\nFor more information on this file, see\nhttps://docs.djangoproject.com/en/1.9/topics/settings/\n\nFor the full list of settings and their values, see\nhttps://docs.djangoproject.com/en/1.9/ref/settings/\n\"\"\"\n\nimport os\nfrom datetime import datetime\n\n# Build paths inside the project like this: os.path.join(BASE_DIR, ...)\nBASE_DIR = os.path.dirname(os.path.dirname(\n os.path.dirname(os.path.abspath(__file__))))\n\n# Quick-start development settings - unsuitable for production\n# See https://docs.djangoproject.com/en/1.9/howto/deployment/checklist/\n\n# SECURITY WARNING: keep the secret key used in production secret!\nSECRET_KEY = os.environ.get('SECRET_KEY', 'so_secret_key')\n\n# SECURITY WARNING: don't run with debug turned on in production!\nDEBUG = True\n\n# http://bit.ly/sorl-thumbnail-docs\nTHUMBNAIL_DEBUG = False\n\nALLOWED_HOSTS = ['*']\n\nif os.environ.get('TEST_ENV', False):\n # disable https in CI\n # https://docs.djangoproject.com/en/1.9/ref/settings/#secure-proxy-ssl-header\n SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'http')\n\n# Enable in frame loading for Ya.Metric\n# https://docs.djangoproject.com/es/1.10/ref/clickjacking/\n# https://yandex.ru/support/metrika/general/counter-webvisor.xml#download-page\nX_FRAME_OPTIONS = 'ALLOW-FROM http://webvisor.com'\n\n# Application definition\nINSTALLED_APPS = [\n # https://docs.djangoproject.com/en/1.9/ref/contrib/admin/#django.contrib.admin.autodiscover\n 'django.contrib.contenttypes',\n 'django.contrib.auth',\n 'django.contrib.messages',\n 'django.contrib.redirects',\n 'django.contrib.sessions',\n 'django.contrib.sitemaps',\n 'django.contrib.sites',\n 'django.contrib.staticfiles',\n 'django.contrib.humanize',\n 'django_user_agents',\n 'generic_admin',\n 'django.contrib.admin.apps.SimpleAdminConfig',\n 'debug_toolbar',\n 'mptt',\n 'widget_tweaks',\n 'sorl.thumbnail',\n 'django_select2',\n 'images',\n 'refarm_redirects',\n 'pages',\n 'catalog',\n 'search',\n 'ecommerce',\n 'shopelectro',\n]\n\nMIDDLEWARE = [\n 'django.middleware.security.SecurityMiddleware',\n 'django.contrib.sessions.middleware.SessionMiddleware',\n 'django.middleware.common.CommonMiddleware',\n 'django.middleware.csrf.CsrfViewMiddleware',\n 'django.contrib.auth.middleware.AuthenticationMiddleware',\n 'django.contrib.auth.middleware.SessionAuthenticationMiddleware',\n 'django.contrib.messages.middleware.MessageMiddleware',\n 'django.middleware.clickjacking.XFrameOptionsMiddleware',\n 'django.middleware.locale.LocaleMiddleware',\n 'django_user_agents.middleware.UserAgentMiddleware',\n 'debug_toolbar.middleware.DebugToolbarMiddleware',\n 'refarm_redirects.middleware.RedirectAllMiddleware',\n]\n\nROOT_URLCONF = 'shopelectro.urls'\n\nTEMPLATE_DIR = os.path.join(BASE_DIR, 'templates')\nTEMPLATES = [\n {\n 'BACKEND': 'django.template.backends.django.DjangoTemplates',\n 'DIRS': [TEMPLATE_DIR],\n 'APP_DIRS': True,\n 'OPTIONS': {\n 'context_processors': [\n 'django.template.context_processors.debug',\n 'django.template.context_processors.media',\n 'django.template.context_processors.request',\n 'django.template.context_processors.static',\n 'django.contrib.auth.context_processors.auth',\n 'django.contrib.messages.context_processors.messages',\n 'ecommerce.context_processors.cart',\n 'shopelectro.context_processors.shop',\n ],\n },\n },\n]\n\nWSGI_APPLICATION = 'shopelectro.wsgi.application'\n\n# Password validation\n# https://docs.djangoproject.com/en/1.9/ref/settings/#auth-password-validators\n\nAUTH_PASSWORD_VALIDATORS = [\n {\n 'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',\n },\n {\n 'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',\n },\n {\n 'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',\n },\n {\n 'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',\n },\n]\n\n# Internationalization\n# https://docs.djangoproject.com/en/1.9/topics/i18n/\n\nLOCALE_NAME = 'en_US'\nTIME_ZONE = 'UTC'\n\nUSE_I18N = True\nUSE_L10N = True\nUSE_TZ = True\n\nLOCALE_PATHS = [os.path.join(BASE_DIR, 'shopelectro/locale')]\nFORMAT_MODULE_PATH = [\n 'shopelectro.formats',\n]\n\n# Static files (CSS, JavaScript, Images)\n# https://docs.djangoproject.com/en/1.9/howto/static-files/\nSTATIC_URL = '/static/'\nSTATIC_ROOT = os.path.join(BASE_DIR, 'static')\nASSETS_DIR = os.path.join(BASE_DIR, 'assets')\n\nSTATICFILES_STORAGE = 'django.contrib.staticfiles.storage.ManifestStaticFilesStorage'\n\nSTATICFILES_DIRS = [\n os.path.join(BASE_DIR, 'front_build'),\n ASSETS_DIR,\n]\n\nMEDIA_URL = '/media/'\nMEDIA_ROOT = os.path.join(BASE_DIR, 'media')\n\nDATABASE_URL = os.environ[\"POSTGRES_URL\"]\n\n# to activate django connections pool for persistent connections.\n# https://docs.djangoproject.com/en/1.11/ref/databases/#persistent-connections\nCONN_MAX_AGE = None\n\nDATABASES = {\n 'default': {\n 'ENGINE': 'django.db.backends.postgresql',\n 'NAME': os.environ['POSTGRES_DB'],\n 'USER': os.environ['POSTGRES_USER'],\n 'PASSWORD': os.environ['POSTGRES_PASSWORD'],\n 'HOST': os.environ['POSTGRES_URL'],\n 'PORT': '5432',\n }\n}\n\nLOGGING = {\n 'version': 1,\n 'disable_existing_loggers': False,\n 'handlers': {\n 'console': {\n 'class': 'logging.StreamHandler',\n },\n },\n 'loggers': {\n 'django': {\n 'handlers': ['console'],\n 'level': os.getenv('DJANGO_LOG_LEVEL', 'INFO'),\n },\n 'pages': {\n 'handlers': ['console'],\n 'level': os.getenv('DJANGO_LOG_LEVEL', 'INFO'),\n },\n 'catalog': {\n 'handlers': ['console'],\n 'level': os.getenv('DJANGO_LOG_LEVEL', 'INFO'),\n },\n 'search': {\n 'handlers': ['console'],\n 'level': os.getenv('DJANGO_LOG_LEVEL', 'INFO'),\n },\n 'ecommerce': {\n 'handlers': ['console'],\n 'level': os.getenv('DJANGO_LOG_LEVEL', 'INFO'),\n },\n 'images': {\n 'handlers': ['console'],\n 'level': os.getenv('DJANGO_LOG_LEVEL', 'INFO'),\n },\n 'shopelectro': {\n 'handlers': ['console'],\n 'level': os.getenv('DJANGO_LOG_LEVEL', 'INFO'),\n },\n },\n}\n\nSELENIUM_URL = os.environ.get('SELENIUM_URL', 'http://selenium:4444/wd/hub')\nSELENIUM_WAIT_SECONDS = int(os.environ['SELENIUM_WAIT_SECONDS'])\nSELENIUM_TIMEOUT_SECONDS = int(os.environ['SELENIUM_TIMEOUT_SECONDS'])\n\nSITE_CREATED = datetime(2013, 1, 1)\n\nLOCALHOST = 'http://127.0.0.1:8000/'\nBASE_URL = 'https://www.shopelectro.ru'\n\nPLACEHOLDER_IMAGE = 'images/logo.png'\nPLACEHOLDER_ALT = 'Логотип компании Shopelectro'\n\n# Autocomplete and search settings\nSEARCH_SEE_ALL_LABEL = 'Смотреть все результаты'\n\n# For sitemaps and sites framework\nSITE_ID = 1\nSITE_DOMAIN_NAME = 'www.shopelectro.ru'\n\n# Used to retrieve instances in ecommerce.Cart\nCART_ID = 'cart'\n\n# Used to define choices attr in definition of Order.payment_type field\nPAYMENT_OPTIONS = (\n ('cash', 'Наличные'),\n ('cashless', 'Безналичные и денежные переводы'),\n ('AC', 'Банковская карта'),\n ('PC', 'Яндекс.Деньги'),\n ('GP', 'Связной (терминал)'),\n ('AB', 'Альфа-Клик'),\n)\n\n# It is fake-pass. Correct pass will be created on `docker-compose up` stage from `docker/.env`\nYANDEX_SHOP_PASS = os.environ.get('YANDEX_SHOP_PASS', 'so_secret_pass')\n\n# Used for order's email in ecommerce app\nFAKE_ORDER_NUMBER = 6000\n\n# Subjects for different types of emails sent from SE.\nEMAIL_SUBJECTS = {\n 'call': 'Обратный звонок',\n 'order': 'Заказ №{0.fake_order_number}',\n 'yandex_order': 'Заказ №{0.fake_order_number} | Яндекс.Касса',\n 'one_click': 'Заказ в один клик №{0.fake_order_number}',\n 'ya_feedback_request': 'Оцените нас на Яндекс.Маркете',\n}\n\n# Email configs\n# It is fake-pass. Correct pass will be created on `docker-compose up` stage from `docker/.env`\nEMAIL_HOST_PASSWORD = os.environ.get('EMAIL_HOST_PASSWORD', 'so_secret_pass')\nEMAIL_HOST_USER = '[email protected]'\nEMAIL_USE_TLS = True\nEMAIL_HOST = 'smtp.yandex.ru'\nEMAIL_PORT = 587\nEMAIL_SENDER = '[email protected]'\nEMAIL_RECIPIENTS = os.environ.get('EMAIL_RECIPIENTS', '[email protected]').split(',')\n\n# FTP configs\nFTP_USER = os.environ.get('FTP_USER', 'user')\nFTP_PASS = os.environ.get('FTP_PASS', 'pass')\nFTP_IP = os.environ.get('FTP_IP', '0.0.0.0')\n\nENV_TYPE = os.environ.get('ENV_TYPE', 'PROD') # LOCAL | CI | PROD\n\n# 'Prod' <-> 'Product #1 of Category #0 of Category #1' = 0.17\n# About trigram similarity: https://goo.gl/uYFcxN\nTRIGRAM_MIN_SIMILARITY = 0.15\n\n# Used in admin image uploads\nMODEL_TYPES = {\n 'Product': {\n 'app_name': 'shopelectro',\n 'dir_name': 'products',\n },\n 'Category': {\n 'app_name': 'shopelectro',\n 'dir_name': 'categories',\n }\n}\n\n# This need for using {% debug %} variable in templates.\nINTERNAL_IPS = (\n '127.0.0.1',\n)\n\nTOP_PRODUCTS = [291, 438, 1137, 2166, 2725, 2838, 3288, 3884, 3959, 2764]\nCATEGORY_STEP_MULTIPLIERS = [12, 15, 24, 25, 48, 50, 60, 100]\n\n# Reduce retail product prices by PRICE_REDUCER.\n# It is required to make prices on shopelectro.ru and se78.ru unique.\nPRICE_REDUCER = 1\n\nSHOP = {\n 'id': '69886',\n 'scid': '64788',\n 'success_url': BASE_URL + '/shop/order-success/',\n 'fail_url': BASE_URL + '/',\n 'cps_phone': '+78124163200',\n 'cps_email': '[email protected]',\n 'local_delivery_cost': 300,\n 'local_delivery_cost_threshold': 5000,\n}\n\n\ndef get_robots_content():\n with open(os.path.join(TEMPLATE_DIR, 'robots.txt')) as robots_file:\n return robots_file.read()\n\n# used in data-migrations and tests\nCUSTOM_PAGES = {\n 'index': {\n 'slug': '',\n 'name': 'Интернет-магазин элементов питания \"ShopElectro\"',\n 'menu_title': 'Главная',\n 'title': 'Интернет-магазин Элементов питания с доставкой по России',\n },\n 'sitemap': {\n 'slug': 'sitemap',\n 'h1': 'Карта сайта',\n 'name': 'Карта сайта',\n },\n 'order': {\n 'slug': 'order',\n 'name': 'Оформление заказа',\n 'title': 'Корзина Интернет-магазин shopelectro.ru Санкт-Петербург',\n },\n 'search': {\n 'slug': 'search',\n 'name': 'Результаты поиска',\n },\n 'catalog': {\n 'slug': 'catalog',\n 'name': 'Каталог товаров',\n 'menu_title': 'Каталог',\n },\n 'order_success': {\n 'slug': 'order-success',\n 'name': 'Заказ принят',\n },\n 'robots': {\n 'slug': 'robots.txt',\n 'content': get_robots_content(),\n },\n}\n\nTAGS_URL_DELIMITER = '-or-'\nTAG_GROUPS_URL_DELIMITER = '-and-'\n\nTAGS_TITLE_DELIMITER = ' или '\nTAG_GROUPS_TITLE_DELIMITER = ' и '\n\nTAGS_ORDER = ['group__position', 'group__name', 'position', 'name']\n\n# -- App business logic --\n# every product price will be multiplied on this value\n# during import from 1C.\n# Multipliers are related to prices in this order:\n# big/medium/small/retail. First three are wholesale prices.\nPRICE_MULTIPLIERS = 1.0, 1.0, 1.0, 1.0\n", "path": "shopelectro/settings/base.py" } ]
diff --git a/.drone.yml b/.drone.yml index 583aa046..389a46eb 100644 --- a/.drone.yml +++ b/.drone.yml @@ -74,7 +74,6 @@ pipeline: - SELENIUM_TIMEOUT_SECONDS=180 - SELENIUM_WAIT_SECONDS=180 commands: - - cp -r /drone/shopelectro/build/ front/ - cp -r /drone/deps/* /usr/local/lib/python3.6/site-packages - python manage.py migrate - python manage.py excel @@ -101,7 +100,7 @@ pipeline: --exclude=node_modules/**/* --exclude=static/**/* --exclude=media/**/* --exclude=doc/build/**/* --exclude=.idea/**/* - --exclude=front/build + --exclude=front_build --exclude=front/images --exclude=**/*.pyc --exclude=**/*.jpg @@ -142,7 +141,6 @@ pipeline: docker-up: image: docker/compose:1.17.1 commands: - - cp -r /drone/shopelectro/build/ front/ - cd docker - cp drone_env/* env_files/ - cp drone_env/.env . diff --git a/docker/docker-compose.yml b/docker/docker-compose.yml index 492e1cdf..d6119f47 100644 --- a/docker/docker-compose.yml +++ b/docker/docker-compose.yml @@ -43,7 +43,7 @@ services: #- ../gulpfile.babel.js:/usr/app/src/gulpfile.babel.js - ../front:/usr/app/src/front - - ../build:/usr/app/src/build + - ../front_build:/usr/app/src/front_build env_file: - env_files/paths diff --git a/docker/images/python/Dockerfile.prod b/docker/images/python/Dockerfile.prod index 53cd1cd5..69d56f31 100644 --- a/docker/images/python/Dockerfile.prod +++ b/docker/images/python/Dockerfile.prod @@ -5,6 +5,6 @@ RUN pip install gunicorn COPY ./etc/gunicorn.py /etc/ # drone already built static to this folder at previous step. # See npm section of `.drone.yml` file -COPY ./front/build ./front/build +COPY ./front_build ./front_build # built frontend files are required RUN /bin/bash -c '[ "$(ls -A front/build)" ] || exit 1' diff --git a/gulpfile.babel.js b/gulpfile.babel.js index 2bf55221..6b5919a5 100755 --- a/gulpfile.babel.js +++ b/gulpfile.babel.js @@ -56,7 +56,7 @@ const plugins = [ }), ]; -const buildDir = 'build'; +const buildDir = 'front_build'; const ecommercePaths = getAppSrcPaths('ecommerce'); const genericAdminPaths = getAppSrcPaths('generic_admin'); diff --git a/shopelectro/settings/base.py b/shopelectro/settings/base.py index b393c8f8..afd0fd0f 100644 --- a/shopelectro/settings/base.py +++ b/shopelectro/settings/base.py @@ -152,7 +152,7 @@ STATICFILES_STORAGE = 'django.contrib.staticfiles.storage.ManifestStaticFilesStorage' STATICFILES_DIRS = [ - os.path.join(BASE_DIR, 'front/build'), + os.path.join(BASE_DIR, 'front_build'), ASSETS_DIR, ] diff --git a/shopelectro/tests/tests_selenium.py b/shopelectro/tests/tests_selenium.py index a627f87a..ce1447f6 100644 --- a/shopelectro/tests/tests_selenium.py +++ b/shopelectro/tests/tests_selenium.py @@ -1056,9 +1056,7 @@ def test_autocomplete_can_expand_and_collapse(self): # remove search term ... self.clear_input() - self.wait.until(EC.text_to_be_present_in_element( - (By.CLASS_NAME, 'js-search-input'), '') - ) + self.wait.until(EC.text_to_be_present_in_element(self.INPUT_LOCATOR, '')) # ... and autocomplete collapse self.assertFalse(self.autocomplete.is_displayed())
urllib3__urllib3-758
ca_cert_dir keyword argument may be passed to HTTPConnectionPool by accident. Seems like as part of #701 I missed the `SSL_KEYWORDS` block in `poolmanager.py`. This means that `ca_cert_dir` may accidentally be passed to the `HTTPConnectionPool`. This leads to the following error when attempting to use `ca_cert_dir` with a `PoolManager` and then making a plaintext HTTP connection: ``` >>> import urllib3 >>> p = urllib3.PoolManager(ca_cert_dir='/usr/local/etc/openssl') >>> p.urlopen('GET', 'http://http2bin.org/get') Traceback (most recent call last): File "<stdin>", line 1, in <module> File "urllib3/poolmanager.py", line 162, in urlopen response = conn.urlopen(method, u.request_uri, **kw) File "urllib3/connectionpool.py", line 548, in urlopen conn = self._get_conn(timeout=pool_timeout) File "urllib3/connectionpool.py", line 250, in _get_conn return conn or self._new_conn() File "urllib3/connectionpool.py", line 211, in _new_conn strict=self.strict, **self.conn_kw) File "urllib3/connection.py", line 121, in __init__ _HTTPConnection.__init__(self, *args, **kw) TypeError: __init__() got an unexpected keyword argument 'ca_cert_dir' ```
[ { "content": "from __future__ import absolute_import\nimport logging\n\ntry: # Python 3\n from urllib.parse import urljoin\nexcept ImportError:\n from urlparse import urljoin\n\nfrom ._collections import RecentlyUsedContainer\nfrom .connectionpool import HTTPConnectionPool, HTTPSConnectionPool\nfrom .connectionpool import port_by_scheme\nfrom .exceptions import LocationValueError, MaxRetryError, ProxySchemeUnknown\nfrom .request import RequestMethods\nfrom .util.url import parse_url\nfrom .util.retry import Retry\n\n\n__all__ = ['PoolManager', 'ProxyManager', 'proxy_from_url']\n\n\npool_classes_by_scheme = {\n 'http': HTTPConnectionPool,\n 'https': HTTPSConnectionPool,\n}\n\nlog = logging.getLogger(__name__)\n\nSSL_KEYWORDS = ('key_file', 'cert_file', 'cert_reqs', 'ca_certs',\n 'ssl_version')\n\n\nclass PoolManager(RequestMethods):\n \"\"\"\n Allows for arbitrary requests while transparently keeping track of\n necessary connection pools for you.\n\n :param num_pools:\n Number of connection pools to cache before discarding the least\n recently used pool.\n\n :param headers:\n Headers to include with all requests, unless other headers are given\n explicitly.\n\n :param \\**connection_pool_kw:\n Additional parameters are used to create fresh\n :class:`urllib3.connectionpool.ConnectionPool` instances.\n\n Example::\n\n >>> manager = PoolManager(num_pools=2)\n >>> r = manager.request('GET', 'http://google.com/')\n >>> r = manager.request('GET', 'http://google.com/mail')\n >>> r = manager.request('GET', 'http://yahoo.com/')\n >>> len(manager.pools)\n 2\n\n \"\"\"\n\n proxy = None\n\n def __init__(self, num_pools=10, headers=None, **connection_pool_kw):\n RequestMethods.__init__(self, headers)\n self.connection_pool_kw = connection_pool_kw\n self.pools = RecentlyUsedContainer(num_pools,\n dispose_func=lambda p: p.close())\n\n def __enter__(self):\n return self\n\n def __exit__(self, exc_type, exc_val, exc_tb):\n self.clear()\n # Return False to re-raise any potential exceptions\n return False\n\n def _new_pool(self, scheme, host, port):\n \"\"\"\n Create a new :class:`ConnectionPool` based on host, port and scheme.\n\n This method is used to actually create the connection pools handed out\n by :meth:`connection_from_url` and companion methods. It is intended\n to be overridden for customization.\n \"\"\"\n pool_cls = pool_classes_by_scheme[scheme]\n kwargs = self.connection_pool_kw\n if scheme == 'http':\n kwargs = self.connection_pool_kw.copy()\n for kw in SSL_KEYWORDS:\n kwargs.pop(kw, None)\n\n return pool_cls(host, port, **kwargs)\n\n def clear(self):\n \"\"\"\n Empty our store of pools and direct them all to close.\n\n This will not affect in-flight connections, but they will not be\n re-used after completion.\n \"\"\"\n self.pools.clear()\n\n def connection_from_host(self, host, port=None, scheme='http'):\n \"\"\"\n Get a :class:`ConnectionPool` based on the host, port, and scheme.\n\n If ``port`` isn't given, it will be derived from the ``scheme`` using\n ``urllib3.connectionpool.port_by_scheme``.\n \"\"\"\n\n if not host:\n raise LocationValueError(\"No host specified.\")\n\n scheme = scheme or 'http'\n port = port or port_by_scheme.get(scheme, 80)\n pool_key = (scheme, host, port)\n\n with self.pools.lock:\n # If the scheme, host, or port doesn't match existing open\n # connections, open a new ConnectionPool.\n pool = self.pools.get(pool_key)\n if pool:\n return pool\n\n # Make a fresh ConnectionPool of the desired type\n pool = self._new_pool(scheme, host, port)\n self.pools[pool_key] = pool\n\n return pool\n\n def connection_from_url(self, url):\n \"\"\"\n Similar to :func:`urllib3.connectionpool.connection_from_url` but\n doesn't pass any additional parameters to the\n :class:`urllib3.connectionpool.ConnectionPool` constructor.\n\n Additional parameters are taken from the :class:`.PoolManager`\n constructor.\n \"\"\"\n u = parse_url(url)\n return self.connection_from_host(u.host, port=u.port, scheme=u.scheme)\n\n def urlopen(self, method, url, redirect=True, **kw):\n \"\"\"\n Same as :meth:`urllib3.connectionpool.HTTPConnectionPool.urlopen`\n with custom cross-host redirect logic and only sends the request-uri\n portion of the ``url``.\n\n The given ``url`` parameter must be absolute, such that an appropriate\n :class:`urllib3.connectionpool.ConnectionPool` can be chosen for it.\n \"\"\"\n u = parse_url(url)\n conn = self.connection_from_host(u.host, port=u.port, scheme=u.scheme)\n\n kw['assert_same_host'] = False\n kw['redirect'] = False\n if 'headers' not in kw:\n kw['headers'] = self.headers\n\n if self.proxy is not None and u.scheme == \"http\":\n response = conn.urlopen(method, url, **kw)\n else:\n response = conn.urlopen(method, u.request_uri, **kw)\n\n redirect_location = redirect and response.get_redirect_location()\n if not redirect_location:\n return response\n\n # Support relative URLs for redirecting.\n redirect_location = urljoin(url, redirect_location)\n\n # RFC 7231, Section 6.4.4\n if response.status == 303:\n method = 'GET'\n\n retries = kw.get('retries')\n if not isinstance(retries, Retry):\n retries = Retry.from_int(retries, redirect=redirect)\n\n try:\n retries = retries.increment(method, url, response=response, _pool=conn)\n except MaxRetryError:\n if retries.raise_on_redirect:\n raise\n return response\n\n kw['retries'] = retries\n kw['redirect'] = redirect\n\n log.info(\"Redirecting %s -> %s\" % (url, redirect_location))\n return self.urlopen(method, redirect_location, **kw)\n\n\nclass ProxyManager(PoolManager):\n \"\"\"\n Behaves just like :class:`PoolManager`, but sends all requests through\n the defined proxy, using the CONNECT method for HTTPS URLs.\n\n :param proxy_url:\n The URL of the proxy to be used.\n\n :param proxy_headers:\n A dictionary contaning headers that will be sent to the proxy. In case\n of HTTP they are being sent with each request, while in the\n HTTPS/CONNECT case they are sent only once. Could be used for proxy\n authentication.\n\n Example:\n >>> proxy = urllib3.ProxyManager('http://localhost:3128/')\n >>> r1 = proxy.request('GET', 'http://google.com/')\n >>> r2 = proxy.request('GET', 'http://httpbin.org/')\n >>> len(proxy.pools)\n 1\n >>> r3 = proxy.request('GET', 'https://httpbin.org/')\n >>> r4 = proxy.request('GET', 'https://twitter.com/')\n >>> len(proxy.pools)\n 3\n\n \"\"\"\n\n def __init__(self, proxy_url, num_pools=10, headers=None,\n proxy_headers=None, **connection_pool_kw):\n\n if isinstance(proxy_url, HTTPConnectionPool):\n proxy_url = '%s://%s:%i' % (proxy_url.scheme, proxy_url.host,\n proxy_url.port)\n proxy = parse_url(proxy_url)\n if not proxy.port:\n port = port_by_scheme.get(proxy.scheme, 80)\n proxy = proxy._replace(port=port)\n\n if proxy.scheme not in (\"http\", \"https\"):\n raise ProxySchemeUnknown(proxy.scheme)\n\n self.proxy = proxy\n self.proxy_headers = proxy_headers or {}\n\n connection_pool_kw['_proxy'] = self.proxy\n connection_pool_kw['_proxy_headers'] = self.proxy_headers\n\n super(ProxyManager, self).__init__(\n num_pools, headers, **connection_pool_kw)\n\n def connection_from_host(self, host, port=None, scheme='http'):\n if scheme == \"https\":\n return super(ProxyManager, self).connection_from_host(\n host, port, scheme)\n\n return super(ProxyManager, self).connection_from_host(\n self.proxy.host, self.proxy.port, self.proxy.scheme)\n\n def _set_proxy_headers(self, url, headers=None):\n \"\"\"\n Sets headers needed by proxies: specifically, the Accept and Host\n headers. Only sets headers not provided by the user.\n \"\"\"\n headers_ = {'Accept': '*/*'}\n\n netloc = parse_url(url).netloc\n if netloc:\n headers_['Host'] = netloc\n\n if headers:\n headers_.update(headers)\n return headers_\n\n def urlopen(self, method, url, redirect=True, **kw):\n \"Same as HTTP(S)ConnectionPool.urlopen, ``url`` must be absolute.\"\n u = parse_url(url)\n\n if u.scheme == \"http\":\n # For proxied HTTPS requests, httplib sets the necessary headers\n # on the CONNECT to the proxy. For HTTP, we'll definitely\n # need to set 'Host' at the very least.\n headers = kw.get('headers', self.headers)\n kw['headers'] = self._set_proxy_headers(url, headers)\n\n return super(ProxyManager, self).urlopen(method, url, redirect=redirect, **kw)\n\n\ndef proxy_from_url(url, **kw):\n return ProxyManager(proxy_url=url, **kw)\n", "path": "urllib3/poolmanager.py" } ]
[ { "content": "from __future__ import absolute_import\nimport logging\n\ntry: # Python 3\n from urllib.parse import urljoin\nexcept ImportError:\n from urlparse import urljoin\n\nfrom ._collections import RecentlyUsedContainer\nfrom .connectionpool import HTTPConnectionPool, HTTPSConnectionPool\nfrom .connectionpool import port_by_scheme\nfrom .exceptions import LocationValueError, MaxRetryError, ProxySchemeUnknown\nfrom .request import RequestMethods\nfrom .util.url import parse_url\nfrom .util.retry import Retry\n\n\n__all__ = ['PoolManager', 'ProxyManager', 'proxy_from_url']\n\n\npool_classes_by_scheme = {\n 'http': HTTPConnectionPool,\n 'https': HTTPSConnectionPool,\n}\n\nlog = logging.getLogger(__name__)\n\nSSL_KEYWORDS = ('key_file', 'cert_file', 'cert_reqs', 'ca_certs',\n 'ssl_version', 'ca_cert_dir')\n\n\nclass PoolManager(RequestMethods):\n \"\"\"\n Allows for arbitrary requests while transparently keeping track of\n necessary connection pools for you.\n\n :param num_pools:\n Number of connection pools to cache before discarding the least\n recently used pool.\n\n :param headers:\n Headers to include with all requests, unless other headers are given\n explicitly.\n\n :param \\**connection_pool_kw:\n Additional parameters are used to create fresh\n :class:`urllib3.connectionpool.ConnectionPool` instances.\n\n Example::\n\n >>> manager = PoolManager(num_pools=2)\n >>> r = manager.request('GET', 'http://google.com/')\n >>> r = manager.request('GET', 'http://google.com/mail')\n >>> r = manager.request('GET', 'http://yahoo.com/')\n >>> len(manager.pools)\n 2\n\n \"\"\"\n\n proxy = None\n\n def __init__(self, num_pools=10, headers=None, **connection_pool_kw):\n RequestMethods.__init__(self, headers)\n self.connection_pool_kw = connection_pool_kw\n self.pools = RecentlyUsedContainer(num_pools,\n dispose_func=lambda p: p.close())\n\n def __enter__(self):\n return self\n\n def __exit__(self, exc_type, exc_val, exc_tb):\n self.clear()\n # Return False to re-raise any potential exceptions\n return False\n\n def _new_pool(self, scheme, host, port):\n \"\"\"\n Create a new :class:`ConnectionPool` based on host, port and scheme.\n\n This method is used to actually create the connection pools handed out\n by :meth:`connection_from_url` and companion methods. It is intended\n to be overridden for customization.\n \"\"\"\n pool_cls = pool_classes_by_scheme[scheme]\n kwargs = self.connection_pool_kw\n if scheme == 'http':\n kwargs = self.connection_pool_kw.copy()\n for kw in SSL_KEYWORDS:\n kwargs.pop(kw, None)\n\n return pool_cls(host, port, **kwargs)\n\n def clear(self):\n \"\"\"\n Empty our store of pools and direct them all to close.\n\n This will not affect in-flight connections, but they will not be\n re-used after completion.\n \"\"\"\n self.pools.clear()\n\n def connection_from_host(self, host, port=None, scheme='http'):\n \"\"\"\n Get a :class:`ConnectionPool` based on the host, port, and scheme.\n\n If ``port`` isn't given, it will be derived from the ``scheme`` using\n ``urllib3.connectionpool.port_by_scheme``.\n \"\"\"\n\n if not host:\n raise LocationValueError(\"No host specified.\")\n\n scheme = scheme or 'http'\n port = port or port_by_scheme.get(scheme, 80)\n pool_key = (scheme, host, port)\n\n with self.pools.lock:\n # If the scheme, host, or port doesn't match existing open\n # connections, open a new ConnectionPool.\n pool = self.pools.get(pool_key)\n if pool:\n return pool\n\n # Make a fresh ConnectionPool of the desired type\n pool = self._new_pool(scheme, host, port)\n self.pools[pool_key] = pool\n\n return pool\n\n def connection_from_url(self, url):\n \"\"\"\n Similar to :func:`urllib3.connectionpool.connection_from_url` but\n doesn't pass any additional parameters to the\n :class:`urllib3.connectionpool.ConnectionPool` constructor.\n\n Additional parameters are taken from the :class:`.PoolManager`\n constructor.\n \"\"\"\n u = parse_url(url)\n return self.connection_from_host(u.host, port=u.port, scheme=u.scheme)\n\n def urlopen(self, method, url, redirect=True, **kw):\n \"\"\"\n Same as :meth:`urllib3.connectionpool.HTTPConnectionPool.urlopen`\n with custom cross-host redirect logic and only sends the request-uri\n portion of the ``url``.\n\n The given ``url`` parameter must be absolute, such that an appropriate\n :class:`urllib3.connectionpool.ConnectionPool` can be chosen for it.\n \"\"\"\n u = parse_url(url)\n conn = self.connection_from_host(u.host, port=u.port, scheme=u.scheme)\n\n kw['assert_same_host'] = False\n kw['redirect'] = False\n if 'headers' not in kw:\n kw['headers'] = self.headers\n\n if self.proxy is not None and u.scheme == \"http\":\n response = conn.urlopen(method, url, **kw)\n else:\n response = conn.urlopen(method, u.request_uri, **kw)\n\n redirect_location = redirect and response.get_redirect_location()\n if not redirect_location:\n return response\n\n # Support relative URLs for redirecting.\n redirect_location = urljoin(url, redirect_location)\n\n # RFC 7231, Section 6.4.4\n if response.status == 303:\n method = 'GET'\n\n retries = kw.get('retries')\n if not isinstance(retries, Retry):\n retries = Retry.from_int(retries, redirect=redirect)\n\n try:\n retries = retries.increment(method, url, response=response, _pool=conn)\n except MaxRetryError:\n if retries.raise_on_redirect:\n raise\n return response\n\n kw['retries'] = retries\n kw['redirect'] = redirect\n\n log.info(\"Redirecting %s -> %s\" % (url, redirect_location))\n return self.urlopen(method, redirect_location, **kw)\n\n\nclass ProxyManager(PoolManager):\n \"\"\"\n Behaves just like :class:`PoolManager`, but sends all requests through\n the defined proxy, using the CONNECT method for HTTPS URLs.\n\n :param proxy_url:\n The URL of the proxy to be used.\n\n :param proxy_headers:\n A dictionary contaning headers that will be sent to the proxy. In case\n of HTTP they are being sent with each request, while in the\n HTTPS/CONNECT case they are sent only once. Could be used for proxy\n authentication.\n\n Example:\n >>> proxy = urllib3.ProxyManager('http://localhost:3128/')\n >>> r1 = proxy.request('GET', 'http://google.com/')\n >>> r2 = proxy.request('GET', 'http://httpbin.org/')\n >>> len(proxy.pools)\n 1\n >>> r3 = proxy.request('GET', 'https://httpbin.org/')\n >>> r4 = proxy.request('GET', 'https://twitter.com/')\n >>> len(proxy.pools)\n 3\n\n \"\"\"\n\n def __init__(self, proxy_url, num_pools=10, headers=None,\n proxy_headers=None, **connection_pool_kw):\n\n if isinstance(proxy_url, HTTPConnectionPool):\n proxy_url = '%s://%s:%i' % (proxy_url.scheme, proxy_url.host,\n proxy_url.port)\n proxy = parse_url(proxy_url)\n if not proxy.port:\n port = port_by_scheme.get(proxy.scheme, 80)\n proxy = proxy._replace(port=port)\n\n if proxy.scheme not in (\"http\", \"https\"):\n raise ProxySchemeUnknown(proxy.scheme)\n\n self.proxy = proxy\n self.proxy_headers = proxy_headers or {}\n\n connection_pool_kw['_proxy'] = self.proxy\n connection_pool_kw['_proxy_headers'] = self.proxy_headers\n\n super(ProxyManager, self).__init__(\n num_pools, headers, **connection_pool_kw)\n\n def connection_from_host(self, host, port=None, scheme='http'):\n if scheme == \"https\":\n return super(ProxyManager, self).connection_from_host(\n host, port, scheme)\n\n return super(ProxyManager, self).connection_from_host(\n self.proxy.host, self.proxy.port, self.proxy.scheme)\n\n def _set_proxy_headers(self, url, headers=None):\n \"\"\"\n Sets headers needed by proxies: specifically, the Accept and Host\n headers. Only sets headers not provided by the user.\n \"\"\"\n headers_ = {'Accept': '*/*'}\n\n netloc = parse_url(url).netloc\n if netloc:\n headers_['Host'] = netloc\n\n if headers:\n headers_.update(headers)\n return headers_\n\n def urlopen(self, method, url, redirect=True, **kw):\n \"Same as HTTP(S)ConnectionPool.urlopen, ``url`` must be absolute.\"\n u = parse_url(url)\n\n if u.scheme == \"http\":\n # For proxied HTTPS requests, httplib sets the necessary headers\n # on the CONNECT to the proxy. For HTTP, we'll definitely\n # need to set 'Host' at the very least.\n headers = kw.get('headers', self.headers)\n kw['headers'] = self._set_proxy_headers(url, headers)\n\n return super(ProxyManager, self).urlopen(method, url, redirect=redirect, **kw)\n\n\ndef proxy_from_url(url, **kw):\n return ProxyManager(proxy_url=url, **kw)\n", "path": "urllib3/poolmanager.py" } ]
diff --git a/test/with_dummyserver/test_poolmanager.py b/test/with_dummyserver/test_poolmanager.py index c225afb667..4065ff8bab 100644 --- a/test/with_dummyserver/test_poolmanager.py +++ b/test/with_dummyserver/test_poolmanager.py @@ -162,6 +162,12 @@ def test_http_with_ssl_keywords(self): r = http.request('GET', 'http://%s:%s/' % (self.host, self.port)) self.assertEqual(r.status, 200) + def test_http_with_ca_cert_dir(self): + http = PoolManager(ca_certs='REQUIRED', ca_cert_dir='/nosuchdir') + + r = http.request('GET', 'http://%s:%s/' % (self.host, self.port)) + self.assertEqual(r.status, 200) + class TestIPv6PoolManager(IPv6HTTPDummyServerTestCase): if not HAS_IPV6: diff --git a/urllib3/poolmanager.py b/urllib3/poolmanager.py index e681d84903..f13e673d1f 100644 --- a/urllib3/poolmanager.py +++ b/urllib3/poolmanager.py @@ -26,7 +26,7 @@ log = logging.getLogger(__name__) SSL_KEYWORDS = ('key_file', 'cert_file', 'cert_reqs', 'ca_certs', - 'ssl_version') + 'ssl_version', 'ca_cert_dir') class PoolManager(RequestMethods):
opsdroid__opsdroid-1683
skill-seen broken with redis database? I've been testing opsdroid with a redis database and the seen skill appears to be having problems serializing python datetime objects. user: when did you last see user? opsdroid: Whoops there has been an error. opsdroid: Check the log for details. this is the opsdroid log with DEBUG logging enabled: ``` notrexroof_1 | DEBUG opsdroid.memory: Putting seen to memory. notrexroof_1 | DEBUG opsdroid.database.redis: Putting seen into Redis. notrexroof_1 | ERROR opsdroid.core: Exception when running skill 'seen'. notrexroof_1 | Traceback (most recent call last): notrexroof_1 | File "/usr/local/lib/python3.8/site-packages/opsdroid/core.py", line 427, in run_skill notrexroof_1 | return await skill(self, config, event) notrexroof_1 | File "/root/.local/share/opsdroid/opsdroid-modules/skill/seen/__init__.py", line 16, in last_seen notrexroof_1 | await message.respond("I last saw {} {}".format(name, human(seen[name], precision=1))) notrexroof_1 | File "/root/.local/share/opsdroid/site-packages/ago.py", line 55, in human notrexroof_1 | delta = get_delta_from_subject(subject) notrexroof_1 | File "/root/.local/share/opsdroid/site-packages/ago.py", line 16, in get_delta_from_subject notrexroof_1 | subject = float(subject) notrexroof_1 | TypeError: float() argument must be a string or a number, not 'dict' ``` I know this hasn't been touched in a few years, but I'm wondering if there is a general issue with serializing objects into a redis database within opsdroid.
[ { "content": "\"\"\"Module for storing data within Redis.\"\"\"\nimport json\nimport logging\n\nimport aioredis\nfrom aioredis import parser\nfrom voluptuous import Any\n\nfrom opsdroid.database import Database\nfrom opsdroid.helper import JSONEncoder, JSONDecoder\n\n_LOGGER = logging.getLogger(__name__)\nCONFIG_SCHEMA = {\"host\": str, \"port\": Any(int, str), \"database\": int, \"password\": str}\n\n\nclass RedisDatabase(Database):\n \"\"\"Database class for storing data within a Redis instance.\"\"\"\n\n def __init__(self, config, opsdroid=None):\n \"\"\"Initialise the redis database.\n\n Set basic properties of the database. Initialise properties like\n name, connection arguments, database file, table name and config.\n\n Args:\n config (dict): The configuration of the database which consists\n of `file` and `table` name of the sqlite database\n specified in `configuration.yaml` file.\n opsdroid (OpsDroid): An instance of opsdroid.core.\n\n \"\"\"\n super().__init__(config, opsdroid=opsdroid)\n self.config = config\n self.client = None\n self.host = self.config.get(\"host\", \"localhost\")\n self.port = self.config.get(\"port\", 6379)\n self.database = self.config.get(\"database\", 0)\n self.password = self.config.get(\"password\", None)\n _LOGGER.debug(_(\"Loaded Redis database connector.\"))\n\n async def connect(self):\n \"\"\"Connect to the database.\n\n This method will connect to a Redis database. By default it will\n connect to Redis on localhost on port 6379\n\n \"\"\"\n try:\n self.client = await aioredis.create_pool(\n address=(self.host, int(self.port)),\n db=self.database,\n password=self.password,\n parser=parser.PyReader,\n )\n\n _LOGGER.info(\n _(\"Connected to Redis database %s from %s on port %s.\"),\n self.database,\n self.host,\n self.port,\n )\n except OSError:\n _LOGGER.warning(\n _(\"Unable to connect to Redis database on address: %s port: %s.\"),\n self.host,\n self.port,\n )\n\n async def put(self, key, data):\n \"\"\"Store the data object in Redis against the key.\n\n Args:\n key (string): The key to store the data object under.\n data (object): The data object to store.\n\n \"\"\"\n if self.client:\n _LOGGER.debug(_(\"Putting %s into Redis.\"), key)\n await self.client.execute(\"SET\", key, json.dumps(data, cls=JSONEncoder))\n\n async def get(self, key):\n \"\"\"Get data from Redis for a given key.\n\n Args:\n key (string): The key to lookup in the database.\n\n Returns:\n object or None: The data object stored for that key, or None if no\n object found for that key.\n\n \"\"\"\n if self.client:\n _LOGGER.debug(_(\"Getting %s from Redis.\"), key)\n data = await self.client.execute(\"GET\", key)\n\n if data:\n return json.loads(data, encoding=JSONDecoder)\n\n return None\n\n async def delete(self, key):\n \"\"\"Delete data from Redis for a given key.\n\n Args:\n key (string): The key to delete in the database.\n\n \"\"\"\n if self.client:\n _LOGGER.debug(_(\"Deleting %s from Redis.\"), key)\n await self.client.execute(\"DEL\", key)\n\n async def disconnect(self):\n \"\"\"Disconnect from the database.\"\"\"\n if self.client:\n self.client.close()\n", "path": "opsdroid/database/redis/__init__.py" } ]
[ { "content": "\"\"\"Module for storing data within Redis.\"\"\"\nimport json\nimport logging\n\nimport aioredis\nfrom aioredis import parser\nfrom voluptuous import Any\n\nfrom opsdroid.database import Database\nfrom opsdroid.helper import JSONEncoder, JSONDecoder\n\n_LOGGER = logging.getLogger(__name__)\nCONFIG_SCHEMA = {\"host\": str, \"port\": Any(int, str), \"database\": int, \"password\": str}\n\n\nclass RedisDatabase(Database):\n \"\"\"Database class for storing data within a Redis instance.\"\"\"\n\n def __init__(self, config, opsdroid=None):\n \"\"\"Initialise the redis database.\n\n Set basic properties of the database. Initialise properties like\n name, connection arguments, database file, table name and config.\n\n Args:\n config (dict): The configuration of the database which consists\n of `file` and `table` name of the sqlite database\n specified in `configuration.yaml` file.\n opsdroid (OpsDroid): An instance of opsdroid.core.\n\n \"\"\"\n super().__init__(config, opsdroid=opsdroid)\n self.config = config\n self.client = None\n self.host = self.config.get(\"host\", \"localhost\")\n self.port = self.config.get(\"port\", 6379)\n self.database = self.config.get(\"database\", 0)\n self.password = self.config.get(\"password\", None)\n _LOGGER.debug(_(\"Loaded Redis database connector.\"))\n\n async def connect(self):\n \"\"\"Connect to the database.\n\n This method will connect to a Redis database. By default it will\n connect to Redis on localhost on port 6379\n\n \"\"\"\n try:\n self.client = await aioredis.create_pool(\n address=(self.host, int(self.port)),\n db=self.database,\n password=self.password,\n parser=parser.PyReader,\n )\n\n _LOGGER.info(\n _(\"Connected to Redis database %s from %s on port %s.\"),\n self.database,\n self.host,\n self.port,\n )\n except OSError:\n _LOGGER.warning(\n _(\"Unable to connect to Redis database on address: %s port: %s.\"),\n self.host,\n self.port,\n )\n\n async def put(self, key, data):\n \"\"\"Store the data object in Redis against the key.\n\n Args:\n key (string): The key to store the data object under.\n data (object): The data object to store.\n\n \"\"\"\n if self.client:\n _LOGGER.debug(_(\"Putting %s into Redis.\"), key)\n await self.client.execute(\"SET\", key, json.dumps(data, cls=JSONEncoder))\n\n async def get(self, key):\n \"\"\"Get data from Redis for a given key.\n\n Args:\n key (string): The key to lookup in the database.\n\n Returns:\n object or None: The data object stored for that key, or None if no\n object found for that key.\n\n \"\"\"\n if self.client:\n _LOGGER.debug(_(\"Getting %s from Redis.\"), key)\n data = await self.client.execute(\"GET\", key)\n\n if data:\n return json.loads(data, object_hook=JSONDecoder())\n\n return None\n\n async def delete(self, key):\n \"\"\"Delete data from Redis for a given key.\n\n Args:\n key (string): The key to delete in the database.\n\n \"\"\"\n if self.client:\n _LOGGER.debug(_(\"Deleting %s from Redis.\"), key)\n await self.client.execute(\"DEL\", key)\n\n async def disconnect(self):\n \"\"\"Disconnect from the database.\"\"\"\n if self.client:\n self.client.close()\n", "path": "opsdroid/database/redis/__init__.py" } ]
diff --git a/opsdroid/database/redis/__init__.py b/opsdroid/database/redis/__init__.py index 2bdf7d583..74759e20f 100644 --- a/opsdroid/database/redis/__init__.py +++ b/opsdroid/database/redis/__init__.py @@ -94,7 +94,7 @@ async def get(self, key): data = await self.client.execute("GET", key) if data: - return json.loads(data, encoding=JSONDecoder) + return json.loads(data, object_hook=JSONDecoder()) return None
cloud-custodian__cloud-custodian-654
related resource filter can depend on a side-effect Given this policy based largely on the examples from #541 (and the code that fixed it), an exception will be triggered. ``` policies: - name: related-rds-test description: | If databse is using default security group, adjust the retention. resource: rds filters: - type: security-group match-resource: true key: "GroupName" value: "default" actions: - type: retention days: 14 ``` Specifically, this will happen *any* time `match-resource` is `True` and `key` doesn't start with “tag:”. ``` Traceback (most recent call last): File "cloud-custodian/c7n/policy.py", line 167, in run resources = self.policy.resource_manager.resources() File "cloud-custodian/c7n/query.py", line 152, in resources return self.filter_resources(resources) File "cloud-custodian/c7n/manager.py", line 63, in filter_resources resources = f.process(resources, event) File "cloud-custodian/c7n/filters/related.py", line 96, in process return [r for r in resources if self.process_resource(r, related)] File "cloud-custodian/c7n/filters/related.py", line 70, in process_resource self.data['value'] = self.get_resource_value( File "cloud-custodian/c7n/filters/core.py", line 274, in get_resource_value self.expr = jmespath.compile(self.k) AttributeError: 'SecurityGroupFilter' object has no attribute 'k' ``` The policy passes validation. The problem is https://github.com/capitalone/cloud-custodian/blob/master/c7n/filters/related.py#L69 `self.get_resource_value()` can depend on `self.k` in some cases, but that’s only set up as a side-effect of calling `self.match()`. Just calling `self.match(None)` first will set up the properties then bail, which seems to work, but also seems like a hack. Moving lines 68-70 down below the `for` loop also seems to work, but I'm unsure of the impact. (The calls to `self.match()` could be influenced by the change to `self.data['value']`.)
[ { "content": "# Copyright 2016 Capital One Services, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"\nResource Filtering Logic\n\"\"\"\n\nfrom datetime import datetime, timedelta\nimport fnmatch\nimport logging\nimport operator\nimport re\n\nfrom dateutil.tz import tzutc\nfrom dateutil.parser import parse\nimport jmespath\nimport ipaddress\n\nfrom c7n.executor import ThreadPoolExecutor\nfrom c7n.registry import PluginRegistry\nfrom c7n.resolver import ValuesFrom\nfrom c7n.utils import set_annotation, type_schema, parse_cidr\n\n\nclass FilterValidationError(Exception): pass\n\n\n# Matching filters annotate their key onto objects\nANNOTATION_KEY = \"MatchedFilters\"\n\n\ndef glob_match(value, pattern):\n if not isinstance(value, basestring):\n return False\n return fnmatch.fnmatch(value, pattern)\n\n\ndef regex_match(value, regex):\n if not isinstance(value, basestring):\n return False\n # Note python 2.5+ internally cache regex\n # would be nice to use re2\n return bool(re.match(regex, value, flags=re.IGNORECASE))\n\n\ndef operator_in(x, y):\n return x in y\n\n\ndef operator_ni(x, y):\n return x not in y\n\n\nOPERATORS = {\n 'eq': operator.eq,\n 'equal': operator.eq,\n 'ne': operator.ne,\n 'not-equal': operator.ne,\n 'gt': operator.gt,\n 'greater-than': operator.gt,\n 'ge': operator.ge,\n 'gte': operator.ge,\n 'le': operator.le,\n 'lte': operator.le,\n 'lt': operator.lt,\n 'less-than': operator.lt,\n 'glob': glob_match,\n 'regex': regex_match,\n 'in': operator_in,\n 'ni': operator_ni,\n 'not-in': operator_ni}\n\n\nclass FilterRegistry(PluginRegistry):\n\n def __init__(self, *args, **kw):\n super(FilterRegistry, self).__init__(*args, **kw)\n self.register('value', ValueFilter)\n self.register('or', Or)\n self.register('and', And)\n self.register('event', EventFilter)\n\n def parse(self, data, manager):\n results = []\n for d in data:\n results.append(self.factory(d, manager))\n return results\n\n def factory(self, data, manager=None):\n \"\"\"Factory func for filters.\n\n data - policy config for filters\n manager - resource type manager (ec2, s3, etc)\n \"\"\"\n\n # Make the syntax a little nicer for common cases.\n if isinstance(data, dict) and len(data) == 1 and 'type' not in data:\n if data.keys()[0] == 'or':\n return Or(data, self, manager)\n elif data.keys()[0] == 'and':\n return And(data, self, manager)\n return ValueFilter(data, manager).validate()\n if isinstance(data, basestring):\n filter_type = data\n data = {'type': data}\n else:\n filter_type = data.get('type')\n if not filter_type:\n raise FilterValidationError(\n \"%s Invalid Filter %s\" % (\n self.plugin_type, data))\n filter_class = self.get(filter_type)\n if filter_class is not None:\n return filter_class(data, manager).validate()\n else:\n raise FilterValidationError(\n \"%s Invalid filter type %s\" % (\n self.plugin_type, data))\n\n\n# Really should be an abstract base class (abc) or\n# zope.interface\n\nclass Filter(object):\n\n executor_factory = ThreadPoolExecutor\n\n log = logging.getLogger('custodian.filters')\n\n metrics = ()\n\n schema = {'type': 'object'}\n\n def __init__(self, data, manager=None):\n self.data = data\n self.manager = manager\n\n def validate(self):\n \"\"\"validate filter config, return validation error or self\"\"\"\n return self\n\n def process(self, resources, event=None):\n \"\"\" Bulk process resources and return filtered set.\"\"\"\n return filter(self, resources)\n\n def __call__(self, instance):\n \"\"\" Process an individual resource.\"\"\"\n raise NotImplementedError()\n\n\nclass Or(Filter):\n\n def __init__(self, data, registry, manager):\n super(Or, self).__init__(data)\n self.registry = registry\n self.filters = registry.parse(self.data.values()[0], manager)\n self.manager = manager\n\n def process(self, resources, event=None):\n if self.manager:\n return self.process_set(resources, event)\n return super(Or, self).process(resources, event)\n\n def __call__(self, r):\n \"\"\"Fallback for older unit tests that don't utilize a query manager\"\"\"\n for f in self.filters:\n if f(r):\n return True\n return False\n\n def process_set(self, resources, event):\n resource_type = self.manager.get_model()\n resource_map = {r[resource_type.id]: r for r in resources}\n results = set()\n for f in self.filters:\n results = results.union([\n r[resource_type.id] for r in f.process(resources, event)])\n return [resource_map[r_id] for r_id in results]\n\n\nclass And(Filter):\n\n def __init__(self, data, registry, manager):\n super(And, self).__init__(data)\n self.registry = registry\n self.filters = registry.parse(self.data.values()[0], manager)\n\n def process(self, resources, events=None):\n for f in self.filters:\n resources = f.process(resources, events)\n return resources\n\n\nclass ValueFilter(Filter):\n \"\"\"Generic value filter using jmespath\n \"\"\"\n expr = None\n op = v = vtype = None\n\n schema = {\n 'type': 'object',\n # Doesn't mix well with inherits that extend\n 'additionalProperties': False,\n 'required': ['type'],\n 'properties': {\n # Doesn't mix well as enum with inherits that extend\n 'type': {'enum': ['value']},\n 'key': {'type': 'string'},\n 'value_type': {'enum': [\n 'age', 'integer', 'expiration', 'normalize', 'size',\n 'cidr', 'cidr_size', 'swap']},\n 'default': {'type': 'object'},\n 'value_from': ValuesFrom.schema,\n 'value': {'oneOf': [\n {'type': 'array'},\n {'type': 'string'},\n {'type': 'boolean'},\n {'type': 'number'}]},\n 'op': {'enum': OPERATORS.keys()}}}\n\n annotate = True\n\n def validate(self):\n if len(self.data) == 1:\n return self\n if 'key' not in self.data:\n raise FilterValidationError(\n \"Missing 'key' in value filter %s\" % self.data)\n if 'value' not in self.data and 'value_from' not in self.data:\n raise FilterValidationError(\n \"Missing 'value' in value filter %s\" % self.data)\n if 'op' in self.data:\n if not self.data['op'] in OPERATORS:\n raise FilterValidationError(\n \"Invalid operator in value filter %s\" % self.data)\n if self.data['op'] == 'regex':\n # Sanity check that we can compile\n try:\n re.compile(self.data['value'])\n except re.error as e:\n raise FilterValidationError(\n \"Invalid regex: %s %s\" % (e, self.data))\n return self\n\n def __call__(self, i):\n matched = self.match(i)\n if matched and self.annotate:\n set_annotation(i, ANNOTATION_KEY, self.k)\n return matched\n\n def get_resource_value(self, k, i):\n if k.startswith('tag:'):\n tk = k.split(':', 1)[1]\n r = None\n for t in i.get(\"Tags\", []):\n if t.get('Key') == tk:\n r = t.get('Value')\n break\n elif k in i:\n r = i.get(k)\n elif self.expr:\n r = self.expr.search(i)\n else:\n self.expr = jmespath.compile(self.k)\n r = self.expr.search(i)\n return r\n\n def match(self, i):\n if self.v is None and len(self.data) == 1:\n [(self.k, self.v)] = self.data.items()\n elif self.v is None:\n self.k = self.data.get('key')\n self.op = self.data.get('op')\n if 'value_from' in self.data:\n values = ValuesFrom(self.data['value_from'], self.manager)\n self.v = values.get_values()\n else:\n self.v = self.data.get('value')\n self.vtype = self.data.get('value_type')\n\n if i is None:\n return False\n\n # value extract\n r = self.get_resource_value(self.k, i)\n\n if self.op in ('in', 'not-in') and r is None:\n r = ()\n\n # value type conversion\n if self.vtype is not None:\n v, r = self.process_value_type(self.v, r)\n else:\n v = self.v\n\n # Value match\n if r is None and v == 'absent':\n return True\n elif r is not None and v == 'present':\n return True\n elif v == 'not-null' and r:\n return True\n elif v == 'empty' and not r:\n return True\n elif self.op:\n op = OPERATORS[self.op]\n try:\n return op(r, v)\n except TypeError:\n return False\n elif r == self.v:\n return True\n\n return False\n\n def process_value_type(self, sentinel, value):\n if self.vtype == 'normalize' and isinstance(value, basestring):\n return sentinel, value.strip().lower()\n\n elif self.vtype == 'integer':\n try:\n value = int(value.strip())\n except ValueError:\n value = 0\n elif self.vtype == 'size':\n try:\n return sentinel, len(value)\n except TypeError:\n return sentinel, 0\n elif self.vtype == 'swap':\n return value, sentinel\n elif self.vtype == 'age':\n if not isinstance(sentinel, datetime):\n sentinel = datetime.now(tz=tzutc()) - timedelta(sentinel)\n\n if not isinstance(value, datetime):\n # EMR bug when testing ages in EMR. This is due to\n # EMR not having more functionality.\n try:\n value = parse(value)\n except (AttributeError, TypeError):\n value = 0\n # Reverse the age comparison, we want to compare the value being\n # greater than the sentinel typically. Else the syntax for age\n # comparisons is intuitively wrong.\n return value, sentinel\n elif self.vtype == 'cidr':\n s = parse_cidr(sentinel)\n v = parse_cidr(value)\n if (isinstance(s, ipaddress._BaseAddress)\n and isinstance(v, ipaddress._BaseNetwork)):\n return v, s\n return s, v\n elif self.vtype == 'cidr_size':\n cidr = parse_cidr(value)\n if cidr:\n return sentinel, cidr.prefixlen\n return sentinel, 0\n\n # Allows for expiration filtering, for events in the future as opposed\n # to events in the past which age filtering allows for.\n elif self.vtype == 'expiration':\n if not isinstance(sentinel, datetime):\n sentinel = datetime.now(tz=tzutc()) + timedelta(sentinel)\n\n if not isinstance(value, datetime):\n value = parse(value)\n\n return sentinel, value\n return sentinel, value\n\n\nclass AgeFilter(Filter):\n \"\"\"Automatically filter resources older than a given date.\n \"\"\"\n threshold_date = None\n\n # The name of attribute to compare to threshold; must override in subclass\n date_attribute = None\n\n schema = None\n\n def validate(self):\n if not self.date_attribute:\n raise NotImplementedError(\n \"date_attribute must be overriden in subclass\")\n return self\n\n def get_resource_date(self, i):\n v = i[self.date_attribute]\n if not isinstance(v, datetime):\n v = parse(v)\n if not v.tzinfo:\n v = v.replace(tzinfo=tzutc())\n return v\n\n def __call__(self, i):\n v = self.get_resource_date(i)\n if v is None:\n return False\n op = OPERATORS[self.data.get('op', 'greater-than')]\n\n if not self.threshold_date:\n days = self.data.get('days', 60)\n # Work around placebo issues with tz\n if v.tzinfo:\n n = datetime.now(tz=tzutc())\n else:\n n = datetime.now()\n self.threshold_date = n - timedelta(days)\n\n return op(self.threshold_date, v)\n\n\nclass EventFilter(ValueFilter):\n \"\"\"Filter against a cloudwatch event associated to a resource type.\"\"\"\n\n schema = type_schema('event', rinherit=ValueFilter.schema)\n\n def validate(self):\n if 'mode' not in self.manager.data:\n raise FilterValidationError(\n \"Event filters can only be used with lambda policies\")\n return self\n\n def process(self, resources, event=None):\n if event is None:\n return resources\n if self(event):\n return resources\n return []\n\n", "path": "c7n/filters/core.py" } ]
[ { "content": "# Copyright 2016 Capital One Services, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"\nResource Filtering Logic\n\"\"\"\n\nfrom datetime import datetime, timedelta\nimport fnmatch\nimport logging\nimport operator\nimport re\n\nfrom dateutil.tz import tzutc\nfrom dateutil.parser import parse\nimport jmespath\nimport ipaddress\n\nfrom c7n.executor import ThreadPoolExecutor\nfrom c7n.registry import PluginRegistry\nfrom c7n.resolver import ValuesFrom\nfrom c7n.utils import set_annotation, type_schema, parse_cidr\n\n\nclass FilterValidationError(Exception): pass\n\n\n# Matching filters annotate their key onto objects\nANNOTATION_KEY = \"MatchedFilters\"\n\n\ndef glob_match(value, pattern):\n if not isinstance(value, basestring):\n return False\n return fnmatch.fnmatch(value, pattern)\n\n\ndef regex_match(value, regex):\n if not isinstance(value, basestring):\n return False\n # Note python 2.5+ internally cache regex\n # would be nice to use re2\n return bool(re.match(regex, value, flags=re.IGNORECASE))\n\n\ndef operator_in(x, y):\n return x in y\n\n\ndef operator_ni(x, y):\n return x not in y\n\n\nOPERATORS = {\n 'eq': operator.eq,\n 'equal': operator.eq,\n 'ne': operator.ne,\n 'not-equal': operator.ne,\n 'gt': operator.gt,\n 'greater-than': operator.gt,\n 'ge': operator.ge,\n 'gte': operator.ge,\n 'le': operator.le,\n 'lte': operator.le,\n 'lt': operator.lt,\n 'less-than': operator.lt,\n 'glob': glob_match,\n 'regex': regex_match,\n 'in': operator_in,\n 'ni': operator_ni,\n 'not-in': operator_ni}\n\n\nclass FilterRegistry(PluginRegistry):\n\n def __init__(self, *args, **kw):\n super(FilterRegistry, self).__init__(*args, **kw)\n self.register('value', ValueFilter)\n self.register('or', Or)\n self.register('and', And)\n self.register('event', EventFilter)\n\n def parse(self, data, manager):\n results = []\n for d in data:\n results.append(self.factory(d, manager))\n return results\n\n def factory(self, data, manager=None):\n \"\"\"Factory func for filters.\n\n data - policy config for filters\n manager - resource type manager (ec2, s3, etc)\n \"\"\"\n\n # Make the syntax a little nicer for common cases.\n if isinstance(data, dict) and len(data) == 1 and 'type' not in data:\n if data.keys()[0] == 'or':\n return Or(data, self, manager)\n elif data.keys()[0] == 'and':\n return And(data, self, manager)\n return ValueFilter(data, manager).validate()\n if isinstance(data, basestring):\n filter_type = data\n data = {'type': data}\n else:\n filter_type = data.get('type')\n if not filter_type:\n raise FilterValidationError(\n \"%s Invalid Filter %s\" % (\n self.plugin_type, data))\n filter_class = self.get(filter_type)\n if filter_class is not None:\n return filter_class(data, manager).validate()\n else:\n raise FilterValidationError(\n \"%s Invalid filter type %s\" % (\n self.plugin_type, data))\n\n\n# Really should be an abstract base class (abc) or\n# zope.interface\n\nclass Filter(object):\n\n executor_factory = ThreadPoolExecutor\n\n log = logging.getLogger('custodian.filters')\n\n metrics = ()\n\n schema = {'type': 'object'}\n\n def __init__(self, data, manager=None):\n self.data = data\n self.manager = manager\n\n def validate(self):\n \"\"\"validate filter config, return validation error or self\"\"\"\n return self\n\n def process(self, resources, event=None):\n \"\"\" Bulk process resources and return filtered set.\"\"\"\n return filter(self, resources)\n\n def __call__(self, instance):\n \"\"\" Process an individual resource.\"\"\"\n raise NotImplementedError()\n\n\nclass Or(Filter):\n\n def __init__(self, data, registry, manager):\n super(Or, self).__init__(data)\n self.registry = registry\n self.filters = registry.parse(self.data.values()[0], manager)\n self.manager = manager\n\n def process(self, resources, event=None):\n if self.manager:\n return self.process_set(resources, event)\n return super(Or, self).process(resources, event)\n\n def __call__(self, r):\n \"\"\"Fallback for older unit tests that don't utilize a query manager\"\"\"\n for f in self.filters:\n if f(r):\n return True\n return False\n\n def process_set(self, resources, event):\n resource_type = self.manager.get_model()\n resource_map = {r[resource_type.id]: r for r in resources}\n results = set()\n for f in self.filters:\n results = results.union([\n r[resource_type.id] for r in f.process(resources, event)])\n return [resource_map[r_id] for r_id in results]\n\n\nclass And(Filter):\n\n def __init__(self, data, registry, manager):\n super(And, self).__init__(data)\n self.registry = registry\n self.filters = registry.parse(self.data.values()[0], manager)\n\n def process(self, resources, events=None):\n for f in self.filters:\n resources = f.process(resources, events)\n return resources\n\n\nclass ValueFilter(Filter):\n \"\"\"Generic value filter using jmespath\n \"\"\"\n expr = None\n op = v = vtype = None\n\n schema = {\n 'type': 'object',\n # Doesn't mix well with inherits that extend\n 'additionalProperties': False,\n 'required': ['type'],\n 'properties': {\n # Doesn't mix well as enum with inherits that extend\n 'type': {'enum': ['value']},\n 'key': {'type': 'string'},\n 'value_type': {'enum': [\n 'age', 'integer', 'expiration', 'normalize', 'size',\n 'cidr', 'cidr_size', 'swap']},\n 'default': {'type': 'object'},\n 'value_from': ValuesFrom.schema,\n 'value': {'oneOf': [\n {'type': 'array'},\n {'type': 'string'},\n {'type': 'boolean'},\n {'type': 'number'}]},\n 'op': {'enum': OPERATORS.keys()}}}\n\n annotate = True\n\n def validate(self):\n if len(self.data) == 1:\n return self\n if 'key' not in self.data:\n raise FilterValidationError(\n \"Missing 'key' in value filter %s\" % self.data)\n if 'value' not in self.data and 'value_from' not in self.data:\n raise FilterValidationError(\n \"Missing 'value' in value filter %s\" % self.data)\n if 'op' in self.data:\n if not self.data['op'] in OPERATORS:\n raise FilterValidationError(\n \"Invalid operator in value filter %s\" % self.data)\n if self.data['op'] == 'regex':\n # Sanity check that we can compile\n try:\n re.compile(self.data['value'])\n except re.error as e:\n raise FilterValidationError(\n \"Invalid regex: %s %s\" % (e, self.data))\n return self\n\n def __call__(self, i):\n matched = self.match(i)\n if matched and self.annotate:\n set_annotation(i, ANNOTATION_KEY, self.k)\n return matched\n\n def get_resource_value(self, k, i):\n if k.startswith('tag:'):\n tk = k.split(':', 1)[1]\n r = None\n for t in i.get(\"Tags\", []):\n if t.get('Key') == tk:\n r = t.get('Value')\n break\n elif k in i:\n r = i.get(k)\n elif self.expr:\n r = self.expr.search(i)\n else:\n self.expr = jmespath.compile(k)\n r = self.expr.search(i)\n return r\n\n def match(self, i):\n if self.v is None and len(self.data) == 1:\n [(self.k, self.v)] = self.data.items()\n elif self.v is None:\n self.k = self.data.get('key')\n self.op = self.data.get('op')\n if 'value_from' in self.data:\n values = ValuesFrom(self.data['value_from'], self.manager)\n self.v = values.get_values()\n else:\n self.v = self.data.get('value')\n self.vtype = self.data.get('value_type')\n\n if i is None:\n return False\n\n # value extract\n r = self.get_resource_value(self.k, i)\n\n if self.op in ('in', 'not-in') and r is None:\n r = ()\n\n # value type conversion\n if self.vtype is not None:\n v, r = self.process_value_type(self.v, r)\n else:\n v = self.v\n\n # Value match\n if r is None and v == 'absent':\n return True\n elif r is not None and v == 'present':\n return True\n elif v == 'not-null' and r:\n return True\n elif v == 'empty' and not r:\n return True\n elif self.op:\n op = OPERATORS[self.op]\n try:\n return op(r, v)\n except TypeError:\n return False\n elif r == self.v:\n return True\n\n return False\n\n def process_value_type(self, sentinel, value):\n if self.vtype == 'normalize' and isinstance(value, basestring):\n return sentinel, value.strip().lower()\n\n elif self.vtype == 'integer':\n try:\n value = int(value.strip())\n except ValueError:\n value = 0\n elif self.vtype == 'size':\n try:\n return sentinel, len(value)\n except TypeError:\n return sentinel, 0\n elif self.vtype == 'swap':\n return value, sentinel\n elif self.vtype == 'age':\n if not isinstance(sentinel, datetime):\n sentinel = datetime.now(tz=tzutc()) - timedelta(sentinel)\n\n if not isinstance(value, datetime):\n # EMR bug when testing ages in EMR. This is due to\n # EMR not having more functionality.\n try:\n value = parse(value)\n except (AttributeError, TypeError):\n value = 0\n # Reverse the age comparison, we want to compare the value being\n # greater than the sentinel typically. Else the syntax for age\n # comparisons is intuitively wrong.\n return value, sentinel\n elif self.vtype == 'cidr':\n s = parse_cidr(sentinel)\n v = parse_cidr(value)\n if (isinstance(s, ipaddress._BaseAddress)\n and isinstance(v, ipaddress._BaseNetwork)):\n return v, s\n return s, v\n elif self.vtype == 'cidr_size':\n cidr = parse_cidr(value)\n if cidr:\n return sentinel, cidr.prefixlen\n return sentinel, 0\n\n # Allows for expiration filtering, for events in the future as opposed\n # to events in the past which age filtering allows for.\n elif self.vtype == 'expiration':\n if not isinstance(sentinel, datetime):\n sentinel = datetime.now(tz=tzutc()) + timedelta(sentinel)\n\n if not isinstance(value, datetime):\n value = parse(value)\n\n return sentinel, value\n return sentinel, value\n\n\nclass AgeFilter(Filter):\n \"\"\"Automatically filter resources older than a given date.\n \"\"\"\n threshold_date = None\n\n # The name of attribute to compare to threshold; must override in subclass\n date_attribute = None\n\n schema = None\n\n def validate(self):\n if not self.date_attribute:\n raise NotImplementedError(\n \"date_attribute must be overriden in subclass\")\n return self\n\n def get_resource_date(self, i):\n v = i[self.date_attribute]\n if not isinstance(v, datetime):\n v = parse(v)\n if not v.tzinfo:\n v = v.replace(tzinfo=tzutc())\n return v\n\n def __call__(self, i):\n v = self.get_resource_date(i)\n if v is None:\n return False\n op = OPERATORS[self.data.get('op', 'greater-than')]\n\n if not self.threshold_date:\n days = self.data.get('days', 60)\n # Work around placebo issues with tz\n if v.tzinfo:\n n = datetime.now(tz=tzutc())\n else:\n n = datetime.now()\n self.threshold_date = n - timedelta(days)\n\n return op(self.threshold_date, v)\n\n\nclass EventFilter(ValueFilter):\n \"\"\"Filter against a cloudwatch event associated to a resource type.\"\"\"\n\n schema = type_schema('event', rinherit=ValueFilter.schema)\n\n def validate(self):\n if 'mode' not in self.manager.data:\n raise FilterValidationError(\n \"Event filters can only be used with lambda policies\")\n return self\n\n def process(self, resources, event=None):\n if event is None:\n return resources\n if self(event):\n return resources\n return []\n\n", "path": "c7n/filters/core.py" } ]
diff --git a/c7n/filters/core.py b/c7n/filters/core.py index 1d2c786ee87..fefd17e3bb5 100644 --- a/c7n/filters/core.py +++ b/c7n/filters/core.py @@ -271,7 +271,7 @@ def get_resource_value(self, k, i): elif self.expr: r = self.expr.search(i) else: - self.expr = jmespath.compile(self.k) + self.expr = jmespath.compile(k) r = self.expr.search(i) return r
pytorch__audio-1583
Use of deprecated `AutoNonVariableTypeMode`. `AutoNonVariableTypeMode` is deprecated and will be removed in PyTorch 1.10. https://github.com/pytorch/audio/search?q=AutoNonVariableTypeMode Migration: https://github.com/pytorch/pytorch/blob/master/docs/cpp/source/notes/inference_mode.rst#migration-guide-from-autononvariabletypemode cc @carolineechen
[ { "content": "from . import extension # noqa: F401\nfrom torchaudio._internal import module_utils as _mod_utils # noqa: F401\nfrom torchaudio import (\n compliance,\n datasets,\n functional,\n kaldi_io,\n utils,\n sox_effects,\n transforms,\n)\n\nfrom torchaudio.backend import (\n list_audio_backends,\n get_audio_backend,\n set_audio_backend,\n)\n\ntry:\n from .version import __version__, git_version # noqa: F401\nexcept ImportError:\n pass\n\n__all__ = [\n 'compliance',\n 'datasets',\n 'functional',\n 'kaldi_io',\n 'utils',\n 'sox_effects',\n 'transforms',\n 'list_audio_backends',\n 'get_audio_backend',\n 'set_audio_backend',\n 'save_encinfo',\n 'sox_signalinfo_t',\n 'sox_encodinginfo_t',\n 'get_sox_option_t',\n 'get_sox_encoding_t',\n 'get_sox_bool',\n 'SignalInfo',\n 'EncodingInfo',\n]\n", "path": "torchaudio/__init__.py" } ]
[ { "content": "from . import extension # noqa: F401\nfrom torchaudio._internal import module_utils as _mod_utils # noqa: F401\nfrom torchaudio import (\n compliance,\n datasets,\n functional,\n kaldi_io,\n utils,\n sox_effects,\n transforms,\n)\n\nfrom torchaudio.backend import (\n list_audio_backends,\n get_audio_backend,\n set_audio_backend,\n)\n\ntry:\n from .version import __version__, git_version # noqa: F401\nexcept ImportError:\n pass\n\n__all__ = [\n 'compliance',\n 'datasets',\n 'functional',\n 'kaldi_io',\n 'utils',\n 'sox_effects',\n 'transforms',\n 'list_audio_backends',\n 'get_audio_backend',\n 'set_audio_backend',\n]\n", "path": "torchaudio/__init__.py" } ]
diff --git a/.circleci/unittest/linux/scripts/install.sh b/.circleci/unittest/linux/scripts/install.sh index 50a02f11e3..f5ad184390 100755 --- a/.circleci/unittest/linux/scripts/install.sh +++ b/.circleci/unittest/linux/scripts/install.sh @@ -23,21 +23,41 @@ eval "$("${conda_dir}/bin/conda" shell.bash hook)" conda activate "${env_dir}" # 1. Install PyTorch -if [ -z "${CUDA_VERSION:-}" ] ; then - if [ "${os}" == MacOSX ] ; then - cudatoolkit='' - else - cudatoolkit="cpuonly" - fi +# [2021/06/22 Temporary workaround] Disabling the original installation +# The orignal, conda-based instartion is working for GPUs, but not for CPUs +# For CPUs we use pip-based installation +# if [ -z "${CUDA_VERSION:-}" ] ; then +# if [ "${os}" == MacOSX ] ; then +# cudatoolkit='' +# else +# cudatoolkit="cpuonly" +# fi +# else +# version="$(python -c "print('.'.join(\"${CUDA_VERSION}\".split('.')[:2]))")" +# cudatoolkit="cudatoolkit=${version}" +# fi +# printf "Installing PyTorch with %s\n" "${cudatoolkit}" +# ( +# set -x +# conda install ${CONDA_CHANNEL_FLAGS:-} -y -c "pytorch-${UPLOAD_CHANNEL}" "pytorch-${UPLOAD_CHANNEL}::pytorch" ${cudatoolkit} +# ) + +if [ "${os}" == MacOSX ] || [ -z "${CUDA_VERSION:-}" ] ; then + device="cpu" + printf "Installing PyTorch with %s\n" "$device}" + ( + set -x + pip install --pre torch==1.10.0.dev20210618 -f "https://download.pytorch.org/whl/nightly/${device}/torch_nightly.html" + ) else version="$(python -c "print('.'.join(\"${CUDA_VERSION}\".split('.')[:2]))")" cudatoolkit="cudatoolkit=${version}" + printf "Installing PyTorch with %s\n" "${cudatoolkit}" + ( + set -x + conda install ${CONDA_CHANNEL_FLAGS:-} -y -c "pytorch-${UPLOAD_CHANNEL}" "pytorch-${UPLOAD_CHANNEL}::pytorch" ${cudatoolkit} + ) fi -printf "Installing PyTorch with %s\n" "${cudatoolkit}" -( - set -x - conda install ${CONDA_CHANNEL_FLAGS:-} -y -c "pytorch-${UPLOAD_CHANNEL}" "pytorch-${UPLOAD_CHANNEL}::pytorch" ${cudatoolkit} -) # 2. Install torchaudio printf "* Installing torchaudio\n" diff --git a/.travis.yml b/.travis.yml deleted file mode 100644 index 81e37b4120..0000000000 --- a/.travis.yml +++ /dev/null @@ -1,32 +0,0 @@ -# note dist: 'trusty' does not work here -dist: xenial - -language: python - -# cache miniconda installer and similar files -cache: - directories: - - /home/travis/download - -# This matrix tests that the code works on Python 3.5, 3.6, 3.7, passes -# lint and example tests. -matrix: - fast_finish: true - include: - - env: PYTHON_VERSION="3.6" RUN_EXAMPLE_TESTS="true" SKIP_TESTS="true" - allow_failures: - - env: PYTHON_VERSION="3.6" RUN_EXAMPLE_TESTS="true" SKIP_TESTS="true" - -addons: - apt: - packages: - sox - libsox-dev - libsox-fmt-all - portaudio19-dev - -notifications: - email: false - -install: source build_tools/travis/install.sh -script: bash build_tools/travis/test_script.sh diff --git a/test/torchaudio_unittest/models/wav2vec2/huggingface_intergration_test.py b/test/torchaudio_unittest/models/wav2vec2/huggingface_intergration_test.py index d7b2260bc4..ef28039c5b 100644 --- a/test/torchaudio_unittest/models/wav2vec2/huggingface_intergration_test.py +++ b/test/torchaudio_unittest/models/wav2vec2/huggingface_intergration_test.py @@ -86,7 +86,7 @@ def test_import(self, config): self.assertEqual(ref, hyp) # Feature projection x = torch.randn(3, 10, config['conv_dim'][-1]) - ref = original.wav2vec2.feature_projection(x) + ref = original.wav2vec2.feature_projection(x)[0] hyp = imported.encoder.feature_projection(x) self.assertEqual(ref, hyp) # Convolutional Positional Encoder diff --git a/test/torchscript_bc_test/README.md b/test/torchscript_bc_test/README.md deleted file mode 100644 index 4cf0e2b226..0000000000 --- a/test/torchscript_bc_test/README.md +++ /dev/null @@ -1,23 +0,0 @@ -# Torchscript Backward Compatibility Test Suite - -This directory contains tools to generate Torchscript object of a specific torchaudio version (given that you have the corresponding environments setup correctly) and validate it in the current version. - -## Usage - -### Generate torchscript object - -``` -./main.py --mode generate --version 0.6.0 -``` - -will generate Torchscript dump files in [`assets`](./assets/) directory. This requries your Python runtime to have torchaudio `0.6.0`. - - -### Validate torchscript object - - -``` -./main.py --mode validate --version 0.6.0 -``` - -will validate if the Torchscript files created in the previous step are compatible with the version of torchaudio available in your environment (master). diff --git a/test/torchscript_bc_test/main.py b/test/torchscript_bc_test/main.py deleted file mode 100755 index 3c1c8d974e..0000000000 --- a/test/torchscript_bc_test/main.py +++ /dev/null @@ -1,71 +0,0 @@ -#!/usr/bin/env python3 -"""Generate torchscript object of specific torhcaudio version. - -This requires that the corresponding torchaudio (and torch) is installed. -""" -import os -import sys -import argparse - - -_BASE_OBJ_DIR = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'assets') - - -def _parse_args(): - parser = argparse.ArgumentParser( - description=__doc__ - ) - parser.add_argument( - '--mode', choices=['generate', 'validate'], required=True, - help=( - '"generate" generates Torchscript objects of the specific torchaudio ' - 'in the given directory. ' - '"validate" validates if the objects in the givcen directory are compatible ' - 'with the current torhcaudio.' - ) - ) - parser.add_argument( - '--version', choices=['0.6.0'], required=True, - help='torchaudio version.' - ) - parser.add_argument( - '--base-obj-dir', default=_BASE_OBJ_DIR, - help='Directory where objects are saved/loaded.' - ) - return parser.parse_args() - - -def _generate(version, output_dir): - if version == '0.6.0': - import ver_060 - ver_060.generate(output_dir) - else: - raise ValueError(f'Unexpected torchaudio version: {version}') - - -def _validate(version, input_dir): - if version == '0.6.0': - import ver_060 - ver_060.validate(input_dir) - else: - raise ValueError(f'Unexpected torchaudio version: {version}') - - -def _get_obj_dir(base_dir, version): - py_version = f'{sys.version_info.major}.{sys.version_info.minor}' - return os.path.join(base_dir, f'{version}-py{py_version}') - - -def _main(): - args = _parse_args() - obj_dir = _get_obj_dir(args.base_obj_dir, args.version) - if args.mode == 'generate': - _generate(args.version, obj_dir) - elif args.mode == 'validate': - _validate(args.version, obj_dir) - else: - raise ValueError(f'Unexpected mode: {args.mode}') - - -if __name__ == '__main__': - _main() diff --git a/test/torchscript_bc_test/ver_060.py b/test/torchscript_bc_test/ver_060.py deleted file mode 100644 index 78b8da228c..0000000000 --- a/test/torchscript_bc_test/ver_060.py +++ /dev/null @@ -1,71 +0,0 @@ -import os -import tempfile -from typing import Optional -from packaging import version - -import torch -import torchaudio - -_MIN_VER = version.parse('0.6.0a0') -_MAX_VER = version.parse('0.7.0') -_RUNTIME_VER = version.parse(torchaudio.__version__) - - -def info(filepath: str) -> torchaudio.backend.sox_io_backend.AudioMetaData: - return torchaudio.info(filepath) - - -def load( - filepath: str, - frame_offset: int, - num_frames: int, - normalize: bool, - channels_first: bool): - return torchaudio.load(filepath, frame_offset, num_frames, normalize, channels_first) - - -def save( - filepath: str, - tensor: torch.Tensor, - sample_rate: int, - channels_first: bool = True, - compression: Optional[float] = None, -): - torchaudio.save(filepath, tensor, sample_rate, channels_first, compression) - - -def generate(output_dir): - if not (_MIN_VER <= _RUNTIME_VER < _MAX_VER): - raise RuntimeError(f'Invalid torchaudio runtime version: {_RUNTIME_VER}') - - torchaudio.set_audio_backend('sox_io') - - funcs = [ - info, - load, - save, - ] - - os.makedirs(output_dir, exist_ok=True) - for func in funcs: - torch.jit.script(func).save(os.path.join(output_dir, f'{func.__name__}.zip')) - - -def validate(input_dir): - torchaudio.set_audio_backend('sox_io') - - # See https://github.com/pytorch/pytorch/issues/42258 - # info_ = torch.jit.load(os.path.join(input_dir, 'info.zip')) - load_ = torch.jit.load(os.path.join(input_dir, 'load.zip')) - save_ = torch.jit.load(os.path.join(input_dir, 'save.zip')) - - sample_rate = 44100 - normalize = True - channels_first = True - with tempfile.TemporaryDirectory() as temp_dir: - temp_file = os.path.join(temp_dir, 'test.wav') - temp_data = torch.rand(2, sample_rate, dtype=torch.float32) - - save_(temp_file, temp_data, sample_rate, channels_first, 0.) - # info_(temp_file) - load_(temp_file, 0, -1, normalize, channels_first) diff --git a/torchaudio/__init__.py b/torchaudio/__init__.py index e72ecac4cb..dc6abf33c1 100644 --- a/torchaudio/__init__.py +++ b/torchaudio/__init__.py @@ -32,12 +32,4 @@ 'list_audio_backends', 'get_audio_backend', 'set_audio_backend', - 'save_encinfo', - 'sox_signalinfo_t', - 'sox_encodinginfo_t', - 'get_sox_option_t', - 'get_sox_encoding_t', - 'get_sox_bool', - 'SignalInfo', - 'EncodingInfo', ] diff --git a/torchaudio/csrc/rnnt/autograd.cpp b/torchaudio/csrc/rnnt/autograd.cpp index 73ad9f9b3c..0ea6514702 100644 --- a/torchaudio/csrc/rnnt/autograd.cpp +++ b/torchaudio/csrc/rnnt/autograd.cpp @@ -16,7 +16,6 @@ class RNNTLossFunction : public torch::autograd::Function<RNNTLossFunction> { double clamp, bool fused_log_smax = true, bool reuse_logits_for_grads = true) { - at::AutoNonVariableTypeMode g; torch::Tensor undef; auto result = rnnt_loss( logits, @@ -54,6 +53,7 @@ std::tuple<torch::Tensor, c10::optional<torch::Tensor>> rnnt_loss_autograd( double clamp, bool fused_log_smax = true, bool reuse_logits_for_grads = true) { + at::AutoDispatchBelowADInplaceOrView guard; auto results = RNNTLossFunction::apply( logits, targets,
pretalx__pretalx-668
Resource without a resource (FileField) addressed ## Current Behavior A resource with an empty FileField causes `active_resources` to return resources that have an empty `FileField` which then crashes the talk views. I'm not sure how this `Resource` instance occurred in our database, but I suspect something like a rejected file, broken transfer etc. to have caused it. ## Expected Behavior Properties of empty string `FIleField` are skipped. I think it's fixed by doing this: ``` @property def active_resources(self): return self.resources.filter(resource__isnull=False) ``` change to: ``` @property def active_resources(self): return self.resources.exclude(resource="").exclude(resource=None) ``` ## Steps to Reproduce Create a `Resource` instance for a submission with empty string in the `resource` field. ## Context DjangoCon Europe 2019 - currently happening in ` /conference/talk/8LMBGP/` ``` In [3]: models.Submission.objects.filter(code="8LMBGP")[0] Out[3]: <Submission: Submission(event=conference, code=8LMBGP, title=Building a custom model field from the ground up, state=confirmed)> In [4]: models.Submission.objects.filter(code="8LMBGP")[0].active_resources Out[4]: <QuerySet [<Resource: Resource(event=conference, submission=Building a custom model field from the ground up)>]> In [5]: models.Submission.objects.filter(code="8LMBGP")[0].active_resources[0] Out[5]: <Resource: Resource(event=conference, submission=Building a custom model field from the ground up)> In [6]: models.Submission.objects.filter(code="8LMBGP")[0].active_resources[0].resource Out[6]: <FieldFile: None> In [7]: models.Submission.objects.filter(code="8LMBGP")[0].active_resources[0].resource.path --------------------------------------------------------------------------- ValueError Traceback (most recent call last) <ipython-input-7-6cb30323695b> in <module> ----> 1 models.Submission.objects.filter(code="8LMBGP")[0].active_resources[0].resource.path ~/.virtualenvs/venv/lib/python3.6/site-packages/django/db/models/fields/files.py in path(self) 54 @property 55 def path(self): ---> 56 self._require_file() 57 return self.storage.path(self.name) 58 ~/.virtualenvs/venv/lib/python3.6/site-packages/django/db/models/fields/files.py in _require_file(self) 36 def _require_file(self): 37 if not self: ---> 38 raise ValueError("The '%s' attribute has no file associated with it." % self.field.name) 39 40 def _get_file(self): ValueError: The 'resource' attribute has no file associated with it. ``` ## Your Environment ``` Traceback: File "venv/lib/python3.6/site-packages/django/template/base.py" in _resolve_lookup 829. current = current[bit] During handling of the above exception ('FieldFile' object is not subscriptable), another exception occurred: File "venv/lib/python3.6/site-packages/django/core/handlers/exception.py" in inner 34. response = get_response(request) File "venv/lib/python3.6/site-packages/django/core/handlers/base.py" in _get_response 156. response = self.process_exception_by_middleware(e, request) File "venv/lib/python3.6/site-packages/django/core/handlers/base.py" in _get_response 154. response = response.render() File "venv/lib/python3.6/site-packages/django/template/response.py" in render 106. self.content = self.rendered_content File "venv/lib/python3.6/site-packages/django/template/response.py" in rendered_content 83. content = template.render(context, self._request) File "venv/lib/python3.6/site-packages/django/template/backends/django.py" in render 61. return self.template.render(context) File "venv/lib/python3.6/site-packages/django/template/base.py" in render 171. return self._render(context) File "venv/lib/python3.6/site-packages/django/template/base.py" in _render 163. return self.nodelist.render(context) File "venv/lib/python3.6/site-packages/django/template/base.py" in render 937. bit = node.render_annotated(context) File "venv/lib/python3.6/site-packages/django/template/base.py" in render_annotated 904. return self.render(context) File "venv/lib/python3.6/site-packages/django/template/loader_tags.py" in render 150. return compiled_parent._render(context) File "venv/lib/python3.6/site-packages/django/template/base.py" in _render 163. return self.nodelist.render(context) File "venv/lib/python3.6/site-packages/django/template/base.py" in render 937. bit = node.render_annotated(context) File "venv/lib/python3.6/site-packages/django/template/base.py" in render_annotated 904. return self.render(context) File "venv/lib/python3.6/site-packages/django/template/loader_tags.py" in render 150. return compiled_parent._render(context) File "venv/lib/python3.6/site-packages/django/template/base.py" in _render 163. return self.nodelist.render(context) File "venv/lib/python3.6/site-packages/django/template/base.py" in render 937. bit = node.render_annotated(context) File "venv/lib/python3.6/site-packages/django/template/base.py" in render_annotated 904. return self.render(context) File "venv/lib/python3.6/site-packages/django/template/loader_tags.py" in render 62. result = block.nodelist.render(context) File "venv/lib/python3.6/site-packages/django/template/base.py" in render 937. bit = node.render_annotated(context) File "venv/lib/python3.6/site-packages/django/template/base.py" in render_annotated 904. return self.render(context) File "venv/lib/python3.6/site-packages/django/template/loader_tags.py" in render 62. result = block.nodelist.render(context) File "venv/lib/python3.6/site-packages/django/template/base.py" in render 937. bit = node.render_annotated(context) File "venv/lib/python3.6/site-packages/django/template/base.py" in render_annotated 904. return self.render(context) File "venv/lib/python3.6/site-packages/django/template/defaulttags.py" in render 309. return nodelist.render(context) File "venv/lib/python3.6/site-packages/django/template/base.py" in render 937. bit = node.render_annotated(context) File "venv/lib/python3.6/site-packages/django/template/base.py" in render_annotated 904. return self.render(context) File "venv/lib/python3.6/site-packages/django/template/defaulttags.py" in render 309. return nodelist.render(context) File "venv/lib/python3.6/site-packages/django/template/base.py" in render 937. bit = node.render_annotated(context) File "venv/lib/python3.6/site-packages/django/template/base.py" in render_annotated 904. return self.render(context) File "venv/lib/python3.6/site-packages/django/template/base.py" in render 987. output = self.filter_expression.resolve(context) File "venv/lib/python3.6/site-packages/django/template/base.py" in resolve 671. obj = self.var.resolve(context) File "venv/lib/python3.6/site-packages/django/template/base.py" in resolve 796. value = self._resolve_lookup(context) File "venv/lib/python3.6/site-packages/django/template/base.py" in _resolve_lookup 837. current = getattr(current, bit) File "venv/lib/python3.6/site-packages/django/db/models/fields/files.py" in url 61. self._require_file() File "venv/lib/python3.6/site-packages/django/db/models/fields/files.py" in _require_file 38. raise ValueError("The '%s' attribute has no file associated with it." % self.field.name) Exception Type: ValueError at /conference/talk/8LMBGP/ Exception Value: The 'resource' attribute has no file associated with it. Request information: USER: AnonymousUser ```
[ { "content": "import re\nimport statistics\nimport string\nimport uuid\nimport warnings\nfrom contextlib import suppress\nfrom datetime import timedelta\n\nfrom django.conf import settings\nfrom django.db import models\nfrom django.db.models.fields.files import FieldFile\nfrom django.utils.crypto import get_random_string\nfrom django.utils.functional import cached_property\nfrom django.utils.timezone import now\nfrom django.utils.translation import pgettext, ugettext_lazy as _\n\nfrom pretalx.common.choices import Choices\nfrom pretalx.common.mixins import LogMixin\nfrom pretalx.common.phrases import phrases\nfrom pretalx.common.urls import EventUrls\nfrom pretalx.mail.context import template_context_from_submission\nfrom pretalx.mail.models import QueuedMail\nfrom pretalx.submission.signals import submission_state_change\n\nINSTANCE_IDENTIFIER = None\nwith suppress(Exception):\n from pretalx.common.models.settings import GlobalSettings\n INSTANCE_IDENTIFIER = GlobalSettings().get_instance_identifier()\n\n\ndef generate_invite_code(length=32):\n return get_random_string(length=length, allowed_chars=Submission.CODE_CHARSET)\n\n\nclass SubmissionError(Exception):\n pass\n\n\ndef submission_image_path(instance, filename):\n return f'{instance.event.slug}/images/{instance.code}/{filename}'\n\n\nclass SubmissionStates(Choices):\n SUBMITTED = 'submitted'\n ACCEPTED = 'accepted'\n REJECTED = 'rejected'\n CONFIRMED = 'confirmed'\n CANCELED = 'canceled'\n WITHDRAWN = 'withdrawn'\n DELETED = 'deleted'\n\n valid_choices = [\n (SUBMITTED, _('submitted')),\n (ACCEPTED, _('accepted')),\n (CONFIRMED, _('confirmed')),\n (REJECTED, _('rejected')),\n (CANCELED, _('canceled')),\n (WITHDRAWN, _('withdrawn')),\n (DELETED, _('deleted')),\n ]\n\n valid_next_states = {\n SUBMITTED: (REJECTED, WITHDRAWN, ACCEPTED),\n REJECTED: (ACCEPTED, SUBMITTED),\n ACCEPTED: (CONFIRMED, CANCELED, REJECTED, SUBMITTED),\n CONFIRMED: (ACCEPTED, CANCELED),\n CANCELED: (ACCEPTED, CONFIRMED),\n WITHDRAWN: (SUBMITTED),\n DELETED: tuple(),\n }\n\n method_names = {\n SUBMITTED: 'make_submitted',\n REJECTED: 'reject',\n ACCEPTED: 'accept',\n CONFIRMED: 'confirm',\n CANCELED: 'cancel',\n WITHDRAWN: 'withdraw',\n DELETED: 'remove',\n }\n\n\nclass SubmissionManager(models.Manager):\n def get_queryset(self):\n return super().get_queryset().exclude(state=SubmissionStates.DELETED)\n\n\nclass DeletedSubmissionManager(models.Manager):\n def get_queryset(self):\n return super().get_queryset().filter(state=SubmissionStates.DELETED)\n\n\nclass AllSubmissionManager(models.Manager):\n pass\n\n\nclass Submission(LogMixin, models.Model):\n code = models.CharField(max_length=16, unique=True)\n speakers = models.ManyToManyField(\n to='person.User', related_name='submissions', blank=True\n )\n event = models.ForeignKey(\n to='event.Event', on_delete=models.PROTECT, related_name='submissions'\n )\n title = models.CharField(max_length=200, verbose_name=_('Title'))\n submission_type = models.ForeignKey( # Reasonable default must be set in form/view\n to='submission.SubmissionType',\n related_name='submissions',\n on_delete=models.PROTECT,\n verbose_name=_('Submission type'),\n )\n track = models.ForeignKey(\n to='submission.Track',\n related_name='submissions',\n on_delete=models.PROTECT,\n verbose_name=_('Track'),\n null=True,\n blank=True,\n )\n state = models.CharField(\n max_length=SubmissionStates.get_max_length(),\n choices=SubmissionStates.get_choices(),\n default=SubmissionStates.SUBMITTED,\n verbose_name=_('Submission state'),\n )\n abstract = models.TextField(\n null=True,\n blank=True,\n verbose_name=_('Abstract'),\n help_text=phrases.base.use_markdown,\n )\n description = models.TextField(\n null=True,\n blank=True,\n verbose_name=_('Description'),\n help_text=phrases.base.use_markdown,\n )\n notes = models.TextField(\n null=True, blank=True, verbose_name=_('Notes'),\n help_text=_('These notes are meant for the organiser and won\\'t be made public.'),\n )\n internal_notes = models.TextField(\n null=True,\n blank=True,\n verbose_name=_('Internal notes'),\n help_text=_('Internal notes for other organisers/reviewers. Not visible to the speakers or the public.')\n )\n duration = models.PositiveIntegerField(\n null=True,\n blank=True,\n verbose_name=_('Duration'),\n help_text=_(\n 'The duration in minutes. Leave empty for default duration for this submission type.'\n ),\n )\n slot_count = models.PositiveIntegerField(\n default=1,\n verbose_name=_('Slot Count'),\n help_text=_(\n 'How many times this talk will be held.'\n ),\n )\n content_locale = models.CharField(\n max_length=32,\n default=settings.LANGUAGE_CODE,\n choices=settings.LANGUAGES,\n verbose_name=_('Language'),\n )\n is_featured = models.BooleanField(\n default=False,\n verbose_name=_(\n 'Show this talk on the public sneak peek page, if the sneak peek page is enabled and the talk was accepted.'\n ),\n )\n do_not_record = models.BooleanField(\n default=False, verbose_name=_('Don\\'t record this talk.')\n )\n image = models.ImageField(\n null=True,\n blank=True,\n upload_to=submission_image_path,\n verbose_name=_('Talk image'),\n help_text=_('Use this if you want an illustration to go with your submission.'),\n )\n recording_url = models.CharField(\n max_length=200, null=True, blank=True, verbose_name=_('Recording URL')\n )\n recording_source = models.CharField(\n choices=(('VOC', 'media.ccc.de'),),\n max_length=3,\n null=True,\n blank=True,\n verbose_name=_('Recording Source'),\n )\n invitation_token = models.CharField(max_length=32, default=generate_invite_code)\n review_code = models.CharField(\n max_length=32, unique=True, null=True, blank=True, default=generate_invite_code\n )\n CODE_CHARSET = list('ABCDEFGHJKLMNPQRSTUVWXYZ3789')\n\n objects = SubmissionManager()\n deleted_objects = DeletedSubmissionManager()\n all_objects = AllSubmissionManager()\n\n class urls(EventUrls):\n user_base = '{self.event.urls.user_submissions}{self.code}/'\n withdraw = '{user_base}withdraw'\n confirm = '{user_base}confirm'\n public_base = '{self.event.urls.base}talk/{self.code}'\n public = '{public_base}/'\n feedback = '{public}feedback/'\n ical = '{public_base}.ics'\n image = '{self.image_url}'\n invite = '{user_base}invite'\n accept_invitation = (\n '{self.event.urls.base}invitation/{self.code}/{self.invitation_token}'\n )\n review = '{self.event.urls.base}talk/review/{self.review_code}'\n\n class orga_urls(EventUrls):\n base = edit = '{self.event.orga_urls.submissions}{self.code}/'\n make_submitted = '{base}submit'\n accept = '{base}accept'\n reject = '{base}reject'\n confirm = '{base}confirm'\n delete = '{base}delete'\n withdraw = '{base}withdraw'\n cancel = '{base}cancel'\n speakers = '{base}speakers/'\n new_speaker = '{speakers}add'\n delete_speaker = '{speakers}delete'\n reviews = '{base}reviews/'\n feedback = '{base}feedback/'\n toggle_featured = '{base}toggle_featured'\n quick_schedule = '{self.event.orga_urls.schedule}quick/{self.code}/'\n\n @property\n def image_url(self):\n return self.image.url if self.image else ''\n\n def assign_code(self, length=6):\n # This omits some character pairs completely because they are hard to read even on screens (1/I and O/0)\n # and includes only one of two characters for some pairs because they are sometimes hard to distinguish in\n # handwriting (2/Z, 4/A, 5/S, 6/G).\n while True:\n code = get_random_string(length=length, allowed_chars=self.CODE_CHARSET)\n if not Submission.objects.filter(code__iexact=code).exists():\n self.code = code\n return\n\n def save(self, *args, **kwargs):\n if not self.code:\n self.assign_code()\n super().save(*args, **kwargs)\n\n @property\n def editable(self):\n if self.state == SubmissionStates.SUBMITTED:\n return self.event.cfp.is_open or (self.event.active_review_phase and self.event.active_review_phase.speakers_can_change_submissions)\n return self.state in (SubmissionStates.ACCEPTED, SubmissionStates.CONFIRMED)\n\n def get_duration(self):\n if self.duration is None:\n return self.submission_type.default_duration\n return self.duration\n\n def update_duration(self):\n for slot in self.event.wip_schedule.talks.filter(\n submission=self, start__isnull=False\n ):\n slot.end = slot.start + timedelta(minutes=self.get_duration())\n slot.save()\n\n def _set_state(self, new_state, force=False, person=None):\n \"\"\"\n Check if the new state is valid for this Submission (based on SubmissionStates.valid_next_states).\n\n If yes, set it and save the object. if no, raise a SubmissionError with a helpful message.\n \"\"\"\n valid_next_states = SubmissionStates.valid_next_states.get(self.state, [])\n\n if self.state == new_state:\n self.update_talk_slots()\n return\n if force or new_state in valid_next_states:\n old_state = self.state\n self.state = new_state\n self.save(update_fields=['state'])\n self.update_talk_slots()\n submission_state_change.send_robust(\n self.event, submission=self, old_state=old_state, user=person\n )\n else:\n source_states = (\n src\n for src, dsts in SubmissionStates.valid_next_states.items()\n if new_state in dsts\n )\n\n # build an error message mentioning all states, which are valid source states for the desired new state.\n trans_or = pgettext(\n 'used in talk confirm/accept/reject/...-errors, like \"... must be accepted OR foo OR bar ...\"',\n ' or ',\n )\n state_names = dict(SubmissionStates.get_choices())\n source_states = trans_or.join(\n str(state_names[state]) for state in source_states\n )\n raise SubmissionError(\n _(\n 'Submission must be {src_states} not {state} to be {new_state}.'\n ).format(\n src_states=source_states, state=self.state, new_state=new_state\n )\n )\n\n def update_talk_slots(self):\n from pretalx.schedule.models import TalkSlot\n\n if self.state not in [SubmissionStates.ACCEPTED, SubmissionStates.CONFIRMED]:\n TalkSlot.objects.filter(\n submission=self, schedule=self.event.wip_schedule\n ).delete()\n return\n\n slot_count_current = TalkSlot.objects.filter(\n submission=self,\n schedule=self.event.wip_schedule,\n ).count()\n diff = slot_count_current - self.slot_count\n\n if diff > 0:\n # We build a list of all IDs to delete as .delete() doesn't work on sliced querysets.\n # We delete unscheduled talks first.\n talks_to_delete = TalkSlot.objects.filter(\n submission=self,\n schedule=self.event.wip_schedule,\n room__isnull=True,\n start__isnull=True,\n ).order_by('start', 'is_visible')[:diff].values_list(\"id\", flat=True)\n TalkSlot.objects.filter(pk__in=list(talks_to_delete)).delete()\n elif diff < 0:\n for index in range(abs(diff)):\n TalkSlot.objects.create(\n submission=self,\n schedule=self.event.wip_schedule,\n )\n\n def make_submitted(self, person=None, force=False, orga=False):\n self._set_state(SubmissionStates.SUBMITTED, force, person=person)\n\n def confirm(self, person=None, force=False, orga=False):\n self._set_state(SubmissionStates.CONFIRMED, force, person=person)\n self.log_action('pretalx.submission.confirm', person=person, orga=orga)\n\n def accept(self, person=None, force=False, orga=True):\n previous = self.state\n self._set_state(SubmissionStates.ACCEPTED, force, person=person)\n self.log_action('pretalx.submission.accept', person=person, orga=True)\n\n if previous != SubmissionStates.CONFIRMED:\n for speaker in self.speakers.all():\n self.event.accept_template.to_mail(\n user=speaker,\n event=self.event,\n context=template_context_from_submission(self),\n locale=self.content_locale,\n )\n\n def reject(self, person=None, force=False, orga=True):\n self._set_state(SubmissionStates.REJECTED, force, person=person)\n self.log_action('pretalx.submission.reject', person=person, orga=True)\n\n for speaker in self.speakers.all():\n self.event.reject_template.to_mail(\n user=speaker,\n event=self.event,\n context=template_context_from_submission(self),\n locale=self.content_locale,\n )\n\n def cancel(self, person=None, force=False, orga=True):\n self._set_state(SubmissionStates.CANCELED, force, person=person)\n self.log_action('pretalx.submission.cancel', person=person, orga=True)\n\n def withdraw(self, person=None, force=False, orga=False):\n self._set_state(SubmissionStates.WITHDRAWN, force, person=person)\n self.log_action('pretalx.submission.withdraw', person=person, orga=orga)\n\n def remove(self, person=None, force=False, orga=True):\n self._set_state(SubmissionStates.DELETED, force, person=person)\n for answer in self.answers.all():\n answer.remove(person=person, force=force)\n self.log_action('pretalx.submission.deleted', person=person, orga=True)\n\n @cached_property\n def uuid(self):\n global INSTANCE_IDENTIFIER\n if not INSTANCE_IDENTIFIER:\n from pretalx.common.models.settings import GlobalSettings\n INSTANCE_IDENTIFIER = GlobalSettings().get_instance_identifier()\n return uuid.uuid5(INSTANCE_IDENTIFIER, self.code)\n\n @cached_property\n def frab_slug(self):\n title = re.sub(r'\\W+', '-', self.title)\n legal_chars = string.ascii_letters + string.digits + '-'\n pattern = f'[^{legal_chars}]+'\n title = re.sub(pattern, '', title)\n title = title.lower()\n title = title.strip('_')\n return f'{self.event.slug}-{self.pk}-{title}'\n\n @cached_property\n def integer_uuid(self):\n # For import into Engelsystem, we need to somehow convert our submission code into an unique integer. Luckily,\n # codes can contain 34 different characters (including compatibility with frab imported data) and normally have\n # 6 charactes. Since log2(34 **6) == 30.52, that just fits in to a positive 32-bit signed integer (that\n # Engelsystem expects), if we do it correctly.\n charset = self.CODE_CHARSET + [\n '1',\n '2',\n '4',\n '5',\n '6',\n '0',\n ] # compatibility with imported frab data\n base = len(charset)\n table = {char: i for i, char in enumerate(charset)}\n\n intval = 0\n for char in self.code:\n intval *= base\n intval += table[char]\n return intval\n\n @cached_property\n def slot(self):\n return (\n self.event.current_schedule.talks.filter(submission=self).first()\n if self.event.current_schedule\n else None\n )\n\n @cached_property\n def display_speaker_names(self):\n return ', '.join(speaker.get_display_name() for speaker in self.speakers.all())\n\n @cached_property\n def does_accept_feedback(self):\n slot = self.slot\n if slot and slot.start:\n end = slot.end or slot.start + slot.submission.get_duration()\n return end < now()\n return False\n\n @cached_property\n def rendered_recording_iframe(self):\n if self.recording_url and self.recording_source:\n warnings.warn(\n 'Please use a recording source plugin instead of pretalx core functionality.',\n DeprecationWarning,\n )\n from django.template import engines\n\n django_engine = engines['django']\n template = django_engine.from_string(\n '<div class=\"embed-responsive embed-responsive-16by9\"><iframe src=\"{{ url }}\" frameborder=\"0\" allowfullscreen></iframe></div>'\n )\n return template.render(context={'url': self.recording_url})\n\n @cached_property\n def median_score(self):\n scores = [r.score for r in self.reviews.all() if r.score is not None]\n return statistics.median(scores) if scores else None\n\n @cached_property\n def active_resources(self):\n return self.resources.filter(resource__isnull=False)\n\n @property\n def is_deleted(self):\n return self.state == SubmissionStates.DELETED\n\n def __str__(self):\n \"\"\"Help when debugging.\"\"\"\n return f'Submission(event={self.event.slug}, code={self.code}, title={self.title}, state={self.state})'\n\n @cached_property\n def export_duration(self):\n from pretalx.common.serialize import serialize_duration\n\n return serialize_duration(minutes=self.get_duration())\n\n @cached_property\n def speaker_profiles(self):\n from pretalx.person.models.profile import SpeakerProfile\n\n return SpeakerProfile.objects.filter(\n event=self.event, user__in=self.speakers.all()\n )\n\n @property\n def availabilities(self):\n from pretalx.schedule.models.availability import Availability\n\n all_availabilities = self.event.availabilities.filter(\n person__in=self.speaker_profiles\n )\n return Availability.intersection(all_availabilities)\n\n @cached_property\n def created(self):\n return getattr(\n self.logged_actions().order_by('timestamp').first(), 'timestamp', None\n )\n\n def get_content_for_mail(self):\n order = ['title', 'abstract', 'description', 'notes', 'duration', 'content_locale', 'do_not_record', 'image']\n data = []\n result = ''\n for field in order:\n field_content = getattr(self, field, None)\n if field_content:\n _field = self._meta.get_field(field)\n field_name = _field.verbose_name or _field.name\n data.append({'name': field_name, 'value': field_content})\n for answer in self.answers.all():\n if answer.answer:\n data.append({'name': answer.question.question, 'value': answer.answer})\n elif answer.answer_file:\n data.append({'name': answer.question.question, 'value': answer.answer_file})\n for content in data:\n field_name = content['name']\n field_content = content['value']\n if isinstance(field_content, bool):\n field_content = _('Yes') if field_content else _('No')\n elif isinstance(field_content, FieldFile):\n field_content = (self.event.settings.custom_domain or settings.SITE_URL) + field_content.url\n result += f'**{field_name}**: {field_content}\\n\\n'\n return result\n\n def send_invite(self, to, _from=None, subject=None, text=None):\n if not _from and (not subject or not text):\n raise Exception('Please tell me how to sign this invitation.')\n\n subject = subject or _('{speaker} invites you to join their talk!').format(\n speaker=_from.get_display_name()\n )\n subject = f'[{self.event.slug}] {subject}'\n text = text or _(\n '''Hi!\n\nI'd like to invite you to be a speaker in the talk\n\n “{title}”\n\nat {event}. Please follow this link to join:\n\n {url}\n\nI'm looking forward to it!\n{speaker}'''\n ).format(\n event=self.event.name,\n title=self.title,\n url=self.urls.accept_invitation.full(),\n speaker=_from.get_display_name(),\n )\n to = to.split(',') if isinstance(to, str) else to\n for invite in to:\n QueuedMail(\n event=self.event,\n to=invite,\n subject=subject,\n text=text,\n ).send()\n", "path": "src/pretalx/submission/models/submission.py" } ]
[ { "content": "import re\nimport statistics\nimport string\nimport uuid\nimport warnings\nfrom contextlib import suppress\nfrom datetime import timedelta\n\nfrom django.conf import settings\nfrom django.db import models\nfrom django.db.models.fields.files import FieldFile\nfrom django.utils.crypto import get_random_string\nfrom django.utils.functional import cached_property\nfrom django.utils.timezone import now\nfrom django.utils.translation import pgettext, ugettext_lazy as _\n\nfrom pretalx.common.choices import Choices\nfrom pretalx.common.mixins import LogMixin\nfrom pretalx.common.phrases import phrases\nfrom pretalx.common.urls import EventUrls\nfrom pretalx.mail.context import template_context_from_submission\nfrom pretalx.mail.models import QueuedMail\nfrom pretalx.submission.signals import submission_state_change\n\nINSTANCE_IDENTIFIER = None\nwith suppress(Exception):\n from pretalx.common.models.settings import GlobalSettings\n INSTANCE_IDENTIFIER = GlobalSettings().get_instance_identifier()\n\n\ndef generate_invite_code(length=32):\n return get_random_string(length=length, allowed_chars=Submission.CODE_CHARSET)\n\n\nclass SubmissionError(Exception):\n pass\n\n\ndef submission_image_path(instance, filename):\n return f'{instance.event.slug}/images/{instance.code}/{filename}'\n\n\nclass SubmissionStates(Choices):\n SUBMITTED = 'submitted'\n ACCEPTED = 'accepted'\n REJECTED = 'rejected'\n CONFIRMED = 'confirmed'\n CANCELED = 'canceled'\n WITHDRAWN = 'withdrawn'\n DELETED = 'deleted'\n\n valid_choices = [\n (SUBMITTED, _('submitted')),\n (ACCEPTED, _('accepted')),\n (CONFIRMED, _('confirmed')),\n (REJECTED, _('rejected')),\n (CANCELED, _('canceled')),\n (WITHDRAWN, _('withdrawn')),\n (DELETED, _('deleted')),\n ]\n\n valid_next_states = {\n SUBMITTED: (REJECTED, WITHDRAWN, ACCEPTED),\n REJECTED: (ACCEPTED, SUBMITTED),\n ACCEPTED: (CONFIRMED, CANCELED, REJECTED, SUBMITTED),\n CONFIRMED: (ACCEPTED, CANCELED),\n CANCELED: (ACCEPTED, CONFIRMED),\n WITHDRAWN: (SUBMITTED),\n DELETED: tuple(),\n }\n\n method_names = {\n SUBMITTED: 'make_submitted',\n REJECTED: 'reject',\n ACCEPTED: 'accept',\n CONFIRMED: 'confirm',\n CANCELED: 'cancel',\n WITHDRAWN: 'withdraw',\n DELETED: 'remove',\n }\n\n\nclass SubmissionManager(models.Manager):\n def get_queryset(self):\n return super().get_queryset().exclude(state=SubmissionStates.DELETED)\n\n\nclass DeletedSubmissionManager(models.Manager):\n def get_queryset(self):\n return super().get_queryset().filter(state=SubmissionStates.DELETED)\n\n\nclass AllSubmissionManager(models.Manager):\n pass\n\n\nclass Submission(LogMixin, models.Model):\n code = models.CharField(max_length=16, unique=True)\n speakers = models.ManyToManyField(\n to='person.User', related_name='submissions', blank=True\n )\n event = models.ForeignKey(\n to='event.Event', on_delete=models.PROTECT, related_name='submissions'\n )\n title = models.CharField(max_length=200, verbose_name=_('Title'))\n submission_type = models.ForeignKey( # Reasonable default must be set in form/view\n to='submission.SubmissionType',\n related_name='submissions',\n on_delete=models.PROTECT,\n verbose_name=_('Submission type'),\n )\n track = models.ForeignKey(\n to='submission.Track',\n related_name='submissions',\n on_delete=models.PROTECT,\n verbose_name=_('Track'),\n null=True,\n blank=True,\n )\n state = models.CharField(\n max_length=SubmissionStates.get_max_length(),\n choices=SubmissionStates.get_choices(),\n default=SubmissionStates.SUBMITTED,\n verbose_name=_('Submission state'),\n )\n abstract = models.TextField(\n null=True,\n blank=True,\n verbose_name=_('Abstract'),\n help_text=phrases.base.use_markdown,\n )\n description = models.TextField(\n null=True,\n blank=True,\n verbose_name=_('Description'),\n help_text=phrases.base.use_markdown,\n )\n notes = models.TextField(\n null=True, blank=True, verbose_name=_('Notes'),\n help_text=_('These notes are meant for the organiser and won\\'t be made public.'),\n )\n internal_notes = models.TextField(\n null=True,\n blank=True,\n verbose_name=_('Internal notes'),\n help_text=_('Internal notes for other organisers/reviewers. Not visible to the speakers or the public.')\n )\n duration = models.PositiveIntegerField(\n null=True,\n blank=True,\n verbose_name=_('Duration'),\n help_text=_(\n 'The duration in minutes. Leave empty for default duration for this submission type.'\n ),\n )\n slot_count = models.PositiveIntegerField(\n default=1,\n verbose_name=_('Slot Count'),\n help_text=_(\n 'How many times this talk will be held.'\n ),\n )\n content_locale = models.CharField(\n max_length=32,\n default=settings.LANGUAGE_CODE,\n choices=settings.LANGUAGES,\n verbose_name=_('Language'),\n )\n is_featured = models.BooleanField(\n default=False,\n verbose_name=_(\n 'Show this talk on the public sneak peek page, if the sneak peek page is enabled and the talk was accepted.'\n ),\n )\n do_not_record = models.BooleanField(\n default=False, verbose_name=_('Don\\'t record this talk.')\n )\n image = models.ImageField(\n null=True,\n blank=True,\n upload_to=submission_image_path,\n verbose_name=_('Talk image'),\n help_text=_('Use this if you want an illustration to go with your submission.'),\n )\n recording_url = models.CharField(\n max_length=200, null=True, blank=True, verbose_name=_('Recording URL')\n )\n recording_source = models.CharField(\n choices=(('VOC', 'media.ccc.de'),),\n max_length=3,\n null=True,\n blank=True,\n verbose_name=_('Recording Source'),\n )\n invitation_token = models.CharField(max_length=32, default=generate_invite_code)\n review_code = models.CharField(\n max_length=32, unique=True, null=True, blank=True, default=generate_invite_code\n )\n CODE_CHARSET = list('ABCDEFGHJKLMNPQRSTUVWXYZ3789')\n\n objects = SubmissionManager()\n deleted_objects = DeletedSubmissionManager()\n all_objects = AllSubmissionManager()\n\n class urls(EventUrls):\n user_base = '{self.event.urls.user_submissions}{self.code}/'\n withdraw = '{user_base}withdraw'\n confirm = '{user_base}confirm'\n public_base = '{self.event.urls.base}talk/{self.code}'\n public = '{public_base}/'\n feedback = '{public}feedback/'\n ical = '{public_base}.ics'\n image = '{self.image_url}'\n invite = '{user_base}invite'\n accept_invitation = (\n '{self.event.urls.base}invitation/{self.code}/{self.invitation_token}'\n )\n review = '{self.event.urls.base}talk/review/{self.review_code}'\n\n class orga_urls(EventUrls):\n base = edit = '{self.event.orga_urls.submissions}{self.code}/'\n make_submitted = '{base}submit'\n accept = '{base}accept'\n reject = '{base}reject'\n confirm = '{base}confirm'\n delete = '{base}delete'\n withdraw = '{base}withdraw'\n cancel = '{base}cancel'\n speakers = '{base}speakers/'\n new_speaker = '{speakers}add'\n delete_speaker = '{speakers}delete'\n reviews = '{base}reviews/'\n feedback = '{base}feedback/'\n toggle_featured = '{base}toggle_featured'\n quick_schedule = '{self.event.orga_urls.schedule}quick/{self.code}/'\n\n @property\n def image_url(self):\n return self.image.url if self.image else ''\n\n def assign_code(self, length=6):\n # This omits some character pairs completely because they are hard to read even on screens (1/I and O/0)\n # and includes only one of two characters for some pairs because they are sometimes hard to distinguish in\n # handwriting (2/Z, 4/A, 5/S, 6/G).\n while True:\n code = get_random_string(length=length, allowed_chars=self.CODE_CHARSET)\n if not Submission.objects.filter(code__iexact=code).exists():\n self.code = code\n return\n\n def save(self, *args, **kwargs):\n if not self.code:\n self.assign_code()\n super().save(*args, **kwargs)\n\n @property\n def editable(self):\n if self.state == SubmissionStates.SUBMITTED:\n return self.event.cfp.is_open or (self.event.active_review_phase and self.event.active_review_phase.speakers_can_change_submissions)\n return self.state in (SubmissionStates.ACCEPTED, SubmissionStates.CONFIRMED)\n\n def get_duration(self):\n if self.duration is None:\n return self.submission_type.default_duration\n return self.duration\n\n def update_duration(self):\n for slot in self.event.wip_schedule.talks.filter(\n submission=self, start__isnull=False\n ):\n slot.end = slot.start + timedelta(minutes=self.get_duration())\n slot.save()\n\n def _set_state(self, new_state, force=False, person=None):\n \"\"\"\n Check if the new state is valid for this Submission (based on SubmissionStates.valid_next_states).\n\n If yes, set it and save the object. if no, raise a SubmissionError with a helpful message.\n \"\"\"\n valid_next_states = SubmissionStates.valid_next_states.get(self.state, [])\n\n if self.state == new_state:\n self.update_talk_slots()\n return\n if force or new_state in valid_next_states:\n old_state = self.state\n self.state = new_state\n self.save(update_fields=['state'])\n self.update_talk_slots()\n submission_state_change.send_robust(\n self.event, submission=self, old_state=old_state, user=person\n )\n else:\n source_states = (\n src\n for src, dsts in SubmissionStates.valid_next_states.items()\n if new_state in dsts\n )\n\n # build an error message mentioning all states, which are valid source states for the desired new state.\n trans_or = pgettext(\n 'used in talk confirm/accept/reject/...-errors, like \"... must be accepted OR foo OR bar ...\"',\n ' or ',\n )\n state_names = dict(SubmissionStates.get_choices())\n source_states = trans_or.join(\n str(state_names[state]) for state in source_states\n )\n raise SubmissionError(\n _(\n 'Submission must be {src_states} not {state} to be {new_state}.'\n ).format(\n src_states=source_states, state=self.state, new_state=new_state\n )\n )\n\n def update_talk_slots(self):\n from pretalx.schedule.models import TalkSlot\n\n if self.state not in [SubmissionStates.ACCEPTED, SubmissionStates.CONFIRMED]:\n TalkSlot.objects.filter(\n submission=self, schedule=self.event.wip_schedule\n ).delete()\n return\n\n slot_count_current = TalkSlot.objects.filter(\n submission=self,\n schedule=self.event.wip_schedule,\n ).count()\n diff = slot_count_current - self.slot_count\n\n if diff > 0:\n # We build a list of all IDs to delete as .delete() doesn't work on sliced querysets.\n # We delete unscheduled talks first.\n talks_to_delete = TalkSlot.objects.filter(\n submission=self,\n schedule=self.event.wip_schedule,\n room__isnull=True,\n start__isnull=True,\n ).order_by('start', 'is_visible')[:diff].values_list(\"id\", flat=True)\n TalkSlot.objects.filter(pk__in=list(talks_to_delete)).delete()\n elif diff < 0:\n for index in range(abs(diff)):\n TalkSlot.objects.create(\n submission=self,\n schedule=self.event.wip_schedule,\n )\n\n def make_submitted(self, person=None, force=False, orga=False):\n self._set_state(SubmissionStates.SUBMITTED, force, person=person)\n\n def confirm(self, person=None, force=False, orga=False):\n self._set_state(SubmissionStates.CONFIRMED, force, person=person)\n self.log_action('pretalx.submission.confirm', person=person, orga=orga)\n\n def accept(self, person=None, force=False, orga=True):\n previous = self.state\n self._set_state(SubmissionStates.ACCEPTED, force, person=person)\n self.log_action('pretalx.submission.accept', person=person, orga=True)\n\n if previous != SubmissionStates.CONFIRMED:\n for speaker in self.speakers.all():\n self.event.accept_template.to_mail(\n user=speaker,\n event=self.event,\n context=template_context_from_submission(self),\n locale=self.content_locale,\n )\n\n def reject(self, person=None, force=False, orga=True):\n self._set_state(SubmissionStates.REJECTED, force, person=person)\n self.log_action('pretalx.submission.reject', person=person, orga=True)\n\n for speaker in self.speakers.all():\n self.event.reject_template.to_mail(\n user=speaker,\n event=self.event,\n context=template_context_from_submission(self),\n locale=self.content_locale,\n )\n\n def cancel(self, person=None, force=False, orga=True):\n self._set_state(SubmissionStates.CANCELED, force, person=person)\n self.log_action('pretalx.submission.cancel', person=person, orga=True)\n\n def withdraw(self, person=None, force=False, orga=False):\n self._set_state(SubmissionStates.WITHDRAWN, force, person=person)\n self.log_action('pretalx.submission.withdraw', person=person, orga=orga)\n\n def remove(self, person=None, force=False, orga=True):\n self._set_state(SubmissionStates.DELETED, force, person=person)\n for answer in self.answers.all():\n answer.remove(person=person, force=force)\n self.log_action('pretalx.submission.deleted', person=person, orga=True)\n\n @cached_property\n def uuid(self):\n global INSTANCE_IDENTIFIER\n if not INSTANCE_IDENTIFIER:\n from pretalx.common.models.settings import GlobalSettings\n INSTANCE_IDENTIFIER = GlobalSettings().get_instance_identifier()\n return uuid.uuid5(INSTANCE_IDENTIFIER, self.code)\n\n @cached_property\n def frab_slug(self):\n title = re.sub(r'\\W+', '-', self.title)\n legal_chars = string.ascii_letters + string.digits + '-'\n pattern = f'[^{legal_chars}]+'\n title = re.sub(pattern, '', title)\n title = title.lower()\n title = title.strip('_')\n return f'{self.event.slug}-{self.pk}-{title}'\n\n @cached_property\n def integer_uuid(self):\n # For import into Engelsystem, we need to somehow convert our submission code into an unique integer. Luckily,\n # codes can contain 34 different characters (including compatibility with frab imported data) and normally have\n # 6 charactes. Since log2(34 **6) == 30.52, that just fits in to a positive 32-bit signed integer (that\n # Engelsystem expects), if we do it correctly.\n charset = self.CODE_CHARSET + [\n '1',\n '2',\n '4',\n '5',\n '6',\n '0',\n ] # compatibility with imported frab data\n base = len(charset)\n table = {char: i for i, char in enumerate(charset)}\n\n intval = 0\n for char in self.code:\n intval *= base\n intval += table[char]\n return intval\n\n @cached_property\n def slot(self):\n return (\n self.event.current_schedule.talks.filter(submission=self).first()\n if self.event.current_schedule\n else None\n )\n\n @cached_property\n def display_speaker_names(self):\n return ', '.join(speaker.get_display_name() for speaker in self.speakers.all())\n\n @cached_property\n def does_accept_feedback(self):\n slot = self.slot\n if slot and slot.start:\n end = slot.end or slot.start + slot.submission.get_duration()\n return end < now()\n return False\n\n @cached_property\n def rendered_recording_iframe(self):\n if self.recording_url and self.recording_source:\n warnings.warn(\n 'Please use a recording source plugin instead of pretalx core functionality.',\n DeprecationWarning,\n )\n from django.template import engines\n\n django_engine = engines['django']\n template = django_engine.from_string(\n '<div class=\"embed-responsive embed-responsive-16by9\"><iframe src=\"{{ url }}\" frameborder=\"0\" allowfullscreen></iframe></div>'\n )\n return template.render(context={'url': self.recording_url})\n\n @cached_property\n def median_score(self):\n scores = [r.score for r in self.reviews.all() if r.score is not None]\n return statistics.median(scores) if scores else None\n\n @cached_property\n def active_resources(self):\n return self.resources.exclude(resource=None).exclude(resource=\"\")\n\n @property\n def is_deleted(self):\n return self.state == SubmissionStates.DELETED\n\n def __str__(self):\n \"\"\"Help when debugging.\"\"\"\n return f'Submission(event={self.event.slug}, code={self.code}, title={self.title}, state={self.state})'\n\n @cached_property\n def export_duration(self):\n from pretalx.common.serialize import serialize_duration\n\n return serialize_duration(minutes=self.get_duration())\n\n @cached_property\n def speaker_profiles(self):\n from pretalx.person.models.profile import SpeakerProfile\n\n return SpeakerProfile.objects.filter(\n event=self.event, user__in=self.speakers.all()\n )\n\n @property\n def availabilities(self):\n from pretalx.schedule.models.availability import Availability\n\n all_availabilities = self.event.availabilities.filter(\n person__in=self.speaker_profiles\n )\n return Availability.intersection(all_availabilities)\n\n @cached_property\n def created(self):\n return getattr(\n self.logged_actions().order_by('timestamp').first(), 'timestamp', None\n )\n\n def get_content_for_mail(self):\n order = ['title', 'abstract', 'description', 'notes', 'duration', 'content_locale', 'do_not_record', 'image']\n data = []\n result = ''\n for field in order:\n field_content = getattr(self, field, None)\n if field_content:\n _field = self._meta.get_field(field)\n field_name = _field.verbose_name or _field.name\n data.append({'name': field_name, 'value': field_content})\n for answer in self.answers.all():\n if answer.answer:\n data.append({'name': answer.question.question, 'value': answer.answer})\n elif answer.answer_file:\n data.append({'name': answer.question.question, 'value': answer.answer_file})\n for content in data:\n field_name = content['name']\n field_content = content['value']\n if isinstance(field_content, bool):\n field_content = _('Yes') if field_content else _('No')\n elif isinstance(field_content, FieldFile):\n field_content = (self.event.settings.custom_domain or settings.SITE_URL) + field_content.url\n result += f'**{field_name}**: {field_content}\\n\\n'\n return result\n\n def send_invite(self, to, _from=None, subject=None, text=None):\n if not _from and (not subject or not text):\n raise Exception('Please tell me how to sign this invitation.')\n\n subject = subject or _('{speaker} invites you to join their talk!').format(\n speaker=_from.get_display_name()\n )\n subject = f'[{self.event.slug}] {subject}'\n text = text or _(\n '''Hi!\n\nI'd like to invite you to be a speaker in the talk\n\n “{title}”\n\nat {event}. Please follow this link to join:\n\n {url}\n\nI'm looking forward to it!\n{speaker}'''\n ).format(\n event=self.event.name,\n title=self.title,\n url=self.urls.accept_invitation.full(),\n speaker=_from.get_display_name(),\n )\n to = to.split(',') if isinstance(to, str) else to\n for invite in to:\n QueuedMail(\n event=self.event,\n to=invite,\n subject=subject,\n text=text,\n ).send()\n", "path": "src/pretalx/submission/models/submission.py" } ]
diff --git a/src/pretalx/submission/models/submission.py b/src/pretalx/submission/models/submission.py index b82aed2b61..e0bc5ca38a 100644 --- a/src/pretalx/submission/models/submission.py +++ b/src/pretalx/submission/models/submission.py @@ -476,7 +476,7 @@ def median_score(self): @cached_property def active_resources(self): - return self.resources.filter(resource__isnull=False) + return self.resources.exclude(resource=None).exclude(resource="") @property def is_deleted(self):
sktime__sktime-5710
[BUG] Irreproducible results with `MultiRocketMultivariate` `random_state` does guarantee the same results for each run. ```python rng = np.random.default_rng() X = pd.DataFrame([ pd.Series([ pd.Series(rng.integers(0, 10, 100)).astype(float), pd.Series(rng.integers(0, 10, 100)).astype(float), ]), pd.Series([ pd.Series(rng.integers(0, 10, 100)).astype(float), pd.Series(rng.integers(0, 10, 100)).astype(float), ]), ]) MultiRocketMultivariate(random_state=42, num_kernels=84).fit_transform(X) - MultiRocketMultivariate(random_state=42, num_kernels=84).fit_transform(X) ``` The output should always be a `DataFrame` of zeros, but this is not the case. <details> <!-- Please run the following code snippet and paste the output here: from sktime import show_versions; show_versions() --> System: python: 3.9.6 (default, Aug 11 2023, 19:44:49) [Clang 15.0.0 (clang-1500.0.40.1)] executable: /Users/temp/sktime/venv/bin/python machine: macOS-14.1.2-arm64-arm-64bit Python dependencies: pip: 23.3.1 sktime: 0.25.0 sklearn: 1.3.2 skbase: 0.4.6 numpy: 1.26.1 scipy: 1.11.3 pandas: 2.1.4 matplotlib: 3.8.1 joblib: 1.3.2 numba: 0.58.1 statsmodels: 0.14.0 pmdarima: None statsforecast: None tsfresh: 0.20.1 tslearn: 0.6.3 torch: 2.1.0 tensorflow: None tensorflow_probability: None </details> <!-- Thanks for contributing! -->
[ { "content": "import multiprocessing\n\nimport numpy as np\nimport pandas as pd\n\nfrom sktime.transformations.base import BaseTransformer\n\n\nclass MultiRocketMultivariate(BaseTransformer):\n \"\"\"Multi RandOm Convolutional KErnel Transform (MultiRocket).\n\n MultiRocket [1]_ is uses the same set of kernels as MiniRocket on both the raw\n series and the first order differenced series representation. It uses a different\n set of dilations and used for each representation. In addition to percentage of\n positive values (PPV) MultiRocket adds 3 pooling operators: Mean of Positive\n Values (MPV); Mean of Indices of Positive Values (MIPV); and Longest Stretch of\n Positive Values (LSPV). This version is the multivariate version.\n\n This transformer fits one set of paramereters per individual series,\n and applies the transform with fitted parameter i to the i-th series in transform.\n Vanilla use requires same number of series in fit and transform.\n\n To fit and transform series at the same time,\n without an identification of fit/transform instances,\n wrap this transformer in ``FitInTransform``,\n from ``sktime.transformations.compose``.\n\n Parameters\n ----------\n num_kernels : int, default=6,250\n number of random convolutional kernels. The calculated number of features is the\n nearest multiple of n_features_per_kernel(default 4)*84=336 < 50,000\n (2*n_features_per_kernel(default 4)*num_kernels(default 6,250)).\n max_dilations_per_kernel : int, default=32\n maximum number of dilations per kernel.\n n_features_per_kernel : int, default =4\n number of features per kernel.\n normalise : bool, default False\n n_jobs : int, default=1\n The number of jobs to run in parallel for `transform`. ``-1`` means using all\n processors.\n random_state : None or int, default = None\n\n Attributes\n ----------\n parameter : tuple\n parameter (dilations, num_features_per_dilation, biases) for\n transformation of input X\n parameter1 : tuple\n parameter (dilations, num_features_per_dilation, biases) for\n transformation of input X1 = np.diff(X, 1)\n\n See Also\n --------\n MultiRocketMultivariate, MiniRocket, MiniRocketMultivariate, Rocket\n\n References\n ----------\n .. [1] Tan, Chang Wei and Dempster, Angus and Bergmeir, Christoph and\n Webb, Geoffrey I, \"MultiRocket: Multiple pooling operators and transformations\n for fast and effective time series classification\",2022,\n https://link.springer.com/article/10.1007/s10618-022-00844-1\n https://arxiv.org/abs/2102.00457\n\n Examples\n --------\n >>> from sktime.transformations.panel.rocket import Rocket\n >>> from sktime.datasets import load_basic_motions\n >>> X_train, y_train = load_basic_motions(split=\"train\") # doctest: +SKIP\n >>> X_test, y_test = load_basic_motions(split=\"test\") # doctest: +SKIP\n >>> trf = MultiRocketMultivariate(num_kernels=512) # doctest: +SKIP\n >>> trf.fit(X_train) # doctest: +SKIP\n MultiRocketMultivariate(...)\n >>> X_train = trf.transform(X_train) # doctest: +SKIP\n >>> X_test = trf.transform(X_test) # doctest: +SKIP\n \"\"\"\n\n _tags = {\n \"univariate-only\": False,\n \"fit_is_empty\": False,\n \"scitype:transform-input\": \"Series\",\n # what is the scitype of X: Series, or Panel\n \"scitype:transform-output\": \"Primitives\",\n # what is the scitype of y: None (not needed), Primitives, Series, Panel\n \"scitype:instancewise\": False, # is this an instance-wise transform?\n \"X_inner_mtype\": \"numpy3D\", # which mtypes do _fit/_predict support for X?\n \"y_inner_mtype\": \"None\", # which mtypes do _fit/_predict support for X?\n \"python_dependencies\": \"numba\",\n }\n\n def __init__(\n self,\n num_kernels=6_250,\n max_dilations_per_kernel=32,\n n_features_per_kernel=4,\n normalise=False,\n n_jobs=1,\n random_state=None,\n ):\n self.max_dilations_per_kernel = max_dilations_per_kernel\n self.n_features_per_kernel = n_features_per_kernel\n self.num_kernels = num_kernels\n self.normalise = normalise\n self.n_jobs = n_jobs\n self.random_state = random_state if isinstance(random_state, int) else None\n\n self.parameter = None\n self.parameter1 = None\n\n super().__init__()\n\n def _fit(self, X, y=None):\n \"\"\"Fit dilations and biases to input time series.\n\n Parameters\n ----------\n X : 3D np.ndarray of shape = [n_instances, n_dimensions, series_length]\n panel of time series to transform\n y : ignored argument for interface compatibility\n\n Returns\n -------\n self\n \"\"\"\n if self.normalise:\n X = (X - X.mean(axis=-1, keepdims=True)) / (\n X.std(axis=-1, keepdims=True) + 1e-8\n )\n\n if X.shape[2] < 10:\n # handling very short series (like PensDigit from the MTSC archive)\n # series have to be at least a length of 10 (including differencing)\n _X1 = np.zeros((X.shape[0], X.shape[1], 10), dtype=X.dtype)\n _X1[:, :, : X.shape[2]] = X\n X = _X1\n del _X1\n\n X = X.astype(np.float64)\n\n self.parameter = self._get_parameter(X)\n _X1 = np.diff(X, 1)\n\n self.parameter1 = self._get_parameter(_X1)\n\n return self\n\n def _transform(self, X, y=None):\n \"\"\"Transform input time series using random convolutional kernels.\n\n Parameters\n ----------\n X : 3D np.ndarray of shape = [n_instances, n_dimensions, series_length]\n panel of time series to transform\n y : ignored argument for interface compatibility\n\n Returns\n -------\n pandas DataFrame, transformed features\n \"\"\"\n from numba import get_num_threads, set_num_threads\n\n from sktime.transformations.panel.rocket._multirocket_multi_numba import (\n _transform,\n )\n\n if self.normalise:\n X = (X - X.mean(axis=-1, keepdims=True)) / (\n X.std(axis=-1, keepdims=True) + 1e-8\n )\n\n _X1 = np.diff(X, 1)\n\n # change n_jobs depended on value and existing cores\n prev_threads = get_num_threads()\n if self.n_jobs < 1 or self.n_jobs > multiprocessing.cpu_count():\n n_jobs = multiprocessing.cpu_count()\n else:\n n_jobs = self.n_jobs\n set_num_threads(n_jobs)\n\n X = _transform(\n X,\n _X1,\n self.parameter,\n self.parameter1,\n self.n_features_per_kernel,\n )\n X = np.nan_to_num(X)\n\n set_num_threads(prev_threads)\n\n return pd.DataFrame(X)\n\n def _get_parameter(self, X):\n from sktime.transformations.panel.rocket._multirocket_multi_numba import (\n _fit_biases,\n _fit_dilations,\n _quantiles,\n )\n\n _, num_channels, input_length = X.shape\n\n num_kernels = 84\n\n dilations, num_features_per_dilation = _fit_dilations(\n input_length, self.num_kernels, self.max_dilations_per_kernel\n )\n\n num_features_per_kernel = np.sum(num_features_per_dilation)\n\n quantiles = _quantiles(num_kernels * num_features_per_kernel)\n\n num_dilations = len(dilations)\n num_combinations = num_kernels * num_dilations\n\n max_num_channels = min(num_channels, 9)\n max_exponent = np.log2(max_num_channels + 1)\n\n num_channels_per_combination = (\n 2 ** np.random.uniform(0, max_exponent, num_combinations)\n ).astype(np.int32)\n\n channel_indices = np.zeros(num_channels_per_combination.sum(), dtype=np.int32)\n\n num_channels_start = 0\n for combination_index in range(num_combinations):\n num_channels_this_combination = num_channels_per_combination[\n combination_index\n ]\n num_channels_end = num_channels_start + num_channels_this_combination\n channel_indices[num_channels_start:num_channels_end] = np.random.choice(\n num_channels, num_channels_this_combination, replace=False\n )\n\n num_channels_start = num_channels_end\n\n biases = _fit_biases(\n X,\n num_channels_per_combination,\n channel_indices,\n dilations,\n num_features_per_dilation,\n quantiles,\n self.random_state,\n )\n\n return (\n num_channels_per_combination,\n channel_indices,\n dilations,\n num_features_per_dilation,\n biases,\n )\n", "path": "sktime/transformations/panel/rocket/_multirocket_multivariate.py" } ]
[ { "content": "import multiprocessing\n\nimport numpy as np\nimport pandas as pd\n\nfrom sktime.transformations.base import BaseTransformer\n\n\nclass MultiRocketMultivariate(BaseTransformer):\n \"\"\"Multi RandOm Convolutional KErnel Transform (MultiRocket).\n\n MultiRocket [1]_ is uses the same set of kernels as MiniRocket on both the raw\n series and the first order differenced series representation. It uses a different\n set of dilations and used for each representation. In addition to percentage of\n positive values (PPV) MultiRocket adds 3 pooling operators: Mean of Positive\n Values (MPV); Mean of Indices of Positive Values (MIPV); and Longest Stretch of\n Positive Values (LSPV). This version is the multivariate version.\n\n This transformer fits one set of paramereters per individual series,\n and applies the transform with fitted parameter i to the i-th series in transform.\n Vanilla use requires same number of series in fit and transform.\n\n To fit and transform series at the same time,\n without an identification of fit/transform instances,\n wrap this transformer in ``FitInTransform``,\n from ``sktime.transformations.compose``.\n\n Parameters\n ----------\n num_kernels : int, default=6,250\n number of random convolutional kernels. The calculated number of features is the\n nearest multiple of n_features_per_kernel(default 4)*84=336 < 50,000\n (2*n_features_per_kernel(default 4)*num_kernels(default 6,250)).\n max_dilations_per_kernel : int, default=32\n maximum number of dilations per kernel.\n n_features_per_kernel : int, default =4\n number of features per kernel.\n normalise : bool, default False\n n_jobs : int, default=1\n The number of jobs to run in parallel for `transform`. ``-1`` means using all\n processors.\n random_state : None or int, default = None\n\n Attributes\n ----------\n parameter : tuple\n parameter (dilations, num_features_per_dilation, biases) for\n transformation of input X\n parameter1 : tuple\n parameter (dilations, num_features_per_dilation, biases) for\n transformation of input X1 = np.diff(X, 1)\n\n See Also\n --------\n MultiRocketMultivariate, MiniRocket, MiniRocketMultivariate, Rocket\n\n References\n ----------\n .. [1] Tan, Chang Wei and Dempster, Angus and Bergmeir, Christoph and\n Webb, Geoffrey I, \"MultiRocket: Multiple pooling operators and transformations\n for fast and effective time series classification\",2022,\n https://link.springer.com/article/10.1007/s10618-022-00844-1\n https://arxiv.org/abs/2102.00457\n\n Examples\n --------\n >>> from sktime.transformations.panel.rocket import Rocket\n >>> from sktime.datasets import load_basic_motions\n >>> X_train, y_train = load_basic_motions(split=\"train\") # doctest: +SKIP\n >>> X_test, y_test = load_basic_motions(split=\"test\") # doctest: +SKIP\n >>> trf = MultiRocketMultivariate(num_kernels=512) # doctest: +SKIP\n >>> trf.fit(X_train) # doctest: +SKIP\n MultiRocketMultivariate(...)\n >>> X_train = trf.transform(X_train) # doctest: +SKIP\n >>> X_test = trf.transform(X_test) # doctest: +SKIP\n \"\"\"\n\n _tags = {\n \"univariate-only\": False,\n \"fit_is_empty\": False,\n \"scitype:transform-input\": \"Series\",\n # what is the scitype of X: Series, or Panel\n \"scitype:transform-output\": \"Primitives\",\n # what is the scitype of y: None (not needed), Primitives, Series, Panel\n \"scitype:instancewise\": False, # is this an instance-wise transform?\n \"X_inner_mtype\": \"numpy3D\", # which mtypes do _fit/_predict support for X?\n \"y_inner_mtype\": \"None\", # which mtypes do _fit/_predict support for X?\n \"python_dependencies\": \"numba\",\n }\n\n def __init__(\n self,\n num_kernels=6_250,\n max_dilations_per_kernel=32,\n n_features_per_kernel=4,\n normalise=False,\n n_jobs=1,\n random_state=None,\n ):\n self.max_dilations_per_kernel = max_dilations_per_kernel\n self.n_features_per_kernel = n_features_per_kernel\n self.num_kernels = num_kernels\n self.normalise = normalise\n self.n_jobs = n_jobs\n self.random_state = random_state if isinstance(random_state, int) else None\n\n self.parameter = None\n self.parameter1 = None\n\n super().__init__()\n\n def _fit(self, X, y=None):\n \"\"\"Fit dilations and biases to input time series.\n\n Parameters\n ----------\n X : 3D np.ndarray of shape = [n_instances, n_dimensions, series_length]\n panel of time series to transform\n y : ignored argument for interface compatibility\n\n Returns\n -------\n self\n \"\"\"\n if self.normalise:\n X = (X - X.mean(axis=-1, keepdims=True)) / (\n X.std(axis=-1, keepdims=True) + 1e-8\n )\n\n if X.shape[2] < 10:\n # handling very short series (like PensDigit from the MTSC archive)\n # series have to be at least a length of 10 (including differencing)\n _X1 = np.zeros((X.shape[0], X.shape[1], 10), dtype=X.dtype)\n _X1[:, :, : X.shape[2]] = X\n X = _X1\n del _X1\n\n X = X.astype(np.float64)\n\n self.parameter = self._get_parameter(X)\n _X1 = np.diff(X, 1)\n\n self.parameter1 = self._get_parameter(_X1)\n\n return self\n\n def _transform(self, X, y=None):\n \"\"\"Transform input time series using random convolutional kernels.\n\n Parameters\n ----------\n X : 3D np.ndarray of shape = [n_instances, n_dimensions, series_length]\n panel of time series to transform\n y : ignored argument for interface compatibility\n\n Returns\n -------\n pandas DataFrame, transformed features\n \"\"\"\n from numba import get_num_threads, set_num_threads\n\n from sktime.transformations.panel.rocket._multirocket_multi_numba import (\n _transform,\n )\n\n if self.normalise:\n X = (X - X.mean(axis=-1, keepdims=True)) / (\n X.std(axis=-1, keepdims=True) + 1e-8\n )\n\n _X1 = np.diff(X, 1)\n\n # change n_jobs depended on value and existing cores\n prev_threads = get_num_threads()\n if self.n_jobs < 1 or self.n_jobs > multiprocessing.cpu_count():\n n_jobs = multiprocessing.cpu_count()\n else:\n n_jobs = self.n_jobs\n set_num_threads(n_jobs)\n\n X = _transform(\n X,\n _X1,\n self.parameter,\n self.parameter1,\n self.n_features_per_kernel,\n )\n X = np.nan_to_num(X)\n\n set_num_threads(prev_threads)\n\n return pd.DataFrame(X)\n\n def _get_parameter(self, X):\n from sktime.transformations.panel.rocket._multirocket_multi_numba import (\n _fit_biases,\n _fit_dilations,\n _quantiles,\n )\n\n if self.random_state is not None:\n np.random.seed(self.random_state)\n\n _, num_channels, input_length = X.shape\n\n num_kernels = 84\n\n dilations, num_features_per_dilation = _fit_dilations(\n input_length, self.num_kernels, self.max_dilations_per_kernel\n )\n\n num_features_per_kernel = np.sum(num_features_per_dilation)\n\n quantiles = _quantiles(num_kernels * num_features_per_kernel)\n\n num_dilations = len(dilations)\n num_combinations = num_kernels * num_dilations\n\n max_num_channels = min(num_channels, 9)\n max_exponent = np.log2(max_num_channels + 1)\n\n num_channels_per_combination = (\n 2 ** np.random.uniform(0, max_exponent, num_combinations)\n ).astype(np.int32)\n\n channel_indices = np.zeros(num_channels_per_combination.sum(), dtype=np.int32)\n\n num_channels_start = 0\n for combination_index in range(num_combinations):\n num_channels_this_combination = num_channels_per_combination[\n combination_index\n ]\n num_channels_end = num_channels_start + num_channels_this_combination\n channel_indices[num_channels_start:num_channels_end] = np.random.choice(\n num_channels, num_channels_this_combination, replace=False\n )\n\n num_channels_start = num_channels_end\n\n biases = _fit_biases(\n X,\n num_channels_per_combination,\n channel_indices,\n dilations,\n num_features_per_dilation,\n quantiles,\n self.random_state,\n )\n\n return (\n num_channels_per_combination,\n channel_indices,\n dilations,\n num_features_per_dilation,\n biases,\n )\n", "path": "sktime/transformations/panel/rocket/_multirocket_multivariate.py" } ]
diff --git a/sktime/transformations/panel/rocket/_multirocket_multivariate.py b/sktime/transformations/panel/rocket/_multirocket_multivariate.py index 1fe4813f47d..41b905d9371 100644 --- a/sktime/transformations/panel/rocket/_multirocket_multivariate.py +++ b/sktime/transformations/panel/rocket/_multirocket_multivariate.py @@ -198,6 +198,9 @@ def _get_parameter(self, X): _quantiles, ) + if self.random_state is not None: + np.random.seed(self.random_state) + _, num_channels, input_length = X.shape num_kernels = 84
googleapis__google-cloud-python-5366
General: v0.33.0 pip install fails In a fresh Python v2.7.12 virtualenv on linux: ``` pip install google-cloud ``` Results in: ``` Traceback (most recent call last): File "<string>", line 1, in <module> File "/tmp/pip-install-3_n60m/google-cloud/setup.py", line 22, in <module> with open(os.path.join(PACKAGE_ROOT, 'setup-README.rst')) as file_obj: IOError: [Errno 2] No such file or directory: '/tmp/pip-install-3_n60m/google-cloud/setup-README.rst' ``` Note: ``` pip install google-cloud==0.32.0 ``` works fine. I believe it has to do with recent changes: https://github.com/GoogleCloudPlatform/google-cloud-python/commit/71e5d4bf94745580834b86c3e92ac4186c3115c0
[ { "content": "# Copyright 2016 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport os\nimport logging\n\nfrom setuptools import setup\n\nPACKAGE_ROOT = os.path.abspath(os.path.dirname(__file__))\n\nwith open(os.path.join(PACKAGE_ROOT, 'setup-README.rst')) as file_obj:\n README = file_obj.read()\n\n# NOTE: This is duplicated throughout and we should try to\n# consolidate.\nSETUP_BASE = {\n 'author': 'Google Cloud Platform',\n 'author_email': '[email protected]',\n 'scripts': [],\n 'url': 'https://github.com/GoogleCloudPlatform/google-cloud-python',\n 'license': 'Apache 2.0',\n 'platforms': 'Posix; MacOS X; Windows',\n 'include_package_data': True,\n 'zip_safe': False,\n 'classifiers': [\n 'Development Status :: 7 - Inactive',\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: Apache Software License',\n 'Operating System :: OS Independent',\n 'Programming Language :: Python :: 2',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.4',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n 'Topic :: Internet',\n ],\n}\n\nREQUIREMENTS = [\n 'google-api-core >= 0.1.2, < 0.2.0dev',\n 'google-cloud-bigquery >= 0.28.0, < 0.29dev',\n 'google-cloud-bigquery-datatransfer >= 0.1.0, < 0.2dev',\n 'google-cloud-bigtable >= 0.28.1, < 0.29dev',\n 'google-cloud-container >= 0.1.0, < 0.2dev',\n 'google-cloud-core >= 0.28.0, < 0.29dev',\n 'google-cloud-datastore >= 1.4.0, < 1.5dev',\n 'google-cloud-dns >= 0.28.0, < 0.29dev',\n 'google-cloud-error-reporting >= 0.28.0, < 0.29dev',\n 'google-cloud-firestore >= 0.28.0, < 0.29dev',\n 'google-cloud-language >= 1.0.0, < 1.1dev',\n 'google-cloud-logging >= 1.4.0, < 1.5dev',\n 'google-cloud-monitoring >= 0.28.0, < 0.29dev',\n 'google-cloud-pubsub >= 0.30.0, < 0.31dev',\n 'google-cloud-resource-manager >= 0.28.0, < 0.29dev',\n 'google-cloud-runtimeconfig >= 0.28.0, < 0.29dev',\n 'google-cloud-spanner >= 0.29.0, < 0.30dev',\n 'google-cloud-speech >= 0.30.0, < 0.31dev',\n 'google-cloud-storage >= 1.6.0, < 1.7dev',\n 'google-cloud-trace >= 0.17.0, < 0.18dev',\n 'google-cloud-translate >= 1.3.0, < 1.4dev',\n 'google-cloud-videointelligence >= 1.0.0, < 1.1dev',\n 'google-cloud-vision >= 0.29.0, < 0.30dev',\n]\n\nsetup(\n name='google-cloud',\n version='0.33.0',\n description='API Client library for Google Cloud',\n long_description=README,\n install_requires=REQUIREMENTS,\n **SETUP_BASE\n)\n\nwarning = \"WARNING: The google-cloud Python package is deprecated. On \" \\\n \"June 18, 2018, this package will no longer install any other \" \\\n \"packages. Please install the product-specific google-cloud-* \" \\\n \"packages needed for your application. See \" \\\n \"https://github.com/GoogleCloudPlatform/google-cloud-python.\"\n\nlogging.warn(warning)\n", "path": "legacy/google-cloud/setup.py" } ]
[ { "content": "# Copyright 2016 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport os\nimport logging\n\nfrom setuptools import setup\n\nPACKAGE_ROOT = os.path.abspath(os.path.dirname(__file__))\n\nwith open(os.path.join(PACKAGE_ROOT, 'setup-README.rst')) as file_obj:\n README = file_obj.read()\n\n# NOTE: This is duplicated throughout and we should try to\n# consolidate.\nSETUP_BASE = {\n 'author': 'Google Cloud Platform',\n 'author_email': '[email protected]',\n 'scripts': [],\n 'url': 'https://github.com/GoogleCloudPlatform/google-cloud-python',\n 'license': 'Apache 2.0',\n 'platforms': 'Posix; MacOS X; Windows',\n 'include_package_data': True,\n 'zip_safe': False,\n 'classifiers': [\n 'Development Status :: 7 - Inactive',\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: Apache Software License',\n 'Operating System :: OS Independent',\n 'Programming Language :: Python :: 2',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.4',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n 'Topic :: Internet',\n ],\n}\n\nREQUIREMENTS = [\n 'google-api-core >= 0.1.2, < 0.2.0dev',\n 'google-cloud-bigquery >= 0.28.0, < 0.29dev',\n 'google-cloud-bigquery-datatransfer >= 0.1.0, < 0.2dev',\n 'google-cloud-bigtable >= 0.28.1, < 0.29dev',\n 'google-cloud-container >= 0.1.0, < 0.2dev',\n 'google-cloud-core >= 0.28.0, < 0.29dev',\n 'google-cloud-datastore >= 1.4.0, < 1.5dev',\n 'google-cloud-dns >= 0.28.0, < 0.29dev',\n 'google-cloud-error-reporting >= 0.28.0, < 0.29dev',\n 'google-cloud-firestore >= 0.28.0, < 0.29dev',\n 'google-cloud-language >= 1.0.0, < 1.1dev',\n 'google-cloud-logging >= 1.4.0, < 1.5dev',\n 'google-cloud-monitoring >= 0.28.0, < 0.29dev',\n 'google-cloud-pubsub >= 0.30.0, < 0.31dev',\n 'google-cloud-resource-manager >= 0.28.0, < 0.29dev',\n 'google-cloud-runtimeconfig >= 0.28.0, < 0.29dev',\n 'google-cloud-spanner >= 0.29.0, < 0.30dev',\n 'google-cloud-speech >= 0.30.0, < 0.31dev',\n 'google-cloud-storage >= 1.6.0, < 1.7dev',\n 'google-cloud-trace >= 0.17.0, < 0.18dev',\n 'google-cloud-translate >= 1.3.0, < 1.4dev',\n 'google-cloud-videointelligence >= 1.0.0, < 1.1dev',\n 'google-cloud-vision >= 0.29.0, < 0.30dev',\n]\n\nsetup(\n name='google-cloud',\n version='0.33.1',\n description='API Client library for Google Cloud',\n long_description=README,\n install_requires=REQUIREMENTS,\n **SETUP_BASE\n)\n\nwarning = \"WARNING: The google-cloud Python package is deprecated. On \" \\\n \"June 18, 2018, this package will no longer install any other \" \\\n \"packages. Please install the product-specific google-cloud-* \" \\\n \"packages needed for your application. See \" \\\n \"https://github.com/GoogleCloudPlatform/google-cloud-python.\"\n\nlogging.warn(warning)\n", "path": "legacy/google-cloud/setup.py" } ]
diff --git a/MANIFEST.in b/MANIFEST.in deleted file mode 100644 index e835c5e94a5a..000000000000 --- a/MANIFEST.in +++ /dev/null @@ -1,2 +0,0 @@ -include README.rst -global-exclude *.pyc diff --git a/legacy/google-cloud/MANIFEST.in b/legacy/google-cloud/MANIFEST.in new file mode 100644 index 000000000000..e73a89048601 --- /dev/null +++ b/legacy/google-cloud/MANIFEST.in @@ -0,0 +1,2 @@ +include setup-README.rst +global-exclude *.pyc diff --git a/legacy/google-cloud/setup.py b/legacy/google-cloud/setup.py index 7528d557594c..374382197d1a 100644 --- a/legacy/google-cloud/setup.py +++ b/legacy/google-cloud/setup.py @@ -76,7 +76,7 @@ setup( name='google-cloud', - version='0.33.0', + version='0.33.1', description='API Client library for Google Cloud', long_description=README, install_requires=REQUIREMENTS,
ansible-collections__community.aws-542
SSM connection plugin doesnt properly close connections ##### SUMMARY When trying to run a big playbook using the SSM connection plugin, it randomly hangs in the middle of it. Very rarely am I able to run the entire playbook without issues. ##### ISSUE TYPE - Bug Report ##### COMPONENT NAME ssm connection plugin ##### ANSIBLE VERSION ``` ansible 2.10.5 config file = /Users/xxx/.ansible.cfg configured module search path = ['/Users/xxx/.ansible/plugins/modules', '/usr/share/ansible/plugins/modules'] ansible python module location = /usr/local/lib/python3.8/site-packages/ansible executable location = /usr/local/bin/ansible python version = 3.8.8 (default, Feb 21 2021, 10:35:39) [Clang 12.0.0 (clang-1200.0.32.29)] ``` ##### CONFIGURATION ``` DEFAULT_HOST_LIST(env: ANSIBLE_INVENTORY) = ['/Users/xxx/ansible_hosts'] DEFAULT_VAULT_IDENTITY_LIST(/Users/xxx/.ansible.cfg) = ['xx@~/.vault_pass.txt', 'xxx@~/.vault_pass_xxx.txt', 'xxx@~/.vault_pass_xxx.txt'] ``` Ansible variables used in the playbook for configuring the SSM plugin: ``` vars: ansible_connection: community.aws.aws_ssm ansible_aws_ssm_bucket_name: xxx-ansible-ssm ansible_aws_ssm_region: eu-west-1 ``` ##### OS / ENVIRONMENT Target OS: `Amazon-Linux 2` ##### STEPS TO REPRODUCE I dont have exact steps to replicate this issue, it seems to happen to bigger playbooks. And happens randomly, sometimes it dies immediately, sometimes it dies in the middle or end, and very rarely does it complete without issues. ##### EXPECTED RESULTS To complete the playbook without hanging. ##### ACTUAL RESULTS When running in verbose mode, these are the last lines printed, i left the playbook running for 10 minutes and no change happened after which i stopped it manually: ``` .... <i-xxx> ESTABLISH SSM CONNECTION TO: i-xxx <i-xxx> SSM CONNECTION ID: xxx-0a55f9c52a37613a0 <i-xxx> EXEC echo ~ ^C [ERROR]: User interrupted execution ``` If I SSH to the server, it seems there are a lot of connections left hanging, this is the output of `ps -e --forest -o ppid,pid,user,command`: ![output of ps command](https://user-images.githubusercontent.com/1773227/112132377-d483e380-8bca-11eb-83f0-fd0f081c6dde.png) This has been an issue for me for several releases of the ssm connection plugin.
[ { "content": "# Based on the ssh connection plugin by Michael DeHaan\n#\n# Copyright: (c) 2018, Pat Sharkey <[email protected]>\n# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)\n\nfrom __future__ import (absolute_import, division, print_function)\n__metaclass__ = type\n\nDOCUMENTATION = '''\nauthor:\n- Pat Sharkey (@psharkey) <[email protected]>\n- HanumanthaRao MVL (@hanumantharaomvl) <[email protected]>\n- Gaurav Ashtikar (@gau1991 )<[email protected]>\nconnection: aws_ssm\nshort_description: execute via AWS Systems Manager\ndescription:\n- This connection plugin allows ansible to execute tasks on an EC2 instance via the aws ssm CLI.\nrequirements:\n- The remote EC2 instance must be running the AWS Systems Manager Agent (SSM Agent).\n- The control machine must have the aws session manager plugin installed.\n- The remote EC2 linux instance must have the curl installed.\noptions:\n access_key_id:\n description: The STS access key to use when connecting via session-manager.\n vars:\n - name: ansible_aws_ssm_access_key_id\n version_added: 1.3.0\n secret_access_key:\n description: The STS secret key to use when connecting via session-manager.\n vars:\n - name: ansible_aws_ssm_secret_access_key\n version_added: 1.3.0\n session_token:\n description: The STS session token to use when connecting via session-manager.\n vars:\n - name: ansible_aws_ssm_session_token\n version_added: 1.3.0\n instance_id:\n description: The EC2 instance ID.\n vars:\n - name: ansible_aws_ssm_instance_id\n region:\n description: The region the EC2 instance is located.\n vars:\n - name: ansible_aws_ssm_region\n default: 'us-east-1'\n bucket_name:\n description: The name of the S3 bucket used for file transfers.\n vars:\n - name: ansible_aws_ssm_bucket_name\n plugin:\n description: This defines the location of the session-manager-plugin binary.\n vars:\n - name: ansible_aws_ssm_plugin\n default: '/usr/local/bin/session-manager-plugin'\n profile:\n description: Sets AWS profile to use.\n vars:\n - name: ansible_aws_ssm_profile\n version_added: 1.5.0\n retries:\n description: Number of attempts to connect.\n default: 3\n type: integer\n vars:\n - name: ansible_aws_ssm_retries\n ssm_timeout:\n description: Connection timeout seconds.\n default: 60\n type: integer\n vars:\n - name: ansible_aws_ssm_timeout\n'''\n\nEXAMPLES = r'''\n\n# Stop Spooler Process on Windows Instances\n- name: Stop Spooler Service on Windows Instances\n vars:\n ansible_connection: aws_ssm\n ansible_shell_type: powershell\n ansible_aws_ssm_bucket_name: nameofthebucket\n ansible_aws_ssm_region: us-east-1\n tasks:\n - name: Stop spooler service\n win_service:\n name: spooler\n state: stopped\n\n# Install a Nginx Package on Linux Instance\n- name: Install a Nginx Package\n vars:\n ansible_connection: aws_ssm\n ansible_aws_ssm_bucket_name: nameofthebucket\n ansible_aws_ssm_region: us-west-2\n tasks:\n - name: Install a Nginx Package\n yum:\n name: nginx\n state: present\n\n# Create a directory in Windows Instances\n- name: Create a directory in Windows Instance\n vars:\n ansible_connection: aws_ssm\n ansible_shell_type: powershell\n ansible_aws_ssm_bucket_name: nameofthebucket\n ansible_aws_ssm_region: us-east-1\n tasks:\n - name: Create a Directory\n win_file:\n path: C:\\Windows\\temp\n state: directory\n\n# Making use of Dynamic Inventory Plugin\n# =======================================\n# aws_ec2.yml (Dynamic Inventory - Linux)\n# This will return the Instance IDs matching the filter\n#plugin: aws_ec2\n#regions:\n# - us-east-1\n#hostnames:\n# - instance-id\n#filters:\n# tag:SSMTag: ssmlinux\n# -----------------------\n- name: install aws-cli\n hosts: all\n gather_facts: false\n vars:\n ansible_connection: aws_ssm\n ansible_aws_ssm_bucket_name: nameofthebucket\n ansible_aws_ssm_region: us-east-1\n tasks:\n - name: aws-cli\n raw: yum install -y awscli\n tags: aws-cli\n# Execution: ansible-playbook linux.yaml -i aws_ec2.yml\n# The playbook tasks will get executed on the instance ids returned from the dynamic inventory plugin using ssm connection.\n# =====================================================\n# aws_ec2.yml (Dynamic Inventory - Windows)\n#plugin: aws_ec2\n#regions:\n# - us-east-1\n#hostnames:\n# - instance-id\n#filters:\n# tag:SSMTag: ssmwindows\n# -----------------------\n- name: Create a dir.\n hosts: all\n gather_facts: false\n vars:\n ansible_connection: aws_ssm\n ansible_shell_type: powershell\n ansible_aws_ssm_bucket_name: nameofthebucket\n ansible_aws_ssm_region: us-east-1\n tasks:\n - name: Create the directory\n win_file:\n path: C:\\Temp\\SSM_Testing5\n state: directory\n# Execution: ansible-playbook win_file.yaml -i aws_ec2.yml\n# The playbook tasks will get executed on the instance ids returned from the dynamic inventory plugin using ssm connection.\n'''\n\nimport os\nimport getpass\nimport json\nimport pty\nimport random\nimport re\nimport select\nimport string\nimport subprocess\nimport time\n\ntry:\n import boto3\n from botocore.client import Config\n HAS_BOTO_3 = True\nexcept ImportError as e:\n HAS_BOTO_3_ERROR = str(e)\n HAS_BOTO_3 = False\n\nfrom functools import wraps\nfrom ansible.errors import AnsibleConnectionFailure, AnsibleError, AnsibleFileNotFound\nfrom ansible.module_utils.basic import missing_required_lib\nfrom ansible.module_utils.six.moves import xrange\nfrom ansible.module_utils._text import to_bytes, to_native, to_text\nfrom ansible.plugins.connection import ConnectionBase\nfrom ansible.plugins.shell.powershell import _common_args\nfrom ansible.utils.display import Display\n\ndisplay = Display()\n\n\ndef _ssm_retry(func):\n \"\"\"\n Decorator to retry in the case of a connection failure\n Will retry if:\n * an exception is caught\n Will not retry if\n * remaining_tries is <2\n * retries limit reached\n \"\"\"\n @wraps(func)\n def wrapped(self, *args, **kwargs):\n remaining_tries = int(self.get_option('retries')) + 1\n cmd_summary = \"%s...\" % args[0]\n for attempt in range(remaining_tries):\n cmd = args[0]\n\n try:\n return_tuple = func(self, *args, **kwargs)\n display.vvv(return_tuple, host=self.host)\n break\n\n except (AnsibleConnectionFailure, Exception) as e:\n if attempt == remaining_tries - 1:\n raise\n else:\n pause = 2 ** attempt - 1\n if pause > 30:\n pause = 30\n\n if isinstance(e, AnsibleConnectionFailure):\n msg = \"ssm_retry: attempt: %d, cmd (%s), pausing for %d seconds\" % (attempt, cmd_summary, pause)\n else:\n msg = \"ssm_retry: attempt: %d, caught exception(%s) from cmd (%s), pausing for %d seconds\" % (attempt, e, cmd_summary, pause)\n\n display.vv(msg, host=self.host)\n\n time.sleep(pause)\n\n # Do not attempt to reuse the existing session on retries\n self.close()\n\n continue\n\n return return_tuple\n return wrapped\n\n\ndef chunks(lst, n):\n \"\"\"Yield successive n-sized chunks from lst.\"\"\"\n for i in range(0, len(lst), n):\n yield lst[i:i + n]\n\n\nclass Connection(ConnectionBase):\n ''' AWS SSM based connections '''\n\n transport = 'community.aws.aws_ssm'\n allow_executable = False\n allow_extras = True\n has_pipelining = False\n is_windows = False\n _client = None\n _session = None\n _stdout = None\n _session_id = ''\n _timeout = False\n MARK_LENGTH = 26\n\n def __init__(self, *args, **kwargs):\n if not HAS_BOTO_3:\n raise AnsibleError('{0}: {1}'.format(missing_required_lib(\"boto3\"), HAS_BOTO_3_ERROR))\n\n super(Connection, self).__init__(*args, **kwargs)\n self.host = self._play_context.remote_addr\n\n if getattr(self._shell, \"SHELL_FAMILY\", '') == 'powershell':\n self.delegate = None\n self.has_native_async = True\n self.always_pipeline_modules = True\n self.module_implementation_preferences = ('.ps1', '.exe', '')\n self.protocol = None\n self.shell_id = None\n self._shell_type = 'powershell'\n self.is_windows = True\n\n def _connect(self):\n ''' connect to the host via ssm '''\n\n self._play_context.remote_user = getpass.getuser()\n\n if not self._session_id:\n self.start_session()\n return self\n\n def reset(self):\n ''' start a fresh ssm session '''\n display.vvvv('reset called on ssm connection')\n return self.start_session()\n\n def start_session(self):\n ''' start ssm session '''\n\n if self.get_option('instance_id') is None:\n self.instance_id = self.host\n else:\n self.instance_id = self.get_option('instance_id')\n\n display.vvv(u\"ESTABLISH SSM CONNECTION TO: {0}\".format(self.instance_id), host=self.host)\n\n executable = self.get_option('plugin')\n if not os.path.exists(to_bytes(executable, errors='surrogate_or_strict')):\n raise AnsibleError(\"failed to find the executable specified %s.\"\n \" Please verify if the executable exists and re-try.\" % executable)\n\n profile_name = self.get_option('profile') or ''\n region_name = self.get_option('region')\n ssm_parameters = dict()\n client = self._get_boto_client('ssm', region_name=region_name, profile_name=profile_name)\n self._client = client\n response = client.start_session(Target=self.instance_id, Parameters=ssm_parameters)\n self._session_id = response['SessionId']\n\n cmd = [\n executable,\n json.dumps(response),\n region_name,\n \"StartSession\",\n profile_name,\n json.dumps({\"Target\": self.instance_id}),\n client.meta.endpoint_url\n ]\n\n display.vvvv(u\"SSM COMMAND: {0}\".format(to_text(cmd)), host=self.host)\n\n stdout_r, stdout_w = pty.openpty()\n session = subprocess.Popen(\n cmd,\n stdin=subprocess.PIPE,\n stdout=stdout_w,\n stderr=subprocess.PIPE,\n close_fds=True,\n bufsize=0,\n )\n\n os.close(stdout_w)\n self._stdout = os.fdopen(stdout_r, 'rb', 0)\n self._session = session\n self._poll_stdout = select.poll()\n self._poll_stdout.register(self._stdout, select.POLLIN)\n\n # Disable command echo and prompt.\n self._prepare_terminal()\n\n display.vvv(u\"SSM CONNECTION ID: {0}\".format(self._session_id), host=self.host)\n\n return session\n\n @_ssm_retry\n def exec_command(self, cmd, in_data=None, sudoable=True):\n ''' run a command on the ssm host '''\n\n super(Connection, self).exec_command(cmd, in_data=in_data, sudoable=sudoable)\n\n display.vvv(u\"EXEC {0}\".format(to_text(cmd)), host=self.host)\n\n session = self._session\n\n mark_begin = \"\".join([random.choice(string.ascii_letters) for i in xrange(self.MARK_LENGTH)])\n if self.is_windows:\n mark_start = mark_begin + \" $LASTEXITCODE\"\n else:\n mark_start = mark_begin\n mark_end = \"\".join([random.choice(string.ascii_letters) for i in xrange(self.MARK_LENGTH)])\n\n # Wrap command in markers accordingly for the shell used\n cmd = self._wrap_command(cmd, sudoable, mark_start, mark_end)\n\n self._flush_stderr(session)\n\n for chunk in chunks(cmd, 1024):\n session.stdin.write(to_bytes(chunk, errors='surrogate_or_strict'))\n\n # Read stdout between the markers\n stdout = ''\n win_line = ''\n begin = False\n stop_time = int(round(time.time())) + self.get_option('ssm_timeout')\n while session.poll() is None:\n remaining = stop_time - int(round(time.time()))\n if remaining < 1:\n self._timeout = True\n display.vvvv(u\"EXEC timeout stdout: {0}\".format(to_text(stdout)), host=self.host)\n raise AnsibleConnectionFailure(\"SSM exec_command timeout on host: %s\"\n % self.instance_id)\n if self._poll_stdout.poll(1000):\n line = self._filter_ansi(self._stdout.readline())\n display.vvvv(u\"EXEC stdout line: {0}\".format(to_text(line)), host=self.host)\n else:\n display.vvvv(u\"EXEC remaining: {0}\".format(remaining), host=self.host)\n continue\n\n if not begin and self.is_windows:\n win_line = win_line + line\n line = win_line\n\n if mark_start in line:\n begin = True\n if not line.startswith(mark_start):\n stdout = ''\n continue\n if begin:\n if mark_end in line:\n display.vvvv(u\"POST_PROCESS: {0}\".format(to_text(stdout)), host=self.host)\n returncode, stdout = self._post_process(stdout, mark_begin)\n break\n else:\n stdout = stdout + line\n\n stderr = self._flush_stderr(session)\n\n return (returncode, stdout, stderr)\n\n def _prepare_terminal(self):\n ''' perform any one-time terminal settings '''\n\n if not self.is_windows:\n cmd = \"stty -echo\\n\" + \"PS1=''\\n\"\n cmd = to_bytes(cmd, errors='surrogate_or_strict')\n self._session.stdin.write(cmd)\n\n def _wrap_command(self, cmd, sudoable, mark_start, mark_end):\n ''' wrap command so stdout and status can be extracted '''\n\n if self.is_windows:\n if not cmd.startswith(\" \".join(_common_args) + \" -EncodedCommand\"):\n cmd = self._shell._encode_script(cmd, preserve_rc=True)\n cmd = cmd + \"; echo \" + mark_start + \"\\necho \" + mark_end + \"\\n\"\n else:\n if sudoable:\n cmd = \"sudo \" + cmd\n cmd = \"echo \" + mark_start + \"\\n\" + cmd + \"\\necho $'\\\\n'$?\\n\" + \"echo \" + mark_end + \"\\n\"\n\n display.vvvv(u\"_wrap_command: '{0}'\".format(to_text(cmd)), host=self.host)\n return cmd\n\n def _post_process(self, stdout, mark_begin):\n ''' extract command status and strip unwanted lines '''\n\n if self.is_windows:\n # Value of $LASTEXITCODE will be the line after the mark\n trailer = stdout[stdout.rfind(mark_begin):]\n last_exit_code = trailer.splitlines()[1]\n if last_exit_code.isdigit:\n returncode = int(last_exit_code)\n else:\n returncode = -1\n # output to keep will be before the mark\n stdout = stdout[:stdout.rfind(mark_begin)]\n\n # If it looks like JSON remove any newlines\n if stdout.startswith('{'):\n stdout = stdout.replace('\\n', '')\n\n return (returncode, stdout)\n else:\n # Get command return code\n returncode = int(stdout.splitlines()[-2])\n\n # Throw away ending lines\n for x in range(0, 3):\n stdout = stdout[:stdout.rfind('\\n')]\n\n return (returncode, stdout)\n\n def _filter_ansi(self, line):\n ''' remove any ANSI terminal control codes '''\n line = to_text(line)\n\n if self.is_windows:\n osc_filter = re.compile(r'\\x1b\\][^\\x07]*\\x07')\n line = osc_filter.sub('', line)\n ansi_filter = re.compile(r'(\\x9B|\\x1B\\[)[0-?]*[ -/]*[@-~]')\n line = ansi_filter.sub('', line)\n\n # Replace or strip sequence (at terminal width)\n line = line.replace('\\r\\r\\n', '\\n')\n if len(line) == 201:\n line = line[:-1]\n\n return line\n\n def _flush_stderr(self, subprocess):\n ''' read and return stderr with minimal blocking '''\n\n poll_stderr = select.poll()\n poll_stderr.register(subprocess.stderr, select.POLLIN)\n stderr = ''\n\n while subprocess.poll() is None:\n if poll_stderr.poll(1):\n line = subprocess.stderr.readline()\n display.vvvv(u\"stderr line: {0}\".format(to_text(line)), host=self.host)\n stderr = stderr + line\n else:\n break\n\n return stderr\n\n def _get_url(self, client_method, bucket_name, out_path, http_method, profile_name):\n ''' Generate URL for get_object / put_object '''\n region_name = self.get_option('region') or 'us-east-1'\n client = self._get_boto_client('s3', region_name=region_name, profile_name=profile_name)\n return client.generate_presigned_url(client_method, Params={'Bucket': bucket_name, 'Key': out_path}, ExpiresIn=3600, HttpMethod=http_method)\n\n def _get_boto_client(self, service, region_name=None, profile_name=None):\n ''' Gets a boto3 client based on the STS token '''\n\n aws_access_key_id = self.get_option('access_key_id')\n aws_secret_access_key = self.get_option('secret_access_key')\n aws_session_token = self.get_option('session_token')\n\n if aws_access_key_id is None:\n aws_access_key_id = os.environ.get(\"AWS_ACCESS_KEY_ID\", None)\n if aws_secret_access_key is None:\n aws_secret_access_key = os.environ.get(\"AWS_SECRET_ACCESS_KEY\", None)\n if aws_session_token is None:\n aws_session_token = os.environ.get(\"AWS_SESSION_TOKEN\", None)\n if not profile_name:\n profile_name = os.environ.get(\"AWS_PROFILE\", None)\n\n session_args = dict(\n aws_access_key_id=aws_access_key_id,\n aws_secret_access_key=aws_secret_access_key,\n aws_session_token=aws_session_token,\n region_name=region_name,\n )\n if profile_name:\n session_args['profile_name'] = profile_name\n session = boto3.session.Session(**session_args)\n\n client = session.client(\n service,\n config=Config(signature_version=\"s3v4\")\n )\n return client\n\n @_ssm_retry\n def _file_transport_command(self, in_path, out_path, ssm_action):\n ''' transfer a file from using an intermediate S3 bucket '''\n\n path_unescaped = u\"{0}/{1}\".format(self.instance_id, out_path)\n s3_path = path_unescaped.replace('\\\\', '/')\n bucket_url = 's3://%s/%s' % (self.get_option('bucket_name'), s3_path)\n\n profile_name = self.get_option('profile')\n\n if self.is_windows:\n put_command = \"Invoke-WebRequest -Method PUT -InFile '%s' -Uri '%s' -UseBasicParsing\" % (\n in_path, self._get_url('put_object', self.get_option('bucket_name'), s3_path, 'PUT', profile_name))\n get_command = \"Invoke-WebRequest '%s' -OutFile '%s'\" % (\n self._get_url('get_object', self.get_option('bucket_name'), s3_path, 'GET', profile_name), out_path)\n else:\n put_command = \"curl --request PUT --upload-file '%s' '%s'\" % (\n in_path, self._get_url('put_object', self.get_option('bucket_name'), s3_path, 'PUT', profile_name))\n get_command = \"curl '%s' -o '%s'\" % (\n self._get_url('get_object', self.get_option('bucket_name'), s3_path, 'GET', profile_name), out_path)\n\n client = self._get_boto_client('s3', profile_name=profile_name)\n if ssm_action == 'get':\n (returncode, stdout, stderr) = self.exec_command(put_command, in_data=None, sudoable=False)\n with open(to_bytes(out_path, errors='surrogate_or_strict'), 'wb') as data:\n client.download_fileobj(self.get_option('bucket_name'), s3_path, data)\n else:\n with open(to_bytes(in_path, errors='surrogate_or_strict'), 'rb') as data:\n client.upload_fileobj(data, self.get_option('bucket_name'), s3_path)\n (returncode, stdout, stderr) = self.exec_command(get_command, in_data=None, sudoable=False)\n\n # Remove the files from the bucket after they've been transferred\n client.delete_object(Bucket=self.get_option('bucket_name'), Key=s3_path)\n\n # Check the return code\n if returncode == 0:\n return (returncode, stdout, stderr)\n else:\n raise AnsibleError(\"failed to transfer file to %s %s:\\n%s\\n%s\" %\n (to_native(in_path), to_native(out_path), to_native(stdout), to_native(stderr)))\n\n def put_file(self, in_path, out_path):\n ''' transfer a file from local to remote '''\n\n super(Connection, self).put_file(in_path, out_path)\n\n display.vvv(u\"PUT {0} TO {1}\".format(in_path, out_path), host=self.host)\n if not os.path.exists(to_bytes(in_path, errors='surrogate_or_strict')):\n raise AnsibleFileNotFound(\"file or module does not exist: {0}\".format(to_native(in_path)))\n\n return self._file_transport_command(in_path, out_path, 'put')\n\n def fetch_file(self, in_path, out_path):\n ''' fetch a file from remote to local '''\n\n super(Connection, self).fetch_file(in_path, out_path)\n\n display.vvv(u\"FETCH {0} TO {1}\".format(in_path, out_path), host=self.host)\n return self._file_transport_command(in_path, out_path, 'get')\n\n def close(self):\n ''' terminate the connection '''\n if self._session_id:\n\n display.vvv(u\"CLOSING SSM CONNECTION TO: {0}\".format(self.instance_id), host=self.host)\n if self._timeout:\n self._session.terminate()\n else:\n cmd = b\"\\nexit\\n\"\n self._session.communicate(cmd)\n\n display.vvvv(u\"TERMINATE SSM SESSION: {0}\".format(self._session_id), host=self.host)\n self._client.terminate_session(SessionId=self._session_id)\n self._session_id = ''\n", "path": "plugins/connection/aws_ssm.py" } ]
[ { "content": "# Based on the ssh connection plugin by Michael DeHaan\n#\n# Copyright: (c) 2018, Pat Sharkey <[email protected]>\n# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)\n\nfrom __future__ import (absolute_import, division, print_function)\n__metaclass__ = type\n\nDOCUMENTATION = '''\nauthor:\n- Pat Sharkey (@psharkey) <[email protected]>\n- HanumanthaRao MVL (@hanumantharaomvl) <[email protected]>\n- Gaurav Ashtikar (@gau1991 )<[email protected]>\nconnection: aws_ssm\nshort_description: execute via AWS Systems Manager\ndescription:\n- This connection plugin allows ansible to execute tasks on an EC2 instance via the aws ssm CLI.\nrequirements:\n- The remote EC2 instance must be running the AWS Systems Manager Agent (SSM Agent).\n- The control machine must have the aws session manager plugin installed.\n- The remote EC2 linux instance must have the curl installed.\noptions:\n access_key_id:\n description: The STS access key to use when connecting via session-manager.\n vars:\n - name: ansible_aws_ssm_access_key_id\n version_added: 1.3.0\n secret_access_key:\n description: The STS secret key to use when connecting via session-manager.\n vars:\n - name: ansible_aws_ssm_secret_access_key\n version_added: 1.3.0\n session_token:\n description: The STS session token to use when connecting via session-manager.\n vars:\n - name: ansible_aws_ssm_session_token\n version_added: 1.3.0\n instance_id:\n description: The EC2 instance ID.\n vars:\n - name: ansible_aws_ssm_instance_id\n region:\n description: The region the EC2 instance is located.\n vars:\n - name: ansible_aws_ssm_region\n default: 'us-east-1'\n bucket_name:\n description: The name of the S3 bucket used for file transfers.\n vars:\n - name: ansible_aws_ssm_bucket_name\n plugin:\n description: This defines the location of the session-manager-plugin binary.\n vars:\n - name: ansible_aws_ssm_plugin\n default: '/usr/local/bin/session-manager-plugin'\n profile:\n description: Sets AWS profile to use.\n vars:\n - name: ansible_aws_ssm_profile\n version_added: 1.5.0\n retries:\n description: Number of attempts to connect.\n default: 3\n type: integer\n vars:\n - name: ansible_aws_ssm_retries\n ssm_timeout:\n description: Connection timeout seconds.\n default: 60\n type: integer\n vars:\n - name: ansible_aws_ssm_timeout\n'''\n\nEXAMPLES = r'''\n\n# Stop Spooler Process on Windows Instances\n- name: Stop Spooler Service on Windows Instances\n vars:\n ansible_connection: aws_ssm\n ansible_shell_type: powershell\n ansible_aws_ssm_bucket_name: nameofthebucket\n ansible_aws_ssm_region: us-east-1\n tasks:\n - name: Stop spooler service\n win_service:\n name: spooler\n state: stopped\n\n# Install a Nginx Package on Linux Instance\n- name: Install a Nginx Package\n vars:\n ansible_connection: aws_ssm\n ansible_aws_ssm_bucket_name: nameofthebucket\n ansible_aws_ssm_region: us-west-2\n tasks:\n - name: Install a Nginx Package\n yum:\n name: nginx\n state: present\n\n# Create a directory in Windows Instances\n- name: Create a directory in Windows Instance\n vars:\n ansible_connection: aws_ssm\n ansible_shell_type: powershell\n ansible_aws_ssm_bucket_name: nameofthebucket\n ansible_aws_ssm_region: us-east-1\n tasks:\n - name: Create a Directory\n win_file:\n path: C:\\Windows\\temp\n state: directory\n\n# Making use of Dynamic Inventory Plugin\n# =======================================\n# aws_ec2.yml (Dynamic Inventory - Linux)\n# This will return the Instance IDs matching the filter\n#plugin: aws_ec2\n#regions:\n# - us-east-1\n#hostnames:\n# - instance-id\n#filters:\n# tag:SSMTag: ssmlinux\n# -----------------------\n- name: install aws-cli\n hosts: all\n gather_facts: false\n vars:\n ansible_connection: aws_ssm\n ansible_aws_ssm_bucket_name: nameofthebucket\n ansible_aws_ssm_region: us-east-1\n tasks:\n - name: aws-cli\n raw: yum install -y awscli\n tags: aws-cli\n# Execution: ansible-playbook linux.yaml -i aws_ec2.yml\n# The playbook tasks will get executed on the instance ids returned from the dynamic inventory plugin using ssm connection.\n# =====================================================\n# aws_ec2.yml (Dynamic Inventory - Windows)\n#plugin: aws_ec2\n#regions:\n# - us-east-1\n#hostnames:\n# - instance-id\n#filters:\n# tag:SSMTag: ssmwindows\n# -----------------------\n- name: Create a dir.\n hosts: all\n gather_facts: false\n vars:\n ansible_connection: aws_ssm\n ansible_shell_type: powershell\n ansible_aws_ssm_bucket_name: nameofthebucket\n ansible_aws_ssm_region: us-east-1\n tasks:\n - name: Create the directory\n win_file:\n path: C:\\Temp\\SSM_Testing5\n state: directory\n# Execution: ansible-playbook win_file.yaml -i aws_ec2.yml\n# The playbook tasks will get executed on the instance ids returned from the dynamic inventory plugin using ssm connection.\n'''\n\nimport os\nimport getpass\nimport json\nimport pty\nimport random\nimport re\nimport select\nimport string\nimport subprocess\nimport time\n\ntry:\n import boto3\n from botocore.client import Config\n HAS_BOTO_3 = True\nexcept ImportError as e:\n HAS_BOTO_3_ERROR = str(e)\n HAS_BOTO_3 = False\n\nfrom functools import wraps\nfrom ansible.errors import AnsibleConnectionFailure, AnsibleError, AnsibleFileNotFound\nfrom ansible.module_utils.basic import missing_required_lib\nfrom ansible.module_utils.six.moves import xrange\nfrom ansible.module_utils._text import to_bytes, to_native, to_text\nfrom ansible.plugins.connection import ConnectionBase\nfrom ansible.plugins.shell.powershell import _common_args\nfrom ansible.utils.display import Display\n\ndisplay = Display()\n\n\ndef _ssm_retry(func):\n \"\"\"\n Decorator to retry in the case of a connection failure\n Will retry if:\n * an exception is caught\n Will not retry if\n * remaining_tries is <2\n * retries limit reached\n \"\"\"\n @wraps(func)\n def wrapped(self, *args, **kwargs):\n remaining_tries = int(self.get_option('retries')) + 1\n cmd_summary = \"%s...\" % args[0]\n for attempt in range(remaining_tries):\n cmd = args[0]\n\n try:\n return_tuple = func(self, *args, **kwargs)\n display.vvv(return_tuple, host=self.host)\n break\n\n except (AnsibleConnectionFailure, Exception) as e:\n if attempt == remaining_tries - 1:\n raise\n else:\n pause = 2 ** attempt - 1\n if pause > 30:\n pause = 30\n\n if isinstance(e, AnsibleConnectionFailure):\n msg = \"ssm_retry: attempt: %d, cmd (%s), pausing for %d seconds\" % (attempt, cmd_summary, pause)\n else:\n msg = \"ssm_retry: attempt: %d, caught exception(%s) from cmd (%s), pausing for %d seconds\" % (attempt, e, cmd_summary, pause)\n\n display.vv(msg, host=self.host)\n\n time.sleep(pause)\n\n # Do not attempt to reuse the existing session on retries\n self.close()\n\n continue\n\n return return_tuple\n return wrapped\n\n\ndef chunks(lst, n):\n \"\"\"Yield successive n-sized chunks from lst.\"\"\"\n for i in range(0, len(lst), n):\n yield lst[i:i + n]\n\n\nclass Connection(ConnectionBase):\n ''' AWS SSM based connections '''\n\n transport = 'community.aws.aws_ssm'\n allow_executable = False\n allow_extras = True\n has_pipelining = False\n is_windows = False\n _client = None\n _session = None\n _stdout = None\n _session_id = ''\n _timeout = False\n MARK_LENGTH = 26\n\n def __init__(self, *args, **kwargs):\n if not HAS_BOTO_3:\n raise AnsibleError('{0}: {1}'.format(missing_required_lib(\"boto3\"), HAS_BOTO_3_ERROR))\n\n super(Connection, self).__init__(*args, **kwargs)\n self.host = self._play_context.remote_addr\n\n if getattr(self._shell, \"SHELL_FAMILY\", '') == 'powershell':\n self.delegate = None\n self.has_native_async = True\n self.always_pipeline_modules = True\n self.module_implementation_preferences = ('.ps1', '.exe', '')\n self.protocol = None\n self.shell_id = None\n self._shell_type = 'powershell'\n self.is_windows = True\n\n def __del__(self):\n self.close()\n\n def _connect(self):\n ''' connect to the host via ssm '''\n\n self._play_context.remote_user = getpass.getuser()\n\n if not self._session_id:\n self.start_session()\n return self\n\n def reset(self):\n ''' start a fresh ssm session '''\n display.vvvv('reset called on ssm connection')\n return self.start_session()\n\n def start_session(self):\n ''' start ssm session '''\n\n if self.get_option('instance_id') is None:\n self.instance_id = self.host\n else:\n self.instance_id = self.get_option('instance_id')\n\n display.vvv(u\"ESTABLISH SSM CONNECTION TO: {0}\".format(self.instance_id), host=self.host)\n\n executable = self.get_option('plugin')\n if not os.path.exists(to_bytes(executable, errors='surrogate_or_strict')):\n raise AnsibleError(\"failed to find the executable specified %s.\"\n \" Please verify if the executable exists and re-try.\" % executable)\n\n profile_name = self.get_option('profile') or ''\n region_name = self.get_option('region')\n ssm_parameters = dict()\n client = self._get_boto_client('ssm', region_name=region_name, profile_name=profile_name)\n self._client = client\n response = client.start_session(Target=self.instance_id, Parameters=ssm_parameters)\n self._session_id = response['SessionId']\n\n cmd = [\n executable,\n json.dumps(response),\n region_name,\n \"StartSession\",\n profile_name,\n json.dumps({\"Target\": self.instance_id}),\n client.meta.endpoint_url\n ]\n\n display.vvvv(u\"SSM COMMAND: {0}\".format(to_text(cmd)), host=self.host)\n\n stdout_r, stdout_w = pty.openpty()\n session = subprocess.Popen(\n cmd,\n stdin=subprocess.PIPE,\n stdout=stdout_w,\n stderr=subprocess.PIPE,\n close_fds=True,\n bufsize=0,\n )\n\n os.close(stdout_w)\n self._stdout = os.fdopen(stdout_r, 'rb', 0)\n self._session = session\n self._poll_stdout = select.poll()\n self._poll_stdout.register(self._stdout, select.POLLIN)\n\n # Disable command echo and prompt.\n self._prepare_terminal()\n\n display.vvv(u\"SSM CONNECTION ID: {0}\".format(self._session_id), host=self.host)\n\n return session\n\n @_ssm_retry\n def exec_command(self, cmd, in_data=None, sudoable=True):\n ''' run a command on the ssm host '''\n\n super(Connection, self).exec_command(cmd, in_data=in_data, sudoable=sudoable)\n\n display.vvv(u\"EXEC {0}\".format(to_text(cmd)), host=self.host)\n\n session = self._session\n\n mark_begin = \"\".join([random.choice(string.ascii_letters) for i in xrange(self.MARK_LENGTH)])\n if self.is_windows:\n mark_start = mark_begin + \" $LASTEXITCODE\"\n else:\n mark_start = mark_begin\n mark_end = \"\".join([random.choice(string.ascii_letters) for i in xrange(self.MARK_LENGTH)])\n\n # Wrap command in markers accordingly for the shell used\n cmd = self._wrap_command(cmd, sudoable, mark_start, mark_end)\n\n self._flush_stderr(session)\n\n for chunk in chunks(cmd, 1024):\n session.stdin.write(to_bytes(chunk, errors='surrogate_or_strict'))\n\n # Read stdout between the markers\n stdout = ''\n win_line = ''\n begin = False\n stop_time = int(round(time.time())) + self.get_option('ssm_timeout')\n while session.poll() is None:\n remaining = stop_time - int(round(time.time()))\n if remaining < 1:\n self._timeout = True\n display.vvvv(u\"EXEC timeout stdout: {0}\".format(to_text(stdout)), host=self.host)\n raise AnsibleConnectionFailure(\"SSM exec_command timeout on host: %s\"\n % self.instance_id)\n if self._poll_stdout.poll(1000):\n line = self._filter_ansi(self._stdout.readline())\n display.vvvv(u\"EXEC stdout line: {0}\".format(to_text(line)), host=self.host)\n else:\n display.vvvv(u\"EXEC remaining: {0}\".format(remaining), host=self.host)\n continue\n\n if not begin and self.is_windows:\n win_line = win_line + line\n line = win_line\n\n if mark_start in line:\n begin = True\n if not line.startswith(mark_start):\n stdout = ''\n continue\n if begin:\n if mark_end in line:\n display.vvvv(u\"POST_PROCESS: {0}\".format(to_text(stdout)), host=self.host)\n returncode, stdout = self._post_process(stdout, mark_begin)\n break\n else:\n stdout = stdout + line\n\n stderr = self._flush_stderr(session)\n\n return (returncode, stdout, stderr)\n\n def _prepare_terminal(self):\n ''' perform any one-time terminal settings '''\n\n if not self.is_windows:\n cmd = \"stty -echo\\n\" + \"PS1=''\\n\"\n cmd = to_bytes(cmd, errors='surrogate_or_strict')\n self._session.stdin.write(cmd)\n\n def _wrap_command(self, cmd, sudoable, mark_start, mark_end):\n ''' wrap command so stdout and status can be extracted '''\n\n if self.is_windows:\n if not cmd.startswith(\" \".join(_common_args) + \" -EncodedCommand\"):\n cmd = self._shell._encode_script(cmd, preserve_rc=True)\n cmd = cmd + \"; echo \" + mark_start + \"\\necho \" + mark_end + \"\\n\"\n else:\n if sudoable:\n cmd = \"sudo \" + cmd\n cmd = \"echo \" + mark_start + \"\\n\" + cmd + \"\\necho $'\\\\n'$?\\n\" + \"echo \" + mark_end + \"\\n\"\n\n display.vvvv(u\"_wrap_command: '{0}'\".format(to_text(cmd)), host=self.host)\n return cmd\n\n def _post_process(self, stdout, mark_begin):\n ''' extract command status and strip unwanted lines '''\n\n if self.is_windows:\n # Value of $LASTEXITCODE will be the line after the mark\n trailer = stdout[stdout.rfind(mark_begin):]\n last_exit_code = trailer.splitlines()[1]\n if last_exit_code.isdigit:\n returncode = int(last_exit_code)\n else:\n returncode = -1\n # output to keep will be before the mark\n stdout = stdout[:stdout.rfind(mark_begin)]\n\n # If it looks like JSON remove any newlines\n if stdout.startswith('{'):\n stdout = stdout.replace('\\n', '')\n\n return (returncode, stdout)\n else:\n # Get command return code\n returncode = int(stdout.splitlines()[-2])\n\n # Throw away ending lines\n for x in range(0, 3):\n stdout = stdout[:stdout.rfind('\\n')]\n\n return (returncode, stdout)\n\n def _filter_ansi(self, line):\n ''' remove any ANSI terminal control codes '''\n line = to_text(line)\n\n if self.is_windows:\n osc_filter = re.compile(r'\\x1b\\][^\\x07]*\\x07')\n line = osc_filter.sub('', line)\n ansi_filter = re.compile(r'(\\x9B|\\x1B\\[)[0-?]*[ -/]*[@-~]')\n line = ansi_filter.sub('', line)\n\n # Replace or strip sequence (at terminal width)\n line = line.replace('\\r\\r\\n', '\\n')\n if len(line) == 201:\n line = line[:-1]\n\n return line\n\n def _flush_stderr(self, subprocess):\n ''' read and return stderr with minimal blocking '''\n\n poll_stderr = select.poll()\n poll_stderr.register(subprocess.stderr, select.POLLIN)\n stderr = ''\n\n while subprocess.poll() is None:\n if poll_stderr.poll(1):\n line = subprocess.stderr.readline()\n display.vvvv(u\"stderr line: {0}\".format(to_text(line)), host=self.host)\n stderr = stderr + line\n else:\n break\n\n return stderr\n\n def _get_url(self, client_method, bucket_name, out_path, http_method, profile_name):\n ''' Generate URL for get_object / put_object '''\n region_name = self.get_option('region') or 'us-east-1'\n client = self._get_boto_client('s3', region_name=region_name, profile_name=profile_name)\n return client.generate_presigned_url(client_method, Params={'Bucket': bucket_name, 'Key': out_path}, ExpiresIn=3600, HttpMethod=http_method)\n\n def _get_boto_client(self, service, region_name=None, profile_name=None):\n ''' Gets a boto3 client based on the STS token '''\n\n aws_access_key_id = self.get_option('access_key_id')\n aws_secret_access_key = self.get_option('secret_access_key')\n aws_session_token = self.get_option('session_token')\n\n if aws_access_key_id is None:\n aws_access_key_id = os.environ.get(\"AWS_ACCESS_KEY_ID\", None)\n if aws_secret_access_key is None:\n aws_secret_access_key = os.environ.get(\"AWS_SECRET_ACCESS_KEY\", None)\n if aws_session_token is None:\n aws_session_token = os.environ.get(\"AWS_SESSION_TOKEN\", None)\n if not profile_name:\n profile_name = os.environ.get(\"AWS_PROFILE\", None)\n\n session_args = dict(\n aws_access_key_id=aws_access_key_id,\n aws_secret_access_key=aws_secret_access_key,\n aws_session_token=aws_session_token,\n region_name=region_name,\n )\n if profile_name:\n session_args['profile_name'] = profile_name\n session = boto3.session.Session(**session_args)\n\n client = session.client(\n service,\n config=Config(signature_version=\"s3v4\")\n )\n return client\n\n @_ssm_retry\n def _file_transport_command(self, in_path, out_path, ssm_action):\n ''' transfer a file from using an intermediate S3 bucket '''\n\n path_unescaped = u\"{0}/{1}\".format(self.instance_id, out_path)\n s3_path = path_unescaped.replace('\\\\', '/')\n bucket_url = 's3://%s/%s' % (self.get_option('bucket_name'), s3_path)\n\n profile_name = self.get_option('profile')\n\n if self.is_windows:\n put_command = \"Invoke-WebRequest -Method PUT -InFile '%s' -Uri '%s' -UseBasicParsing\" % (\n in_path, self._get_url('put_object', self.get_option('bucket_name'), s3_path, 'PUT', profile_name))\n get_command = \"Invoke-WebRequest '%s' -OutFile '%s'\" % (\n self._get_url('get_object', self.get_option('bucket_name'), s3_path, 'GET', profile_name), out_path)\n else:\n put_command = \"curl --request PUT --upload-file '%s' '%s'\" % (\n in_path, self._get_url('put_object', self.get_option('bucket_name'), s3_path, 'PUT', profile_name))\n get_command = \"curl '%s' -o '%s'\" % (\n self._get_url('get_object', self.get_option('bucket_name'), s3_path, 'GET', profile_name), out_path)\n\n client = self._get_boto_client('s3', profile_name=profile_name)\n if ssm_action == 'get':\n (returncode, stdout, stderr) = self.exec_command(put_command, in_data=None, sudoable=False)\n with open(to_bytes(out_path, errors='surrogate_or_strict'), 'wb') as data:\n client.download_fileobj(self.get_option('bucket_name'), s3_path, data)\n else:\n with open(to_bytes(in_path, errors='surrogate_or_strict'), 'rb') as data:\n client.upload_fileobj(data, self.get_option('bucket_name'), s3_path)\n (returncode, stdout, stderr) = self.exec_command(get_command, in_data=None, sudoable=False)\n\n # Remove the files from the bucket after they've been transferred\n client.delete_object(Bucket=self.get_option('bucket_name'), Key=s3_path)\n\n # Check the return code\n if returncode == 0:\n return (returncode, stdout, stderr)\n else:\n raise AnsibleError(\"failed to transfer file to %s %s:\\n%s\\n%s\" %\n (to_native(in_path), to_native(out_path), to_native(stdout), to_native(stderr)))\n\n def put_file(self, in_path, out_path):\n ''' transfer a file from local to remote '''\n\n super(Connection, self).put_file(in_path, out_path)\n\n display.vvv(u\"PUT {0} TO {1}\".format(in_path, out_path), host=self.host)\n if not os.path.exists(to_bytes(in_path, errors='surrogate_or_strict')):\n raise AnsibleFileNotFound(\"file or module does not exist: {0}\".format(to_native(in_path)))\n\n return self._file_transport_command(in_path, out_path, 'put')\n\n def fetch_file(self, in_path, out_path):\n ''' fetch a file from remote to local '''\n\n super(Connection, self).fetch_file(in_path, out_path)\n\n display.vvv(u\"FETCH {0} TO {1}\".format(in_path, out_path), host=self.host)\n return self._file_transport_command(in_path, out_path, 'get')\n\n def close(self):\n ''' terminate the connection '''\n if self._session_id:\n\n display.vvv(u\"CLOSING SSM CONNECTION TO: {0}\".format(self.instance_id), host=self.host)\n if self._timeout:\n self._session.terminate()\n else:\n cmd = b\"\\nexit\\n\"\n self._session.communicate(cmd)\n\n display.vvvv(u\"TERMINATE SSM SESSION: {0}\".format(self._session_id), host=self.host)\n self._client.terminate_session(SessionId=self._session_id)\n self._session_id = ''\n", "path": "plugins/connection/aws_ssm.py" } ]
diff --git a/changelogs/fragments/542-ensure-ssm-plugin-terminates-connections.yml b/changelogs/fragments/542-ensure-ssm-plugin-terminates-connections.yml new file mode 100644 index 00000000000..1cbe860d1d2 --- /dev/null +++ b/changelogs/fragments/542-ensure-ssm-plugin-terminates-connections.yml @@ -0,0 +1,2 @@ +bugfixes: + - aws_ssm - Adds destructor to SSM connection plugin to ensure connections are properly cleaned up after usage (https://github.com/ansible-collections/community.aws/pull/542). diff --git a/plugins/connection/aws_ssm.py b/plugins/connection/aws_ssm.py index b013ff5a71a..23e222614d0 100644 --- a/plugins/connection/aws_ssm.py +++ b/plugins/connection/aws_ssm.py @@ -280,6 +280,9 @@ def __init__(self, *args, **kwargs): self._shell_type = 'powershell' self.is_windows = True + def __del__(self): + self.close() + def _connect(self): ''' connect to the host via ssm '''
conda__conda-7244
conda's configuration context is not initialized in conda.exports root cause of https://github.com/conda-forge/conda-smithy/issues/762
[ { "content": "# -*- coding: utf-8 -*-\nfrom __future__ import absolute_import, division, print_function, unicode_literals\n\nfrom collections import Hashable\nfrom logging import getLogger\nimport threading\nfrom warnings import warn\n\nlog = getLogger(__name__)\n\nfrom . import CondaError # NOQA\nCondaError = CondaError\n\nfrom . import compat, plan # NOQA\ncompat, plan = compat, plan\n\nfrom .core.solve import Solver # NOQA\nSolver = Solver\n\nfrom .plan import display_actions # NOQA\ndisplay_actions = display_actions\n\nfrom .cli.common import specs_from_args, spec_from_line, specs_from_url # NOQA\nfrom .cli.conda_argparse import add_parser_prefix, add_parser_channels # NOQA\nadd_parser_channels, add_parser_prefix = add_parser_channels, add_parser_prefix\nspecs_from_args, spec_from_line = specs_from_args, spec_from_line\nspecs_from_url = specs_from_url\n\nfrom .cli.conda_argparse import ArgumentParser # NOQA\nArgumentParser = ArgumentParser\n\nfrom .common.compat import PY3, StringIO, input, iteritems, string_types, text_type # NOQA\nPY3, StringIO, input, iteritems, string_types, text_type = PY3, StringIO, input, iteritems, string_types, text_type # NOQA\nfrom .gateways.connection.session import CondaSession # NOQA\nCondaSession = CondaSession\n\nfrom .common.toposort import _toposort # NOQA\n_toposort = _toposort\n\nfrom .gateways.disk.link import lchmod # NOQA\nlchmod = lchmod\n\nfrom .gateways.connection.download import TmpDownload # NOQA\n\nTmpDownload = TmpDownload\nhandle_proxy_407 = lambda x, y: warn(\"handle_proxy_407 is deprecated. \"\n \"Now handled by CondaSession.\")\nfrom .core.index import dist_str_in_index, fetch_index, get_index # NOQA\ndist_str_in_index, fetch_index, get_index = dist_str_in_index, fetch_index, get_index # NOQA\nfrom .core.package_cache_data import download, rm_fetched # NOQA\ndownload, rm_fetched = download, rm_fetched\n\nfrom .install import package_cache, prefix_placeholder, symlink_conda # NOQA\npackage_cache, prefix_placeholder, symlink_conda = package_cache, prefix_placeholder, symlink_conda\n\nfrom .gateways.disk.delete import delete_trash, move_to_trash # NOQA\ndelete_trash, move_to_trash = delete_trash, move_to_trash\n\nfrom .core.prefix_data import is_linked, linked, linked_data # NOQA\nis_linked, linked, linked_data = is_linked, linked, linked_data\n\nfrom .misc import untracked, walk_prefix # NOQA\nuntracked, walk_prefix = untracked, walk_prefix\n\nfrom .resolve import MatchSpec, ResolvePackageNotFound, Resolve, Unsatisfiable # NOQA\nMatchSpec, Resolve = MatchSpec, Resolve\nUnsatisfiable = Unsatisfiable\nNoPackagesFound = NoPackagesFoundError = ResolvePackageNotFound\n\nfrom .utils import hashsum_file, human_bytes, unix_path_to_win, url_path # NOQA\nfrom .common.path import win_path_to_unix # NOQA\nhashsum_file, human_bytes = hashsum_file, human_bytes\nunix_path_to_win = unix_path_to_win\nwin_path_to_unix, url_path = win_path_to_unix, url_path\n\nfrom .gateways.disk.read import compute_md5sum # NOQA\nmd5_file = compute_md5sum\n\nfrom .models.version import VersionOrder, normalized_version # NOQA\nVersionOrder, normalized_version = VersionOrder, normalized_version # NOQA\n\nimport conda.base.context # NOQA\nfrom .base.context import get_prefix, non_x86_linux_machines, sys_rc_path # NOQA\nnon_x86_linux_machines, sys_rc_path = non_x86_linux_machines, sys_rc_path\nget_prefix = get_prefix\n\nfrom ._vendor.auxlib.entity import EntityEncoder # NOQA\nEntityEncoder = EntityEncoder\nfrom .base.constants import DEFAULT_CHANNELS, DEFAULT_CHANNELS_WIN, DEFAULT_CHANNELS_UNIX # NOQA\nDEFAULT_CHANNELS, DEFAULT_CHANNELS_WIN, DEFAULT_CHANNELS_UNIX = DEFAULT_CHANNELS, DEFAULT_CHANNELS_WIN, DEFAULT_CHANNELS_UNIX # NOQA\nget_default_urls = lambda: DEFAULT_CHANNELS\n\narch_name = conda.base.context.context.arch_name\nbinstar_upload = conda.base.context.context.anaconda_upload\nbits = conda.base.context.context.bits\ndefault_prefix = conda.base.context.context.default_prefix\ndefault_python = conda.base.context.context.default_python\nenvs_dirs = conda.base.context.context.envs_dirs\npkgs_dirs = conda.base.context.context.pkgs_dirs\nplatform = conda.base.context.context.platform\nroot_dir = conda.base.context.context.root_prefix\nroot_writable = conda.base.context.context.root_writable\nsubdir = conda.base.context.context.subdir\nconda_private = conda.base.context.context.conda_private\nfrom .models.channel import get_conda_build_local_url # NOQA\nget_rc_urls = lambda: list(conda.base.context.context.channels)\nget_local_urls = lambda: list(get_conda_build_local_url()) or []\nload_condarc = lambda fn: conda.base.context.reset_context([fn])\nfrom .exceptions import PaddingError, LinkError, CondaOSError, PathNotFoundError # NOQA\nPaddingError = PaddingError\nLinkError = LinkError\nCondaOSError = CondaOSError\n# PathNotFoundError is the conda 4.4.x name for it - let's plan ahead.\nPathNotFoundError = CondaFileNotFoundError = PathNotFoundError\nfrom .gateways.disk.link import CrossPlatformStLink # NOQA\nCrossPlatformStLink = CrossPlatformStLink\n\nfrom .models.enums import FileMode # NOQA\nFileMode = FileMode\nfrom .models.enums import PathType # NOQA\nPathType = PathType\n\nfrom .models.records import PackageRecord # NOQA\nPackageRecord = IndexRecord = PackageRecord\n\nfrom .compat import TemporaryDirectory # NOQA\nTemporaryDirectory = TemporaryDirectory\n\nfrom .gateways.subprocess import ACTIVE_SUBPROCESSES, subprocess_call # NOQA\nACTIVE_SUBPROCESSES, subprocess_call = ACTIVE_SUBPROCESSES, subprocess_call\n\nfrom .core.subdir_data import cache_fn_url # NOQA\ncache_fn_url = cache_fn_url\n\n\nclass Completer(object): # pragma: no cover\n def get_items(self):\n return self._get_items()\n\n def __contains__(self, item):\n return True\n\n def __iter__(self):\n return iter(self.get_items())\n\n\nclass InstalledPackages(object):\n pass\n\n\nclass memoized(object): # pragma: no cover\n \"\"\"Decorator. Caches a function's return value each time it is called.\n If called later with the same arguments, the cached value is returned\n (not reevaluated).\n \"\"\"\n def __init__(self, func):\n self.func = func\n self.cache = {}\n self.lock = threading.Lock()\n\n def __call__(self, *args, **kw):\n newargs = []\n for arg in args:\n if isinstance(arg, list):\n newargs.append(tuple(arg))\n elif not isinstance(arg, Hashable):\n # uncacheable. a list, for instance.\n # better to not cache than blow up.\n return self.func(*args, **kw)\n else:\n newargs.append(arg)\n newargs = tuple(newargs)\n key = (newargs, frozenset(sorted(kw.items())))\n with self.lock:\n if key in self.cache:\n return self.cache[key]\n else:\n value = self.func(*args, **kw)\n self.cache[key] = value\n return value\n\n\nfrom .gateways.disk.delete import rm_rf as _rm_rf # NOQA\nfrom .core.prefix_data import delete_prefix_from_linked_data # NOQA\n\n\ndef rm_rf(path, max_retries=5, trash=True):\n _rm_rf(path, max_retries, trash)\n delete_prefix_from_linked_data(path)\n\n\n# ######################\n# signature.py\n# ######################\nKEYS = None\nKEYS_DIR = None\n\n\ndef hash_file(_):\n return None # pragma: no cover\n\n\ndef verify(_):\n return False # pragma: no cover\n\n\nfrom .plan import execute_actions, execute_instructions, execute_plan, install_actions # NOQA\nexecute_actions, execute_instructions = execute_actions, execute_instructions\nexecute_plan, install_actions = execute_plan, install_actions\n", "path": "conda/exports.py" } ]
[ { "content": "# -*- coding: utf-8 -*-\nfrom __future__ import absolute_import, division, print_function, unicode_literals\n\nfrom collections import Hashable\nfrom logging import getLogger\nimport threading\nfrom warnings import warn\n\nlog = getLogger(__name__)\n\nfrom . import CondaError # NOQA\nCondaError = CondaError\n\nfrom .base.context import reset_context # NOQA\nreset_context() # initialize context when conda.exports is imported\n\nfrom . import compat, plan # NOQA\ncompat, plan = compat, plan\n\nfrom .core.solve import Solver # NOQA\nSolver = Solver\n\nfrom .plan import display_actions # NOQA\ndisplay_actions = display_actions\n\nfrom .cli.common import specs_from_args, spec_from_line, specs_from_url # NOQA\nfrom .cli.conda_argparse import add_parser_prefix, add_parser_channels # NOQA\nadd_parser_channels, add_parser_prefix = add_parser_channels, add_parser_prefix\nspecs_from_args, spec_from_line = specs_from_args, spec_from_line\nspecs_from_url = specs_from_url\n\nfrom .cli.conda_argparse import ArgumentParser # NOQA\nArgumentParser = ArgumentParser\n\nfrom .common.compat import PY3, StringIO, input, iteritems, string_types, text_type # NOQA\nPY3, StringIO, input, iteritems, string_types, text_type = PY3, StringIO, input, iteritems, string_types, text_type # NOQA\nfrom .gateways.connection.session import CondaSession # NOQA\nCondaSession = CondaSession\n\nfrom .common.toposort import _toposort # NOQA\n_toposort = _toposort\n\nfrom .gateways.disk.link import lchmod # NOQA\nlchmod = lchmod\n\nfrom .gateways.connection.download import TmpDownload # NOQA\n\nTmpDownload = TmpDownload\nhandle_proxy_407 = lambda x, y: warn(\"handle_proxy_407 is deprecated. \"\n \"Now handled by CondaSession.\")\nfrom .core.index import dist_str_in_index, fetch_index, get_index # NOQA\ndist_str_in_index, fetch_index, get_index = dist_str_in_index, fetch_index, get_index # NOQA\nfrom .core.package_cache_data import download, rm_fetched # NOQA\ndownload, rm_fetched = download, rm_fetched\n\nfrom .install import package_cache, prefix_placeholder, symlink_conda # NOQA\npackage_cache, prefix_placeholder, symlink_conda = package_cache, prefix_placeholder, symlink_conda\n\nfrom .gateways.disk.delete import delete_trash, move_to_trash # NOQA\ndelete_trash, move_to_trash = delete_trash, move_to_trash\n\nfrom .core.prefix_data import is_linked, linked, linked_data # NOQA\nis_linked, linked, linked_data = is_linked, linked, linked_data\n\nfrom .misc import untracked, walk_prefix # NOQA\nuntracked, walk_prefix = untracked, walk_prefix\n\nfrom .resolve import MatchSpec, ResolvePackageNotFound, Resolve, Unsatisfiable # NOQA\nMatchSpec, Resolve = MatchSpec, Resolve\nUnsatisfiable = Unsatisfiable\nNoPackagesFound = NoPackagesFoundError = ResolvePackageNotFound\n\nfrom .utils import hashsum_file, human_bytes, unix_path_to_win, url_path # NOQA\nfrom .common.path import win_path_to_unix # NOQA\nhashsum_file, human_bytes = hashsum_file, human_bytes\nunix_path_to_win = unix_path_to_win\nwin_path_to_unix, url_path = win_path_to_unix, url_path\n\nfrom .gateways.disk.read import compute_md5sum # NOQA\nmd5_file = compute_md5sum\n\nfrom .models.version import VersionOrder, normalized_version # NOQA\nVersionOrder, normalized_version = VersionOrder, normalized_version # NOQA\n\nimport conda.base.context # NOQA\nfrom .base.context import get_prefix, non_x86_linux_machines, sys_rc_path # NOQA\nnon_x86_linux_machines, sys_rc_path = non_x86_linux_machines, sys_rc_path\nget_prefix = get_prefix\n\nfrom ._vendor.auxlib.entity import EntityEncoder # NOQA\nEntityEncoder = EntityEncoder\nfrom .base.constants import DEFAULT_CHANNELS, DEFAULT_CHANNELS_WIN, DEFAULT_CHANNELS_UNIX # NOQA\nDEFAULT_CHANNELS, DEFAULT_CHANNELS_WIN, DEFAULT_CHANNELS_UNIX = DEFAULT_CHANNELS, DEFAULT_CHANNELS_WIN, DEFAULT_CHANNELS_UNIX # NOQA\nget_default_urls = lambda: DEFAULT_CHANNELS\n\narch_name = conda.base.context.context.arch_name\nbinstar_upload = conda.base.context.context.anaconda_upload\nbits = conda.base.context.context.bits\ndefault_prefix = conda.base.context.context.default_prefix\ndefault_python = conda.base.context.context.default_python\nenvs_dirs = conda.base.context.context.envs_dirs\npkgs_dirs = conda.base.context.context.pkgs_dirs\nplatform = conda.base.context.context.platform\nroot_dir = conda.base.context.context.root_prefix\nroot_writable = conda.base.context.context.root_writable\nsubdir = conda.base.context.context.subdir\nconda_private = conda.base.context.context.conda_private\nfrom .models.channel import get_conda_build_local_url # NOQA\nget_rc_urls = lambda: list(conda.base.context.context.channels)\nget_local_urls = lambda: list(get_conda_build_local_url()) or []\nload_condarc = lambda fn: conda.base.context.reset_context([fn])\nfrom .exceptions import PaddingError, LinkError, CondaOSError, PathNotFoundError # NOQA\nPaddingError = PaddingError\nLinkError = LinkError\nCondaOSError = CondaOSError\n# PathNotFoundError is the conda 4.4.x name for it - let's plan ahead.\nPathNotFoundError = CondaFileNotFoundError = PathNotFoundError\nfrom .gateways.disk.link import CrossPlatformStLink # NOQA\nCrossPlatformStLink = CrossPlatformStLink\n\nfrom .models.enums import FileMode # NOQA\nFileMode = FileMode\nfrom .models.enums import PathType # NOQA\nPathType = PathType\n\nfrom .models.records import PackageRecord # NOQA\nPackageRecord = IndexRecord = PackageRecord\n\nfrom .compat import TemporaryDirectory # NOQA\nTemporaryDirectory = TemporaryDirectory\n\nfrom .gateways.subprocess import ACTIVE_SUBPROCESSES, subprocess_call # NOQA\nACTIVE_SUBPROCESSES, subprocess_call = ACTIVE_SUBPROCESSES, subprocess_call\n\nfrom .core.subdir_data import cache_fn_url # NOQA\ncache_fn_url = cache_fn_url\n\n\nclass Completer(object): # pragma: no cover\n def get_items(self):\n return self._get_items()\n\n def __contains__(self, item):\n return True\n\n def __iter__(self):\n return iter(self.get_items())\n\n\nclass InstalledPackages(object):\n pass\n\n\nclass memoized(object): # pragma: no cover\n \"\"\"Decorator. Caches a function's return value each time it is called.\n If called later with the same arguments, the cached value is returned\n (not reevaluated).\n \"\"\"\n def __init__(self, func):\n self.func = func\n self.cache = {}\n self.lock = threading.Lock()\n\n def __call__(self, *args, **kw):\n newargs = []\n for arg in args:\n if isinstance(arg, list):\n newargs.append(tuple(arg))\n elif not isinstance(arg, Hashable):\n # uncacheable. a list, for instance.\n # better to not cache than blow up.\n return self.func(*args, **kw)\n else:\n newargs.append(arg)\n newargs = tuple(newargs)\n key = (newargs, frozenset(sorted(kw.items())))\n with self.lock:\n if key in self.cache:\n return self.cache[key]\n else:\n value = self.func(*args, **kw)\n self.cache[key] = value\n return value\n\n\nfrom .gateways.disk.delete import rm_rf as _rm_rf # NOQA\nfrom .core.prefix_data import delete_prefix_from_linked_data # NOQA\n\n\ndef rm_rf(path, max_retries=5, trash=True):\n _rm_rf(path, max_retries, trash)\n delete_prefix_from_linked_data(path)\n\n\n# ######################\n# signature.py\n# ######################\nKEYS = None\nKEYS_DIR = None\n\n\ndef hash_file(_):\n return None # pragma: no cover\n\n\ndef verify(_):\n return False # pragma: no cover\n\n\nfrom .plan import execute_actions, execute_instructions, execute_plan, install_actions # NOQA\nexecute_actions, execute_instructions = execute_actions, execute_instructions\nexecute_plan, install_actions = execute_plan, install_actions\n", "path": "conda/exports.py" } ]
diff --git a/conda/exports.py b/conda/exports.py index 67af538f63d..62b755d58f8 100644 --- a/conda/exports.py +++ b/conda/exports.py @@ -11,6 +11,9 @@ from . import CondaError # NOQA CondaError = CondaError +from .base.context import reset_context # NOQA +reset_context() # initialize context when conda.exports is imported + from . import compat, plan # NOQA compat, plan = compat, plan diff --git a/tests/cli/test_activate.py b/tests/cli/test_activate.py index 10bfc7b6e35..edd0ef7eef8 100644 --- a/tests/cli/test_activate.py +++ b/tests/cli/test_activate.py @@ -625,7 +625,7 @@ def test_activate_does_not_leak_echo_setting(shell): assert_equals(stdout, u'ECHO is on.', stderr) [email protected](datetime.now() < datetime(2018, 5, 1), reason="save for later") [email protected](True, reason="save for later") @pytest.mark.installed def test_activate_non_ascii_char_in_path(shell): shell_vars = _format_vars(shell) diff --git a/tests/test_create.py b/tests/test_create.py index d9e951bf968..0478a6f3fab 100644 --- a/tests/test_create.py +++ b/tests/test_create.py @@ -566,6 +566,7 @@ def test_create_empty_env(self): assert stderr == '' self.assertIsInstance(stdout, str) + @pytest.mark.skipif(True, reason="pip 10 dropped --egg") def test_list_with_pip_egg(self): from conda.exports import rm_rf as _rm_rf with make_temp_env("python=3.5 pip") as prefix: diff --git a/tests/test_export.py b/tests/test_export.py index 2a8167aa884..7a999c018a6 100644 --- a/tests/test_export.py +++ b/tests/test_export.py @@ -31,7 +31,7 @@ def test_basic(self): output2, error= run_command(Commands.LIST, prefix2, "-e") self.assertEqual(output, output2) - @pytest.mark.xfail(datetime.now() < datetime(2018, 5, 1), reason="Bring back `conda list --export` #3445", strict=True) + @pytest.mark.skipif(True, reason="Bring back `conda list --export` #3445", strict=True) def test_multi_channel_export(self): """ When try to import from txt
django-import-export__django-import-export-1039
DecimalWidget should be initialized from text As [per doc](https://docs.python.org/3/library/decimal.html): ```python >>> Decimal('3.14') Decimal('3.14') >>> Decimal(3.14) Decimal('3.140000000000000124344978758017532527446746826171875') ``` When I've changed this line: https://github.com/django-import-export/django-import-export/blob/3cf5e3f9796a5caf7c5ea3928119af7ce4706c0d/import_export/widgets.py#L88 to ```python return Decimal(force_text(value)) ``` the import of value `1.4` changed from: 1.~~40~~399999999999999911182158029987476766109466552734375 to: 1.4~~0~~
[ { "content": "import json\nfrom datetime import date, datetime\nfrom decimal import Decimal\n\nfrom django.conf import settings\nfrom django.core.exceptions import ObjectDoesNotExist\nfrom django.utils import datetime_safe, timezone\nfrom django.utils.dateparse import parse_duration\nfrom django.utils.encoding import force_text, smart_text\n\n\nclass Widget:\n \"\"\"\n A Widget takes care of converting between import and export representations.\n\n This is achieved by the two methods,\n :meth:`~import_export.widgets.Widget.clean` and\n :meth:`~import_export.widgets.Widget.render`.\n \"\"\"\n def clean(self, value, row=None, *args, **kwargs):\n \"\"\"\n Returns an appropriate Python object for an imported value.\n\n For example, if you import a value from a spreadsheet,\n :meth:`~import_export.widgets.Widget.clean` handles conversion\n of this value into the corresponding Python object.\n\n Numbers or dates can be *cleaned* to their respective data types and\n don't have to be imported as Strings.\n \"\"\"\n return value\n\n def render(self, value, obj=None):\n \"\"\"\n Returns an export representation of a Python value.\n\n For example, if you have an object you want to export,\n :meth:`~import_export.widgets.Widget.render` takes care of converting\n the object's field to a value that can be written to a spreadsheet.\n \"\"\"\n return force_text(value)\n\n\nclass NumberWidget(Widget):\n \"\"\"\n \"\"\"\n\n def is_empty(self, value):\n if isinstance(value, str):\n value = value.strip()\n # 0 is not empty\n return value is None or value == \"\"\n\n def render(self, value, obj=None):\n return value\n\n\nclass FloatWidget(NumberWidget):\n \"\"\"\n Widget for converting floats fields.\n \"\"\"\n\n def clean(self, value, row=None, *args, **kwargs):\n if self.is_empty(value):\n return None\n return float(value)\n\n\nclass IntegerWidget(NumberWidget):\n \"\"\"\n Widget for converting integer fields.\n \"\"\"\n\n def clean(self, value, row=None, *args, **kwargs):\n if self.is_empty(value):\n return None\n return int(float(value))\n\n\nclass DecimalWidget(NumberWidget):\n \"\"\"\n Widget for converting decimal fields.\n \"\"\"\n\n def clean(self, value, row=None, *args, **kwargs):\n if self.is_empty(value):\n return None\n return Decimal(value)\n\n\nclass CharWidget(Widget):\n \"\"\"\n Widget for converting text fields.\n \"\"\"\n\n def render(self, value, obj=None):\n return force_text(value)\n\n\nclass BooleanWidget(Widget):\n \"\"\"\n Widget for converting boolean fields.\n \"\"\"\n TRUE_VALUES = [\"1\", 1]\n FALSE_VALUE = \"0\"\n\n def render(self, value, obj=None):\n if value is None:\n return \"\"\n return self.TRUE_VALUES[0] if value else self.FALSE_VALUE\n\n def clean(self, value, row=None, *args, **kwargs):\n if value == \"\":\n return None\n return True if value in self.TRUE_VALUES else False\n\n\nclass DateWidget(Widget):\n \"\"\"\n Widget for converting date fields.\n\n Takes optional ``format`` parameter.\n \"\"\"\n\n def __init__(self, format=None):\n if format is None:\n if not settings.DATE_INPUT_FORMATS:\n formats = (\"%Y-%m-%d\",)\n else:\n formats = settings.DATE_INPUT_FORMATS\n else:\n formats = (format,)\n self.formats = formats\n\n def clean(self, value, row=None, *args, **kwargs):\n if not value:\n return None\n if isinstance(value, date):\n return value\n for format in self.formats:\n try:\n return datetime.strptime(value, format).date()\n except (ValueError, TypeError):\n continue\n raise ValueError(\"Enter a valid date.\")\n\n def render(self, value, obj=None):\n if not value:\n return \"\"\n try:\n return value.strftime(self.formats[0])\n except:\n return datetime_safe.new_date(value).strftime(self.formats[0])\n\n\nclass DateTimeWidget(Widget):\n \"\"\"\n Widget for converting date fields.\n\n Takes optional ``format`` parameter. If none is set, either\n ``settings.DATETIME_INPUT_FORMATS`` or ``\"%Y-%m-%d %H:%M:%S\"`` is used.\n \"\"\"\n\n def __init__(self, format=None):\n if format is None:\n if not settings.DATETIME_INPUT_FORMATS:\n formats = (\"%Y-%m-%d %H:%M:%S\",)\n else:\n formats = settings.DATETIME_INPUT_FORMATS\n else:\n formats = (format,)\n self.formats = formats\n\n def clean(self, value, row=None, *args, **kwargs):\n if not value:\n return None\n if isinstance(value, datetime):\n return value\n for format in self.formats:\n try:\n dt = datetime.strptime(value, format)\n if settings.USE_TZ:\n # make datetime timezone aware so we don't compare\n # naive datetime to an aware one\n dt = timezone.make_aware(dt,\n timezone.get_default_timezone())\n return dt\n except (ValueError, TypeError):\n continue\n raise ValueError(\"Enter a valid date/time.\")\n\n def render(self, value, obj=None):\n if not value:\n return \"\"\n if settings.USE_TZ:\n value = timezone.localtime(value)\n return value.strftime(self.formats[0])\n\n\nclass TimeWidget(Widget):\n \"\"\"\n Widget for converting time fields.\n\n Takes optional ``format`` parameter.\n \"\"\"\n\n def __init__(self, format=None):\n if format is None:\n if not settings.TIME_INPUT_FORMATS:\n formats = (\"%H:%M:%S\",)\n else:\n formats = settings.TIME_INPUT_FORMATS\n else:\n formats = (format,)\n self.formats = formats\n\n def clean(self, value, row=None, *args, **kwargs):\n if not value:\n return None\n for format in self.formats:\n try:\n return datetime.strptime(value, format).time()\n except (ValueError, TypeError):\n continue\n raise ValueError(\"Enter a valid time.\")\n\n def render(self, value, obj=None):\n if not value:\n return \"\"\n return value.strftime(self.formats[0])\n\n\nclass DurationWidget(Widget):\n \"\"\"\n Widget for converting time duration fields.\n \"\"\"\n\n def clean(self, value, row=None, *args, **kwargs):\n if not value:\n return None\n\n try:\n return parse_duration(value)\n except (ValueError, TypeError):\n raise ValueError(\"Enter a valid duration.\")\n\n def render(self, value, obj=None):\n if not value:\n return \"\"\n return str(value)\n\n\nclass SimpleArrayWidget(Widget):\n \"\"\"\n Widget for an Array field. Can be used for Postgres' Array field.\n\n :param separator: Defaults to ``','``\n \"\"\"\n\n def __init__(self, separator=None):\n if separator is None:\n separator = ','\n self.separator = separator\n super().__init__()\n\n def clean(self, value, row=None, *args, **kwargs):\n return value.split(self.separator) if value else []\n\n def render(self, value, obj=None):\n return self.separator.join(str(v) for v in value)\n\n\nclass JSONWidget(Widget):\n \"\"\"\n Widget for a JSON object (especially required for jsonb fields in PostgreSQL database.)\n\n :param value: Defaults to JSON format.\n The widget covers two cases: Proper JSON string with double quotes, else it\n tries to use single quotes and then convert it to proper JSON.\n \"\"\"\n\n def clean(self, value, row=None, *args, **kwargs):\n val = super().clean(value)\n if val:\n try:\n return json.loads(val)\n except json.decoder.JSONDecodeError:\n return json.loads(val.replace(\"'\", \"\\\"\"))\n\n def render(self, value, obj=None):\n if value:\n return json.dumps(value)\n\n\nclass ForeignKeyWidget(Widget):\n \"\"\"\n Widget for a ``ForeignKey`` field which looks up a related model using\n \"natural keys\" in both export an import.\n\n The lookup field defaults to using the primary key (``pk``) as lookup\n criterion but can be customised to use any field on the related model.\n\n Unlike specifying a related field in your resource like so…\n\n ::\n\n class Meta:\n fields = ('author__name',)\n\n …using a :class:`~import_export.widgets.ForeignKeyWidget` has the\n advantage that it can not only be used for exporting, but also importing\n data with foreign key relationships.\n\n Here's an example on how to use\n :class:`~import_export.widgets.ForeignKeyWidget` to lookup related objects\n using ``Author.name`` instead of ``Author.pk``::\n\n from import_export import fields, resources\n from import_export.widgets import ForeignKeyWidget\n\n class BookResource(resources.ModelResource):\n author = fields.Field(\n column_name='author',\n attribute='author',\n widget=ForeignKeyWidget(Author, 'name'))\n\n class Meta:\n fields = ('author',)\n\n :param model: The Model the ForeignKey refers to (required).\n :param field: A field on the related model used for looking up a particular object.\n \"\"\"\n def __init__(self, model, field='pk', *args, **kwargs):\n self.model = model\n self.field = field\n super().__init__(*args, **kwargs)\n\n def get_queryset(self, value, row, *args, **kwargs):\n \"\"\"\n Returns a queryset of all objects for this Model.\n\n Overwrite this method if you want to limit the pool of objects from\n which the related object is retrieved.\n\n :param value: The field's value in the datasource.\n :param row: The datasource's current row.\n\n As an example; if you'd like to have ForeignKeyWidget look up a Person\n by their pre- **and** lastname column, you could subclass the widget\n like so::\n\n class FullNameForeignKeyWidget(ForeignKeyWidget):\n def get_queryset(self, value, row):\n return self.model.objects.filter(\n first_name__iexact=row[\"first_name\"],\n last_name__iexact=row[\"last_name\"]\n )\n \"\"\"\n return self.model.objects.all()\n\n def clean(self, value, row=None, *args, **kwargs):\n val = super().clean(value)\n if val:\n return self.get_queryset(value, row, *args, **kwargs).get(**{self.field: val})\n else:\n return None\n\n def render(self, value, obj=None):\n if value is None:\n return \"\"\n\n attrs = self.field.split('__')\n for attr in attrs:\n try:\n value = getattr(value, attr, None)\n except (ValueError, ObjectDoesNotExist):\n # needs to have a primary key value before a many-to-many\n # relationship can be used.\n return None\n if value is None:\n return None\n\n return value\n\n\nclass ManyToManyWidget(Widget):\n \"\"\"\n Widget that converts between representations of a ManyToMany relationships\n as a list and an actual ManyToMany field.\n\n :param model: The model the ManyToMany field refers to (required).\n :param separator: Defaults to ``','``.\n :param field: A field on the related model. Default is ``pk``.\n \"\"\"\n\n def __init__(self, model, separator=None, field=None, *args, **kwargs):\n if separator is None:\n separator = ','\n if field is None:\n field = 'pk'\n self.model = model\n self.separator = separator\n self.field = field\n super().__init__(*args, **kwargs)\n\n def clean(self, value, row=None, *args, **kwargs):\n if not value:\n return self.model.objects.none()\n if isinstance(value, (float, int)):\n ids = [int(value)]\n else:\n ids = value.split(self.separator)\n ids = filter(None, [i.strip() for i in ids])\n return self.model.objects.filter(**{\n '%s__in' % self.field: ids\n })\n\n def render(self, value, obj=None):\n ids = [smart_text(getattr(obj, self.field)) for obj in value.all()]\n return self.separator.join(ids)\n", "path": "import_export/widgets.py" } ]
[ { "content": "import json\nfrom datetime import date, datetime\nfrom decimal import Decimal\n\nfrom django.conf import settings\nfrom django.core.exceptions import ObjectDoesNotExist\nfrom django.utils import datetime_safe, timezone\nfrom django.utils.dateparse import parse_duration\nfrom django.utils.encoding import force_text, smart_text\n\n\nclass Widget:\n \"\"\"\n A Widget takes care of converting between import and export representations.\n\n This is achieved by the two methods,\n :meth:`~import_export.widgets.Widget.clean` and\n :meth:`~import_export.widgets.Widget.render`.\n \"\"\"\n def clean(self, value, row=None, *args, **kwargs):\n \"\"\"\n Returns an appropriate Python object for an imported value.\n\n For example, if you import a value from a spreadsheet,\n :meth:`~import_export.widgets.Widget.clean` handles conversion\n of this value into the corresponding Python object.\n\n Numbers or dates can be *cleaned* to their respective data types and\n don't have to be imported as Strings.\n \"\"\"\n return value\n\n def render(self, value, obj=None):\n \"\"\"\n Returns an export representation of a Python value.\n\n For example, if you have an object you want to export,\n :meth:`~import_export.widgets.Widget.render` takes care of converting\n the object's field to a value that can be written to a spreadsheet.\n \"\"\"\n return force_text(value)\n\n\nclass NumberWidget(Widget):\n \"\"\"\n \"\"\"\n\n def is_empty(self, value):\n if isinstance(value, str):\n value = value.strip()\n # 0 is not empty\n return value is None or value == \"\"\n\n def render(self, value, obj=None):\n return value\n\n\nclass FloatWidget(NumberWidget):\n \"\"\"\n Widget for converting floats fields.\n \"\"\"\n\n def clean(self, value, row=None, *args, **kwargs):\n if self.is_empty(value):\n return None\n return float(value)\n\n\nclass IntegerWidget(NumberWidget):\n \"\"\"\n Widget for converting integer fields.\n \"\"\"\n\n def clean(self, value, row=None, *args, **kwargs):\n if self.is_empty(value):\n return None\n return int(float(value))\n\n\nclass DecimalWidget(NumberWidget):\n \"\"\"\n Widget for converting decimal fields.\n \"\"\"\n\n def clean(self, value, row=None, *args, **kwargs):\n if self.is_empty(value):\n return None\n return Decimal(force_text(value))\n\n\nclass CharWidget(Widget):\n \"\"\"\n Widget for converting text fields.\n \"\"\"\n\n def render(self, value, obj=None):\n return force_text(value)\n\n\nclass BooleanWidget(Widget):\n \"\"\"\n Widget for converting boolean fields.\n \"\"\"\n TRUE_VALUES = [\"1\", 1]\n FALSE_VALUE = \"0\"\n\n def render(self, value, obj=None):\n if value is None:\n return \"\"\n return self.TRUE_VALUES[0] if value else self.FALSE_VALUE\n\n def clean(self, value, row=None, *args, **kwargs):\n if value == \"\":\n return None\n return True if value in self.TRUE_VALUES else False\n\n\nclass DateWidget(Widget):\n \"\"\"\n Widget for converting date fields.\n\n Takes optional ``format`` parameter.\n \"\"\"\n\n def __init__(self, format=None):\n if format is None:\n if not settings.DATE_INPUT_FORMATS:\n formats = (\"%Y-%m-%d\",)\n else:\n formats = settings.DATE_INPUT_FORMATS\n else:\n formats = (format,)\n self.formats = formats\n\n def clean(self, value, row=None, *args, **kwargs):\n if not value:\n return None\n if isinstance(value, date):\n return value\n for format in self.formats:\n try:\n return datetime.strptime(value, format).date()\n except (ValueError, TypeError):\n continue\n raise ValueError(\"Enter a valid date.\")\n\n def render(self, value, obj=None):\n if not value:\n return \"\"\n try:\n return value.strftime(self.formats[0])\n except:\n return datetime_safe.new_date(value).strftime(self.formats[0])\n\n\nclass DateTimeWidget(Widget):\n \"\"\"\n Widget for converting date fields.\n\n Takes optional ``format`` parameter. If none is set, either\n ``settings.DATETIME_INPUT_FORMATS`` or ``\"%Y-%m-%d %H:%M:%S\"`` is used.\n \"\"\"\n\n def __init__(self, format=None):\n if format is None:\n if not settings.DATETIME_INPUT_FORMATS:\n formats = (\"%Y-%m-%d %H:%M:%S\",)\n else:\n formats = settings.DATETIME_INPUT_FORMATS\n else:\n formats = (format,)\n self.formats = formats\n\n def clean(self, value, row=None, *args, **kwargs):\n if not value:\n return None\n if isinstance(value, datetime):\n return value\n for format in self.formats:\n try:\n dt = datetime.strptime(value, format)\n if settings.USE_TZ:\n # make datetime timezone aware so we don't compare\n # naive datetime to an aware one\n dt = timezone.make_aware(dt,\n timezone.get_default_timezone())\n return dt\n except (ValueError, TypeError):\n continue\n raise ValueError(\"Enter a valid date/time.\")\n\n def render(self, value, obj=None):\n if not value:\n return \"\"\n if settings.USE_TZ:\n value = timezone.localtime(value)\n return value.strftime(self.formats[0])\n\n\nclass TimeWidget(Widget):\n \"\"\"\n Widget for converting time fields.\n\n Takes optional ``format`` parameter.\n \"\"\"\n\n def __init__(self, format=None):\n if format is None:\n if not settings.TIME_INPUT_FORMATS:\n formats = (\"%H:%M:%S\",)\n else:\n formats = settings.TIME_INPUT_FORMATS\n else:\n formats = (format,)\n self.formats = formats\n\n def clean(self, value, row=None, *args, **kwargs):\n if not value:\n return None\n for format in self.formats:\n try:\n return datetime.strptime(value, format).time()\n except (ValueError, TypeError):\n continue\n raise ValueError(\"Enter a valid time.\")\n\n def render(self, value, obj=None):\n if not value:\n return \"\"\n return value.strftime(self.formats[0])\n\n\nclass DurationWidget(Widget):\n \"\"\"\n Widget for converting time duration fields.\n \"\"\"\n\n def clean(self, value, row=None, *args, **kwargs):\n if not value:\n return None\n\n try:\n return parse_duration(value)\n except (ValueError, TypeError):\n raise ValueError(\"Enter a valid duration.\")\n\n def render(self, value, obj=None):\n if not value:\n return \"\"\n return str(value)\n\n\nclass SimpleArrayWidget(Widget):\n \"\"\"\n Widget for an Array field. Can be used for Postgres' Array field.\n\n :param separator: Defaults to ``','``\n \"\"\"\n\n def __init__(self, separator=None):\n if separator is None:\n separator = ','\n self.separator = separator\n super().__init__()\n\n def clean(self, value, row=None, *args, **kwargs):\n return value.split(self.separator) if value else []\n\n def render(self, value, obj=None):\n return self.separator.join(str(v) for v in value)\n\n\nclass JSONWidget(Widget):\n \"\"\"\n Widget for a JSON object (especially required for jsonb fields in PostgreSQL database.)\n\n :param value: Defaults to JSON format.\n The widget covers two cases: Proper JSON string with double quotes, else it\n tries to use single quotes and then convert it to proper JSON.\n \"\"\"\n\n def clean(self, value, row=None, *args, **kwargs):\n val = super().clean(value)\n if val:\n try:\n return json.loads(val)\n except json.decoder.JSONDecodeError:\n return json.loads(val.replace(\"'\", \"\\\"\"))\n\n def render(self, value, obj=None):\n if value:\n return json.dumps(value)\n\n\nclass ForeignKeyWidget(Widget):\n \"\"\"\n Widget for a ``ForeignKey`` field which looks up a related model using\n \"natural keys\" in both export an import.\n\n The lookup field defaults to using the primary key (``pk``) as lookup\n criterion but can be customised to use any field on the related model.\n\n Unlike specifying a related field in your resource like so…\n\n ::\n\n class Meta:\n fields = ('author__name',)\n\n …using a :class:`~import_export.widgets.ForeignKeyWidget` has the\n advantage that it can not only be used for exporting, but also importing\n data with foreign key relationships.\n\n Here's an example on how to use\n :class:`~import_export.widgets.ForeignKeyWidget` to lookup related objects\n using ``Author.name`` instead of ``Author.pk``::\n\n from import_export import fields, resources\n from import_export.widgets import ForeignKeyWidget\n\n class BookResource(resources.ModelResource):\n author = fields.Field(\n column_name='author',\n attribute='author',\n widget=ForeignKeyWidget(Author, 'name'))\n\n class Meta:\n fields = ('author',)\n\n :param model: The Model the ForeignKey refers to (required).\n :param field: A field on the related model used for looking up a particular object.\n \"\"\"\n def __init__(self, model, field='pk', *args, **kwargs):\n self.model = model\n self.field = field\n super().__init__(*args, **kwargs)\n\n def get_queryset(self, value, row, *args, **kwargs):\n \"\"\"\n Returns a queryset of all objects for this Model.\n\n Overwrite this method if you want to limit the pool of objects from\n which the related object is retrieved.\n\n :param value: The field's value in the datasource.\n :param row: The datasource's current row.\n\n As an example; if you'd like to have ForeignKeyWidget look up a Person\n by their pre- **and** lastname column, you could subclass the widget\n like so::\n\n class FullNameForeignKeyWidget(ForeignKeyWidget):\n def get_queryset(self, value, row):\n return self.model.objects.filter(\n first_name__iexact=row[\"first_name\"],\n last_name__iexact=row[\"last_name\"]\n )\n \"\"\"\n return self.model.objects.all()\n\n def clean(self, value, row=None, *args, **kwargs):\n val = super().clean(value)\n if val:\n return self.get_queryset(value, row, *args, **kwargs).get(**{self.field: val})\n else:\n return None\n\n def render(self, value, obj=None):\n if value is None:\n return \"\"\n\n attrs = self.field.split('__')\n for attr in attrs:\n try:\n value = getattr(value, attr, None)\n except (ValueError, ObjectDoesNotExist):\n # needs to have a primary key value before a many-to-many\n # relationship can be used.\n return None\n if value is None:\n return None\n\n return value\n\n\nclass ManyToManyWidget(Widget):\n \"\"\"\n Widget that converts between representations of a ManyToMany relationships\n as a list and an actual ManyToMany field.\n\n :param model: The model the ManyToMany field refers to (required).\n :param separator: Defaults to ``','``.\n :param field: A field on the related model. Default is ``pk``.\n \"\"\"\n\n def __init__(self, model, separator=None, field=None, *args, **kwargs):\n if separator is None:\n separator = ','\n if field is None:\n field = 'pk'\n self.model = model\n self.separator = separator\n self.field = field\n super().__init__(*args, **kwargs)\n\n def clean(self, value, row=None, *args, **kwargs):\n if not value:\n return self.model.objects.none()\n if isinstance(value, (float, int)):\n ids = [int(value)]\n else:\n ids = value.split(self.separator)\n ids = filter(None, [i.strip() for i in ids])\n return self.model.objects.filter(**{\n '%s__in' % self.field: ids\n })\n\n def render(self, value, obj=None):\n ids = [smart_text(getattr(obj, self.field)) for obj in value.all()]\n return self.separator.join(ids)\n", "path": "import_export/widgets.py" } ]
diff --git a/import_export/widgets.py b/import_export/widgets.py index 9bb9a210b..58b943946 100644 --- a/import_export/widgets.py +++ b/import_export/widgets.py @@ -85,7 +85,7 @@ class DecimalWidget(NumberWidget): def clean(self, value, row=None, *args, **kwargs): if self.is_empty(value): return None - return Decimal(value) + return Decimal(force_text(value)) class CharWidget(Widget): diff --git a/tests/core/tests/test_widgets.py b/tests/core/tests/test_widgets.py index 2ac9c7072..40146538d 100644 --- a/tests/core/tests/test_widgets.py +++ b/tests/core/tests/test_widgets.py @@ -145,6 +145,7 @@ def setUp(self): def test_clean(self): self.assertEqual(self.widget.clean("11.111"), self.value) + self.assertEqual(self.widget.clean(11.111), self.value) def test_render(self): self.assertEqual(self.widget.render(self.value), self.value)
qutebrowser__qutebrowser-648
Logo qutebrowser still needs a logo! Some random ideas: - `qutebrowser` in some "cute" (fur?) font - A `q` which is composed of a globe (because browsers need a globe) and a snake "hanging" around it. Ideally with either the snake or the globe being cute. :grin:
[ { "content": "# vim: ft=python fileencoding=utf-8 sts=4 sw=4 et:\n\n# Copyright 2014-2015 Florian Bruhin (The Compiler) <[email protected]>\n#\n# This file is part of qutebrowser.\n#\n# qutebrowser is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# qutebrowser is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with qutebrowser. If not, see <http://www.gnu.org/licenses/>.\n\n\"\"\"Our own fork of shlex.split with some added and removed features.\"\"\"\n\nimport re\n\nfrom qutebrowser.utils import log\n\n\nclass ShellLexer:\n\n \"\"\"A lexical analyzer class for simple shell-like syntaxes.\n\n Based on Python's shlex, but cleaned up, removed some features, and added\n some features useful for qutebrowser.\n\n Attributes:\n FIXME\n \"\"\"\n\n def __init__(self, s):\n self.string = s\n self.whitespace = ' \\t\\r'\n self.quotes = '\\'\"'\n self.escape = '\\\\'\n self.escapedquotes = '\"'\n self.keep = False\n self.quoted = None\n self.escapedstate = None\n self.token = None\n self.state = None\n self.reset()\n\n def reset(self):\n \"\"\"Reset the state machine state to the defaults.\"\"\"\n self.quoted = False\n self.escapedstate = ' '\n self.token = ''\n self.state = ' '\n\n def __iter__(self): # noqa\n \"\"\"Read a raw token from the input stream.\"\"\"\n # pylint: disable=too-many-branches,too-many-statements\n self.reset()\n for nextchar in self.string:\n if self.state == ' ':\n if self.keep:\n self.token += nextchar\n if nextchar in self.whitespace:\n if self.token or self.quoted:\n yield self.token\n self.reset()\n elif nextchar in self.escape:\n self.escapedstate = 'a'\n self.state = nextchar\n elif nextchar in self.quotes:\n self.state = nextchar\n else:\n self.token = nextchar\n self.state = 'a'\n elif self.state in self.quotes:\n self.quoted = True\n if nextchar == self.state:\n if self.keep:\n self.token += nextchar\n self.state = 'a'\n elif (nextchar in self.escape and\n self.state in self.escapedquotes):\n if self.keep:\n self.token += nextchar\n self.escapedstate = self.state\n self.state = nextchar\n else:\n self.token += nextchar\n elif self.state in self.escape:\n # In posix shells, only the quote itself or the escape\n # character may be escaped within quotes.\n if (self.escapedstate in self.quotes and\n nextchar != self.state and\n nextchar != self.escapedstate and not self.keep):\n self.token += self.state\n self.token += nextchar\n self.state = self.escapedstate\n elif self.state == 'a':\n if nextchar in self.whitespace:\n self.state = ' '\n if self.token or self.quoted:\n yield self.token\n self.reset()\n if self.keep:\n yield nextchar\n elif nextchar in self.quotes:\n if self.keep:\n self.token += nextchar\n self.state = nextchar\n elif nextchar in self.escape:\n if self.keep:\n self.token += nextchar\n self.escapedstate = 'a'\n self.state = nextchar\n else:\n self.token += nextchar\n if self.state in self.escape and not self.keep:\n self.token += self.state\n if self.token or self.quoted:\n yield self.token\n\n\ndef split(s, keep=False):\n \"\"\"Split a string via ShellLexer.\n\n Args:\n keep: Whether to keep are special chars in the split output.\n \"\"\"\n lexer = ShellLexer(s)\n lexer.keep = keep\n tokens = list(lexer)\n if not tokens:\n return []\n out = []\n spaces = \"\"\n\n log.shlexer.vdebug(\"{!r} -> {!r}\".format(s, tokens))\n\n for t in tokens:\n if t.isspace():\n spaces += t\n else:\n out.append(spaces + t)\n spaces = \"\"\n if spaces:\n out.append(spaces)\n\n return out\n\n\ndef _combine_ws(parts, whitespace):\n \"\"\"Combine whitespace in a list with the element following it.\n\n Args:\n parts: A list of strings.\n whitespace: A string containing what's considered whitespace.\n\n Return:\n The modified list.\n \"\"\"\n out = []\n ws = ''\n for part in parts:\n if not part:\n continue\n elif part in whitespace:\n ws += part\n else:\n out.append(ws + part)\n ws = ''\n if ws:\n out.append(ws)\n return out\n\n\ndef simple_split(s, keep=False, maxsplit=None):\n \"\"\"Split a string on whitespace, optionally keeping the whitespace.\n\n Args:\n s: The string to split.\n keep: Whether to keep whitespace.\n maxsplit: The maximum count of splits.\n\n Return:\n A list of split strings.\n \"\"\"\n whitespace = '\\n\\t '\n if maxsplit == 0:\n # re.split with maxsplit=0 splits everything, while str.split splits\n # nothing (which is the behavior we want).\n if keep:\n return [s]\n else:\n return [s.strip(whitespace)]\n elif maxsplit is None:\n maxsplit = 0\n\n if keep:\n pattern = '([' + whitespace + '])'\n parts = re.split(pattern, s, maxsplit)\n return _combine_ws(parts, whitespace)\n else:\n pattern = '[' + whitespace + ']'\n parts = re.split(pattern, s, maxsplit)\n parts[-1] = parts[-1].rstrip()\n return [p for p in parts if p]\n", "path": "qutebrowser/misc/split.py" } ]
[ { "content": "# vim: ft=python fileencoding=utf-8 sts=4 sw=4 et:\n\n# Copyright 2014-2015 Florian Bruhin (The Compiler) <[email protected]>\n#\n# This file is part of qutebrowser.\n#\n# qutebrowser is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# qutebrowser is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with qutebrowser. If not, see <http://www.gnu.org/licenses/>.\n\n\"\"\"Our own fork of shlex.split with some added and removed features.\"\"\"\n\nimport re\n\nfrom qutebrowser.utils import log\n\n\nclass ShellLexer:\n\n \"\"\"A lexical analyzer class for simple shell-like syntaxes.\n\n Based on Python's shlex, but cleaned up, removed some features, and added\n some features useful for qutebrowser.\n\n Attributes:\n FIXME\n \"\"\"\n\n def __init__(self, s):\n self.string = s\n self.whitespace = ' \\t\\r'\n self.quotes = '\\'\"'\n self.escape = '\\\\'\n self.escapedquotes = '\"'\n self.keep = False\n self.quoted = None\n self.escapedstate = None\n self.token = None\n self.state = None\n self.reset()\n\n def reset(self):\n \"\"\"Reset the state machine state to the defaults.\"\"\"\n self.quoted = False\n self.escapedstate = ' '\n self.token = ''\n self.state = ' '\n\n def __iter__(self): # noqa\n \"\"\"Read a raw token from the input stream.\"\"\"\n # pylint: disable=too-many-branches,too-many-statements\n self.reset()\n for nextchar in self.string:\n if self.state == ' ':\n if self.keep:\n self.token += nextchar\n if nextchar in self.whitespace:\n if self.token or self.quoted:\n yield self.token\n self.reset()\n elif nextchar in self.escape:\n self.escapedstate = 'a'\n self.state = nextchar\n elif nextchar in self.quotes:\n self.state = nextchar\n else:\n self.token = nextchar\n self.state = 'a'\n elif self.state in self.quotes:\n self.quoted = True\n if nextchar == self.state:\n if self.keep:\n self.token += nextchar\n self.state = 'a'\n elif (nextchar in self.escape and\n self.state in self.escapedquotes):\n if self.keep:\n self.token += nextchar\n self.escapedstate = self.state\n self.state = nextchar\n else:\n self.token += nextchar\n elif self.state in self.escape:\n # In posix shells, only the quote itself or the escape\n # character may be escaped within quotes.\n if (self.escapedstate in self.quotes and\n nextchar != self.state and\n nextchar != self.escapedstate and not self.keep):\n self.token += self.state\n self.token += nextchar\n self.state = self.escapedstate\n elif self.state == 'a':\n if nextchar in self.whitespace:\n self.state = ' '\n if self.token or self.quoted:\n yield self.token\n self.reset()\n if self.keep:\n yield nextchar\n elif nextchar in self.quotes:\n if self.keep:\n self.token += nextchar\n self.state = nextchar\n elif nextchar in self.escape:\n if self.keep:\n self.token += nextchar\n self.escapedstate = 'a'\n self.state = nextchar\n else:\n self.token += nextchar\n if self.state in self.escape and not self.keep:\n self.token += self.state\n if self.token or self.quoted:\n yield self.token\n\n\ndef split(s, keep=False):\n \"\"\"Split a string via ShellLexer.\n\n Args:\n keep: Whether to keep special chars in the split output.\n \"\"\"\n lexer = ShellLexer(s)\n lexer.keep = keep\n tokens = list(lexer)\n if not tokens:\n return []\n out = []\n spaces = \"\"\n\n log.shlexer.vdebug(\"{!r} -> {!r}\".format(s, tokens))\n\n for t in tokens:\n if t.isspace():\n spaces += t\n else:\n out.append(spaces + t)\n spaces = \"\"\n if spaces:\n out.append(spaces)\n\n return out\n\n\ndef _combine_ws(parts, whitespace):\n \"\"\"Combine whitespace in a list with the element following it.\n\n Args:\n parts: A list of strings.\n whitespace: A string containing what's considered whitespace.\n\n Return:\n The modified list.\n \"\"\"\n out = []\n ws = ''\n for part in parts:\n if not part:\n continue\n elif part in whitespace:\n ws += part\n else:\n out.append(ws + part)\n ws = ''\n if ws:\n out.append(ws)\n return out\n\n\ndef simple_split(s, keep=False, maxsplit=None):\n \"\"\"Split a string on whitespace, optionally keeping the whitespace.\n\n Args:\n s: The string to split.\n keep: Whether to keep whitespace.\n maxsplit: The maximum count of splits.\n\n Return:\n A list of split strings.\n \"\"\"\n whitespace = '\\n\\t '\n if maxsplit == 0:\n # re.split with maxsplit=0 splits everything, while str.split splits\n # nothing (which is the behavior we want).\n if keep:\n return [s]\n else:\n return [s.strip(whitespace)]\n elif maxsplit is None:\n maxsplit = 0\n\n if keep:\n pattern = '([' + whitespace + '])'\n parts = re.split(pattern, s, maxsplit)\n return _combine_ws(parts, whitespace)\n else:\n pattern = '[' + whitespace + ']'\n parts = re.split(pattern, s, maxsplit)\n parts[-1] = parts[-1].rstrip()\n return [p for p in parts if p]\n", "path": "qutebrowser/misc/split.py" } ]
diff --git a/qutebrowser/misc/split.py b/qutebrowser/misc/split.py index bd1904763d6..b763d824652 100644 --- a/qutebrowser/misc/split.py +++ b/qutebrowser/misc/split.py @@ -127,7 +127,7 @@ def split(s, keep=False): """Split a string via ShellLexer. Args: - keep: Whether to keep are special chars in the split output. + keep: Whether to keep special chars in the split output. """ lexer = ShellLexer(s) lexer.keep = keep diff --git a/tests/commands/test_runners.py b/tests/commands/test_runners.py new file mode 100644 index 00000000000..a03eab9d892 --- /dev/null +++ b/tests/commands/test_runners.py @@ -0,0 +1,44 @@ +# vim: ft=python fileencoding=utf-8 sts=4 sw=4 et: + +# Copyright 2015 Florian Bruhin (The Compiler) <[email protected]> +# +# This file is part of qutebrowser. +# +# qutebrowser is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# qutebrowser is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with qutebrowser. If not, see <http://www.gnu.org/licenses/>. + +"""Tests for qutebrowser.commands.runners.""" + +import pytest + +from qutebrowser.commands import runners, cmdexc + + +class TestCommandRunner: + + """Tests for CommandRunner.""" + + def test_parse_all(self, cmdline_test): + """Test parsing of commands. + + See https://github.com/The-Compiler/qutebrowser/issues/615 + + Args: + cmdline_test: A pytest fixture which provides testcases. + """ + cr = runners.CommandRunner(0) + if cmdline_test.valid: + list(cr.parse_all(cmdline_test.cmd, aliases=False)) + else: + with pytest.raises(cmdexc.NoSuchCommandError): + list(cr.parse_all(cmdline_test.cmd, aliases=False)) diff --git a/tests/config/test_config.py b/tests/config/test_config.py index 6bee98ee40c..383fed23223 100644 --- a/tests/config/test_config.py +++ b/tests/config/test_config.py @@ -31,7 +31,9 @@ from PyQt5.QtGui import QColor import pytest -from qutebrowser.config import config, configexc +from qutebrowser.config import config, configexc, configdata +from qutebrowser.config.parsers import keyconf +from qutebrowser.commands import runners from qutebrowser.utils import objreg, standarddir @@ -155,6 +157,27 @@ def test_invalid_option_relaxed(self): self.cfg.get('general', 'bar') # pylint: disable=bad-config-call +class TestKeyConfigParser: + + """Test config.parsers.keyconf.KeyConfigParser.""" + + def test_cmd_binding(self, cmdline_test): + """Test various command bindings. + + See https://github.com/The-Compiler/qutebrowser/issues/615 + + Args: + cmdline_test: A pytest fixture which provides testcases. + """ + kcp = keyconf.KeyConfigParser(None, None) + kcp._cur_section = 'normal' + if cmdline_test.valid: + kcp._read_command(cmdline_test.cmd) + else: + with pytest.raises(keyconf.KeyConfigError): + kcp._read_command(cmdline_test.cmd) + + class TestDefaultConfig: """Test validating of the default config.""" @@ -164,6 +187,16 @@ def test_default_config(self): conf = config.ConfigManager(None, None) conf._validate_all() + def test_default_key_config(self): + """Test validating of the default key config.""" + # We import qutebrowser.app so the cmdutils.register decorators run. + import qutebrowser.app # pylint: disable=unused-variable + conf = keyconf.KeyConfigParser(None, None) + runner = runners.CommandRunner(win_id=0) + for sectname in configdata.KEY_DATA: + for cmd in conf.get_bindings_for(sectname).values(): + runner.parse(cmd, aliases=False) + class TestConfigInit: diff --git a/tests/conftest.py b/tests/conftest.py index d3411694c18..892a91912a7 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -19,6 +19,9 @@ """The qutebrowser test suite contest file.""" +import collections +import itertools + import pytest @@ -49,15 +52,23 @@ def unicode_encode_err(): 'fake exception') # reason [email protected](scope='session') +def qnam(): + """Session-wide QNetworkAccessManager.""" + from PyQt5.QtNetwork import QNetworkAccessManager + nam = QNetworkAccessManager() + nam.setNetworkAccessible(QNetworkAccessManager.NotAccessible) + return nam + + @pytest.fixture -def webpage(): +def webpage(qnam): """Get a new QWebPage object.""" from PyQt5.QtWebKitWidgets import QWebPage - from PyQt5.QtNetwork import QNetworkAccessManager page = QWebPage() - nam = page.networkAccessManager() - nam.setNetworkAccessible(QNetworkAccessManager.NotAccessible) + page.networkAccessManager().deleteLater() + page.setNetworkAccessManager(qnam) return page @@ -76,3 +87,63 @@ def fake_keyevent(key, modifiers=0, text=''): return evtmock return fake_keyevent + + +def pytest_collection_modifyitems(items): + """Automatically add a 'gui' marker to all gui-related tests. + + pytest hook called after collection has been performed, adds a marker + named "gui" which can be used to filter gui tests from the command line. + For example: + + py.test -m "not gui" # run all tests except gui tests + py.test -m "gui" # run only gui tests + + Args: + items: list of _pytest.main.Node items, where each item represents + a python test that will be executed. + + Reference: + http://pytest.org/latest/plugins.html + """ + for item in items: + if 'qtbot' in getattr(item, 'fixturenames', ()): + item.add_marker('gui') + + +def _generate_cmdline_tests(): + """Generate testcases for test_split_binding.""" + # pylint: disable=invalid-name + TestCase = collections.namedtuple('TestCase', 'cmd, valid') + separators = [';;', ' ;; ', ';; ', ' ;;'] + invalid = ['foo', ''] + valid = ['leave-mode', 'hint all'] + # Valid command only -> valid + for item in valid: + yield TestCase(''.join(item), True) + # Invalid command only -> invalid + for item in valid: + yield TestCase(''.join(item), True) + # Invalid command combined with invalid command -> invalid + for item in itertools.product(invalid, separators, invalid): + yield TestCase(''.join(item), False) + # Valid command combined with valid command -> valid + for item in itertools.product(valid, separators, valid): + yield TestCase(''.join(item), True) + # Valid command combined with invalid command -> invalid + for item in itertools.product(valid, separators, invalid): + yield TestCase(''.join(item), False) + # Invalid command combined with valid command -> invalid + for item in itertools.product(invalid, separators, valid): + yield TestCase(''.join(item), False) + # Command with no_cmd_split combined with an "invalid" command -> valid + for item in itertools.product(['bind x open'], separators, invalid): + yield TestCase(''.join(item), True) + + [email protected](params=_generate_cmdline_tests()) +def cmdline_test(request): + """Fixture which generates tests for things validating commandlines.""" + # Import qutebrowser.app so all cmdutils.register decorators get run. + import qutebrowser.app # pylint: disable=unused-variable + return request.param diff --git a/tests/keyinput/test_basekeyparser.py b/tests/keyinput/test_basekeyparser.py index 7164bffbdad..29d556ac58e 100644 --- a/tests/keyinput/test_basekeyparser.py +++ b/tests/keyinput/test_basekeyparser.py @@ -63,44 +63,27 @@ class TestSplitCount: """Test the _split_count method. - Attributes: - kp: The BaseKeyParser we're testing. + Class Attributes: + TESTS: list of parameters for the tests, as tuples of + (input_key, supports_count, expected) """ - @pytest.fixture(autouse=True) - def setup(self): - self.kp = basekeyparser.BaseKeyParser(0, supports_count=True) - - def test_onlycount(self): + TESTS = [ + # (input_key, supports_count, expected) + ('10', True, (10, '')), + ('10foo', True, (10, 'foo')), + ('-1foo', True, (None, '-1foo')), + ('10e4foo', True, (10, 'e4foo')), + ('foo', True, (None, 'foo')), + ('10foo', False, (None, '10foo')), + ] + + @pytest.mark.parametrize('input_key, supports_count, expected', TESTS) + def test_splitcount(self, input_key, supports_count, expected): """Test split_count with only a count.""" - self.kp._keystring = '10' - assert self.kp._split_count() == (10, '') - - def test_normalcount(self): - """Test split_count with count and text.""" - self.kp._keystring = '10foo' - assert self.kp._split_count() == (10, 'foo') - - def test_minuscount(self): - """Test split_count with a negative count.""" - self.kp._keystring = '-1foo' - assert self.kp._split_count() == (None, '-1foo') - - def test_expcount(self): - """Test split_count with an exponential count.""" - self.kp._keystring = '10e4foo' - assert self.kp._split_count() == (10, 'e4foo') - - def test_nocount(self): - """Test split_count with only a command.""" - self.kp._keystring = 'foo' - assert self.kp._split_count() == (None, 'foo') - - def test_nosupport(self): - """Test split_count with a count when counts aren't supported.""" - self.kp._supports_count = False - self.kp._keystring = '10foo' - assert self.kp._split_count() == (None, '10foo') + kp = basekeyparser.BaseKeyParser(0, supports_count=supports_count) + kp._keystring = input_key + assert kp._split_count() == expected @pytest.mark.usefixtures('fake_keyconfig', 'mock_timer') diff --git a/tests/misc/test_miscwidgets.py b/tests/misc/test_miscwidgets.py new file mode 100644 index 00000000000..bd316935f73 --- /dev/null +++ b/tests/misc/test_miscwidgets.py @@ -0,0 +1,90 @@ +# vim: ft=python fileencoding=utf-8 sts=4 sw=4 et: + +# Copyright 2014-2015 Florian Bruhin (The Compiler) <[email protected]> +# +# This file is part of qutebrowser. +# +# qutebrowser is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# qutebrowser is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with qutebrowser. If not, see <http://www.gnu.org/licenses/>. + +"""Test widgets in miscwidgets module.""" + +from unittest import mock +from PyQt5.QtCore import Qt +from PyQt5.QtWidgets import QApplication +import pytest + +from qutebrowser.misc.miscwidgets import CommandLineEdit + + +class TestCommandLineEdit: + + """Tests for CommandLineEdit widget.""" + + @pytest.yield_fixture + def cmd_edit(self, qtbot): + """Fixture to initialize a CommandLineEdit.""" + cmd_edit = CommandLineEdit(None) + cmd_edit.set_prompt(':') + qtbot.add_widget(cmd_edit) + assert cmd_edit.text() == '' + yield cmd_edit + + @pytest.fixture + def mock_clipboard(self, mocker): + """Fixture to mock QApplication.clipboard. + + Return: + The mocked QClipboard object. + """ + mocker.patch.object(QApplication, 'clipboard') + clipboard = mock.MagicMock() + clipboard.supportsSelection.return_value = True + QApplication.clipboard.return_value = clipboard + return clipboard + + def test_position(self, qtbot, cmd_edit): + """Test cursor position based on the prompt.""" + qtbot.keyClicks(cmd_edit, ':hello') + assert cmd_edit.text() == ':hello' + assert cmd_edit.cursorPosition() == len(':hello') + + cmd_edit.home(mark=True) + assert cmd_edit.cursorPosition() == len(':hello') + qtbot.keyClick(cmd_edit, Qt.Key_Delete) + assert cmd_edit.text() == ':' + qtbot.keyClick(cmd_edit, Qt.Key_Backspace) + assert cmd_edit.text() == ':' + + qtbot.keyClicks(cmd_edit, 'hey again') + assert cmd_edit.text() == ':hey again' + + def test_invalid_prompt(self, qtbot, cmd_edit): + """Test preventing of an invalid prompt being entered.""" + qtbot.keyClicks(cmd_edit, '$hello') + assert cmd_edit.text() == '' + + def test_clipboard_paste(self, qtbot, cmd_edit, mock_clipboard): + """Test pasting commands from clipboard.""" + mock_clipboard.text.return_value = ':command' + qtbot.keyClick(cmd_edit, Qt.Key_Insert, Qt.ShiftModifier) + assert cmd_edit.text() == ':command' + + mock_clipboard.text.return_value = ' param1' + qtbot.keyClick(cmd_edit, Qt.Key_Insert, Qt.ShiftModifier) + assert cmd_edit.text() == ':command param1' + + cmd_edit.clear() + mock_clipboard.text.return_value = '$ command' + qtbot.keyClick(cmd_edit, Qt.Key_Insert, Qt.ShiftModifier) + assert cmd_edit.text() == ':command param1' diff --git a/tests/misc/test_split.py b/tests/misc/test_split.py index c2c930b5008..65fa218b1f5 100644 --- a/tests/misc/test_split.py +++ b/tests/misc/test_split.py @@ -18,8 +18,9 @@ # along with qutebrowser. If not, see <http://www.gnu.org/licenses/>. """Tests for qutebrowser.misc.split.""" +import collections -import unittest +import pytest from qutebrowser.misc import split @@ -29,7 +30,7 @@ # Format: input/split|output|without|keep/split|output|with|keep/ -test_data = r""" +test_data_str = r""" one two/one|two/one| two/ one "two three" four/one|two three|four/one| "two three"| four/ one 'two three' four/one|two three|four/one| 'two three'| four/ @@ -104,36 +105,56 @@ """ -class SplitTests(unittest.TestCase): +def _parse_split_test_data_str(): + """ + Parse the test data set into a namedtuple to use in tests. + + Returns: + A list of namedtuples with str attributes: input, keep, no_keep + """ + tuple_class = collections.namedtuple('TestCase', 'input, keep, no_keep') + + result = [] + for line in test_data_str.splitlines(): + if not line: + continue + data = line.split('/') + item = tuple_class(input=data[0], keep=data[1].split('|'), + no_keep=data[2].split('|')) + result.append(item) + return result + + +class TestSplit: """Test split.""" - def test_split(self): + @pytest.fixture(params=_parse_split_test_data_str()) + def split_test_case(self, request): + """Fixture to automatically parametrize all depending tests. + + It will use the test data from test_data_str, parsed using + _parse_split_test_data_str(). + """ + return request.param + + def test_split(self, split_test_case): """Test splitting.""" - for case in test_data.strip().splitlines(): - cmd, out = case.split('/')[:-2] - with self.subTest(cmd=cmd): - items = split.split(cmd) - self.assertEqual(items, out.split('|')) + items = split.split(split_test_case.input) + assert items == split_test_case.keep - def test_split_keep_original(self): + def test_split_keep_original(self, split_test_case): """Test if splitting with keep=True yields the original string.""" - for case in test_data.strip().splitlines(): - cmd = case.split('/')[0] - with self.subTest(cmd=cmd): - items = split.split(cmd, keep=True) - self.assertEqual(''.join(items), cmd) + items = split.split(split_test_case.input, keep=True) + assert ''.join(items) == split_test_case.input - def test_split_keep(self): + def test_split_keep(self, split_test_case): """Test splitting with keep=True.""" - for case in test_data.strip().splitlines(): - cmd, _mid, out = case.split('/')[:-1] - with self.subTest(cmd=cmd): - items = split.split(cmd, keep=True) - self.assertEqual(items, out.split('|')) + items = split.split(split_test_case.input, keep=True) + assert items == split_test_case.no_keep -class SimpleSplitTests(unittest.TestCase): +class TestSimpleSplit: """Test simple_split.""" @@ -145,27 +166,20 @@ class SimpleSplitTests(unittest.TestCase): 'foo\nbar': ['foo', '\nbar'], } - def test_str_split(self): + @pytest.mark.parametrize('test', TESTS) + def test_str_split(self, test): """Test if the behavior matches str.split.""" - for test in self.TESTS: - with self.subTest(string=test): - self.assertEqual(split.simple_split(test), - test.rstrip().split()) - - def test_str_split_maxsplit_1(self): - """Test if the behavior matches str.split with maxsplit=1.""" - string = "foo bar baz" - self.assertEqual(split.simple_split(string, maxsplit=1), - string.rstrip().split(maxsplit=1)) - - def test_str_split_maxsplit_0(self): - """Test if the behavior matches str.split with maxsplit=0.""" - string = " foo bar baz " - self.assertEqual(split.simple_split(string, maxsplit=0), - string.rstrip().split(maxsplit=0)) - - def test_split_keep(self): + assert split.simple_split(test) == test.rstrip().split() + + @pytest.mark.parametrize('s, maxsplit', + [("foo bar baz", 1), (" foo bar baz ", 0)]) + def test_str_split_maxsplit(self, s, maxsplit): + """Test if the behavior matches str.split with given maxsplit.""" + actual = split.simple_split(s, maxsplit=maxsplit) + expected = s.rstrip().split(maxsplit=maxsplit) + assert actual == expected + + @pytest.mark.parametrize('test, expected', TESTS.items()) + def test_split_keep(self, test, expected): """Test splitting with keep=True.""" - for test, expected in self.TESTS.items(): - with self.subTest(string=test): - self.assertEqual(split.simple_split(test, keep=True), expected) + assert split.simple_split(test, keep=True) == expected diff --git a/tests/utils/overflow_test_cases.py b/tests/utils/overflow_test_cases.py new file mode 100644 index 00000000000..08e6ae7a84c --- /dev/null +++ b/tests/utils/overflow_test_cases.py @@ -0,0 +1,70 @@ +# vim: ft=python fileencoding=utf-8 sts=4 sw=4 et: + +# Copyright 2014-2015 Florian Bruhin (The Compiler) <[email protected]> +# +# This file is part of qutebrowser. +# +# qutebrowser is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# qutebrowser is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with qutebrowser. If not, see <http://www.gnu.org/licenses/>. + +""" +Provides test data for overflow checking. + +Module attributes: + INT32_MIN: Minimum valid value for a signed int32. + INT32_MAX: Maximum valid value for a signed int32. + INT64_MIN: Minimum valid value for a signed int64. + INT64_MAX: Maximum valid value for a signed int64. + GOOD_VALUES: A dict of types mapped to a list of good values. + BAD_VALUES: A dict of types mapped to a list of bad values. +""" + +INT32_MIN = -(2 ** 31) +INT32_MAX = 2 ** 31 - 1 +INT64_MIN = -(2 ** 63) +INT64_MAX = 2 ** 63 - 1 + +GOOD_VALUES = { + 'int': [-1, 0, 1, 23.42, INT32_MIN, INT32_MAX], + 'int64': [-1, 0, 1, 23.42, INT64_MIN, INT64_MAX], +} + +BAD_VALUES = { + 'int': [(INT32_MIN - 1, INT32_MIN), + (INT32_MAX + 1, INT32_MAX), + (float(INT32_MAX + 1), INT32_MAX)], + 'int64': [(INT64_MIN - 1, INT64_MIN), + (INT64_MAX + 1, INT64_MAX), + (float(INT64_MAX + 1), INT64_MAX)], +} + + +def iter_good_values(): + """Yield "good" (C data type, value) tuples. + + Those should pass overflow checking. + """ + for ctype, values in GOOD_VALUES.items(): + for value in values: + yield ctype, value + + +def iter_bad_values(): + """Yield pairs of "bad" (C type, value, repl) tuples. + + Theose should not pass overflow checking. The third value is the value they + should be replaced with if overflow checking should not be fatal. + """ + for ctype, values in BAD_VALUES.items(): + for value, repl in values: + yield ctype, value, repl diff --git a/tests/utils/test_jinja.py b/tests/utils/test_jinja.py index eee703cdfd4..174a44df28e 100644 --- a/tests/utils/test_jinja.py +++ b/tests/utils/test_jinja.py @@ -20,47 +20,42 @@ """Tests for qutebrowser.utils.jinja.""" import os.path -import unittest -import unittest.mock - -from qutebrowser.utils import jinja +import pytest -def _read_file(path): - """Mocked utils.read_file.""" - if path == os.path.join('html', 'test.html'): - return """Hello {{var}}""" - else: - raise ValueError("Invalid path {}!".format(path)) +from qutebrowser.utils import jinja [email protected]('qutebrowser.utils.jinja.utils.read_file') -class JinjaTests(unittest.TestCase): [email protected](autouse=True) +def patch_read_file(monkeypatch): + """pytest fixture to patch utils.read_file.""" + def _read_file(path): + """A read_file which returns a simple template if the path is right.""" + if path == os.path.join('html', 'test.html'): + return """Hello {{var}}""" + else: + raise ValueError("Invalid path {}!".format(path)) - """Tests for getting template via jinja.""" + monkeypatch.setattr('qutebrowser.utils.jinja.utils.read_file', _read_file) - def test_simple_template(self, readfile_mock): - """Test with a simple template.""" - readfile_mock.side_effect = _read_file - template = jinja.env.get_template('test.html') - # https://bitbucket.org/logilab/pylint/issue/490/ - data = template.render(var='World') # pylint: disable=no-member - self.assertEqual(data, "Hello World") - def test_utf8(self, readfile_mock): - """Test rendering with an UTF8 template. +def test_simple_template(): + """Test with a simple template.""" + template = jinja.env.get_template('test.html') + # https://bitbucket.org/logilab/pylint/issue/490/ + data = template.render(var='World') # pylint: disable=no-member + assert data == "Hello World" - This was an attempt to get a failing test case for #127 but it seems - the issue is elsewhere. - https://github.com/The-Compiler/qutebrowser/issues/127 - """ - readfile_mock.side_effect = _read_file - template = jinja.env.get_template('test.html') - # https://bitbucket.org/logilab/pylint/issue/490/ - data = template.render(var='\u2603') # pylint: disable=no-member - self.assertEqual(data, "Hello \u2603") +def test_utf8(): + """Test rendering with an UTF8 template. + This was an attempt to get a failing test case for #127 but it seems + the issue is elsewhere. -if __name__ == '__main__': - unittest.main() + https://github.com/The-Compiler/qutebrowser/issues/127 + """ + template = jinja.env.get_template('test.html') + # https://bitbucket.org/logilab/pylint/issue/490/ + data = template.render(var='\u2603') # pylint: disable=no-member + assert data == "Hello \u2603" diff --git a/tests/utils/test_qtutils.py b/tests/utils/test_qtutils.py index 582abbf07b2..85a99c2f48f 100644 --- a/tests/utils/test_qtutils.py +++ b/tests/utils/test_qtutils.py @@ -20,107 +20,74 @@ """Tests for qutebrowser.utils.qtutils.""" import sys -import unittest + +import pytest from qutebrowser import qutebrowser from qutebrowser.utils import qtutils +import overflow_test_cases -class CheckOverflowTests(unittest.TestCase): - - """Test check_overflow. - - Class attributes: - INT32_MIN: Minimum valid value for a signed int32. - INT32_MAX: Maximum valid value for a signed int32. - INT64_MIN: Minimum valid value for a signed int64. - INT64_MAX: Maximum valid value for a signed int64. - GOOD_VALUES: A dict of types mapped to a list of good values. - BAD_VALUES: A dict of types mapped to a list of bad values. - """ - - INT32_MIN = -(2 ** 31) - INT32_MAX = 2 ** 31 - 1 - INT64_MIN = -(2 ** 63) - INT64_MAX = 2 ** 63 - 1 - - GOOD_VALUES = { - 'int': [-1, 0, 1, 23.42, INT32_MIN, INT32_MAX], - 'int64': [-1, 0, 1, 23.42, INT64_MIN, INT64_MAX], - } +class TestCheckOverflow: - BAD_VALUES = { - 'int': [(INT32_MIN - 1, INT32_MIN), - (INT32_MAX + 1, INT32_MAX), - (float(INT32_MAX + 1), INT32_MAX)], - 'int64': [(INT64_MIN - 1, INT64_MIN), - (INT64_MAX + 1, INT64_MAX), - (float(INT64_MAX + 1), INT64_MAX)], - } + """Test check_overflow.""" - def test_good_values(self): + @pytest.mark.parametrize('ctype, val', + overflow_test_cases.iter_good_values()) + def test_good_values(self, ctype, val): """Test values which are inside bounds.""" - for ctype, vals in self.GOOD_VALUES.items(): - for val in vals: - with self.subTest(ctype=ctype, val=val): - qtutils.check_overflow(val, ctype) + qtutils.check_overflow(val, ctype) - def test_bad_values_fatal(self): + @pytest.mark.parametrize('ctype, val', + [(ctype, val) for (ctype, val, _) in + overflow_test_cases.iter_bad_values()]) + def test_bad_values_fatal(self, ctype, val): """Test values which are outside bounds with fatal=True.""" - for ctype, vals in self.BAD_VALUES.items(): - for (val, _) in vals: - with self.subTest(ctype=ctype, val=val): - with self.assertRaises(OverflowError): - qtutils.check_overflow(val, ctype) + with pytest.raises(OverflowError): + qtutils.check_overflow(val, ctype) - def test_bad_values_nonfatal(self): + @pytest.mark.parametrize('ctype, val, repl', + overflow_test_cases.iter_bad_values()) + def test_bad_values_nonfatal(self, ctype, val, repl): """Test values which are outside bounds with fatal=False.""" - for ctype, vals in self.BAD_VALUES.items(): - for (val, replacement) in vals: - with self.subTest(ctype=ctype, val=val): - newval = qtutils.check_overflow(val, ctype, fatal=False) - self.assertEqual(newval, replacement) + newval = qtutils.check_overflow(val, ctype, fatal=False) + assert newval == repl -def argparser_exit(status=0, message=None): # pylint: disable=unused-argument - """Function to monkey-patch .exit() of the argparser so it doesn't exit.""" - raise Exception - - -class GetQtArgsTests(unittest.TestCase): +class TestGetQtArgs: """Tests for get_args.""" - def setUp(self): - self.parser = qutebrowser.get_argparser() - self.parser.exit = argparser_exit + @pytest.fixture + def parser(self, mocker): + """Fixture to provide an argparser. + + Monkey-patches .exit() of the argparser so it doesn't exit on errors. + """ + parser = qutebrowser.get_argparser() + mocker.patch.object(parser, 'exit', side_effect=Exception) + return parser - def test_no_qt_args(self): + def test_no_qt_args(self, parser): """Test commandline with no Qt arguments given.""" - args = self.parser.parse_args(['--debug']) - self.assertEqual(qtutils.get_args(args), [sys.argv[0]]) + args = parser.parse_args(['--debug']) + assert qtutils.get_args(args) == [sys.argv[0]] - def test_qt_flag(self): + def test_qt_flag(self, parser): """Test commandline with a Qt flag.""" - args = self.parser.parse_args(['--debug', '--qt-reverse', '--nocolor']) - self.assertEqual(qtutils.get_args(args), [sys.argv[0], '-reverse']) + args = parser.parse_args(['--debug', '--qt-reverse', '--nocolor']) + assert qtutils.get_args(args) == [sys.argv[0], '-reverse'] - def test_qt_arg(self): + def test_qt_arg(self, parser): """Test commandline with a Qt argument.""" - args = self.parser.parse_args(['--qt-stylesheet', 'foobar']) - self.assertEqual(qtutils.get_args(args), [sys.argv[0], '-stylesheet', - 'foobar']) + args = parser.parse_args(['--qt-stylesheet', 'foobar']) + assert qtutils.get_args(args) == [sys.argv[0], '-stylesheet', 'foobar'] - def test_qt_both(self): + def test_qt_both(self, parser): """Test commandline with a Qt argument and flag.""" - args = self.parser.parse_args(['--qt-stylesheet', 'foobar', - '--qt-reverse']) + args = parser.parse_args(['--qt-stylesheet', 'foobar', '--qt-reverse']) qt_args = qtutils.get_args(args) - self.assertEqual(qt_args[0], sys.argv[0]) - self.assertIn('-reverse', qt_args) - self.assertIn('-stylesheet', qt_args) - self.assertIn('foobar', qt_args) - - -if __name__ == '__main__': - unittest.main() + assert qt_args[0] == sys.argv[0] + assert '-reverse' in qt_args + assert '-stylesheet' in qt_args + assert 'foobar' in qt_args diff --git a/tox.ini b/tox.ini index 101d2b02c5c..d1e337271e1 100644 --- a/tox.ini +++ b/tox.ini @@ -26,7 +26,7 @@ deps = # on Ubuntu Trusty. commands = {envpython} scripts/link_pyqt.py --tox {envdir} - {envpython} -m py.test {posargs} + {envpython} -m py.test --strict {posargs} [testenv:coverage] deps = @@ -36,7 +36,7 @@ deps = cov-core==1.15.0 commands = {[testenv:mkvenv]commands} - {envpython} -m py.test --cov qutebrowser --cov-report term --cov-report html {posargs} + {envpython} -m py.test --strict --cov qutebrowser --cov-report term --cov-report html {posargs} [testenv:misc] commands = @@ -109,3 +109,5 @@ commands = [pytest] norecursedirs = .tox .venv +markers = + gui: Tests using the GUI (e.g. spawning widgets)
conda__conda-7241
conda's configuration context is not initialized in conda.exports root cause of https://github.com/conda-forge/conda-smithy/issues/762
[ { "content": "# -*- coding: utf-8 -*-\nfrom __future__ import absolute_import, division, print_function, unicode_literals\n\nfrom collections import Hashable\nfrom logging import getLogger\nimport threading\nfrom warnings import warn\n\nlog = getLogger(__name__)\n\nfrom . import CondaError # NOQA\nCondaError = CondaError\n\nfrom . import compat, plan # NOQA\ncompat, plan = compat, plan\n\nfrom .core.solve import Solver # NOQA\nSolver = Solver\n\nfrom .plan import display_actions # NOQA\ndisplay_actions = display_actions\n\nfrom .cli.common import specs_from_args, spec_from_line, specs_from_url # NOQA\nfrom .cli.conda_argparse import add_parser_prefix, add_parser_channels # NOQA\nadd_parser_channels, add_parser_prefix = add_parser_channels, add_parser_prefix\nspecs_from_args, spec_from_line = specs_from_args, spec_from_line\nspecs_from_url = specs_from_url\n\nfrom .cli.conda_argparse import ArgumentParser # NOQA\nArgumentParser = ArgumentParser\n\nfrom .common.compat import PY3, StringIO, input, iteritems, string_types, text_type # NOQA\nPY3, StringIO, input, iteritems, string_types, text_type = PY3, StringIO, input, iteritems, string_types, text_type # NOQA\nfrom .gateways.connection.session import CondaSession # NOQA\nCondaSession = CondaSession\n\nfrom .common.toposort import _toposort # NOQA\n_toposort = _toposort\n\nfrom .gateways.disk.link import lchmod # NOQA\nlchmod = lchmod\n\nfrom .gateways.connection.download import TmpDownload # NOQA\n\nTmpDownload = TmpDownload\nhandle_proxy_407 = lambda x, y: warn(\"handle_proxy_407 is deprecated. \"\n \"Now handled by CondaSession.\")\nfrom .core.index import dist_str_in_index, fetch_index, get_index # NOQA\ndist_str_in_index, fetch_index, get_index = dist_str_in_index, fetch_index, get_index # NOQA\nfrom .core.package_cache_data import download, rm_fetched # NOQA\ndownload, rm_fetched = download, rm_fetched\n\nfrom .install import package_cache, prefix_placeholder, symlink_conda # NOQA\npackage_cache, prefix_placeholder, symlink_conda = package_cache, prefix_placeholder, symlink_conda\n\nfrom .gateways.disk.delete import delete_trash, move_to_trash # NOQA\ndelete_trash, move_to_trash = delete_trash, move_to_trash\n\nfrom .core.prefix_data import is_linked, linked, linked_data # NOQA\nis_linked, linked, linked_data = is_linked, linked, linked_data\n\nfrom .misc import untracked, walk_prefix # NOQA\nuntracked, walk_prefix = untracked, walk_prefix\n\nfrom .resolve import MatchSpec, ResolvePackageNotFound, Resolve, Unsatisfiable # NOQA\nMatchSpec, Resolve = MatchSpec, Resolve\nUnsatisfiable = Unsatisfiable\nNoPackagesFound = NoPackagesFoundError = ResolvePackageNotFound\n\nfrom .utils import hashsum_file, human_bytes, unix_path_to_win, url_path # NOQA\nfrom .common.path import win_path_to_unix # NOQA\nhashsum_file, human_bytes = hashsum_file, human_bytes\nunix_path_to_win = unix_path_to_win\nwin_path_to_unix, url_path = win_path_to_unix, url_path\n\nfrom .gateways.disk.read import compute_md5sum # NOQA\nmd5_file = compute_md5sum\n\nfrom .models.version import VersionOrder, normalized_version # NOQA\nVersionOrder, normalized_version = VersionOrder, normalized_version # NOQA\n\nimport conda.base.context # NOQA\nfrom .base.context import get_prefix, non_x86_linux_machines, sys_rc_path # NOQA\nnon_x86_linux_machines, sys_rc_path = non_x86_linux_machines, sys_rc_path\nget_prefix = get_prefix\n\nfrom ._vendor.auxlib.entity import EntityEncoder # NOQA\nEntityEncoder = EntityEncoder\nfrom .base.constants import DEFAULT_CHANNELS, DEFAULT_CHANNELS_WIN, DEFAULT_CHANNELS_UNIX # NOQA\nDEFAULT_CHANNELS, DEFAULT_CHANNELS_WIN, DEFAULT_CHANNELS_UNIX = DEFAULT_CHANNELS, DEFAULT_CHANNELS_WIN, DEFAULT_CHANNELS_UNIX # NOQA\nget_default_urls = lambda: DEFAULT_CHANNELS\n\narch_name = conda.base.context.context.arch_name\nbinstar_upload = conda.base.context.context.anaconda_upload\nbits = conda.base.context.context.bits\ndefault_prefix = conda.base.context.context.default_prefix\ndefault_python = conda.base.context.context.default_python\nenvs_dirs = conda.base.context.context.envs_dirs\npkgs_dirs = conda.base.context.context.pkgs_dirs\nplatform = conda.base.context.context.platform\nroot_dir = conda.base.context.context.root_prefix\nroot_writable = conda.base.context.context.root_writable\nsubdir = conda.base.context.context.subdir\nconda_private = conda.base.context.context.conda_private\nfrom .models.channel import get_conda_build_local_url # NOQA\nget_rc_urls = lambda: list(conda.base.context.context.channels)\nget_local_urls = lambda: list(get_conda_build_local_url()) or []\nload_condarc = lambda fn: conda.base.context.reset_context([fn])\nfrom .exceptions import PaddingError, LinkError, CondaOSError, PathNotFoundError # NOQA\nPaddingError = PaddingError\nLinkError = LinkError\nCondaOSError = CondaOSError\n# PathNotFoundError is the conda 4.4.x name for it - let's plan ahead.\nPathNotFoundError = CondaFileNotFoundError = PathNotFoundError\nfrom .gateways.disk.link import CrossPlatformStLink # NOQA\nCrossPlatformStLink = CrossPlatformStLink\n\nfrom .models.enums import FileMode # NOQA\nFileMode = FileMode\nfrom .models.enums import PathType # NOQA\nPathType = PathType\n\nfrom .models.records import PackageRecord # NOQA\nPackageRecord = IndexRecord = PackageRecord\n\nfrom .compat import TemporaryDirectory # NOQA\nTemporaryDirectory = TemporaryDirectory\n\nfrom .gateways.subprocess import ACTIVE_SUBPROCESSES, subprocess_call # NOQA\nACTIVE_SUBPROCESSES, subprocess_call = ACTIVE_SUBPROCESSES, subprocess_call\n\nfrom .core.subdir_data import cache_fn_url # NOQA\ncache_fn_url = cache_fn_url\n\n\nclass Completer(object): # pragma: no cover\n def get_items(self):\n return self._get_items()\n\n def __contains__(self, item):\n return True\n\n def __iter__(self):\n return iter(self.get_items())\n\n\nclass InstalledPackages(object):\n pass\n\n\nclass memoized(object): # pragma: no cover\n \"\"\"Decorator. Caches a function's return value each time it is called.\n If called later with the same arguments, the cached value is returned\n (not reevaluated).\n \"\"\"\n def __init__(self, func):\n self.func = func\n self.cache = {}\n self.lock = threading.Lock()\n\n def __call__(self, *args, **kw):\n newargs = []\n for arg in args:\n if isinstance(arg, list):\n newargs.append(tuple(arg))\n elif not isinstance(arg, Hashable):\n # uncacheable. a list, for instance.\n # better to not cache than blow up.\n return self.func(*args, **kw)\n else:\n newargs.append(arg)\n newargs = tuple(newargs)\n key = (newargs, frozenset(sorted(kw.items())))\n with self.lock:\n if key in self.cache:\n return self.cache[key]\n else:\n value = self.func(*args, **kw)\n self.cache[key] = value\n return value\n\n\nfrom .gateways.disk.delete import rm_rf as _rm_rf # NOQA\nfrom .core.prefix_data import delete_prefix_from_linked_data # NOQA\n\n\ndef rm_rf(path, max_retries=5, trash=True):\n _rm_rf(path, max_retries, trash)\n delete_prefix_from_linked_data(path)\n\n\n# ######################\n# signature.py\n# ######################\nKEYS = None\nKEYS_DIR = None\n\n\ndef hash_file(_):\n return None # pragma: no cover\n\n\ndef verify(_):\n return False # pragma: no cover\n\n\nfrom .plan import execute_actions, execute_instructions, execute_plan, install_actions # NOQA\nexecute_actions, execute_instructions = execute_actions, execute_instructions\nexecute_plan, install_actions = execute_plan, install_actions\n", "path": "conda/exports.py" } ]
[ { "content": "# -*- coding: utf-8 -*-\nfrom __future__ import absolute_import, division, print_function, unicode_literals\n\nfrom collections import Hashable\nfrom logging import getLogger\nimport threading\nfrom warnings import warn\n\nlog = getLogger(__name__)\n\nfrom . import CondaError # NOQA\nCondaError = CondaError\n\nfrom .base.context import reset_context # NOQA\nreset_context() # initialize context when conda.exports is imported\n\nfrom . import compat, plan # NOQA\ncompat, plan = compat, plan\n\nfrom .core.solve import Solver # NOQA\nSolver = Solver\n\nfrom .plan import display_actions # NOQA\ndisplay_actions = display_actions\n\nfrom .cli.common import specs_from_args, spec_from_line, specs_from_url # NOQA\nfrom .cli.conda_argparse import add_parser_prefix, add_parser_channels # NOQA\nadd_parser_channels, add_parser_prefix = add_parser_channels, add_parser_prefix\nspecs_from_args, spec_from_line = specs_from_args, spec_from_line\nspecs_from_url = specs_from_url\n\nfrom .cli.conda_argparse import ArgumentParser # NOQA\nArgumentParser = ArgumentParser\n\nfrom .common.compat import PY3, StringIO, input, iteritems, string_types, text_type # NOQA\nPY3, StringIO, input, iteritems, string_types, text_type = PY3, StringIO, input, iteritems, string_types, text_type # NOQA\nfrom .gateways.connection.session import CondaSession # NOQA\nCondaSession = CondaSession\n\nfrom .common.toposort import _toposort # NOQA\n_toposort = _toposort\n\nfrom .gateways.disk.link import lchmod # NOQA\nlchmod = lchmod\n\nfrom .gateways.connection.download import TmpDownload # NOQA\n\nTmpDownload = TmpDownload\nhandle_proxy_407 = lambda x, y: warn(\"handle_proxy_407 is deprecated. \"\n \"Now handled by CondaSession.\")\nfrom .core.index import dist_str_in_index, fetch_index, get_index # NOQA\ndist_str_in_index, fetch_index, get_index = dist_str_in_index, fetch_index, get_index # NOQA\nfrom .core.package_cache_data import download, rm_fetched # NOQA\ndownload, rm_fetched = download, rm_fetched\n\nfrom .install import package_cache, prefix_placeholder, symlink_conda # NOQA\npackage_cache, prefix_placeholder, symlink_conda = package_cache, prefix_placeholder, symlink_conda\n\nfrom .gateways.disk.delete import delete_trash, move_to_trash # NOQA\ndelete_trash, move_to_trash = delete_trash, move_to_trash\n\nfrom .core.prefix_data import is_linked, linked, linked_data # NOQA\nis_linked, linked, linked_data = is_linked, linked, linked_data\n\nfrom .misc import untracked, walk_prefix # NOQA\nuntracked, walk_prefix = untracked, walk_prefix\n\nfrom .resolve import MatchSpec, ResolvePackageNotFound, Resolve, Unsatisfiable # NOQA\nMatchSpec, Resolve = MatchSpec, Resolve\nUnsatisfiable = Unsatisfiable\nNoPackagesFound = NoPackagesFoundError = ResolvePackageNotFound\n\nfrom .utils import hashsum_file, human_bytes, unix_path_to_win, url_path # NOQA\nfrom .common.path import win_path_to_unix # NOQA\nhashsum_file, human_bytes = hashsum_file, human_bytes\nunix_path_to_win = unix_path_to_win\nwin_path_to_unix, url_path = win_path_to_unix, url_path\n\nfrom .gateways.disk.read import compute_md5sum # NOQA\nmd5_file = compute_md5sum\n\nfrom .models.version import VersionOrder, normalized_version # NOQA\nVersionOrder, normalized_version = VersionOrder, normalized_version # NOQA\n\nimport conda.base.context # NOQA\nfrom .base.context import get_prefix, non_x86_linux_machines, sys_rc_path # NOQA\nnon_x86_linux_machines, sys_rc_path = non_x86_linux_machines, sys_rc_path\nget_prefix = get_prefix\n\nfrom ._vendor.auxlib.entity import EntityEncoder # NOQA\nEntityEncoder = EntityEncoder\nfrom .base.constants import DEFAULT_CHANNELS, DEFAULT_CHANNELS_WIN, DEFAULT_CHANNELS_UNIX # NOQA\nDEFAULT_CHANNELS, DEFAULT_CHANNELS_WIN, DEFAULT_CHANNELS_UNIX = DEFAULT_CHANNELS, DEFAULT_CHANNELS_WIN, DEFAULT_CHANNELS_UNIX # NOQA\nget_default_urls = lambda: DEFAULT_CHANNELS\n\narch_name = conda.base.context.context.arch_name\nbinstar_upload = conda.base.context.context.anaconda_upload\nbits = conda.base.context.context.bits\ndefault_prefix = conda.base.context.context.default_prefix\ndefault_python = conda.base.context.context.default_python\nenvs_dirs = conda.base.context.context.envs_dirs\npkgs_dirs = conda.base.context.context.pkgs_dirs\nplatform = conda.base.context.context.platform\nroot_dir = conda.base.context.context.root_prefix\nroot_writable = conda.base.context.context.root_writable\nsubdir = conda.base.context.context.subdir\nconda_private = conda.base.context.context.conda_private\nfrom .models.channel import get_conda_build_local_url # NOQA\nget_rc_urls = lambda: list(conda.base.context.context.channels)\nget_local_urls = lambda: list(get_conda_build_local_url()) or []\nload_condarc = lambda fn: conda.base.context.reset_context([fn])\nfrom .exceptions import PaddingError, LinkError, CondaOSError, PathNotFoundError # NOQA\nPaddingError = PaddingError\nLinkError = LinkError\nCondaOSError = CondaOSError\n# PathNotFoundError is the conda 4.4.x name for it - let's plan ahead.\nPathNotFoundError = CondaFileNotFoundError = PathNotFoundError\nfrom .gateways.disk.link import CrossPlatformStLink # NOQA\nCrossPlatformStLink = CrossPlatformStLink\n\nfrom .models.enums import FileMode # NOQA\nFileMode = FileMode\nfrom .models.enums import PathType # NOQA\nPathType = PathType\n\nfrom .models.records import PackageRecord # NOQA\nPackageRecord = IndexRecord = PackageRecord\n\nfrom .compat import TemporaryDirectory # NOQA\nTemporaryDirectory = TemporaryDirectory\n\nfrom .gateways.subprocess import ACTIVE_SUBPROCESSES, subprocess_call # NOQA\nACTIVE_SUBPROCESSES, subprocess_call = ACTIVE_SUBPROCESSES, subprocess_call\n\nfrom .core.subdir_data import cache_fn_url # NOQA\ncache_fn_url = cache_fn_url\n\n\nclass Completer(object): # pragma: no cover\n def get_items(self):\n return self._get_items()\n\n def __contains__(self, item):\n return True\n\n def __iter__(self):\n return iter(self.get_items())\n\n\nclass InstalledPackages(object):\n pass\n\n\nclass memoized(object): # pragma: no cover\n \"\"\"Decorator. Caches a function's return value each time it is called.\n If called later with the same arguments, the cached value is returned\n (not reevaluated).\n \"\"\"\n def __init__(self, func):\n self.func = func\n self.cache = {}\n self.lock = threading.Lock()\n\n def __call__(self, *args, **kw):\n newargs = []\n for arg in args:\n if isinstance(arg, list):\n newargs.append(tuple(arg))\n elif not isinstance(arg, Hashable):\n # uncacheable. a list, for instance.\n # better to not cache than blow up.\n return self.func(*args, **kw)\n else:\n newargs.append(arg)\n newargs = tuple(newargs)\n key = (newargs, frozenset(sorted(kw.items())))\n with self.lock:\n if key in self.cache:\n return self.cache[key]\n else:\n value = self.func(*args, **kw)\n self.cache[key] = value\n return value\n\n\nfrom .gateways.disk.delete import rm_rf as _rm_rf # NOQA\nfrom .core.prefix_data import delete_prefix_from_linked_data # NOQA\n\n\ndef rm_rf(path, max_retries=5, trash=True):\n _rm_rf(path, max_retries, trash)\n delete_prefix_from_linked_data(path)\n\n\n# ######################\n# signature.py\n# ######################\nKEYS = None\nKEYS_DIR = None\n\n\ndef hash_file(_):\n return None # pragma: no cover\n\n\ndef verify(_):\n return False # pragma: no cover\n\n\nfrom .plan import execute_actions, execute_instructions, execute_plan, install_actions # NOQA\nexecute_actions, execute_instructions = execute_actions, execute_instructions\nexecute_plan, install_actions = execute_plan, install_actions\n", "path": "conda/exports.py" } ]
diff --git a/conda/exports.py b/conda/exports.py index 67af538f63d..62b755d58f8 100644 --- a/conda/exports.py +++ b/conda/exports.py @@ -11,6 +11,9 @@ from . import CondaError # NOQA CondaError = CondaError +from .base.context import reset_context # NOQA +reset_context() # initialize context when conda.exports is imported + from . import compat, plan # NOQA compat, plan = compat, plan
ktbyers__netmiko-1073
Huawei vrpv8 commit func issue After commiting changes on huawei vrpv8, cli on devices look like this: ``` [~HUAWEI]dot1x enable [*HUAWEI]snmp-agent sys-info version all Warning: SNMPv1/SNMPv2c is not secure, and SNMPv3 in either authentication or privacy mode is recommended. [*HUAWEI]commit [~HUAWEI] ``` with following code: ``` from netmiko import Netmiko device = { "host": "10.0.0.3", "username": "yyy", "password": "xxx", "device_type": "huawei_vrpv8", "session_log": "log_file2.txt" } config_commands = ['dot1x enable','snmp-agent sys-info version all'] net_connect = Netmiko(**device) output = net_connect.send_config_set(config_commands,exit_config_mode=False) output += net_connect.commit() print(output) ``` i got this error: ``` Traceback (most recent call last): File "/home/kafooo/PycharmProjects/nornir_scripts/venv/huawei_netmiko_test.py", line 18, in <module> output2 = net_connect.commit() File "/home/kafooo/PycharmProjects/nornir_scripts/venv/lib/python3.6/site-packages/netmiko/huawei/huawei_ssh.py", line 114, in commit strip_command=False, delay_factor=delay_factor) File "/home/kafooo/PycharmProjects/nornir_scripts/venv/lib/python3.6/site-packages/netmiko/base_connection.py", line 1206, in send_command_expect return self.send_command(*args, **kwargs) File "/home/kafooo/PycharmProjects/nornir_scripts/venv/lib/python3.6/site-packages/netmiko/base_connection.py", line 1188, in send_command search_pattern)) OSError: Search pattern never detected in send_command_expect: \[\*HUAWEI\] ``` looks like netmiko is expecting [*hostname] after commit, but in reality there is [~hostname] after commit
[ { "content": "from __future__ import print_function\nfrom __future__ import unicode_literals\nimport time\nimport re\nfrom netmiko.cisco_base_connection import CiscoSSHConnection\nfrom netmiko import log\n\n\nclass HuaweiSSH(CiscoSSHConnection):\n def session_preparation(self):\n \"\"\"Prepare the session after the connection has been established.\"\"\"\n self._test_channel_read()\n self.set_base_prompt()\n self.disable_paging(command=\"screen-length 0 temporary\")\n # Clear the read buffer\n time.sleep(0.3 * self.global_delay_factor)\n self.clear_buffer()\n\n def config_mode(self, config_command=\"system-view\"):\n \"\"\"Enter configuration mode.\"\"\"\n return super(HuaweiSSH, self).config_mode(config_command=config_command)\n\n def exit_config_mode(self, exit_config=\"return\", pattern=r\">\"):\n \"\"\"Exit configuration mode.\"\"\"\n return super(HuaweiSSH, self).exit_config_mode(\n exit_config=exit_config, pattern=pattern\n )\n\n def check_config_mode(self, check_string=\"]\"):\n \"\"\"Checks whether in configuration mode. Returns a boolean.\"\"\"\n return super(HuaweiSSH, self).check_config_mode(check_string=check_string)\n\n def check_enable_mode(self, *args, **kwargs):\n \"\"\"Huawei has no enable mode.\"\"\"\n pass\n\n def enable(self, *args, **kwargs):\n \"\"\"Huawei has no enable mode.\"\"\"\n return \"\"\n\n def exit_enable_mode(self, *args, **kwargs):\n \"\"\"Huawei has no enable mode.\"\"\"\n return \"\"\n\n def set_base_prompt(\n self, pri_prompt_terminator=\">\", alt_prompt_terminator=\"]\", delay_factor=1\n ):\n \"\"\"\n Sets self.base_prompt\n\n Used as delimiter for stripping of trailing prompt in output.\n\n Should be set to something that is general and applies in multiple contexts. For Comware\n this will be the router prompt with < > or [ ] stripped off.\n\n This will be set on logging in, but not when entering system-view\n \"\"\"\n log.debug(\"In set_base_prompt\")\n delay_factor = self.select_delay_factor(delay_factor)\n self.clear_buffer()\n self.write_channel(self.RETURN)\n time.sleep(0.5 * delay_factor)\n\n prompt = self.read_channel()\n prompt = self.normalize_linefeeds(prompt)\n\n # If multiple lines in the output take the last line\n prompt = prompt.split(self.RESPONSE_RETURN)[-1]\n prompt = prompt.strip()\n\n # Check that ends with a valid terminator character\n if not prompt[-1] in (pri_prompt_terminator, alt_prompt_terminator):\n raise ValueError(\"Router prompt not found: {0}\".format(prompt))\n\n # Strip off any leading HRP_. characters for USGv5 HA\n prompt = re.sub(r\"^HRP_.\", \"\", prompt, flags=re.M)\n\n # Strip off leading and trailing terminator\n prompt = prompt[1:-1]\n prompt = prompt.strip()\n self.base_prompt = prompt\n log.debug(\"prompt: {0}\".format(self.base_prompt))\n\n return self.base_prompt\n\n def save_config(self, cmd=\"save\", confirm=False, confirm_response=\"\"):\n \"\"\" Save Config for HuaweiSSH\"\"\"\n return super(HuaweiSSH, self).save_config(cmd=cmd, confirm=confirm)\n\n\nclass HuaweiVrpv8SSH(HuaweiSSH):\n def commit(self, comment=\"\", delay_factor=1):\n \"\"\"\n Commit the candidate configuration.\n\n Commit the entered configuration. Raise an error and return the failure\n if the commit fails.\n\n default:\n command_string = commit\n comment:\n command_string = commit comment <comment>\n\n \"\"\"\n delay_factor = self.select_delay_factor(delay_factor)\n error_marker = \"Failed to generate committed config\"\n command_string = \"commit\"\n\n if comment:\n command_string += ' comment \"{}\"'.format(comment)\n\n output = self.config_mode()\n output += self.send_command_expect(\n command_string,\n strip_prompt=False,\n strip_command=False,\n delay_factor=delay_factor,\n )\n output += self.exit_config_mode()\n\n if error_marker in output:\n raise ValueError(\n \"Commit failed with following errors:\\n\\n{}\".format(output)\n )\n return output\n\n def save_config(self, cmd=\"\", confirm=True, confirm_response=\"\"):\n \"\"\"Not Implemented\"\"\"\n raise NotImplementedError\n", "path": "netmiko/huawei/huawei_ssh.py" } ]
[ { "content": "from __future__ import print_function\nfrom __future__ import unicode_literals\nimport time\nimport re\nfrom netmiko.cisco_base_connection import CiscoSSHConnection\nfrom netmiko import log\n\n\nclass HuaweiSSH(CiscoSSHConnection):\n def session_preparation(self):\n \"\"\"Prepare the session after the connection has been established.\"\"\"\n self._test_channel_read()\n self.set_base_prompt()\n self.disable_paging(command=\"screen-length 0 temporary\")\n # Clear the read buffer\n time.sleep(0.3 * self.global_delay_factor)\n self.clear_buffer()\n\n def config_mode(self, config_command=\"system-view\"):\n \"\"\"Enter configuration mode.\"\"\"\n return super(HuaweiSSH, self).config_mode(config_command=config_command)\n\n def exit_config_mode(self, exit_config=\"return\", pattern=r\">\"):\n \"\"\"Exit configuration mode.\"\"\"\n return super(HuaweiSSH, self).exit_config_mode(\n exit_config=exit_config, pattern=pattern\n )\n\n def check_config_mode(self, check_string=\"]\"):\n \"\"\"Checks whether in configuration mode. Returns a boolean.\"\"\"\n return super(HuaweiSSH, self).check_config_mode(check_string=check_string)\n\n def check_enable_mode(self, *args, **kwargs):\n \"\"\"Huawei has no enable mode.\"\"\"\n pass\n\n def enable(self, *args, **kwargs):\n \"\"\"Huawei has no enable mode.\"\"\"\n return \"\"\n\n def exit_enable_mode(self, *args, **kwargs):\n \"\"\"Huawei has no enable mode.\"\"\"\n return \"\"\n\n def set_base_prompt(\n self, pri_prompt_terminator=\">\", alt_prompt_terminator=\"]\", delay_factor=1\n ):\n \"\"\"\n Sets self.base_prompt\n\n Used as delimiter for stripping of trailing prompt in output.\n\n Should be set to something that is general and applies in multiple contexts. For Comware\n this will be the router prompt with < > or [ ] stripped off.\n\n This will be set on logging in, but not when entering system-view\n \"\"\"\n log.debug(\"In set_base_prompt\")\n delay_factor = self.select_delay_factor(delay_factor)\n self.clear_buffer()\n self.write_channel(self.RETURN)\n time.sleep(0.5 * delay_factor)\n\n prompt = self.read_channel()\n prompt = self.normalize_linefeeds(prompt)\n\n # If multiple lines in the output take the last line\n prompt = prompt.split(self.RESPONSE_RETURN)[-1]\n prompt = prompt.strip()\n\n # Check that ends with a valid terminator character\n if not prompt[-1] in (pri_prompt_terminator, alt_prompt_terminator):\n raise ValueError(\"Router prompt not found: {0}\".format(prompt))\n\n # Strip off any leading HRP_. characters for USGv5 HA\n prompt = re.sub(r\"^HRP_.\", \"\", prompt, flags=re.M)\n\n # Strip off leading and trailing terminator\n prompt = prompt[1:-1]\n prompt = prompt.strip()\n self.base_prompt = prompt\n log.debug(\"prompt: {0}\".format(self.base_prompt))\n\n return self.base_prompt\n\n def save_config(self, cmd=\"save\", confirm=False, confirm_response=\"\"):\n \"\"\" Save Config for HuaweiSSH\"\"\"\n return super(HuaweiSSH, self).save_config(cmd=cmd, confirm=confirm)\n\n\nclass HuaweiVrpv8SSH(HuaweiSSH):\n def commit(self, comment=\"\", delay_factor=1):\n \"\"\"\n Commit the candidate configuration.\n\n Commit the entered configuration. Raise an error and return the failure\n if the commit fails.\n\n default:\n command_string = commit\n comment:\n command_string = commit comment <comment>\n\n \"\"\"\n delay_factor = self.select_delay_factor(delay_factor)\n error_marker = \"Failed to generate committed config\"\n command_string = \"commit\"\n\n if comment:\n command_string += ' comment \"{}\"'.format(comment)\n\n output = self.config_mode()\n output += self.send_command_expect(\n command_string,\n strip_prompt=False,\n strip_command=False,\n delay_factor=delay_factor,\n expect_string=r\"]\",\n )\n output += self.exit_config_mode()\n\n if error_marker in output:\n raise ValueError(\n \"Commit failed with following errors:\\n\\n{}\".format(output)\n )\n return output\n\n def save_config(self, cmd=\"\", confirm=True, confirm_response=\"\"):\n \"\"\"Not Implemented\"\"\"\n raise NotImplementedError\n", "path": "netmiko/huawei/huawei_ssh.py" } ]
diff --git a/netmiko/huawei/huawei_ssh.py b/netmiko/huawei/huawei_ssh.py index 7cc06f5c3..ecac49755 100644 --- a/netmiko/huawei/huawei_ssh.py +++ b/netmiko/huawei/huawei_ssh.py @@ -115,6 +115,7 @@ def commit(self, comment="", delay_factor=1): strip_prompt=False, strip_command=False, delay_factor=delay_factor, + expect_string=r"]", ) output += self.exit_config_mode()
MongoEngine__mongoengine-1461
__neq__ protocol in datastructure.py ```python def __eq__(self, other): return self.items() == other.items() def __neq__(self, other): return self.items() != other.items() ``` defined in [base/datastructures.py](https://github.com/MongoEngine/mongoengine/blob/master/mongoengine/base/datastructures.py#L432), not sure it's a typo for `__ne__` or called in another place?
[ { "content": "import itertools\nimport weakref\n\nimport six\n\nfrom mongoengine.common import _import_class\nfrom mongoengine.errors import DoesNotExist, MultipleObjectsReturned\n\n__all__ = ('BaseDict', 'BaseList', 'EmbeddedDocumentList')\n\n\nclass BaseDict(dict):\n \"\"\"A special dict so we can watch any changes.\"\"\"\n\n _dereferenced = False\n _instance = None\n _name = None\n\n def __init__(self, dict_items, instance, name):\n Document = _import_class('Document')\n EmbeddedDocument = _import_class('EmbeddedDocument')\n\n if isinstance(instance, (Document, EmbeddedDocument)):\n self._instance = weakref.proxy(instance)\n self._name = name\n super(BaseDict, self).__init__(dict_items)\n\n def __getitem__(self, key, *args, **kwargs):\n value = super(BaseDict, self).__getitem__(key)\n\n EmbeddedDocument = _import_class('EmbeddedDocument')\n if isinstance(value, EmbeddedDocument) and value._instance is None:\n value._instance = self._instance\n elif not isinstance(value, BaseDict) and isinstance(value, dict):\n value = BaseDict(value, None, '%s.%s' % (self._name, key))\n super(BaseDict, self).__setitem__(key, value)\n value._instance = self._instance\n elif not isinstance(value, BaseList) and isinstance(value, list):\n value = BaseList(value, None, '%s.%s' % (self._name, key))\n super(BaseDict, self).__setitem__(key, value)\n value._instance = self._instance\n return value\n\n def __setitem__(self, key, value, *args, **kwargs):\n self._mark_as_changed(key)\n return super(BaseDict, self).__setitem__(key, value)\n\n def __delete__(self, *args, **kwargs):\n self._mark_as_changed()\n return super(BaseDict, self).__delete__(*args, **kwargs)\n\n def __delitem__(self, key, *args, **kwargs):\n self._mark_as_changed(key)\n return super(BaseDict, self).__delitem__(key)\n\n def __delattr__(self, key, *args, **kwargs):\n self._mark_as_changed(key)\n return super(BaseDict, self).__delattr__(key)\n\n def __getstate__(self):\n self.instance = None\n self._dereferenced = False\n return self\n\n def __setstate__(self, state):\n self = state\n return self\n\n def clear(self, *args, **kwargs):\n self._mark_as_changed()\n return super(BaseDict, self).clear()\n\n def pop(self, *args, **kwargs):\n self._mark_as_changed()\n return super(BaseDict, self).pop(*args, **kwargs)\n\n def popitem(self, *args, **kwargs):\n self._mark_as_changed()\n return super(BaseDict, self).popitem()\n\n def setdefault(self, *args, **kwargs):\n self._mark_as_changed()\n return super(BaseDict, self).setdefault(*args, **kwargs)\n\n def update(self, *args, **kwargs):\n self._mark_as_changed()\n return super(BaseDict, self).update(*args, **kwargs)\n\n def _mark_as_changed(self, key=None):\n if hasattr(self._instance, '_mark_as_changed'):\n if key:\n self._instance._mark_as_changed('%s.%s' % (self._name, key))\n else:\n self._instance._mark_as_changed(self._name)\n\n\nclass BaseList(list):\n \"\"\"A special list so we can watch any changes.\"\"\"\n\n _dereferenced = False\n _instance = None\n _name = None\n\n def __init__(self, list_items, instance, name):\n Document = _import_class('Document')\n EmbeddedDocument = _import_class('EmbeddedDocument')\n\n if isinstance(instance, (Document, EmbeddedDocument)):\n self._instance = weakref.proxy(instance)\n self._name = name\n super(BaseList, self).__init__(list_items)\n\n def __getitem__(self, key, *args, **kwargs):\n value = super(BaseList, self).__getitem__(key)\n\n EmbeddedDocument = _import_class('EmbeddedDocument')\n if isinstance(value, EmbeddedDocument) and value._instance is None:\n value._instance = self._instance\n elif not isinstance(value, BaseDict) and isinstance(value, dict):\n value = BaseDict(value, None, '%s.%s' % (self._name, key))\n super(BaseList, self).__setitem__(key, value)\n value._instance = self._instance\n elif not isinstance(value, BaseList) and isinstance(value, list):\n value = BaseList(value, None, '%s.%s' % (self._name, key))\n super(BaseList, self).__setitem__(key, value)\n value._instance = self._instance\n return value\n\n def __iter__(self):\n for i in xrange(self.__len__()):\n yield self[i]\n\n def __setitem__(self, key, value, *args, **kwargs):\n if isinstance(key, slice):\n self._mark_as_changed()\n else:\n self._mark_as_changed(key)\n return super(BaseList, self).__setitem__(key, value)\n\n def __delitem__(self, key, *args, **kwargs):\n self._mark_as_changed()\n return super(BaseList, self).__delitem__(key)\n\n def __setslice__(self, *args, **kwargs):\n self._mark_as_changed()\n return super(BaseList, self).__setslice__(*args, **kwargs)\n\n def __delslice__(self, *args, **kwargs):\n self._mark_as_changed()\n return super(BaseList, self).__delslice__(*args, **kwargs)\n\n def __getstate__(self):\n self.instance = None\n self._dereferenced = False\n return self\n\n def __setstate__(self, state):\n self = state\n return self\n\n def __iadd__(self, other):\n self._mark_as_changed()\n return super(BaseList, self).__iadd__(other)\n\n def __imul__(self, other):\n self._mark_as_changed()\n return super(BaseList, self).__imul__(other)\n\n def append(self, *args, **kwargs):\n self._mark_as_changed()\n return super(BaseList, self).append(*args, **kwargs)\n\n def extend(self, *args, **kwargs):\n self._mark_as_changed()\n return super(BaseList, self).extend(*args, **kwargs)\n\n def insert(self, *args, **kwargs):\n self._mark_as_changed()\n return super(BaseList, self).insert(*args, **kwargs)\n\n def pop(self, *args, **kwargs):\n self._mark_as_changed()\n return super(BaseList, self).pop(*args, **kwargs)\n\n def remove(self, *args, **kwargs):\n self._mark_as_changed()\n return super(BaseList, self).remove(*args, **kwargs)\n\n def reverse(self, *args, **kwargs):\n self._mark_as_changed()\n return super(BaseList, self).reverse()\n\n def sort(self, *args, **kwargs):\n self._mark_as_changed()\n return super(BaseList, self).sort(*args, **kwargs)\n\n def _mark_as_changed(self, key=None):\n if hasattr(self._instance, '_mark_as_changed'):\n if key:\n self._instance._mark_as_changed(\n '%s.%s' % (self._name, key % len(self))\n )\n else:\n self._instance._mark_as_changed(self._name)\n\n\nclass EmbeddedDocumentList(BaseList):\n\n @classmethod\n def __match_all(cls, embedded_doc, kwargs):\n \"\"\"Return True if a given embedded doc matches all the filter\n kwargs. If it doesn't return False.\n \"\"\"\n for key, expected_value in kwargs.items():\n doc_val = getattr(embedded_doc, key)\n if doc_val != expected_value and six.text_type(doc_val) != expected_value:\n return False\n return True\n\n @classmethod\n def __only_matches(cls, embedded_docs, kwargs):\n \"\"\"Return embedded docs that match the filter kwargs.\"\"\"\n if not kwargs:\n return embedded_docs\n return [doc for doc in embedded_docs if cls.__match_all(doc, kwargs)]\n\n def __init__(self, list_items, instance, name):\n super(EmbeddedDocumentList, self).__init__(list_items, instance, name)\n self._instance = instance\n\n def filter(self, **kwargs):\n \"\"\"\n Filters the list by only including embedded documents with the\n given keyword arguments.\n\n :param kwargs: The keyword arguments corresponding to the fields to\n filter on. *Multiple arguments are treated as if they are ANDed\n together.*\n :return: A new ``EmbeddedDocumentList`` containing the matching\n embedded documents.\n\n Raises ``AttributeError`` if a given keyword is not a valid field for\n the embedded document class.\n \"\"\"\n values = self.__only_matches(self, kwargs)\n return EmbeddedDocumentList(values, self._instance, self._name)\n\n def exclude(self, **kwargs):\n \"\"\"\n Filters the list by excluding embedded documents with the given\n keyword arguments.\n\n :param kwargs: The keyword arguments corresponding to the fields to\n exclude on. *Multiple arguments are treated as if they are ANDed\n together.*\n :return: A new ``EmbeddedDocumentList`` containing the non-matching\n embedded documents.\n\n Raises ``AttributeError`` if a given keyword is not a valid field for\n the embedded document class.\n \"\"\"\n exclude = self.__only_matches(self, kwargs)\n values = [item for item in self if item not in exclude]\n return EmbeddedDocumentList(values, self._instance, self._name)\n\n def count(self):\n \"\"\"\n The number of embedded documents in the list.\n\n :return: The length of the list, equivalent to the result of ``len()``.\n \"\"\"\n return len(self)\n\n def get(self, **kwargs):\n \"\"\"\n Retrieves an embedded document determined by the given keyword\n arguments.\n\n :param kwargs: The keyword arguments corresponding to the fields to\n search on. *Multiple arguments are treated as if they are ANDed\n together.*\n :return: The embedded document matched by the given keyword arguments.\n\n Raises ``DoesNotExist`` if the arguments used to query an embedded\n document returns no results. ``MultipleObjectsReturned`` if more\n than one result is returned.\n \"\"\"\n values = self.__only_matches(self, kwargs)\n if len(values) == 0:\n raise DoesNotExist(\n '%s matching query does not exist.' % self._name\n )\n elif len(values) > 1:\n raise MultipleObjectsReturned(\n '%d items returned, instead of 1' % len(values)\n )\n\n return values[0]\n\n def first(self):\n \"\"\"Return the first embedded document in the list, or ``None``\n if empty.\n \"\"\"\n if len(self) > 0:\n return self[0]\n\n def create(self, **values):\n \"\"\"\n Creates a new embedded document and saves it to the database.\n\n .. note::\n The embedded document changes are not automatically saved\n to the database after calling this method.\n\n :param values: A dictionary of values for the embedded document.\n :return: The new embedded document instance.\n \"\"\"\n name = self._name\n EmbeddedClass = self._instance._fields[name].field.document_type_obj\n self._instance[self._name].append(EmbeddedClass(**values))\n\n return self._instance[self._name][-1]\n\n def save(self, *args, **kwargs):\n \"\"\"\n Saves the ancestor document.\n\n :param args: Arguments passed up to the ancestor Document's save\n method.\n :param kwargs: Keyword arguments passed up to the ancestor Document's\n save method.\n \"\"\"\n self._instance.save(*args, **kwargs)\n\n def delete(self):\n \"\"\"\n Deletes the embedded documents from the database.\n\n .. note::\n The embedded document changes are not automatically saved\n to the database after calling this method.\n\n :return: The number of entries deleted.\n \"\"\"\n values = list(self)\n for item in values:\n self._instance[self._name].remove(item)\n\n return len(values)\n\n def update(self, **update):\n \"\"\"\n Updates the embedded documents with the given update values.\n\n .. note::\n The embedded document changes are not automatically saved\n to the database after calling this method.\n\n :param update: A dictionary of update values to apply to each\n embedded document.\n :return: The number of entries updated.\n \"\"\"\n if len(update) == 0:\n return 0\n values = list(self)\n for item in values:\n for k, v in update.items():\n setattr(item, k, v)\n\n return len(values)\n\n\nclass StrictDict(object):\n __slots__ = ()\n _special_fields = set(['get', 'pop', 'iteritems', 'items', 'keys', 'create'])\n _classes = {}\n\n def __init__(self, **kwargs):\n for k, v in kwargs.iteritems():\n setattr(self, k, v)\n\n def __getitem__(self, key):\n key = '_reserved_' + key if key in self._special_fields else key\n try:\n return getattr(self, key)\n except AttributeError:\n raise KeyError(key)\n\n def __setitem__(self, key, value):\n key = '_reserved_' + key if key in self._special_fields else key\n return setattr(self, key, value)\n\n def __contains__(self, key):\n return hasattr(self, key)\n\n def get(self, key, default=None):\n try:\n return self[key]\n except KeyError:\n return default\n\n def pop(self, key, default=None):\n v = self.get(key, default)\n try:\n delattr(self, key)\n except AttributeError:\n pass\n return v\n\n def iteritems(self):\n for key in self:\n yield key, self[key]\n\n def items(self):\n return [(k, self[k]) for k in iter(self)]\n\n def iterkeys(self):\n return iter(self)\n\n def keys(self):\n return list(iter(self))\n\n def __iter__(self):\n return (key for key in self.__slots__ if hasattr(self, key))\n\n def __len__(self):\n return len(list(self.iteritems()))\n\n def __eq__(self, other):\n return self.items() == other.items()\n\n def __neq__(self, other):\n return self.items() != other.items()\n\n @classmethod\n def create(cls, allowed_keys):\n allowed_keys_tuple = tuple(('_reserved_' + k if k in cls._special_fields else k) for k in allowed_keys)\n allowed_keys = frozenset(allowed_keys_tuple)\n if allowed_keys not in cls._classes:\n class SpecificStrictDict(cls):\n __slots__ = allowed_keys_tuple\n\n def __repr__(self):\n return '{%s}' % ', '.join('\"{0!s}\": {1!r}'.format(k, v) for k, v in self.items())\n\n cls._classes[allowed_keys] = SpecificStrictDict\n return cls._classes[allowed_keys]\n\n\nclass SemiStrictDict(StrictDict):\n __slots__ = ('_extras', )\n _classes = {}\n\n def __getattr__(self, attr):\n try:\n super(SemiStrictDict, self).__getattr__(attr)\n except AttributeError:\n try:\n return self.__getattribute__('_extras')[attr]\n except KeyError as e:\n raise AttributeError(e)\n\n def __setattr__(self, attr, value):\n try:\n super(SemiStrictDict, self).__setattr__(attr, value)\n except AttributeError:\n try:\n self._extras[attr] = value\n except AttributeError:\n self._extras = {attr: value}\n\n def __delattr__(self, attr):\n try:\n super(SemiStrictDict, self).__delattr__(attr)\n except AttributeError:\n try:\n del self._extras[attr]\n except KeyError as e:\n raise AttributeError(e)\n\n def __iter__(self):\n try:\n extras_iter = iter(self.__getattribute__('_extras'))\n except AttributeError:\n extras_iter = ()\n return itertools.chain(super(SemiStrictDict, self).__iter__(), extras_iter)\n", "path": "mongoengine/base/datastructures.py" } ]
[ { "content": "import itertools\nimport weakref\n\nimport six\n\nfrom mongoengine.common import _import_class\nfrom mongoengine.errors import DoesNotExist, MultipleObjectsReturned\n\n__all__ = ('BaseDict', 'BaseList', 'EmbeddedDocumentList')\n\n\nclass BaseDict(dict):\n \"\"\"A special dict so we can watch any changes.\"\"\"\n\n _dereferenced = False\n _instance = None\n _name = None\n\n def __init__(self, dict_items, instance, name):\n Document = _import_class('Document')\n EmbeddedDocument = _import_class('EmbeddedDocument')\n\n if isinstance(instance, (Document, EmbeddedDocument)):\n self._instance = weakref.proxy(instance)\n self._name = name\n super(BaseDict, self).__init__(dict_items)\n\n def __getitem__(self, key, *args, **kwargs):\n value = super(BaseDict, self).__getitem__(key)\n\n EmbeddedDocument = _import_class('EmbeddedDocument')\n if isinstance(value, EmbeddedDocument) and value._instance is None:\n value._instance = self._instance\n elif not isinstance(value, BaseDict) and isinstance(value, dict):\n value = BaseDict(value, None, '%s.%s' % (self._name, key))\n super(BaseDict, self).__setitem__(key, value)\n value._instance = self._instance\n elif not isinstance(value, BaseList) and isinstance(value, list):\n value = BaseList(value, None, '%s.%s' % (self._name, key))\n super(BaseDict, self).__setitem__(key, value)\n value._instance = self._instance\n return value\n\n def __setitem__(self, key, value, *args, **kwargs):\n self._mark_as_changed(key)\n return super(BaseDict, self).__setitem__(key, value)\n\n def __delete__(self, *args, **kwargs):\n self._mark_as_changed()\n return super(BaseDict, self).__delete__(*args, **kwargs)\n\n def __delitem__(self, key, *args, **kwargs):\n self._mark_as_changed(key)\n return super(BaseDict, self).__delitem__(key)\n\n def __delattr__(self, key, *args, **kwargs):\n self._mark_as_changed(key)\n return super(BaseDict, self).__delattr__(key)\n\n def __getstate__(self):\n self.instance = None\n self._dereferenced = False\n return self\n\n def __setstate__(self, state):\n self = state\n return self\n\n def clear(self, *args, **kwargs):\n self._mark_as_changed()\n return super(BaseDict, self).clear()\n\n def pop(self, *args, **kwargs):\n self._mark_as_changed()\n return super(BaseDict, self).pop(*args, **kwargs)\n\n def popitem(self, *args, **kwargs):\n self._mark_as_changed()\n return super(BaseDict, self).popitem()\n\n def setdefault(self, *args, **kwargs):\n self._mark_as_changed()\n return super(BaseDict, self).setdefault(*args, **kwargs)\n\n def update(self, *args, **kwargs):\n self._mark_as_changed()\n return super(BaseDict, self).update(*args, **kwargs)\n\n def _mark_as_changed(self, key=None):\n if hasattr(self._instance, '_mark_as_changed'):\n if key:\n self._instance._mark_as_changed('%s.%s' % (self._name, key))\n else:\n self._instance._mark_as_changed(self._name)\n\n\nclass BaseList(list):\n \"\"\"A special list so we can watch any changes.\"\"\"\n\n _dereferenced = False\n _instance = None\n _name = None\n\n def __init__(self, list_items, instance, name):\n Document = _import_class('Document')\n EmbeddedDocument = _import_class('EmbeddedDocument')\n\n if isinstance(instance, (Document, EmbeddedDocument)):\n self._instance = weakref.proxy(instance)\n self._name = name\n super(BaseList, self).__init__(list_items)\n\n def __getitem__(self, key, *args, **kwargs):\n value = super(BaseList, self).__getitem__(key)\n\n EmbeddedDocument = _import_class('EmbeddedDocument')\n if isinstance(value, EmbeddedDocument) and value._instance is None:\n value._instance = self._instance\n elif not isinstance(value, BaseDict) and isinstance(value, dict):\n value = BaseDict(value, None, '%s.%s' % (self._name, key))\n super(BaseList, self).__setitem__(key, value)\n value._instance = self._instance\n elif not isinstance(value, BaseList) and isinstance(value, list):\n value = BaseList(value, None, '%s.%s' % (self._name, key))\n super(BaseList, self).__setitem__(key, value)\n value._instance = self._instance\n return value\n\n def __iter__(self):\n for i in xrange(self.__len__()):\n yield self[i]\n\n def __setitem__(self, key, value, *args, **kwargs):\n if isinstance(key, slice):\n self._mark_as_changed()\n else:\n self._mark_as_changed(key)\n return super(BaseList, self).__setitem__(key, value)\n\n def __delitem__(self, key, *args, **kwargs):\n self._mark_as_changed()\n return super(BaseList, self).__delitem__(key)\n\n def __setslice__(self, *args, **kwargs):\n self._mark_as_changed()\n return super(BaseList, self).__setslice__(*args, **kwargs)\n\n def __delslice__(self, *args, **kwargs):\n self._mark_as_changed()\n return super(BaseList, self).__delslice__(*args, **kwargs)\n\n def __getstate__(self):\n self.instance = None\n self._dereferenced = False\n return self\n\n def __setstate__(self, state):\n self = state\n return self\n\n def __iadd__(self, other):\n self._mark_as_changed()\n return super(BaseList, self).__iadd__(other)\n\n def __imul__(self, other):\n self._mark_as_changed()\n return super(BaseList, self).__imul__(other)\n\n def append(self, *args, **kwargs):\n self._mark_as_changed()\n return super(BaseList, self).append(*args, **kwargs)\n\n def extend(self, *args, **kwargs):\n self._mark_as_changed()\n return super(BaseList, self).extend(*args, **kwargs)\n\n def insert(self, *args, **kwargs):\n self._mark_as_changed()\n return super(BaseList, self).insert(*args, **kwargs)\n\n def pop(self, *args, **kwargs):\n self._mark_as_changed()\n return super(BaseList, self).pop(*args, **kwargs)\n\n def remove(self, *args, **kwargs):\n self._mark_as_changed()\n return super(BaseList, self).remove(*args, **kwargs)\n\n def reverse(self, *args, **kwargs):\n self._mark_as_changed()\n return super(BaseList, self).reverse()\n\n def sort(self, *args, **kwargs):\n self._mark_as_changed()\n return super(BaseList, self).sort(*args, **kwargs)\n\n def _mark_as_changed(self, key=None):\n if hasattr(self._instance, '_mark_as_changed'):\n if key:\n self._instance._mark_as_changed(\n '%s.%s' % (self._name, key % len(self))\n )\n else:\n self._instance._mark_as_changed(self._name)\n\n\nclass EmbeddedDocumentList(BaseList):\n\n @classmethod\n def __match_all(cls, embedded_doc, kwargs):\n \"\"\"Return True if a given embedded doc matches all the filter\n kwargs. If it doesn't return False.\n \"\"\"\n for key, expected_value in kwargs.items():\n doc_val = getattr(embedded_doc, key)\n if doc_val != expected_value and six.text_type(doc_val) != expected_value:\n return False\n return True\n\n @classmethod\n def __only_matches(cls, embedded_docs, kwargs):\n \"\"\"Return embedded docs that match the filter kwargs.\"\"\"\n if not kwargs:\n return embedded_docs\n return [doc for doc in embedded_docs if cls.__match_all(doc, kwargs)]\n\n def __init__(self, list_items, instance, name):\n super(EmbeddedDocumentList, self).__init__(list_items, instance, name)\n self._instance = instance\n\n def filter(self, **kwargs):\n \"\"\"\n Filters the list by only including embedded documents with the\n given keyword arguments.\n\n :param kwargs: The keyword arguments corresponding to the fields to\n filter on. *Multiple arguments are treated as if they are ANDed\n together.*\n :return: A new ``EmbeddedDocumentList`` containing the matching\n embedded documents.\n\n Raises ``AttributeError`` if a given keyword is not a valid field for\n the embedded document class.\n \"\"\"\n values = self.__only_matches(self, kwargs)\n return EmbeddedDocumentList(values, self._instance, self._name)\n\n def exclude(self, **kwargs):\n \"\"\"\n Filters the list by excluding embedded documents with the given\n keyword arguments.\n\n :param kwargs: The keyword arguments corresponding to the fields to\n exclude on. *Multiple arguments are treated as if they are ANDed\n together.*\n :return: A new ``EmbeddedDocumentList`` containing the non-matching\n embedded documents.\n\n Raises ``AttributeError`` if a given keyword is not a valid field for\n the embedded document class.\n \"\"\"\n exclude = self.__only_matches(self, kwargs)\n values = [item for item in self if item not in exclude]\n return EmbeddedDocumentList(values, self._instance, self._name)\n\n def count(self):\n \"\"\"\n The number of embedded documents in the list.\n\n :return: The length of the list, equivalent to the result of ``len()``.\n \"\"\"\n return len(self)\n\n def get(self, **kwargs):\n \"\"\"\n Retrieves an embedded document determined by the given keyword\n arguments.\n\n :param kwargs: The keyword arguments corresponding to the fields to\n search on. *Multiple arguments are treated as if they are ANDed\n together.*\n :return: The embedded document matched by the given keyword arguments.\n\n Raises ``DoesNotExist`` if the arguments used to query an embedded\n document returns no results. ``MultipleObjectsReturned`` if more\n than one result is returned.\n \"\"\"\n values = self.__only_matches(self, kwargs)\n if len(values) == 0:\n raise DoesNotExist(\n '%s matching query does not exist.' % self._name\n )\n elif len(values) > 1:\n raise MultipleObjectsReturned(\n '%d items returned, instead of 1' % len(values)\n )\n\n return values[0]\n\n def first(self):\n \"\"\"Return the first embedded document in the list, or ``None``\n if empty.\n \"\"\"\n if len(self) > 0:\n return self[0]\n\n def create(self, **values):\n \"\"\"\n Creates a new embedded document and saves it to the database.\n\n .. note::\n The embedded document changes are not automatically saved\n to the database after calling this method.\n\n :param values: A dictionary of values for the embedded document.\n :return: The new embedded document instance.\n \"\"\"\n name = self._name\n EmbeddedClass = self._instance._fields[name].field.document_type_obj\n self._instance[self._name].append(EmbeddedClass(**values))\n\n return self._instance[self._name][-1]\n\n def save(self, *args, **kwargs):\n \"\"\"\n Saves the ancestor document.\n\n :param args: Arguments passed up to the ancestor Document's save\n method.\n :param kwargs: Keyword arguments passed up to the ancestor Document's\n save method.\n \"\"\"\n self._instance.save(*args, **kwargs)\n\n def delete(self):\n \"\"\"\n Deletes the embedded documents from the database.\n\n .. note::\n The embedded document changes are not automatically saved\n to the database after calling this method.\n\n :return: The number of entries deleted.\n \"\"\"\n values = list(self)\n for item in values:\n self._instance[self._name].remove(item)\n\n return len(values)\n\n def update(self, **update):\n \"\"\"\n Updates the embedded documents with the given update values.\n\n .. note::\n The embedded document changes are not automatically saved\n to the database after calling this method.\n\n :param update: A dictionary of update values to apply to each\n embedded document.\n :return: The number of entries updated.\n \"\"\"\n if len(update) == 0:\n return 0\n values = list(self)\n for item in values:\n for k, v in update.items():\n setattr(item, k, v)\n\n return len(values)\n\n\nclass StrictDict(object):\n __slots__ = ()\n _special_fields = set(['get', 'pop', 'iteritems', 'items', 'keys', 'create'])\n _classes = {}\n\n def __init__(self, **kwargs):\n for k, v in kwargs.iteritems():\n setattr(self, k, v)\n\n def __getitem__(self, key):\n key = '_reserved_' + key if key in self._special_fields else key\n try:\n return getattr(self, key)\n except AttributeError:\n raise KeyError(key)\n\n def __setitem__(self, key, value):\n key = '_reserved_' + key if key in self._special_fields else key\n return setattr(self, key, value)\n\n def __contains__(self, key):\n return hasattr(self, key)\n\n def get(self, key, default=None):\n try:\n return self[key]\n except KeyError:\n return default\n\n def pop(self, key, default=None):\n v = self.get(key, default)\n try:\n delattr(self, key)\n except AttributeError:\n pass\n return v\n\n def iteritems(self):\n for key in self:\n yield key, self[key]\n\n def items(self):\n return [(k, self[k]) for k in iter(self)]\n\n def iterkeys(self):\n return iter(self)\n\n def keys(self):\n return list(iter(self))\n\n def __iter__(self):\n return (key for key in self.__slots__ if hasattr(self, key))\n\n def __len__(self):\n return len(list(self.iteritems()))\n\n def __eq__(self, other):\n return self.items() == other.items()\n\n def __ne__(self, other):\n return self.items() != other.items()\n\n @classmethod\n def create(cls, allowed_keys):\n allowed_keys_tuple = tuple(('_reserved_' + k if k in cls._special_fields else k) for k in allowed_keys)\n allowed_keys = frozenset(allowed_keys_tuple)\n if allowed_keys not in cls._classes:\n class SpecificStrictDict(cls):\n __slots__ = allowed_keys_tuple\n\n def __repr__(self):\n return '{%s}' % ', '.join('\"{0!s}\": {1!r}'.format(k, v) for k, v in self.items())\n\n cls._classes[allowed_keys] = SpecificStrictDict\n return cls._classes[allowed_keys]\n\n\nclass SemiStrictDict(StrictDict):\n __slots__ = ('_extras', )\n _classes = {}\n\n def __getattr__(self, attr):\n try:\n super(SemiStrictDict, self).__getattr__(attr)\n except AttributeError:\n try:\n return self.__getattribute__('_extras')[attr]\n except KeyError as e:\n raise AttributeError(e)\n\n def __setattr__(self, attr, value):\n try:\n super(SemiStrictDict, self).__setattr__(attr, value)\n except AttributeError:\n try:\n self._extras[attr] = value\n except AttributeError:\n self._extras = {attr: value}\n\n def __delattr__(self, attr):\n try:\n super(SemiStrictDict, self).__delattr__(attr)\n except AttributeError:\n try:\n del self._extras[attr]\n except KeyError as e:\n raise AttributeError(e)\n\n def __iter__(self):\n try:\n extras_iter = iter(self.__getattribute__('_extras'))\n except AttributeError:\n extras_iter = ()\n return itertools.chain(super(SemiStrictDict, self).__iter__(), extras_iter)\n", "path": "mongoengine/base/datastructures.py" } ]
diff --git a/mongoengine/base/datastructures.py b/mongoengine/base/datastructures.py index 8a7681e5d..b9aca8fae 100644 --- a/mongoengine/base/datastructures.py +++ b/mongoengine/base/datastructures.py @@ -429,7 +429,7 @@ def __len__(self): def __eq__(self, other): return self.items() == other.items() - def __neq__(self, other): + def __ne__(self, other): return self.items() != other.items() @classmethod
dotkom__onlineweb4-402
Sort list of users when adding marks When adding a mark, the list of user which the mark should relate to is not sorted. It should be. (It is probably sorted on realname instead of username) - Change the list to display realname instead of username. - Make sure it's sorted. (Bonus would be to have a select2js-ish search on it as well, but don't use time on it.)
[ { "content": "# -*- coding: utf-8 -*-\n\nimport datetime\nfrom pytz import timezone\n\nfrom django.conf import settings\nfrom django.contrib.auth.models import AbstractUser\nfrom django.db import models\nfrom django.utils.translation import ugettext as _\nfrom django.utils import timezone\n\n\n# If this list is changed, remember to check that the year property on\n# OnlineUser is still correct!\nFIELD_OF_STUDY_CHOICES = [\n (0, _(u'Gjest')),\n (1, _(u'Bachelor i Informatikk (BIT)')),\n # master degrees take up the interval [10,30>\n (10, _(u'Software (SW)')),\n (11, _(u'Informasjonsforvaltning (DIF)')),\n (12, _(u'Komplekse Datasystemer (KDS)')),\n (13, _(u'Spillteknologi (SPT)')),\n (14, _(u'Intelligente Systemer (IRS)')),\n (15, _(u'Helseinformatikk (MSMEDTEK)')),\n (30, _(u'Annen mastergrad')),\n (80, _(u'PhD')),\n (90, _(u'International')),\n (100, _(u'Annet Onlinemedlem')),\n]\n\nclass OnlineUser(AbstractUser):\n\n IMAGE_FOLDER = \"images/profiles\"\n IMAGE_EXTENSIONS = ['.jpg', '.jpeg', '.gif', '.png']\n \n # Online related fields\n field_of_study = models.SmallIntegerField(_(u\"studieretning\"), choices=FIELD_OF_STUDY_CHOICES, default=0)\n started_date = models.DateField(_(u\"startet studie\"), default=timezone.now().date())\n compiled = models.BooleanField(_(u\"kompilert\"), default=False)\n\n # Email\n infomail = models.BooleanField(_(u\"vil ha infomail\"), default=True)\n\n # Address\n phone_number = models.CharField(_(u\"telefonnummer\"), max_length=20, blank=True, null=True)\n address = models.CharField(_(u\"adresse\"), max_length=30, blank=True, null=True)\n zip_code = models.CharField(_(u\"postnummer\"), max_length=4, blank=True, null=True)\n\n # Other\n allergies = models.TextField(_(u\"allergier\"), blank=True, null=True)\n mark_rules = models.BooleanField(_(u\"godtatt prikkeregler\"), default=False)\n rfid = models.CharField(_(u\"RFID\"), max_length=50, blank=True, null=True)\n nickname = models.CharField(_(u\"nickname\"), max_length=50, blank=True, null=True)\n website = models.URLField(_(u\"hjemmeside\"), blank=True, null=True)\n\n\n image = models.ImageField(_(u\"bilde\"), max_length=200, upload_to=IMAGE_FOLDER, blank=True, null=True,\n default=settings.DEFAULT_PROFILE_PICTURE_URL)\n\n # NTNU credentials\n ntnu_username = models.CharField(_(u\"NTNU-brukernavn\"), max_length=10, blank=True, null=True)\n\n # TODO profile pictures\n # TODO checkbox for forwarding of @online.ntnu.no mail\n \n @property\n def is_member(self):\n \"\"\"\n Returns true if the User object is associated with Online.\n \"\"\"\n if AllowedUsername.objects.filter(username=self.ntnu_username).filter(expiration_date__gte=timezone.now()).count() > 0:\n return True\n return False\n\n def get_full_name(self):\n \"\"\"\n Returns the first_name plus the last_name, with a space in between.\n \"\"\"\n full_name = u'%s %s' % (self.first_name, self.last_name)\n return full_name.strip()\n\n def get_email(self):\n return self.get_emails().filter(primary = True)[0]\n\n def get_emails(self):\n return Email.objects.all().filter(user = self)\n\n @property\n def year(self):\n today = timezone.now().date()\n started = self.started_date\n\n # We say that a year is 360 days incase we are a bit slower to\n # add users one year.\n year = ((today - started).days / 360) + 1\n\n if self.field_of_study == 0 or self.field_of_study == 100: # others\n return 0\n # dont return a bachelor student as 4th or 5th grade\n elif self.field_of_study == 1: # bachelor\n if year > 3:\n return 3\n return year\n elif 9 < self.field_of_study < 30: # 10-29 is considered master\n if year >= 2:\n return 5\n return 4\n elif self.field_of_study == 80: # phd\n return year + 5\n elif self.field_of_study == 90: # international\n if year == 1:\n return 1\n return 4\n\n def __unicode__(self):\n return self.get_full_name()\n\n class Meta:\n verbose_name = _(u\"brukerprofil\")\n verbose_name_plural = _(u\"brukerprofiler\")\n\n\nclass Email(models.Model):\n user = models.ForeignKey(OnlineUser, related_name=\"email_user\")\n email = models.EmailField(_(u\"epostadresse\"), unique=True)\n primary = models.BooleanField(_(u\"aktiv\"), default=False)\n verified = models.BooleanField(_(u\"verifisert\"), default=False)\n\n def __unicode__(self):\n return self.email\n\n class Meta:\n verbose_name = _(u\"epostadresse\")\n verbose_name_plural = _(u\"epostadresser\")\n\n\nclass RegisterToken(models.Model):\n user = models.ForeignKey(OnlineUser, related_name=\"register_user\")\n email = models.EmailField(_(u\"epost\"), max_length=254)\n token = models.CharField(_(u\"token\"), max_length=32)\n created = models.DateTimeField(_(u\"opprettet dato\"), editable=False, auto_now_add=True)\n\n @property\n def is_valid(self):\n valid_period = datetime.timedelta(days=1)\n now = timezone.now()\n return now < self.created + valid_period \n\n\nclass AllowedUsername(models.Model):\n \"\"\"\n Holds usernames that are considered valid members of Online and the time they expire.\n \"\"\"\n username = models.CharField(_(u\"brukernavn\"), max_length=10)\n registered = models.DateField(_(u\"registrert\"))\n note = models.CharField(_(u\"notat\"), max_length=100)\n description = models.TextField(_(u\"beskrivelse\"), blank=True, null=True)\n expiration_date = models.DateField(_(u\"utløpsdato\"))\n\n @property\n def is_active(self):\n return timezone.now().date() < self.expiration_date\n\n def __unicode__(self):\n return self.username\n\n class Meta:\n verbose_name = _(u\"tillatt brukernavn\")\n verbose_name_plural = _(u\"tillatte brukernavn\")\n ordering = (u\"username\",)\n", "path": "apps/authentication/models.py" } ]
[ { "content": "# -*- coding: utf-8 -*-\n\nimport datetime\nfrom pytz import timezone\n\nfrom django.conf import settings\nfrom django.contrib.auth.models import AbstractUser\nfrom django.db import models\nfrom django.utils.translation import ugettext as _\nfrom django.utils import timezone\n\n\n# If this list is changed, remember to check that the year property on\n# OnlineUser is still correct!\nFIELD_OF_STUDY_CHOICES = [\n (0, _(u'Gjest')),\n (1, _(u'Bachelor i Informatikk (BIT)')),\n # master degrees take up the interval [10,30>\n (10, _(u'Software (SW)')),\n (11, _(u'Informasjonsforvaltning (DIF)')),\n (12, _(u'Komplekse Datasystemer (KDS)')),\n (13, _(u'Spillteknologi (SPT)')),\n (14, _(u'Intelligente Systemer (IRS)')),\n (15, _(u'Helseinformatikk (MSMEDTEK)')),\n (30, _(u'Annen mastergrad')),\n (80, _(u'PhD')),\n (90, _(u'International')),\n (100, _(u'Annet Onlinemedlem')),\n]\n\nclass OnlineUser(AbstractUser):\n\n IMAGE_FOLDER = \"images/profiles\"\n IMAGE_EXTENSIONS = ['.jpg', '.jpeg', '.gif', '.png']\n \n # Online related fields\n field_of_study = models.SmallIntegerField(_(u\"studieretning\"), choices=FIELD_OF_STUDY_CHOICES, default=0)\n started_date = models.DateField(_(u\"startet studie\"), default=timezone.now().date())\n compiled = models.BooleanField(_(u\"kompilert\"), default=False)\n\n # Email\n infomail = models.BooleanField(_(u\"vil ha infomail\"), default=True)\n\n # Address\n phone_number = models.CharField(_(u\"telefonnummer\"), max_length=20, blank=True, null=True)\n address = models.CharField(_(u\"adresse\"), max_length=30, blank=True, null=True)\n zip_code = models.CharField(_(u\"postnummer\"), max_length=4, blank=True, null=True)\n\n # Other\n allergies = models.TextField(_(u\"allergier\"), blank=True, null=True)\n mark_rules = models.BooleanField(_(u\"godtatt prikkeregler\"), default=False)\n rfid = models.CharField(_(u\"RFID\"), max_length=50, blank=True, null=True)\n nickname = models.CharField(_(u\"nickname\"), max_length=50, blank=True, null=True)\n website = models.URLField(_(u\"hjemmeside\"), blank=True, null=True)\n\n\n image = models.ImageField(_(u\"bilde\"), max_length=200, upload_to=IMAGE_FOLDER, blank=True, null=True,\n default=settings.DEFAULT_PROFILE_PICTURE_URL)\n\n # NTNU credentials\n ntnu_username = models.CharField(_(u\"NTNU-brukernavn\"), max_length=10, blank=True, null=True)\n\n # TODO profile pictures\n # TODO checkbox for forwarding of @online.ntnu.no mail\n \n @property\n def is_member(self):\n \"\"\"\n Returns true if the User object is associated with Online.\n \"\"\"\n if AllowedUsername.objects.filter(username=self.ntnu_username).filter(expiration_date__gte=timezone.now()).count() > 0:\n return True\n return False\n\n def get_full_name(self):\n \"\"\"\n Returns the first_name plus the last_name, with a space in between.\n \"\"\"\n full_name = u'%s %s' % (self.first_name, self.last_name)\n return full_name.strip()\n\n def get_email(self):\n return self.get_emails().filter(primary = True)[0]\n\n def get_emails(self):\n return Email.objects.all().filter(user = self)\n\n @property\n def year(self):\n today = timezone.now().date()\n started = self.started_date\n\n # We say that a year is 360 days incase we are a bit slower to\n # add users one year.\n year = ((today - started).days / 360) + 1\n\n if self.field_of_study == 0 or self.field_of_study == 100: # others\n return 0\n # dont return a bachelor student as 4th or 5th grade\n elif self.field_of_study == 1: # bachelor\n if year > 3:\n return 3\n return year\n elif 9 < self.field_of_study < 30: # 10-29 is considered master\n if year >= 2:\n return 5\n return 4\n elif self.field_of_study == 80: # phd\n return year + 5\n elif self.field_of_study == 90: # international\n if year == 1:\n return 1\n return 4\n\n def __unicode__(self):\n return self.get_full_name()\n\n class Meta:\n ordering = ['first_name', 'last_name']\n verbose_name = _(u\"brukerprofil\")\n verbose_name_plural = _(u\"brukerprofiler\")\n\n\nclass Email(models.Model):\n user = models.ForeignKey(OnlineUser, related_name=\"email_user\")\n email = models.EmailField(_(u\"epostadresse\"), unique=True)\n primary = models.BooleanField(_(u\"aktiv\"), default=False)\n verified = models.BooleanField(_(u\"verifisert\"), default=False)\n\n def __unicode__(self):\n return self.email\n\n class Meta:\n verbose_name = _(u\"epostadresse\")\n verbose_name_plural = _(u\"epostadresser\")\n\n\nclass RegisterToken(models.Model):\n user = models.ForeignKey(OnlineUser, related_name=\"register_user\")\n email = models.EmailField(_(u\"epost\"), max_length=254)\n token = models.CharField(_(u\"token\"), max_length=32)\n created = models.DateTimeField(_(u\"opprettet dato\"), editable=False, auto_now_add=True)\n\n @property\n def is_valid(self):\n valid_period = datetime.timedelta(days=1)\n now = timezone.now()\n return now < self.created + valid_period \n\n\nclass AllowedUsername(models.Model):\n \"\"\"\n Holds usernames that are considered valid members of Online and the time they expire.\n \"\"\"\n username = models.CharField(_(u\"brukernavn\"), max_length=10)\n registered = models.DateField(_(u\"registrert\"))\n note = models.CharField(_(u\"notat\"), max_length=100)\n description = models.TextField(_(u\"beskrivelse\"), blank=True, null=True)\n expiration_date = models.DateField(_(u\"utløpsdato\"))\n\n @property\n def is_active(self):\n return timezone.now().date() < self.expiration_date\n\n def __unicode__(self):\n return self.username\n\n class Meta:\n verbose_name = _(u\"tillatt brukernavn\")\n verbose_name_plural = _(u\"tillatte brukernavn\")\n ordering = (u\"username\",)\n", "path": "apps/authentication/models.py" } ]
diff --git a/apps/authentication/models.py b/apps/authentication/models.py index e6a918027..919e75fcf 100644 --- a/apps/authentication/models.py +++ b/apps/authentication/models.py @@ -116,6 +116,7 @@ def __unicode__(self): return self.get_full_name() class Meta: + ordering = ['first_name', 'last_name'] verbose_name = _(u"brukerprofil") verbose_name_plural = _(u"brukerprofiler")
beetbox__beets-3267
Data files are unreadable when beets is installed as an egg Following up from [a related Discourse thread](https://discourse.beets.io/t/error-running-from-source/739?u=arcresu), we noticed that beets isn't happy when it's installed as an egg. This is currently the default way beets is installed if you run `setup.py install` (not sure if #2083 changes this story). #### Problem In egg mode, the whole beets and beetsplug directory trees remain bundled in a zipfile and are not extracted. This causes problems when we try and read data files (aka package resources) like `config_default.yaml`, because we currently build up the path to such files relative to the executing module's source file and `open()` the resulting path. Obviously this doesn't work of the file is inside a zipfile. * Symptom: `ConfigError`s like `configuration error: verbose not found`. Caused by the failure to open `config_default.yaml` (aside: it would have been clearer if we got a `confit.ConfigReadError` rather than missing default values, but the loading of the default config file is inside a `if os.path.isfile(filename)` guard so it just silently skips the file when it doesn't exist even though beets depends on the default values being set). Needs to be fixed in confuse. * Symptom: `FileNotFoundError`s related to files like `lastgenre/genres.txt`. Caused by failure to open other data files needed by plugins. Needs to be fixed in beets. #### Solutions * Explicitly state that we don't support running beets as an egg. This seems unfortunate if it's the result of `setup.py install`. * Try and fix the way data files are accessed. There is [`pkg_resources`](https://setuptools.readthedocs.io/en/latest/pkg_resources.html) from `setuptools` that has helpers to transparently handle this. We could refactor these data file accesses into a helper for plugins. Data files are unreadable when beets is installed as an egg Following up from [a related Discourse thread](https://discourse.beets.io/t/error-running-from-source/739?u=arcresu), we noticed that beets isn't happy when it's installed as an egg. This is currently the default way beets is installed if you run `setup.py install` (not sure if #2083 changes this story). #### Problem In egg mode, the whole beets and beetsplug directory trees remain bundled in a zipfile and are not extracted. This causes problems when we try and read data files (aka package resources) like `config_default.yaml`, because we currently build up the path to such files relative to the executing module's source file and `open()` the resulting path. Obviously this doesn't work of the file is inside a zipfile. * Symptom: `ConfigError`s like `configuration error: verbose not found`. Caused by the failure to open `config_default.yaml` (aside: it would have been clearer if we got a `confit.ConfigReadError` rather than missing default values, but the loading of the default config file is inside a `if os.path.isfile(filename)` guard so it just silently skips the file when it doesn't exist even though beets depends on the default values being set). Needs to be fixed in confuse. * Symptom: `FileNotFoundError`s related to files like `lastgenre/genres.txt`. Caused by failure to open other data files needed by plugins. Needs to be fixed in beets. #### Solutions * Explicitly state that we don't support running beets as an egg. This seems unfortunate if it's the result of `setup.py install`. * Try and fix the way data files are accessed. There is [`pkg_resources`](https://setuptools.readthedocs.io/en/latest/pkg_resources.html) from `setuptools` that has helpers to transparently handle this. We could refactor these data file accesses into a helper for plugins.
[ { "content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n# This file is part of beets.\n# Copyright 2016, Adrian Sampson.\n#\n# Permission is hereby granted, free of charge, to any person obtaining\n# a copy of this software and associated documentation files (the\n# \"Software\"), to deal in the Software without restriction, including\n# without limitation the rights to use, copy, modify, merge, publish,\n# distribute, sublicense, and/or sell copies of the Software, and to\n# permit persons to whom the Software is furnished to do so, subject to\n# the following conditions:\n#\n# The above copyright notice and this permission notice shall be\n# included in all copies or substantial portions of the Software.\n\nfrom __future__ import division, absolute_import, print_function\n\nimport os\nimport sys\nimport subprocess\nimport shutil\nfrom setuptools import setup\n\n\ndef _read(fn):\n path = os.path.join(os.path.dirname(__file__), fn)\n return open(path).read()\n\n\ndef build_manpages():\n # Go into the docs directory and build the manpage.\n docdir = os.path.join(os.path.dirname(__file__), 'docs')\n curdir = os.getcwd()\n os.chdir(docdir)\n try:\n subprocess.check_call(['make', 'man'])\n except OSError:\n print(\"Could not build manpages (make man failed)!\", file=sys.stderr)\n return\n finally:\n os.chdir(curdir)\n\n # Copy resulting manpages.\n mandir = os.path.join(os.path.dirname(__file__), 'man')\n if os.path.exists(mandir):\n shutil.rmtree(mandir)\n shutil.copytree(os.path.join(docdir, '_build', 'man'), mandir)\n\n\n# Build manpages if we're making a source distribution tarball.\nif 'sdist' in sys.argv:\n build_manpages()\n\n\nsetup(\n name='beets',\n version='1.4.8',\n description='music tagger and library organizer',\n author='Adrian Sampson',\n author_email='[email protected]',\n url='http://beets.io/',\n license='MIT',\n platforms='ALL',\n long_description=_read('README.rst'),\n test_suite='test.testall.suite',\n include_package_data=True, # Install plugin resources.\n\n packages=[\n 'beets',\n 'beets.ui',\n 'beets.autotag',\n 'beets.util',\n 'beets.dbcore',\n 'beetsplug',\n 'beetsplug.bpd',\n 'beetsplug.web',\n 'beetsplug.lastgenre',\n 'beetsplug.metasync',\n ],\n entry_points={\n 'console_scripts': [\n 'beet = beets.ui:main',\n ],\n },\n\n install_requires=[\n 'six>=1.9',\n 'mutagen>=1.33',\n 'unidecode',\n 'musicbrainzngs>=0.4',\n 'pyyaml',\n ] + [\n # Avoid a version of munkres incompatible with Python 3.\n 'munkres~=1.0.0' if sys.version_info < (3, 5, 0) else\n 'munkres!=1.1.0,!=1.1.1' if sys.version_info < (3, 6, 0) else\n 'munkres>=1.0.0',\n ] + (\n # Use the backport of Python 3.4's `enum` module.\n ['enum34>=1.0.4'] if sys.version_info < (3, 4, 0) else []\n ) + (\n # Pin a Python 2-compatible version of Jellyfish.\n ['jellyfish==0.6.0'] if sys.version_info < (3, 4, 0) else ['jellyfish']\n ) + (\n # Support for ANSI console colors on Windows.\n ['colorama'] if (sys.platform == 'win32') else []\n ),\n\n tests_require=[\n 'beautifulsoup4',\n 'flask',\n 'mock',\n 'pylast',\n 'rarfile',\n 'responses',\n 'pyxdg',\n 'pathlib',\n 'python-mpd2',\n 'discogs-client'\n ],\n\n # Plugin (optional) dependencies:\n extras_require={\n 'absubmit': ['requests'],\n 'fetchart': ['requests', 'Pillow'],\n 'embedart': ['Pillow'],\n 'embyupdate': ['requests'],\n 'chroma': ['pyacoustid'],\n 'gmusic': ['gmusicapi'],\n 'discogs': ['discogs-client>=2.2.1'],\n 'beatport': ['requests-oauthlib>=0.6.1'],\n 'kodiupdate': ['requests'],\n 'lastgenre': ['pylast'],\n 'lastimport': ['pylast'],\n 'lyrics': ['requests', 'beautifulsoup4', 'langdetect'],\n 'mpdstats': ['python-mpd2>=0.4.2'],\n 'plexupdate': ['requests'],\n 'web': ['flask', 'flask-cors'],\n 'import': ['rarfile'],\n 'thumbnails': ['pyxdg', 'Pillow'] +\n (['pathlib'] if (sys.version_info < (3, 4, 0)) else []),\n 'metasync': ['dbus-python'],\n 'sonosupdate': ['soco'],\n 'bpd': ['PyGObject'],\n 'replaygain': ['PyGObject'],\n },\n # Non-Python/non-PyPI plugin dependencies:\n # chroma: chromaprint or fpcalc\n # convert: ffmpeg\n # badfiles: mp3val and flac\n # bpd: python-gi and GStreamer 1.0+\n # embedart: ImageMagick\n # absubmit: extractor binary from http://acousticbrainz.org/download\n # keyfinder: KeyFinder\n # replaygain: python-gi and GStreamer 1.0+ or mp3gain/aacgain\n # or Python Audio Tools\n # ipfs: go-ipfs\n\n classifiers=[\n 'Topic :: Multimedia :: Sound/Audio',\n 'Topic :: Multimedia :: Sound/Audio :: Players :: MP3',\n 'License :: OSI Approved :: MIT License',\n 'Environment :: Console',\n 'Environment :: Web Environment',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 2',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.4',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Programming Language :: Python :: 3.8',\n 'Programming Language :: Python :: Implementation :: CPython',\n ],\n)\n", "path": "setup.py" } ]
[ { "content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n# This file is part of beets.\n# Copyright 2016, Adrian Sampson.\n#\n# Permission is hereby granted, free of charge, to any person obtaining\n# a copy of this software and associated documentation files (the\n# \"Software\"), to deal in the Software without restriction, including\n# without limitation the rights to use, copy, modify, merge, publish,\n# distribute, sublicense, and/or sell copies of the Software, and to\n# permit persons to whom the Software is furnished to do so, subject to\n# the following conditions:\n#\n# The above copyright notice and this permission notice shall be\n# included in all copies or substantial portions of the Software.\n\nfrom __future__ import division, absolute_import, print_function\n\nimport os\nimport sys\nimport subprocess\nimport shutil\nfrom setuptools import setup\n\n\ndef _read(fn):\n path = os.path.join(os.path.dirname(__file__), fn)\n return open(path).read()\n\n\ndef build_manpages():\n # Go into the docs directory and build the manpage.\n docdir = os.path.join(os.path.dirname(__file__), 'docs')\n curdir = os.getcwd()\n os.chdir(docdir)\n try:\n subprocess.check_call(['make', 'man'])\n except OSError:\n print(\"Could not build manpages (make man failed)!\", file=sys.stderr)\n return\n finally:\n os.chdir(curdir)\n\n # Copy resulting manpages.\n mandir = os.path.join(os.path.dirname(__file__), 'man')\n if os.path.exists(mandir):\n shutil.rmtree(mandir)\n shutil.copytree(os.path.join(docdir, '_build', 'man'), mandir)\n\n\n# Build manpages if we're making a source distribution tarball.\nif 'sdist' in sys.argv:\n build_manpages()\n\n\nsetup(\n name='beets',\n version='1.4.8',\n description='music tagger and library organizer',\n author='Adrian Sampson',\n author_email='[email protected]',\n url='http://beets.io/',\n license='MIT',\n platforms='ALL',\n long_description=_read('README.rst'),\n test_suite='test.testall.suite',\n zip_safe=False,\n include_package_data=True, # Install plugin resources.\n\n packages=[\n 'beets',\n 'beets.ui',\n 'beets.autotag',\n 'beets.util',\n 'beets.dbcore',\n 'beetsplug',\n 'beetsplug.bpd',\n 'beetsplug.web',\n 'beetsplug.lastgenre',\n 'beetsplug.metasync',\n ],\n entry_points={\n 'console_scripts': [\n 'beet = beets.ui:main',\n ],\n },\n\n install_requires=[\n 'six>=1.9',\n 'mutagen>=1.33',\n 'unidecode',\n 'musicbrainzngs>=0.4',\n 'pyyaml',\n ] + [\n # Avoid a version of munkres incompatible with Python 3.\n 'munkres~=1.0.0' if sys.version_info < (3, 5, 0) else\n 'munkres!=1.1.0,!=1.1.1' if sys.version_info < (3, 6, 0) else\n 'munkres>=1.0.0',\n ] + (\n # Use the backport of Python 3.4's `enum` module.\n ['enum34>=1.0.4'] if sys.version_info < (3, 4, 0) else []\n ) + (\n # Pin a Python 2-compatible version of Jellyfish.\n ['jellyfish==0.6.0'] if sys.version_info < (3, 4, 0) else ['jellyfish']\n ) + (\n # Support for ANSI console colors on Windows.\n ['colorama'] if (sys.platform == 'win32') else []\n ),\n\n tests_require=[\n 'beautifulsoup4',\n 'flask',\n 'mock',\n 'pylast',\n 'rarfile',\n 'responses',\n 'pyxdg',\n 'pathlib',\n 'python-mpd2',\n 'discogs-client'\n ],\n\n # Plugin (optional) dependencies:\n extras_require={\n 'absubmit': ['requests'],\n 'fetchart': ['requests', 'Pillow'],\n 'embedart': ['Pillow'],\n 'embyupdate': ['requests'],\n 'chroma': ['pyacoustid'],\n 'gmusic': ['gmusicapi'],\n 'discogs': ['discogs-client>=2.2.1'],\n 'beatport': ['requests-oauthlib>=0.6.1'],\n 'kodiupdate': ['requests'],\n 'lastgenre': ['pylast'],\n 'lastimport': ['pylast'],\n 'lyrics': ['requests', 'beautifulsoup4', 'langdetect'],\n 'mpdstats': ['python-mpd2>=0.4.2'],\n 'plexupdate': ['requests'],\n 'web': ['flask', 'flask-cors'],\n 'import': ['rarfile'],\n 'thumbnails': ['pyxdg', 'Pillow'] +\n (['pathlib'] if (sys.version_info < (3, 4, 0)) else []),\n 'metasync': ['dbus-python'],\n 'sonosupdate': ['soco'],\n 'bpd': ['PyGObject'],\n 'replaygain': ['PyGObject'],\n },\n # Non-Python/non-PyPI plugin dependencies:\n # chroma: chromaprint or fpcalc\n # convert: ffmpeg\n # badfiles: mp3val and flac\n # bpd: python-gi and GStreamer 1.0+\n # embedart: ImageMagick\n # absubmit: extractor binary from http://acousticbrainz.org/download\n # keyfinder: KeyFinder\n # replaygain: python-gi and GStreamer 1.0+ or mp3gain/aacgain\n # or Python Audio Tools\n # ipfs: go-ipfs\n\n classifiers=[\n 'Topic :: Multimedia :: Sound/Audio',\n 'Topic :: Multimedia :: Sound/Audio :: Players :: MP3',\n 'License :: OSI Approved :: MIT License',\n 'Environment :: Console',\n 'Environment :: Web Environment',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 2',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.4',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Programming Language :: Python :: 3.8',\n 'Programming Language :: Python :: Implementation :: CPython',\n ],\n)\n", "path": "setup.py" } ]
diff --git a/setup.py b/setup.py index 1aacfc1c96..8e98282236 100755 --- a/setup.py +++ b/setup.py @@ -65,6 +65,7 @@ def build_manpages(): platforms='ALL', long_description=_read('README.rst'), test_suite='test.testall.suite', + zip_safe=False, include_package_data=True, # Install plugin resources. packages=[
fossasia__open-event-server-2795
Design issues on session, schedule, CfS page * [ ] On the event page the left sidebar stays there when the user scrolls down. Please implement this on the other pages as well, e.g. https://eventyay.com/e/18252ab6/schedule/ * [ ] sessions page * [ ] schedule page * [ ] call for speakers page * [ ] The tracks view on schedule page does not show colors of session (https://eventyay.com/e/18252ab6/schedule/) * [ ] The call for speaker page needs to include the year in the date format: Compare https://eventyay.com/e/18252ab6/cfs/
[ { "content": "from flask.ext.restplus import Namespace, reqparse\nfrom sqlalchemy.orm.collections import InstrumentedList\n\nfrom app.helpers.data import record_activity, save_to_db\nfrom app.helpers.data_getter import DataGetter\nfrom app.helpers.notification_email_triggers import trigger_new_session_notifications, \\\n trigger_session_schedule_change_notifications\nfrom app.helpers.notification_email_triggers import trigger_session_state_change_notifications\nfrom app.models.microlocation import Microlocation as MicrolocationModel\nfrom app.models.session import Session as SessionModel\nfrom app.models.session_type import SessionType as SessionTypeModel\nfrom app.models.speaker import Speaker as SpeakerModel\nfrom app.models.track import Track as TrackModel\nfrom .helpers import custom_fields as fields\nfrom .helpers.helpers import (\n can_create,\n can_update,\n can_delete\n)\nfrom .helpers.helpers import save_db_model, get_object_in_event, \\\n model_custom_form, requires_auth, parse_args\nfrom .helpers.special_fields import SessionLanguageField, SessionStateField\nfrom .helpers.utils import PAGINATED_MODEL, PaginatedResourceBase, ServiceDAO, \\\n PAGE_PARAMS, POST_RESPONSES, PUT_RESPONSES, SERVICE_RESPONSES\nfrom .helpers.utils import Resource, ETAG_HEADER_DEFN\n\napi = Namespace('sessions', description='Sessions', path='/')\n\n# Create models\nSESSION_TRACK = api.model('SessionTrack', {\n 'id': fields.Integer(required=True),\n 'name': fields.String(),\n})\n\nSESSION_SPEAKER = api.model('SessionSpeaker', {\n 'id': fields.Integer(required=True),\n 'name': fields.String(),\n 'organisation': fields.String()\n})\n\nSESSION_MICROLOCATION = api.model('SessionMicrolocation', {\n 'id': fields.Integer(required=True),\n 'name': fields.String(),\n})\n\nSESSION_TYPE = api.model('SessionType', {\n 'id': fields.Integer(required=True),\n 'name': fields.String(required=True),\n 'length': fields.String(required=True)\n})\n\nSESSION_TYPE_POST = api.clone('SessionTypePost', SESSION_TYPE)\ndel SESSION_TYPE_POST['id']\n\nSESSION = api.model('Session', {\n 'id': fields.Integer(required=True),\n 'title': fields.String(required=True),\n 'subtitle': fields.String(),\n 'short_abstract': fields.String(),\n 'long_abstract': fields.String(),\n 'comments': fields.String(),\n 'start_time': fields.DateTime(required=True),\n 'end_time': fields.DateTime(required=True),\n 'track': fields.Nested(SESSION_TRACK, allow_null=True),\n 'speakers': fields.List(fields.Nested(SESSION_SPEAKER)),\n 'language': SessionLanguageField(),\n 'microlocation': fields.Nested(SESSION_MICROLOCATION, allow_null=True),\n 'slides': fields.Upload(),\n 'video': fields.Upload(),\n 'audio': fields.Upload(),\n 'signup_url': fields.Uri(),\n 'state': SessionStateField(),\n 'session_type': fields.Nested(SESSION_TYPE, allow_null=True)\n})\n\nSESSION_PAGINATED = api.clone('SessionPaginated', PAGINATED_MODEL, {\n 'results': fields.List(fields.Nested(SESSION))\n})\n\nSESSION_POST = api.clone('SessionPost', SESSION, {\n 'track_id': fields.Integer(),\n 'speaker_ids': fields.List(fields.Integer()),\n 'microlocation_id': fields.Integer(),\n 'session_type_id': fields.Integer()\n})\n\ndel SESSION_POST['id']\ndel SESSION_POST['track']\ndel SESSION_POST['speakers']\ndel SESSION_POST['microlocation']\ndel SESSION_POST['session_type']\n\n\n# Create DAO\n\nclass SessionTypeDAO(ServiceDAO):\n \"\"\"\n SessionType DAO\n added for import/export feature\n \"\"\"\n version_key = 'sessions_ver'\n\n\nclass SessionDAO(ServiceDAO):\n version_key = 'sessions_ver'\n\n def _delete_fields(self, data):\n data = self._del(data, ['speaker_ids', 'track_id',\n 'microlocation_id', 'session_type_id'])\n # convert datetime fields\n for _ in ['start_time', 'end_time']:\n if _ in data:\n data[_] = SESSION_POST[_].from_str(data[_])\n return data\n\n def get_object(self, model, sid, event_id):\n \"\"\"\n returns object (model). Checks if object is in same event\n \"\"\"\n if sid is None:\n return None\n return get_object_in_event(model, sid, event_id)\n\n def fix_payload_post(self, event_id, data):\n \"\"\"\n Fixes payload of POST request\n \"\"\"\n if 'track_id' in data:\n data['track'] = self.get_object(\n TrackModel, data.get('track_id'), event_id)\n if 'microlocation_id' in data:\n data['microlocation'] = self.get_object(\n MicrolocationModel, data.get('microlocation_id'), event_id)\n if 'session_type_id' in data:\n data['session_type'] = self.get_object(\n SessionTypeModel, data.get('session_type_id'), event_id)\n if 'speaker_ids' in data:\n data['speakers'] = InstrumentedList(\n SpeakerModel.query.get(_) for _ in data.get('speaker_ids', [])\n if self.get_object(SpeakerModel, _, event_id) is not None\n )\n data['event_id'] = event_id\n data = self._delete_fields(data)\n return data\n\n def update(self, event_id, service_id, data):\n data = self.validate(data, event_id, check_required=False)\n data_copy = data.copy()\n data_copy = self.fix_payload_post(event_id, data_copy)\n data = self._delete_fields(data)\n session = DataGetter.get_session(service_id) # session before any updates are made\n obj = ServiceDAO.update(self, event_id, service_id, data, validate=False) # session after update\n\n if 'state' in data:\n if data['state'] == 'pending' and session.state == 'draft':\n trigger_new_session_notifications(session.id, event_id=event_id)\n\n if (data['state'] == 'accepted' and session.state != 'accepted') \\\n or (data['state'] == 'rejected' and session.state != 'rejected'):\n trigger_session_state_change_notifications(obj, event_id=event_id, state=data['state'])\n\n if session.start_time != obj.start_time or session.end_time != obj.end_time:\n trigger_session_schedule_change_notifications(obj, event_id)\n\n for f in ['track', 'microlocation', 'speakers', 'session_type']:\n if f in data_copy:\n setattr(obj, f, data_copy[f])\n obj = save_db_model(obj, SessionModel.__name__, event_id)\n return obj\n\n def create(self, event_id, data, url):\n data = self.validate(data, event_id)\n payload = self.fix_payload_post(event_id, data)\n speakers = payload.pop('speakers', None)\n session, status_code, location = ServiceDAO.create(self, event_id, payload, url, validate=False)\n if speakers:\n session.speakers = speakers\n save_to_db(session)\n if not self.is_importing and session.state == 'pending':\n trigger_new_session_notifications(session.id, event_id=event_id)\n return session, status_code, location\n\n def validate(self, data, event_id, check_required=True):\n form = DataGetter.get_custom_form_elements(event_id)\n model = None\n if form:\n model = model_custom_form(form.session_form, self.post_api_model)\n return ServiceDAO.validate(\n self, data, model=model, check_required=check_required)\n\n\nDAO = SessionDAO(SessionModel, SESSION_POST)\nTypeDAO = SessionTypeDAO(SessionTypeModel, SESSION_TYPE_POST)\n\n# Define Params\n\nSESSIONS_PARAMS = {\n 'start_time_gt': {},\n 'start_time_lt': {},\n 'end_time_gt': {},\n 'end_time_lt': {},\n 'order_by': {\n 'description': 'Order by a field, example \"start_time.asc\" or \"end_time.desc\"'\n }\n}\n\n\n# #########\n# Resources\n# #########\n\n\nclass SessionResource():\n \"\"\"\n Session Resource Base class\n \"\"\"\n session_parser = reqparse.RequestParser()\n session_parser.add_argument('start_time_gt', dest='__sessions_start_time_gt')\n session_parser.add_argument('start_time_lt', dest='__sessions_start_time_lt')\n session_parser.add_argument('end_time_gt', dest='__sessions_end_time_gt')\n session_parser.add_argument('end_time_lt', dest='__sessions_end_time_lt')\n session_parser.add_argument('order_by', dest='__sessions_order_by')\n\n\[email protected]('/events/<int:event_id>/sessions/<int:session_id>')\[email protected](responses=SERVICE_RESPONSES)\nclass Session(Resource):\n @api.doc('get_session')\n @api.header(*ETAG_HEADER_DEFN)\n @api.marshal_with(SESSION)\n def get(self, event_id, session_id):\n \"\"\"Fetch a session given its id\"\"\"\n return DAO.get(event_id, session_id)\n\n @requires_auth\n @can_delete(DAO)\n @api.doc('delete_session')\n @api.marshal_with(SESSION)\n def delete(self, event_id, session_id):\n \"\"\"Delete a session given its id\"\"\"\n return DAO.delete(event_id, session_id)\n\n @requires_auth\n @can_update(DAO)\n @api.doc('update_session', responses=PUT_RESPONSES)\n @api.marshal_with(SESSION)\n @api.expect(SESSION_POST)\n def put(self, event_id, session_id):\n \"\"\"Update a session given its id\"\"\"\n return DAO.update(event_id, session_id, self.api.payload)\n\n\[email protected]('/events/<int:event_id>/sessions')\nclass SessionList(Resource, SessionResource):\n @api.doc('list_sessions', params=SESSIONS_PARAMS)\n @api.header(*ETAG_HEADER_DEFN)\n @api.marshal_list_with(SESSION)\n def get(self, event_id):\n \"\"\"List all sessions\"\"\"\n return DAO.list(event_id, **parse_args(self.session_parser))\n\n @requires_auth\n @can_create(DAO)\n @api.doc('create_session', responses=POST_RESPONSES)\n @api.marshal_with(SESSION)\n @api.expect(SESSION_POST)\n def post(self, event_id):\n \"\"\"Create a session\"\"\"\n item = DAO.create(\n event_id,\n self.api.payload,\n self.api.url_for(self, event_id=event_id)\n )\n record_activity('create_session', session=item[0], event_id=event_id)\n return item\n\n\[email protected]('/events/<int:event_id>/sessions/page')\nclass SessionListPaginated(Resource, PaginatedResourceBase, SessionResource):\n @api.doc('list_sessions_paginated', params=PAGE_PARAMS)\n @api.doc(params=SESSIONS_PARAMS)\n @api.header(*ETAG_HEADER_DEFN)\n @api.marshal_with(SESSION_PAGINATED)\n def get(self, event_id):\n \"\"\"List sessions in a paginated manner\"\"\"\n args = self.parser.parse_args()\n return DAO.paginated_list(\n args=args, event_id=event_id, **parse_args(self.session_parser)\n )\n\n\n# Use Session DAO to check for permission\n\[email protected]('/events/<int:event_id>/sessions/types')\nclass SessionTypeList(Resource):\n @api.doc('list_session_types')\n @api.header(*ETAG_HEADER_DEFN)\n @api.marshal_list_with(SESSION_TYPE)\n def get(self, event_id):\n \"\"\"List all session types\"\"\"\n return TypeDAO.list(event_id)\n\n @requires_auth\n @can_create(DAO)\n @api.doc('create_session_type', responses=POST_RESPONSES)\n @api.marshal_with(SESSION_TYPE)\n @api.expect(SESSION_TYPE_POST)\n def post(self, event_id):\n \"\"\"Create a session type\"\"\"\n return TypeDAO.create(\n event_id,\n self.api.payload,\n self.api.url_for(self, event_id=event_id)\n )\n\n\[email protected]('/events/<int:event_id>/sessions/types/<int:type_id>')\nclass SessionType(Resource):\n @requires_auth\n @can_delete(DAO)\n @api.doc('delete_session_type')\n @api.marshal_with(SESSION_TYPE)\n def delete(self, event_id, type_id):\n \"\"\"Delete a session type given its id\"\"\"\n return TypeDAO.delete(event_id, type_id)\n\n @requires_auth\n @can_update(DAO)\n @api.doc('update_session_type', responses=PUT_RESPONSES)\n @api.marshal_with(SESSION_TYPE)\n @api.expect(SESSION_TYPE_POST)\n def put(self, event_id, type_id):\n \"\"\"Update a session type given its id\"\"\"\n return TypeDAO.update(event_id, type_id, self.api.payload)\n\n @api.hide\n @api.header(*ETAG_HEADER_DEFN)\n @api.marshal_with(SESSION_TYPE)\n def get(self, event_id, type_id):\n \"\"\"Fetch a session type given its id\"\"\"\n return TypeDAO.get(event_id, type_id)\n", "path": "app/api/sessions.py" } ]
[ { "content": "from flask.ext.restplus import Namespace, reqparse\nfrom sqlalchemy.orm.collections import InstrumentedList\n\nfrom app.helpers.data import record_activity, save_to_db\nfrom app.helpers.data_getter import DataGetter\nfrom app.helpers.notification_email_triggers import trigger_new_session_notifications, \\\n trigger_session_schedule_change_notifications\nfrom app.helpers.notification_email_triggers import trigger_session_state_change_notifications\nfrom app.models.microlocation import Microlocation as MicrolocationModel\nfrom app.models.session import Session as SessionModel\nfrom app.models.session_type import SessionType as SessionTypeModel\nfrom app.models.speaker import Speaker as SpeakerModel\nfrom app.models.track import Track as TrackModel\nfrom .helpers import custom_fields as fields\nfrom .helpers.helpers import (\n can_create,\n can_update,\n can_delete\n)\nfrom .helpers.helpers import save_db_model, get_object_in_event, \\\n model_custom_form, requires_auth, parse_args\nfrom .helpers.special_fields import SessionLanguageField, SessionStateField\nfrom .helpers.utils import PAGINATED_MODEL, PaginatedResourceBase, ServiceDAO, \\\n PAGE_PARAMS, POST_RESPONSES, PUT_RESPONSES, SERVICE_RESPONSES\nfrom .helpers.utils import Resource, ETAG_HEADER_DEFN\n\napi = Namespace('sessions', description='Sessions', path='/')\n\n# Create models\nSESSION_TRACK = api.model('SessionTrack', {\n 'id': fields.Integer(required=True),\n 'name': fields.String(),\n 'color': fields.Color(),\n})\n\nSESSION_SPEAKER = api.model('SessionSpeaker', {\n 'id': fields.Integer(required=True),\n 'name': fields.String(),\n 'organisation': fields.String()\n})\n\nSESSION_MICROLOCATION = api.model('SessionMicrolocation', {\n 'id': fields.Integer(required=True),\n 'name': fields.String(),\n})\n\nSESSION_TYPE = api.model('SessionType', {\n 'id': fields.Integer(required=True),\n 'name': fields.String(required=True),\n 'length': fields.String(required=True)\n})\n\nSESSION_TYPE_POST = api.clone('SessionTypePost', SESSION_TYPE)\ndel SESSION_TYPE_POST['id']\n\nSESSION = api.model('Session', {\n 'id': fields.Integer(required=True),\n 'title': fields.String(required=True),\n 'subtitle': fields.String(),\n 'short_abstract': fields.String(),\n 'long_abstract': fields.String(),\n 'comments': fields.String(),\n 'start_time': fields.DateTime(required=True),\n 'end_time': fields.DateTime(required=True),\n 'track': fields.Nested(SESSION_TRACK, allow_null=True),\n 'speakers': fields.List(fields.Nested(SESSION_SPEAKER)),\n 'language': SessionLanguageField(),\n 'microlocation': fields.Nested(SESSION_MICROLOCATION, allow_null=True),\n 'slides': fields.Upload(),\n 'video': fields.Upload(),\n 'audio': fields.Upload(),\n 'signup_url': fields.Uri(),\n 'state': SessionStateField(),\n 'session_type': fields.Nested(SESSION_TYPE, allow_null=True)\n})\n\nSESSION_PAGINATED = api.clone('SessionPaginated', PAGINATED_MODEL, {\n 'results': fields.List(fields.Nested(SESSION))\n})\n\nSESSION_POST = api.clone('SessionPost', SESSION, {\n 'track_id': fields.Integer(),\n 'speaker_ids': fields.List(fields.Integer()),\n 'microlocation_id': fields.Integer(),\n 'session_type_id': fields.Integer()\n})\n\ndel SESSION_POST['id']\ndel SESSION_POST['track']\ndel SESSION_POST['speakers']\ndel SESSION_POST['microlocation']\ndel SESSION_POST['session_type']\n\n\n# Create DAO\n\nclass SessionTypeDAO(ServiceDAO):\n \"\"\"\n SessionType DAO\n added for import/export feature\n \"\"\"\n version_key = 'sessions_ver'\n\n\nclass SessionDAO(ServiceDAO):\n version_key = 'sessions_ver'\n\n def _delete_fields(self, data):\n data = self._del(data, ['speaker_ids', 'track_id',\n 'microlocation_id', 'session_type_id'])\n # convert datetime fields\n for _ in ['start_time', 'end_time']:\n if _ in data:\n data[_] = SESSION_POST[_].from_str(data[_])\n return data\n\n def get_object(self, model, sid, event_id):\n \"\"\"\n returns object (model). Checks if object is in same event\n \"\"\"\n if sid is None:\n return None\n return get_object_in_event(model, sid, event_id)\n\n def fix_payload_post(self, event_id, data):\n \"\"\"\n Fixes payload of POST request\n \"\"\"\n if 'track_id' in data:\n data['track'] = self.get_object(\n TrackModel, data.get('track_id'), event_id)\n if 'microlocation_id' in data:\n data['microlocation'] = self.get_object(\n MicrolocationModel, data.get('microlocation_id'), event_id)\n if 'session_type_id' in data:\n data['session_type'] = self.get_object(\n SessionTypeModel, data.get('session_type_id'), event_id)\n if 'speaker_ids' in data:\n data['speakers'] = InstrumentedList(\n SpeakerModel.query.get(_) for _ in data.get('speaker_ids', [])\n if self.get_object(SpeakerModel, _, event_id) is not None\n )\n data['event_id'] = event_id\n data = self._delete_fields(data)\n return data\n\n def update(self, event_id, service_id, data):\n data = self.validate(data, event_id, check_required=False)\n data_copy = data.copy()\n data_copy = self.fix_payload_post(event_id, data_copy)\n data = self._delete_fields(data)\n session = DataGetter.get_session(service_id) # session before any updates are made\n obj = ServiceDAO.update(self, event_id, service_id, data, validate=False) # session after update\n\n if 'state' in data:\n if data['state'] == 'pending' and session.state == 'draft':\n trigger_new_session_notifications(session.id, event_id=event_id)\n\n if (data['state'] == 'accepted' and session.state != 'accepted') \\\n or (data['state'] == 'rejected' and session.state != 'rejected'):\n trigger_session_state_change_notifications(obj, event_id=event_id, state=data['state'])\n\n if session.start_time != obj.start_time or session.end_time != obj.end_time:\n trigger_session_schedule_change_notifications(obj, event_id)\n\n for f in ['track', 'microlocation', 'speakers', 'session_type']:\n if f in data_copy:\n setattr(obj, f, data_copy[f])\n obj = save_db_model(obj, SessionModel.__name__, event_id)\n return obj\n\n def create(self, event_id, data, url):\n data = self.validate(data, event_id)\n payload = self.fix_payload_post(event_id, data)\n speakers = payload.pop('speakers', None)\n session, status_code, location = ServiceDAO.create(self, event_id, payload, url, validate=False)\n if speakers:\n session.speakers = speakers\n save_to_db(session)\n if not self.is_importing and session.state == 'pending':\n trigger_new_session_notifications(session.id, event_id=event_id)\n return session, status_code, location\n\n def validate(self, data, event_id, check_required=True):\n form = DataGetter.get_custom_form_elements(event_id)\n model = None\n if form:\n model = model_custom_form(form.session_form, self.post_api_model)\n return ServiceDAO.validate(\n self, data, model=model, check_required=check_required)\n\n\nDAO = SessionDAO(SessionModel, SESSION_POST)\nTypeDAO = SessionTypeDAO(SessionTypeModel, SESSION_TYPE_POST)\n\n# Define Params\n\nSESSIONS_PARAMS = {\n 'start_time_gt': {},\n 'start_time_lt': {},\n 'end_time_gt': {},\n 'end_time_lt': {},\n 'order_by': {\n 'description': 'Order by a field, example \"start_time.asc\" or \"end_time.desc\"'\n }\n}\n\n\n# #########\n# Resources\n# #########\n\n\nclass SessionResource():\n \"\"\"\n Session Resource Base class\n \"\"\"\n session_parser = reqparse.RequestParser()\n session_parser.add_argument('start_time_gt', dest='__sessions_start_time_gt')\n session_parser.add_argument('start_time_lt', dest='__sessions_start_time_lt')\n session_parser.add_argument('end_time_gt', dest='__sessions_end_time_gt')\n session_parser.add_argument('end_time_lt', dest='__sessions_end_time_lt')\n session_parser.add_argument('order_by', dest='__sessions_order_by')\n\n\[email protected]('/events/<int:event_id>/sessions/<int:session_id>')\[email protected](responses=SERVICE_RESPONSES)\nclass Session(Resource):\n @api.doc('get_session')\n @api.header(*ETAG_HEADER_DEFN)\n @api.marshal_with(SESSION)\n def get(self, event_id, session_id):\n \"\"\"Fetch a session given its id\"\"\"\n return DAO.get(event_id, session_id)\n\n @requires_auth\n @can_delete(DAO)\n @api.doc('delete_session')\n @api.marshal_with(SESSION)\n def delete(self, event_id, session_id):\n \"\"\"Delete a session given its id\"\"\"\n return DAO.delete(event_id, session_id)\n\n @requires_auth\n @can_update(DAO)\n @api.doc('update_session', responses=PUT_RESPONSES)\n @api.marshal_with(SESSION)\n @api.expect(SESSION_POST)\n def put(self, event_id, session_id):\n \"\"\"Update a session given its id\"\"\"\n return DAO.update(event_id, session_id, self.api.payload)\n\n\[email protected]('/events/<int:event_id>/sessions')\nclass SessionList(Resource, SessionResource):\n @api.doc('list_sessions', params=SESSIONS_PARAMS)\n @api.header(*ETAG_HEADER_DEFN)\n @api.marshal_list_with(SESSION)\n def get(self, event_id):\n \"\"\"List all sessions\"\"\"\n return DAO.list(event_id, **parse_args(self.session_parser))\n\n @requires_auth\n @can_create(DAO)\n @api.doc('create_session', responses=POST_RESPONSES)\n @api.marshal_with(SESSION)\n @api.expect(SESSION_POST)\n def post(self, event_id):\n \"\"\"Create a session\"\"\"\n item = DAO.create(\n event_id,\n self.api.payload,\n self.api.url_for(self, event_id=event_id)\n )\n record_activity('create_session', session=item[0], event_id=event_id)\n return item\n\n\[email protected]('/events/<int:event_id>/sessions/page')\nclass SessionListPaginated(Resource, PaginatedResourceBase, SessionResource):\n @api.doc('list_sessions_paginated', params=PAGE_PARAMS)\n @api.doc(params=SESSIONS_PARAMS)\n @api.header(*ETAG_HEADER_DEFN)\n @api.marshal_with(SESSION_PAGINATED)\n def get(self, event_id):\n \"\"\"List sessions in a paginated manner\"\"\"\n args = self.parser.parse_args()\n return DAO.paginated_list(\n args=args, event_id=event_id, **parse_args(self.session_parser)\n )\n\n\n# Use Session DAO to check for permission\n\[email protected]('/events/<int:event_id>/sessions/types')\nclass SessionTypeList(Resource):\n @api.doc('list_session_types')\n @api.header(*ETAG_HEADER_DEFN)\n @api.marshal_list_with(SESSION_TYPE)\n def get(self, event_id):\n \"\"\"List all session types\"\"\"\n return TypeDAO.list(event_id)\n\n @requires_auth\n @can_create(DAO)\n @api.doc('create_session_type', responses=POST_RESPONSES)\n @api.marshal_with(SESSION_TYPE)\n @api.expect(SESSION_TYPE_POST)\n def post(self, event_id):\n \"\"\"Create a session type\"\"\"\n return TypeDAO.create(\n event_id,\n self.api.payload,\n self.api.url_for(self, event_id=event_id)\n )\n\n\[email protected]('/events/<int:event_id>/sessions/types/<int:type_id>')\nclass SessionType(Resource):\n @requires_auth\n @can_delete(DAO)\n @api.doc('delete_session_type')\n @api.marshal_with(SESSION_TYPE)\n def delete(self, event_id, type_id):\n \"\"\"Delete a session type given its id\"\"\"\n return TypeDAO.delete(event_id, type_id)\n\n @requires_auth\n @can_update(DAO)\n @api.doc('update_session_type', responses=PUT_RESPONSES)\n @api.marshal_with(SESSION_TYPE)\n @api.expect(SESSION_TYPE_POST)\n def put(self, event_id, type_id):\n \"\"\"Update a session type given its id\"\"\"\n return TypeDAO.update(event_id, type_id, self.api.payload)\n\n @api.hide\n @api.header(*ETAG_HEADER_DEFN)\n @api.marshal_with(SESSION_TYPE)\n def get(self, event_id, type_id):\n \"\"\"Fetch a session type given its id\"\"\"\n return TypeDAO.get(event_id, type_id)\n", "path": "app/api/sessions.py" } ]
diff --git a/app/api/sessions.py b/app/api/sessions.py index 7f16676763..80bcb3fa61 100644 --- a/app/api/sessions.py +++ b/app/api/sessions.py @@ -30,6 +30,7 @@ SESSION_TRACK = api.model('SessionTrack', { 'id': fields.Integer(required=True), 'name': fields.String(), + 'color': fields.Color(), }) SESSION_SPEAKER = api.model('SessionSpeaker', { diff --git a/app/static/js/admin/event/scheduler.js b/app/static/js/admin/event/scheduler.js index 56368a4498..c446ff3193 100644 --- a/app/static/js/admin/event/scheduler.js +++ b/app/static/js/admin/event/scheduler.js @@ -253,6 +253,7 @@ function addSessionToTimeline(sessionRef, position, shouldBroadcast) { var $mobileSessionElement = $(mobileSessionTemplate); $mobileSessionElement.find('.time').text(sessionRefObject.session.start_time.format('hh:mm A')); $mobileSessionElement.find('.event').text(sessionRefObject.session.title); + updateColor($mobileSessionElement.find('.event'), sessionRefObject.session.track); $mobileTimeline.find(".mobile-microlocation[data-microlocation-id=" + sessionRefObject.session.microlocation.id + "] > .mobile-sessions-holder").append($mobileSessionElement); if(sessionRefObject.session.hasOwnProperty('track') && !_.isNull(sessionRefObject.session.track)) { diff --git a/app/templates/gentelella/guest/event/cfs.html b/app/templates/gentelella/guest/event/cfs.html index 1fe12b8f7c..1845dcd58f 100644 --- a/app/templates/gentelella/guest/event/cfs.html +++ b/app/templates/gentelella/guest/event/cfs.html @@ -1,6 +1,8 @@ {% extends 'gentelella/guest/event/base.html' %} {% set active_page = 'cfs' %} +{% set carousel_height = 500 if active_page == 'info' else 300 -%} +{% set scrollspy_top = (carousel_height + 95) ~ "px" %} {% block head_css %} {{ super() }} @@ -23,9 +25,9 @@ <h1 style="font-weight: 300; font-size: 30px">{{ _("Call for Speakers") }}</h1> <span class="label label-info">{{ _("Yet to Open") }}</span> {% endif %} </p> - <p><strong>{{ call_for_speakers.start_date.strftime('%a, %B %d at %I:%M %p') }}</strong> + <p><strong>{{ call_for_speakers.start_date.strftime('%a, %B %d %Y at %I:%M %p') }}</strong> to - <strong>{{ call_for_speakers.end_date.strftime('%a, %B %d at %I:%M %p') }}</strong></p> + <strong>{{ call_for_speakers.end_date.strftime('%a, %B %d %Y at %I:%M %p') }}</strong></p> {{ call_for_speakers.announcement | safe }} {% if state == "now" or via_hash %} <a href="/e/{{ event.identifier }}/cfs/new"> @@ -44,10 +46,40 @@ <h1 style="font-weight: 300; font-size: 30px">{{ _("Call for Speakers") }}</h1> {{ super() }} <script src="{{ url_for('static', filename='js/jquery/jquery.multi-select.js') }}"></script> <script type="text/javascript" src="{{ url_for('static', filename='js/admin/session/new.js') }}"></script> + <script src="{{ url_for('static', filename='admin/lib/sticky-kit/jquery.sticky-kit.min.js') }}"></script> <script type="text/javascript"> $(document).ready(function () { $("textarea").summernote(summernoteConfig); }); - </script> + + var $scrollSpy = $('#scrollspy'); + $scrollSpy.stick_in_parent({ + offset_top: 20 + }); + + $scrollSpy.on('sticky_kit:unbottom', function () { + $scrollSpy.parent().css("position", "static"); + }); + $scrollSpy.on('sticky_kit:bottom', function () { + $scrollSpy.parent().css("bottom", "16px"); + }); + + $scrollSpy.on('sticky_kit:unstick', function () { + $scrollSpy.parent().css("position", "static"); + }); + $('body').scrollspy({target: '.scrollspy'}); + $('a[href*="#"]:not([href="#"])').click(function () { + if (location.pathname.replace(/^\//, '') == this.pathname.replace(/^\//, '') && location.hostname == this.hostname) { + var target = $(this.hash); + target = target.length ? target : $('[name=' + this.hash.slice(1) + ']'); + if (target.length) { + $('html, body').animate({ + scrollTop: target.offset().top + }, 1000); + return false; + } + } + }); +</script> {% endblock %} diff --git a/app/templates/gentelella/guest/event/schedule.html b/app/templates/gentelella/guest/event/schedule.html index 07f1ff3825..c666f6aefb 100644 --- a/app/templates/gentelella/guest/event/schedule.html +++ b/app/templates/gentelella/guest/event/schedule.html @@ -67,7 +67,38 @@ <h1 style="font-weight: 300; font-size: 24px">{{ _("Schedule") }} <script src="{{ url_for('static', filename='js/jquery/jquery.ellipsis.js') }}"></script> <script src="{{ url_for('static', filename='js/jquery/jquery.codezero.js') }}"></script> <script type="text/javascript" src="{{ url_for('static', filename='js/admin/event/scheduler.js') }}"></script> + <script src="{{ url_for('static', filename='admin/lib/sticky-kit/jquery.sticky-kit.min.js') }}"></script> <script type="text/javascript"> + var $scrollSpy = $('#scrollspy'); + $scrollSpy.stick_in_parent({ + offset_top: 20 + }); + + $scrollSpy.on('sticky_kit:unbottom', function () { + $scrollSpy.parent().css("position", "static"); + }); + + $scrollSpy.on('sticky_kit:bottom', function () { + $scrollSpy.parent().css("bottom", "16px"); + }); + + $scrollSpy.on('sticky_kit:unstick', function () { + $scrollSpy.parent().css("position", "static"); + }); + $('body').scrollspy({target: '.scrollspy'}); + + $('a[href*="#"]:not([href="#"])').click(function () { + if (location.pathname.replace(/^\//, '') == this.pathname.replace(/^\//, '') && location.hostname == this.hostname) { + var target = $(this.hash); + target = target.length ? target : $('[name=' + this.hash.slice(1) + ']'); + if (target.length) { + $('html, body').animate({ + scrollTop: target.offset().top + }, 1000); + return false; + } + } + }); $(".audio-btn").popover({ html: true, placement: 'top' diff --git a/app/templates/gentelella/guest/event/sessions.html b/app/templates/gentelella/guest/event/sessions.html index 14d9d4d217..874a288a05 100644 --- a/app/templates/gentelella/guest/event/sessions.html +++ b/app/templates/gentelella/guest/event/sessions.html @@ -20,6 +20,7 @@ <h1 style="font-weight: 300; font-size: 30px">{{ _("Sessions") }}</h1> {% block tail_js %} {{ super() }} + <script src="{{ url_for('static', filename='admin/lib/sticky-kit/jquery.sticky-kit.min.js') }}"></script> <script type="text/javascript"> $(".audio-btn").popover({ @@ -28,6 +29,41 @@ <h1 style="font-weight: 300; font-size: 30px">{{ _("Sessions") }}</h1> }).click(function (e) { e.preventDefault(); }); + var $scrollSpy = $('#scrollspy'); + $scrollSpy.stick_in_parent({ + offset_top: 20 + }); + + $scrollSpy.on('sticky_kit:unbottom', function () { + $scrollSpy.parent().css("position", "static"); + }); + + $scrollSpy.on('sticky_kit:bottom', function () { + $scrollSpy.parent().css("bottom", "16px"); + }); + + $scrollSpy.on('sticky_kit:unstick', function () { + $scrollSpy.parent().css("position", "static"); + }); + $('body').scrollspy({target: '.scrollspy'}); + $('a[href*="#"]:not([href="#"])').click(function () { + if (location.pathname.replace(/^\//, '') == this.pathname.replace(/^\//, '') && location.hostname == this.hostname) { + var target = $(this.hash); + target = target.length ? target : $('[name=' + this.hash.slice(1) + ']'); + if (target.length) { + $('html, body').animate({ + scrollTop: target.offset().top + }, 1000); + return false; + } + } + }); + $(".audio-btn").popover({ + html: true, + placement: 'top' + }).click(function (e) { + e.preventDefault(); + }); </script> {% endblock %}
ivy-llc__ivy-18357
[Feature Request]: size attribute of numpy ndarray **Is your feature request related to a problem? Please describe.** size attribute needs to be added to ndarray class of numpy frontend
[ { "content": "# global\n\n# local\nimport ivy\nimport ivy.functional.frontends.numpy as np_frontend\nfrom ivy.functional.frontends.numpy.func_wrapper import _to_ivy_array\n\n\nclass ndarray:\n def __init__(self, shape, dtype=\"float32\", order=None, _init_overload=False):\n if isinstance(dtype, np_frontend.dtype):\n dtype = dtype.ivy_dtype\n\n # in thise case shape is actually the desired array\n if _init_overload:\n self._ivy_array = (\n ivy.array(shape) if not isinstance(shape, ivy.Array) else shape\n )\n else:\n self._ivy_array = ivy.empty(shape=shape, dtype=dtype)\n\n ivy.utils.assertions.check_elem_in_list(\n order,\n [\"C\", \"F\", None],\n message=\"order must be one of 'C', 'F'\",\n )\n if order == \"F\":\n self._f_contiguous = True\n else:\n self._f_contiguous = False\n\n def __repr__(self):\n return str(self.ivy_array.__repr__()).replace(\n \"ivy.array\", \"ivy.frontends.numpy.ndarray\"\n )\n\n # Properties #\n # ---------- #\n\n @property\n def ivy_array(self):\n return self._ivy_array\n\n @property\n def T(self):\n return np_frontend.transpose(self)\n\n @property\n def shape(self):\n return self.ivy_array.shape\n\n @property\n def dtype(self):\n return self.ivy_array.dtype\n\n @property\n def ndim(self):\n return len(self.shape)\n\n @property\n def flat(self):\n self = self.flatten()\n return self\n\n # Setters #\n # --------#\n\n @ivy_array.setter\n def ivy_array(self, array):\n self._ivy_array = (\n ivy.array(array) if not isinstance(array, ivy.Array) else array\n )\n\n # Instance Methods #\n # ---------------- #\n\n def astype(self, dtype, order=\"K\", casting=\"unsafe\", subok=True, copy=True):\n ivy.utils.assertions.check_elem_in_list(\n order,\n [\"C\", \"F\", \"A\", \"K\"],\n message=\"order must be one of 'C', 'F', or 'A'\",\n )\n if copy and self._f_contiguous:\n ret = np_frontend.array(self.ivy_array, order=\"F\")\n else:\n ret = np_frontend.array(self.ivy_array) if copy else self\n\n dtype = np_frontend.to_ivy_dtype(dtype)\n if np_frontend.can_cast(ret, dtype, casting=casting):\n ret.ivy_array = ret.ivy_array.astype(dtype)\n else:\n raise ivy.utils.exceptions.IvyException(\n f\"Cannot cast array data from dtype('{ret.ivy_array.dtype}')\"\n f\" to dtype('{dtype}') according to the rule '{casting}'\"\n )\n if order == \"F\":\n ret._f_contiguous = True\n elif order == \"C\":\n ret._f_contiguous = False\n return ret\n\n def argmax(\n self,\n /,\n *,\n axis=None,\n out=None,\n keepdims=False,\n ):\n return np_frontend.argmax(\n self,\n axis=axis,\n out=out,\n keepdims=keepdims,\n )\n\n def reshape(self, newshape, /, *, order=\"C\"):\n ivy.utils.assertions.check_elem_in_list(\n order,\n [\"C\", \"F\", \"A\"],\n message=\"order must be one of 'C', 'F', or 'A'\",\n )\n if (order == \"A\" and self._f_contiguous) or order == \"F\":\n return np_frontend.reshape(self, newshape, order=\"F\")\n else:\n return np_frontend.reshape(self, newshape, order=\"C\")\n\n def resize(self, newshape, /, *, refcheck=True):\n return np_frontend.resize(self, newshape, refcheck)\n\n def transpose(self, axes, /):\n if axes and isinstance(axes[0], tuple):\n axes = axes[0]\n return np_frontend.transpose(self, axes=axes)\n\n def swapaxes(self, axis1, axis2, /):\n return np_frontend.swapaxes(self, axis1, axis2)\n\n def all(self, axis=None, out=None, keepdims=False, *, where=True):\n return np_frontend.all(self, axis, out, keepdims, where=where)\n\n def any(self, axis=None, out=None, keepdims=False, *, where=True):\n return np_frontend.any(self, axis, out, keepdims, where=where)\n\n def argsort(self, *, axis=-1, kind=None, order=None):\n return np_frontend.argsort(self, axis=axis, kind=kind, order=order)\n\n def mean(self, *, axis=None, dtype=None, out=None, keepdims=False, where=True):\n return np_frontend.mean(\n self,\n axis=axis,\n dtype=dtype,\n out=out,\n keepdims=keepdims,\n where=where,\n )\n\n def min(self, *, axis=None, out=None, keepdims=False, initial=None, where=True):\n return np_frontend.amin(\n self,\n axis=axis,\n out=out,\n keepdims=keepdims,\n initial=initial,\n where=where,\n )\n\n def max(self, *, axis=None, out=None, keepdims=False, initial=None, where=True):\n return np_frontend.amax(\n self,\n axis=axis,\n out=out,\n keepdims=keepdims,\n initial=initial,\n where=where,\n )\n\n def argmin(\n self,\n /,\n *,\n axis=None,\n keepdims=False,\n out=None,\n ):\n return np_frontend.argmin(\n self,\n axis=axis,\n keepdims=keepdims,\n out=out,\n )\n\n def clip(\n self,\n min,\n max,\n /,\n out=None,\n *,\n where=True,\n casting=\"same_kind\",\n order=\"K\",\n dtype=None,\n subok=True,\n ):\n return np_frontend.clip(\n self,\n min,\n max,\n out=out,\n where=where,\n casting=casting,\n order=order,\n dtype=dtype,\n subok=subok,\n )\n\n def compress(self, condition, axis=None, out=None):\n return np_frontend.compress(\n condition=condition,\n a=self,\n axis=axis,\n out=out,\n )\n\n def conj(\n self,\n /,\n out=None,\n *,\n where=True,\n casting=\"same_kind\",\n order=\"K\",\n dtype=None,\n subok=True,\n ):\n return np_frontend.conj(\n self.ivy_array,\n out=out,\n where=where,\n casting=casting,\n order=order,\n dtype=dtype,\n subok=subok,\n )\n\n def cumprod(self, *, axis=None, dtype=None, out=None):\n return np_frontend.cumprod(\n self,\n axis=axis,\n dtype=dtype,\n out=out,\n )\n\n def cumsum(self, *, axis=None, dtype=None, out=None):\n return np_frontend.cumsum(\n self,\n axis=axis,\n dtype=dtype,\n out=out,\n )\n\n def dot(self, b, out=None):\n return np_frontend.dot(self, b, out=out)\n\n def diagonal(self, *, offset=0, axis1=0, axis2=1):\n return np_frontend.diagonal(\n self,\n offset=offset,\n axis1=axis1,\n axis2=axis2,\n )\n\n def sort(self, *, axis=-1, kind=None, order=None):\n return np_frontend.sort(self, axis=axis, kind=kind, order=order)\n\n def copy(self, order=\"C\"):\n return np_frontend.copy(self, order=order)\n\n def nonzero(\n self,\n ):\n return np_frontend.nonzero(self)[0]\n\n def ravel(self, order=\"C\"):\n ivy.utils.assertions.check_elem_in_list(\n order,\n [\"C\", \"F\", \"A\", \"K\"],\n message=\"order must be one of 'C', 'F', 'A', or 'K'\",\n )\n if (order in [\"K\", \"A\"] and self._f_contiguous) or order == \"F\":\n return np_frontend.ravel(self, order=\"F\")\n else:\n return np_frontend.ravel(self, order=\"C\")\n\n def flatten(self, order=\"C\"):\n ivy.utils.assertions.check_elem_in_list(\n order,\n [\"C\", \"F\", \"A\", \"K\"],\n message=\"order must be one of 'C', 'F', 'A', or 'K'\",\n )\n if (order in [\"K\", \"A\"] and self._f_contiguous) or order == \"F\":\n return np_frontend.ravel(self, order=\"F\")\n else:\n return np_frontend.ravel(self, order=\"C\")\n\n def fill(self, num):\n return np_frontend.fill(self, num)\n\n def repeat(self, repeats, axis=None):\n return np_frontend.repeat(self, repeats, axis=axis)\n\n def searchsorted(self, v, side=\"left\", sorter=None):\n return np_frontend.searchsorted(self, v, side=side, sorter=sorter)\n\n def squeeze(self, axis=None):\n return np_frontend.squeeze(self, axis=axis)\n\n def std(\n self, axis=None, dtype=None, out=None, ddof=0, keepdims=False, *, where=True\n ):\n return np_frontend.std(\n self,\n axis=axis,\n dtype=dtype,\n out=out,\n ddof=ddof,\n keepdims=keepdims,\n where=where,\n )\n\n def tobytes(self, order=\"C\") -> bytes:\n return np_frontend.tobytes(self, order=order)\n\n def tostring(self, order=\"C\") -> bytes:\n return np_frontend.tobytes(self.data, order=order)\n\n def prod(\n self,\n *,\n axis=None,\n dtype=None,\n out=None,\n keepdims=False,\n initial=None,\n where=True,\n ):\n return np_frontend.prod(\n self,\n axis=axis,\n dtype=dtype,\n keepdims=keepdims,\n initial=initial,\n where=where,\n out=out,\n )\n\n def tofile(self, fid, /, sep=\"\", format_=\"%s\"):\n if self.ndim == 0:\n string = str(self)\n else:\n string = sep.join([str(item) for item in self.tolist()])\n with open(fid, \"w\") as f:\n f.write(string)\n\n def tolist(self) -> list:\n return self._ivy_array.to_list()\n\n def view(self):\n return np_frontend.reshape(self, tuple(self.shape))\n\n def __add__(self, value, /):\n return np_frontend.add(self, value)\n\n def __radd__(self, value, /):\n return np_frontend.add(self, value)\n\n def __sub__(self, value, /):\n return np_frontend.subtract(self, value)\n\n def __mul__(self, value, /):\n return np_frontend.multiply(self, value)\n\n def __rmul__(self, value, /):\n return np_frontend.multiply(value, self)\n\n def __truediv__(self, value, /):\n return np_frontend.true_divide(self, value)\n\n def __floordiv__(self, value, /):\n return np_frontend.floor_divide(self, value)\n\n def __rtruediv__(self, value, /):\n return np_frontend.true_divide(value, self)\n\n def __pow__(self, value, /):\n return np_frontend.power(self, value)\n\n def __and__(self, value, /):\n return np_frontend.logical_and(self, value)\n\n def __or__(self, value, /):\n return np_frontend.logical_or(self, value)\n\n def __xor__(self, value, /):\n return np_frontend.logical_xor(self, value)\n\n def __matmul__(self, value, /):\n return np_frontend.matmul(self, value)\n\n def __copy__(\n self,\n ):\n return np_frontend.copy(self)\n\n def __deepcopy__(self, memo, /):\n return self.ivy_array.__deepcopy__(memo)\n\n def __neg__(\n self,\n ):\n return np_frontend.negative(self)\n\n def __pos__(\n self,\n ):\n return np_frontend.positive(self)\n\n def __bool__(\n self,\n ):\n if isinstance(self.ivy_array, int):\n return self.ivy_array != 0\n\n temp = ivy.squeeze(ivy.asarray(self.ivy_array), axis=None)\n shape = ivy.shape(temp)\n if shape:\n raise ValueError(\n \"The truth value of an array with more than one element is ambiguous. \"\n \"Use a.any() or a.all()\"\n )\n\n return temp != 0\n\n def __ne__(self, value, /):\n return np_frontend.not_equal(self, value)\n\n def __len__(self):\n return len(self.ivy_array)\n\n def __eq__(self, value, /):\n return np_frontend.equal(self, value)\n\n def __ge__(self, value, /):\n return np_frontend.greater_equal(self, value)\n\n def __gt__(self, value, /):\n return np_frontend.greater(self, value)\n\n def __le__(self, value, /):\n return np_frontend.less_equal(self, value)\n\n def __lt__(self, value, /):\n return np_frontend.less(self, value)\n\n def __int__(\n self,\n ):\n return ivy.to_scalar(ivy.reshape(self.ivy_array, (-1,)).astype(ivy.int64))\n\n def __float__(\n self,\n ):\n return ivy.to_scalar(ivy.reshape(self.ivy_array, (-1,)).astype(ivy.float64))\n\n def __complex__(\n self,\n ):\n return ivy.to_scalar(ivy.reshape(self.ivy_array, (-1,)).astype(ivy.complex128))\n\n def __contains__(self, key, /):\n return key in ivy.reshape(self.ivy_array, -1)\n\n def __iadd__(self, value, /):\n return np_frontend.add(self, value, out=self)\n\n def __isub__(self, value, /):\n return np_frontend.subtract(self, value, out=self)\n\n def __imul__(self, value, /):\n return np_frontend.multiply(self, value, out=self)\n\n def __itruediv__(self, value, /):\n return np_frontend.true_divide(self, value, out=self)\n\n def __ifloordiv__(self, value, /):\n return np_frontend.floor_divide(self, value, out=self)\n\n def __ipow__(self, value, /):\n return np_frontend.power(self, value, out=self)\n\n def __iand__(self, value, /):\n return np_frontend.logical_and(self, value, out=self)\n\n def __ior__(self, value, /):\n return np_frontend.logical_or(self, value, out=self)\n\n def __ixor__(self, value, /):\n return np_frontend.logical_xor(self, value, out=self)\n\n def __imod__(self, value, /):\n return np_frontend.mod(self, value, out=self)\n\n def __invert__(self, /):\n return ivy.bitwise_invert(self.ivy_array)\n\n def __abs__(self):\n return np_frontend.absolute(self)\n\n def __array__(self, dtype=None, /):\n if not dtype:\n return self\n return np_frontend.array(self, dtype=dtype)\n\n def __array_wrap__(self, array, context=None, /):\n if context is None:\n return np_frontend.array(array)\n else:\n return np_frontend.asarray(self)\n\n def __getitem__(self, key, /):\n ivy_args = ivy.nested_map([self, key], _to_ivy_array)\n ret = ivy.get_item(*ivy_args)\n return np_frontend.ndarray(ret, _init_overload=True)\n\n def __setitem__(self, key, value, /):\n key, value = ivy.nested_map([key, value], _to_ivy_array)\n self.ivy_array[key] = value\n\n def __iter__(self):\n if self.ndim == 0:\n raise TypeError(\"iteration over a 0-d ndarray not supported\")\n for i in range(self.shape[0]):\n yield self[i]\n\n def __mod__(self, value, /):\n return np_frontend.mod(self, value, out=self)\n\n def ptp(self, *, axis=None, out=None, keepdims=False):\n xmax = self.max(axis=axis, out=out, keepdims=keepdims)\n xmin = self.min(axis=axis, out=out, keepdims=keepdims)\n return np_frontend.subtract(xmax, xmin)\n\n def __rshift__(self, value, /):\n return ivy.bitwise_right_shift(self.ivy_array, value)\n", "path": "ivy/functional/frontends/numpy/ndarray/ndarray.py" } ]
[ { "content": "# global\n\n# local\nimport ivy\nimport ivy.functional.frontends.numpy as np_frontend\nfrom ivy.functional.frontends.numpy.func_wrapper import _to_ivy_array\n\n\nclass ndarray:\n def __init__(self, shape, dtype=\"float32\", order=None, _init_overload=False):\n if isinstance(dtype, np_frontend.dtype):\n dtype = dtype.ivy_dtype\n\n # in thise case shape is actually the desired array\n if _init_overload:\n self._ivy_array = (\n ivy.array(shape) if not isinstance(shape, ivy.Array) else shape\n )\n else:\n self._ivy_array = ivy.empty(shape=shape, dtype=dtype)\n\n ivy.utils.assertions.check_elem_in_list(\n order,\n [\"C\", \"F\", None],\n message=\"order must be one of 'C', 'F'\",\n )\n if order == \"F\":\n self._f_contiguous = True\n else:\n self._f_contiguous = False\n\n def __repr__(self):\n return str(self.ivy_array.__repr__()).replace(\n \"ivy.array\", \"ivy.frontends.numpy.ndarray\"\n )\n\n # Properties #\n # ---------- #\n\n @property\n def ivy_array(self):\n return self._ivy_array\n\n @property\n def T(self):\n return np_frontend.transpose(self)\n\n @property\n def shape(self):\n return self.ivy_array.shape\n\n @property\n def size(self):\n return self.ivy_array.size\n\n @property\n def dtype(self):\n return self.ivy_array.dtype\n\n @property\n def ndim(self):\n return len(self.shape)\n\n @property\n def flat(self):\n self = self.flatten()\n return self\n\n # Setters #\n # --------#\n\n @ivy_array.setter\n def ivy_array(self, array):\n self._ivy_array = (\n ivy.array(array) if not isinstance(array, ivy.Array) else array\n )\n\n # Instance Methods #\n # ---------------- #\n\n def astype(self, dtype, order=\"K\", casting=\"unsafe\", subok=True, copy=True):\n ivy.utils.assertions.check_elem_in_list(\n order,\n [\"C\", \"F\", \"A\", \"K\"],\n message=\"order must be one of 'C', 'F', or 'A'\",\n )\n if copy and self._f_contiguous:\n ret = np_frontend.array(self.ivy_array, order=\"F\")\n else:\n ret = np_frontend.array(self.ivy_array) if copy else self\n\n dtype = np_frontend.to_ivy_dtype(dtype)\n if np_frontend.can_cast(ret, dtype, casting=casting):\n ret.ivy_array = ret.ivy_array.astype(dtype)\n else:\n raise ivy.utils.exceptions.IvyException(\n f\"Cannot cast array data from dtype('{ret.ivy_array.dtype}')\"\n f\" to dtype('{dtype}') according to the rule '{casting}'\"\n )\n if order == \"F\":\n ret._f_contiguous = True\n elif order == \"C\":\n ret._f_contiguous = False\n return ret\n\n def argmax(\n self,\n /,\n *,\n axis=None,\n out=None,\n keepdims=False,\n ):\n return np_frontend.argmax(\n self,\n axis=axis,\n out=out,\n keepdims=keepdims,\n )\n\n def reshape(self, newshape, /, *, order=\"C\"):\n ivy.utils.assertions.check_elem_in_list(\n order,\n [\"C\", \"F\", \"A\"],\n message=\"order must be one of 'C', 'F', or 'A'\",\n )\n if (order == \"A\" and self._f_contiguous) or order == \"F\":\n return np_frontend.reshape(self, newshape, order=\"F\")\n else:\n return np_frontend.reshape(self, newshape, order=\"C\")\n\n def resize(self, newshape, /, *, refcheck=True):\n return np_frontend.resize(self, newshape, refcheck)\n\n def transpose(self, axes, /):\n if axes and isinstance(axes[0], tuple):\n axes = axes[0]\n return np_frontend.transpose(self, axes=axes)\n\n def swapaxes(self, axis1, axis2, /):\n return np_frontend.swapaxes(self, axis1, axis2)\n\n def all(self, axis=None, out=None, keepdims=False, *, where=True):\n return np_frontend.all(self, axis, out, keepdims, where=where)\n\n def any(self, axis=None, out=None, keepdims=False, *, where=True):\n return np_frontend.any(self, axis, out, keepdims, where=where)\n\n def argsort(self, *, axis=-1, kind=None, order=None):\n return np_frontend.argsort(self, axis=axis, kind=kind, order=order)\n\n def mean(self, *, axis=None, dtype=None, out=None, keepdims=False, where=True):\n return np_frontend.mean(\n self,\n axis=axis,\n dtype=dtype,\n out=out,\n keepdims=keepdims,\n where=where,\n )\n\n def min(self, *, axis=None, out=None, keepdims=False, initial=None, where=True):\n return np_frontend.amin(\n self,\n axis=axis,\n out=out,\n keepdims=keepdims,\n initial=initial,\n where=where,\n )\n\n def max(self, *, axis=None, out=None, keepdims=False, initial=None, where=True):\n return np_frontend.amax(\n self,\n axis=axis,\n out=out,\n keepdims=keepdims,\n initial=initial,\n where=where,\n )\n\n def argmin(\n self,\n /,\n *,\n axis=None,\n keepdims=False,\n out=None,\n ):\n return np_frontend.argmin(\n self,\n axis=axis,\n keepdims=keepdims,\n out=out,\n )\n\n def clip(\n self,\n min,\n max,\n /,\n out=None,\n *,\n where=True,\n casting=\"same_kind\",\n order=\"K\",\n dtype=None,\n subok=True,\n ):\n return np_frontend.clip(\n self,\n min,\n max,\n out=out,\n where=where,\n casting=casting,\n order=order,\n dtype=dtype,\n subok=subok,\n )\n\n def compress(self, condition, axis=None, out=None):\n return np_frontend.compress(\n condition=condition,\n a=self,\n axis=axis,\n out=out,\n )\n\n def conj(\n self,\n /,\n out=None,\n *,\n where=True,\n casting=\"same_kind\",\n order=\"K\",\n dtype=None,\n subok=True,\n ):\n return np_frontend.conj(\n self.ivy_array,\n out=out,\n where=where,\n casting=casting,\n order=order,\n dtype=dtype,\n subok=subok,\n )\n\n def cumprod(self, *, axis=None, dtype=None, out=None):\n return np_frontend.cumprod(\n self,\n axis=axis,\n dtype=dtype,\n out=out,\n )\n\n def cumsum(self, *, axis=None, dtype=None, out=None):\n return np_frontend.cumsum(\n self,\n axis=axis,\n dtype=dtype,\n out=out,\n )\n\n def dot(self, b, out=None):\n return np_frontend.dot(self, b, out=out)\n\n def diagonal(self, *, offset=0, axis1=0, axis2=1):\n return np_frontend.diagonal(\n self,\n offset=offset,\n axis1=axis1,\n axis2=axis2,\n )\n\n def sort(self, *, axis=-1, kind=None, order=None):\n return np_frontend.sort(self, axis=axis, kind=kind, order=order)\n\n def copy(self, order=\"C\"):\n return np_frontend.copy(self, order=order)\n\n def nonzero(\n self,\n ):\n return np_frontend.nonzero(self)[0]\n\n def ravel(self, order=\"C\"):\n ivy.utils.assertions.check_elem_in_list(\n order,\n [\"C\", \"F\", \"A\", \"K\"],\n message=\"order must be one of 'C', 'F', 'A', or 'K'\",\n )\n if (order in [\"K\", \"A\"] and self._f_contiguous) or order == \"F\":\n return np_frontend.ravel(self, order=\"F\")\n else:\n return np_frontend.ravel(self, order=\"C\")\n\n def flatten(self, order=\"C\"):\n ivy.utils.assertions.check_elem_in_list(\n order,\n [\"C\", \"F\", \"A\", \"K\"],\n message=\"order must be one of 'C', 'F', 'A', or 'K'\",\n )\n if (order in [\"K\", \"A\"] and self._f_contiguous) or order == \"F\":\n return np_frontend.ravel(self, order=\"F\")\n else:\n return np_frontend.ravel(self, order=\"C\")\n\n def fill(self, num):\n return np_frontend.fill(self, num)\n\n def repeat(self, repeats, axis=None):\n return np_frontend.repeat(self, repeats, axis=axis)\n\n def searchsorted(self, v, side=\"left\", sorter=None):\n return np_frontend.searchsorted(self, v, side=side, sorter=sorter)\n\n def squeeze(self, axis=None):\n return np_frontend.squeeze(self, axis=axis)\n\n def std(\n self, axis=None, dtype=None, out=None, ddof=0, keepdims=False, *, where=True\n ):\n return np_frontend.std(\n self,\n axis=axis,\n dtype=dtype,\n out=out,\n ddof=ddof,\n keepdims=keepdims,\n where=where,\n )\n\n def tobytes(self, order=\"C\") -> bytes:\n return np_frontend.tobytes(self, order=order)\n\n def tostring(self, order=\"C\") -> bytes:\n return np_frontend.tobytes(self.data, order=order)\n\n def prod(\n self,\n *,\n axis=None,\n dtype=None,\n out=None,\n keepdims=False,\n initial=None,\n where=True,\n ):\n return np_frontend.prod(\n self,\n axis=axis,\n dtype=dtype,\n keepdims=keepdims,\n initial=initial,\n where=where,\n out=out,\n )\n\n def tofile(self, fid, /, sep=\"\", format_=\"%s\"):\n if self.ndim == 0:\n string = str(self)\n else:\n string = sep.join([str(item) for item in self.tolist()])\n with open(fid, \"w\") as f:\n f.write(string)\n\n def tolist(self) -> list:\n return self._ivy_array.to_list()\n\n def view(self):\n return np_frontend.reshape(self, tuple(self.shape))\n\n def __add__(self, value, /):\n return np_frontend.add(self, value)\n\n def __radd__(self, value, /):\n return np_frontend.add(self, value)\n\n def __sub__(self, value, /):\n return np_frontend.subtract(self, value)\n\n def __mul__(self, value, /):\n return np_frontend.multiply(self, value)\n\n def __rmul__(self, value, /):\n return np_frontend.multiply(value, self)\n\n def __truediv__(self, value, /):\n return np_frontend.true_divide(self, value)\n\n def __floordiv__(self, value, /):\n return np_frontend.floor_divide(self, value)\n\n def __rtruediv__(self, value, /):\n return np_frontend.true_divide(value, self)\n\n def __pow__(self, value, /):\n return np_frontend.power(self, value)\n\n def __and__(self, value, /):\n return np_frontend.logical_and(self, value)\n\n def __or__(self, value, /):\n return np_frontend.logical_or(self, value)\n\n def __xor__(self, value, /):\n return np_frontend.logical_xor(self, value)\n\n def __matmul__(self, value, /):\n return np_frontend.matmul(self, value)\n\n def __copy__(\n self,\n ):\n return np_frontend.copy(self)\n\n def __deepcopy__(self, memo, /):\n return self.ivy_array.__deepcopy__(memo)\n\n def __neg__(\n self,\n ):\n return np_frontend.negative(self)\n\n def __pos__(\n self,\n ):\n return np_frontend.positive(self)\n\n def __bool__(\n self,\n ):\n if isinstance(self.ivy_array, int):\n return self.ivy_array != 0\n\n temp = ivy.squeeze(ivy.asarray(self.ivy_array), axis=None)\n shape = ivy.shape(temp)\n if shape:\n raise ValueError(\n \"The truth value of an array with more than one element is ambiguous. \"\n \"Use a.any() or a.all()\"\n )\n\n return temp != 0\n\n def __ne__(self, value, /):\n return np_frontend.not_equal(self, value)\n\n def __len__(self):\n return len(self.ivy_array)\n\n def __eq__(self, value, /):\n return np_frontend.equal(self, value)\n\n def __ge__(self, value, /):\n return np_frontend.greater_equal(self, value)\n\n def __gt__(self, value, /):\n return np_frontend.greater(self, value)\n\n def __le__(self, value, /):\n return np_frontend.less_equal(self, value)\n\n def __lt__(self, value, /):\n return np_frontend.less(self, value)\n\n def __int__(\n self,\n ):\n return ivy.to_scalar(ivy.reshape(self.ivy_array, (-1,)).astype(ivy.int64))\n\n def __float__(\n self,\n ):\n return ivy.to_scalar(ivy.reshape(self.ivy_array, (-1,)).astype(ivy.float64))\n\n def __complex__(\n self,\n ):\n return ivy.to_scalar(ivy.reshape(self.ivy_array, (-1,)).astype(ivy.complex128))\n\n def __contains__(self, key, /):\n return key in ivy.reshape(self.ivy_array, -1)\n\n def __iadd__(self, value, /):\n return np_frontend.add(self, value, out=self)\n\n def __isub__(self, value, /):\n return np_frontend.subtract(self, value, out=self)\n\n def __imul__(self, value, /):\n return np_frontend.multiply(self, value, out=self)\n\n def __itruediv__(self, value, /):\n return np_frontend.true_divide(self, value, out=self)\n\n def __ifloordiv__(self, value, /):\n return np_frontend.floor_divide(self, value, out=self)\n\n def __ipow__(self, value, /):\n return np_frontend.power(self, value, out=self)\n\n def __iand__(self, value, /):\n return np_frontend.logical_and(self, value, out=self)\n\n def __ior__(self, value, /):\n return np_frontend.logical_or(self, value, out=self)\n\n def __ixor__(self, value, /):\n return np_frontend.logical_xor(self, value, out=self)\n\n def __imod__(self, value, /):\n return np_frontend.mod(self, value, out=self)\n\n def __invert__(self, /):\n return ivy.bitwise_invert(self.ivy_array)\n\n def __abs__(self):\n return np_frontend.absolute(self)\n\n def __array__(self, dtype=None, /):\n if not dtype:\n return self\n return np_frontend.array(self, dtype=dtype)\n\n def __array_wrap__(self, array, context=None, /):\n if context is None:\n return np_frontend.array(array)\n else:\n return np_frontend.asarray(self)\n\n def __getitem__(self, key, /):\n ivy_args = ivy.nested_map([self, key], _to_ivy_array)\n ret = ivy.get_item(*ivy_args)\n return np_frontend.ndarray(ret, _init_overload=True)\n\n def __setitem__(self, key, value, /):\n key, value = ivy.nested_map([key, value], _to_ivy_array)\n self.ivy_array[key] = value\n\n def __iter__(self):\n if self.ndim == 0:\n raise TypeError(\"iteration over a 0-d ndarray not supported\")\n for i in range(self.shape[0]):\n yield self[i]\n\n def __mod__(self, value, /):\n return np_frontend.mod(self, value, out=self)\n\n def ptp(self, *, axis=None, out=None, keepdims=False):\n xmax = self.max(axis=axis, out=out, keepdims=keepdims)\n xmin = self.min(axis=axis, out=out, keepdims=keepdims)\n return np_frontend.subtract(xmax, xmin)\n\n def __rshift__(self, value, /):\n return ivy.bitwise_right_shift(self.ivy_array, value)\n", "path": "ivy/functional/frontends/numpy/ndarray/ndarray.py" } ]
diff --git a/ivy/functional/frontends/numpy/ndarray/ndarray.py b/ivy/functional/frontends/numpy/ndarray/ndarray.py index eabf4c2e8531a..dae5f288c02ac 100644 --- a/ivy/functional/frontends/numpy/ndarray/ndarray.py +++ b/ivy/functional/frontends/numpy/ndarray/ndarray.py @@ -49,6 +49,10 @@ def T(self): def shape(self): return self.ivy_array.shape + @property + def size(self): + return self.ivy_array.size + @property def dtype(self): return self.ivy_array.dtype diff --git a/ivy_tests/test_ivy/test_frontends/test_numpy/test_ndarray/test_ndarray.py b/ivy_tests/test_ivy/test_frontends/test_numpy/test_ndarray/test_ndarray.py index 2bd63c64d66ea..239b2707ef1e6 100644 --- a/ivy_tests/test_ivy/test_frontends/test_numpy/test_ndarray/test_ndarray.py +++ b/ivy_tests/test_ivy/test_frontends/test_numpy/test_ndarray/test_ndarray.py @@ -92,6 +92,21 @@ def test_numpy_ndarray_property_ndim( ivy.utils.assertions.check_equal(x.ndim, data[0].ndim, as_array=False) +@given( + dtype_x=helpers.dtype_and_values( + available_dtypes=helpers.get_dtypes("valid", prune_function=False), + ret_shape=True, + ), +) +def test_numpy_ndarray_property_size( + dtype_x, +): + dtype, data, shape = dtype_x + x = ndarray(shape, dtype[0]) + x.ivy_array = data[0] + ivy.utils.assertions.check_equal(x.size, data[0].size, as_array=False) + + @given( dtype_x=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("valid", prune_function=False),
pypa__pipenv-2515
"pipenv shell" doesn't work for Python2 ##### Issue description Running "pipenv shell" for a Python 2.7 project fails. Using: * Python 2.7.15 * pip 10.0.1 * pipenv, version 2018.7.1 Seems like "subprocess.run()" has been introduced in shells.py ( in #2371 ), while this api was introduced only from Python 3. ##### Expected result A cmd shell with virtual environment active should be spawned. ##### Actual result ``` Launching subshell in virtual environment… Traceback (most recent call last): File "c:\program files\python27\lib\runpy.py", line 174, in _run_module_as_main "__main__", fname, loader, pkg_name) File "c:\program files\python27\lib\runpy.py", line 72, in _run_code exec code in run_globals File "C:\Program Files\Python27\Scripts\pipenv.exe\__main__.py", line 9, in <module> File "c:\program files\python27\lib\site-packages\pipenv\vendor\click\core.py", line 722, in __call__ return self.main(*args, **kwargs) File "c:\program files\python27\lib\site-packages\pipenv\vendor\click\core.py", line 697, in main rv = self.invoke(ctx) File "c:\program files\python27\lib\site-packages\pipenv\vendor\click\core.py", line 1066, in invoke return _process_result(sub_ctx.command.invoke(sub_ctx)) File "c:\program files\python27\lib\site-packages\pipenv\vendor\click\core.py", line 895, in invoke return ctx.invoke(self.callback, **ctx.params) File "c:\program files\python27\lib\site-packages\pipenv\vendor\click\core.py", line 535, in invoke return callback(*args, **kwargs) File "c:\program files\python27\lib\site-packages\pipenv\cli.py", line 664, in shell three=three, python=python, fancy=fancy, shell_args=shell_args, pypi_mirror=pypi_mirror File "c:\program files\python27\lib\site-packages\pipenv\core.py", line 2159, in do_shell shell.fork(*fork_args) File "c:\program files\python27\lib\site-packages\pipenv\shells.py", line 97, in fork _handover(self.cmd, self.args + list(args)) File "c:\program files\python27\lib\site-packages\pipenv\shells.py", line 62, in _handover proc = subprocess.run(args, shell=True, universal_newlines=True) AttributeError: 'module' object has no attribute 'run' ``` ##### Steps to replicate ``` [packages] [requires] python_version = "2.7" ``` $ pipenv install ------------- <details><summary>$ pipenv --support</summary> Pipenv version: `'2018.7.1'` Pipenv location: `'c:\\program files\\python27\\lib\\site-packages\\pipenv'` Python location: `'c:\\program files\\python27\\python.exe'` Other Python installations in `PATH`: - `2.7`: `C:\Program Files\Python27\python.exe` - `2.7.15`: `C:\Program Files\Python27\python.exe` PEP 508 Information: ``` {'implementation_name': 'cpython', 'implementation_version': '0', 'os_name': 'nt', 'platform_machine': 'AMD64', 'platform_python_implementation': 'CPython', 'platform_release': '10', 'platform_system': 'Windows', 'platform_version': '10.0.14393', 'python_full_version': '2.7.15', 'python_version': '2.7', 'sys_platform': 'win32'} ``` System environment variables: - `TMP` - `COMPUTERNAME` - `VS110COMNTOOLS` - `USERDOMAIN` - `PSMODULEPATH` - `COMMANDER_PATH` - `IWBPATH` - `COMMONPROGRAMFILES` - `PROCESSOR_IDENTIFIER` - `WECSDK` - `PROGRAMFILES` - `PROCESSOR_REVISION` - `PATH` - `SYSTEMROOT` - `PROGRAMFILES(X86)` - `COMSPEC` - `TEMP` - `COMMONPROGRAMFILES(X86)` - `PROCESSOR_ARCHITECTURE` - `PIPENV_VENV_IN_PROJECT` - `ALLUSERSPROFILE` - `LOCALAPPDATA` - `HOMEPATH` - `USERDOMAIN_ROAMINGPROFILE` - `VS120COMNTOOLS` - `PROGRAMW6432` - `USERNAME` - `LOGONSERVER` - `PROMPT` - `SESSIONNAME` - `PROGRAMDATA` - `SAP_IPV6_ACTIVE` - `COMMANDER_DRIVE` - `COMMANDER_EXE` - `USERDNSDOMAIN` - `PYTHONDONTWRITEBYTECODE` - `UATDATA` - `PATHEXT` - `PIP_PYTHON_PATH` - `CGROAMINGPATH` - `WINDIR` - `APPDATA` - `HOMEDRIVE` - `COMMANDER_INI` - `SYSTEMDRIVE` - `CANOE_INSTALLDIR` - `PUBLIC` - `NUMBER_OF_PROCESSORS` - `CANOE_INSTALLDIR64` - `PROCESSOR_LEVEL` - `PYTHON_HOME` - `COMMONPROGRAMW6432` - `OS` - `SNC_LIB` - `USERPROFILE` Pipenv–specific environment variables: - `PIPENV_VENV_IN_PROJECT`: `1` Debug–specific environment variables: - `PATH`: `C:\Program Files\Python27\;C:\Program Files\Python27\\Scripts;C:\Program Files (x86)\Common Files\Oracle\Java\javapath;C:\Program Files (x86)\Intel\iCLS Client\;C:\Program Files\Intel\iCLS Client\;C:\WINDOWS\system32;C:\WINDOWS;C:\WINDOWS\System32\Wbem;C:\WINDOWS\System32\WindowsPowerShell\v1.0\;C:\Program Files (x86)\WebEx\Productivity Tools;C:\Program Files (x86)\Intel\Intel(R) Management Engine Components\DAL;C:\Program Files\Intel\Intel(R) Management Engine Components\DAL;C:\Program Files (x86)\Intel\Intel(R) Management Engine Components\IPT;C:\Program Files\Intel\Intel(R) Management Engine Components\IPT;C:\Program Files\Intel\WiFi\bin\;C:\Program Files\Common Files\Intel\WirelessCommon\;C:\Program Files\Microsoft SQL Server\130\Tools\Binn\;C:\Program Files\Micro Focus\StarTeam SDK 16.0\lib;C:\Program Files\Micro Focus\StarTeam SDK 16.0\bin;C:\Program Files (x86)\Windows Kits\8.1\Windows Performance Toolkit\;C:\Program Files\Microsoft SQL Server\110\Tools\Binn\;C:\Program Files (x86)\Microsoft SDKs\TypeScript\1.0\;C:\Program Files\Microsoft SQL Server\120\Tools\Binn\;C:\Program Files\Git\cmd;C:\Users\xmnalepa\AppData\Local\Microsoft\WindowsApps;d:\Apps\Sysinternals Suite\;` --------------------------- Contents of `Pipfile` ('D:\\work\\scripts\\shell_issue\\Pipfile'): ```toml [packages] [requires] python_version = "2.7" ``` Contents of `Pipfile.lock` ('D:\\work\\scripts\\shell_issue\\Pipfile.lock'): ```json { "_meta": { "hash": { "sha256": "ae4bdd7d4157baab65ae9d0e8389a6011e6b640995372c45ec81fa5d1ddfae9f" }, "pipfile-spec": 6, "requires": { "python_version": "2.7" }, "sources": [ { "name": "pypi", "url": "https://pypi.org/simple", "verify_ssl": true } ] }, "default": {}, "develop": {} } ``` </details>
[ { "content": "import collections\nimport contextlib\nimport os\nimport signal\nimport subprocess\nimport sys\n\nfrom ._compat import get_terminal_size, Path\nfrom .environments import PIPENV_SHELL_EXPLICIT, PIPENV_SHELL, PIPENV_EMULATOR\nfrom .utils import temp_environ\nfrom .vendor import shellingham\n\n\nShellDetectionFailure = shellingham.ShellDetectionFailure\n\n\ndef _build_info(value):\n return (os.path.splitext(os.path.basename(value))[0], value)\n\n\ndef detect_info():\n if PIPENV_SHELL_EXPLICIT:\n return _build_info(PIPENV_SHELL_EXPLICIT)\n try:\n return shellingham.detect_shell()\n except (shellingham.ShellDetectionFailure, TypeError):\n if PIPENV_SHELL:\n return _build_info(PIPENV_SHELL)\n raise ShellDetectionFailure\n\n\ndef _get_activate_script(venv):\n \"\"\"Returns the string to activate a virtualenv.\n\n This is POSIX-only at the moment since the compat (pexpect-based) shell\n does not work elsewhere anyway.\n \"\"\"\n # Suffix and source command for other shells.\n # Support for fish shell.\n if PIPENV_SHELL and \"fish\" in PIPENV_SHELL:\n suffix = \".fish\"\n command = \"source\"\n # Support for csh shell.\n elif PIPENV_SHELL and \"csh\" in PIPENV_SHELL:\n suffix = \".csh\"\n command = \"source\"\n else:\n suffix = \"\"\n command = \".\"\n # Escape any spaces located within the virtualenv path to allow\n # for proper activation.\n venv_location = str(venv).replace(\" \", r\"\\ \")\n # The leading space can make history cleaner in some shells.\n return \" {2} {0}/bin/activate{1}\".format(venv_location, suffix, command)\n\n\ndef _handover(cmd, args):\n args = [cmd] + args\n if os.name != \"nt\":\n os.execvp(cmd, args)\n else:\n proc = subprocess.run(args, shell=True, universal_newlines=True)\n sys.exit(proc.returncode)\n\n\nclass Shell(object):\n def __init__(self, cmd):\n self.cmd = cmd\n self.args = []\n\n def __repr__(self):\n return '{type}(cmd={cmd!r})'.format(\n type=type(self).__name__,\n cmd=self.cmd,\n )\n\n @contextlib.contextmanager\n def inject_path(self, venv):\n with temp_environ():\n os.environ[\"PATH\"] = \"{0}{1}{2}\".format(\n os.pathsep.join(str(p.parent) for p in _iter_python(venv)),\n os.pathsep,\n os.environ[\"PATH\"],\n )\n yield\n\n def fork(self, venv, cwd, args):\n # FIXME: This isn't necessarily the correct prompt. We should read the\n # actual prompt by peeking into the activation script.\n name = os.path.basename(venv)\n os.environ[\"VIRTUAL_ENV\"] = str(venv)\n if \"PROMPT\" in os.environ:\n os.environ[\"PROMPT\"] = \"({0}) {1}\".format(name, os.environ[\"PROMPT\"])\n if \"PS1\" in os.environ:\n os.environ[\"PS1\"] = \"({0}) {1}\".format(name, os.environ[\"PS1\"])\n with self.inject_path(venv):\n os.chdir(cwd)\n _handover(self.cmd, self.args + list(args))\n\n def fork_compat(self, venv, cwd, args):\n from .vendor import pexpect\n\n # Grab current terminal dimensions to replace the hardcoded default\n # dimensions of pexpect.\n dims = get_terminal_size()\n with temp_environ():\n c = pexpect.spawn(self.cmd, [\"-i\"], dimensions=(dims.lines, dims.columns))\n c.sendline(_get_activate_script(venv))\n if args:\n c.sendline(\" \".join(args))\n\n # Handler for terminal resizing events\n # Must be defined here to have the shell process in its context, since\n # we can't pass it as an argument\n def sigwinch_passthrough(sig, data):\n dims = get_terminal_size()\n c.setwinsize(dims.lines, dims.columns)\n\n signal.signal(signal.SIGWINCH, sigwinch_passthrough)\n\n # Interact with the new shell.\n c.interact(escape_character=None)\n c.close()\n sys.exit(c.exitstatus)\n\n\nPOSSIBLE_ENV_PYTHON = [Path(\"bin\", \"python\"), Path(\"Scripts\", \"python.exe\")]\n\n\ndef _iter_python(venv):\n for path in POSSIBLE_ENV_PYTHON:\n full_path = Path(venv, path)\n if full_path.is_file():\n yield full_path\n\n\nclass Bash(Shell):\n # The usual PATH injection technique does not work with Bash.\n # https://github.com/berdario/pew/issues/58#issuecomment-102182346\n @contextlib.contextmanager\n def inject_path(self, venv):\n from ._compat import NamedTemporaryFile\n\n bashrc_path = Path.home().joinpath(\".bashrc\")\n with NamedTemporaryFile(\"w+\") as rcfile:\n if bashrc_path.is_file():\n base_rc_src = 'source \"{0}\"\\n'.format(bashrc_path.as_posix())\n rcfile.write(base_rc_src)\n\n export_path = 'export PATH=\"{0}:$PATH\"\\n'.format(\n \":\".join(python.parent.as_posix() for python in _iter_python(venv))\n )\n rcfile.write(export_path)\n rcfile.flush()\n self.args.extend([\"--rcfile\", rcfile.name])\n yield\n\n\nclass CmderEmulatedShell(Shell):\n def fork(self, venv, cwd, args):\n if cwd:\n os.environ[\"CMDER_START\"] = cwd\n super(CmderEmulatedShell, self).fork(venv, cwd, args)\n\n\nclass CmderCommandPrompt(CmderEmulatedShell):\n def fork(self, venv, cwd, args):\n rc = os.path.expandvars(\"%CMDER_ROOT%\\\\vendor\\\\init.bat\")\n if os.path.exists(rc):\n self.args.extend([\"/k\", rc])\n super(CmderCommandPrompt, self).fork(venv, cwd, args)\n\n\nclass CmderPowershell(Shell):\n def fork(self, venv, cwd, args):\n rc = os.path.expandvars(\"%CMDER_ROOT%\\\\vendor\\\\profile.ps1\")\n if os.path.exists(rc):\n self.args.extend(\n [\n \"-ExecutionPolicy\",\n \"Bypass\",\n \"-NoLogo\",\n \"-NoProfile\",\n \"-NoExit\",\n \"-Command\",\n \"Invoke-Expression '. ''{0}'''\".format(rc),\n ]\n )\n super(CmderPowershell, self).fork(venv, cwd, args)\n\n\n# Two dimensional dict. First is the shell type, second is the emulator type.\n# Example: SHELL_LOOKUP['powershell']['cmder'] => CmderPowershell.\nSHELL_LOOKUP = collections.defaultdict(\n lambda: collections.defaultdict(lambda: Shell),\n {\n \"bash\": collections.defaultdict(lambda: Bash),\n \"cmd\": collections.defaultdict(lambda: Shell, {\"cmder\": CmderCommandPrompt}),\n \"powershell\": collections.defaultdict(\n lambda: Shell, {\"cmder\": CmderPowershell}\n ),\n \"pwsh\": collections.defaultdict(lambda: Shell, {\"cmder\": CmderPowershell}),\n },\n)\n\n\ndef _detect_emulator():\n if os.environ.get(\"CMDER_ROOT\"):\n return \"cmder\"\n return \"\"\n\n\ndef choose_shell():\n emulator = PIPENV_EMULATOR or _detect_emulator()\n type_, command = detect_info()\n return SHELL_LOOKUP[type_][emulator](command)\n", "path": "pipenv/shells.py" } ]
[ { "content": "import collections\nimport contextlib\nimport os\nimport signal\nimport subprocess\nimport sys\n\nfrom ._compat import get_terminal_size, Path\nfrom .environments import PIPENV_SHELL_EXPLICIT, PIPENV_SHELL, PIPENV_EMULATOR\nfrom .utils import temp_environ\nfrom .vendor import shellingham\n\n\nShellDetectionFailure = shellingham.ShellDetectionFailure\n\n\ndef _build_info(value):\n return (os.path.splitext(os.path.basename(value))[0], value)\n\n\ndef detect_info():\n if PIPENV_SHELL_EXPLICIT:\n return _build_info(PIPENV_SHELL_EXPLICIT)\n try:\n return shellingham.detect_shell()\n except (shellingham.ShellDetectionFailure, TypeError):\n if PIPENV_SHELL:\n return _build_info(PIPENV_SHELL)\n raise ShellDetectionFailure\n\n\ndef _get_activate_script(venv):\n \"\"\"Returns the string to activate a virtualenv.\n\n This is POSIX-only at the moment since the compat (pexpect-based) shell\n does not work elsewhere anyway.\n \"\"\"\n # Suffix and source command for other shells.\n # Support for fish shell.\n if PIPENV_SHELL and \"fish\" in PIPENV_SHELL:\n suffix = \".fish\"\n command = \"source\"\n # Support for csh shell.\n elif PIPENV_SHELL and \"csh\" in PIPENV_SHELL:\n suffix = \".csh\"\n command = \"source\"\n else:\n suffix = \"\"\n command = \".\"\n # Escape any spaces located within the virtualenv path to allow\n # for proper activation.\n venv_location = str(venv).replace(\" \", r\"\\ \")\n # The leading space can make history cleaner in some shells.\n return \" {2} {0}/bin/activate{1}\".format(venv_location, suffix, command)\n\n\ndef _handover(cmd, args):\n args = [cmd] + args\n if os.name != \"nt\":\n os.execvp(cmd, args)\n else:\n sys.exit(subprocess.call(args, shell=True, universal_newlines=True))\n\n\nclass Shell(object):\n def __init__(self, cmd):\n self.cmd = cmd\n self.args = []\n\n def __repr__(self):\n return '{type}(cmd={cmd!r})'.format(\n type=type(self).__name__,\n cmd=self.cmd,\n )\n\n @contextlib.contextmanager\n def inject_path(self, venv):\n with temp_environ():\n os.environ[\"PATH\"] = \"{0}{1}{2}\".format(\n os.pathsep.join(str(p.parent) for p in _iter_python(venv)),\n os.pathsep,\n os.environ[\"PATH\"],\n )\n yield\n\n def fork(self, venv, cwd, args):\n # FIXME: This isn't necessarily the correct prompt. We should read the\n # actual prompt by peeking into the activation script.\n name = os.path.basename(venv)\n os.environ[\"VIRTUAL_ENV\"] = str(venv)\n if \"PROMPT\" in os.environ:\n os.environ[\"PROMPT\"] = \"({0}) {1}\".format(name, os.environ[\"PROMPT\"])\n if \"PS1\" in os.environ:\n os.environ[\"PS1\"] = \"({0}) {1}\".format(name, os.environ[\"PS1\"])\n with self.inject_path(venv):\n os.chdir(cwd)\n _handover(self.cmd, self.args + list(args))\n\n def fork_compat(self, venv, cwd, args):\n from .vendor import pexpect\n\n # Grab current terminal dimensions to replace the hardcoded default\n # dimensions of pexpect.\n dims = get_terminal_size()\n with temp_environ():\n c = pexpect.spawn(self.cmd, [\"-i\"], dimensions=(dims.lines, dims.columns))\n c.sendline(_get_activate_script(venv))\n if args:\n c.sendline(\" \".join(args))\n\n # Handler for terminal resizing events\n # Must be defined here to have the shell process in its context, since\n # we can't pass it as an argument\n def sigwinch_passthrough(sig, data):\n dims = get_terminal_size()\n c.setwinsize(dims.lines, dims.columns)\n\n signal.signal(signal.SIGWINCH, sigwinch_passthrough)\n\n # Interact with the new shell.\n c.interact(escape_character=None)\n c.close()\n sys.exit(c.exitstatus)\n\n\nPOSSIBLE_ENV_PYTHON = [Path(\"bin\", \"python\"), Path(\"Scripts\", \"python.exe\")]\n\n\ndef _iter_python(venv):\n for path in POSSIBLE_ENV_PYTHON:\n full_path = Path(venv, path)\n if full_path.is_file():\n yield full_path\n\n\nclass Bash(Shell):\n # The usual PATH injection technique does not work with Bash.\n # https://github.com/berdario/pew/issues/58#issuecomment-102182346\n @contextlib.contextmanager\n def inject_path(self, venv):\n from ._compat import NamedTemporaryFile\n\n bashrc_path = Path.home().joinpath(\".bashrc\")\n with NamedTemporaryFile(\"w+\") as rcfile:\n if bashrc_path.is_file():\n base_rc_src = 'source \"{0}\"\\n'.format(bashrc_path.as_posix())\n rcfile.write(base_rc_src)\n\n export_path = 'export PATH=\"{0}:$PATH\"\\n'.format(\n \":\".join(python.parent.as_posix() for python in _iter_python(venv))\n )\n rcfile.write(export_path)\n rcfile.flush()\n self.args.extend([\"--rcfile\", rcfile.name])\n yield\n\n\nclass CmderEmulatedShell(Shell):\n def fork(self, venv, cwd, args):\n if cwd:\n os.environ[\"CMDER_START\"] = cwd\n super(CmderEmulatedShell, self).fork(venv, cwd, args)\n\n\nclass CmderCommandPrompt(CmderEmulatedShell):\n def fork(self, venv, cwd, args):\n rc = os.path.expandvars(\"%CMDER_ROOT%\\\\vendor\\\\init.bat\")\n if os.path.exists(rc):\n self.args.extend([\"/k\", rc])\n super(CmderCommandPrompt, self).fork(venv, cwd, args)\n\n\nclass CmderPowershell(Shell):\n def fork(self, venv, cwd, args):\n rc = os.path.expandvars(\"%CMDER_ROOT%\\\\vendor\\\\profile.ps1\")\n if os.path.exists(rc):\n self.args.extend(\n [\n \"-ExecutionPolicy\",\n \"Bypass\",\n \"-NoLogo\",\n \"-NoProfile\",\n \"-NoExit\",\n \"-Command\",\n \"Invoke-Expression '. ''{0}'''\".format(rc),\n ]\n )\n super(CmderPowershell, self).fork(venv, cwd, args)\n\n\n# Two dimensional dict. First is the shell type, second is the emulator type.\n# Example: SHELL_LOOKUP['powershell']['cmder'] => CmderPowershell.\nSHELL_LOOKUP = collections.defaultdict(\n lambda: collections.defaultdict(lambda: Shell),\n {\n \"bash\": collections.defaultdict(lambda: Bash),\n \"cmd\": collections.defaultdict(lambda: Shell, {\"cmder\": CmderCommandPrompt}),\n \"powershell\": collections.defaultdict(\n lambda: Shell, {\"cmder\": CmderPowershell}\n ),\n \"pwsh\": collections.defaultdict(lambda: Shell, {\"cmder\": CmderPowershell}),\n },\n)\n\n\ndef _detect_emulator():\n if os.environ.get(\"CMDER_ROOT\"):\n return \"cmder\"\n return \"\"\n\n\ndef choose_shell():\n emulator = PIPENV_EMULATOR or _detect_emulator()\n type_, command = detect_info()\n return SHELL_LOOKUP[type_][emulator](command)\n", "path": "pipenv/shells.py" } ]
diff --git a/pipenv/shells.py b/pipenv/shells.py index 5961310c34..0081ef5d12 100644 --- a/pipenv/shells.py +++ b/pipenv/shells.py @@ -59,8 +59,7 @@ def _handover(cmd, args): if os.name != "nt": os.execvp(cmd, args) else: - proc = subprocess.run(args, shell=True, universal_newlines=True) - sys.exit(proc.returncode) + sys.exit(subprocess.call(args, shell=True, universal_newlines=True)) class Shell(object):
cupy__cupy-3570
cupy.percentile only calculates integer percentiles when the input data is an integer. This seems to be caused by a cast of the percentiles array `q` to the same type as the input array `a` in the cupy.percentile source : https://github.com/cupy/cupy/blob/adfcc44bc9a17886a340cd85b7c9ebadd94b38a1/cupy/statistics/order.py#L189 Example code to reproduce the issue: `cupy.percentile(cupy.arange(1001).astype(cupy.int16),[98, 99, 99.9, 100]).get()` `array([ 980., 990., 990., 1000.])` `cupy.percentile(cupy.arange(1001).astype(cupy.float16),[98, 99, 99.9, 100]).get()` `array([ 980., 990., 999., 1000.])` For comparison the numpy version always calculates correctly: `numpy.percentile(numpy.arange(1001).astype(numpy.int16),[98, 99, 99.9, 100])` `array([ 980., 990., 999., 1000.])` Cupy configuration: CuPy Version : 7.6.0 CUDA Root : C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v10.2 CUDA Build Version : 10020 CUDA Driver Version : 10020 CUDA Runtime Version : 10020
[ { "content": "import warnings\n\nimport cupy\nfrom cupy import core\nfrom cupy.core import _routines_statistics as _statistics\nfrom cupy.core import _fusion_thread_local\nfrom cupy.logic import content\n\n\ndef amin(a, axis=None, out=None, keepdims=False):\n \"\"\"Returns the minimum of an array or the minimum along an axis.\n\n .. note::\n\n When at least one element is NaN, the corresponding min value will be\n NaN.\n\n Args:\n a (cupy.ndarray): Array to take the minimum.\n axis (int): Along which axis to take the minimum. The flattened array\n is used by default.\n out (cupy.ndarray): Output array.\n keepdims (bool): If ``True``, the axis is remained as an axis of\n size one.\n\n Returns:\n cupy.ndarray: The minimum of ``a``, along the axis if specified.\n\n .. seealso:: :func:`numpy.amin`\n\n \"\"\"\n if _fusion_thread_local.is_fusing():\n if keepdims:\n raise NotImplementedError(\n 'cupy.amin does not support `keepdims` in fusion yet.')\n return _fusion_thread_local.call_reduction(\n _statistics.amin, a, axis=axis, out=out)\n\n # TODO(okuta): check type\n return a.min(axis=axis, out=out, keepdims=keepdims)\n\n\ndef amax(a, axis=None, out=None, keepdims=False):\n \"\"\"Returns the maximum of an array or the maximum along an axis.\n\n .. note::\n\n When at least one element is NaN, the corresponding min value will be\n NaN.\n\n Args:\n a (cupy.ndarray): Array to take the maximum.\n axis (int): Along which axis to take the maximum. The flattened array\n is used by default.\n out (cupy.ndarray): Output array.\n keepdims (bool): If ``True``, the axis is remained as an axis of\n size one.\n\n Returns:\n cupy.ndarray: The maximum of ``a``, along the axis if specified.\n\n .. seealso:: :func:`numpy.amax`\n\n \"\"\"\n if _fusion_thread_local.is_fusing():\n if keepdims:\n raise NotImplementedError(\n 'cupy.amax does not support `keepdims` in fusion yet.')\n return _fusion_thread_local.call_reduction(\n _statistics.amax, a, axis=axis, out=out)\n\n # TODO(okuta): check type\n return a.max(axis=axis, out=out, keepdims=keepdims)\n\n\ndef nanmin(a, axis=None, out=None, keepdims=False):\n \"\"\"Returns the minimum of an array along an axis ignoring NaN.\n\n When there is a slice whose elements are all NaN, a :class:`RuntimeWarning`\n is raised and NaN is returned.\n\n Args:\n a (cupy.ndarray): Array to take the minimum.\n axis (int): Along which axis to take the minimum. The flattened array\n is used by default.\n out (cupy.ndarray): Output array.\n keepdims (bool): If ``True``, the axis is remained as an axis of\n size one.\n\n Returns:\n cupy.ndarray: The minimum of ``a``, along the axis if specified.\n\n .. warning::\n\n This function may synchronize the device.\n\n .. seealso:: :func:`numpy.nanmin`\n\n \"\"\"\n # TODO(niboshi): Avoid synchronization.\n res = core.nanmin(a, axis=axis, out=out, keepdims=keepdims)\n if content.isnan(res).any(): # synchronize!\n warnings.warn('All-NaN slice encountered', RuntimeWarning)\n return res\n\n\ndef nanmax(a, axis=None, out=None, keepdims=False):\n \"\"\"Returns the maximum of an array along an axis ignoring NaN.\n\n When there is a slice whose elements are all NaN, a :class:`RuntimeWarning`\n is raised and NaN is returned.\n\n Args:\n a (cupy.ndarray): Array to take the maximum.\n axis (int): Along which axis to take the maximum. The flattened array\n is used by default.\n out (cupy.ndarray): Output array.\n keepdims (bool): If ``True``, the axis is remained as an axis of\n size one.\n\n Returns:\n cupy.ndarray: The maximum of ``a``, along the axis if specified.\n\n .. warning::\n\n This function may synchronize the device.\n\n .. seealso:: :func:`numpy.nanmax`\n\n \"\"\"\n # TODO(niboshi): Avoid synchronization.\n res = core.nanmax(a, axis=axis, out=out, keepdims=keepdims)\n if content.isnan(res).any(): # synchronize!\n warnings.warn('All-NaN slice encountered', RuntimeWarning)\n return res\n\n\ndef ptp(a, axis=None, out=None, keepdims=False):\n \"\"\"Returns the range of values (maximum - minimum) along an axis.\n\n .. note::\n\n The name of the function comes from the acronym for 'peak to peak'.\n\n When at least one element is NaN, the corresponding ptp value will be\n NaN.\n\n Args:\n a (cupy.ndarray): Array over which to take the range.\n axis (int): Axis along which to take the minimum. The flattened\n array is used by default.\n out (cupy.ndarray): Output array.\n keepdims (bool): If ``True``, the axis is retained as an axis of\n size one.\n\n Returns:\n cupy.ndarray: The minimum of ``a``, along the axis if specified.\n\n .. seealso:: :func:`numpy.amin`\n\n \"\"\"\n return a.ptp(axis=axis, out=out, keepdims=keepdims)\n\n\ndef percentile(a, q, axis=None, out=None, interpolation='linear',\n keepdims=False):\n \"\"\"Computes the q-th percentile of the data along the specified axis.\n\n Args:\n a (cupy.ndarray): Array for which to compute percentiles.\n q (float, tuple of floats or cupy.ndarray): Percentiles to compute\n in the range between 0 and 100 inclusive.\n axis (int or tuple of ints): Along which axis or axes to compute the\n percentiles. The flattened array is used by default.\n out (cupy.ndarray): Output array.\n interpolation (str): Interpolation method when a quantile lies between\n two data points. ``linear`` interpolation is used by default.\n Supported interpolations are``lower``, ``higher``, ``midpoint``,\n ``nearest`` and ``linear``.\n keepdims (bool): If ``True``, the axis is remained as an axis of\n size one.\n\n Returns:\n cupy.ndarray: The percentiles of ``a``, along the axis if specified.\n\n .. seealso:: :func:`numpy.percentile`\n\n \"\"\"\n q = cupy.asarray(q, dtype=a.dtype)\n if q.ndim == 0:\n q = q[None]\n zerod = True\n else:\n zerod = False\n if q.ndim > 1:\n raise ValueError('Expected q to have a dimension of 1.\\n'\n 'Actual: {0} != 1'.format(q.ndim))\n\n if keepdims:\n if axis is None:\n keepdim = (1,) * a.ndim\n else:\n keepdim = list(a.shape)\n for ax in axis:\n keepdim[ax % a.ndim] = 1\n keepdim = tuple(keepdim)\n\n # Copy a since we need it sorted but without modifying the original array\n if isinstance(axis, int):\n axis = axis,\n if axis is None:\n ap = a.flatten()\n nkeep = 0\n else:\n # Reduce axes from a and put them last\n axis = tuple(ax % a.ndim for ax in axis)\n keep = set(range(a.ndim)) - set(axis)\n nkeep = len(keep)\n for i, s in enumerate(sorted(keep)):\n a = a.swapaxes(i, s)\n ap = a.reshape(a.shape[:nkeep] + (-1,)).copy()\n\n axis = -1\n ap.sort(axis=axis)\n Nx = ap.shape[axis]\n indices = q * 0.01 * (Nx - 1.) # percents to decimals\n\n if interpolation == 'lower':\n indices = cupy.floor(indices).astype(cupy.int32)\n elif interpolation == 'higher':\n indices = cupy.ceil(indices).astype(cupy.int32)\n elif interpolation == 'midpoint':\n indices = 0.5 * (cupy.floor(indices) + cupy.ceil(indices))\n elif interpolation == 'nearest':\n # TODO(hvy): Implement nearest using around\n raise ValueError('\\'nearest\\' interpolation is not yet supported. '\n 'Please use any other interpolation method.')\n elif interpolation == 'linear':\n pass\n else:\n raise ValueError('Unexpected interpolation method.\\n'\n 'Actual: \\'{0}\\' not in (\\'linear\\', \\'lower\\', '\n '\\'higher\\', \\'midpoint\\')'.format(interpolation))\n\n if indices.dtype == cupy.int32:\n ret = cupy.rollaxis(ap, axis)\n ret = ret.take(indices, axis=0, out=out)\n else:\n if out is None:\n ret = cupy.empty(ap.shape[:-1] + q.shape, dtype=cupy.float64)\n else:\n ret = cupy.rollaxis(out, 0, out.ndim)\n\n cupy.ElementwiseKernel(\n 'S idx, raw T a, raw int32 offset', 'U ret',\n '''\n ptrdiff_t idx_below = floor(idx);\n U weight_above = idx - idx_below;\n\n ptrdiff_t offset_i = _ind.get()[0] * offset;\n ret = a[offset_i + idx_below] * (1.0 - weight_above)\n + a[offset_i + idx_below + 1] * weight_above;\n ''',\n 'percentile_weightnening'\n )(indices, ap, ap.shape[-1] if ap.ndim > 1 else 0, ret)\n ret = cupy.rollaxis(ret, -1) # Roll q dimension back to first axis\n\n if zerod:\n ret = ret.squeeze(0)\n if keepdims:\n if q.size > 1:\n keepdim = (-1,) + keepdim\n ret = ret.reshape(keepdim)\n\n return core._internal_ascontiguousarray(ret)\n", "path": "cupy/statistics/order.py" } ]
[ { "content": "import warnings\n\nimport cupy\nfrom cupy import core\nfrom cupy.core import _routines_statistics as _statistics\nfrom cupy.core import _fusion_thread_local\nfrom cupy.logic import content\n\n\ndef amin(a, axis=None, out=None, keepdims=False):\n \"\"\"Returns the minimum of an array or the minimum along an axis.\n\n .. note::\n\n When at least one element is NaN, the corresponding min value will be\n NaN.\n\n Args:\n a (cupy.ndarray): Array to take the minimum.\n axis (int): Along which axis to take the minimum. The flattened array\n is used by default.\n out (cupy.ndarray): Output array.\n keepdims (bool): If ``True``, the axis is remained as an axis of\n size one.\n\n Returns:\n cupy.ndarray: The minimum of ``a``, along the axis if specified.\n\n .. seealso:: :func:`numpy.amin`\n\n \"\"\"\n if _fusion_thread_local.is_fusing():\n if keepdims:\n raise NotImplementedError(\n 'cupy.amin does not support `keepdims` in fusion yet.')\n return _fusion_thread_local.call_reduction(\n _statistics.amin, a, axis=axis, out=out)\n\n # TODO(okuta): check type\n return a.min(axis=axis, out=out, keepdims=keepdims)\n\n\ndef amax(a, axis=None, out=None, keepdims=False):\n \"\"\"Returns the maximum of an array or the maximum along an axis.\n\n .. note::\n\n When at least one element is NaN, the corresponding min value will be\n NaN.\n\n Args:\n a (cupy.ndarray): Array to take the maximum.\n axis (int): Along which axis to take the maximum. The flattened array\n is used by default.\n out (cupy.ndarray): Output array.\n keepdims (bool): If ``True``, the axis is remained as an axis of\n size one.\n\n Returns:\n cupy.ndarray: The maximum of ``a``, along the axis if specified.\n\n .. seealso:: :func:`numpy.amax`\n\n \"\"\"\n if _fusion_thread_local.is_fusing():\n if keepdims:\n raise NotImplementedError(\n 'cupy.amax does not support `keepdims` in fusion yet.')\n return _fusion_thread_local.call_reduction(\n _statistics.amax, a, axis=axis, out=out)\n\n # TODO(okuta): check type\n return a.max(axis=axis, out=out, keepdims=keepdims)\n\n\ndef nanmin(a, axis=None, out=None, keepdims=False):\n \"\"\"Returns the minimum of an array along an axis ignoring NaN.\n\n When there is a slice whose elements are all NaN, a :class:`RuntimeWarning`\n is raised and NaN is returned.\n\n Args:\n a (cupy.ndarray): Array to take the minimum.\n axis (int): Along which axis to take the minimum. The flattened array\n is used by default.\n out (cupy.ndarray): Output array.\n keepdims (bool): If ``True``, the axis is remained as an axis of\n size one.\n\n Returns:\n cupy.ndarray: The minimum of ``a``, along the axis if specified.\n\n .. warning::\n\n This function may synchronize the device.\n\n .. seealso:: :func:`numpy.nanmin`\n\n \"\"\"\n # TODO(niboshi): Avoid synchronization.\n res = core.nanmin(a, axis=axis, out=out, keepdims=keepdims)\n if content.isnan(res).any(): # synchronize!\n warnings.warn('All-NaN slice encountered', RuntimeWarning)\n return res\n\n\ndef nanmax(a, axis=None, out=None, keepdims=False):\n \"\"\"Returns the maximum of an array along an axis ignoring NaN.\n\n When there is a slice whose elements are all NaN, a :class:`RuntimeWarning`\n is raised and NaN is returned.\n\n Args:\n a (cupy.ndarray): Array to take the maximum.\n axis (int): Along which axis to take the maximum. The flattened array\n is used by default.\n out (cupy.ndarray): Output array.\n keepdims (bool): If ``True``, the axis is remained as an axis of\n size one.\n\n Returns:\n cupy.ndarray: The maximum of ``a``, along the axis if specified.\n\n .. warning::\n\n This function may synchronize the device.\n\n .. seealso:: :func:`numpy.nanmax`\n\n \"\"\"\n # TODO(niboshi): Avoid synchronization.\n res = core.nanmax(a, axis=axis, out=out, keepdims=keepdims)\n if content.isnan(res).any(): # synchronize!\n warnings.warn('All-NaN slice encountered', RuntimeWarning)\n return res\n\n\ndef ptp(a, axis=None, out=None, keepdims=False):\n \"\"\"Returns the range of values (maximum - minimum) along an axis.\n\n .. note::\n\n The name of the function comes from the acronym for 'peak to peak'.\n\n When at least one element is NaN, the corresponding ptp value will be\n NaN.\n\n Args:\n a (cupy.ndarray): Array over which to take the range.\n axis (int): Axis along which to take the minimum. The flattened\n array is used by default.\n out (cupy.ndarray): Output array.\n keepdims (bool): If ``True``, the axis is retained as an axis of\n size one.\n\n Returns:\n cupy.ndarray: The minimum of ``a``, along the axis if specified.\n\n .. seealso:: :func:`numpy.amin`\n\n \"\"\"\n return a.ptp(axis=axis, out=out, keepdims=keepdims)\n\n\ndef percentile(a, q, axis=None, out=None, interpolation='linear',\n keepdims=False):\n \"\"\"Computes the q-th percentile of the data along the specified axis.\n\n Args:\n a (cupy.ndarray): Array for which to compute percentiles.\n q (float, tuple of floats or cupy.ndarray): Percentiles to compute\n in the range between 0 and 100 inclusive.\n axis (int or tuple of ints): Along which axis or axes to compute the\n percentiles. The flattened array is used by default.\n out (cupy.ndarray): Output array.\n interpolation (str): Interpolation method when a quantile lies between\n two data points. ``linear`` interpolation is used by default.\n Supported interpolations are``lower``, ``higher``, ``midpoint``,\n ``nearest`` and ``linear``.\n keepdims (bool): If ``True``, the axis is remained as an axis of\n size one.\n\n Returns:\n cupy.ndarray: The percentiles of ``a``, along the axis if specified.\n\n .. seealso:: :func:`numpy.percentile`\n\n \"\"\"\n if not isinstance(q, cupy.ndarray):\n q = cupy.asarray(q, dtype='d')\n if q.ndim == 0:\n q = q[None]\n zerod = True\n else:\n zerod = False\n if q.ndim > 1:\n raise ValueError('Expected q to have a dimension of 1.\\n'\n 'Actual: {0} != 1'.format(q.ndim))\n\n if keepdims:\n if axis is None:\n keepdim = (1,) * a.ndim\n else:\n keepdim = list(a.shape)\n for ax in axis:\n keepdim[ax % a.ndim] = 1\n keepdim = tuple(keepdim)\n\n # Copy a since we need it sorted but without modifying the original array\n if isinstance(axis, int):\n axis = axis,\n if axis is None:\n ap = a.flatten()\n nkeep = 0\n else:\n # Reduce axes from a and put them last\n axis = tuple(ax % a.ndim for ax in axis)\n keep = set(range(a.ndim)) - set(axis)\n nkeep = len(keep)\n for i, s in enumerate(sorted(keep)):\n a = a.swapaxes(i, s)\n ap = a.reshape(a.shape[:nkeep] + (-1,)).copy()\n\n axis = -1\n ap.sort(axis=axis)\n Nx = ap.shape[axis]\n indices = q * 0.01 * (Nx - 1.) # percents to decimals\n\n if interpolation == 'lower':\n indices = cupy.floor(indices).astype(cupy.int32)\n elif interpolation == 'higher':\n indices = cupy.ceil(indices).astype(cupy.int32)\n elif interpolation == 'midpoint':\n indices = 0.5 * (cupy.floor(indices) + cupy.ceil(indices))\n elif interpolation == 'nearest':\n # TODO(hvy): Implement nearest using around\n raise ValueError('\\'nearest\\' interpolation is not yet supported. '\n 'Please use any other interpolation method.')\n elif interpolation == 'linear':\n pass\n else:\n raise ValueError('Unexpected interpolation method.\\n'\n 'Actual: \\'{0}\\' not in (\\'linear\\', \\'lower\\', '\n '\\'higher\\', \\'midpoint\\')'.format(interpolation))\n\n if indices.dtype == cupy.int32:\n ret = cupy.rollaxis(ap, axis)\n ret = ret.take(indices, axis=0, out=out)\n else:\n if out is None:\n ret = cupy.empty(ap.shape[:-1] + q.shape, dtype=cupy.float64)\n else:\n ret = cupy.rollaxis(out, 0, out.ndim)\n\n cupy.ElementwiseKernel(\n 'S idx, raw T a, raw int32 offset', 'U ret',\n '''\n ptrdiff_t idx_below = floor(idx);\n U weight_above = idx - idx_below;\n\n ptrdiff_t offset_i = _ind.get()[0] * offset;\n ret = a[offset_i + idx_below] * (1.0 - weight_above)\n + a[offset_i + idx_below + 1] * weight_above;\n ''',\n 'percentile_weightnening'\n )(indices, ap, ap.shape[-1] if ap.ndim > 1 else 0, ret)\n ret = cupy.rollaxis(ret, -1) # Roll q dimension back to first axis\n\n if zerod:\n ret = ret.squeeze(0)\n if keepdims:\n if q.size > 1:\n keepdim = (-1,) + keepdim\n ret = ret.reshape(keepdim)\n\n return core._internal_ascontiguousarray(ret)\n", "path": "cupy/statistics/order.py" } ]
diff --git a/cupy/statistics/order.py b/cupy/statistics/order.py index 77d29577046..2558876489f 100644 --- a/cupy/statistics/order.py +++ b/cupy/statistics/order.py @@ -186,7 +186,8 @@ def percentile(a, q, axis=None, out=None, interpolation='linear', .. seealso:: :func:`numpy.percentile` """ - q = cupy.asarray(q, dtype=a.dtype) + if not isinstance(q, cupy.ndarray): + q = cupy.asarray(q, dtype='d') if q.ndim == 0: q = q[None] zerod = True diff --git a/tests/cupy_tests/statics_tests/test_order.py b/tests/cupy_tests/statics_tests/test_order.py index 3f3f13ee62d..b9b90486cb7 100644 --- a/tests/cupy_tests/statics_tests/test_order.py +++ b/tests/cupy_tests/statics_tests/test_order.py @@ -31,6 +31,14 @@ def test_percentile_defaults(self, xp, dtype, interpolation): q = testing.shaped_random((3,), xp, dtype=dtype, scale=100) return xp.percentile(a, q, interpolation=interpolation) + @for_all_interpolations() + @testing.for_all_dtypes(no_float16=True, no_bool=True, no_complex=True) + @testing.numpy_cupy_allclose() + def test_percentile_q_list(self, xp, dtype, interpolation): + a = testing.shaped_arange((1001,), xp, dtype) + q = [99, 99.9] + return xp.percentile(a, q, interpolation=interpolation) + @for_all_interpolations() @testing.for_all_dtypes(no_float16=True, no_bool=True, no_complex=True) @testing.numpy_cupy_allclose(rtol=1e-6)
beetbox__beets-806
mpdstats: single or last song isn't rated and counted The `mpdstats` plugin won't update `play_count`+`rating` for the last (or only) song in the playlist. (paging @pscn and @kljohann) mpdstats: single or last song isn't rated and counted The `mpdstats` plugin won't update `play_count`+`rating` for the last (or only) song in the playlist. (paging @pscn and @kljohann)
[ { "content": "# coding=utf-8\n# This file is part of beets.\n# Copyright 2013, Peter Schnebel and Johann Klähn.\n#\n# Permission is hereby granted, free of charge, to any person obtaining\n# a copy of this software and associated documentation files (the\n# \"Software\"), to deal in the Software without restriction, including\n# without limitation the rights to use, copy, modify, merge, publish,\n# distribute, sublicense, and/or sell copies of the Software, and to\n# permit persons to whom the Software is furnished to do so, subject to\n# the following conditions:\n#\n# The above copyright notice and this permission notice shall be\n# included in all copies or substantial portions of the Software.\n\nimport logging\nimport mpd\nimport socket\nimport select\nimport time\nimport os\n\nfrom beets import ui\nfrom beets import config\nfrom beets import plugins\nfrom beets import library\nfrom beets.util import displayable_path\n\nlog = logging.getLogger('beets')\n\n# If we lose the connection, how many times do we want to retry and how\n# much time should we wait between retries?\nRETRIES = 10\nRETRY_INTERVAL = 5\n\n\ndef is_url(path):\n \"\"\"Try to determine if the path is an URL.\n \"\"\"\n return path.split('://', 1)[0] in ['http', 'https']\n\n\n# Use the MPDClient internals to get unicode.\n# see http://www.tarmack.eu/code/mpdunicode.py for the general idea\nclass MPDClient(mpd.MPDClient):\n def _write_command(self, command, args=[]):\n args = [unicode(arg).encode('utf-8') for arg in args]\n super(MPDClient, self)._write_command(command, args)\n\n def _read_line(self):\n line = super(MPDClient, self)._read_line()\n if line is not None:\n return line.decode('utf-8')\n return None\n\n\nclass MPDClientWrapper(object):\n def __init__(self):\n self.music_directory = (\n config['mpdstats']['music_directory'].get(unicode))\n\n self.client = MPDClient()\n\n def connect(self):\n \"\"\"Connect to the MPD.\n \"\"\"\n host = config['mpd']['host'].get(unicode)\n port = config['mpd']['port'].get(int)\n\n if host[0] in ['/', '~']:\n host = os.path.expanduser(host)\n\n log.info(u'mpdstats: connecting to {0}:{1}'.format(host, port))\n try:\n self.client.connect(host, port)\n except socket.error as e:\n raise ui.UserError('could not connect to MPD: {0}'.format(e))\n\n password = config['mpd']['password'].get(unicode)\n if password:\n try:\n self.client.password(password)\n except mpd.CommandError as e:\n raise ui.UserError(\n 'could not authenticate to MPD: {0}'.format(e)\n )\n\n def disconnect(self):\n \"\"\"Disconnect from the MPD.\n \"\"\"\n self.client.close()\n self.client.disconnect()\n\n def get(self, command, retries=RETRIES):\n \"\"\"Wrapper for requests to the MPD server. Tries to re-connect if the\n connection was lost (f.ex. during MPD's library refresh).\n \"\"\"\n try:\n return getattr(self.client, command)()\n except (select.error, mpd.ConnectionError) as err:\n log.error(u'mpdstats: {0}'.format(err))\n\n if retries <= 0:\n # if we exited without breaking, we couldn't reconnect in time :(\n raise ui.UserError(u'communication with MPD server failed')\n\n time.sleep(RETRY_INTERVAL)\n\n try:\n self.disconnect()\n except mpd.ConnectionError:\n pass\n\n self.connect()\n return self.get(command, retries=retries - 1)\n\n def playlist(self):\n \"\"\"Return the currently active playlist. Prefixes paths with the\n music_directory, to get the absolute path.\n \"\"\"\n result = {}\n for entry in self.get('playlistinfo'):\n if not is_url(entry['file']):\n result[entry['id']] = os.path.join(\n self.music_directory, entry['file'])\n else:\n result[entry['id']] = entry['file']\n return result\n\n def status(self):\n \"\"\"Return the current status of the MPD.\n \"\"\"\n return self.get('status')\n\n def events(self):\n \"\"\"Return list of events. This may block a long time while waiting for\n an answer from MPD.\n \"\"\"\n return self.get('idle')\n\n\nclass MPDStats(object):\n def __init__(self, lib):\n self.lib = lib\n\n self.do_rating = config['mpdstats']['rating'].get(bool)\n self.rating_mix = config['mpdstats']['rating_mix'].get(float)\n self.time_threshold = 10.0 # TODO: maybe add config option?\n\n self.now_playing = None\n self.mpd = MPDClientWrapper()\n\n def rating(self, play_count, skip_count, rating, skipped):\n \"\"\"Calculate a new rating for a song based on play count, skip count,\n old rating and the fact if it was skipped or not.\n \"\"\"\n if skipped:\n rolling = (rating - rating / 2.0)\n else:\n rolling = (rating + (1.0 - rating) / 2.0)\n stable = (play_count + 1.0) / (play_count + skip_count + 2.0)\n return (self.rating_mix * stable\n + (1.0 - self.rating_mix) * rolling)\n\n def get_item(self, path):\n \"\"\"Return the beets item related to path.\n \"\"\"\n query = library.PathQuery('path', path)\n item = self.lib.items(query).get()\n if item:\n return item\n else:\n log.info(u'mpdstats: item not found: {0}'.format(\n displayable_path(path)\n ))\n\n @staticmethod\n def update_item(item, attribute, value=None, increment=None):\n \"\"\"Update the beets item. Set attribute to value or increment the value\n of attribute. If the increment argument is used the value is cast to\n the corresponding type.\n \"\"\"\n if item is None:\n return\n\n if increment is not None:\n item.load()\n value = type(increment)(item.get(attribute, 0)) + increment\n\n if value is not None:\n item[attribute] = value\n item.store()\n\n log.debug(u'mpdstats: updated: {0} = {1} [{2}]'.format(\n attribute,\n item[attribute],\n displayable_path(item.path),\n ))\n\n def update_rating(self, item, skipped):\n \"\"\"Update the rating for a beets item.\n \"\"\"\n item.load()\n rating = self.rating(\n int(item.get('play_count', 0)),\n int(item.get('skip_count', 0)),\n float(item.get('rating', 0.5)),\n skipped)\n\n self.update_item(item, 'rating', rating)\n\n def handle_song_change(self, song):\n \"\"\"Determine if a song was skipped or not and update its attributes.\n To this end the difference between the song's supposed end time\n and the current time is calculated. If it's greater than a threshold,\n the song is considered skipped.\n \"\"\"\n diff = abs(song['remaining'] - (time.time() - song['started']))\n\n skipped = diff >= self.time_threshold\n\n if skipped:\n self.handle_skipped(song)\n else:\n self.handle_played(song)\n\n if self.do_rating:\n self.update_rating(song['beets_item'], skipped)\n\n def handle_played(self, song):\n \"\"\"Updates the play count of a song.\n \"\"\"\n self.update_item(song['beets_item'], 'play_count', increment=1)\n log.info(u'mpdstats: played {0}'.format(\n displayable_path(song['path'])\n ))\n\n def handle_skipped(self, song):\n \"\"\"Updates the skip count of a song.\n \"\"\"\n self.update_item(song['beets_item'], 'skip_count', increment=1)\n log.info(u'mpdstats: skipped {0}'.format(\n displayable_path(song['path'])\n ))\n\n def on_stop(self, status):\n log.info(u'mpdstats: stop')\n self.now_playing = None\n\n def on_pause(self, status):\n log.info(u'mpdstats: pause')\n self.now_playing = None\n\n def on_play(self, status):\n playlist = self.mpd.playlist()\n path = playlist.get(status['songid'])\n\n if not path:\n return\n\n if is_url(path):\n log.info(u'mpdstats: playing stream {0}'.format(\n displayable_path(path)\n ))\n return\n\n played, duration = map(int, status['time'].split(':', 1))\n remaining = duration - played\n\n if self.now_playing and self.now_playing['path'] != path:\n self.handle_song_change(self.now_playing)\n\n log.info(u'mpdstats: playing {0}'.format(\n displayable_path(path)\n ))\n\n self.now_playing = {\n 'started': time.time(),\n 'remaining': remaining,\n 'path': path,\n 'beets_item': self.get_item(path),\n }\n\n self.update_item(self.now_playing['beets_item'],\n 'last_played', value=int(time.time()))\n\n def run(self):\n self.mpd.connect()\n events = ['player']\n\n while True:\n if 'player' in events:\n status = self.mpd.status()\n\n handler = getattr(self, 'on_' + status['state'], None)\n\n if handler:\n handler(status)\n else:\n log.debug(u'mpdstats: unhandled status \"{0}\"'.\n format(status))\n\n events = self.mpd.events()\n\n\nclass MPDStatsPlugin(plugins.BeetsPlugin):\n def __init__(self):\n super(MPDStatsPlugin, self).__init__()\n self.config.add({\n 'music_directory': config['directory'].as_filename(),\n 'rating': True,\n 'rating_mix': 0.75,\n })\n config['mpd'].add({\n 'host': u'localhost',\n 'port': 6600,\n 'password': u'',\n })\n\n def commands(self):\n cmd = ui.Subcommand(\n 'mpdstats',\n help='run a MPD client to gather play statistics')\n cmd.parser.add_option(\n '--host', dest='host', type='string',\n help='set the hostname of the server to connect to')\n cmd.parser.add_option(\n '--port', dest='port', type='int',\n help='set the port of the MPD server to connect to')\n cmd.parser.add_option(\n '--password', dest='password', type='string',\n help='set the password of the MPD server to connect to')\n\n def func(lib, opts, args):\n self.config.set_args(opts)\n\n # Overrides for MPD settings.\n if opts.host:\n config['mpd']['host'] = opts.host.decode('utf8')\n if opts.port:\n config['mpd']['host'] = int(opts.port)\n if opts.password:\n config['mpd']['password'] = opts.password.decode('utf8')\n\n try:\n MPDStats(lib).run()\n except KeyboardInterrupt:\n pass\n\n cmd.func = func\n return [cmd]\n", "path": "beetsplug/mpdstats.py" } ]
[ { "content": "# coding=utf-8\n# This file is part of beets.\n# Copyright 2013, Peter Schnebel and Johann Klähn.\n#\n# Permission is hereby granted, free of charge, to any person obtaining\n# a copy of this software and associated documentation files (the\n# \"Software\"), to deal in the Software without restriction, including\n# without limitation the rights to use, copy, modify, merge, publish,\n# distribute, sublicense, and/or sell copies of the Software, and to\n# permit persons to whom the Software is furnished to do so, subject to\n# the following conditions:\n#\n# The above copyright notice and this permission notice shall be\n# included in all copies or substantial portions of the Software.\n\nimport logging\nimport mpd\nimport socket\nimport select\nimport time\nimport os\n\nfrom beets import ui\nfrom beets import config\nfrom beets import plugins\nfrom beets import library\nfrom beets.util import displayable_path\n\nlog = logging.getLogger('beets')\n\n# If we lose the connection, how many times do we want to retry and how\n# much time should we wait between retries?\nRETRIES = 10\nRETRY_INTERVAL = 5\n\n\ndef is_url(path):\n \"\"\"Try to determine if the path is an URL.\n \"\"\"\n return path.split('://', 1)[0] in ['http', 'https']\n\n\n# Use the MPDClient internals to get unicode.\n# see http://www.tarmack.eu/code/mpdunicode.py for the general idea\nclass MPDClient(mpd.MPDClient):\n def _write_command(self, command, args=[]):\n args = [unicode(arg).encode('utf-8') for arg in args]\n super(MPDClient, self)._write_command(command, args)\n\n def _read_line(self):\n line = super(MPDClient, self)._read_line()\n if line is not None:\n return line.decode('utf-8')\n return None\n\n\nclass MPDClientWrapper(object):\n def __init__(self):\n self.music_directory = (\n config['mpdstats']['music_directory'].get(unicode))\n\n self.client = MPDClient()\n\n def connect(self):\n \"\"\"Connect to the MPD.\n \"\"\"\n host = config['mpd']['host'].get(unicode)\n port = config['mpd']['port'].get(int)\n\n if host[0] in ['/', '~']:\n host = os.path.expanduser(host)\n\n log.info(u'mpdstats: connecting to {0}:{1}'.format(host, port))\n try:\n self.client.connect(host, port)\n except socket.error as e:\n raise ui.UserError('could not connect to MPD: {0}'.format(e))\n\n password = config['mpd']['password'].get(unicode)\n if password:\n try:\n self.client.password(password)\n except mpd.CommandError as e:\n raise ui.UserError(\n 'could not authenticate to MPD: {0}'.format(e)\n )\n\n def disconnect(self):\n \"\"\"Disconnect from the MPD.\n \"\"\"\n self.client.close()\n self.client.disconnect()\n\n def get(self, command, retries=RETRIES):\n \"\"\"Wrapper for requests to the MPD server. Tries to re-connect if the\n connection was lost (f.ex. during MPD's library refresh).\n \"\"\"\n try:\n return getattr(self.client, command)()\n except (select.error, mpd.ConnectionError) as err:\n log.error(u'mpdstats: {0}'.format(err))\n\n if retries <= 0:\n # if we exited without breaking, we couldn't reconnect in time :(\n raise ui.UserError(u'communication with MPD server failed')\n\n time.sleep(RETRY_INTERVAL)\n\n try:\n self.disconnect()\n except mpd.ConnectionError:\n pass\n\n self.connect()\n return self.get(command, retries=retries - 1)\n\n def playlist(self):\n \"\"\"Return the currently active playlist. Prefixes paths with the\n music_directory, to get the absolute path.\n \"\"\"\n result = {}\n for entry in self.get('playlistinfo'):\n if not is_url(entry['file']):\n result[entry['id']] = os.path.join(\n self.music_directory, entry['file'])\n else:\n result[entry['id']] = entry['file']\n return result\n\n def status(self):\n \"\"\"Return the current status of the MPD.\n \"\"\"\n return self.get('status')\n\n def events(self):\n \"\"\"Return list of events. This may block a long time while waiting for\n an answer from MPD.\n \"\"\"\n return self.get('idle')\n\n\nclass MPDStats(object):\n def __init__(self, lib):\n self.lib = lib\n\n self.do_rating = config['mpdstats']['rating'].get(bool)\n self.rating_mix = config['mpdstats']['rating_mix'].get(float)\n self.time_threshold = 10.0 # TODO: maybe add config option?\n\n self.now_playing = None\n self.mpd = MPDClientWrapper()\n\n def rating(self, play_count, skip_count, rating, skipped):\n \"\"\"Calculate a new rating for a song based on play count, skip count,\n old rating and the fact if it was skipped or not.\n \"\"\"\n if skipped:\n rolling = (rating - rating / 2.0)\n else:\n rolling = (rating + (1.0 - rating) / 2.0)\n stable = (play_count + 1.0) / (play_count + skip_count + 2.0)\n return (self.rating_mix * stable\n + (1.0 - self.rating_mix) * rolling)\n\n def get_item(self, path):\n \"\"\"Return the beets item related to path.\n \"\"\"\n query = library.PathQuery('path', path)\n item = self.lib.items(query).get()\n if item:\n return item\n else:\n log.info(u'mpdstats: item not found: {0}'.format(\n displayable_path(path)\n ))\n\n @staticmethod\n def update_item(item, attribute, value=None, increment=None):\n \"\"\"Update the beets item. Set attribute to value or increment the value\n of attribute. If the increment argument is used the value is cast to\n the corresponding type.\n \"\"\"\n if item is None:\n return\n\n if increment is not None:\n item.load()\n value = type(increment)(item.get(attribute, 0)) + increment\n\n if value is not None:\n item[attribute] = value\n item.store()\n\n log.debug(u'mpdstats: updated: {0} = {1} [{2}]'.format(\n attribute,\n item[attribute],\n displayable_path(item.path),\n ))\n\n def update_rating(self, item, skipped):\n \"\"\"Update the rating for a beets item.\n \"\"\"\n item.load()\n rating = self.rating(\n int(item.get('play_count', 0)),\n int(item.get('skip_count', 0)),\n float(item.get('rating', 0.5)),\n skipped)\n\n self.update_item(item, 'rating', rating)\n\n def handle_song_change(self, song):\n \"\"\"Determine if a song was skipped or not and update its attributes.\n To this end the difference between the song's supposed end time\n and the current time is calculated. If it's greater than a threshold,\n the song is considered skipped.\n \"\"\"\n diff = abs(song['remaining'] - (time.time() - song['started']))\n\n skipped = diff >= self.time_threshold\n\n if skipped:\n self.handle_skipped(song)\n else:\n self.handle_played(song)\n\n if self.do_rating:\n self.update_rating(song['beets_item'], skipped)\n\n def handle_played(self, song):\n \"\"\"Updates the play count of a song.\n \"\"\"\n self.update_item(song['beets_item'], 'play_count', increment=1)\n log.info(u'mpdstats: played {0}'.format(\n displayable_path(song['path'])\n ))\n\n def handle_skipped(self, song):\n \"\"\"Updates the skip count of a song.\n \"\"\"\n self.update_item(song['beets_item'], 'skip_count', increment=1)\n log.info(u'mpdstats: skipped {0}'.format(\n displayable_path(song['path'])\n ))\n\n def on_stop(self, status):\n log.info(u'mpdstats: stop')\n\n if self.now_playing:\n self.handle_song_change(self.now_playing)\n\n self.now_playing = None\n\n def on_pause(self, status):\n log.info(u'mpdstats: pause')\n self.now_playing = None\n\n def on_play(self, status):\n playlist = self.mpd.playlist()\n path = playlist.get(status['songid'])\n\n if not path:\n return\n\n if is_url(path):\n log.info(u'mpdstats: playing stream {0}'.format(\n displayable_path(path)\n ))\n return\n\n played, duration = map(int, status['time'].split(':', 1))\n remaining = duration - played\n\n if self.now_playing and self.now_playing['path'] != path:\n self.handle_song_change(self.now_playing)\n\n log.info(u'mpdstats: playing {0}'.format(\n displayable_path(path)\n ))\n\n self.now_playing = {\n 'started': time.time(),\n 'remaining': remaining,\n 'path': path,\n 'beets_item': self.get_item(path),\n }\n\n self.update_item(self.now_playing['beets_item'],\n 'last_played', value=int(time.time()))\n\n def run(self):\n self.mpd.connect()\n events = ['player']\n\n while True:\n if 'player' in events:\n status = self.mpd.status()\n\n handler = getattr(self, 'on_' + status['state'], None)\n\n if handler:\n handler(status)\n else:\n log.debug(u'mpdstats: unhandled status \"{0}\"'.\n format(status))\n\n events = self.mpd.events()\n\n\nclass MPDStatsPlugin(plugins.BeetsPlugin):\n def __init__(self):\n super(MPDStatsPlugin, self).__init__()\n self.config.add({\n 'music_directory': config['directory'].as_filename(),\n 'rating': True,\n 'rating_mix': 0.75,\n })\n config['mpd'].add({\n 'host': u'localhost',\n 'port': 6600,\n 'password': u'',\n })\n\n def commands(self):\n cmd = ui.Subcommand(\n 'mpdstats',\n help='run a MPD client to gather play statistics')\n cmd.parser.add_option(\n '--host', dest='host', type='string',\n help='set the hostname of the server to connect to')\n cmd.parser.add_option(\n '--port', dest='port', type='int',\n help='set the port of the MPD server to connect to')\n cmd.parser.add_option(\n '--password', dest='password', type='string',\n help='set the password of the MPD server to connect to')\n\n def func(lib, opts, args):\n self.config.set_args(opts)\n\n # Overrides for MPD settings.\n if opts.host:\n config['mpd']['host'] = opts.host.decode('utf8')\n if opts.port:\n config['mpd']['host'] = int(opts.port)\n if opts.password:\n config['mpd']['password'] = opts.password.decode('utf8')\n\n try:\n MPDStats(lib).run()\n except KeyboardInterrupt:\n pass\n\n cmd.func = func\n return [cmd]\n", "path": "beetsplug/mpdstats.py" } ]
diff --git a/beetsplug/mpdstats.py b/beetsplug/mpdstats.py index 41a0e7d6a2..56522a8ded 100644 --- a/beetsplug/mpdstats.py +++ b/beetsplug/mpdstats.py @@ -245,6 +245,10 @@ def handle_skipped(self, song): def on_stop(self, status): log.info(u'mpdstats: stop') + + if self.now_playing: + self.handle_song_change(self.now_playing) + self.now_playing = None def on_pause(self, status):
conda__conda-6668
Installation of local packages breaks conda 4.4.6 Not sure whether this is a duplicate of https://github.com/conda/conda/issues/6621 Apparently, installing local packages (such as `conda install <path-to-pkg.tar.bz2>`) breaks the conda environment with conda 4.4.6. Consider the following script that 1. installs dask from the remote servers, 2. uninstalls it and 3. installs it again from the local, downloaded package. ```bash # install and activate miniconda wget https://repo.continuum.io/miniconda/Miniconda3-latest-Linux-x86_64.sh -O miniconda.sh bash miniconda.sh -b -p $HOME/miniconda source $HOME/miniconda/bin/activate root conda update conda -y # install dask from remote servers conda install dask -y # now we uninstall dask and install it again from the file conda uninstall dask --force -y conda install ~/miniconda/pkgs/dask-0.*.tar.bz2 # now install any other package, e.g. anaconda-client conda install anaconda-client # fails! ``` The output of the last command is ``` Solving environment: failed # >>>>>>>>>>>>>>>>>>>>>> ERROR REPORT <<<<<<<<<<<<<<<<<<<<<< Traceback (most recent call last): File "/mnt/data/home/psommer/miniconda/lib/python3.6/site-packages/conda/exceptions.py", line 724, in __call__ return func(*args, **kwargs) File "/mnt/data/home/psommer/miniconda/lib/python3.6/site-packages/conda/cli/main.py", line 78, in _main exit_code = do_call(args, p) File "/mnt/data/home/psommer/miniconda/lib/python3.6/site-packages/conda/cli/conda_argparse.py", line 76, in do_call exit_code = getattr(module, func_name)(args, parser) File "/mnt/data/home/psommer/miniconda/lib/python3.6/site-packages/conda/cli/main_install.py", line 11, in execute install(args, parser, 'install') File "/mnt/data/home/psommer/miniconda/lib/python3.6/site-packages/conda/cli/install.py", line 236, in install force_reinstall=context.force, File "/mnt/data/home/psommer/miniconda/lib/python3.6/site-packages/conda/core/solve.py", line 504, in solve_for_transaction force_remove, force_reinstall) File "/mnt/data/home/psommer/miniconda/lib/python3.6/site-packages/conda/core/solve.py", line 437, in solve_for_diff final_precs = self.solve_final_state(deps_modifier, prune, ignore_pinned, force_remove) File "/mnt/data/home/psommer/miniconda/lib/python3.6/site-packages/conda/core/solve.py", line 407, in solve_final_state return IndexedSet(index[d] for d in solution) File "/mnt/data/home/psommer/miniconda/lib/python3.6/site-packages/conda/_vendor/boltons/setutils.py", line 90, in __init__ self.update(other) File "/mnt/data/home/psommer/miniconda/lib/python3.6/site-packages/conda/_vendor/boltons/setutils.py", line 304, in update for o in other: File "/mnt/data/home/psommer/miniconda/lib/python3.6/site-packages/conda/core/solve.py", line 407, in <genexpr> return IndexedSet(index[d] for d in solution) KeyError: Dist(channel='<unknown>', dist_name='dask-0.16.0-py36h73d177f_0', name='dask', version='0.16.0', build_string='py36h73d177f_0', build_number=0, base_url=None, platform=None) `$ /mnt/data/home/psommer/miniconda/bin/conda install anaconda-client` environment variables: CIO_TEST=<not set> CONDA_DEFAULT_ENV=root CONDA_PATH_BACKUP=/mnt/data/home/psommer/bin:/mnt/data/home/psommer/.local/bin:/usr/loca l/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/lo cal/games:/snap/bin CONDA_PREFIX=/mnt/data/home/psommer/miniconda CONDA_PS1_BACKUP= CONDA_ROOT=/mnt/data/home/psommer/miniconda PATH=/mnt/data/home/psommer/miniconda/bin:/mnt/data/home/psommer/bin:/mnt/d ata/home/psommer/.local/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/ usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin REQUESTS_CA_BUNDLE=<not set> SSL_CERT_FILE=<not set> active environment : base active env location : /mnt/data/home/psommer/miniconda user config file : /mnt/data/home/psommer/.condarc populated config files : conda version : 4.4.6 conda-build version : not installed python version : 3.6.3.final.0 base environment : /mnt/data/home/psommer/miniconda (writable) channel URLs : https://repo.continuum.io/pkgs/main/linux-64 https://repo.continuum.io/pkgs/main/noarch https://repo.continuum.io/pkgs/free/linux-64 https://repo.continuum.io/pkgs/free/noarch https://repo.continuum.io/pkgs/r/linux-64 https://repo.continuum.io/pkgs/r/noarch https://repo.continuum.io/pkgs/pro/linux-64 https://repo.continuum.io/pkgs/pro/noarch package cache : /mnt/data/home/psommer/miniconda/pkgs /mnt/data/home/psommer/.conda/pkgs envs directories : /mnt/data/home/psommer/miniconda/envs /mnt/data/home/psommer/.conda/envs platform : linux-64 user-agent : conda/4.4.6 requests/2.18.4 CPython/3.6.3 Linux/4.4.0-104-generic ubuntu/16.04 glibc/2.23 UID:GID : 1042:1025 netrc file : None offline mode : False An unexpected error has occurred. Conda has prepared the above report. ``` I also tried it with other packages independent of dask. Additionally it is unimportant which package I used for the last install command (here `conda install anaconda-client`). It always fails.
[ { "content": "# -*- coding: utf-8 -*-\nfrom __future__ import absolute_import, division, print_function, unicode_literals\n\nfrom collections import namedtuple\nfrom logging import getLogger\nimport re\n\nfrom .channel import Channel\nfrom .index_record import IndexRecord, PackageRef\nfrom .package_info import PackageInfo\nfrom .. import CondaError\nfrom .._vendor.auxlib.entity import Entity, EntityType, IntegerField, StringField\nfrom ..base.constants import CONDA_TARBALL_EXTENSION, DEFAULTS_CHANNEL_NAME, UNKNOWN_CHANNEL\nfrom ..base.context import context\nfrom ..common.compat import ensure_text_type, text_type, with_metaclass\nfrom ..common.constants import NULL\nfrom ..common.url import has_platform, is_url, join_url\n\nlog = getLogger(__name__)\nDistDetails = namedtuple('DistDetails', ('name', 'version', 'build_string', 'build_number',\n 'dist_name'))\n\n\nclass DistType(EntityType):\n\n def __call__(cls, *args, **kwargs):\n if len(args) == 1 and not kwargs:\n value = args[0]\n if isinstance(value, Dist):\n return value\n elif hasattr(value, 'dist') and isinstance(value.dist, Dist):\n return value.dist\n elif isinstance(value, IndexRecord):\n return Dist.from_string(value.fn, channel_override=value.channel.canonical_name)\n elif isinstance(value, PackageInfo):\n return Dist.from_string(value.repodata_record.fn,\n channel_override=value.channel.canonical_name)\n elif isinstance(value, Channel):\n return Dist.from_url(value.url())\n else:\n return Dist.from_string(value)\n else:\n return super(DistType, cls).__call__(*args, **kwargs)\n\n\n@with_metaclass(DistType)\nclass Dist(Entity):\n _lazy_validate = True\n\n channel = StringField(required=False, nullable=True, immutable=True)\n\n dist_name = StringField(immutable=True)\n name = StringField(immutable=True)\n version = StringField(immutable=True)\n build_string = StringField(immutable=True)\n build_number = IntegerField(immutable=True)\n\n base_url = StringField(required=False, nullable=True, immutable=True)\n platform = StringField(required=False, nullable=True, immutable=True)\n\n def __init__(self, channel, dist_name=None, name=None, version=None, build_string=None,\n build_number=None, base_url=None, platform=None):\n super(Dist, self).__init__(channel=channel,\n dist_name=dist_name,\n name=name,\n version=version,\n build_string=build_string,\n build_number=build_number,\n base_url=base_url,\n platform=platform)\n\n def to_package_ref(self):\n return PackageRef(\n channel=self.channel,\n subdir=self.platform,\n name=self.name,\n version=self.version,\n build=self.build_string,\n build_number=self.build_number,\n )\n\n @property\n def full_name(self):\n return self.__str__()\n\n @property\n def build(self):\n return self.build_string\n\n @property\n def subdir(self):\n return self.platform\n\n @property\n def pair(self):\n return self.channel or DEFAULTS_CHANNEL_NAME, self.dist_name\n\n @property\n def quad(self):\n # returns: name, version, build_string, channel\n parts = self.dist_name.rsplit('-', 2) + ['', '']\n return parts[0], parts[1], parts[2], self.channel or DEFAULTS_CHANNEL_NAME\n\n def __str__(self):\n return \"%s::%s\" % (self.channel, self.dist_name) if self.channel else self.dist_name\n\n @property\n def is_feature_package(self):\n return self.dist_name.endswith('@')\n\n @property\n def is_channel(self):\n return bool(self.base_url and self.platform)\n\n def to_filename(self, extension='.tar.bz2'):\n if self.is_feature_package:\n return self.dist_name\n else:\n return self.dist_name + extension\n\n def to_matchspec(self):\n return ' '.join(self.quad[:3])\n\n @classmethod\n def from_string(cls, string, channel_override=NULL):\n string = text_type(string)\n\n if is_url(string) and channel_override == NULL:\n return cls.from_url(string)\n\n if string.endswith('@'):\n return cls(channel='@',\n name=string,\n version=\"\",\n build_string=\"\",\n build_number=0,\n dist_name=string)\n\n REGEX_STR = (r'(?:([^\\s\\[\\]]+)::)?' # optional channel\n r'([^\\s\\[\\]]+)' # 3.x dist\n r'(?:\\[([a-zA-Z0-9_-]+)\\])?' # with_features_depends\n )\n channel, original_dist, w_f_d = re.search(REGEX_STR, string).groups()\n\n if original_dist.endswith(CONDA_TARBALL_EXTENSION):\n original_dist = original_dist[:-len(CONDA_TARBALL_EXTENSION)]\n\n if channel_override != NULL:\n channel = channel_override\n elif channel is None:\n channel = UNKNOWN_CHANNEL\n\n # enforce dist format\n dist_details = cls.parse_dist_name(original_dist)\n return cls(channel=channel,\n name=dist_details.name,\n version=dist_details.version,\n build_string=dist_details.build_string,\n build_number=dist_details.build_number,\n dist_name=original_dist)\n\n @staticmethod\n def parse_dist_name(string):\n original_string = string\n try:\n string = ensure_text_type(string)\n\n no_tar_bz2_string = (string[:-len(CONDA_TARBALL_EXTENSION)]\n if string.endswith(CONDA_TARBALL_EXTENSION)\n else string)\n\n # remove any directory or channel information\n if '::' in no_tar_bz2_string:\n dist_name = no_tar_bz2_string.rsplit('::', 1)[-1]\n else:\n dist_name = no_tar_bz2_string.rsplit('/', 1)[-1]\n\n parts = dist_name.rsplit('-', 2)\n\n name = parts[0]\n version = parts[1]\n build_string = parts[2] if len(parts) >= 3 else ''\n build_number_as_string = ''.join(filter(lambda x: x.isdigit(),\n (build_string.rsplit('_')[-1]\n if build_string else '0')))\n build_number = int(build_number_as_string) if build_number_as_string else 0\n\n return DistDetails(name, version, build_string, build_number, dist_name)\n\n except:\n raise CondaError(\"dist_name is not a valid conda package: %s\" % original_string)\n\n @classmethod\n def from_url(cls, url):\n assert is_url(url), url\n if not url.endswith(CONDA_TARBALL_EXTENSION) and '::' not in url:\n raise CondaError(\"url '%s' is not a conda package\" % url)\n\n dist_details = cls.parse_dist_name(url)\n if '::' in url:\n url_no_tarball = url.rsplit('::', 1)[0]\n platform = context.subdir\n base_url = url_no_tarball.split('::')[0]\n channel = text_type(Channel(base_url))\n else:\n url_no_tarball = url.rsplit('/', 1)[0]\n platform = has_platform(url_no_tarball, context.known_subdirs)\n base_url = url_no_tarball.rsplit('/', 1)[0] if platform else url_no_tarball\n channel = Channel(base_url).canonical_name if platform else UNKNOWN_CHANNEL\n\n return cls(channel=channel,\n name=dist_details.name,\n version=dist_details.version,\n build_string=dist_details.build_string,\n build_number=dist_details.build_number,\n dist_name=dist_details.dist_name,\n base_url=base_url,\n platform=platform)\n\n def to_url(self):\n if not self.base_url:\n return None\n filename = self.dist_name + CONDA_TARBALL_EXTENSION\n return (join_url(self.base_url, self.platform, filename)\n if self.platform\n else join_url(self.base_url, filename))\n\n def __key__(self):\n return self.channel, self.dist_name\n\n def __lt__(self, other):\n assert isinstance(other, self.__class__)\n return self.__key__() < other.__key__()\n\n def __gt__(self, other):\n assert isinstance(other, self.__class__)\n return self.__key__() > other.__key__()\n\n def __le__(self, other):\n assert isinstance(other, self.__class__)\n return self.__key__() <= other.__key__()\n\n def __ge__(self, other):\n assert isinstance(other, self.__class__)\n return self.__key__() >= other.__key__()\n\n def __hash__(self):\n return hash(self.__key__())\n\n def __eq__(self, other):\n return isinstance(other, self.__class__) and self.__key__() == other.__key__()\n\n def __ne__(self, other):\n return not self.__eq__(other)\n\n # ############ conda-build compatibility ################\n\n def split(self, sep=None, maxsplit=-1):\n assert sep == '::'\n return [self.channel, self.dist_name] if self.channel else [self.dist_name]\n\n def rsplit(self, sep=None, maxsplit=-1):\n assert sep == '-'\n assert maxsplit == 2\n name = '%s::%s' % (self.channel, self.quad[0]) if self.channel else self.quad[0]\n return name, self.quad[1], self.quad[2]\n\n def startswith(self, match):\n return self.dist_name.startswith(match)\n\n def __contains__(self, item):\n item = ensure_text_type(item)\n if item.endswith(CONDA_TARBALL_EXTENSION):\n item = item[:-len(CONDA_TARBALL_EXTENSION)]\n return item in self.__str__()\n\n @property\n def fn(self):\n return self.to_filename()\n", "path": "conda/models/dist.py" } ]
[ { "content": "# -*- coding: utf-8 -*-\nfrom __future__ import absolute_import, division, print_function, unicode_literals\n\nfrom collections import namedtuple\nfrom logging import getLogger\nimport re\n\nfrom .channel import Channel\nfrom .index_record import IndexRecord, PackageRef\nfrom .package_info import PackageInfo\nfrom .. import CondaError\nfrom .._vendor.auxlib.entity import Entity, EntityType, IntegerField, StringField\nfrom ..base.constants import CONDA_TARBALL_EXTENSION, DEFAULTS_CHANNEL_NAME, UNKNOWN_CHANNEL\nfrom ..base.context import context\nfrom ..common.compat import ensure_text_type, text_type, with_metaclass\nfrom ..common.constants import NULL\nfrom ..common.url import has_platform, is_url, join_url\n\nlog = getLogger(__name__)\nDistDetails = namedtuple('DistDetails', ('name', 'version', 'build_string', 'build_number',\n 'dist_name'))\n\n\nclass DistType(EntityType):\n\n def __call__(cls, *args, **kwargs):\n if len(args) == 1 and not kwargs:\n value = args[0]\n if isinstance(value, Dist):\n return value\n elif hasattr(value, 'dist') and isinstance(value.dist, Dist):\n return value.dist\n elif isinstance(value, IndexRecord):\n return Dist.from_string(value.fn, channel_override=value.channel.canonical_name)\n elif isinstance(value, PackageInfo):\n return Dist.from_string(value.repodata_record.fn,\n channel_override=value.channel.canonical_name)\n elif isinstance(value, Channel):\n return Dist.from_url(value.url())\n else:\n return Dist.from_string(value)\n else:\n return super(DistType, cls).__call__(*args, **kwargs)\n\n\n@with_metaclass(DistType)\nclass Dist(Entity):\n _lazy_validate = True\n\n channel = StringField(required=False, nullable=True, immutable=True)\n\n dist_name = StringField(immutable=True)\n name = StringField(immutable=True)\n version = StringField(immutable=True)\n build_string = StringField(immutable=True)\n build_number = IntegerField(immutable=True)\n\n base_url = StringField(required=False, nullable=True, immutable=True)\n platform = StringField(required=False, nullable=True, immutable=True)\n\n def __init__(self, channel, dist_name=None, name=None, version=None, build_string=None,\n build_number=None, base_url=None, platform=None):\n super(Dist, self).__init__(channel=channel,\n dist_name=dist_name,\n name=name,\n version=version,\n build_string=build_string,\n build_number=build_number,\n base_url=base_url,\n platform=platform)\n\n def to_package_ref(self):\n return PackageRef(\n channel=self.channel,\n subdir=self.platform,\n name=self.name,\n version=self.version,\n build=self.build_string,\n build_number=self.build_number,\n )\n\n @property\n def full_name(self):\n return self.__str__()\n\n @property\n def build(self):\n return self.build_string\n\n @property\n def subdir(self):\n return self.platform\n\n @property\n def pair(self):\n return self.channel or DEFAULTS_CHANNEL_NAME, self.dist_name\n\n @property\n def quad(self):\n # returns: name, version, build_string, channel\n parts = self.dist_name.rsplit('-', 2) + ['', '']\n return parts[0], parts[1], parts[2], self.channel or DEFAULTS_CHANNEL_NAME\n\n def __str__(self):\n return \"%s::%s\" % (self.channel, self.dist_name) if self.channel else self.dist_name\n\n @property\n def is_feature_package(self):\n return self.dist_name.endswith('@')\n\n @property\n def is_channel(self):\n return bool(self.base_url and self.platform)\n\n def to_filename(self, extension='.tar.bz2'):\n if self.is_feature_package:\n return self.dist_name\n else:\n return self.dist_name + extension\n\n def to_matchspec(self):\n return ' '.join(self.quad[:3])\n\n @classmethod\n def from_string(cls, string, channel_override=NULL):\n string = text_type(string)\n\n if is_url(string) and channel_override == NULL:\n return cls.from_url(string)\n\n if string.endswith('@'):\n return cls(channel='@',\n name=string,\n version=\"\",\n build_string=\"\",\n build_number=0,\n dist_name=string)\n\n REGEX_STR = (r'(?:([^\\s\\[\\]]+)::)?' # optional channel\n r'([^\\s\\[\\]]+)' # 3.x dist\n r'(?:\\[([a-zA-Z0-9_-]+)\\])?' # with_features_depends\n )\n channel, original_dist, w_f_d = re.search(REGEX_STR, string).groups()\n\n if original_dist.endswith(CONDA_TARBALL_EXTENSION):\n original_dist = original_dist[:-len(CONDA_TARBALL_EXTENSION)]\n\n if channel_override != NULL:\n channel = channel_override\n if channel is None:\n channel = UNKNOWN_CHANNEL\n\n # enforce dist format\n dist_details = cls.parse_dist_name(original_dist)\n return cls(channel=channel,\n name=dist_details.name,\n version=dist_details.version,\n build_string=dist_details.build_string,\n build_number=dist_details.build_number,\n dist_name=original_dist)\n\n @staticmethod\n def parse_dist_name(string):\n original_string = string\n try:\n string = ensure_text_type(string)\n\n no_tar_bz2_string = (string[:-len(CONDA_TARBALL_EXTENSION)]\n if string.endswith(CONDA_TARBALL_EXTENSION)\n else string)\n\n # remove any directory or channel information\n if '::' in no_tar_bz2_string:\n dist_name = no_tar_bz2_string.rsplit('::', 1)[-1]\n else:\n dist_name = no_tar_bz2_string.rsplit('/', 1)[-1]\n\n parts = dist_name.rsplit('-', 2)\n\n name = parts[0]\n version = parts[1]\n build_string = parts[2] if len(parts) >= 3 else ''\n build_number_as_string = ''.join(filter(lambda x: x.isdigit(),\n (build_string.rsplit('_')[-1]\n if build_string else '0')))\n build_number = int(build_number_as_string) if build_number_as_string else 0\n\n return DistDetails(name, version, build_string, build_number, dist_name)\n\n except:\n raise CondaError(\"dist_name is not a valid conda package: %s\" % original_string)\n\n @classmethod\n def from_url(cls, url):\n assert is_url(url), url\n if not url.endswith(CONDA_TARBALL_EXTENSION) and '::' not in url:\n raise CondaError(\"url '%s' is not a conda package\" % url)\n\n dist_details = cls.parse_dist_name(url)\n if '::' in url:\n url_no_tarball = url.rsplit('::', 1)[0]\n platform = context.subdir\n base_url = url_no_tarball.split('::')[0]\n channel = text_type(Channel(base_url))\n else:\n url_no_tarball = url.rsplit('/', 1)[0]\n platform = has_platform(url_no_tarball, context.known_subdirs)\n base_url = url_no_tarball.rsplit('/', 1)[0] if platform else url_no_tarball\n channel = Channel(base_url).canonical_name if platform else UNKNOWN_CHANNEL\n\n return cls(channel=channel,\n name=dist_details.name,\n version=dist_details.version,\n build_string=dist_details.build_string,\n build_number=dist_details.build_number,\n dist_name=dist_details.dist_name,\n base_url=base_url,\n platform=platform)\n\n def to_url(self):\n if not self.base_url:\n return None\n filename = self.dist_name + CONDA_TARBALL_EXTENSION\n return (join_url(self.base_url, self.platform, filename)\n if self.platform\n else join_url(self.base_url, filename))\n\n def __key__(self):\n return self.channel, self.dist_name\n\n def __lt__(self, other):\n assert isinstance(other, self.__class__)\n return self.__key__() < other.__key__()\n\n def __gt__(self, other):\n assert isinstance(other, self.__class__)\n return self.__key__() > other.__key__()\n\n def __le__(self, other):\n assert isinstance(other, self.__class__)\n return self.__key__() <= other.__key__()\n\n def __ge__(self, other):\n assert isinstance(other, self.__class__)\n return self.__key__() >= other.__key__()\n\n def __hash__(self):\n return hash(self.__key__())\n\n def __eq__(self, other):\n return isinstance(other, self.__class__) and self.__key__() == other.__key__()\n\n def __ne__(self, other):\n return not self.__eq__(other)\n\n # ############ conda-build compatibility ################\n\n def split(self, sep=None, maxsplit=-1):\n assert sep == '::'\n return [self.channel, self.dist_name] if self.channel else [self.dist_name]\n\n def rsplit(self, sep=None, maxsplit=-1):\n assert sep == '-'\n assert maxsplit == 2\n name = '%s::%s' % (self.channel, self.quad[0]) if self.channel else self.quad[0]\n return name, self.quad[1], self.quad[2]\n\n def startswith(self, match):\n return self.dist_name.startswith(match)\n\n def __contains__(self, item):\n item = ensure_text_type(item)\n if item.endswith(CONDA_TARBALL_EXTENSION):\n item = item[:-len(CONDA_TARBALL_EXTENSION)]\n return item in self.__str__()\n\n @property\n def fn(self):\n return self.to_filename()\n", "path": "conda/models/dist.py" } ]
diff --git a/conda/models/dist.py b/conda/models/dist.py index c76c27b7621..4a87e146f2b 100644 --- a/conda/models/dist.py +++ b/conda/models/dist.py @@ -147,7 +147,7 @@ def from_string(cls, string, channel_override=NULL): if channel_override != NULL: channel = channel_override - elif channel is None: + if channel is None: channel = UNKNOWN_CHANNEL # enforce dist format
CTFd__CTFd-1531
solves undefined if visibility is set to hidden **Environment**: - CTFd Version/Commit: adc70fb320242d5e4df1a7ce2d107c0e2b8039e7 - Operating System: Ubuntu 18.04 - Web Browser and Version: Safari 13.1.1, Chrome 83.0.4103.106 **What happened?** When _Score Visibility_ or _Account Visibility_ is not public, users cannot see challenges. **What did you expect to happen?** Challenges should be able to be seen and solves are hidden. **How to reproduce your issue** In `CTFd/api/v1/challenges.py`, line 361 to line 376, `solves` is only defined if `scores_visible() is True and accounts_visible() is True`. Yet, `solves` is referenced later in the code. **Any associated stack traces or error logs** <img width="747" alt="error" src="https://user-images.githubusercontent.com/56060664/86547616-1bae4380-bf6c-11ea-89ac-cd6c5db663f0.png">
[ { "content": "import datetime\nfrom typing import List\n\nfrom flask import abort, render_template, request, url_for\nfrom flask_restx import Namespace, Resource\nfrom sqlalchemy.sql import and_\n\nfrom CTFd.api.v1.helpers.models import build_model_filters\nfrom CTFd.api.v1.helpers.request import validate_args\nfrom CTFd.api.v1.helpers.schemas import sqlalchemy_to_pydantic\nfrom CTFd.api.v1.schemas import APIDetailedSuccessResponse, APIListSuccessResponse\nfrom CTFd.cache import clear_standings\nfrom CTFd.constants import RawEnum\nfrom CTFd.models import ChallengeFiles as ChallengeFilesModel\nfrom CTFd.models import (\n Challenges,\n Fails,\n Flags,\n Hints,\n HintUnlocks,\n Solves,\n Submissions,\n Tags,\n db,\n)\nfrom CTFd.plugins.challenges import CHALLENGE_CLASSES, get_chal_class\nfrom CTFd.schemas.flags import FlagSchema\nfrom CTFd.schemas.hints import HintSchema\nfrom CTFd.schemas.tags import TagSchema\nfrom CTFd.utils import config, get_config\nfrom CTFd.utils import user as current_user\nfrom CTFd.utils.config.visibility import (\n accounts_visible,\n challenges_visible,\n scores_visible,\n)\nfrom CTFd.utils.dates import ctf_ended, ctf_paused, ctftime, isoformat, unix_time_to_utc\nfrom CTFd.utils.decorators import (\n admins_only,\n during_ctf_time_only,\n require_verified_emails,\n)\nfrom CTFd.utils.decorators.visibility import (\n check_challenge_visibility,\n check_score_visibility,\n)\nfrom CTFd.utils.logging import log\nfrom CTFd.utils.modes import generate_account_url, get_model\nfrom CTFd.utils.security.signing import serialize\nfrom CTFd.utils.user import authed, get_current_team, get_current_user, is_admin\n\nchallenges_namespace = Namespace(\n \"challenges\", description=\"Endpoint to retrieve Challenges\"\n)\n\nChallengeModel = sqlalchemy_to_pydantic(Challenges)\nTransientChallengeModel = sqlalchemy_to_pydantic(Challenges, exclude=[\"id\"])\n\n\nclass ChallengeDetailedSuccessResponse(APIDetailedSuccessResponse):\n data: ChallengeModel\n\n\nclass ChallengeListSuccessResponse(APIListSuccessResponse):\n data: List[ChallengeModel]\n\n\nchallenges_namespace.schema_model(\n \"ChallengeDetailedSuccessResponse\", ChallengeDetailedSuccessResponse.apidoc()\n)\n\nchallenges_namespace.schema_model(\n \"ChallengeListSuccessResponse\", ChallengeListSuccessResponse.apidoc()\n)\n\n\n@challenges_namespace.route(\"\")\nclass ChallengeList(Resource):\n @check_challenge_visibility\n @during_ctf_time_only\n @require_verified_emails\n @challenges_namespace.doc(\n description=\"Endpoint to get Challenge objects in bulk\",\n responses={\n 200: (\"Success\", \"ChallengeListSuccessResponse\"),\n 400: (\n \"An error occured processing the provided or stored data\",\n \"APISimpleErrorResponse\",\n ),\n },\n )\n @validate_args(\n {\n \"name\": (str, None),\n \"max_attempts\": (int, None),\n \"value\": (int, None),\n \"category\": (str, None),\n \"type\": (str, None),\n \"state\": (str, None),\n \"q\": (str, None),\n \"field\": (\n RawEnum(\n \"ChallengeFields\",\n {\n \"name\": \"name\",\n \"description\": \"description\",\n \"category\": \"category\",\n \"type\": \"type\",\n \"state\": \"state\",\n },\n ),\n None,\n ),\n },\n location=\"query\",\n )\n def get(self, query_args):\n # Build filtering queries\n q = query_args.pop(\"q\", None)\n field = str(query_args.pop(\"field\", None))\n filters = build_model_filters(model=Challenges, query=q, field=field)\n\n # This can return None (unauth) if visibility is set to public\n user = get_current_user()\n\n # Admins can request to see everything\n if is_admin() and request.args.get(\"view\") == \"admin\":\n challenges = (\n Challenges.query.filter_by(**query_args)\n .filter(*filters)\n .order_by(Challenges.value)\n .all()\n )\n solve_ids = set([challenge.id for challenge in challenges])\n else:\n challenges = (\n Challenges.query.filter(\n and_(Challenges.state != \"hidden\", Challenges.state != \"locked\")\n )\n .filter_by(**query_args)\n .filter(*filters)\n .order_by(Challenges.value)\n .all()\n )\n\n if user:\n solve_ids = (\n Solves.query.with_entities(Solves.challenge_id)\n .filter_by(account_id=user.account_id)\n .order_by(Solves.challenge_id.asc())\n .all()\n )\n solve_ids = set([value for value, in solve_ids])\n\n # TODO: Convert this into a re-useable decorator\n if is_admin():\n pass\n else:\n if config.is_teams_mode() and get_current_team() is None:\n abort(403)\n else:\n solve_ids = set()\n\n response = []\n tag_schema = TagSchema(view=\"user\", many=True)\n for challenge in challenges:\n if challenge.requirements:\n requirements = challenge.requirements.get(\"prerequisites\", [])\n anonymize = challenge.requirements.get(\"anonymize\")\n prereqs = set(requirements)\n if solve_ids >= prereqs:\n pass\n else:\n if anonymize:\n response.append(\n {\n \"id\": challenge.id,\n \"type\": \"hidden\",\n \"name\": \"???\",\n \"value\": 0,\n \"category\": \"???\",\n \"tags\": [],\n \"template\": \"\",\n \"script\": \"\",\n }\n )\n # Fallthrough to continue\n continue\n\n challenge_type = get_chal_class(challenge.type)\n response.append(\n {\n \"id\": challenge.id,\n \"type\": challenge_type.name,\n \"name\": challenge.name,\n \"value\": challenge.value,\n \"category\": challenge.category,\n \"tags\": tag_schema.dump(challenge.tags).data,\n \"template\": challenge_type.templates[\"view\"],\n \"script\": challenge_type.scripts[\"view\"],\n }\n )\n\n db.session.close()\n return {\"success\": True, \"data\": response}\n\n @admins_only\n @challenges_namespace.doc(\n description=\"Endpoint to create a Challenge object\",\n responses={\n 200: (\"Success\", \"ChallengeDetailedSuccessResponse\"),\n 400: (\n \"An error occured processing the provided or stored data\",\n \"APISimpleErrorResponse\",\n ),\n },\n )\n def post(self):\n data = request.form or request.get_json()\n challenge_type = data[\"type\"]\n challenge_class = get_chal_class(challenge_type)\n challenge = challenge_class.create(request)\n response = challenge_class.read(challenge)\n return {\"success\": True, \"data\": response}\n\n\n@challenges_namespace.route(\"/types\")\nclass ChallengeTypes(Resource):\n @admins_only\n def get(self):\n response = {}\n\n for class_id in CHALLENGE_CLASSES:\n challenge_class = CHALLENGE_CLASSES.get(class_id)\n response[challenge_class.id] = {\n \"id\": challenge_class.id,\n \"name\": challenge_class.name,\n \"templates\": challenge_class.templates,\n \"scripts\": challenge_class.scripts,\n \"create\": render_template(\n challenge_class.templates[\"create\"].lstrip(\"/\")\n ),\n }\n return {\"success\": True, \"data\": response}\n\n\n@challenges_namespace.route(\"/<challenge_id>\")\nclass Challenge(Resource):\n @check_challenge_visibility\n @during_ctf_time_only\n @require_verified_emails\n @challenges_namespace.doc(\n description=\"Endpoint to get a specific Challenge object\",\n responses={\n 200: (\"Success\", \"ChallengeDetailedSuccessResponse\"),\n 400: (\n \"An error occured processing the provided or stored data\",\n \"APISimpleErrorResponse\",\n ),\n },\n )\n def get(self, challenge_id):\n if is_admin():\n chal = Challenges.query.filter(Challenges.id == challenge_id).first_or_404()\n else:\n chal = Challenges.query.filter(\n Challenges.id == challenge_id,\n and_(Challenges.state != \"hidden\", Challenges.state != \"locked\"),\n ).first_or_404()\n\n chal_class = get_chal_class(chal.type)\n\n if chal.requirements:\n requirements = chal.requirements.get(\"prerequisites\", [])\n anonymize = chal.requirements.get(\"anonymize\")\n if challenges_visible():\n user = get_current_user()\n if user:\n solve_ids = (\n Solves.query.with_entities(Solves.challenge_id)\n .filter_by(account_id=user.account_id)\n .order_by(Solves.challenge_id.asc())\n .all()\n )\n else:\n # We need to handle the case where a user is viewing challenges anonymously\n solve_ids = []\n solve_ids = set([value for value, in solve_ids])\n prereqs = set(requirements)\n if solve_ids >= prereqs or is_admin():\n pass\n else:\n if anonymize:\n return {\n \"success\": True,\n \"data\": {\n \"id\": chal.id,\n \"type\": \"hidden\",\n \"name\": \"???\",\n \"value\": 0,\n \"category\": \"???\",\n \"tags\": [],\n \"template\": \"\",\n \"script\": \"\",\n },\n }\n abort(403)\n else:\n abort(403)\n\n tags = [\n tag[\"value\"] for tag in TagSchema(\"user\", many=True).dump(chal.tags).data\n ]\n\n unlocked_hints = set()\n hints = []\n if authed():\n user = get_current_user()\n team = get_current_team()\n\n # TODO: Convert this into a re-useable decorator\n if is_admin():\n pass\n else:\n if config.is_teams_mode() and team is None:\n abort(403)\n\n unlocked_hints = set(\n [\n u.target\n for u in HintUnlocks.query.filter_by(\n type=\"hints\", account_id=user.account_id\n )\n ]\n )\n files = []\n for f in chal.files:\n token = {\n \"user_id\": user.id,\n \"team_id\": team.id if team else None,\n \"file_id\": f.id,\n }\n files.append(\n url_for(\"views.files\", path=f.location, token=serialize(token))\n )\n else:\n files = [url_for(\"views.files\", path=f.location) for f in chal.files]\n\n for hint in Hints.query.filter_by(challenge_id=chal.id).all():\n if hint.id in unlocked_hints or ctf_ended():\n hints.append(\n {\"id\": hint.id, \"cost\": hint.cost, \"content\": hint.content}\n )\n else:\n hints.append({\"id\": hint.id, \"cost\": hint.cost})\n\n response = chal_class.read(challenge=chal)\n\n Model = get_model()\n\n if scores_visible() is True and accounts_visible() is True:\n solves = Solves.query.join(Model, Solves.account_id == Model.id).filter(\n Solves.challenge_id == chal.id,\n Model.banned == False,\n Model.hidden == False,\n )\n\n # Only show solves that happened before freeze time if configured\n freeze = get_config(\"freeze\")\n if not is_admin() and freeze:\n solves = solves.filter(Solves.date < unix_time_to_utc(freeze))\n\n solves = solves.count()\n response[\"solves\"] = solves\n else:\n response[\"solves\"] = None\n\n if authed():\n # Get current attempts for the user\n attempts = Submissions.query.filter_by(\n account_id=user.account_id, challenge_id=challenge_id\n ).count()\n else:\n attempts = 0\n\n response[\"attempts\"] = attempts\n response[\"files\"] = files\n response[\"tags\"] = tags\n response[\"hints\"] = hints\n\n response[\"view\"] = render_template(\n chal_class.templates[\"view\"].lstrip(\"/\"),\n solves=solves,\n files=files,\n tags=tags,\n hints=[Hints(**h) for h in hints],\n max_attempts=chal.max_attempts,\n attempts=attempts,\n challenge=chal,\n )\n\n db.session.close()\n return {\"success\": True, \"data\": response}\n\n @admins_only\n @challenges_namespace.doc(\n description=\"Endpoint to edit a specific Challenge object\",\n responses={\n 200: (\"Success\", \"ChallengeDetailedSuccessResponse\"),\n 400: (\n \"An error occured processing the provided or stored data\",\n \"APISimpleErrorResponse\",\n ),\n },\n )\n def patch(self, challenge_id):\n challenge = Challenges.query.filter_by(id=challenge_id).first_or_404()\n challenge_class = get_chal_class(challenge.type)\n challenge = challenge_class.update(challenge, request)\n response = challenge_class.read(challenge)\n return {\"success\": True, \"data\": response}\n\n @admins_only\n @challenges_namespace.doc(\n description=\"Endpoint to delete a specific Challenge object\",\n responses={200: (\"Success\", \"APISimpleSuccessResponse\")},\n )\n def delete(self, challenge_id):\n challenge = Challenges.query.filter_by(id=challenge_id).first_or_404()\n chal_class = get_chal_class(challenge.type)\n chal_class.delete(challenge)\n\n return {\"success\": True}\n\n\n@challenges_namespace.route(\"/attempt\")\nclass ChallengeAttempt(Resource):\n @check_challenge_visibility\n @during_ctf_time_only\n @require_verified_emails\n def post(self):\n if authed() is False:\n return {\"success\": True, \"data\": {\"status\": \"authentication_required\"}}, 403\n\n if request.content_type != \"application/json\":\n request_data = request.form\n else:\n request_data = request.get_json()\n\n challenge_id = request_data.get(\"challenge_id\")\n\n if current_user.is_admin():\n preview = request.args.get(\"preview\", False)\n if preview:\n challenge = Challenges.query.filter_by(id=challenge_id).first_or_404()\n chal_class = get_chal_class(challenge.type)\n status, message = chal_class.attempt(challenge, request)\n\n return {\n \"success\": True,\n \"data\": {\n \"status\": \"correct\" if status else \"incorrect\",\n \"message\": message,\n },\n }\n\n if ctf_paused():\n return (\n {\n \"success\": True,\n \"data\": {\n \"status\": \"paused\",\n \"message\": \"{} is paused\".format(config.ctf_name()),\n },\n },\n 403,\n )\n\n user = get_current_user()\n team = get_current_team()\n\n # TODO: Convert this into a re-useable decorator\n if config.is_teams_mode() and team is None:\n abort(403)\n\n fails = Fails.query.filter_by(\n account_id=user.account_id, challenge_id=challenge_id\n ).count()\n\n challenge = Challenges.query.filter_by(id=challenge_id).first_or_404()\n\n if challenge.state == \"hidden\":\n abort(404)\n\n if challenge.state == \"locked\":\n abort(403)\n\n if challenge.requirements:\n requirements = challenge.requirements.get(\"prerequisites\", [])\n solve_ids = (\n Solves.query.with_entities(Solves.challenge_id)\n .filter_by(account_id=user.account_id)\n .order_by(Solves.challenge_id.asc())\n .all()\n )\n solve_ids = set([solve_id for solve_id, in solve_ids])\n prereqs = set(requirements)\n if solve_ids >= prereqs:\n pass\n else:\n abort(403)\n\n chal_class = get_chal_class(challenge.type)\n\n # Anti-bruteforce / submitting Flags too quickly\n kpm = current_user.get_wrong_submissions_per_minute(user.account_id)\n if kpm > 10:\n if ctftime():\n chal_class.fail(\n user=user, team=team, challenge=challenge, request=request\n )\n log(\n \"submissions\",\n \"[{date}] {name} submitted {submission} on {challenge_id} with kpm {kpm} [TOO FAST]\",\n submission=request_data.get(\"submission\", \"\").encode(\"utf-8\"),\n challenge_id=challenge_id,\n kpm=kpm,\n )\n # Submitting too fast\n return (\n {\n \"success\": True,\n \"data\": {\n \"status\": \"ratelimited\",\n \"message\": \"You're submitting flags too fast. Slow down.\",\n },\n },\n 429,\n )\n\n solves = Solves.query.filter_by(\n account_id=user.account_id, challenge_id=challenge_id\n ).first()\n\n # Challenge not solved yet\n if not solves:\n # Hit max attempts\n max_tries = challenge.max_attempts\n if max_tries and fails >= max_tries > 0:\n return (\n {\n \"success\": True,\n \"data\": {\n \"status\": \"incorrect\",\n \"message\": \"You have 0 tries remaining\",\n },\n },\n 403,\n )\n\n status, message = chal_class.attempt(challenge, request)\n if status: # The challenge plugin says the input is right\n if ctftime() or current_user.is_admin():\n chal_class.solve(\n user=user, team=team, challenge=challenge, request=request\n )\n clear_standings()\n\n log(\n \"submissions\",\n \"[{date}] {name} submitted {submission} on {challenge_id} with kpm {kpm} [CORRECT]\",\n submission=request_data.get(\"submission\", \"\").encode(\"utf-8\"),\n challenge_id=challenge_id,\n kpm=kpm,\n )\n return {\n \"success\": True,\n \"data\": {\"status\": \"correct\", \"message\": message},\n }\n else: # The challenge plugin says the input is wrong\n if ctftime() or current_user.is_admin():\n chal_class.fail(\n user=user, team=team, challenge=challenge, request=request\n )\n clear_standings()\n\n log(\n \"submissions\",\n \"[{date}] {name} submitted {submission} on {challenge_id} with kpm {kpm} [WRONG]\",\n submission=request_data.get(\"submission\", \"\").encode(\"utf-8\"),\n challenge_id=challenge_id,\n kpm=kpm,\n )\n\n if max_tries:\n # Off by one since fails has changed since it was gotten\n attempts_left = max_tries - fails - 1\n tries_str = \"tries\"\n if attempts_left == 1:\n tries_str = \"try\"\n # Add a punctuation mark if there isn't one\n if message[-1] not in \"!().;?[]{}\":\n message = message + \".\"\n return {\n \"success\": True,\n \"data\": {\n \"status\": \"incorrect\",\n \"message\": \"{} You have {} {} remaining.\".format(\n message, attempts_left, tries_str\n ),\n },\n }\n else:\n return {\n \"success\": True,\n \"data\": {\"status\": \"incorrect\", \"message\": message},\n }\n\n # Challenge already solved\n else:\n log(\n \"submissions\",\n \"[{date}] {name} submitted {submission} on {challenge_id} with kpm {kpm} [ALREADY SOLVED]\",\n submission=request_data.get(\"submission\", \"\").encode(\"utf-8\"),\n challenge_id=challenge_id,\n kpm=kpm,\n )\n return {\n \"success\": True,\n \"data\": {\n \"status\": \"already_solved\",\n \"message\": \"You already solved this\",\n },\n }\n\n\n@challenges_namespace.route(\"/<challenge_id>/solves\")\nclass ChallengeSolves(Resource):\n @check_challenge_visibility\n @check_score_visibility\n @during_ctf_time_only\n @require_verified_emails\n def get(self, challenge_id):\n response = []\n challenge = Challenges.query.filter_by(id=challenge_id).first_or_404()\n\n # TODO: Need a generic challenge visibility call.\n # However, it should be stated that a solve on a gated challenge is not considered private.\n if challenge.state == \"hidden\" and is_admin() is False:\n abort(404)\n\n Model = get_model()\n\n solves = (\n Solves.query.join(Model, Solves.account_id == Model.id)\n .filter(\n Solves.challenge_id == challenge_id,\n Model.banned == False,\n Model.hidden == False,\n )\n .order_by(Solves.date.asc())\n )\n\n freeze = get_config(\"freeze\")\n if freeze:\n preview = request.args.get(\"preview\")\n if (is_admin() is False) or (is_admin() is True and preview):\n dt = datetime.datetime.utcfromtimestamp(freeze)\n solves = solves.filter(Solves.date < dt)\n\n for solve in solves:\n response.append(\n {\n \"account_id\": solve.account_id,\n \"name\": solve.account.name,\n \"date\": isoformat(solve.date),\n \"account_url\": generate_account_url(account_id=solve.account_id),\n }\n )\n\n return {\"success\": True, \"data\": response}\n\n\n@challenges_namespace.route(\"/<challenge_id>/files\")\nclass ChallengeFiles(Resource):\n @admins_only\n def get(self, challenge_id):\n response = []\n\n challenge_files = ChallengeFilesModel.query.filter_by(\n challenge_id=challenge_id\n ).all()\n\n for f in challenge_files:\n response.append({\"id\": f.id, \"type\": f.type, \"location\": f.location})\n return {\"success\": True, \"data\": response}\n\n\n@challenges_namespace.route(\"/<challenge_id>/tags\")\nclass ChallengeTags(Resource):\n @admins_only\n def get(self, challenge_id):\n response = []\n\n tags = Tags.query.filter_by(challenge_id=challenge_id).all()\n\n for t in tags:\n response.append(\n {\"id\": t.id, \"challenge_id\": t.challenge_id, \"value\": t.value}\n )\n return {\"success\": True, \"data\": response}\n\n\n@challenges_namespace.route(\"/<challenge_id>/hints\")\nclass ChallengeHints(Resource):\n @admins_only\n def get(self, challenge_id):\n hints = Hints.query.filter_by(challenge_id=challenge_id).all()\n schema = HintSchema(many=True)\n response = schema.dump(hints)\n\n if response.errors:\n return {\"success\": False, \"errors\": response.errors}, 400\n\n return {\"success\": True, \"data\": response.data}\n\n\n@challenges_namespace.route(\"/<challenge_id>/flags\")\nclass ChallengeFlags(Resource):\n @admins_only\n def get(self, challenge_id):\n flags = Flags.query.filter_by(challenge_id=challenge_id).all()\n schema = FlagSchema(many=True)\n response = schema.dump(flags)\n\n if response.errors:\n return {\"success\": False, \"errors\": response.errors}, 400\n\n return {\"success\": True, \"data\": response.data}\n", "path": "CTFd/api/v1/challenges.py" } ]
[ { "content": "import datetime\nfrom typing import List\n\nfrom flask import abort, render_template, request, url_for\nfrom flask_restx import Namespace, Resource\nfrom sqlalchemy.sql import and_\n\nfrom CTFd.api.v1.helpers.models import build_model_filters\nfrom CTFd.api.v1.helpers.request import validate_args\nfrom CTFd.api.v1.helpers.schemas import sqlalchemy_to_pydantic\nfrom CTFd.api.v1.schemas import APIDetailedSuccessResponse, APIListSuccessResponse\nfrom CTFd.cache import clear_standings\nfrom CTFd.constants import RawEnum\nfrom CTFd.models import ChallengeFiles as ChallengeFilesModel\nfrom CTFd.models import (\n Challenges,\n Fails,\n Flags,\n Hints,\n HintUnlocks,\n Solves,\n Submissions,\n Tags,\n db,\n)\nfrom CTFd.plugins.challenges import CHALLENGE_CLASSES, get_chal_class\nfrom CTFd.schemas.flags import FlagSchema\nfrom CTFd.schemas.hints import HintSchema\nfrom CTFd.schemas.tags import TagSchema\nfrom CTFd.utils import config, get_config\nfrom CTFd.utils import user as current_user\nfrom CTFd.utils.config.visibility import (\n accounts_visible,\n challenges_visible,\n scores_visible,\n)\nfrom CTFd.utils.dates import ctf_ended, ctf_paused, ctftime, isoformat, unix_time_to_utc\nfrom CTFd.utils.decorators import (\n admins_only,\n during_ctf_time_only,\n require_verified_emails,\n)\nfrom CTFd.utils.decorators.visibility import (\n check_challenge_visibility,\n check_score_visibility,\n)\nfrom CTFd.utils.logging import log\nfrom CTFd.utils.modes import generate_account_url, get_model\nfrom CTFd.utils.security.signing import serialize\nfrom CTFd.utils.user import authed, get_current_team, get_current_user, is_admin\n\nchallenges_namespace = Namespace(\n \"challenges\", description=\"Endpoint to retrieve Challenges\"\n)\n\nChallengeModel = sqlalchemy_to_pydantic(Challenges)\nTransientChallengeModel = sqlalchemy_to_pydantic(Challenges, exclude=[\"id\"])\n\n\nclass ChallengeDetailedSuccessResponse(APIDetailedSuccessResponse):\n data: ChallengeModel\n\n\nclass ChallengeListSuccessResponse(APIListSuccessResponse):\n data: List[ChallengeModel]\n\n\nchallenges_namespace.schema_model(\n \"ChallengeDetailedSuccessResponse\", ChallengeDetailedSuccessResponse.apidoc()\n)\n\nchallenges_namespace.schema_model(\n \"ChallengeListSuccessResponse\", ChallengeListSuccessResponse.apidoc()\n)\n\n\n@challenges_namespace.route(\"\")\nclass ChallengeList(Resource):\n @check_challenge_visibility\n @during_ctf_time_only\n @require_verified_emails\n @challenges_namespace.doc(\n description=\"Endpoint to get Challenge objects in bulk\",\n responses={\n 200: (\"Success\", \"ChallengeListSuccessResponse\"),\n 400: (\n \"An error occured processing the provided or stored data\",\n \"APISimpleErrorResponse\",\n ),\n },\n )\n @validate_args(\n {\n \"name\": (str, None),\n \"max_attempts\": (int, None),\n \"value\": (int, None),\n \"category\": (str, None),\n \"type\": (str, None),\n \"state\": (str, None),\n \"q\": (str, None),\n \"field\": (\n RawEnum(\n \"ChallengeFields\",\n {\n \"name\": \"name\",\n \"description\": \"description\",\n \"category\": \"category\",\n \"type\": \"type\",\n \"state\": \"state\",\n },\n ),\n None,\n ),\n },\n location=\"query\",\n )\n def get(self, query_args):\n # Build filtering queries\n q = query_args.pop(\"q\", None)\n field = str(query_args.pop(\"field\", None))\n filters = build_model_filters(model=Challenges, query=q, field=field)\n\n # This can return None (unauth) if visibility is set to public\n user = get_current_user()\n\n # Admins can request to see everything\n if is_admin() and request.args.get(\"view\") == \"admin\":\n challenges = (\n Challenges.query.filter_by(**query_args)\n .filter(*filters)\n .order_by(Challenges.value)\n .all()\n )\n solve_ids = set([challenge.id for challenge in challenges])\n else:\n challenges = (\n Challenges.query.filter(\n and_(Challenges.state != \"hidden\", Challenges.state != \"locked\")\n )\n .filter_by(**query_args)\n .filter(*filters)\n .order_by(Challenges.value)\n .all()\n )\n\n if user:\n solve_ids = (\n Solves.query.with_entities(Solves.challenge_id)\n .filter_by(account_id=user.account_id)\n .order_by(Solves.challenge_id.asc())\n .all()\n )\n solve_ids = set([value for value, in solve_ids])\n\n # TODO: Convert this into a re-useable decorator\n if is_admin():\n pass\n else:\n if config.is_teams_mode() and get_current_team() is None:\n abort(403)\n else:\n solve_ids = set()\n\n response = []\n tag_schema = TagSchema(view=\"user\", many=True)\n for challenge in challenges:\n if challenge.requirements:\n requirements = challenge.requirements.get(\"prerequisites\", [])\n anonymize = challenge.requirements.get(\"anonymize\")\n prereqs = set(requirements)\n if solve_ids >= prereqs:\n pass\n else:\n if anonymize:\n response.append(\n {\n \"id\": challenge.id,\n \"type\": \"hidden\",\n \"name\": \"???\",\n \"value\": 0,\n \"category\": \"???\",\n \"tags\": [],\n \"template\": \"\",\n \"script\": \"\",\n }\n )\n # Fallthrough to continue\n continue\n\n challenge_type = get_chal_class(challenge.type)\n response.append(\n {\n \"id\": challenge.id,\n \"type\": challenge_type.name,\n \"name\": challenge.name,\n \"value\": challenge.value,\n \"category\": challenge.category,\n \"tags\": tag_schema.dump(challenge.tags).data,\n \"template\": challenge_type.templates[\"view\"],\n \"script\": challenge_type.scripts[\"view\"],\n }\n )\n\n db.session.close()\n return {\"success\": True, \"data\": response}\n\n @admins_only\n @challenges_namespace.doc(\n description=\"Endpoint to create a Challenge object\",\n responses={\n 200: (\"Success\", \"ChallengeDetailedSuccessResponse\"),\n 400: (\n \"An error occured processing the provided or stored data\",\n \"APISimpleErrorResponse\",\n ),\n },\n )\n def post(self):\n data = request.form or request.get_json()\n challenge_type = data[\"type\"]\n challenge_class = get_chal_class(challenge_type)\n challenge = challenge_class.create(request)\n response = challenge_class.read(challenge)\n return {\"success\": True, \"data\": response}\n\n\n@challenges_namespace.route(\"/types\")\nclass ChallengeTypes(Resource):\n @admins_only\n def get(self):\n response = {}\n\n for class_id in CHALLENGE_CLASSES:\n challenge_class = CHALLENGE_CLASSES.get(class_id)\n response[challenge_class.id] = {\n \"id\": challenge_class.id,\n \"name\": challenge_class.name,\n \"templates\": challenge_class.templates,\n \"scripts\": challenge_class.scripts,\n \"create\": render_template(\n challenge_class.templates[\"create\"].lstrip(\"/\")\n ),\n }\n return {\"success\": True, \"data\": response}\n\n\n@challenges_namespace.route(\"/<challenge_id>\")\nclass Challenge(Resource):\n @check_challenge_visibility\n @during_ctf_time_only\n @require_verified_emails\n @challenges_namespace.doc(\n description=\"Endpoint to get a specific Challenge object\",\n responses={\n 200: (\"Success\", \"ChallengeDetailedSuccessResponse\"),\n 400: (\n \"An error occured processing the provided or stored data\",\n \"APISimpleErrorResponse\",\n ),\n },\n )\n def get(self, challenge_id):\n if is_admin():\n chal = Challenges.query.filter(Challenges.id == challenge_id).first_or_404()\n else:\n chal = Challenges.query.filter(\n Challenges.id == challenge_id,\n and_(Challenges.state != \"hidden\", Challenges.state != \"locked\"),\n ).first_or_404()\n\n chal_class = get_chal_class(chal.type)\n\n if chal.requirements:\n requirements = chal.requirements.get(\"prerequisites\", [])\n anonymize = chal.requirements.get(\"anonymize\")\n if challenges_visible():\n user = get_current_user()\n if user:\n solve_ids = (\n Solves.query.with_entities(Solves.challenge_id)\n .filter_by(account_id=user.account_id)\n .order_by(Solves.challenge_id.asc())\n .all()\n )\n else:\n # We need to handle the case where a user is viewing challenges anonymously\n solve_ids = []\n solve_ids = set([value for value, in solve_ids])\n prereqs = set(requirements)\n if solve_ids >= prereqs or is_admin():\n pass\n else:\n if anonymize:\n return {\n \"success\": True,\n \"data\": {\n \"id\": chal.id,\n \"type\": \"hidden\",\n \"name\": \"???\",\n \"value\": 0,\n \"category\": \"???\",\n \"tags\": [],\n \"template\": \"\",\n \"script\": \"\",\n },\n }\n abort(403)\n else:\n abort(403)\n\n tags = [\n tag[\"value\"] for tag in TagSchema(\"user\", many=True).dump(chal.tags).data\n ]\n\n unlocked_hints = set()\n hints = []\n if authed():\n user = get_current_user()\n team = get_current_team()\n\n # TODO: Convert this into a re-useable decorator\n if is_admin():\n pass\n else:\n if config.is_teams_mode() and team is None:\n abort(403)\n\n unlocked_hints = set(\n [\n u.target\n for u in HintUnlocks.query.filter_by(\n type=\"hints\", account_id=user.account_id\n )\n ]\n )\n files = []\n for f in chal.files:\n token = {\n \"user_id\": user.id,\n \"team_id\": team.id if team else None,\n \"file_id\": f.id,\n }\n files.append(\n url_for(\"views.files\", path=f.location, token=serialize(token))\n )\n else:\n files = [url_for(\"views.files\", path=f.location) for f in chal.files]\n\n for hint in Hints.query.filter_by(challenge_id=chal.id).all():\n if hint.id in unlocked_hints or ctf_ended():\n hints.append(\n {\"id\": hint.id, \"cost\": hint.cost, \"content\": hint.content}\n )\n else:\n hints.append({\"id\": hint.id, \"cost\": hint.cost})\n\n response = chal_class.read(challenge=chal)\n\n Model = get_model()\n\n if scores_visible() is True and accounts_visible() is True:\n solves = Solves.query.join(Model, Solves.account_id == Model.id).filter(\n Solves.challenge_id == chal.id,\n Model.banned == False,\n Model.hidden == False,\n )\n\n # Only show solves that happened before freeze time if configured\n freeze = get_config(\"freeze\")\n if not is_admin() and freeze:\n solves = solves.filter(Solves.date < unix_time_to_utc(freeze))\n\n solves = solves.count()\n response[\"solves\"] = solves\n else:\n response[\"solves\"] = None\n solves = None\n\n if authed():\n # Get current attempts for the user\n attempts = Submissions.query.filter_by(\n account_id=user.account_id, challenge_id=challenge_id\n ).count()\n else:\n attempts = 0\n\n response[\"attempts\"] = attempts\n response[\"files\"] = files\n response[\"tags\"] = tags\n response[\"hints\"] = hints\n\n response[\"view\"] = render_template(\n chal_class.templates[\"view\"].lstrip(\"/\"),\n solves=solves,\n files=files,\n tags=tags,\n hints=[Hints(**h) for h in hints],\n max_attempts=chal.max_attempts,\n attempts=attempts,\n challenge=chal,\n )\n\n db.session.close()\n return {\"success\": True, \"data\": response}\n\n @admins_only\n @challenges_namespace.doc(\n description=\"Endpoint to edit a specific Challenge object\",\n responses={\n 200: (\"Success\", \"ChallengeDetailedSuccessResponse\"),\n 400: (\n \"An error occured processing the provided or stored data\",\n \"APISimpleErrorResponse\",\n ),\n },\n )\n def patch(self, challenge_id):\n challenge = Challenges.query.filter_by(id=challenge_id).first_or_404()\n challenge_class = get_chal_class(challenge.type)\n challenge = challenge_class.update(challenge, request)\n response = challenge_class.read(challenge)\n return {\"success\": True, \"data\": response}\n\n @admins_only\n @challenges_namespace.doc(\n description=\"Endpoint to delete a specific Challenge object\",\n responses={200: (\"Success\", \"APISimpleSuccessResponse\")},\n )\n def delete(self, challenge_id):\n challenge = Challenges.query.filter_by(id=challenge_id).first_or_404()\n chal_class = get_chal_class(challenge.type)\n chal_class.delete(challenge)\n\n return {\"success\": True}\n\n\n@challenges_namespace.route(\"/attempt\")\nclass ChallengeAttempt(Resource):\n @check_challenge_visibility\n @during_ctf_time_only\n @require_verified_emails\n def post(self):\n if authed() is False:\n return {\"success\": True, \"data\": {\"status\": \"authentication_required\"}}, 403\n\n if request.content_type != \"application/json\":\n request_data = request.form\n else:\n request_data = request.get_json()\n\n challenge_id = request_data.get(\"challenge_id\")\n\n if current_user.is_admin():\n preview = request.args.get(\"preview\", False)\n if preview:\n challenge = Challenges.query.filter_by(id=challenge_id).first_or_404()\n chal_class = get_chal_class(challenge.type)\n status, message = chal_class.attempt(challenge, request)\n\n return {\n \"success\": True,\n \"data\": {\n \"status\": \"correct\" if status else \"incorrect\",\n \"message\": message,\n },\n }\n\n if ctf_paused():\n return (\n {\n \"success\": True,\n \"data\": {\n \"status\": \"paused\",\n \"message\": \"{} is paused\".format(config.ctf_name()),\n },\n },\n 403,\n )\n\n user = get_current_user()\n team = get_current_team()\n\n # TODO: Convert this into a re-useable decorator\n if config.is_teams_mode() and team is None:\n abort(403)\n\n fails = Fails.query.filter_by(\n account_id=user.account_id, challenge_id=challenge_id\n ).count()\n\n challenge = Challenges.query.filter_by(id=challenge_id).first_or_404()\n\n if challenge.state == \"hidden\":\n abort(404)\n\n if challenge.state == \"locked\":\n abort(403)\n\n if challenge.requirements:\n requirements = challenge.requirements.get(\"prerequisites\", [])\n solve_ids = (\n Solves.query.with_entities(Solves.challenge_id)\n .filter_by(account_id=user.account_id)\n .order_by(Solves.challenge_id.asc())\n .all()\n )\n solve_ids = set([solve_id for solve_id, in solve_ids])\n prereqs = set(requirements)\n if solve_ids >= prereqs:\n pass\n else:\n abort(403)\n\n chal_class = get_chal_class(challenge.type)\n\n # Anti-bruteforce / submitting Flags too quickly\n kpm = current_user.get_wrong_submissions_per_minute(user.account_id)\n if kpm > 10:\n if ctftime():\n chal_class.fail(\n user=user, team=team, challenge=challenge, request=request\n )\n log(\n \"submissions\",\n \"[{date}] {name} submitted {submission} on {challenge_id} with kpm {kpm} [TOO FAST]\",\n submission=request_data.get(\"submission\", \"\").encode(\"utf-8\"),\n challenge_id=challenge_id,\n kpm=kpm,\n )\n # Submitting too fast\n return (\n {\n \"success\": True,\n \"data\": {\n \"status\": \"ratelimited\",\n \"message\": \"You're submitting flags too fast. Slow down.\",\n },\n },\n 429,\n )\n\n solves = Solves.query.filter_by(\n account_id=user.account_id, challenge_id=challenge_id\n ).first()\n\n # Challenge not solved yet\n if not solves:\n # Hit max attempts\n max_tries = challenge.max_attempts\n if max_tries and fails >= max_tries > 0:\n return (\n {\n \"success\": True,\n \"data\": {\n \"status\": \"incorrect\",\n \"message\": \"You have 0 tries remaining\",\n },\n },\n 403,\n )\n\n status, message = chal_class.attempt(challenge, request)\n if status: # The challenge plugin says the input is right\n if ctftime() or current_user.is_admin():\n chal_class.solve(\n user=user, team=team, challenge=challenge, request=request\n )\n clear_standings()\n\n log(\n \"submissions\",\n \"[{date}] {name} submitted {submission} on {challenge_id} with kpm {kpm} [CORRECT]\",\n submission=request_data.get(\"submission\", \"\").encode(\"utf-8\"),\n challenge_id=challenge_id,\n kpm=kpm,\n )\n return {\n \"success\": True,\n \"data\": {\"status\": \"correct\", \"message\": message},\n }\n else: # The challenge plugin says the input is wrong\n if ctftime() or current_user.is_admin():\n chal_class.fail(\n user=user, team=team, challenge=challenge, request=request\n )\n clear_standings()\n\n log(\n \"submissions\",\n \"[{date}] {name} submitted {submission} on {challenge_id} with kpm {kpm} [WRONG]\",\n submission=request_data.get(\"submission\", \"\").encode(\"utf-8\"),\n challenge_id=challenge_id,\n kpm=kpm,\n )\n\n if max_tries:\n # Off by one since fails has changed since it was gotten\n attempts_left = max_tries - fails - 1\n tries_str = \"tries\"\n if attempts_left == 1:\n tries_str = \"try\"\n # Add a punctuation mark if there isn't one\n if message[-1] not in \"!().;?[]{}\":\n message = message + \".\"\n return {\n \"success\": True,\n \"data\": {\n \"status\": \"incorrect\",\n \"message\": \"{} You have {} {} remaining.\".format(\n message, attempts_left, tries_str\n ),\n },\n }\n else:\n return {\n \"success\": True,\n \"data\": {\"status\": \"incorrect\", \"message\": message},\n }\n\n # Challenge already solved\n else:\n log(\n \"submissions\",\n \"[{date}] {name} submitted {submission} on {challenge_id} with kpm {kpm} [ALREADY SOLVED]\",\n submission=request_data.get(\"submission\", \"\").encode(\"utf-8\"),\n challenge_id=challenge_id,\n kpm=kpm,\n )\n return {\n \"success\": True,\n \"data\": {\n \"status\": \"already_solved\",\n \"message\": \"You already solved this\",\n },\n }\n\n\n@challenges_namespace.route(\"/<challenge_id>/solves\")\nclass ChallengeSolves(Resource):\n @check_challenge_visibility\n @check_score_visibility\n @during_ctf_time_only\n @require_verified_emails\n def get(self, challenge_id):\n response = []\n challenge = Challenges.query.filter_by(id=challenge_id).first_or_404()\n\n # TODO: Need a generic challenge visibility call.\n # However, it should be stated that a solve on a gated challenge is not considered private.\n if challenge.state == \"hidden\" and is_admin() is False:\n abort(404)\n\n Model = get_model()\n\n solves = (\n Solves.query.join(Model, Solves.account_id == Model.id)\n .filter(\n Solves.challenge_id == challenge_id,\n Model.banned == False,\n Model.hidden == False,\n )\n .order_by(Solves.date.asc())\n )\n\n freeze = get_config(\"freeze\")\n if freeze:\n preview = request.args.get(\"preview\")\n if (is_admin() is False) or (is_admin() is True and preview):\n dt = datetime.datetime.utcfromtimestamp(freeze)\n solves = solves.filter(Solves.date < dt)\n\n for solve in solves:\n response.append(\n {\n \"account_id\": solve.account_id,\n \"name\": solve.account.name,\n \"date\": isoformat(solve.date),\n \"account_url\": generate_account_url(account_id=solve.account_id),\n }\n )\n\n return {\"success\": True, \"data\": response}\n\n\n@challenges_namespace.route(\"/<challenge_id>/files\")\nclass ChallengeFiles(Resource):\n @admins_only\n def get(self, challenge_id):\n response = []\n\n challenge_files = ChallengeFilesModel.query.filter_by(\n challenge_id=challenge_id\n ).all()\n\n for f in challenge_files:\n response.append({\"id\": f.id, \"type\": f.type, \"location\": f.location})\n return {\"success\": True, \"data\": response}\n\n\n@challenges_namespace.route(\"/<challenge_id>/tags\")\nclass ChallengeTags(Resource):\n @admins_only\n def get(self, challenge_id):\n response = []\n\n tags = Tags.query.filter_by(challenge_id=challenge_id).all()\n\n for t in tags:\n response.append(\n {\"id\": t.id, \"challenge_id\": t.challenge_id, \"value\": t.value}\n )\n return {\"success\": True, \"data\": response}\n\n\n@challenges_namespace.route(\"/<challenge_id>/hints\")\nclass ChallengeHints(Resource):\n @admins_only\n def get(self, challenge_id):\n hints = Hints.query.filter_by(challenge_id=challenge_id).all()\n schema = HintSchema(many=True)\n response = schema.dump(hints)\n\n if response.errors:\n return {\"success\": False, \"errors\": response.errors}, 400\n\n return {\"success\": True, \"data\": response.data}\n\n\n@challenges_namespace.route(\"/<challenge_id>/flags\")\nclass ChallengeFlags(Resource):\n @admins_only\n def get(self, challenge_id):\n flags = Flags.query.filter_by(challenge_id=challenge_id).all()\n schema = FlagSchema(many=True)\n response = schema.dump(flags)\n\n if response.errors:\n return {\"success\": False, \"errors\": response.errors}, 400\n\n return {\"success\": True, \"data\": response.data}\n", "path": "CTFd/api/v1/challenges.py" } ]
diff --git a/CTFd/api/v1/challenges.py b/CTFd/api/v1/challenges.py index 440547010..5b5621f91 100644 --- a/CTFd/api/v1/challenges.py +++ b/CTFd/api/v1/challenges.py @@ -374,6 +374,7 @@ def get(self, challenge_id): response["solves"] = solves else: response["solves"] = None + solves = None if authed(): # Get current attempts for the user diff --git a/CTFd/themes/admin/assets/js/pages/challenge.js b/CTFd/themes/admin/assets/js/pages/challenge.js index dcd251cfd..d104549ba 100644 --- a/CTFd/themes/admin/assets/js/pages/challenge.js +++ b/CTFd/themes/admin/assets/js/pages/challenge.js @@ -399,7 +399,7 @@ $(() => { ezQuery({ title: "Missing Flags", body: - "This challenge does not have any flags meaning it is unsolveable. Are you sure you'd like to update this challenge?", + "This challenge does not have any flags meaning it may be unsolveable. Are you sure you'd like to update this challenge?", success: update_challenge }); } else { diff --git a/CTFd/themes/admin/static/js/pages/challenge.dev.js b/CTFd/themes/admin/static/js/pages/challenge.dev.js index 59e6bcb7e..4240a2cd5 100644 --- a/CTFd/themes/admin/static/js/pages/challenge.dev.js +++ b/CTFd/themes/admin/static/js/pages/challenge.dev.js @@ -222,7 +222,7 @@ eval("\n\nObject.defineProperty(exports, \"__esModule\", {\n value: true\n});\n /***/ (function(module, exports, __webpack_require__) { ; -eval("\n\n__webpack_require__(/*! ./main */ \"./CTFd/themes/admin/assets/js/pages/main.js\");\n\nvar _utils = __webpack_require__(/*! core/utils */ \"./CTFd/themes/core/assets/js/utils.js\");\n\nvar _jquery = _interopRequireDefault(__webpack_require__(/*! jquery */ \"./node_modules/jquery/dist/jquery.js\"));\n\n__webpack_require__(/*! bootstrap/js/dist/tab */ \"./node_modules/bootstrap/js/dist/tab.js\");\n\nvar _CTFd = _interopRequireDefault(__webpack_require__(/*! core/CTFd */ \"./CTFd/themes/core/assets/js/CTFd.js\"));\n\nvar _ezq = __webpack_require__(/*! core/ezq */ \"./CTFd/themes/core/assets/js/ezq.js\");\n\nvar _helpers = _interopRequireDefault(__webpack_require__(/*! core/helpers */ \"./CTFd/themes/core/assets/js/helpers.js\"));\n\nvar _files = __webpack_require__(/*! ../challenges/files */ \"./CTFd/themes/admin/assets/js/challenges/files.js\");\n\nvar _tags = __webpack_require__(/*! ../challenges/tags */ \"./CTFd/themes/admin/assets/js/challenges/tags.js\");\n\nvar _requirements = __webpack_require__(/*! ../challenges/requirements */ \"./CTFd/themes/admin/assets/js/challenges/requirements.js\");\n\nvar _styles = __webpack_require__(/*! ../styles */ \"./CTFd/themes/admin/assets/js/styles.js\");\n\nvar _hints = __webpack_require__(/*! ../challenges/hints */ \"./CTFd/themes/admin/assets/js/challenges/hints.js\");\n\nvar _flags = __webpack_require__(/*! ../challenges/flags */ \"./CTFd/themes/admin/assets/js/challenges/flags.js\");\n\nfunction _interopRequireDefault(obj) { return obj && obj.__esModule ? obj : { default: obj }; }\n\nvar md = _CTFd.default.lib.markdown();\n\nvar displayHint = function displayHint(data) {\n (0, _ezq.ezAlert)({\n title: \"Hint\",\n body: md.render(data.content),\n button: \"Got it!\"\n });\n};\n\nvar loadHint = function loadHint(id) {\n _CTFd.default.api.get_hint({\n hintId: id,\n preview: true\n }).then(function (response) {\n if (response.data.content) {\n displayHint(response.data);\n return;\n } // displayUnlock(id);\n\n });\n};\n\nfunction renderSubmissionResponse(response, cb) {\n var result = response.data;\n var result_message = (0, _jquery.default)(\"#result-message\");\n var result_notification = (0, _jquery.default)(\"#result-notification\");\n var answer_input = (0, _jquery.default)(\"#submission-input\");\n result_notification.removeClass();\n result_message.text(result.message);\n\n if (result.status === \"authentication_required\") {\n window.location = _CTFd.default.config.urlRoot + \"/login?next=\" + _CTFd.default.config.urlRoot + window.location.pathname + window.location.hash;\n return;\n } else if (result.status === \"incorrect\") {\n // Incorrect key\n result_notification.addClass(\"alert alert-danger alert-dismissable text-center\");\n result_notification.slideDown();\n answer_input.removeClass(\"correct\");\n answer_input.addClass(\"wrong\");\n setTimeout(function () {\n answer_input.removeClass(\"wrong\");\n }, 3000);\n } else if (result.status === \"correct\") {\n // Challenge Solved\n result_notification.addClass(\"alert alert-success alert-dismissable text-center\");\n result_notification.slideDown();\n (0, _jquery.default)(\".challenge-solves\").text(parseInt((0, _jquery.default)(\".challenge-solves\").text().split(\" \")[0]) + 1 + \" Solves\");\n answer_input.val(\"\");\n answer_input.removeClass(\"wrong\");\n answer_input.addClass(\"correct\");\n } else if (result.status === \"already_solved\") {\n // Challenge already solved\n result_notification.addClass(\"alert alert-info alert-dismissable text-center\");\n result_notification.slideDown();\n answer_input.addClass(\"correct\");\n } else if (result.status === \"paused\") {\n // CTF is paused\n result_notification.addClass(\"alert alert-warning alert-dismissable text-center\");\n result_notification.slideDown();\n } else if (result.status === \"ratelimited\") {\n // Keys per minute too high\n result_notification.addClass(\"alert alert-warning alert-dismissable text-center\");\n result_notification.slideDown();\n answer_input.addClass(\"too-fast\");\n setTimeout(function () {\n answer_input.removeClass(\"too-fast\");\n }, 3000);\n }\n\n setTimeout(function () {\n (0, _jquery.default)(\".alert\").slideUp();\n (0, _jquery.default)(\"#submit-key\").removeClass(\"disabled-button\");\n (0, _jquery.default)(\"#submit-key\").prop(\"disabled\", false);\n }, 3000);\n\n if (cb) {\n cb(result);\n }\n}\n\nfunction loadChalTemplate(challenge) {\n _CTFd.default._internal.challenge = {};\n\n _jquery.default.getScript(_CTFd.default.config.urlRoot + challenge.scripts.view, function () {\n var template_data = challenge.create;\n (0, _jquery.default)(\"#create-chal-entry-div\").html(template_data);\n (0, _styles.bindMarkdownEditors)();\n\n _jquery.default.getScript(_CTFd.default.config.urlRoot + challenge.scripts.create, function () {\n (0, _jquery.default)(\"#create-chal-entry-div form\").submit(function (event) {\n event.preventDefault();\n var params = (0, _jquery.default)(\"#create-chal-entry-div form\").serializeJSON();\n\n _CTFd.default.fetch(\"/api/v1/challenges\", {\n method: \"POST\",\n credentials: \"same-origin\",\n headers: {\n Accept: \"application/json\",\n \"Content-Type\": \"application/json\"\n },\n body: JSON.stringify(params)\n }).then(function (response) {\n return response.json();\n }).then(function (response) {\n if (response.success) {\n (0, _jquery.default)(\"#challenge-create-options #challenge_id\").val(response.data.id);\n (0, _jquery.default)(\"#challenge-create-options\").modal();\n }\n });\n });\n });\n });\n}\n\nfunction handleChallengeOptions(event) {\n event.preventDefault();\n var params = (0, _jquery.default)(event.target).serializeJSON(true);\n var flag_params = {\n challenge_id: params.challenge_id,\n content: params.flag || \"\",\n type: params.flag_type,\n data: params.flag_data ? params.flag_data : \"\"\n }; // Define a save_challenge function\n\n var save_challenge = function save_challenge() {\n _CTFd.default.fetch(\"/api/v1/challenges/\" + params.challenge_id, {\n method: \"PATCH\",\n credentials: \"same-origin\",\n headers: {\n Accept: \"application/json\",\n \"Content-Type\": \"application/json\"\n },\n body: JSON.stringify({\n state: params.state\n })\n }).then(function (response) {\n return response.json();\n }).then(function (data) {\n if (data.success) {\n setTimeout(function () {\n window.location = _CTFd.default.config.urlRoot + \"/admin/challenges/\" + params.challenge_id;\n }, 700);\n }\n });\n };\n\n Promise.all([// Save flag\n new Promise(function (resolve, _reject) {\n if (flag_params.content.length == 0) {\n resolve();\n return;\n }\n\n _CTFd.default.fetch(\"/api/v1/flags\", {\n method: \"POST\",\n credentials: \"same-origin\",\n headers: {\n Accept: \"application/json\",\n \"Content-Type\": \"application/json\"\n },\n body: JSON.stringify(flag_params)\n }).then(function (response) {\n resolve(response.json());\n });\n }), // Upload files\n new Promise(function (resolve, _reject) {\n var form = event.target;\n var data = {\n challenge: params.challenge_id,\n type: \"challenge\"\n };\n var filepath = (0, _jquery.default)(form.elements[\"file\"]).val();\n\n if (filepath) {\n _helpers.default.files.upload(form, data);\n }\n\n resolve();\n })]).then(function (_responses) {\n save_challenge();\n });\n}\n\nfunction createChallenge(_event) {\n var challenge = (0, _jquery.default)(this).find(\"option:selected\").data(\"meta\");\n\n if (challenge === undefined) {\n (0, _jquery.default)(\"#create-chal-entry-div\").empty();\n return;\n }\n\n loadChalTemplate(challenge);\n}\n\n(0, _jquery.default)(function () {\n (0, _jquery.default)(\".preview-challenge\").click(function (_e) {\n window.challenge = new Object();\n _CTFd.default._internal.challenge = {};\n\n _jquery.default.get(_CTFd.default.config.urlRoot + \"/api/v1/challenges/\" + window.CHALLENGE_ID, function (response) {\n var challenge = _CTFd.default._internal.challenge;\n var challenge_data = response.data;\n challenge_data[\"solves\"] = null;\n\n _jquery.default.getScript(_CTFd.default.config.urlRoot + challenge_data.type_data.scripts.view, function () {\n (0, _jquery.default)(\"#challenge-window\").empty();\n (0, _jquery.default)(\"#challenge-window\").append(challenge_data.view);\n (0, _jquery.default)(\"#challenge-window #challenge-input\").addClass(\"form-control\");\n (0, _jquery.default)(\"#challenge-window #challenge-submit\").addClass(\"btn btn-md btn-outline-secondary float-right\");\n (0, _jquery.default)(\".challenge-solves\").hide();\n (0, _jquery.default)(\".nav-tabs a\").click(function (e) {\n e.preventDefault();\n (0, _jquery.default)(this).tab(\"show\");\n }); // Handle modal toggling\n\n (0, _jquery.default)(\"#challenge-window\").on(\"hide.bs.modal\", function (_event) {\n (0, _jquery.default)(\"#challenge-input\").removeClass(\"wrong\");\n (0, _jquery.default)(\"#challenge-input\").removeClass(\"correct\");\n (0, _jquery.default)(\"#incorrect-key\").slideUp();\n (0, _jquery.default)(\"#correct-key\").slideUp();\n (0, _jquery.default)(\"#already-solved\").slideUp();\n (0, _jquery.default)(\"#too-fast\").slideUp();\n });\n (0, _jquery.default)(\".load-hint\").on(\"click\", function (_event) {\n loadHint((0, _jquery.default)(this).data(\"hint-id\"));\n });\n (0, _jquery.default)(\"#challenge-submit\").click(function (e) {\n e.preventDefault();\n (0, _jquery.default)(\"#challenge-submit\").addClass(\"disabled-button\");\n (0, _jquery.default)(\"#challenge-submit\").prop(\"disabled\", true);\n\n _CTFd.default._internal.challenge.submit(true).then(renderSubmissionResponse); // Preview passed as true\n\n });\n (0, _jquery.default)(\"#challenge-input\").keyup(function (event) {\n if (event.keyCode == 13) {\n (0, _jquery.default)(\"#challenge-submit\").click();\n }\n });\n challenge.postRender();\n window.location.replace(window.location.href.split(\"#\")[0] + \"#preview\");\n (0, _jquery.default)(\"#challenge-window\").modal();\n });\n });\n });\n (0, _jquery.default)(\".delete-challenge\").click(function (_e) {\n (0, _ezq.ezQuery)({\n title: \"Delete Challenge\",\n body: \"Are you sure you want to delete {0}\".format(\"<strong>\" + (0, _utils.htmlEntities)(window.CHALLENGE_NAME) + \"</strong>\"),\n success: function success() {\n _CTFd.default.fetch(\"/api/v1/challenges/\" + window.CHALLENGE_ID, {\n method: \"DELETE\"\n }).then(function (response) {\n return response.json();\n }).then(function (response) {\n if (response.success) {\n window.location = _CTFd.default.config.urlRoot + \"/admin/challenges\";\n }\n });\n }\n });\n });\n (0, _jquery.default)(\"#challenge-update-container > form\").submit(function (e) {\n e.preventDefault();\n var params = (0, _jquery.default)(e.target).serializeJSON(true);\n\n _CTFd.default.fetch(\"/api/v1/challenges/\" + window.CHALLENGE_ID + \"/flags\", {\n method: \"GET\",\n credentials: \"same-origin\",\n headers: {\n Accept: \"application/json\",\n \"Content-Type\": \"application/json\"\n }\n }).then(function (response) {\n return response.json();\n }).then(function (response) {\n var update_challenge = function update_challenge() {\n _CTFd.default.fetch(\"/api/v1/challenges/\" + window.CHALLENGE_ID, {\n method: \"PATCH\",\n credentials: \"same-origin\",\n headers: {\n Accept: \"application/json\",\n \"Content-Type\": \"application/json\"\n },\n body: JSON.stringify(params)\n }).then(function (response) {\n return response.json();\n }).then(function (response) {\n if (response.success) {\n (0, _jquery.default)(\".challenge-state\").text(response.data.state);\n\n switch (response.data.state) {\n case \"visible\":\n (0, _jquery.default)(\".challenge-state\").removeClass(\"badge-danger\").addClass(\"badge-success\");\n break;\n\n case \"hidden\":\n (0, _jquery.default)(\".challenge-state\").removeClass(\"badge-success\").addClass(\"badge-danger\");\n break;\n\n default:\n break;\n }\n\n (0, _ezq.ezToast)({\n title: \"Success\",\n body: \"Your challenge has been updated!\"\n });\n }\n });\n }; // Check if the challenge doesn't have any flags before marking visible\n\n\n if (response.data.length === 0 && params.state === \"visible\") {\n (0, _ezq.ezQuery)({\n title: \"Missing Flags\",\n body: \"This challenge does not have any flags meaning it is unsolveable. Are you sure you'd like to update this challenge?\",\n success: update_challenge\n });\n } else {\n update_challenge();\n }\n });\n });\n (0, _jquery.default)(\"#challenge-create-options form\").submit(handleChallengeOptions);\n (0, _jquery.default)(\"#tags-add-input\").keyup(_tags.addTag);\n (0, _jquery.default)(\".delete-tag\").click(_tags.deleteTag);\n (0, _jquery.default)(\"#prerequisite-add-form\").submit(_requirements.addRequirement);\n (0, _jquery.default)(\".delete-requirement\").click(_requirements.deleteRequirement);\n (0, _jquery.default)(\"#file-add-form\").submit(_files.addFile);\n (0, _jquery.default)(\".delete-file\").click(_files.deleteFile);\n (0, _jquery.default)(\"#hint-add-button\").click(_hints.showHintModal);\n (0, _jquery.default)(\".delete-hint\").click(_hints.deleteHint);\n (0, _jquery.default)(\".edit-hint\").click(_hints.showEditHintModal);\n (0, _jquery.default)(\"#hint-edit-form\").submit(_hints.editHint);\n (0, _jquery.default)(\"#flag-add-button\").click(_flags.addFlagModal);\n (0, _jquery.default)(\".delete-flag\").click(_flags.deleteFlag);\n (0, _jquery.default)(\"#flags-create-select\").change(_flags.flagTypeSelect);\n (0, _jquery.default)(\".edit-flag\").click(_flags.editFlagModal);\n\n _jquery.default.get(_CTFd.default.config.urlRoot + \"/api/v1/challenges/types\", function (response) {\n (0, _jquery.default)(\"#create-chals-select\").empty();\n var data = response.data;\n var chal_type_amt = Object.keys(data).length;\n\n if (chal_type_amt > 1) {\n var option = \"<option> -- </option>\";\n (0, _jquery.default)(\"#create-chals-select\").append(option);\n\n for (var key in data) {\n var challenge = data[key];\n\n var _option = (0, _jquery.default)(\"<option/>\");\n\n _option.attr(\"value\", challenge.type);\n\n _option.text(challenge.name);\n\n _option.data(\"meta\", challenge);\n\n (0, _jquery.default)(\"#create-chals-select\").append(_option);\n }\n\n (0, _jquery.default)(\"#create-chals-select-div\").show();\n (0, _jquery.default)(\"#create-chals-select\").val(\"standard\");\n loadChalTemplate(data[\"standard\"]);\n } else if (chal_type_amt == 1) {\n var _key = Object.keys(data)[0];\n (0, _jquery.default)(\"#create-chals-select\").empty();\n loadChalTemplate(data[_key]);\n }\n });\n\n (0, _jquery.default)(\"#create-chals-select\").change(createChallenge);\n});\n\n//# sourceURL=webpack:///./CTFd/themes/admin/assets/js/pages/challenge.js?"); +eval("\n\n__webpack_require__(/*! ./main */ \"./CTFd/themes/admin/assets/js/pages/main.js\");\n\nvar _utils = __webpack_require__(/*! core/utils */ \"./CTFd/themes/core/assets/js/utils.js\");\n\nvar _jquery = _interopRequireDefault(__webpack_require__(/*! jquery */ \"./node_modules/jquery/dist/jquery.js\"));\n\n__webpack_require__(/*! bootstrap/js/dist/tab */ \"./node_modules/bootstrap/js/dist/tab.js\");\n\nvar _CTFd = _interopRequireDefault(__webpack_require__(/*! core/CTFd */ \"./CTFd/themes/core/assets/js/CTFd.js\"));\n\nvar _ezq = __webpack_require__(/*! core/ezq */ \"./CTFd/themes/core/assets/js/ezq.js\");\n\nvar _helpers = _interopRequireDefault(__webpack_require__(/*! core/helpers */ \"./CTFd/themes/core/assets/js/helpers.js\"));\n\nvar _files = __webpack_require__(/*! ../challenges/files */ \"./CTFd/themes/admin/assets/js/challenges/files.js\");\n\nvar _tags = __webpack_require__(/*! ../challenges/tags */ \"./CTFd/themes/admin/assets/js/challenges/tags.js\");\n\nvar _requirements = __webpack_require__(/*! ../challenges/requirements */ \"./CTFd/themes/admin/assets/js/challenges/requirements.js\");\n\nvar _styles = __webpack_require__(/*! ../styles */ \"./CTFd/themes/admin/assets/js/styles.js\");\n\nvar _hints = __webpack_require__(/*! ../challenges/hints */ \"./CTFd/themes/admin/assets/js/challenges/hints.js\");\n\nvar _flags = __webpack_require__(/*! ../challenges/flags */ \"./CTFd/themes/admin/assets/js/challenges/flags.js\");\n\nfunction _interopRequireDefault(obj) { return obj && obj.__esModule ? obj : { default: obj }; }\n\nvar md = _CTFd.default.lib.markdown();\n\nvar displayHint = function displayHint(data) {\n (0, _ezq.ezAlert)({\n title: \"Hint\",\n body: md.render(data.content),\n button: \"Got it!\"\n });\n};\n\nvar loadHint = function loadHint(id) {\n _CTFd.default.api.get_hint({\n hintId: id,\n preview: true\n }).then(function (response) {\n if (response.data.content) {\n displayHint(response.data);\n return;\n } // displayUnlock(id);\n\n });\n};\n\nfunction renderSubmissionResponse(response, cb) {\n var result = response.data;\n var result_message = (0, _jquery.default)(\"#result-message\");\n var result_notification = (0, _jquery.default)(\"#result-notification\");\n var answer_input = (0, _jquery.default)(\"#submission-input\");\n result_notification.removeClass();\n result_message.text(result.message);\n\n if (result.status === \"authentication_required\") {\n window.location = _CTFd.default.config.urlRoot + \"/login?next=\" + _CTFd.default.config.urlRoot + window.location.pathname + window.location.hash;\n return;\n } else if (result.status === \"incorrect\") {\n // Incorrect key\n result_notification.addClass(\"alert alert-danger alert-dismissable text-center\");\n result_notification.slideDown();\n answer_input.removeClass(\"correct\");\n answer_input.addClass(\"wrong\");\n setTimeout(function () {\n answer_input.removeClass(\"wrong\");\n }, 3000);\n } else if (result.status === \"correct\") {\n // Challenge Solved\n result_notification.addClass(\"alert alert-success alert-dismissable text-center\");\n result_notification.slideDown();\n (0, _jquery.default)(\".challenge-solves\").text(parseInt((0, _jquery.default)(\".challenge-solves\").text().split(\" \")[0]) + 1 + \" Solves\");\n answer_input.val(\"\");\n answer_input.removeClass(\"wrong\");\n answer_input.addClass(\"correct\");\n } else if (result.status === \"already_solved\") {\n // Challenge already solved\n result_notification.addClass(\"alert alert-info alert-dismissable text-center\");\n result_notification.slideDown();\n answer_input.addClass(\"correct\");\n } else if (result.status === \"paused\") {\n // CTF is paused\n result_notification.addClass(\"alert alert-warning alert-dismissable text-center\");\n result_notification.slideDown();\n } else if (result.status === \"ratelimited\") {\n // Keys per minute too high\n result_notification.addClass(\"alert alert-warning alert-dismissable text-center\");\n result_notification.slideDown();\n answer_input.addClass(\"too-fast\");\n setTimeout(function () {\n answer_input.removeClass(\"too-fast\");\n }, 3000);\n }\n\n setTimeout(function () {\n (0, _jquery.default)(\".alert\").slideUp();\n (0, _jquery.default)(\"#submit-key\").removeClass(\"disabled-button\");\n (0, _jquery.default)(\"#submit-key\").prop(\"disabled\", false);\n }, 3000);\n\n if (cb) {\n cb(result);\n }\n}\n\nfunction loadChalTemplate(challenge) {\n _CTFd.default._internal.challenge = {};\n\n _jquery.default.getScript(_CTFd.default.config.urlRoot + challenge.scripts.view, function () {\n var template_data = challenge.create;\n (0, _jquery.default)(\"#create-chal-entry-div\").html(template_data);\n (0, _styles.bindMarkdownEditors)();\n\n _jquery.default.getScript(_CTFd.default.config.urlRoot + challenge.scripts.create, function () {\n (0, _jquery.default)(\"#create-chal-entry-div form\").submit(function (event) {\n event.preventDefault();\n var params = (0, _jquery.default)(\"#create-chal-entry-div form\").serializeJSON();\n\n _CTFd.default.fetch(\"/api/v1/challenges\", {\n method: \"POST\",\n credentials: \"same-origin\",\n headers: {\n Accept: \"application/json\",\n \"Content-Type\": \"application/json\"\n },\n body: JSON.stringify(params)\n }).then(function (response) {\n return response.json();\n }).then(function (response) {\n if (response.success) {\n (0, _jquery.default)(\"#challenge-create-options #challenge_id\").val(response.data.id);\n (0, _jquery.default)(\"#challenge-create-options\").modal();\n }\n });\n });\n });\n });\n}\n\nfunction handleChallengeOptions(event) {\n event.preventDefault();\n var params = (0, _jquery.default)(event.target).serializeJSON(true);\n var flag_params = {\n challenge_id: params.challenge_id,\n content: params.flag || \"\",\n type: params.flag_type,\n data: params.flag_data ? params.flag_data : \"\"\n }; // Define a save_challenge function\n\n var save_challenge = function save_challenge() {\n _CTFd.default.fetch(\"/api/v1/challenges/\" + params.challenge_id, {\n method: \"PATCH\",\n credentials: \"same-origin\",\n headers: {\n Accept: \"application/json\",\n \"Content-Type\": \"application/json\"\n },\n body: JSON.stringify({\n state: params.state\n })\n }).then(function (response) {\n return response.json();\n }).then(function (data) {\n if (data.success) {\n setTimeout(function () {\n window.location = _CTFd.default.config.urlRoot + \"/admin/challenges/\" + params.challenge_id;\n }, 700);\n }\n });\n };\n\n Promise.all([// Save flag\n new Promise(function (resolve, _reject) {\n if (flag_params.content.length == 0) {\n resolve();\n return;\n }\n\n _CTFd.default.fetch(\"/api/v1/flags\", {\n method: \"POST\",\n credentials: \"same-origin\",\n headers: {\n Accept: \"application/json\",\n \"Content-Type\": \"application/json\"\n },\n body: JSON.stringify(flag_params)\n }).then(function (response) {\n resolve(response.json());\n });\n }), // Upload files\n new Promise(function (resolve, _reject) {\n var form = event.target;\n var data = {\n challenge: params.challenge_id,\n type: \"challenge\"\n };\n var filepath = (0, _jquery.default)(form.elements[\"file\"]).val();\n\n if (filepath) {\n _helpers.default.files.upload(form, data);\n }\n\n resolve();\n })]).then(function (_responses) {\n save_challenge();\n });\n}\n\nfunction createChallenge(_event) {\n var challenge = (0, _jquery.default)(this).find(\"option:selected\").data(\"meta\");\n\n if (challenge === undefined) {\n (0, _jquery.default)(\"#create-chal-entry-div\").empty();\n return;\n }\n\n loadChalTemplate(challenge);\n}\n\n(0, _jquery.default)(function () {\n (0, _jquery.default)(\".preview-challenge\").click(function (_e) {\n window.challenge = new Object();\n _CTFd.default._internal.challenge = {};\n\n _jquery.default.get(_CTFd.default.config.urlRoot + \"/api/v1/challenges/\" + window.CHALLENGE_ID, function (response) {\n var challenge = _CTFd.default._internal.challenge;\n var challenge_data = response.data;\n challenge_data[\"solves\"] = null;\n\n _jquery.default.getScript(_CTFd.default.config.urlRoot + challenge_data.type_data.scripts.view, function () {\n (0, _jquery.default)(\"#challenge-window\").empty();\n (0, _jquery.default)(\"#challenge-window\").append(challenge_data.view);\n (0, _jquery.default)(\"#challenge-window #challenge-input\").addClass(\"form-control\");\n (0, _jquery.default)(\"#challenge-window #challenge-submit\").addClass(\"btn btn-md btn-outline-secondary float-right\");\n (0, _jquery.default)(\".challenge-solves\").hide();\n (0, _jquery.default)(\".nav-tabs a\").click(function (e) {\n e.preventDefault();\n (0, _jquery.default)(this).tab(\"show\");\n }); // Handle modal toggling\n\n (0, _jquery.default)(\"#challenge-window\").on(\"hide.bs.modal\", function (_event) {\n (0, _jquery.default)(\"#challenge-input\").removeClass(\"wrong\");\n (0, _jquery.default)(\"#challenge-input\").removeClass(\"correct\");\n (0, _jquery.default)(\"#incorrect-key\").slideUp();\n (0, _jquery.default)(\"#correct-key\").slideUp();\n (0, _jquery.default)(\"#already-solved\").slideUp();\n (0, _jquery.default)(\"#too-fast\").slideUp();\n });\n (0, _jquery.default)(\".load-hint\").on(\"click\", function (_event) {\n loadHint((0, _jquery.default)(this).data(\"hint-id\"));\n });\n (0, _jquery.default)(\"#challenge-submit\").click(function (e) {\n e.preventDefault();\n (0, _jquery.default)(\"#challenge-submit\").addClass(\"disabled-button\");\n (0, _jquery.default)(\"#challenge-submit\").prop(\"disabled\", true);\n\n _CTFd.default._internal.challenge.submit(true).then(renderSubmissionResponse); // Preview passed as true\n\n });\n (0, _jquery.default)(\"#challenge-input\").keyup(function (event) {\n if (event.keyCode == 13) {\n (0, _jquery.default)(\"#challenge-submit\").click();\n }\n });\n challenge.postRender();\n window.location.replace(window.location.href.split(\"#\")[0] + \"#preview\");\n (0, _jquery.default)(\"#challenge-window\").modal();\n });\n });\n });\n (0, _jquery.default)(\".delete-challenge\").click(function (_e) {\n (0, _ezq.ezQuery)({\n title: \"Delete Challenge\",\n body: \"Are you sure you want to delete {0}\".format(\"<strong>\" + (0, _utils.htmlEntities)(window.CHALLENGE_NAME) + \"</strong>\"),\n success: function success() {\n _CTFd.default.fetch(\"/api/v1/challenges/\" + window.CHALLENGE_ID, {\n method: \"DELETE\"\n }).then(function (response) {\n return response.json();\n }).then(function (response) {\n if (response.success) {\n window.location = _CTFd.default.config.urlRoot + \"/admin/challenges\";\n }\n });\n }\n });\n });\n (0, _jquery.default)(\"#challenge-update-container > form\").submit(function (e) {\n e.preventDefault();\n var params = (0, _jquery.default)(e.target).serializeJSON(true);\n\n _CTFd.default.fetch(\"/api/v1/challenges/\" + window.CHALLENGE_ID + \"/flags\", {\n method: \"GET\",\n credentials: \"same-origin\",\n headers: {\n Accept: \"application/json\",\n \"Content-Type\": \"application/json\"\n }\n }).then(function (response) {\n return response.json();\n }).then(function (response) {\n var update_challenge = function update_challenge() {\n _CTFd.default.fetch(\"/api/v1/challenges/\" + window.CHALLENGE_ID, {\n method: \"PATCH\",\n credentials: \"same-origin\",\n headers: {\n Accept: \"application/json\",\n \"Content-Type\": \"application/json\"\n },\n body: JSON.stringify(params)\n }).then(function (response) {\n return response.json();\n }).then(function (response) {\n if (response.success) {\n (0, _jquery.default)(\".challenge-state\").text(response.data.state);\n\n switch (response.data.state) {\n case \"visible\":\n (0, _jquery.default)(\".challenge-state\").removeClass(\"badge-danger\").addClass(\"badge-success\");\n break;\n\n case \"hidden\":\n (0, _jquery.default)(\".challenge-state\").removeClass(\"badge-success\").addClass(\"badge-danger\");\n break;\n\n default:\n break;\n }\n\n (0, _ezq.ezToast)({\n title: \"Success\",\n body: \"Your challenge has been updated!\"\n });\n }\n });\n }; // Check if the challenge doesn't have any flags before marking visible\n\n\n if (response.data.length === 0 && params.state === \"visible\") {\n (0, _ezq.ezQuery)({\n title: \"Missing Flags\",\n body: \"This challenge does not have any flags meaning it may be unsolveable. Are you sure you'd like to update this challenge?\",\n success: update_challenge\n });\n } else {\n update_challenge();\n }\n });\n });\n (0, _jquery.default)(\"#challenge-create-options form\").submit(handleChallengeOptions);\n (0, _jquery.default)(\"#tags-add-input\").keyup(_tags.addTag);\n (0, _jquery.default)(\".delete-tag\").click(_tags.deleteTag);\n (0, _jquery.default)(\"#prerequisite-add-form\").submit(_requirements.addRequirement);\n (0, _jquery.default)(\".delete-requirement\").click(_requirements.deleteRequirement);\n (0, _jquery.default)(\"#file-add-form\").submit(_files.addFile);\n (0, _jquery.default)(\".delete-file\").click(_files.deleteFile);\n (0, _jquery.default)(\"#hint-add-button\").click(_hints.showHintModal);\n (0, _jquery.default)(\".delete-hint\").click(_hints.deleteHint);\n (0, _jquery.default)(\".edit-hint\").click(_hints.showEditHintModal);\n (0, _jquery.default)(\"#hint-edit-form\").submit(_hints.editHint);\n (0, _jquery.default)(\"#flag-add-button\").click(_flags.addFlagModal);\n (0, _jquery.default)(\".delete-flag\").click(_flags.deleteFlag);\n (0, _jquery.default)(\"#flags-create-select\").change(_flags.flagTypeSelect);\n (0, _jquery.default)(\".edit-flag\").click(_flags.editFlagModal);\n\n _jquery.default.get(_CTFd.default.config.urlRoot + \"/api/v1/challenges/types\", function (response) {\n (0, _jquery.default)(\"#create-chals-select\").empty();\n var data = response.data;\n var chal_type_amt = Object.keys(data).length;\n\n if (chal_type_amt > 1) {\n var option = \"<option> -- </option>\";\n (0, _jquery.default)(\"#create-chals-select\").append(option);\n\n for (var key in data) {\n var challenge = data[key];\n\n var _option = (0, _jquery.default)(\"<option/>\");\n\n _option.attr(\"value\", challenge.type);\n\n _option.text(challenge.name);\n\n _option.data(\"meta\", challenge);\n\n (0, _jquery.default)(\"#create-chals-select\").append(_option);\n }\n\n (0, _jquery.default)(\"#create-chals-select-div\").show();\n (0, _jquery.default)(\"#create-chals-select\").val(\"standard\");\n loadChalTemplate(data[\"standard\"]);\n } else if (chal_type_amt == 1) {\n var _key = Object.keys(data)[0];\n (0, _jquery.default)(\"#create-chals-select\").empty();\n loadChalTemplate(data[_key]);\n }\n });\n\n (0, _jquery.default)(\"#create-chals-select\").change(createChallenge);\n});\n\n//# sourceURL=webpack:///./CTFd/themes/admin/assets/js/pages/challenge.js?"); /***/ }) diff --git a/CTFd/themes/admin/static/js/pages/challenge.min.js b/CTFd/themes/admin/static/js/pages/challenge.min.js index 6634d6607..9fb8f53bf 100644 --- a/CTFd/themes/admin/static/js/pages/challenge.min.js +++ b/CTFd/themes/admin/static/js/pages/challenge.min.js @@ -1 +1 @@ -!function(d){function e(e){for(var t,o,n=e[0],a=e[1],s=e[2],i=0,l=[];i<n.length;i++)o=n[i],c[o]&&l.push(c[o][0]),c[o]=0;for(t in a)Object.prototype.hasOwnProperty.call(a,t)&&(d[t]=a[t]);for(m&&m(e);l.length;)l.shift()();return u.push.apply(u,s||[]),r()}function r(){for(var e,t=0;t<u.length;t++){for(var o=u[t],n=!0,a=1;a<o.length;a++){var s=o[a];0!==c[s]&&(n=!1)}n&&(u.splice(t--,1),e=i(i.s=o[0]))}return e}var o={},c={3:0,7:0},u=[];function i(e){if(o[e])return o[e].exports;var t=o[e]={i:e,l:!1,exports:{}};return d[e].call(t.exports,t,t.exports,i),t.l=!0,t.exports}i.m=d,i.c=o,i.d=function(e,t,o){i.o(e,t)||Object.defineProperty(e,t,{enumerable:!0,get:o})},i.r=function(e){"undefined"!=typeof Symbol&&Symbol.toStringTag&&Object.defineProperty(e,Symbol.toStringTag,{value:"Module"}),Object.defineProperty(e,"__esModule",{value:!0})},i.t=function(t,e){if(1&e&&(t=i(t)),8&e)return t;if(4&e&&"object"==typeof t&&t&&t.__esModule)return t;var o=Object.create(null);if(i.r(o),Object.defineProperty(o,"default",{enumerable:!0,value:t}),2&e&&"string"!=typeof t)for(var n in t)i.d(o,n,function(e){return t[e]}.bind(null,n));return o},i.n=function(e){var t=e&&e.__esModule?function(){return e.default}:function(){return e};return i.d(t,"a",t),t},i.o=function(e,t){return Object.prototype.hasOwnProperty.call(e,t)},i.p="/themes/admin/static/js";var t=window.webpackJsonp=window.webpackJsonp||[],n=t.push.bind(t);t.push=e,t=t.slice();for(var a=0;a<t.length;a++)e(t[a]);var m=n;u.push(["./CTFd/themes/admin/assets/js/pages/challenge.js",0,1]),r()}({"./CTFd/themes/admin/assets/js/challenges/files.js":function(e,t,o){Object.defineProperty(t,"__esModule",{value:!0}),t.addFile=function(e){e.preventDefault();var t=e.target,o={challenge:window.CHALLENGE_ID,type:"challenge"};s.default.files.upload(t,o,function(e){setTimeout(function(){window.location.reload()},700)})},t.deleteFile=function(e){var t=(0,n.default)(this).attr("file-id"),o=(0,n.default)(this).parent().parent();(0,i.ezQuery)({title:"Delete Files",body:"Are you sure you want to delete this file?",success:function(){a.default.fetch("/api/v1/files/"+t,{method:"DELETE"}).then(function(e){return e.json()}).then(function(e){e.success&&o.remove()})}})};var n=l(o("./node_modules/jquery/dist/jquery.js")),a=l(o("./CTFd/themes/core/assets/js/CTFd.js")),s=l(o("./CTFd/themes/core/assets/js/helpers.js")),i=o("./CTFd/themes/core/assets/js/ezq.js");function l(e){return e&&e.__esModule?e:{default:e}}},"./CTFd/themes/admin/assets/js/challenges/flags.js":function(e,t,o){Object.defineProperty(t,"__esModule",{value:!0}),t.deleteFlag=function(e){e.preventDefault();var t=(0,s.default)(this).attr("flag-id"),o=(0,s.default)(this).parent().parent();(0,n.ezQuery)({title:"Delete Flag",body:"Are you sure you want to delete this flag?",success:function(){i.default.fetch("/api/v1/flags/"+t,{method:"DELETE"}).then(function(e){return e.json()}).then(function(e){e.success&&o.remove()})}})},t.addFlagModal=function(e){s.default.get(i.default.config.urlRoot+"/api/v1/flags/types",function(e){var t=e.data,o=(0,s.default)("#flags-create-select");o.empty();var n=(0,s.default)("<option> -- </option>");for(var a in o.append(n),t)t.hasOwnProperty(a)&&(n=(0,s.default)("<option value='{0}'>{1}</option>".format(a,t[a].name)),o.append(n));(0,s.default)("#flag-edit-modal").modal()}),(0,s.default)("#flag-edit-modal form").submit(function(e){e.preventDefault();var t=(0,s.default)(this).serializeJSON(!0);t.challenge=window.CHALLENGE_ID,i.default.fetch("/api/v1/flags",{method:"POST",credentials:"same-origin",headers:{Accept:"application/json","Content-Type":"application/json"},body:JSON.stringify(t)}).then(function(e){return e.json()}).then(function(e){window.location.reload()})}),(0,s.default)("#flag-edit-modal").modal()},t.editFlagModal=function(e){e.preventDefault();var n=(0,s.default)(this).attr("flag-id"),a=(0,s.default)(this).parent().parent();s.default.get(i.default.config.urlRoot+"/api/v1/flags/"+n,function(e){var o=e.data;s.default.get(i.default.config.urlRoot+o.templates.update,function(e){(0,s.default)("#edit-flags form").empty(),(0,s.default)("#edit-flags form").off();var t=l.default.compile(e);(0,s.default)("#edit-flags form").append(t.render(o)),(0,s.default)("#edit-flags form").submit(function(e){e.preventDefault();var t=(0,s.default)("#edit-flags form").serializeJSON();i.default.fetch("/api/v1/flags/"+n,{method:"PATCH",credentials:"same-origin",headers:{Accept:"application/json","Content-Type":"application/json"},body:JSON.stringify(t)}).then(function(e){return e.json()}).then(function(e){e.success&&((0,s.default)(a).find(".flag-content").text(e.data.content),(0,s.default)("#edit-flags").modal("toggle"))})}),(0,s.default)("#edit-flags").modal()})})},t.flagTypeSelect=function(e){e.preventDefault();var t=(0,s.default)(this).find("option:selected").text();s.default.get(i.default.config.urlRoot+"/api/v1/flags/types/"+t,function(e){var t=e.data;s.default.get(i.default.config.urlRoot+t.templates.create,function(e){var t=l.default.compile(e);(0,s.default)("#create-keys-entry-div").html(t.render()),(0,s.default)("#create-keys-button-div").show()})})};var s=a(o("./node_modules/jquery/dist/jquery.js")),i=a(o("./CTFd/themes/core/assets/js/CTFd.js")),l=a(o("./node_modules/nunjucks/browser/nunjucks.js")),n=o("./CTFd/themes/core/assets/js/ezq.js");function a(e){return e&&e.__esModule?e:{default:e}}},"./CTFd/themes/admin/assets/js/challenges/hints.js":function(e,t,o){Object.defineProperty(t,"__esModule",{value:!0}),t.showHintModal=function(e){e.preventDefault(),(0,a.default)("#hint-edit-modal form").find("input, textarea").val("").trigger("change"),(0,a.default)("#hint-edit-form textarea").each(function(e,t){t.hasOwnProperty("codemirror")&&t.codemirror.refresh()}),(0,a.default)("#hint-edit-modal").modal()},t.showEditHintModal=function(e){e.preventDefault();var t=(0,a.default)(this).attr("hint-id");s.default.fetch("/api/v1/hints/"+t+"?preview=true",{method:"GET",credentials:"same-origin",headers:{Accept:"application/json","Content-Type":"application/json"}}).then(function(e){return e.json()}).then(function(e){e.success&&((0,a.default)("#hint-edit-form input[name=content],textarea[name=content]").val(e.data.content).trigger("change"),(0,a.default)("#hint-edit-modal").on("shown.bs.modal",function(){(0,a.default)("#hint-edit-form textarea").each(function(e,t){t.hasOwnProperty("codemirror")&&t.codemirror.refresh()})}).on("hide.bs.modal",function(){(0,a.default)("#hint-edit-form textarea").each(function(e,t){(0,a.default)(t).val("").trigger("change"),t.hasOwnProperty("codemirror")&&t.codemirror.refresh()})}),(0,a.default)("#hint-edit-form input[name=cost]").val(e.data.cost),(0,a.default)("#hint-edit-form input[name=id]").val(e.data.id),(0,a.default)("#hint-edit-modal").modal())})},t.deleteHint=function(e){e.preventDefault();var t=(0,a.default)(this).attr("hint-id"),o=(0,a.default)(this).parent().parent();(0,n.ezQuery)({title:"Delete Hint",body:"Are you sure you want to delete this hint?",success:function(){s.default.fetch("/api/v1/hints/"+t,{method:"DELETE"}).then(function(e){return e.json()}).then(function(e){e.success&&o.remove()})}})},t.editHint=function(e){e.preventDefault();var t=(0,a.default)(this).serializeJSON(!0);t.challenge=window.CHALLENGE_ID;var o="POST",n="/api/v1/hints";t.id&&(o="PATCH",n="/api/v1/hints/"+t.id);s.default.fetch(n,{method:o,credentials:"same-origin",headers:{Accept:"application/json","Content-Type":"application/json"},body:JSON.stringify(t)}).then(function(e){return e.json()}).then(function(e){e.success&&window.location.reload()})};var a=i(o("./node_modules/jquery/dist/jquery.js")),s=i(o("./CTFd/themes/core/assets/js/CTFd.js")),n=o("./CTFd/themes/core/assets/js/ezq.js");function i(e){return e&&e.__esModule?e:{default:e}}},"./CTFd/themes/admin/assets/js/challenges/requirements.js":function(e,t,o){Object.defineProperty(t,"__esModule",{value:!0}),t.addRequirement=function(e){e.preventDefault();var t=(0,a.default)("#prerequisite-add-form").serializeJSON();if(!t.prerequisite)return;window.CHALLENGE_REQUIREMENTS.prerequisites.push(parseInt(t.prerequisite));var o={requirements:window.CHALLENGE_REQUIREMENTS};s.default.fetch("/api/v1/challenges/"+window.CHALLENGE_ID,{method:"PATCH",credentials:"same-origin",headers:{Accept:"application/json","Content-Type":"application/json"},body:JSON.stringify(o)}).then(function(e){return e.json()}).then(function(e){e.success&&window.location.reload()})},t.deleteRequirement=function(e){var t=(0,a.default)(this).attr("challenge-id"),o=(0,a.default)(this).parent().parent();window.CHALLENGE_REQUIREMENTS.prerequisites.pop(t);var n={requirements:window.CHALLENGE_REQUIREMENTS};s.default.fetch("/api/v1/challenges/"+window.CHALLENGE_ID,{method:"PATCH",credentials:"same-origin",headers:{Accept:"application/json","Content-Type":"application/json"},body:JSON.stringify(n)}).then(function(e){return e.json()}).then(function(e){e.success&&o.remove()})};var a=n(o("./node_modules/jquery/dist/jquery.js")),s=n(o("./CTFd/themes/core/assets/js/CTFd.js"));function n(e){return e&&e.__esModule?e:{default:e}}},"./CTFd/themes/admin/assets/js/challenges/tags.js":function(e,t,o){Object.defineProperty(t,"__esModule",{value:!0}),t.deleteTag=i,t.addTag=function(e){if(13!=e.keyCode)return;var t=(0,n.default)(this),o={value:t.val(),challenge:window.CHALLENGE_ID};a.default.api.post_tag_list({},o).then(function(e){if(e.success){var t=(0,n.default)("<span class='badge badge-primary mx-1 challenge-tag'><span>{0}</span><a class='btn-fa delete-tag' tag-id='{1}'>&times;</a></span>".format(e.data.value,e.data.id));(0,n.default)("#challenge-tags").append(t),t.click(i)}}),t.val("")};var n=s(o("./node_modules/jquery/dist/jquery.js")),a=s(o("./CTFd/themes/core/assets/js/CTFd.js"));function s(e){return e&&e.__esModule?e:{default:e}}function i(e){var t=(0,n.default)(this),o=t.attr("tag-id");a.default.api.delete_tag({tagId:o}).then(function(e){e.success&&t.parent().remove()})}},"./CTFd/themes/admin/assets/js/components/files/MediaLibrary.vue":function(e,t,o){o.r(t);var n=o("./CTFd/themes/admin/assets/js/components/files/MediaLibrary.vue?vue&type=template&id=50f8d42a&"),a=o("./CTFd/themes/admin/assets/js/components/files/MediaLibrary.vue?vue&type=script&lang=js&");for(var s in a)"default"!==s&&function(e){o.d(t,e,function(){return a[e]})}(s);var i=o("./node_modules/vue-loader/lib/runtime/componentNormalizer.js"),l=Object(i.a)(a.default,n.a,n.b,!1,null,null,null);l.options.__file="CTFd/themes/admin/assets/js/components/files/MediaLibrary.vue",t.default=l.exports},"./CTFd/themes/admin/assets/js/components/files/MediaLibrary.vue?vue&type=script&lang=js&":function(e,t,o){o.r(t);var n=o("./node_modules/babel-loader/lib/index.js?!./node_modules/vue-loader/lib/index.js?!./CTFd/themes/admin/assets/js/components/files/MediaLibrary.vue?vue&type=script&lang=js&"),a=o.n(n);for(var s in n)"default"!==s&&function(e){o.d(t,e,function(){return n[e]})}(s);t.default=a.a},"./CTFd/themes/admin/assets/js/components/files/MediaLibrary.vue?vue&type=template&id=50f8d42a&":function(e,t,o){function n(){var o=this,e=o.$createElement,n=o._self._c||e;return n("div",{staticClass:"modal fade",attrs:{id:"media-modal",tabindex:"-1"}},[n("div",{staticClass:"modal-dialog modal-lg"},[n("div",{staticClass:"modal-content"},[o._m(0),o._v(" "),n("div",{staticClass:"modal-body"},[n("div",{staticClass:"modal-header"},[n("div",{staticClass:"container"},[n("div",{staticClass:"row mh-100"},[n("div",{staticClass:"col-md-6",attrs:{id:"media-library-list"}},o._l(o.files,function(t){return n("div",{key:t.id,staticClass:"media-item-wrapper"},[n("a",{attrs:{href:"javascript:void(0)"},on:{click:function(e){return o.selectFile(t),!1}}},[n("i",{class:o.getIconClass(t.location),attrs:{"aria-hidden":"true"}}),o._v(" "),n("small",{staticClass:"media-item-title"},[o._v(o._s(t.location.split("/").pop()))])])])}),0),o._v(" "),n("div",{staticClass:"col-md-6",attrs:{id:"media-library-details"}},[n("h4",{staticClass:"text-center"},[o._v("Media Details")]),o._v(" "),n("div",{attrs:{id:"media-item"}},[n("div",{staticClass:"text-center",attrs:{id:"media-icon"}},[this.selectedFile?n("div",["far fa-file-image"===o.getIconClass(this.selectedFile.location)?n("div",[n("img",{staticStyle:{"max-width":"100%","max-height":"100%","object-fit":"contain"},attrs:{src:o.buildSelectedFileUrl()}})]):n("div",[n("i",{class:o.getIconClass(this.selectedFile.location)+" fa-4x",attrs:{"aria-hidden":"true"}})])]):o._e()]),o._v(" "),n("br"),o._v(" "),this.selectedFile?n("div",{staticClass:"text-center",attrs:{id:"media-filename"}},[n("a",{attrs:{href:o.buildSelectedFileUrl(),target:"_blank"}},[o._v("\n "+o._s(this.selectedFile.location.split("/").pop())+"\n ")])]):o._e(),o._v(" "),n("br"),o._v(" "),n("div",{staticClass:"form-group"},[this.selectedFile?n("div",[o._v("\n Link:\n "),n("input",{staticClass:"form-control",attrs:{type:"text",id:"media-link",readonly:""},domProps:{value:o.buildSelectedFileUrl()}})]):n("div",[o._v("\n Link:\n "),n("input",{staticClass:"form-control",attrs:{type:"text",id:"media-link",readonly:""}})])]),o._v(" "),n("div",{staticClass:"form-group text-center"},[n("div",{staticClass:"row"},[n("div",{staticClass:"col-md-6"},[n("button",{staticClass:"btn btn-success w-100",attrs:{id:"media-insert","data-toggle":"tooltip","data-placement":"top",title:"Insert link into editor"},on:{click:o.insertSelectedFile}},[o._v("\n Insert\n ")])]),o._v(" "),n("div",{staticClass:"col-md-3"},[n("button",{staticClass:"btn btn-primary w-100",attrs:{id:"media-download","data-toggle":"tooltip","data-placement":"top",title:"Download file"},on:{click:o.downloadSelectedFile}},[n("i",{staticClass:"fas fa-download"})])]),o._v(" "),n("div",{staticClass:"col-md-3"},[n("button",{staticClass:"btn btn-danger w-100",attrs:{id:"media-delete","data-toggle":"tooltip","data-placement":"top",title:"Delete file"},on:{click:o.deleteSelectedFile}},[n("i",{staticClass:"far fa-trash-alt"})])])])])])])])])]),o._v(" "),o._m(1)]),o._v(" "),n("div",{staticClass:"modal-footer"},[n("div",{staticClass:"float-right"},[n("button",{staticClass:"btn btn-primary media-upload-button",attrs:{type:"submit"},on:{click:o.uploadChosenFiles}},[o._v("\n Upload\n ")])])])])])])}var a=[function(){var e=this,t=e.$createElement,o=e._self._c||t;return o("div",{staticClass:"modal-header"},[o("div",{staticClass:"container"},[o("div",{staticClass:"row"},[o("div",{staticClass:"col-md-12"},[o("h3",{staticClass:"text-center"},[e._v("Media Library")])])])]),e._v(" "),o("button",{staticClass:"close",attrs:{type:"button","data-dismiss":"modal","aria-label":"Close"}},[o("span",{attrs:{"aria-hidden":"true"}},[e._v("×")])])])},function(){var e=this,t=e.$createElement,o=e._self._c||t;return o("form",{attrs:{id:"media-library-upload",enctype:"multipart/form-data"}},[o("div",{staticClass:"form-group"},[o("label",{attrs:{for:"media-files"}},[e._v("\n Upload Files\n ")]),e._v(" "),o("input",{staticClass:"form-control-file",attrs:{type:"file",name:"file",id:"media-files",multiple:""}}),e._v(" "),o("sub",{staticClass:"help-block"},[e._v("\n Attach multiple files using Control+Click or Cmd+Click.\n ")])]),e._v(" "),o("input",{attrs:{type:"hidden",value:"page",name:"type"}})])}];n._withStripped=!0,o.d(t,"a",function(){return n}),o.d(t,"b",function(){return a})},"./CTFd/themes/admin/assets/js/pages/challenge.js":function(e,t,o){o("./CTFd/themes/admin/assets/js/pages/main.js");var n=o("./CTFd/themes/core/assets/js/utils.js"),l=f(o("./node_modules/jquery/dist/jquery.js"));o("./node_modules/bootstrap/js/dist/tab.js");var i=f(o("./CTFd/themes/core/assets/js/CTFd.js")),a=o("./CTFd/themes/core/assets/js/ezq.js"),d=f(o("./CTFd/themes/core/assets/js/helpers.js")),s=o("./CTFd/themes/admin/assets/js/challenges/files.js"),r=o("./CTFd/themes/admin/assets/js/challenges/tags.js"),c=o("./CTFd/themes/admin/assets/js/challenges/requirements.js"),u=o("./CTFd/themes/admin/assets/js/styles.js"),m=o("./CTFd/themes/admin/assets/js/challenges/hints.js"),p=o("./CTFd/themes/admin/assets/js/challenges/flags.js");function f(e){return e&&e.__esModule?e:{default:e}}function h(e){i.default.api.get_hint({hintId:e,preview:!0}).then(function(e){e.data.content&&function(e){(0,a.ezAlert)({title:"Hint",body:j.render(e.content),button:"Got it!"})}(e.data)})}var j=i.default.lib.markdown();function g(e,t){var o=e.data,n=(0,l.default)("#result-message"),a=(0,l.default)("#result-notification"),s=(0,l.default)("#submission-input");a.removeClass(),n.text(o.message),"authentication_required"!==o.status?("incorrect"===o.status?(a.addClass("alert alert-danger alert-dismissable text-center"),a.slideDown(),s.removeClass("correct"),s.addClass("wrong"),setTimeout(function(){s.removeClass("wrong")},3e3)):"correct"===o.status?(a.addClass("alert alert-success alert-dismissable text-center"),a.slideDown(),(0,l.default)(".challenge-solves").text(parseInt((0,l.default)(".challenge-solves").text().split(" ")[0])+1+" Solves"),s.val(""),s.removeClass("wrong"),s.addClass("correct")):"already_solved"===o.status?(a.addClass("alert alert-info alert-dismissable text-center"),a.slideDown(),s.addClass("correct")):"paused"===o.status?(a.addClass("alert alert-warning alert-dismissable text-center"),a.slideDown()):"ratelimited"===o.status&&(a.addClass("alert alert-warning alert-dismissable text-center"),a.slideDown(),s.addClass("too-fast"),setTimeout(function(){s.removeClass("too-fast")},3e3)),setTimeout(function(){(0,l.default)(".alert").slideUp(),(0,l.default)("#submit-key").removeClass("disabled-button"),(0,l.default)("#submit-key").prop("disabled",!1)},3e3),t&&t(o)):window.location=i.default.config.urlRoot+"/login?next="+i.default.config.urlRoot+window.location.pathname+window.location.hash}function _(t){i.default._internal.challenge={},l.default.getScript(i.default.config.urlRoot+t.scripts.view,function(){var e=t.create;(0,l.default)("#create-chal-entry-div").html(e),(0,u.bindMarkdownEditors)(),l.default.getScript(i.default.config.urlRoot+t.scripts.create,function(){(0,l.default)("#create-chal-entry-div form").submit(function(e){e.preventDefault();var t=(0,l.default)("#create-chal-entry-div form").serializeJSON();i.default.fetch("/api/v1/challenges",{method:"POST",credentials:"same-origin",headers:{Accept:"application/json","Content-Type":"application/json"},body:JSON.stringify(t)}).then(function(e){return e.json()}).then(function(e){e.success&&((0,l.default)("#challenge-create-options #challenge_id").val(e.data.id),(0,l.default)("#challenge-create-options").modal())})})})})}function v(a){a.preventDefault();var s=(0,l.default)(a.target).serializeJSON(!0),o={challenge_id:s.challenge_id,content:s.flag||"",type:s.flag_type,data:s.flag_data?s.flag_data:""};Promise.all([new Promise(function(t,e){0!=o.content.length?i.default.fetch("/api/v1/flags",{method:"POST",credentials:"same-origin",headers:{Accept:"application/json","Content-Type":"application/json"},body:JSON.stringify(o)}).then(function(e){t(e.json())}):t()}),new Promise(function(e,t){var o=a.target,n={challenge:s.challenge_id,type:"challenge"};(0,l.default)(o.elements.file).val()&&d.default.files.upload(o,n),e()})]).then(function(e){i.default.fetch("/api/v1/challenges/"+s.challenge_id,{method:"PATCH",credentials:"same-origin",headers:{Accept:"application/json","Content-Type":"application/json"},body:JSON.stringify({state:s.state})}).then(function(e){return e.json()}).then(function(e){e.success&&setTimeout(function(){window.location=i.default.config.urlRoot+"/admin/challenges/"+s.challenge_id},700)})})}function y(e){var t=(0,l.default)(this).find("option:selected").data("meta");void 0!==t?_(t):(0,l.default)("#create-chal-entry-div").empty()}(0,l.default)(function(){(0,l.default)(".preview-challenge").click(function(e){window.challenge=new Object,i.default._internal.challenge={},l.default.get(i.default.config.urlRoot+"/api/v1/challenges/"+window.CHALLENGE_ID,function(e){var t=i.default._internal.challenge,o=e.data;o.solves=null,l.default.getScript(i.default.config.urlRoot+o.type_data.scripts.view,function(){(0,l.default)("#challenge-window").empty(),(0,l.default)("#challenge-window").append(o.view),(0,l.default)("#challenge-window #challenge-input").addClass("form-control"),(0,l.default)("#challenge-window #challenge-submit").addClass("btn btn-md btn-outline-secondary float-right"),(0,l.default)(".challenge-solves").hide(),(0,l.default)(".nav-tabs a").click(function(e){e.preventDefault(),(0,l.default)(this).tab("show")}),(0,l.default)("#challenge-window").on("hide.bs.modal",function(e){(0,l.default)("#challenge-input").removeClass("wrong"),(0,l.default)("#challenge-input").removeClass("correct"),(0,l.default)("#incorrect-key").slideUp(),(0,l.default)("#correct-key").slideUp(),(0,l.default)("#already-solved").slideUp(),(0,l.default)("#too-fast").slideUp()}),(0,l.default)(".load-hint").on("click",function(e){h((0,l.default)(this).data("hint-id"))}),(0,l.default)("#challenge-submit").click(function(e){e.preventDefault(),(0,l.default)("#challenge-submit").addClass("disabled-button"),(0,l.default)("#challenge-submit").prop("disabled",!0),i.default._internal.challenge.submit(!0).then(g)}),(0,l.default)("#challenge-input").keyup(function(e){13==e.keyCode&&(0,l.default)("#challenge-submit").click()}),t.postRender(),window.location.replace(window.location.href.split("#")[0]+"#preview"),(0,l.default)("#challenge-window").modal()})})}),(0,l.default)(".delete-challenge").click(function(e){(0,a.ezQuery)({title:"Delete Challenge",body:"Are you sure you want to delete {0}".format("<strong>"+(0,n.htmlEntities)(window.CHALLENGE_NAME)+"</strong>"),success:function(){i.default.fetch("/api/v1/challenges/"+window.CHALLENGE_ID,{method:"DELETE"}).then(function(e){return e.json()}).then(function(e){e.success&&(window.location=i.default.config.urlRoot+"/admin/challenges")})}})}),(0,l.default)("#challenge-update-container > form").submit(function(e){e.preventDefault();var o=(0,l.default)(e.target).serializeJSON(!0);i.default.fetch("/api/v1/challenges/"+window.CHALLENGE_ID+"/flags",{method:"GET",credentials:"same-origin",headers:{Accept:"application/json","Content-Type":"application/json"}}).then(function(e){return e.json()}).then(function(e){function t(){i.default.fetch("/api/v1/challenges/"+window.CHALLENGE_ID,{method:"PATCH",credentials:"same-origin",headers:{Accept:"application/json","Content-Type":"application/json"},body:JSON.stringify(o)}).then(function(e){return e.json()}).then(function(e){if(e.success){switch((0,l.default)(".challenge-state").text(e.data.state),e.data.state){case"visible":(0,l.default)(".challenge-state").removeClass("badge-danger").addClass("badge-success");break;case"hidden":(0,l.default)(".challenge-state").removeClass("badge-success").addClass("badge-danger")}(0,a.ezToast)({title:"Success",body:"Your challenge has been updated!"})}})}0===e.data.length&&"visible"===o.state?(0,a.ezQuery)({title:"Missing Flags",body:"This challenge does not have any flags meaning it is unsolveable. Are you sure you'd like to update this challenge?",success:t}):t()})}),(0,l.default)("#challenge-create-options form").submit(v),(0,l.default)("#tags-add-input").keyup(r.addTag),(0,l.default)(".delete-tag").click(r.deleteTag),(0,l.default)("#prerequisite-add-form").submit(c.addRequirement),(0,l.default)(".delete-requirement").click(c.deleteRequirement),(0,l.default)("#file-add-form").submit(s.addFile),(0,l.default)(".delete-file").click(s.deleteFile),(0,l.default)("#hint-add-button").click(m.showHintModal),(0,l.default)(".delete-hint").click(m.deleteHint),(0,l.default)(".edit-hint").click(m.showEditHintModal),(0,l.default)("#hint-edit-form").submit(m.editHint),(0,l.default)("#flag-add-button").click(p.addFlagModal),(0,l.default)(".delete-flag").click(p.deleteFlag),(0,l.default)("#flags-create-select").change(p.flagTypeSelect),(0,l.default)(".edit-flag").click(p.editFlagModal),l.default.get(i.default.config.urlRoot+"/api/v1/challenges/types",function(e){(0,l.default)("#create-chals-select").empty();var t=e.data,o=Object.keys(t).length;if(1<o){for(var n in(0,l.default)("#create-chals-select").append("<option> -- </option>"),t){var a=t[n],s=(0,l.default)("<option/>");s.attr("value",a.type),s.text(a.name),s.data("meta",a),(0,l.default)("#create-chals-select").append(s)}(0,l.default)("#create-chals-select-div").show(),(0,l.default)("#create-chals-select").val("standard"),_(t.standard)}else if(1==o){var i=Object.keys(t)[0];(0,l.default)("#create-chals-select").empty(),_(t[i])}}),(0,l.default)("#create-chals-select").change(y)})},"./CTFd/themes/admin/assets/js/pages/main.js":function(e,t,o){var n=m(o("./CTFd/themes/core/assets/js/CTFd.js")),a=m(o("./node_modules/jquery/dist/jquery.js")),s=m(o("./node_modules/moment/moment.js")),i=m(o("./node_modules/nunjucks/browser/nunjucks.js")),l=o("./node_modules/howler/dist/howler.js"),d=m(o("./CTFd/themes/core/assets/js/events.js")),r=m(o("./CTFd/themes/core/assets/js/times.js")),c=m(o("./CTFd/themes/admin/assets/js/styles.js")),u=m(o("./CTFd/themes/core/assets/js/helpers.js"));function m(e){return e&&e.__esModule?e:{default:e}}n.default.init(window.init),window.CTFd=n.default,window.helpers=u.default,window.$=a.default,window.Moment=s.default,window.nunjucks=i.default,window.Howl=l.Howl,(0,a.default)(function(){(0,c.default)(),(0,r.default)(),(0,d.default)(n.default.config.urlRoot)})},"./CTFd/themes/admin/assets/js/styles.js":function(e,t,o){Object.defineProperty(t,"__esModule",{value:!0}),t.showMediaLibrary=r,t.bindMarkdownEditors=c,t.default=void 0,o("./node_modules/bootstrap/dist/js/bootstrap.bundle.js");var n=o("./CTFd/themes/core/assets/js/utils.js"),a=d(o("./node_modules/jquery/dist/jquery.js")),s=d(o("./node_modules/easymde/src/js/easymde.js")),i=d(o("./node_modules/vue/dist/vue.esm.browser.js")),l=d(o("./CTFd/themes/admin/assets/js/components/files/MediaLibrary.vue"));function d(e){return e&&e.__esModule?e:{default:e}}function r(e){var t=i.default.extend(l.default),o=document.createElement("div");document.querySelector("main").appendChild(o);var n=new t({propsData:{editor:e}}).$mount(o);(0,a.default)("#media-modal").on("hidden.bs.modal",function(e){n.$destroy(),(0,a.default)("#media-modal").remove()}),(0,a.default)("#media-modal").modal()}function c(){(0,a.default)("textarea.markdown").each(function(e,t){if(!1===t.hasOwnProperty("mde")){var o=new s.default({autoDownloadFontAwesome:!1,toolbar:["bold","italic","heading","|","quote","unordered-list","ordered-list","|","link","image",{name:"media",action:function(e){r(e)},className:"fas fa-file-upload",title:"Media Library"},"|","preview","guide"],element:this,initialValue:(0,a.default)(this).val(),forceSync:!0,minHeight:"200px"});this.mde=o,this.codemirror=o.codemirror,(0,a.default)(this).on("change keyup paste",function(){o.codemirror.getDoc().setValue((0,a.default)(this).val()),o.codemirror.refresh()})}})}t.default=function(){(0,a.default)(":input").each(function(){(0,a.default)(this).data("initial",(0,a.default)(this).val())}),(0,a.default)(function(){(0,a.default)("tr[data-href], td[data-href]").click(function(){if(!getSelection().toString()){var e=(0,a.default)(this).attr("data-href");e&&(window.location=e)}return!1}),(0,a.default)("[data-checkbox]").click(function(e){(0,a.default)(e.target).is("input[type=checkbox]")?e.stopImmediatePropagation():((0,a.default)(this).find("input[type=checkbox]").click(),e.stopImmediatePropagation())}),(0,a.default)("[data-checkbox-all]").on("click change",function(e){var t=(0,a.default)(this).prop("checked"),o=(0,a.default)(this).index()+1;(0,a.default)(this).closest("table").find("tr td:nth-child(".concat(o,") input[type=checkbox]")).prop("checked",t),e.stopImmediatePropagation()}),(0,a.default)("tr[data-href] a, tr[data-href] button").click(function(e){(0,a.default)(this).attr("data-dismiss")||e.stopPropagation()}),(0,a.default)(".page-select").change(function(){var e=new URL(window.location);e.searchParams.set("page",this.value),window.location.href=e.toString()}),(0,a.default)('a[data-toggle="tab"]').on("shown.bs.tab",function(e){sessionStorage.setItem("activeTab",(0,a.default)(e.target).attr("href"))});var e=sessionStorage.getItem("activeTab");if(e){var t=(0,a.default)('.nav-tabs a[href="'.concat(e,'"], .nav-pills a[href="').concat(e,'"]'));t.length?t.tab("show"):sessionStorage.removeItem("activeTab")}c(),(0,n.makeSortableTables)(),(0,a.default)('[data-toggle="tooltip"]').tooltip()})}},"./CTFd/themes/core/assets/js/CTFd.js":function(e,t,o){Object.defineProperty(t,"__esModule",{value:!0}),t.default=void 0;var n=r(o("./CTFd/themes/core/assets/js/fetch.js")),a=r(o("./CTFd/themes/core/assets/js/config.js")),s=o("./CTFd/themes/core/assets/js/api.js");o("./CTFd/themes/core/assets/js/patch.js");var i=r(o("./node_modules/markdown-it/index.js")),l=r(o("./node_modules/jquery/dist/jquery.js")),d=r(o("./CTFd/themes/core/assets/js/ezq.js"));function r(e){return e&&e.__esModule?e:{default:e}}function c(e,t,o){return t in e?Object.defineProperty(e,t,{value:o,enumerable:!0,configurable:!0,writable:!0}):e[t]=o,e}var u=new s.API("/"),m={},p={ezq:d.default},f={$:l.default,markdown:function(e){var t=function(t){for(var e=1;e<arguments.length;e++)if(e%2){var o=null!=arguments[e]?arguments[e]:{},n=Object.keys(o);"function"==typeof Object.getOwnPropertySymbols&&(n=n.concat(Object.getOwnPropertySymbols(o).filter(function(e){return Object.getOwnPropertyDescriptor(o,e).enumerable}))),n.forEach(function(e){c(t,e,o[e])})}else Object.defineProperties(t,Object.getOwnPropertyDescriptors(arguments[e]));return t}({},{html:!0,linkify:!0},{},e),o=(0,i.default)(t);return o.renderer.rules.link_open=function(e,t,o,n,a){return e[t].attrPush(["target","_blank"]),a.renderToken(e,t,o)},o}},h=!1,j={run:function(e){e(g)}};var g={init:function(e){h||(h=!0,a.default.urlRoot=e.urlRoot||a.default.urlRoot,a.default.csrfNonce=e.csrfNonce||a.default.csrfNonce,a.default.userMode=e.userMode||a.default.userMode,u.domain=a.default.urlRoot+"/api/v1",m.id=e.userId)},config:a.default,fetch:n.default,user:m,ui:p,api:u,lib:f,_internal:{},plugin:j},_=g;t.default=_},"./CTFd/themes/core/assets/js/api.js":function(e,t,o){var c=n(o("./CTFd/themes/core/assets/js/fetch.js")),l=n(o("./node_modules/q/q.js"));function n(e){return e&&e.__esModule?e:{default:e}}function a(e){return(a="function"==typeof Symbol&&"symbol"==typeof Symbol.iterator?function(e){return typeof e}:function(e){return e&&"function"==typeof Symbol&&e.constructor===Symbol&&e!==Symbol.prototype?"symbol":typeof e})(e)}var s=function(){"use strict";function e(e){var t="object"===a(e)?e.domain:e;if(this.domain=t||"",0===this.domain.length)throw new Error("Domain parameter must be specified as a string.")}function i(o,n){return o.$queryParameters&&Object.keys(o.$queryParameters).forEach(function(e){var t=o.$queryParameters[e];n[e]=t}),n}return e.prototype.request=function(e,t,o,n,a,s,i,l){var d=s&&Object.keys(s).length?function(e){var t=[];for(var o in e)e.hasOwnProperty(o)&&t.push(encodeURIComponent(o)+"="+encodeURIComponent(e[o]));return t.join("&")}(s):null,r=t+(d?"?"+d:"");n&&!Object.keys(n).length&&(n=void 0),(0,c.default)(r,{method:e,headers:a,body:JSON.stringify(n)}).then(function(e){return e.json()}).then(function(e){l.resolve(e)}).catch(function(e){l.reject(e)})},e.prototype.post_award_list=function(e){void 0===e&&(e={});var t=l.default.defer(),o=this.domain,n={},a={};return a.Accept=["application/json"],a["Content-Type"]=["application/json"],n=i(e,n),this.request("POST",o+"/awards",e,{},a,n,{},t),t.promise},e.prototype.delete_award=function(e){void 0===e&&(e={});var t=l.default.defer(),o=this.domain,n="/awards/{award_id}",a={},s={};return s.Accept=["application/json"],s["Content-Type"]=["application/json"],n=n.replace("{award_id}",e.awardId),void 0===e.awardId?t.reject(new Error("Missing required parameter: awardId")):(a=i(e,a),this.request("DELETE",o+n,e,{},s,a,{},t)),t.promise},e.prototype.get_award=function(e){void 0===e&&(e={});var t=l.default.defer(),o=this.domain,n="/awards/{award_id}",a={},s={};return s.Accept=["application/json"],s["Content-Type"]=["application/json"],n=n.replace("{award_id}",e.awardId),void 0===e.awardId?t.reject(new Error("Missing required parameter: awardId")):(a=i(e,a),this.request("GET",o+n,e,{},s,a,{},t)),t.promise},e.prototype.post_challenge_list=function(e){void 0===e&&(e={});var t=l.default.defer(),o=this.domain,n={},a={};return a.Accept=["application/json"],a["Content-Type"]=["application/json"],n=i(e,n),this.request("POST",o+"/challenges",e,{},a,n,{},t),t.promise},e.prototype.get_challenge_list=function(e){void 0===e&&(e={});var t=l.default.defer(),o=this.domain,n={},a={};return a.Accept=["application/json"],a["Content-Type"]=["application/json"],n=i(e,n),this.request("GET",o+"/challenges",e,{},a,n,{},t),t.promise},e.prototype.post_challenge_attempt=function(e){void 0===e&&(e={});var t=l.default.defer(),o=this.domain,n={},a={};return a.Accept=["application/json"],a["Content-Type"]=["application/json"],n=i(e,n),this.request("POST",o+"/challenges/attempt",e,{},a,n,{},t),t.promise},e.prototype.get_challenge_types=function(e){void 0===e&&(e={});var t=l.default.defer(),o=this.domain,n={},a={};return a.Accept=["application/json"],a["Content-Type"]=["application/json"],n=i(e,n),this.request("GET",o+"/challenges/types",e,{},a,n,{},t),t.promise},e.prototype.patch_challenge=function(e){void 0===e&&(e={});var t=l.default.defer(),o=this.domain,n="/challenges/{challenge_id}",a={},s={};return s.Accept=["application/json"],s["Content-Type"]=["application/json"],n=n.replace("{challenge_id}",e.challengeId),void 0===e.challengeId?t.reject(new Error("Missing required parameter: challengeId")):(a=i(e,a),this.request("PATCH",o+n,e,{},s,a,{},t)),t.promise},e.prototype.delete_challenge=function(e){void 0===e&&(e={});var t=l.default.defer(),o=this.domain,n="/challenges/{challenge_id}",a={},s={};return s.Accept=["application/json"],s["Content-Type"]=["application/json"],n=n.replace("{challenge_id}",e.challengeId),void 0===e.challengeId?t.reject(new Error("Missing required parameter: challengeId")):(a=i(e,a),this.request("DELETE",o+n,e,{},s,a,{},t)),t.promise},e.prototype.get_challenge=function(e){void 0===e&&(e={});var t=l.default.defer(),o=this.domain,n="/challenges/{challenge_id}",a={},s={};return s.Accept=["application/json"],s["Content-Type"]=["application/json"],n=n.replace("{challenge_id}",e.challengeId),void 0===e.challengeId?t.reject(new Error("Missing required parameter: challengeId")):(a=i(e,a),this.request("GET",o+n,e,{},s,a,{},t)),t.promise},e.prototype.get_challenge_files=function(e){void 0===e&&(e={});var t=l.default.defer(),o=this.domain,n="/challenges/{challenge_id}/files",a={},s={};return s.Accept=["application/json"],s["Content-Type"]=["application/json"],void 0!==e.id&&(a.id=e.id),n=n.replace("{challenge_id}",e.challengeId),void 0===e.challengeId?t.reject(new Error("Missing required parameter: challengeId")):(a=i(e,a),this.request("GET",o+n,e,{},s,a,{},t)),t.promise},e.prototype.get_challenge_flags=function(e){void 0===e&&(e={});var t=l.default.defer(),o=this.domain,n="/challenges/{challenge_id}/flags",a={},s={};return s.Accept=["application/json"],s["Content-Type"]=["application/json"],void 0!==e.id&&(a.id=e.id),n=n.replace("{challenge_id}",e.challengeId),void 0===e.challengeId?t.reject(new Error("Missing required parameter: challengeId")):(a=i(e,a),this.request("GET",o+n,e,{},s,a,{},t)),t.promise},e.prototype.get_challenge_hints=function(e){void 0===e&&(e={});var t=l.default.defer(),o=this.domain,n="/challenges/{challenge_id}/hints",a={},s={};return s.Accept=["application/json"],s["Content-Type"]=["application/json"],void 0!==e.id&&(a.id=e.id),n=n.replace("{challenge_id}",e.challengeId),void 0===e.challengeId?t.reject(new Error("Missing required parameter: challengeId")):(a=i(e,a),this.request("GET",o+n,e,{},s,a,{},t)),t.promise},e.prototype.get_challenge_solves=function(e){void 0===e&&(e={});var t=l.default.defer(),o=this.domain,n="/challenges/{challenge_id}/solves",a={},s={};return s.Accept=["application/json"],s["Content-Type"]=["application/json"],void 0!==e.id&&(a.id=e.id),n=n.replace("{challenge_id}",e.challengeId),void 0===e.challengeId?t.reject(new Error("Missing required parameter: challengeId")):(a=i(e,a),this.request("GET",o+n,e,{},s,a,{},t)),t.promise},e.prototype.get_challenge_tags=function(e){void 0===e&&(e={});var t=l.default.defer(),o=this.domain,n="/challenges/{challenge_id}/tags",a={},s={};return s.Accept=["application/json"],s["Content-Type"]=["application/json"],void 0!==e.id&&(a.id=e.id),n=n.replace("{challenge_id}",e.challengeId),void 0===e.challengeId?t.reject(new Error("Missing required parameter: challengeId")):(a=i(e,a),this.request("GET",o+n,e,{},s,a,{},t)),t.promise},e.prototype.post_config_list=function(e){void 0===e&&(e={});var t=l.default.defer(),o=this.domain,n={},a={};return a.Accept=["application/json"],a["Content-Type"]=["application/json"],n=i(e,n),this.request("POST",o+"/configs",e,{},a,n,{},t),t.promise},e.prototype.patch_config_list=function(e){void 0===e&&(e={});var t=l.default.defer(),o=this.domain,n={},a={};return a.Accept=["application/json"],a["Content-Type"]=["application/json"],n=i(e,n),this.request("PATCH",o+"/configs",e,{},a,n,{},t),t.promise},e.prototype.get_config_list=function(e){void 0===e&&(e={});var t=l.default.defer(),o=this.domain,n={},a={};return a.Accept=["application/json"],a["Content-Type"]=["application/json"],n=i(e,n),this.request("GET",o+"/configs",e,{},a,n,{},t),t.promise},e.prototype.patch_config=function(e){void 0===e&&(e={});var t=l.default.defer(),o=this.domain,n="/configs/{config_key}",a={},s={};return s.Accept=["application/json"],s["Content-Type"]=["application/json"],n=n.replace("{config_key}",e.configKey),void 0===e.configKey?t.reject(new Error("Missing required parameter: configKey")):(a=i(e,a),this.request("PATCH",o+n,e,{},s,a,{},t)),t.promise},e.prototype.delete_config=function(e){void 0===e&&(e={});var t=l.default.defer(),o=this.domain,n="/configs/{config_key}",a={},s={};return s.Accept=["application/json"],s["Content-Type"]=["application/json"],n=n.replace("{config_key}",e.configKey),void 0===e.configKey?t.reject(new Error("Missing required parameter: configKey")):(a=i(e,a),this.request("DELETE",o+n,e,{},s,a,{},t)),t.promise},e.prototype.get_config=function(e){void 0===e&&(e={});var t=l.default.defer(),o=this.domain,n="/configs/{config_key}",a={},s={};return s.Accept=["application/json"],s["Content-Type"]=["application/json"],n=n.replace("{config_key}",e.configKey),void 0===e.configKey?t.reject(new Error("Missing required parameter: configKey")):(a=i(e,a),this.request("GET",o+n,e,{},s,a,{},t)),t.promise},e.prototype.post_files_list=function(e){void 0===e&&(e={});var t=l.default.defer(),o=this.domain,n={},a={};return a.Accept=["application/json"],a["Content-Type"]=["application/json"],n=i(e,n),this.request("POST",o+"/files",e,{},a,n,{},t),t.promise},e.prototype.get_files_list=function(e){void 0===e&&(e={});var t=l.default.defer(),o=this.domain,n={},a={};return a.Accept=["application/json"],a["Content-Type"]=["application/json"],n=i(e,n),this.request("GET",o+"/files",e,{},a,n,{},t),t.promise},e.prototype.delete_files_detail=function(e){void 0===e&&(e={});var t=l.default.defer(),o=this.domain,n="/files/{file_id}",a={},s={};return s.Accept=["application/json"],s["Content-Type"]=["application/json"],n=n.replace("{file_id}",e.fileId),void 0===e.fileId?t.reject(new Error("Missing required parameter: fileId")):(a=i(e,a),this.request("DELETE",o+n,e,{},s,a,{},t)),t.promise},e.prototype.get_files_detail=function(e){void 0===e&&(e={});var t=l.default.defer(),o=this.domain,n="/files/{file_id}",a={},s={};return s.Accept=["application/json"],s["Content-Type"]=["application/json"],n=n.replace("{file_id}",e.fileId),void 0===e.fileId?t.reject(new Error("Missing required parameter: fileId")):(a=i(e,a),this.request("GET",o+n,e,{},s,a,{},t)),t.promise},e.prototype.post_flag_list=function(e){void 0===e&&(e={});var t=l.default.defer(),o=this.domain,n={},a={};return a.Accept=["application/json"],a["Content-Type"]=["application/json"],n=i(e,n),this.request("POST",o+"/flags",e,{},a,n,{},t),t.promise},e.prototype.get_flag_list=function(e){void 0===e&&(e={});var t=l.default.defer(),o=this.domain,n={},a={};return a.Accept=["application/json"],a["Content-Type"]=["application/json"],n=i(e,n),this.request("GET",o+"/flags",e,{},a,n,{},t),t.promise},e.prototype.get_flag_types=function(e){void 0===e&&(e={});var t=l.default.defer(),o=this.domain,n={},a={};return a.Accept=["application/json"],a["Content-Type"]=["application/json"],n=i(e,n),this.request("GET",o+"/flags/types",e,{},a,n,{},t),t.promise},e.prototype.get_flag_types_1=function(e){void 0===e&&(e={});var t=l.default.defer(),o=this.domain,n="/flags/types/{type_name}",a={},s={};return s.Accept=["application/json"],s["Content-Type"]=["application/json"],n=n.replace("{type_name}",e.typeName),void 0===e.typeName?t.reject(new Error("Missing required parameter: typeName")):(a=i(e,a),this.request("GET",o+n,e,{},s,a,{},t)),t.promise},e.prototype.patch_flag=function(e){void 0===e&&(e={});var t=l.default.defer(),o=this.domain,n="/flags/{flag_id}",a={},s={};return s.Accept=["application/json"],s["Content-Type"]=["application/json"],n=n.replace("{flag_id}",e.flagId),void 0===e.flagId?t.reject(new Error("Missing required parameter: flagId")):(a=i(e,a),this.request("PATCH",o+n,e,{},s,a,{},t)),t.promise},e.prototype.delete_flag=function(e){void 0===e&&(e={});var t=l.default.defer(),o=this.domain,n="/flags/{flag_id}",a={},s={};return s.Accept=["application/json"],s["Content-Type"]=["application/json"],n=n.replace("{flag_id}",e.flagId),void 0===e.flagId?t.reject(new Error("Missing required parameter: flagId")):(a=i(e,a),this.request("DELETE",o+n,e,{},s,a,{},t)),t.promise},e.prototype.get_flag=function(e){void 0===e&&(e={});var t=l.default.defer(),o=this.domain,n="/flags/{flag_id}",a={},s={};return s.Accept=["application/json"],s["Content-Type"]=["application/json"],n=n.replace("{flag_id}",e.flagId),void 0===e.flagId?t.reject(new Error("Missing required parameter: flagId")):(a=i(e,a),this.request("GET",o+n,e,{},s,a,{},t)),t.promise},e.prototype.post_hint_list=function(e){void 0===e&&(e={});var t=l.default.defer(),o=this.domain,n={},a={};return a.Accept=["application/json"],a["Content-Type"]=["application/json"],n=i(e,n),this.request("POST",o+"/hints",e,{},a,n,{},t),t.promise},e.prototype.get_hint_list=function(e){void 0===e&&(e={});var t=l.default.defer(),o=this.domain,n={},a={};return a.Accept=["application/json"],a["Content-Type"]=["application/json"],n=i(e,n),this.request("GET",o+"/hints",e,{},a,n,{},t),t.promise},e.prototype.patch_hint=function(e){void 0===e&&(e={});var t=l.default.defer(),o=this.domain,n="/hints/{hint_id}",a={},s={};return s.Accept=["application/json"],s["Content-Type"]=["application/json"],n=n.replace("{hint_id}",e.hintId),void 0===e.hintId?t.reject(new Error("Missing required parameter: hintId")):(a=i(e,a),this.request("PATCH",o+n,e,{},s,a,{},t)),t.promise},e.prototype.delete_hint=function(e){void 0===e&&(e={});var t=l.default.defer(),o=this.domain,n="/hints/{hint_id}",a={},s={};return s.Accept=["application/json"],s["Content-Type"]=["application/json"],n=n.replace("{hint_id}",e.hintId),void 0===e.hintId?t.reject(new Error("Missing required parameter: hintId")):(a=i(e,a),this.request("DELETE",o+n,e,{},s,a,{},t)),t.promise},e.prototype.get_hint=function(e){void 0===e&&(e={});var t=l.default.defer(),o=this.domain,n="/hints/{hint_id}",a={},s={};return s.Accept=["application/json"],s["Content-Type"]=["application/json"],n=n.replace("{hint_id}",e.hintId),void 0===e.hintId?t.reject(new Error("Missing required parameter: hintId")):(a=i(e,a),this.request("GET",o+n,e,{},s,a,{},t)),t.promise},e.prototype.post_notification_list=function(e){void 0===e&&(e={});var t=l.default.defer(),o=this.domain,n={},a={};return a.Accept=["application/json"],a["Content-Type"]=["application/json"],n=i(e,n),this.request("POST",o+"/notifications",e,{},a,n,{},t),t.promise},e.prototype.get_notification_list=function(e){void 0===e&&(e={});var t=l.default.defer(),o=this.domain,n={},a={};return a.Accept=["application/json"],a["Content-Type"]=["application/json"],n=i(e,n),this.request("GET",o+"/notifications",e,{},a,n,{},t),t.promise},e.prototype.delete_notification=function(e){void 0===e&&(e={});var t=l.default.defer(),o=this.domain,n="/notifications/{notification_id}",a={},s={};return s.Accept=["application/json"],s["Content-Type"]=["application/json"],n=n.replace("{notification_id}",e.notificationId),void 0===e.notificationId?t.reject(new Error("Missing required parameter: notificationId")):(a=i(e,a),this.request("DELETE",o+n,e,{},s,a,{},t)),t.promise},e.prototype.get_notification=function(e){void 0===e&&(e={});var t=l.default.defer(),o=this.domain,n="/notifications/{notification_id}",a={},s={};return s.Accept=["application/json"],s["Content-Type"]=["application/json"],n=n.replace("{notification_id}",e.notificationId),void 0===e.notificationId?t.reject(new Error("Missing required parameter: notificationId")):(a=i(e,a),this.request("GET",o+n,e,{},s,a,{},t)),t.promise},e.prototype.post_page_list=function(e){void 0===e&&(e={});var t=l.default.defer(),o=this.domain,n={},a={};return a.Accept=["application/json"],a["Content-Type"]=["application/json"],n=i(e,n),this.request("POST",o+"/pages",e,{},a,n,{},t),t.promise},e.prototype.get_page_list=function(e){void 0===e&&(e={});var t=l.default.defer(),o=this.domain,n={},a={};return a.Accept=["application/json"],a["Content-Type"]=["application/json"],n=i(e,n),this.request("GET",o+"/pages",e,{},a,n,{},t),t.promise},e.prototype.patch_page_detail=function(e){void 0===e&&(e={});var t=l.default.defer(),o=this.domain,n="/pages/{page_id}",a={},s={};return s.Accept=["application/json"],s["Content-Type"]=["application/json"],n=n.replace("{page_id}",e.pageId),void 0===e.pageId?t.reject(new Error("Missing required parameter: pageId")):(a=i(e,a),this.request("PATCH",o+n,e,{},s,a,{},t)),t.promise},e.prototype.delete_page_detail=function(e){void 0===e&&(e={});var t=l.default.defer(),o=this.domain,n="/pages/{page_id}",a={},s={};return s.Accept=["application/json"],s["Content-Type"]=["application/json"],n=n.replace("{page_id}",e.pageId),void 0===e.pageId?t.reject(new Error("Missing required parameter: pageId")):(a=i(e,a),this.request("DELETE",o+n,e,{},s,a,{},t)),t.promise},e.prototype.get_page_detail=function(e){void 0===e&&(e={});var t=l.default.defer(),o=this.domain,n="/pages/{page_id}",a={},s={};return s.Accept=["application/json"],s["Content-Type"]=["application/json"],n=n.replace("{page_id}",e.pageId),void 0===e.pageId?t.reject(new Error("Missing required parameter: pageId")):(a=i(e,a),this.request("GET",o+n,e,{},s,a,{},t)),t.promise},e.prototype.get_scoreboard_list=function(e){void 0===e&&(e={});var t=l.default.defer(),o=this.domain,n={},a={};return a.Accept=["application/json"],a["Content-Type"]=["application/json"],n=i(e,n),this.request("GET",o+"/scoreboard",e,{},a,n,{},t),t.promise},e.prototype.get_scoreboard_detail=function(e){void 0===e&&(e={});var t=l.default.defer(),o=this.domain,n="/scoreboard/top/{count}",a={},s={};return s.Accept=["application/json"],s["Content-Type"]=["application/json"],n=n.replace("{count}",e.count),void 0===e.count?t.reject(new Error("Missing required parameter: count")):(a=i(e,a),this.request("GET",o+n,e,{},s,a,{},t)),t.promise},e.prototype.get_challenge_solve_statistics=function(e){void 0===e&&(e={});var t=l.default.defer(),o=this.domain,n={},a={};return a.Accept=["application/json"],a["Content-Type"]=["application/json"],n=i(e,n),this.request("GET",o+"/statistics/challenges/solves",e,{},a,n,{},t),t.promise},e.prototype.get_challenge_solve_percentages=function(e){void 0===e&&(e={});var t=l.default.defer(),o=this.domain,n={},a={};return a.Accept=["application/json"],a["Content-Type"]=["application/json"],n=i(e,n),this.request("GET",o+"/statistics/challenges/solves/percentages",e,{},a,n,{},t),t.promise},e.prototype.get_challenge_property_counts=function(e){void 0===e&&(e={});var t=l.default.defer(),o=this.domain,n="/statistics/challenges/{column}",a={},s={};return s.Accept=["application/json"],s["Content-Type"]=["application/json"],n=n.replace("{column}",e.column),void 0===e.column?t.reject(new Error("Missing required parameter: column")):(a=i(e,a),this.request("GET",o+n,e,{},s,a,{},t)),t.promise},e.prototype.get_submission_property_counts=function(e){void 0===e&&(e={});var t=l.default.defer(),o=this.domain,n="/statistics/submissions/{column}",a={},s={};return s.Accept=["application/json"],s["Content-Type"]=["application/json"],n=n.replace("{column}",e.column),void 0===e.column?t.reject(new Error("Missing required parameter: column")):(a=i(e,a),this.request("GET",o+n,e,{},s,a,{},t)),t.promise},e.prototype.get_team_statistics=function(e){void 0===e&&(e={});var t=l.default.defer(),o=this.domain,n={},a={};return a.Accept=["application/json"],a["Content-Type"]=["application/json"],n=i(e,n),this.request("GET",o+"/statistics/teams",e,{},a,n,{},t),t.promise},e.prototype.get_user_statistics=function(e){void 0===e&&(e={});var t=l.default.defer(),o=this.domain,n={},a={};return a.Accept=["application/json"],a["Content-Type"]=["application/json"],n=i(e,n),this.request("GET",o+"/statistics/users",e,{},a,n,{},t),t.promise},e.prototype.get_user_property_counts=function(e){void 0===e&&(e={});var t=l.default.defer(),o=this.domain,n="/statistics/users/{column}",a={},s={};return s.Accept=["application/json"],s["Content-Type"]=["application/json"],n=n.replace("{column}",e.column),void 0===e.column?t.reject(new Error("Missing required parameter: column")):(a=i(e,a),this.request("GET",o+n,e,{},s,a,{},t)),t.promise},e.prototype.post_submissions_list=function(e){void 0===e&&(e={});var t=l.default.defer(),o=this.domain,n={},a={};return a.Accept=["application/json"],a["Content-Type"]=["application/json"],n=i(e,n),this.request("POST",o+"/submissions",e,{},a,n,{},t),t.promise},e.prototype.get_submissions_list=function(e){void 0===e&&(e={});var t=l.default.defer(),o=this.domain,n={},a={};return a.Accept=["application/json"],a["Content-Type"]=["application/json"],n=i(e,n),this.request("GET",o+"/submissions",e,{},a,n,{},t),t.promise},e.prototype.delete_submission=function(e){void 0===e&&(e={});var t=l.default.defer(),o=this.domain,n="/submissions/{submission_id}",a={},s={};return s.Accept=["application/json"],s["Content-Type"]=["application/json"],n=n.replace("{submission_id}",e.submissionId),void 0===e.submissionId?t.reject(new Error("Missing required parameter: submissionId")):(a=i(e,a),this.request("DELETE",o+n,e,{},s,a,{},t)),t.promise},e.prototype.get_submission=function(e){void 0===e&&(e={});var t=l.default.defer(),o=this.domain,n="/submissions/{submission_id}",a={},s={};return s.Accept=["application/json"],s["Content-Type"]=["application/json"],n=n.replace("{submission_id}",e.submissionId),void 0===e.submissionId?t.reject(new Error("Missing required parameter: submissionId")):(a=i(e,a),this.request("GET",o+n,e,{},s,a,{},t)),t.promise},e.prototype.post_tag_list=function(e){void 0===e&&(e={});var t=l.default.defer(),o=this.domain,n={},a={};return a.Accept=["application/json"],a["Content-Type"]=["application/json"],n=i(e,n),this.request("POST",o+"/tags",e,{},a,n,{},t),t.promise},e.prototype.get_tag_list=function(e){void 0===e&&(e={});var t=l.default.defer(),o=this.domain,n={},a={};return a.Accept=["application/json"],a["Content-Type"]=["application/json"],n=i(e,n),this.request("GET",o+"/tags",e,{},a,n,{},t),t.promise},e.prototype.patch_tag=function(e){void 0===e&&(e={});var t=l.default.defer(),o=this.domain,n="/tags/{tag_id}",a={},s={};return s.Accept=["application/json"],s["Content-Type"]=["application/json"],n=n.replace("{tag_id}",e.tagId),void 0===e.tagId?t.reject(new Error("Missing required parameter: tagId")):(a=i(e,a),this.request("PATCH",o+n,e,{},s,a,{},t)),t.promise},e.prototype.delete_tag=function(e){void 0===e&&(e={});var t=l.default.defer(),o=this.domain,n="/tags/{tag_id}",a={},s={};return s.Accept=["application/json"],s["Content-Type"]=["application/json"],n=n.replace("{tag_id}",e.tagId),void 0===e.tagId?t.reject(new Error("Missing required parameter: tagId")):(a=i(e,a),this.request("DELETE",o+n,e,{},s,a,{},t)),t.promise},e.prototype.get_tag=function(e){void 0===e&&(e={});var t=l.default.defer(),o=this.domain,n="/tags/{tag_id}",a={},s={};return s.Accept=["application/json"],s["Content-Type"]=["application/json"],n=n.replace("{tag_id}",e.tagId),void 0===e.tagId?t.reject(new Error("Missing required parameter: tagId")):(a=i(e,a),this.request("GET",o+n,e,{},s,a,{},t)),t.promise},e.prototype.post_team_list=function(e){void 0===e&&(e={});var t=l.default.defer(),o=this.domain,n={},a={};return a.Accept=["application/json"],a["Content-Type"]=["application/json"],n=i(e,n),this.request("POST",o+"/teams",e,{},a,n,{},t),t.promise},e.prototype.get_team_list=function(e){void 0===e&&(e={});var t=l.default.defer(),o=this.domain,n={},a={};return a.Accept=["application/json"],a["Content-Type"]=["application/json"],n=i(e,n),this.request("GET",o+"/teams",e,{},a,n,{},t),t.promise},e.prototype.patch_team_private=function(e){void 0===e&&(e={});var t=l.default.defer(),o=this.domain,n={},a={};return a.Accept=["application/json"],a["Content-Type"]=["application/json"],void 0!==e.teamId&&(n.team_id=e.teamId),n=i(e,n),this.request("PATCH",o+"/teams/me",e,{},a,n,{},t),t.promise},e.prototype.get_team_private=function(e){void 0===e&&(e={});var t=l.default.defer(),o=this.domain,n={},a={};return a.Accept=["application/json"],a["Content-Type"]=["application/json"],void 0!==e.teamId&&(n.team_id=e.teamId),n=i(e,n),this.request("GET",o+"/teams/me",e,{},a,n,{},t),t.promise},e.prototype.patch_team_public=function(e){void 0===e&&(e={});var t=l.default.defer(),o=this.domain,n="/teams/{team_id}",a={},s={};return s.Accept=["application/json"],s["Content-Type"]=["application/json"],n=n.replace("{team_id}",e.teamId),void 0===e.teamId?t.reject(new Error("Missing required parameter: teamId")):(a=i(e,a),this.request("PATCH",o+n,e,{},s,a,{},t)),t.promise},e.prototype.delete_team_public=function(e){void 0===e&&(e={});var t=l.default.defer(),o=this.domain,n="/teams/{team_id}",a={},s={};return s.Accept=["application/json"],s["Content-Type"]=["application/json"],n=n.replace("{team_id}",e.teamId),void 0===e.teamId?t.reject(new Error("Missing required parameter: teamId")):(a=i(e,a),this.request("DELETE",o+n,e,{},s,a,{},t)),t.promise},e.prototype.get_team_public=function(e){void 0===e&&(e={});var t=l.default.defer(),o=this.domain,n="/teams/{team_id}",a={},s={};return s.Accept=["application/json"],s["Content-Type"]=["application/json"],n=n.replace("{team_id}",e.teamId),void 0===e.teamId?t.reject(new Error("Missing required parameter: teamId")):(a=i(e,a),this.request("GET",o+n,e,{},s,a,{},t)),t.promise},e.prototype.get_team_awards=function(e){void 0===e&&(e={});var t=l.default.defer(),o=this.domain,n="/teams/{team_id}/awards",a={},s={};return s.Accept=["application/json"],s["Content-Type"]=["application/json"],n=n.replace("{team_id}",e.teamId),void 0===e.teamId?t.reject(new Error("Missing required parameter: teamId")):(a=i(e,a),this.request("GET",o+n,e,{},s,a,{},t)),t.promise},e.prototype.get_team_fails=function(e){void 0===e&&(e={});var t=l.default.defer(),o=this.domain,n="/teams/{team_id}/fails",a={},s={};return s.Accept=["application/json"],s["Content-Type"]=["application/json"],n=n.replace("{team_id}",e.teamId),void 0===e.teamId?t.reject(new Error("Missing required parameter: teamId")):(a=i(e,a),this.request("GET",o+n,e,{},s,a,{},t)),t.promise},e.prototype.get_team_solves=function(e){void 0===e&&(e={});var t=l.default.defer(),o=this.domain,n="/teams/{team_id}/solves",a={},s={};return s.Accept=["application/json"],s["Content-Type"]=["application/json"],n=n.replace("{team_id}",e.teamId),void 0===e.teamId?t.reject(new Error("Missing required parameter: teamId")):(a=i(e,a),this.request("GET",o+n,e,{},s,a,{},t)),t.promise},e.prototype.post_unlock_list=function(e){void 0===e&&(e={});var t=l.default.defer(),o=this.domain,n={},a={};return a.Accept=["application/json"],a["Content-Type"]=["application/json"],n=i(e,n),this.request("POST",o+"/unlocks",e,{},a,n,{},t),t.promise},e.prototype.get_unlock_list=function(e){void 0===e&&(e={});var t=l.default.defer(),o=this.domain,n={},a={};return a.Accept=["application/json"],a["Content-Type"]=["application/json"],n=i(e,n),this.request("GET",o+"/unlocks",e,{},a,n,{},t),t.promise},e.prototype.post_user_list=function(e){void 0===e&&(e={});var t=l.default.defer(),o=this.domain,n={},a={};return a.Accept=["application/json"],a["Content-Type"]=["application/json"],n=i(e,n),this.request("POST",o+"/users",e,{},a,n,{},t),t.promise},e.prototype.get_user_list=function(e){void 0===e&&(e={});var t=l.default.defer(),o=this.domain,n={},a={};return a.Accept=["application/json"],a["Content-Type"]=["application/json"],n=i(e,n),this.request("GET",o+"/users",e,{},a,n,{},t),t.promise},e.prototype.patch_user_private=function(e){void 0===e&&(e={});var t=l.default.defer(),o=this.domain,n={},a={};return a.Accept=["application/json"],a["Content-Type"]=["application/json"],n=i(e,n),this.request("PATCH",o+"/users/me",e,{},a,n,{},t),t.promise},e.prototype.get_user_private=function(e){void 0===e&&(e={});var t=l.default.defer(),o=this.domain,n={},a={};return a.Accept=["application/json"],a["Content-Type"]=["application/json"],n=i(e,n),this.request("GET",o+"/users/me",e,{},a,n,{},t),t.promise},e.prototype.patch_user_public=function(e){void 0===e&&(e={});var t=l.default.defer(),o=this.domain,n="/users/{user_id}",a={},s={};return s.Accept=["application/json"],s["Content-Type"]=["application/json"],n=n.replace("{user_id}",e.userId),void 0===e.userId?t.reject(new Error("Missing required parameter: userId")):(a=i(e,a),this.request("PATCH",o+n,e,{},s,a,{},t)),t.promise},e.prototype.delete_user_public=function(e){void 0===e&&(e={});var t=l.default.defer(),o=this.domain,n="/users/{user_id}",a={},s={};return s.Accept=["application/json"],s["Content-Type"]=["application/json"],n=n.replace("{user_id}",e.userId),void 0===e.userId?t.reject(new Error("Missing required parameter: userId")):(a=i(e,a),this.request("DELETE",o+n,e,{},s,a,{},t)),t.promise},e.prototype.get_user_public=function(e){void 0===e&&(e={});var t=l.default.defer(),o=this.domain,n="/users/{user_id}",a={},s={};return s.Accept=["application/json"],s["Content-Type"]=["application/json"],n=n.replace("{user_id}",e.userId),void 0===e.userId?t.reject(new Error("Missing required parameter: userId")):(a=i(e,a),this.request("GET",o+n,e,{},s,a,{},t)),t.promise},e.prototype.get_user_awards=function(e){void 0===e&&(e={});var t=l.default.defer(),o=this.domain,n="/users/{user_id}/awards",a={},s={};return s.Accept=["application/json"],s["Content-Type"]=["application/json"],n=n.replace("{user_id}",e.userId),void 0===e.userId?t.reject(new Error("Missing required parameter: userId")):(a=i(e,a),this.request("GET",o+n,e,{},s,a,{},t)),t.promise},e.prototype.get_user_fails=function(e){void 0===e&&(e={});var t=l.default.defer(),o=this.domain,n="/users/{user_id}/fails",a={},s={};return s.Accept=["application/json"],s["Content-Type"]=["application/json"],n=n.replace("{user_id}",e.userId),void 0===e.userId?t.reject(new Error("Missing required parameter: userId")):(a=i(e,a),this.request("GET",o+n,e,{},s,a,{},t)),t.promise},e.prototype.get_user_solves=function(e){void 0===e&&(e={});var t=l.default.defer(),o=this.domain,n="/users/{user_id}/solves",a={},s={};return s.Accept=["application/json"],s["Content-Type"]=["application/json"],n=n.replace("{user_id}",e.userId),void 0===e.userId?t.reject(new Error("Missing required parameter: userId")):(a=i(e,a),this.request("GET",o+n,e,{},s,a,{},t)),t.promise},e}();t.API=s},"./CTFd/themes/core/assets/js/config.js":function(e,t,o){Object.defineProperty(t,"__esModule",{value:!0}),t.default=void 0;t.default={urlRoot:"",csrfNonce:"",userMode:""}},"./CTFd/themes/core/assets/js/events.js":function(e,t,o){Object.defineProperty(t,"__esModule",{value:!0}),t.default=void 0;var s=o("./node_modules/howler/dist/howler.js"),n=o("./node_modules/event-source-polyfill/src/eventsource.js"),i=o("./CTFd/themes/core/assets/js/ezq.js"),l=o("./CTFd/themes/core/assets/js/utils.js"),d=n.NativeEventSource||n.EventSourcePolyfill;t.default=function(e){var t=new d(e+"/events"),o=new l.WindowController,n=new s.Howl({src:[e+"/themes/core/static/sounds/notification.webm",e+"/themes/core/static/sounds/notification.mp3"]});function a(e){switch(e.type){case"toast":(0,l.inc_notification_counter)();var t=50<e.content.length?e.content.substring(0,47)+"...":e.content,o=!1;(0,i.ezToast)({title:e.title,body:t,onclick:function(){(0,i.ezAlert)({title:e.title,body:e.content,button:"Got it!",success:function(){o=!0,(0,l.dec_notification_counter)()}})},onclose:function(){o||(0,l.dec_notification_counter)()}});break;case"alert":(0,l.inc_notification_counter)(),(0,i.ezAlert)({title:e.title,body:e.content,button:"Got it!",success:function(){(0,l.dec_notification_counter)()}});break;case"background":default:(0,l.inc_notification_counter)()}e.sound&&n.play()}(0,l.init_notification_counter)(),o.notification=function(e){a(e)},o.masterDidChange=function(){this.isMaster?t.addEventListener("notification",function(e){var t=JSON.parse(e.data);o.broadcast("notification",t),a(t)},!1):t&&t.close()}}},"./CTFd/themes/core/assets/js/ezq.js":function(e,t,o){Object.defineProperty(t,"__esModule",{value:!0}),t.ezAlert=p,t.ezToast=f,t.ezQuery=h,t.ezProgressBar=j,t.ezBadge=g,t.default=void 0,o("./node_modules/bootstrap/js/dist/modal.js");var n,l=(n=o("./node_modules/jquery/dist/jquery.js"))&&n.__esModule?n:{default:n};var s='<div class="modal fade" tabindex="-1" role="dialog"> <div class="modal-dialog" role="document"> <div class="modal-content"> <div class="modal-header"> <h5 class="modal-title">{0}</h5> <button type="button" class="close" data-dismiss="modal" aria-label="Close"> <span aria-hidden="true">&times;</span> </button> </div> <div class="modal-body"> </div> <div class="modal-footer"> </div> </div> </div></div>',d='<div class="toast m-3" role="alert"> <div class="toast-header"> <strong class="mr-auto">{0}</strong> <button type="button" class="ml-2 mb-1 close" data-dismiss="toast" aria-label="Close"> <span aria-hidden="true">&times;</span> </button> </div> <div class="toast-body">{1}</div></div>',i='<div class="progress"> <div class="progress-bar progress-bar-success progress-bar-striped progress-bar-animated" role="progressbar" style="width: {0}%"> </div></div>',a='<div class="alert alert-danger alert-dismissable" role="alert">\n <span class="sr-only">Error:</span>\n {0}\n <button type="button" class="close" data-dismiss="alert" aria-label="Close"><span aria-hidden="true">×</span></button>\n</div>',r='<div class="alert alert-success alert-dismissable submit-row" role="alert">\n <strong>Success!</strong>\n {0}\n <button type="button" class="close" data-dismiss="alert" aria-label="Close"><span aria-hidden="true">×</span></button>\n</div>',c='<button type="button" class="btn btn-primary" data-dismiss="modal">{0}</button>',u='<button type="button" class="btn btn-danger" data-dismiss="modal">No</button>',m='<button type="button" class="btn btn-primary" data-dismiss="modal">Yes</button>';function p(e){var t=s.format(e.title),o=(0,l.default)(t);"string"==typeof e.body?o.find(".modal-body").append("<p>".concat(e.body,"</p>")):o.find(".modal-body").append((0,l.default)(e.body));var n=(0,l.default)(c.format(e.button));return e.success&&(0,l.default)(n).click(function(){e.success()}),e.large&&o.find(".modal-dialog").addClass("modal-lg"),o.find(".modal-footer").append(n),(0,l.default)("main").append(o),o.modal("show"),(0,l.default)(o).on("hidden.bs.modal",function(){(0,l.default)(this).modal("dispose")}),o}function f(e){(0,l.default)("#ezq--notifications-toast-container").length||(0,l.default)("body").append((0,l.default)("<div/>").attr({id:"ezq--notifications-toast-container"}).css({position:"fixed",bottom:"0",right:"0","min-width":"20%"}));var t=d.format(e.title,e.body),o=(0,l.default)(t);if(e.onclose&&(0,l.default)(o).find("button[data-dismiss=toast]").click(function(){e.onclose()}),e.onclick){var n=(0,l.default)(o).find(".toast-body");n.addClass("cursor-pointer"),n.click(function(){e.onclick()})}var a=!1!==e.autohide,s=!1!==e.animation,i=e.delay||1e4;return(0,l.default)("#ezq--notifications-toast-container").prepend(o),o.toast({autohide:a,delay:i,animation:s}),o.toast("show"),o}function h(e){var t=s.format(e.title),o=(0,l.default)(t);"string"==typeof e.body?o.find(".modal-body").append("<p>".concat(e.body,"</p>")):o.find(".modal-body").append((0,l.default)(e.body));var n=(0,l.default)(m),a=(0,l.default)(u);return o.find(".modal-footer").append(a),o.find(".modal-footer").append(n),(0,l.default)("main").append(o),(0,l.default)(o).on("hidden.bs.modal",function(){(0,l.default)(this).modal("dispose")}),(0,l.default)(n).click(function(){e.success()}),o.modal("show"),o}function j(e){if(e.target){var t=(0,l.default)(e.target);return t.find(".progress-bar").css("width",e.width+"%"),t}var o=i.format(e.width),n=s.format(e.title),a=(0,l.default)(n);return a.find(".modal-body").append((0,l.default)(o)),(0,l.default)("main").append(a),a.modal("show")}function g(e){var t={success:r,error:a}[e.type].format(e.body);return(0,l.default)(t)}var _={ezAlert:p,ezToast:f,ezQuery:h,ezProgressBar:j,ezBadge:g};t.default=_},"./CTFd/themes/core/assets/js/fetch.js":function(e,t,o){Object.defineProperty(t,"__esModule",{value:!0}),t.default=void 0,o("./node_modules/whatwg-fetch/fetch.js");var n,a=(n=o("./CTFd/themes/core/assets/js/config.js"))&&n.__esModule?n:{default:n};var s=window.fetch;t.default=function(e,t){return void 0===t&&(t={method:"GET",credentials:"same-origin",headers:{}}),e=a.default.urlRoot+e,void 0===t.headers&&(t.headers={}),t.credentials="same-origin",t.headers.Accept="application/json",t.headers["Content-Type"]="application/json",t.headers["CSRF-Token"]=a.default.csrfNonce,s(e,t)}},"./CTFd/themes/core/assets/js/patch.js":function(e,t,o){var n,l=(n=o("./node_modules/q/q.js"))&&n.__esModule?n:{default:n},a=o("./CTFd/themes/core/assets/js/api.js");function s(e,t,o){return t in e?Object.defineProperty(e,t,{value:o,enumerable:!0,configurable:!0,writable:!0}):e[t]=o,e}function d(e,t){return function(t){for(var e=1;e<arguments.length;e++)if(e%2){var o=null!=arguments[e]?arguments[e]:{},n=Object.keys(o);"function"==typeof Object.getOwnPropertySymbols&&(n=n.concat(Object.getOwnPropertySymbols(o).filter(function(e){return Object.getOwnPropertyDescriptor(o,e).enumerable}))),n.forEach(function(e){s(t,e,o[e])})}else Object.defineProperties(t,Object.getOwnPropertyDescriptors(arguments[e]));return t}({},e,{},t)}a.API.prototype.requestRaw=function(e,t,o,n,a,s,i,l){var d=s&&Object.keys(s).length?function(e){var t=[];for(var o in e)e.hasOwnProperty(o)&&t.push(encodeURIComponent(o)+"="+encodeURIComponent(e[o]));return t.join("&")}(s):null,r=t+(d?"?"+d:"");n&&!Object.keys(n).length&&(n=void 0),fetch(r,{method:e,headers:a,body:n}).then(function(e){return e.json()}).then(function(e){l.resolve(e)}).catch(function(e){l.reject(e)})},a.API.prototype.patch_user_public=function(e,t){void 0===e&&(e={});var o=l.default.defer(),n=this.domain,a="/users/{user_id}",s={};return s.Accept=["application/json"],s["Content-Type"]=["application/json"],a=a.replace("{user_id}",e.userId),void 0===e.userId?o.reject(new Error("Missing required parameter: userId")):this.request("PATCH",n+a,e,t,s,{},{},o),o.promise},a.API.prototype.patch_user_private=function(e,t){void 0===e&&(e={});var o=l.default.defer(),n=this.domain,a={};return a.Accept=["application/json"],a["Content-Type"]=["application/json"],this.request("PATCH",n+"/users/me",e,t,a,{},{},o),o.promise},a.API.prototype.post_unlock_list=function(e,t){var o=l.default.defer(),n=this.domain,a={};return a.Accept=["application/json"],a["Content-Type"]=["application/json"],this.request("POST",n+"/unlocks",e,t,a,{},{},o),o.promise},a.API.prototype.post_notification_list=function(e,t){void 0===e&&(e={});var o=l.default.defer(),n=this.domain,a={};return a.Accept=["application/json"],a["Content-Type"]=["application/json"],this.request("POST",n+"/notifications",e,t,a,{},{},o),o.promise},a.API.prototype.post_files_list=function(e,t){var o=l.default.defer(),n=this.domain,a={};return a.Accept=["application/json"],a["Content-Type"]=["application/json"],this.requestRaw("POST",n+"/files",e,t,a,{},{},o),o.promise},a.API.prototype.patch_config=function(e,t){void 0===e&&(e={});var o=l.default.defer(),n=this.domain,a="/configs/{config_key}",s={};return s.Accept=["application/json"],s["Content-Type"]=["application/json"],a=a.replace("{config_key}",e.configKey),void 0===e.configKey?o.reject(new Error("Missing required parameter: configKey")):this.request("PATCH",n+a,e,t,s,{},{},o),o.promise},a.API.prototype.patch_config_list=function(e,t){void 0===e&&(e={});var o=l.default.defer(),n=this.domain,a={},s={};return s.Accept=["application/json"],s["Content-Type"]=["application/json"],a=d(e,a),this.request("PATCH",n+"/configs",e,t,s,a,{},o),o.promise},a.API.prototype.post_tag_list=function(e,t){void 0===e&&(e={});var o=l.default.defer(),n=this.domain,a={},s={};return s.Accept=["application/json"],s["Content-Type"]=["application/json"],a=d(e,a),this.request("POST",n+"/tags",e,t,s,a,{},o),o.promise},a.API.prototype.patch_team_public=function(e,t){void 0===e&&(e={});var o=l.default.defer(),n=this.domain,a="/teams/{team_id}",s={},i={};return i.Accept=["application/json"],i["Content-Type"]=["application/json"],a=a.replace("{team_id}",e.teamId),void 0===e.teamId?o.reject(new Error("Missing required parameter: teamId")):(s=d(e,s),this.request("PATCH",n+a,e,t,i,s,{},o)),o.promise},a.API.prototype.post_challenge_attempt=function(e,t){void 0===e&&(e={});var o=l.default.defer(),n=this.domain,a={},s={};return s.Accept=["application/json"],s["Content-Type"]=["application/json"],a=d(e,a),this.request("POST",n+"/challenges/attempt",e,t,s,a,{},o),o.promise},a.API.prototype.get_hint=function(e){void 0===e&&(e={});var t=l.default.defer(),o=this.domain,n="/hints/{hint_id}",a={},s={};return s.Accept=["application/json"],s["Content-Type"]=["application/json"],n=n.replace("{hint_id}",e.hintId),void 0===e.hintId?t.reject(new Error("Missing required parameter: hintId")):(delete e.hintId,a=d(e,a),this.request("GET",o+n,e,{},s,a,{},t)),t.promise}},"./CTFd/themes/core/assets/js/times.js":function(e,t,o){Object.defineProperty(t,"__esModule",{value:!0}),t.default=void 0;var n=s(o("./node_modules/moment/moment.js")),a=s(o("./node_modules/jquery/dist/jquery.js"));function s(e){return e&&e.__esModule?e:{default:e}}t.default=function(){(0,a.default)("[data-time]").each(function(e,t){t.innerText=(0,n.default)((0,a.default)(t).data("time")).local().format("MMMM Do, h:mm:ss A")})}},"./CTFd/themes/core/assets/js/utils.js":function(e,t,o){Object.defineProperty(t,"__esModule",{value:!0}),t.WindowController=a,t.colorHash=function(e){for(var t=0,o=0;o<e.length;o++)t=e.charCodeAt(o)+((t<<5)-t);for(var n="#",a=0;a<3;a++){n+=("00"+(t>>4*a&255).toString(16)).substr(-2)}return n},t.htmlEntities=function(e){return(0,i.default)("<div/>").text(e).html()},t.cumulativeSum=function(e){for(var t=e.concat(),o=0;o<e.length;o++)t[o]=e.slice(0,o+1).reduce(function(e,t){return e+t});return t},t.init_notification_counter=function(){var e=s.getItem(l);null===e?s.setItem(l,0):0<e&&(0,i.default)(".badge-notification").text(e)},t.set_notification_counter=function(e){s.setItem(l,e)},t.inc_notification_counter=function(){var e=s.getItem(l)||0;s.setItem(l,++e),(0,i.default)(".badge-notification").text(e)},t.dec_notification_counter=function(){var e=s.getItem(l)||0;0<e&&(s.setItem(l,--e),(0,i.default)(".badge-notification").text(e));0==e&&d()},t.clear_notification_counter=d,t.copyToClipboard=function(e,t){(0,i.default)(t).select(),document.execCommand("copy"),(0,i.default)(e.target).tooltip({title:"Copied!",trigger:"manual"}),(0,i.default)(e.target).tooltip("show"),setTimeout(function(){(0,i.default)(e.target).tooltip("hide")},1500)},t.makeSortableTables=function(){function s(e,t){return(0,i.default)(e).children("td").eq(t).text()}(0,i.default)("th.sort-col").append(' <i class="fas fa-sort"></i>'),(0,i.default)("th.sort-col").click(function(){var e=(0,i.default)(this).parents("table").eq(0),t=e.find("tr:gt(0)").toArray().sort(function(a){return function(e,t){var o=s(e,a),n=s(t,a);return i.default.isNumeric(o)&&i.default.isNumeric(n)?o-n:o.toString().localeCompare(n)}}((0,i.default)(this).index()));this.asc=!this.asc,this.asc||(t=t.reverse());for(var o=0;o<t.length;o++)e.append(t[o])})};var n,i=(n=o("./node_modules/jquery/dist/jquery.js"))&&n.__esModule?n:{default:n};function a(){this.id=Math.random(),this.isMaster=!1,this.others={},window.addEventListener("storage",this,!1),window.addEventListener("unload",this,!1),this.broadcast("hello");var t=this;this._checkTimeout=setTimeout(function e(){t.check(),t._checkTimeout=setTimeout(e,9e3)},500),this._pingTimeout=setTimeout(function e(){t.sendPing(),t._pingTimeout=setTimeout(e,17e3)},17e3)}i.default.fn.serializeJSON=function(o){var n={},a=(0,i.default)(this),e=a.serializeArray();return(e=(e=e.concat(a.find("input[type=checkbox]:checked").map(function(){return{name:this.name,value:!0}}).get())).concat(a.find("input[type=checkbox]:not(:checked)").map(function(){return{name:this.name,value:!1}}).get())).map(function(e){if(o)if(null!==e.value&&""!==e.value)n[e.name]=e.value;else{var t=a.find(":input[name=".concat(e.name,"]"));t.data("initial")!==t.val()&&(n[e.name]=e.value)}else n[e.name]=e.value}),n},String.prototype.format=String.prototype.f=function(){for(var e=this,t=arguments.length;t--;)e=e.replace(new RegExp("\\{"+t+"\\}","gm"),arguments[t]);return e},String.prototype.hashCode=function(){var e,t,o=0;if(0==this.length)return o;for(e=0,t=this.length;e<t;e++)o=(o<<5)-o+this.charCodeAt(e),o|=0;return o},a.prototype.destroy=function(){clearTimeout(this._pingTimeout),clearTimeout(this._checkTimeout),window.removeEventListener("storage",this,!1),window.removeEventListener("unload",this,!1),this.broadcast("bye")},a.prototype.handleEvent=function(e){if("unload"===e.type)this.destroy();else if("broadcast"===e.key)try{var t=JSON.parse(e.newValue);t.id!==this.id&&this[t.type](t)}catch(e){}},a.prototype.sendPing=function(){this.broadcast("ping")},a.prototype.hello=function(e){this.ping(e),e.id<this.id?this.check():this.sendPing()},a.prototype.ping=function(e){this.others[e.id]=+new Date},a.prototype.bye=function(e){delete this.others[e.id],this.check()},a.prototype.check=function(e){var t,o=+new Date,n=!0;for(t in this.others)this.others[t]+23e3<o?delete this.others[t]:t<this.id&&(n=!1);this.isMaster!==n&&(this.isMaster=n,this.masterDidChange())},a.prototype.masterDidChange=function(){},a.prototype.broadcast=function(e,t){var o={id:this.id,type:e};for(var n in t)o[n]=t[n];try{localStorage.setItem("broadcast",JSON.stringify(o))}catch(e){console.log(e)}};var s=window.localStorage,l="unread_notifications";function d(){s.setItem(l,0),(0,i.default)(".badge-notification").empty()}},"./node_modules/babel-loader/lib/index.js?!./node_modules/vue-loader/lib/index.js?!./CTFd/themes/admin/assets/js/components/files/MediaLibrary.vue?vue&type=script&lang=js&":function(e,t,o){Object.defineProperty(t,"__esModule",{value:!0}),t.default=void 0;var n=s(o("./CTFd/themes/core/assets/js/CTFd.js")),a=(o("./CTFd/themes/core/assets/js/ezq.js"),s(o("./CTFd/themes/core/assets/js/helpers.js")));function s(e){return e&&e.__esModule?e:{default:e}}var i={props:{editor:Object},data:function(){return{files:[],selectedFile:null}},methods:{getPageFiles:function(){var t=this;n.default.fetch("/api/v1/files?type=page",{credentials:"same-origin"}).then(function(e){return e.json()}).then(function(e){return t.files=e.data,t.files})},uploadChosenFiles:function(){var t=this,e=document.querySelector("#media-library-upload");a.default.files.upload(e,{},function(e){t.getPageFiles()})},selectFile:function(e){return this.selectedFile=e,this.selectedFile},buildSelectedFileUrl:function(){return n.default.config.urlRoot+"/files/"+this.selectedFile.location},deleteSelectedFile:function(){var t=this,e=this.selectedFile.id;confirm("Are you sure you want to delete this file?")&&n.default.fetch("/api/v1/files/"+e,{method:"DELETE"}).then(function(e){200===e.status&&e.json().then(function(e){e.success&&(t.getPageFiles(),t.selectedFile=null)})})},insertSelectedFile:function(){var e=this.$props.editor;e.hasOwnProperty("codemirror")&&(e=e.codemirror);var t=e.getDoc(),o=t.getCursor(),n=this.buildSelectedFileUrl(),a="far fa-file-image"===this.getIconClass(this.selectedFile.location),s=n.split("/").pop();link="[{0}]({1})".format(s,n),a&&(link="!"+link),t.replaceRange(link,o)},downloadSelectedFile:function(){var e=this.buildSelectedFileUrl();window.open(e,"_blank")},getIconClass:function(e){return{png:"far fa-file-image",jpg:"far fa-file-image",jpeg:"far fa-file-image",gif:"far fa-file-image",bmp:"far fa-file-image",svg:"far fa-file-image",txt:"far fa-file-alt",mov:"far fa-file-video",mp4:"far fa-file-video",wmv:"far fa-file-video",flv:"far fa-file-video",mkv:"far fa-file-video",avi:"far fa-file-video",pdf:"far fa-file-pdf",mp3:"far fa-file-sound",wav:"far fa-file-sound",aac:"far fa-file-sound",zip:"far fa-file-archive",gz:"far fa-file-archive",tar:"far fa-file-archive","7z":"far fa-file-archive",rar:"far fa-file-archive",py:"far fa-file-code",c:"far fa-file-code",cpp:"far fa-file-code",html:"far fa-file-code",js:"far fa-file-code",rb:"far fa-file-code",go:"far fa-file-code"}[e.split(".").pop()]||"far fa-file"}},created:function(){return this.getPageFiles()}};t.default=i},"./node_modules/moment/locale sync recursive ^\\.\\/.*$":function(e,t,o){var n={"./af":"./node_modules/moment/locale/af.js","./af.js":"./node_modules/moment/locale/af.js","./ar":"./node_modules/moment/locale/ar.js","./ar-dz":"./node_modules/moment/locale/ar-dz.js","./ar-dz.js":"./node_modules/moment/locale/ar-dz.js","./ar-kw":"./node_modules/moment/locale/ar-kw.js","./ar-kw.js":"./node_modules/moment/locale/ar-kw.js","./ar-ly":"./node_modules/moment/locale/ar-ly.js","./ar-ly.js":"./node_modules/moment/locale/ar-ly.js","./ar-ma":"./node_modules/moment/locale/ar-ma.js","./ar-ma.js":"./node_modules/moment/locale/ar-ma.js","./ar-sa":"./node_modules/moment/locale/ar-sa.js","./ar-sa.js":"./node_modules/moment/locale/ar-sa.js","./ar-tn":"./node_modules/moment/locale/ar-tn.js","./ar-tn.js":"./node_modules/moment/locale/ar-tn.js","./ar.js":"./node_modules/moment/locale/ar.js","./az":"./node_modules/moment/locale/az.js","./az.js":"./node_modules/moment/locale/az.js","./be":"./node_modules/moment/locale/be.js","./be.js":"./node_modules/moment/locale/be.js","./bg":"./node_modules/moment/locale/bg.js","./bg.js":"./node_modules/moment/locale/bg.js","./bm":"./node_modules/moment/locale/bm.js","./bm.js":"./node_modules/moment/locale/bm.js","./bn":"./node_modules/moment/locale/bn.js","./bn.js":"./node_modules/moment/locale/bn.js","./bo":"./node_modules/moment/locale/bo.js","./bo.js":"./node_modules/moment/locale/bo.js","./br":"./node_modules/moment/locale/br.js","./br.js":"./node_modules/moment/locale/br.js","./bs":"./node_modules/moment/locale/bs.js","./bs.js":"./node_modules/moment/locale/bs.js","./ca":"./node_modules/moment/locale/ca.js","./ca.js":"./node_modules/moment/locale/ca.js","./cs":"./node_modules/moment/locale/cs.js","./cs.js":"./node_modules/moment/locale/cs.js","./cv":"./node_modules/moment/locale/cv.js","./cv.js":"./node_modules/moment/locale/cv.js","./cy":"./node_modules/moment/locale/cy.js","./cy.js":"./node_modules/moment/locale/cy.js","./da":"./node_modules/moment/locale/da.js","./da.js":"./node_modules/moment/locale/da.js","./de":"./node_modules/moment/locale/de.js","./de-at":"./node_modules/moment/locale/de-at.js","./de-at.js":"./node_modules/moment/locale/de-at.js","./de-ch":"./node_modules/moment/locale/de-ch.js","./de-ch.js":"./node_modules/moment/locale/de-ch.js","./de.js":"./node_modules/moment/locale/de.js","./dv":"./node_modules/moment/locale/dv.js","./dv.js":"./node_modules/moment/locale/dv.js","./el":"./node_modules/moment/locale/el.js","./el.js":"./node_modules/moment/locale/el.js","./en-SG":"./node_modules/moment/locale/en-SG.js","./en-SG.js":"./node_modules/moment/locale/en-SG.js","./en-au":"./node_modules/moment/locale/en-au.js","./en-au.js":"./node_modules/moment/locale/en-au.js","./en-ca":"./node_modules/moment/locale/en-ca.js","./en-ca.js":"./node_modules/moment/locale/en-ca.js","./en-gb":"./node_modules/moment/locale/en-gb.js","./en-gb.js":"./node_modules/moment/locale/en-gb.js","./en-ie":"./node_modules/moment/locale/en-ie.js","./en-ie.js":"./node_modules/moment/locale/en-ie.js","./en-il":"./node_modules/moment/locale/en-il.js","./en-il.js":"./node_modules/moment/locale/en-il.js","./en-nz":"./node_modules/moment/locale/en-nz.js","./en-nz.js":"./node_modules/moment/locale/en-nz.js","./eo":"./node_modules/moment/locale/eo.js","./eo.js":"./node_modules/moment/locale/eo.js","./es":"./node_modules/moment/locale/es.js","./es-do":"./node_modules/moment/locale/es-do.js","./es-do.js":"./node_modules/moment/locale/es-do.js","./es-us":"./node_modules/moment/locale/es-us.js","./es-us.js":"./node_modules/moment/locale/es-us.js","./es.js":"./node_modules/moment/locale/es.js","./et":"./node_modules/moment/locale/et.js","./et.js":"./node_modules/moment/locale/et.js","./eu":"./node_modules/moment/locale/eu.js","./eu.js":"./node_modules/moment/locale/eu.js","./fa":"./node_modules/moment/locale/fa.js","./fa.js":"./node_modules/moment/locale/fa.js","./fi":"./node_modules/moment/locale/fi.js","./fi.js":"./node_modules/moment/locale/fi.js","./fo":"./node_modules/moment/locale/fo.js","./fo.js":"./node_modules/moment/locale/fo.js","./fr":"./node_modules/moment/locale/fr.js","./fr-ca":"./node_modules/moment/locale/fr-ca.js","./fr-ca.js":"./node_modules/moment/locale/fr-ca.js","./fr-ch":"./node_modules/moment/locale/fr-ch.js","./fr-ch.js":"./node_modules/moment/locale/fr-ch.js","./fr.js":"./node_modules/moment/locale/fr.js","./fy":"./node_modules/moment/locale/fy.js","./fy.js":"./node_modules/moment/locale/fy.js","./ga":"./node_modules/moment/locale/ga.js","./ga.js":"./node_modules/moment/locale/ga.js","./gd":"./node_modules/moment/locale/gd.js","./gd.js":"./node_modules/moment/locale/gd.js","./gl":"./node_modules/moment/locale/gl.js","./gl.js":"./node_modules/moment/locale/gl.js","./gom-latn":"./node_modules/moment/locale/gom-latn.js","./gom-latn.js":"./node_modules/moment/locale/gom-latn.js","./gu":"./node_modules/moment/locale/gu.js","./gu.js":"./node_modules/moment/locale/gu.js","./he":"./node_modules/moment/locale/he.js","./he.js":"./node_modules/moment/locale/he.js","./hi":"./node_modules/moment/locale/hi.js","./hi.js":"./node_modules/moment/locale/hi.js","./hr":"./node_modules/moment/locale/hr.js","./hr.js":"./node_modules/moment/locale/hr.js","./hu":"./node_modules/moment/locale/hu.js","./hu.js":"./node_modules/moment/locale/hu.js","./hy-am":"./node_modules/moment/locale/hy-am.js","./hy-am.js":"./node_modules/moment/locale/hy-am.js","./id":"./node_modules/moment/locale/id.js","./id.js":"./node_modules/moment/locale/id.js","./is":"./node_modules/moment/locale/is.js","./is.js":"./node_modules/moment/locale/is.js","./it":"./node_modules/moment/locale/it.js","./it-ch":"./node_modules/moment/locale/it-ch.js","./it-ch.js":"./node_modules/moment/locale/it-ch.js","./it.js":"./node_modules/moment/locale/it.js","./ja":"./node_modules/moment/locale/ja.js","./ja.js":"./node_modules/moment/locale/ja.js","./jv":"./node_modules/moment/locale/jv.js","./jv.js":"./node_modules/moment/locale/jv.js","./ka":"./node_modules/moment/locale/ka.js","./ka.js":"./node_modules/moment/locale/ka.js","./kk":"./node_modules/moment/locale/kk.js","./kk.js":"./node_modules/moment/locale/kk.js","./km":"./node_modules/moment/locale/km.js","./km.js":"./node_modules/moment/locale/km.js","./kn":"./node_modules/moment/locale/kn.js","./kn.js":"./node_modules/moment/locale/kn.js","./ko":"./node_modules/moment/locale/ko.js","./ko.js":"./node_modules/moment/locale/ko.js","./ku":"./node_modules/moment/locale/ku.js","./ku.js":"./node_modules/moment/locale/ku.js","./ky":"./node_modules/moment/locale/ky.js","./ky.js":"./node_modules/moment/locale/ky.js","./lb":"./node_modules/moment/locale/lb.js","./lb.js":"./node_modules/moment/locale/lb.js","./lo":"./node_modules/moment/locale/lo.js","./lo.js":"./node_modules/moment/locale/lo.js","./lt":"./node_modules/moment/locale/lt.js","./lt.js":"./node_modules/moment/locale/lt.js","./lv":"./node_modules/moment/locale/lv.js","./lv.js":"./node_modules/moment/locale/lv.js","./me":"./node_modules/moment/locale/me.js","./me.js":"./node_modules/moment/locale/me.js","./mi":"./node_modules/moment/locale/mi.js","./mi.js":"./node_modules/moment/locale/mi.js","./mk":"./node_modules/moment/locale/mk.js","./mk.js":"./node_modules/moment/locale/mk.js","./ml":"./node_modules/moment/locale/ml.js","./ml.js":"./node_modules/moment/locale/ml.js","./mn":"./node_modules/moment/locale/mn.js","./mn.js":"./node_modules/moment/locale/mn.js","./mr":"./node_modules/moment/locale/mr.js","./mr.js":"./node_modules/moment/locale/mr.js","./ms":"./node_modules/moment/locale/ms.js","./ms-my":"./node_modules/moment/locale/ms-my.js","./ms-my.js":"./node_modules/moment/locale/ms-my.js","./ms.js":"./node_modules/moment/locale/ms.js","./mt":"./node_modules/moment/locale/mt.js","./mt.js":"./node_modules/moment/locale/mt.js","./my":"./node_modules/moment/locale/my.js","./my.js":"./node_modules/moment/locale/my.js","./nb":"./node_modules/moment/locale/nb.js","./nb.js":"./node_modules/moment/locale/nb.js","./ne":"./node_modules/moment/locale/ne.js","./ne.js":"./node_modules/moment/locale/ne.js","./nl":"./node_modules/moment/locale/nl.js","./nl-be":"./node_modules/moment/locale/nl-be.js","./nl-be.js":"./node_modules/moment/locale/nl-be.js","./nl.js":"./node_modules/moment/locale/nl.js","./nn":"./node_modules/moment/locale/nn.js","./nn.js":"./node_modules/moment/locale/nn.js","./pa-in":"./node_modules/moment/locale/pa-in.js","./pa-in.js":"./node_modules/moment/locale/pa-in.js","./pl":"./node_modules/moment/locale/pl.js","./pl.js":"./node_modules/moment/locale/pl.js","./pt":"./node_modules/moment/locale/pt.js","./pt-br":"./node_modules/moment/locale/pt-br.js","./pt-br.js":"./node_modules/moment/locale/pt-br.js","./pt.js":"./node_modules/moment/locale/pt.js","./ro":"./node_modules/moment/locale/ro.js","./ro.js":"./node_modules/moment/locale/ro.js","./ru":"./node_modules/moment/locale/ru.js","./ru.js":"./node_modules/moment/locale/ru.js","./sd":"./node_modules/moment/locale/sd.js","./sd.js":"./node_modules/moment/locale/sd.js","./se":"./node_modules/moment/locale/se.js","./se.js":"./node_modules/moment/locale/se.js","./si":"./node_modules/moment/locale/si.js","./si.js":"./node_modules/moment/locale/si.js","./sk":"./node_modules/moment/locale/sk.js","./sk.js":"./node_modules/moment/locale/sk.js","./sl":"./node_modules/moment/locale/sl.js","./sl.js":"./node_modules/moment/locale/sl.js","./sq":"./node_modules/moment/locale/sq.js","./sq.js":"./node_modules/moment/locale/sq.js","./sr":"./node_modules/moment/locale/sr.js","./sr-cyrl":"./node_modules/moment/locale/sr-cyrl.js","./sr-cyrl.js":"./node_modules/moment/locale/sr-cyrl.js","./sr.js":"./node_modules/moment/locale/sr.js","./ss":"./node_modules/moment/locale/ss.js","./ss.js":"./node_modules/moment/locale/ss.js","./sv":"./node_modules/moment/locale/sv.js","./sv.js":"./node_modules/moment/locale/sv.js","./sw":"./node_modules/moment/locale/sw.js","./sw.js":"./node_modules/moment/locale/sw.js","./ta":"./node_modules/moment/locale/ta.js","./ta.js":"./node_modules/moment/locale/ta.js","./te":"./node_modules/moment/locale/te.js","./te.js":"./node_modules/moment/locale/te.js","./tet":"./node_modules/moment/locale/tet.js","./tet.js":"./node_modules/moment/locale/tet.js","./tg":"./node_modules/moment/locale/tg.js","./tg.js":"./node_modules/moment/locale/tg.js","./th":"./node_modules/moment/locale/th.js","./th.js":"./node_modules/moment/locale/th.js","./tl-ph":"./node_modules/moment/locale/tl-ph.js","./tl-ph.js":"./node_modules/moment/locale/tl-ph.js","./tlh":"./node_modules/moment/locale/tlh.js","./tlh.js":"./node_modules/moment/locale/tlh.js","./tr":"./node_modules/moment/locale/tr.js","./tr.js":"./node_modules/moment/locale/tr.js","./tzl":"./node_modules/moment/locale/tzl.js","./tzl.js":"./node_modules/moment/locale/tzl.js","./tzm":"./node_modules/moment/locale/tzm.js","./tzm-latn":"./node_modules/moment/locale/tzm-latn.js","./tzm-latn.js":"./node_modules/moment/locale/tzm-latn.js","./tzm.js":"./node_modules/moment/locale/tzm.js","./ug-cn":"./node_modules/moment/locale/ug-cn.js","./ug-cn.js":"./node_modules/moment/locale/ug-cn.js","./uk":"./node_modules/moment/locale/uk.js","./uk.js":"./node_modules/moment/locale/uk.js","./ur":"./node_modules/moment/locale/ur.js","./ur.js":"./node_modules/moment/locale/ur.js","./uz":"./node_modules/moment/locale/uz.js","./uz-latn":"./node_modules/moment/locale/uz-latn.js","./uz-latn.js":"./node_modules/moment/locale/uz-latn.js","./uz.js":"./node_modules/moment/locale/uz.js","./vi":"./node_modules/moment/locale/vi.js","./vi.js":"./node_modules/moment/locale/vi.js","./x-pseudo":"./node_modules/moment/locale/x-pseudo.js","./x-pseudo.js":"./node_modules/moment/locale/x-pseudo.js","./yo":"./node_modules/moment/locale/yo.js","./yo.js":"./node_modules/moment/locale/yo.js","./zh-cn":"./node_modules/moment/locale/zh-cn.js","./zh-cn.js":"./node_modules/moment/locale/zh-cn.js","./zh-hk":"./node_modules/moment/locale/zh-hk.js","./zh-hk.js":"./node_modules/moment/locale/zh-hk.js","./zh-tw":"./node_modules/moment/locale/zh-tw.js","./zh-tw.js":"./node_modules/moment/locale/zh-tw.js"};function a(e){var t=s(e);return o(t)}function s(e){var t=n[e];if(t+1)return t;var o=new Error("Cannot find module '"+e+"'");throw o.code="MODULE_NOT_FOUND",o}a.keys=function(){return Object.keys(n)},a.resolve=s,(e.exports=a).id="./node_modules/moment/locale sync recursive ^\\.\\/.*$"},0:function(e,t){}}); \ No newline at end of file +!function(d){function e(e){for(var t,o,n=e[0],a=e[1],s=e[2],i=0,l=[];i<n.length;i++)o=n[i],c[o]&&l.push(c[o][0]),c[o]=0;for(t in a)Object.prototype.hasOwnProperty.call(a,t)&&(d[t]=a[t]);for(m&&m(e);l.length;)l.shift()();return u.push.apply(u,s||[]),r()}function r(){for(var e,t=0;t<u.length;t++){for(var o=u[t],n=!0,a=1;a<o.length;a++){var s=o[a];0!==c[s]&&(n=!1)}n&&(u.splice(t--,1),e=i(i.s=o[0]))}return e}var o={},c={3:0,7:0},u=[];function i(e){if(o[e])return o[e].exports;var t=o[e]={i:e,l:!1,exports:{}};return d[e].call(t.exports,t,t.exports,i),t.l=!0,t.exports}i.m=d,i.c=o,i.d=function(e,t,o){i.o(e,t)||Object.defineProperty(e,t,{enumerable:!0,get:o})},i.r=function(e){"undefined"!=typeof Symbol&&Symbol.toStringTag&&Object.defineProperty(e,Symbol.toStringTag,{value:"Module"}),Object.defineProperty(e,"__esModule",{value:!0})},i.t=function(t,e){if(1&e&&(t=i(t)),8&e)return t;if(4&e&&"object"==typeof t&&t&&t.__esModule)return t;var o=Object.create(null);if(i.r(o),Object.defineProperty(o,"default",{enumerable:!0,value:t}),2&e&&"string"!=typeof t)for(var n in t)i.d(o,n,function(e){return t[e]}.bind(null,n));return o},i.n=function(e){var t=e&&e.__esModule?function(){return e.default}:function(){return e};return i.d(t,"a",t),t},i.o=function(e,t){return Object.prototype.hasOwnProperty.call(e,t)},i.p="/themes/admin/static/js";var t=window.webpackJsonp=window.webpackJsonp||[],n=t.push.bind(t);t.push=e,t=t.slice();for(var a=0;a<t.length;a++)e(t[a]);var m=n;u.push(["./CTFd/themes/admin/assets/js/pages/challenge.js",0,1]),r()}({"./CTFd/themes/admin/assets/js/challenges/files.js":function(e,t,o){Object.defineProperty(t,"__esModule",{value:!0}),t.addFile=function(e){e.preventDefault();var t=e.target,o={challenge:window.CHALLENGE_ID,type:"challenge"};s.default.files.upload(t,o,function(e){setTimeout(function(){window.location.reload()},700)})},t.deleteFile=function(e){var t=(0,n.default)(this).attr("file-id"),o=(0,n.default)(this).parent().parent();(0,i.ezQuery)({title:"Delete Files",body:"Are you sure you want to delete this file?",success:function(){a.default.fetch("/api/v1/files/"+t,{method:"DELETE"}).then(function(e){return e.json()}).then(function(e){e.success&&o.remove()})}})};var n=l(o("./node_modules/jquery/dist/jquery.js")),a=l(o("./CTFd/themes/core/assets/js/CTFd.js")),s=l(o("./CTFd/themes/core/assets/js/helpers.js")),i=o("./CTFd/themes/core/assets/js/ezq.js");function l(e){return e&&e.__esModule?e:{default:e}}},"./CTFd/themes/admin/assets/js/challenges/flags.js":function(e,t,o){Object.defineProperty(t,"__esModule",{value:!0}),t.deleteFlag=function(e){e.preventDefault();var t=(0,s.default)(this).attr("flag-id"),o=(0,s.default)(this).parent().parent();(0,n.ezQuery)({title:"Delete Flag",body:"Are you sure you want to delete this flag?",success:function(){i.default.fetch("/api/v1/flags/"+t,{method:"DELETE"}).then(function(e){return e.json()}).then(function(e){e.success&&o.remove()})}})},t.addFlagModal=function(e){s.default.get(i.default.config.urlRoot+"/api/v1/flags/types",function(e){var t=e.data,o=(0,s.default)("#flags-create-select");o.empty();var n=(0,s.default)("<option> -- </option>");for(var a in o.append(n),t)t.hasOwnProperty(a)&&(n=(0,s.default)("<option value='{0}'>{1}</option>".format(a,t[a].name)),o.append(n));(0,s.default)("#flag-edit-modal").modal()}),(0,s.default)("#flag-edit-modal form").submit(function(e){e.preventDefault();var t=(0,s.default)(this).serializeJSON(!0);t.challenge=window.CHALLENGE_ID,i.default.fetch("/api/v1/flags",{method:"POST",credentials:"same-origin",headers:{Accept:"application/json","Content-Type":"application/json"},body:JSON.stringify(t)}).then(function(e){return e.json()}).then(function(e){window.location.reload()})}),(0,s.default)("#flag-edit-modal").modal()},t.editFlagModal=function(e){e.preventDefault();var n=(0,s.default)(this).attr("flag-id"),a=(0,s.default)(this).parent().parent();s.default.get(i.default.config.urlRoot+"/api/v1/flags/"+n,function(e){var o=e.data;s.default.get(i.default.config.urlRoot+o.templates.update,function(e){(0,s.default)("#edit-flags form").empty(),(0,s.default)("#edit-flags form").off();var t=l.default.compile(e);(0,s.default)("#edit-flags form").append(t.render(o)),(0,s.default)("#edit-flags form").submit(function(e){e.preventDefault();var t=(0,s.default)("#edit-flags form").serializeJSON();i.default.fetch("/api/v1/flags/"+n,{method:"PATCH",credentials:"same-origin",headers:{Accept:"application/json","Content-Type":"application/json"},body:JSON.stringify(t)}).then(function(e){return e.json()}).then(function(e){e.success&&((0,s.default)(a).find(".flag-content").text(e.data.content),(0,s.default)("#edit-flags").modal("toggle"))})}),(0,s.default)("#edit-flags").modal()})})},t.flagTypeSelect=function(e){e.preventDefault();var t=(0,s.default)(this).find("option:selected").text();s.default.get(i.default.config.urlRoot+"/api/v1/flags/types/"+t,function(e){var t=e.data;s.default.get(i.default.config.urlRoot+t.templates.create,function(e){var t=l.default.compile(e);(0,s.default)("#create-keys-entry-div").html(t.render()),(0,s.default)("#create-keys-button-div").show()})})};var s=a(o("./node_modules/jquery/dist/jquery.js")),i=a(o("./CTFd/themes/core/assets/js/CTFd.js")),l=a(o("./node_modules/nunjucks/browser/nunjucks.js")),n=o("./CTFd/themes/core/assets/js/ezq.js");function a(e){return e&&e.__esModule?e:{default:e}}},"./CTFd/themes/admin/assets/js/challenges/hints.js":function(e,t,o){Object.defineProperty(t,"__esModule",{value:!0}),t.showHintModal=function(e){e.preventDefault(),(0,a.default)("#hint-edit-modal form").find("input, textarea").val("").trigger("change"),(0,a.default)("#hint-edit-form textarea").each(function(e,t){t.hasOwnProperty("codemirror")&&t.codemirror.refresh()}),(0,a.default)("#hint-edit-modal").modal()},t.showEditHintModal=function(e){e.preventDefault();var t=(0,a.default)(this).attr("hint-id");s.default.fetch("/api/v1/hints/"+t+"?preview=true",{method:"GET",credentials:"same-origin",headers:{Accept:"application/json","Content-Type":"application/json"}}).then(function(e){return e.json()}).then(function(e){e.success&&((0,a.default)("#hint-edit-form input[name=content],textarea[name=content]").val(e.data.content).trigger("change"),(0,a.default)("#hint-edit-modal").on("shown.bs.modal",function(){(0,a.default)("#hint-edit-form textarea").each(function(e,t){t.hasOwnProperty("codemirror")&&t.codemirror.refresh()})}).on("hide.bs.modal",function(){(0,a.default)("#hint-edit-form textarea").each(function(e,t){(0,a.default)(t).val("").trigger("change"),t.hasOwnProperty("codemirror")&&t.codemirror.refresh()})}),(0,a.default)("#hint-edit-form input[name=cost]").val(e.data.cost),(0,a.default)("#hint-edit-form input[name=id]").val(e.data.id),(0,a.default)("#hint-edit-modal").modal())})},t.deleteHint=function(e){e.preventDefault();var t=(0,a.default)(this).attr("hint-id"),o=(0,a.default)(this).parent().parent();(0,n.ezQuery)({title:"Delete Hint",body:"Are you sure you want to delete this hint?",success:function(){s.default.fetch("/api/v1/hints/"+t,{method:"DELETE"}).then(function(e){return e.json()}).then(function(e){e.success&&o.remove()})}})},t.editHint=function(e){e.preventDefault();var t=(0,a.default)(this).serializeJSON(!0);t.challenge=window.CHALLENGE_ID;var o="POST",n="/api/v1/hints";t.id&&(o="PATCH",n="/api/v1/hints/"+t.id);s.default.fetch(n,{method:o,credentials:"same-origin",headers:{Accept:"application/json","Content-Type":"application/json"},body:JSON.stringify(t)}).then(function(e){return e.json()}).then(function(e){e.success&&window.location.reload()})};var a=i(o("./node_modules/jquery/dist/jquery.js")),s=i(o("./CTFd/themes/core/assets/js/CTFd.js")),n=o("./CTFd/themes/core/assets/js/ezq.js");function i(e){return e&&e.__esModule?e:{default:e}}},"./CTFd/themes/admin/assets/js/challenges/requirements.js":function(e,t,o){Object.defineProperty(t,"__esModule",{value:!0}),t.addRequirement=function(e){e.preventDefault();var t=(0,a.default)("#prerequisite-add-form").serializeJSON();if(!t.prerequisite)return;window.CHALLENGE_REQUIREMENTS.prerequisites.push(parseInt(t.prerequisite));var o={requirements:window.CHALLENGE_REQUIREMENTS};s.default.fetch("/api/v1/challenges/"+window.CHALLENGE_ID,{method:"PATCH",credentials:"same-origin",headers:{Accept:"application/json","Content-Type":"application/json"},body:JSON.stringify(o)}).then(function(e){return e.json()}).then(function(e){e.success&&window.location.reload()})},t.deleteRequirement=function(e){var t=(0,a.default)(this).attr("challenge-id"),o=(0,a.default)(this).parent().parent();window.CHALLENGE_REQUIREMENTS.prerequisites.pop(t);var n={requirements:window.CHALLENGE_REQUIREMENTS};s.default.fetch("/api/v1/challenges/"+window.CHALLENGE_ID,{method:"PATCH",credentials:"same-origin",headers:{Accept:"application/json","Content-Type":"application/json"},body:JSON.stringify(n)}).then(function(e){return e.json()}).then(function(e){e.success&&o.remove()})};var a=n(o("./node_modules/jquery/dist/jquery.js")),s=n(o("./CTFd/themes/core/assets/js/CTFd.js"));function n(e){return e&&e.__esModule?e:{default:e}}},"./CTFd/themes/admin/assets/js/challenges/tags.js":function(e,t,o){Object.defineProperty(t,"__esModule",{value:!0}),t.deleteTag=i,t.addTag=function(e){if(13!=e.keyCode)return;var t=(0,n.default)(this),o={value:t.val(),challenge:window.CHALLENGE_ID};a.default.api.post_tag_list({},o).then(function(e){if(e.success){var t=(0,n.default)("<span class='badge badge-primary mx-1 challenge-tag'><span>{0}</span><a class='btn-fa delete-tag' tag-id='{1}'>&times;</a></span>".format(e.data.value,e.data.id));(0,n.default)("#challenge-tags").append(t),t.click(i)}}),t.val("")};var n=s(o("./node_modules/jquery/dist/jquery.js")),a=s(o("./CTFd/themes/core/assets/js/CTFd.js"));function s(e){return e&&e.__esModule?e:{default:e}}function i(e){var t=(0,n.default)(this),o=t.attr("tag-id");a.default.api.delete_tag({tagId:o}).then(function(e){e.success&&t.parent().remove()})}},"./CTFd/themes/admin/assets/js/components/files/MediaLibrary.vue":function(e,t,o){o.r(t);var n=o("./CTFd/themes/admin/assets/js/components/files/MediaLibrary.vue?vue&type=template&id=50f8d42a&"),a=o("./CTFd/themes/admin/assets/js/components/files/MediaLibrary.vue?vue&type=script&lang=js&");for(var s in a)"default"!==s&&function(e){o.d(t,e,function(){return a[e]})}(s);var i=o("./node_modules/vue-loader/lib/runtime/componentNormalizer.js"),l=Object(i.a)(a.default,n.a,n.b,!1,null,null,null);l.options.__file="CTFd/themes/admin/assets/js/components/files/MediaLibrary.vue",t.default=l.exports},"./CTFd/themes/admin/assets/js/components/files/MediaLibrary.vue?vue&type=script&lang=js&":function(e,t,o){o.r(t);var n=o("./node_modules/babel-loader/lib/index.js?!./node_modules/vue-loader/lib/index.js?!./CTFd/themes/admin/assets/js/components/files/MediaLibrary.vue?vue&type=script&lang=js&"),a=o.n(n);for(var s in n)"default"!==s&&function(e){o.d(t,e,function(){return n[e]})}(s);t.default=a.a},"./CTFd/themes/admin/assets/js/components/files/MediaLibrary.vue?vue&type=template&id=50f8d42a&":function(e,t,o){function n(){var o=this,e=o.$createElement,n=o._self._c||e;return n("div",{staticClass:"modal fade",attrs:{id:"media-modal",tabindex:"-1"}},[n("div",{staticClass:"modal-dialog modal-lg"},[n("div",{staticClass:"modal-content"},[o._m(0),o._v(" "),n("div",{staticClass:"modal-body"},[n("div",{staticClass:"modal-header"},[n("div",{staticClass:"container"},[n("div",{staticClass:"row mh-100"},[n("div",{staticClass:"col-md-6",attrs:{id:"media-library-list"}},o._l(o.files,function(t){return n("div",{key:t.id,staticClass:"media-item-wrapper"},[n("a",{attrs:{href:"javascript:void(0)"},on:{click:function(e){return o.selectFile(t),!1}}},[n("i",{class:o.getIconClass(t.location),attrs:{"aria-hidden":"true"}}),o._v(" "),n("small",{staticClass:"media-item-title"},[o._v(o._s(t.location.split("/").pop()))])])])}),0),o._v(" "),n("div",{staticClass:"col-md-6",attrs:{id:"media-library-details"}},[n("h4",{staticClass:"text-center"},[o._v("Media Details")]),o._v(" "),n("div",{attrs:{id:"media-item"}},[n("div",{staticClass:"text-center",attrs:{id:"media-icon"}},[this.selectedFile?n("div",["far fa-file-image"===o.getIconClass(this.selectedFile.location)?n("div",[n("img",{staticStyle:{"max-width":"100%","max-height":"100%","object-fit":"contain"},attrs:{src:o.buildSelectedFileUrl()}})]):n("div",[n("i",{class:o.getIconClass(this.selectedFile.location)+" fa-4x",attrs:{"aria-hidden":"true"}})])]):o._e()]),o._v(" "),n("br"),o._v(" "),this.selectedFile?n("div",{staticClass:"text-center",attrs:{id:"media-filename"}},[n("a",{attrs:{href:o.buildSelectedFileUrl(),target:"_blank"}},[o._v("\n "+o._s(this.selectedFile.location.split("/").pop())+"\n ")])]):o._e(),o._v(" "),n("br"),o._v(" "),n("div",{staticClass:"form-group"},[this.selectedFile?n("div",[o._v("\n Link:\n "),n("input",{staticClass:"form-control",attrs:{type:"text",id:"media-link",readonly:""},domProps:{value:o.buildSelectedFileUrl()}})]):n("div",[o._v("\n Link:\n "),n("input",{staticClass:"form-control",attrs:{type:"text",id:"media-link",readonly:""}})])]),o._v(" "),n("div",{staticClass:"form-group text-center"},[n("div",{staticClass:"row"},[n("div",{staticClass:"col-md-6"},[n("button",{staticClass:"btn btn-success w-100",attrs:{id:"media-insert","data-toggle":"tooltip","data-placement":"top",title:"Insert link into editor"},on:{click:o.insertSelectedFile}},[o._v("\n Insert\n ")])]),o._v(" "),n("div",{staticClass:"col-md-3"},[n("button",{staticClass:"btn btn-primary w-100",attrs:{id:"media-download","data-toggle":"tooltip","data-placement":"top",title:"Download file"},on:{click:o.downloadSelectedFile}},[n("i",{staticClass:"fas fa-download"})])]),o._v(" "),n("div",{staticClass:"col-md-3"},[n("button",{staticClass:"btn btn-danger w-100",attrs:{id:"media-delete","data-toggle":"tooltip","data-placement":"top",title:"Delete file"},on:{click:o.deleteSelectedFile}},[n("i",{staticClass:"far fa-trash-alt"})])])])])])])])])]),o._v(" "),o._m(1)]),o._v(" "),n("div",{staticClass:"modal-footer"},[n("div",{staticClass:"float-right"},[n("button",{staticClass:"btn btn-primary media-upload-button",attrs:{type:"submit"},on:{click:o.uploadChosenFiles}},[o._v("\n Upload\n ")])])])])])])}var a=[function(){var e=this,t=e.$createElement,o=e._self._c||t;return o("div",{staticClass:"modal-header"},[o("div",{staticClass:"container"},[o("div",{staticClass:"row"},[o("div",{staticClass:"col-md-12"},[o("h3",{staticClass:"text-center"},[e._v("Media Library")])])])]),e._v(" "),o("button",{staticClass:"close",attrs:{type:"button","data-dismiss":"modal","aria-label":"Close"}},[o("span",{attrs:{"aria-hidden":"true"}},[e._v("×")])])])},function(){var e=this,t=e.$createElement,o=e._self._c||t;return o("form",{attrs:{id:"media-library-upload",enctype:"multipart/form-data"}},[o("div",{staticClass:"form-group"},[o("label",{attrs:{for:"media-files"}},[e._v("\n Upload Files\n ")]),e._v(" "),o("input",{staticClass:"form-control-file",attrs:{type:"file",name:"file",id:"media-files",multiple:""}}),e._v(" "),o("sub",{staticClass:"help-block"},[e._v("\n Attach multiple files using Control+Click or Cmd+Click.\n ")])]),e._v(" "),o("input",{attrs:{type:"hidden",value:"page",name:"type"}})])}];n._withStripped=!0,o.d(t,"a",function(){return n}),o.d(t,"b",function(){return a})},"./CTFd/themes/admin/assets/js/pages/challenge.js":function(e,t,o){o("./CTFd/themes/admin/assets/js/pages/main.js");var n=o("./CTFd/themes/core/assets/js/utils.js"),l=f(o("./node_modules/jquery/dist/jquery.js"));o("./node_modules/bootstrap/js/dist/tab.js");var i=f(o("./CTFd/themes/core/assets/js/CTFd.js")),a=o("./CTFd/themes/core/assets/js/ezq.js"),d=f(o("./CTFd/themes/core/assets/js/helpers.js")),s=o("./CTFd/themes/admin/assets/js/challenges/files.js"),r=o("./CTFd/themes/admin/assets/js/challenges/tags.js"),c=o("./CTFd/themes/admin/assets/js/challenges/requirements.js"),u=o("./CTFd/themes/admin/assets/js/styles.js"),m=o("./CTFd/themes/admin/assets/js/challenges/hints.js"),p=o("./CTFd/themes/admin/assets/js/challenges/flags.js");function f(e){return e&&e.__esModule?e:{default:e}}function h(e){i.default.api.get_hint({hintId:e,preview:!0}).then(function(e){e.data.content&&function(e){(0,a.ezAlert)({title:"Hint",body:j.render(e.content),button:"Got it!"})}(e.data)})}var j=i.default.lib.markdown();function g(e,t){var o=e.data,n=(0,l.default)("#result-message"),a=(0,l.default)("#result-notification"),s=(0,l.default)("#submission-input");a.removeClass(),n.text(o.message),"authentication_required"!==o.status?("incorrect"===o.status?(a.addClass("alert alert-danger alert-dismissable text-center"),a.slideDown(),s.removeClass("correct"),s.addClass("wrong"),setTimeout(function(){s.removeClass("wrong")},3e3)):"correct"===o.status?(a.addClass("alert alert-success alert-dismissable text-center"),a.slideDown(),(0,l.default)(".challenge-solves").text(parseInt((0,l.default)(".challenge-solves").text().split(" ")[0])+1+" Solves"),s.val(""),s.removeClass("wrong"),s.addClass("correct")):"already_solved"===o.status?(a.addClass("alert alert-info alert-dismissable text-center"),a.slideDown(),s.addClass("correct")):"paused"===o.status?(a.addClass("alert alert-warning alert-dismissable text-center"),a.slideDown()):"ratelimited"===o.status&&(a.addClass("alert alert-warning alert-dismissable text-center"),a.slideDown(),s.addClass("too-fast"),setTimeout(function(){s.removeClass("too-fast")},3e3)),setTimeout(function(){(0,l.default)(".alert").slideUp(),(0,l.default)("#submit-key").removeClass("disabled-button"),(0,l.default)("#submit-key").prop("disabled",!1)},3e3),t&&t(o)):window.location=i.default.config.urlRoot+"/login?next="+i.default.config.urlRoot+window.location.pathname+window.location.hash}function _(t){i.default._internal.challenge={},l.default.getScript(i.default.config.urlRoot+t.scripts.view,function(){var e=t.create;(0,l.default)("#create-chal-entry-div").html(e),(0,u.bindMarkdownEditors)(),l.default.getScript(i.default.config.urlRoot+t.scripts.create,function(){(0,l.default)("#create-chal-entry-div form").submit(function(e){e.preventDefault();var t=(0,l.default)("#create-chal-entry-div form").serializeJSON();i.default.fetch("/api/v1/challenges",{method:"POST",credentials:"same-origin",headers:{Accept:"application/json","Content-Type":"application/json"},body:JSON.stringify(t)}).then(function(e){return e.json()}).then(function(e){e.success&&((0,l.default)("#challenge-create-options #challenge_id").val(e.data.id),(0,l.default)("#challenge-create-options").modal())})})})})}function v(a){a.preventDefault();var s=(0,l.default)(a.target).serializeJSON(!0),o={challenge_id:s.challenge_id,content:s.flag||"",type:s.flag_type,data:s.flag_data?s.flag_data:""};Promise.all([new Promise(function(t,e){0!=o.content.length?i.default.fetch("/api/v1/flags",{method:"POST",credentials:"same-origin",headers:{Accept:"application/json","Content-Type":"application/json"},body:JSON.stringify(o)}).then(function(e){t(e.json())}):t()}),new Promise(function(e,t){var o=a.target,n={challenge:s.challenge_id,type:"challenge"};(0,l.default)(o.elements.file).val()&&d.default.files.upload(o,n),e()})]).then(function(e){i.default.fetch("/api/v1/challenges/"+s.challenge_id,{method:"PATCH",credentials:"same-origin",headers:{Accept:"application/json","Content-Type":"application/json"},body:JSON.stringify({state:s.state})}).then(function(e){return e.json()}).then(function(e){e.success&&setTimeout(function(){window.location=i.default.config.urlRoot+"/admin/challenges/"+s.challenge_id},700)})})}function y(e){var t=(0,l.default)(this).find("option:selected").data("meta");void 0!==t?_(t):(0,l.default)("#create-chal-entry-div").empty()}(0,l.default)(function(){(0,l.default)(".preview-challenge").click(function(e){window.challenge=new Object,i.default._internal.challenge={},l.default.get(i.default.config.urlRoot+"/api/v1/challenges/"+window.CHALLENGE_ID,function(e){var t=i.default._internal.challenge,o=e.data;o.solves=null,l.default.getScript(i.default.config.urlRoot+o.type_data.scripts.view,function(){(0,l.default)("#challenge-window").empty(),(0,l.default)("#challenge-window").append(o.view),(0,l.default)("#challenge-window #challenge-input").addClass("form-control"),(0,l.default)("#challenge-window #challenge-submit").addClass("btn btn-md btn-outline-secondary float-right"),(0,l.default)(".challenge-solves").hide(),(0,l.default)(".nav-tabs a").click(function(e){e.preventDefault(),(0,l.default)(this).tab("show")}),(0,l.default)("#challenge-window").on("hide.bs.modal",function(e){(0,l.default)("#challenge-input").removeClass("wrong"),(0,l.default)("#challenge-input").removeClass("correct"),(0,l.default)("#incorrect-key").slideUp(),(0,l.default)("#correct-key").slideUp(),(0,l.default)("#already-solved").slideUp(),(0,l.default)("#too-fast").slideUp()}),(0,l.default)(".load-hint").on("click",function(e){h((0,l.default)(this).data("hint-id"))}),(0,l.default)("#challenge-submit").click(function(e){e.preventDefault(),(0,l.default)("#challenge-submit").addClass("disabled-button"),(0,l.default)("#challenge-submit").prop("disabled",!0),i.default._internal.challenge.submit(!0).then(g)}),(0,l.default)("#challenge-input").keyup(function(e){13==e.keyCode&&(0,l.default)("#challenge-submit").click()}),t.postRender(),window.location.replace(window.location.href.split("#")[0]+"#preview"),(0,l.default)("#challenge-window").modal()})})}),(0,l.default)(".delete-challenge").click(function(e){(0,a.ezQuery)({title:"Delete Challenge",body:"Are you sure you want to delete {0}".format("<strong>"+(0,n.htmlEntities)(window.CHALLENGE_NAME)+"</strong>"),success:function(){i.default.fetch("/api/v1/challenges/"+window.CHALLENGE_ID,{method:"DELETE"}).then(function(e){return e.json()}).then(function(e){e.success&&(window.location=i.default.config.urlRoot+"/admin/challenges")})}})}),(0,l.default)("#challenge-update-container > form").submit(function(e){e.preventDefault();var o=(0,l.default)(e.target).serializeJSON(!0);i.default.fetch("/api/v1/challenges/"+window.CHALLENGE_ID+"/flags",{method:"GET",credentials:"same-origin",headers:{Accept:"application/json","Content-Type":"application/json"}}).then(function(e){return e.json()}).then(function(e){function t(){i.default.fetch("/api/v1/challenges/"+window.CHALLENGE_ID,{method:"PATCH",credentials:"same-origin",headers:{Accept:"application/json","Content-Type":"application/json"},body:JSON.stringify(o)}).then(function(e){return e.json()}).then(function(e){if(e.success){switch((0,l.default)(".challenge-state").text(e.data.state),e.data.state){case"visible":(0,l.default)(".challenge-state").removeClass("badge-danger").addClass("badge-success");break;case"hidden":(0,l.default)(".challenge-state").removeClass("badge-success").addClass("badge-danger")}(0,a.ezToast)({title:"Success",body:"Your challenge has been updated!"})}})}0===e.data.length&&"visible"===o.state?(0,a.ezQuery)({title:"Missing Flags",body:"This challenge does not have any flags meaning it may be unsolveable. Are you sure you'd like to update this challenge?",success:t}):t()})}),(0,l.default)("#challenge-create-options form").submit(v),(0,l.default)("#tags-add-input").keyup(r.addTag),(0,l.default)(".delete-tag").click(r.deleteTag),(0,l.default)("#prerequisite-add-form").submit(c.addRequirement),(0,l.default)(".delete-requirement").click(c.deleteRequirement),(0,l.default)("#file-add-form").submit(s.addFile),(0,l.default)(".delete-file").click(s.deleteFile),(0,l.default)("#hint-add-button").click(m.showHintModal),(0,l.default)(".delete-hint").click(m.deleteHint),(0,l.default)(".edit-hint").click(m.showEditHintModal),(0,l.default)("#hint-edit-form").submit(m.editHint),(0,l.default)("#flag-add-button").click(p.addFlagModal),(0,l.default)(".delete-flag").click(p.deleteFlag),(0,l.default)("#flags-create-select").change(p.flagTypeSelect),(0,l.default)(".edit-flag").click(p.editFlagModal),l.default.get(i.default.config.urlRoot+"/api/v1/challenges/types",function(e){(0,l.default)("#create-chals-select").empty();var t=e.data,o=Object.keys(t).length;if(1<o){for(var n in(0,l.default)("#create-chals-select").append("<option> -- </option>"),t){var a=t[n],s=(0,l.default)("<option/>");s.attr("value",a.type),s.text(a.name),s.data("meta",a),(0,l.default)("#create-chals-select").append(s)}(0,l.default)("#create-chals-select-div").show(),(0,l.default)("#create-chals-select").val("standard"),_(t.standard)}else if(1==o){var i=Object.keys(t)[0];(0,l.default)("#create-chals-select").empty(),_(t[i])}}),(0,l.default)("#create-chals-select").change(y)})},"./CTFd/themes/admin/assets/js/pages/main.js":function(e,t,o){var n=m(o("./CTFd/themes/core/assets/js/CTFd.js")),a=m(o("./node_modules/jquery/dist/jquery.js")),s=m(o("./node_modules/moment/moment.js")),i=m(o("./node_modules/nunjucks/browser/nunjucks.js")),l=o("./node_modules/howler/dist/howler.js"),d=m(o("./CTFd/themes/core/assets/js/events.js")),r=m(o("./CTFd/themes/core/assets/js/times.js")),c=m(o("./CTFd/themes/admin/assets/js/styles.js")),u=m(o("./CTFd/themes/core/assets/js/helpers.js"));function m(e){return e&&e.__esModule?e:{default:e}}n.default.init(window.init),window.CTFd=n.default,window.helpers=u.default,window.$=a.default,window.Moment=s.default,window.nunjucks=i.default,window.Howl=l.Howl,(0,a.default)(function(){(0,c.default)(),(0,r.default)(),(0,d.default)(n.default.config.urlRoot)})},"./CTFd/themes/admin/assets/js/styles.js":function(e,t,o){Object.defineProperty(t,"__esModule",{value:!0}),t.showMediaLibrary=r,t.bindMarkdownEditors=c,t.default=void 0,o("./node_modules/bootstrap/dist/js/bootstrap.bundle.js");var n=o("./CTFd/themes/core/assets/js/utils.js"),a=d(o("./node_modules/jquery/dist/jquery.js")),s=d(o("./node_modules/easymde/src/js/easymde.js")),i=d(o("./node_modules/vue/dist/vue.esm.browser.js")),l=d(o("./CTFd/themes/admin/assets/js/components/files/MediaLibrary.vue"));function d(e){return e&&e.__esModule?e:{default:e}}function r(e){var t=i.default.extend(l.default),o=document.createElement("div");document.querySelector("main").appendChild(o);var n=new t({propsData:{editor:e}}).$mount(o);(0,a.default)("#media-modal").on("hidden.bs.modal",function(e){n.$destroy(),(0,a.default)("#media-modal").remove()}),(0,a.default)("#media-modal").modal()}function c(){(0,a.default)("textarea.markdown").each(function(e,t){if(!1===t.hasOwnProperty("mde")){var o=new s.default({autoDownloadFontAwesome:!1,toolbar:["bold","italic","heading","|","quote","unordered-list","ordered-list","|","link","image",{name:"media",action:function(e){r(e)},className:"fas fa-file-upload",title:"Media Library"},"|","preview","guide"],element:this,initialValue:(0,a.default)(this).val(),forceSync:!0,minHeight:"200px"});this.mde=o,this.codemirror=o.codemirror,(0,a.default)(this).on("change keyup paste",function(){o.codemirror.getDoc().setValue((0,a.default)(this).val()),o.codemirror.refresh()})}})}t.default=function(){(0,a.default)(":input").each(function(){(0,a.default)(this).data("initial",(0,a.default)(this).val())}),(0,a.default)(function(){(0,a.default)("tr[data-href], td[data-href]").click(function(){if(!getSelection().toString()){var e=(0,a.default)(this).attr("data-href");e&&(window.location=e)}return!1}),(0,a.default)("[data-checkbox]").click(function(e){(0,a.default)(e.target).is("input[type=checkbox]")?e.stopImmediatePropagation():((0,a.default)(this).find("input[type=checkbox]").click(),e.stopImmediatePropagation())}),(0,a.default)("[data-checkbox-all]").on("click change",function(e){var t=(0,a.default)(this).prop("checked"),o=(0,a.default)(this).index()+1;(0,a.default)(this).closest("table").find("tr td:nth-child(".concat(o,") input[type=checkbox]")).prop("checked",t),e.stopImmediatePropagation()}),(0,a.default)("tr[data-href] a, tr[data-href] button").click(function(e){(0,a.default)(this).attr("data-dismiss")||e.stopPropagation()}),(0,a.default)(".page-select").change(function(){var e=new URL(window.location);e.searchParams.set("page",this.value),window.location.href=e.toString()}),(0,a.default)('a[data-toggle="tab"]').on("shown.bs.tab",function(e){sessionStorage.setItem("activeTab",(0,a.default)(e.target).attr("href"))});var e=sessionStorage.getItem("activeTab");if(e){var t=(0,a.default)('.nav-tabs a[href="'.concat(e,'"], .nav-pills a[href="').concat(e,'"]'));t.length?t.tab("show"):sessionStorage.removeItem("activeTab")}c(),(0,n.makeSortableTables)(),(0,a.default)('[data-toggle="tooltip"]').tooltip()})}},"./CTFd/themes/core/assets/js/CTFd.js":function(e,t,o){Object.defineProperty(t,"__esModule",{value:!0}),t.default=void 0;var n=r(o("./CTFd/themes/core/assets/js/fetch.js")),a=r(o("./CTFd/themes/core/assets/js/config.js")),s=o("./CTFd/themes/core/assets/js/api.js");o("./CTFd/themes/core/assets/js/patch.js");var i=r(o("./node_modules/markdown-it/index.js")),l=r(o("./node_modules/jquery/dist/jquery.js")),d=r(o("./CTFd/themes/core/assets/js/ezq.js"));function r(e){return e&&e.__esModule?e:{default:e}}function c(e,t,o){return t in e?Object.defineProperty(e,t,{value:o,enumerable:!0,configurable:!0,writable:!0}):e[t]=o,e}var u=new s.API("/"),m={},p={ezq:d.default},f={$:l.default,markdown:function(e){var t=function(t){for(var e=1;e<arguments.length;e++)if(e%2){var o=null!=arguments[e]?arguments[e]:{},n=Object.keys(o);"function"==typeof Object.getOwnPropertySymbols&&(n=n.concat(Object.getOwnPropertySymbols(o).filter(function(e){return Object.getOwnPropertyDescriptor(o,e).enumerable}))),n.forEach(function(e){c(t,e,o[e])})}else Object.defineProperties(t,Object.getOwnPropertyDescriptors(arguments[e]));return t}({},{html:!0,linkify:!0},{},e),o=(0,i.default)(t);return o.renderer.rules.link_open=function(e,t,o,n,a){return e[t].attrPush(["target","_blank"]),a.renderToken(e,t,o)},o}},h=!1,j={run:function(e){e(g)}};var g={init:function(e){h||(h=!0,a.default.urlRoot=e.urlRoot||a.default.urlRoot,a.default.csrfNonce=e.csrfNonce||a.default.csrfNonce,a.default.userMode=e.userMode||a.default.userMode,u.domain=a.default.urlRoot+"/api/v1",m.id=e.userId)},config:a.default,fetch:n.default,user:m,ui:p,api:u,lib:f,_internal:{},plugin:j},_=g;t.default=_},"./CTFd/themes/core/assets/js/api.js":function(e,t,o){var c=n(o("./CTFd/themes/core/assets/js/fetch.js")),l=n(o("./node_modules/q/q.js"));function n(e){return e&&e.__esModule?e:{default:e}}function a(e){return(a="function"==typeof Symbol&&"symbol"==typeof Symbol.iterator?function(e){return typeof e}:function(e){return e&&"function"==typeof Symbol&&e.constructor===Symbol&&e!==Symbol.prototype?"symbol":typeof e})(e)}var s=function(){"use strict";function e(e){var t="object"===a(e)?e.domain:e;if(this.domain=t||"",0===this.domain.length)throw new Error("Domain parameter must be specified as a string.")}function i(o,n){return o.$queryParameters&&Object.keys(o.$queryParameters).forEach(function(e){var t=o.$queryParameters[e];n[e]=t}),n}return e.prototype.request=function(e,t,o,n,a,s,i,l){var d=s&&Object.keys(s).length?function(e){var t=[];for(var o in e)e.hasOwnProperty(o)&&t.push(encodeURIComponent(o)+"="+encodeURIComponent(e[o]));return t.join("&")}(s):null,r=t+(d?"?"+d:"");n&&!Object.keys(n).length&&(n=void 0),(0,c.default)(r,{method:e,headers:a,body:JSON.stringify(n)}).then(function(e){return e.json()}).then(function(e){l.resolve(e)}).catch(function(e){l.reject(e)})},e.prototype.post_award_list=function(e){void 0===e&&(e={});var t=l.default.defer(),o=this.domain,n={},a={};return a.Accept=["application/json"],a["Content-Type"]=["application/json"],n=i(e,n),this.request("POST",o+"/awards",e,{},a,n,{},t),t.promise},e.prototype.delete_award=function(e){void 0===e&&(e={});var t=l.default.defer(),o=this.domain,n="/awards/{award_id}",a={},s={};return s.Accept=["application/json"],s["Content-Type"]=["application/json"],n=n.replace("{award_id}",e.awardId),void 0===e.awardId?t.reject(new Error("Missing required parameter: awardId")):(a=i(e,a),this.request("DELETE",o+n,e,{},s,a,{},t)),t.promise},e.prototype.get_award=function(e){void 0===e&&(e={});var t=l.default.defer(),o=this.domain,n="/awards/{award_id}",a={},s={};return s.Accept=["application/json"],s["Content-Type"]=["application/json"],n=n.replace("{award_id}",e.awardId),void 0===e.awardId?t.reject(new Error("Missing required parameter: awardId")):(a=i(e,a),this.request("GET",o+n,e,{},s,a,{},t)),t.promise},e.prototype.post_challenge_list=function(e){void 0===e&&(e={});var t=l.default.defer(),o=this.domain,n={},a={};return a.Accept=["application/json"],a["Content-Type"]=["application/json"],n=i(e,n),this.request("POST",o+"/challenges",e,{},a,n,{},t),t.promise},e.prototype.get_challenge_list=function(e){void 0===e&&(e={});var t=l.default.defer(),o=this.domain,n={},a={};return a.Accept=["application/json"],a["Content-Type"]=["application/json"],n=i(e,n),this.request("GET",o+"/challenges",e,{},a,n,{},t),t.promise},e.prototype.post_challenge_attempt=function(e){void 0===e&&(e={});var t=l.default.defer(),o=this.domain,n={},a={};return a.Accept=["application/json"],a["Content-Type"]=["application/json"],n=i(e,n),this.request("POST",o+"/challenges/attempt",e,{},a,n,{},t),t.promise},e.prototype.get_challenge_types=function(e){void 0===e&&(e={});var t=l.default.defer(),o=this.domain,n={},a={};return a.Accept=["application/json"],a["Content-Type"]=["application/json"],n=i(e,n),this.request("GET",o+"/challenges/types",e,{},a,n,{},t),t.promise},e.prototype.patch_challenge=function(e){void 0===e&&(e={});var t=l.default.defer(),o=this.domain,n="/challenges/{challenge_id}",a={},s={};return s.Accept=["application/json"],s["Content-Type"]=["application/json"],n=n.replace("{challenge_id}",e.challengeId),void 0===e.challengeId?t.reject(new Error("Missing required parameter: challengeId")):(a=i(e,a),this.request("PATCH",o+n,e,{},s,a,{},t)),t.promise},e.prototype.delete_challenge=function(e){void 0===e&&(e={});var t=l.default.defer(),o=this.domain,n="/challenges/{challenge_id}",a={},s={};return s.Accept=["application/json"],s["Content-Type"]=["application/json"],n=n.replace("{challenge_id}",e.challengeId),void 0===e.challengeId?t.reject(new Error("Missing required parameter: challengeId")):(a=i(e,a),this.request("DELETE",o+n,e,{},s,a,{},t)),t.promise},e.prototype.get_challenge=function(e){void 0===e&&(e={});var t=l.default.defer(),o=this.domain,n="/challenges/{challenge_id}",a={},s={};return s.Accept=["application/json"],s["Content-Type"]=["application/json"],n=n.replace("{challenge_id}",e.challengeId),void 0===e.challengeId?t.reject(new Error("Missing required parameter: challengeId")):(a=i(e,a),this.request("GET",o+n,e,{},s,a,{},t)),t.promise},e.prototype.get_challenge_files=function(e){void 0===e&&(e={});var t=l.default.defer(),o=this.domain,n="/challenges/{challenge_id}/files",a={},s={};return s.Accept=["application/json"],s["Content-Type"]=["application/json"],void 0!==e.id&&(a.id=e.id),n=n.replace("{challenge_id}",e.challengeId),void 0===e.challengeId?t.reject(new Error("Missing required parameter: challengeId")):(a=i(e,a),this.request("GET",o+n,e,{},s,a,{},t)),t.promise},e.prototype.get_challenge_flags=function(e){void 0===e&&(e={});var t=l.default.defer(),o=this.domain,n="/challenges/{challenge_id}/flags",a={},s={};return s.Accept=["application/json"],s["Content-Type"]=["application/json"],void 0!==e.id&&(a.id=e.id),n=n.replace("{challenge_id}",e.challengeId),void 0===e.challengeId?t.reject(new Error("Missing required parameter: challengeId")):(a=i(e,a),this.request("GET",o+n,e,{},s,a,{},t)),t.promise},e.prototype.get_challenge_hints=function(e){void 0===e&&(e={});var t=l.default.defer(),o=this.domain,n="/challenges/{challenge_id}/hints",a={},s={};return s.Accept=["application/json"],s["Content-Type"]=["application/json"],void 0!==e.id&&(a.id=e.id),n=n.replace("{challenge_id}",e.challengeId),void 0===e.challengeId?t.reject(new Error("Missing required parameter: challengeId")):(a=i(e,a),this.request("GET",o+n,e,{},s,a,{},t)),t.promise},e.prototype.get_challenge_solves=function(e){void 0===e&&(e={});var t=l.default.defer(),o=this.domain,n="/challenges/{challenge_id}/solves",a={},s={};return s.Accept=["application/json"],s["Content-Type"]=["application/json"],void 0!==e.id&&(a.id=e.id),n=n.replace("{challenge_id}",e.challengeId),void 0===e.challengeId?t.reject(new Error("Missing required parameter: challengeId")):(a=i(e,a),this.request("GET",o+n,e,{},s,a,{},t)),t.promise},e.prototype.get_challenge_tags=function(e){void 0===e&&(e={});var t=l.default.defer(),o=this.domain,n="/challenges/{challenge_id}/tags",a={},s={};return s.Accept=["application/json"],s["Content-Type"]=["application/json"],void 0!==e.id&&(a.id=e.id),n=n.replace("{challenge_id}",e.challengeId),void 0===e.challengeId?t.reject(new Error("Missing required parameter: challengeId")):(a=i(e,a),this.request("GET",o+n,e,{},s,a,{},t)),t.promise},e.prototype.post_config_list=function(e){void 0===e&&(e={});var t=l.default.defer(),o=this.domain,n={},a={};return a.Accept=["application/json"],a["Content-Type"]=["application/json"],n=i(e,n),this.request("POST",o+"/configs",e,{},a,n,{},t),t.promise},e.prototype.patch_config_list=function(e){void 0===e&&(e={});var t=l.default.defer(),o=this.domain,n={},a={};return a.Accept=["application/json"],a["Content-Type"]=["application/json"],n=i(e,n),this.request("PATCH",o+"/configs",e,{},a,n,{},t),t.promise},e.prototype.get_config_list=function(e){void 0===e&&(e={});var t=l.default.defer(),o=this.domain,n={},a={};return a.Accept=["application/json"],a["Content-Type"]=["application/json"],n=i(e,n),this.request("GET",o+"/configs",e,{},a,n,{},t),t.promise},e.prototype.patch_config=function(e){void 0===e&&(e={});var t=l.default.defer(),o=this.domain,n="/configs/{config_key}",a={},s={};return s.Accept=["application/json"],s["Content-Type"]=["application/json"],n=n.replace("{config_key}",e.configKey),void 0===e.configKey?t.reject(new Error("Missing required parameter: configKey")):(a=i(e,a),this.request("PATCH",o+n,e,{},s,a,{},t)),t.promise},e.prototype.delete_config=function(e){void 0===e&&(e={});var t=l.default.defer(),o=this.domain,n="/configs/{config_key}",a={},s={};return s.Accept=["application/json"],s["Content-Type"]=["application/json"],n=n.replace("{config_key}",e.configKey),void 0===e.configKey?t.reject(new Error("Missing required parameter: configKey")):(a=i(e,a),this.request("DELETE",o+n,e,{},s,a,{},t)),t.promise},e.prototype.get_config=function(e){void 0===e&&(e={});var t=l.default.defer(),o=this.domain,n="/configs/{config_key}",a={},s={};return s.Accept=["application/json"],s["Content-Type"]=["application/json"],n=n.replace("{config_key}",e.configKey),void 0===e.configKey?t.reject(new Error("Missing required parameter: configKey")):(a=i(e,a),this.request("GET",o+n,e,{},s,a,{},t)),t.promise},e.prototype.post_files_list=function(e){void 0===e&&(e={});var t=l.default.defer(),o=this.domain,n={},a={};return a.Accept=["application/json"],a["Content-Type"]=["application/json"],n=i(e,n),this.request("POST",o+"/files",e,{},a,n,{},t),t.promise},e.prototype.get_files_list=function(e){void 0===e&&(e={});var t=l.default.defer(),o=this.domain,n={},a={};return a.Accept=["application/json"],a["Content-Type"]=["application/json"],n=i(e,n),this.request("GET",o+"/files",e,{},a,n,{},t),t.promise},e.prototype.delete_files_detail=function(e){void 0===e&&(e={});var t=l.default.defer(),o=this.domain,n="/files/{file_id}",a={},s={};return s.Accept=["application/json"],s["Content-Type"]=["application/json"],n=n.replace("{file_id}",e.fileId),void 0===e.fileId?t.reject(new Error("Missing required parameter: fileId")):(a=i(e,a),this.request("DELETE",o+n,e,{},s,a,{},t)),t.promise},e.prototype.get_files_detail=function(e){void 0===e&&(e={});var t=l.default.defer(),o=this.domain,n="/files/{file_id}",a={},s={};return s.Accept=["application/json"],s["Content-Type"]=["application/json"],n=n.replace("{file_id}",e.fileId),void 0===e.fileId?t.reject(new Error("Missing required parameter: fileId")):(a=i(e,a),this.request("GET",o+n,e,{},s,a,{},t)),t.promise},e.prototype.post_flag_list=function(e){void 0===e&&(e={});var t=l.default.defer(),o=this.domain,n={},a={};return a.Accept=["application/json"],a["Content-Type"]=["application/json"],n=i(e,n),this.request("POST",o+"/flags",e,{},a,n,{},t),t.promise},e.prototype.get_flag_list=function(e){void 0===e&&(e={});var t=l.default.defer(),o=this.domain,n={},a={};return a.Accept=["application/json"],a["Content-Type"]=["application/json"],n=i(e,n),this.request("GET",o+"/flags",e,{},a,n,{},t),t.promise},e.prototype.get_flag_types=function(e){void 0===e&&(e={});var t=l.default.defer(),o=this.domain,n={},a={};return a.Accept=["application/json"],a["Content-Type"]=["application/json"],n=i(e,n),this.request("GET",o+"/flags/types",e,{},a,n,{},t),t.promise},e.prototype.get_flag_types_1=function(e){void 0===e&&(e={});var t=l.default.defer(),o=this.domain,n="/flags/types/{type_name}",a={},s={};return s.Accept=["application/json"],s["Content-Type"]=["application/json"],n=n.replace("{type_name}",e.typeName),void 0===e.typeName?t.reject(new Error("Missing required parameter: typeName")):(a=i(e,a),this.request("GET",o+n,e,{},s,a,{},t)),t.promise},e.prototype.patch_flag=function(e){void 0===e&&(e={});var t=l.default.defer(),o=this.domain,n="/flags/{flag_id}",a={},s={};return s.Accept=["application/json"],s["Content-Type"]=["application/json"],n=n.replace("{flag_id}",e.flagId),void 0===e.flagId?t.reject(new Error("Missing required parameter: flagId")):(a=i(e,a),this.request("PATCH",o+n,e,{},s,a,{},t)),t.promise},e.prototype.delete_flag=function(e){void 0===e&&(e={});var t=l.default.defer(),o=this.domain,n="/flags/{flag_id}",a={},s={};return s.Accept=["application/json"],s["Content-Type"]=["application/json"],n=n.replace("{flag_id}",e.flagId),void 0===e.flagId?t.reject(new Error("Missing required parameter: flagId")):(a=i(e,a),this.request("DELETE",o+n,e,{},s,a,{},t)),t.promise},e.prototype.get_flag=function(e){void 0===e&&(e={});var t=l.default.defer(),o=this.domain,n="/flags/{flag_id}",a={},s={};return s.Accept=["application/json"],s["Content-Type"]=["application/json"],n=n.replace("{flag_id}",e.flagId),void 0===e.flagId?t.reject(new Error("Missing required parameter: flagId")):(a=i(e,a),this.request("GET",o+n,e,{},s,a,{},t)),t.promise},e.prototype.post_hint_list=function(e){void 0===e&&(e={});var t=l.default.defer(),o=this.domain,n={},a={};return a.Accept=["application/json"],a["Content-Type"]=["application/json"],n=i(e,n),this.request("POST",o+"/hints",e,{},a,n,{},t),t.promise},e.prototype.get_hint_list=function(e){void 0===e&&(e={});var t=l.default.defer(),o=this.domain,n={},a={};return a.Accept=["application/json"],a["Content-Type"]=["application/json"],n=i(e,n),this.request("GET",o+"/hints",e,{},a,n,{},t),t.promise},e.prototype.patch_hint=function(e){void 0===e&&(e={});var t=l.default.defer(),o=this.domain,n="/hints/{hint_id}",a={},s={};return s.Accept=["application/json"],s["Content-Type"]=["application/json"],n=n.replace("{hint_id}",e.hintId),void 0===e.hintId?t.reject(new Error("Missing required parameter: hintId")):(a=i(e,a),this.request("PATCH",o+n,e,{},s,a,{},t)),t.promise},e.prototype.delete_hint=function(e){void 0===e&&(e={});var t=l.default.defer(),o=this.domain,n="/hints/{hint_id}",a={},s={};return s.Accept=["application/json"],s["Content-Type"]=["application/json"],n=n.replace("{hint_id}",e.hintId),void 0===e.hintId?t.reject(new Error("Missing required parameter: hintId")):(a=i(e,a),this.request("DELETE",o+n,e,{},s,a,{},t)),t.promise},e.prototype.get_hint=function(e){void 0===e&&(e={});var t=l.default.defer(),o=this.domain,n="/hints/{hint_id}",a={},s={};return s.Accept=["application/json"],s["Content-Type"]=["application/json"],n=n.replace("{hint_id}",e.hintId),void 0===e.hintId?t.reject(new Error("Missing required parameter: hintId")):(a=i(e,a),this.request("GET",o+n,e,{},s,a,{},t)),t.promise},e.prototype.post_notification_list=function(e){void 0===e&&(e={});var t=l.default.defer(),o=this.domain,n={},a={};return a.Accept=["application/json"],a["Content-Type"]=["application/json"],n=i(e,n),this.request("POST",o+"/notifications",e,{},a,n,{},t),t.promise},e.prototype.get_notification_list=function(e){void 0===e&&(e={});var t=l.default.defer(),o=this.domain,n={},a={};return a.Accept=["application/json"],a["Content-Type"]=["application/json"],n=i(e,n),this.request("GET",o+"/notifications",e,{},a,n,{},t),t.promise},e.prototype.delete_notification=function(e){void 0===e&&(e={});var t=l.default.defer(),o=this.domain,n="/notifications/{notification_id}",a={},s={};return s.Accept=["application/json"],s["Content-Type"]=["application/json"],n=n.replace("{notification_id}",e.notificationId),void 0===e.notificationId?t.reject(new Error("Missing required parameter: notificationId")):(a=i(e,a),this.request("DELETE",o+n,e,{},s,a,{},t)),t.promise},e.prototype.get_notification=function(e){void 0===e&&(e={});var t=l.default.defer(),o=this.domain,n="/notifications/{notification_id}",a={},s={};return s.Accept=["application/json"],s["Content-Type"]=["application/json"],n=n.replace("{notification_id}",e.notificationId),void 0===e.notificationId?t.reject(new Error("Missing required parameter: notificationId")):(a=i(e,a),this.request("GET",o+n,e,{},s,a,{},t)),t.promise},e.prototype.post_page_list=function(e){void 0===e&&(e={});var t=l.default.defer(),o=this.domain,n={},a={};return a.Accept=["application/json"],a["Content-Type"]=["application/json"],n=i(e,n),this.request("POST",o+"/pages",e,{},a,n,{},t),t.promise},e.prototype.get_page_list=function(e){void 0===e&&(e={});var t=l.default.defer(),o=this.domain,n={},a={};return a.Accept=["application/json"],a["Content-Type"]=["application/json"],n=i(e,n),this.request("GET",o+"/pages",e,{},a,n,{},t),t.promise},e.prototype.patch_page_detail=function(e){void 0===e&&(e={});var t=l.default.defer(),o=this.domain,n="/pages/{page_id}",a={},s={};return s.Accept=["application/json"],s["Content-Type"]=["application/json"],n=n.replace("{page_id}",e.pageId),void 0===e.pageId?t.reject(new Error("Missing required parameter: pageId")):(a=i(e,a),this.request("PATCH",o+n,e,{},s,a,{},t)),t.promise},e.prototype.delete_page_detail=function(e){void 0===e&&(e={});var t=l.default.defer(),o=this.domain,n="/pages/{page_id}",a={},s={};return s.Accept=["application/json"],s["Content-Type"]=["application/json"],n=n.replace("{page_id}",e.pageId),void 0===e.pageId?t.reject(new Error("Missing required parameter: pageId")):(a=i(e,a),this.request("DELETE",o+n,e,{},s,a,{},t)),t.promise},e.prototype.get_page_detail=function(e){void 0===e&&(e={});var t=l.default.defer(),o=this.domain,n="/pages/{page_id}",a={},s={};return s.Accept=["application/json"],s["Content-Type"]=["application/json"],n=n.replace("{page_id}",e.pageId),void 0===e.pageId?t.reject(new Error("Missing required parameter: pageId")):(a=i(e,a),this.request("GET",o+n,e,{},s,a,{},t)),t.promise},e.prototype.get_scoreboard_list=function(e){void 0===e&&(e={});var t=l.default.defer(),o=this.domain,n={},a={};return a.Accept=["application/json"],a["Content-Type"]=["application/json"],n=i(e,n),this.request("GET",o+"/scoreboard",e,{},a,n,{},t),t.promise},e.prototype.get_scoreboard_detail=function(e){void 0===e&&(e={});var t=l.default.defer(),o=this.domain,n="/scoreboard/top/{count}",a={},s={};return s.Accept=["application/json"],s["Content-Type"]=["application/json"],n=n.replace("{count}",e.count),void 0===e.count?t.reject(new Error("Missing required parameter: count")):(a=i(e,a),this.request("GET",o+n,e,{},s,a,{},t)),t.promise},e.prototype.get_challenge_solve_statistics=function(e){void 0===e&&(e={});var t=l.default.defer(),o=this.domain,n={},a={};return a.Accept=["application/json"],a["Content-Type"]=["application/json"],n=i(e,n),this.request("GET",o+"/statistics/challenges/solves",e,{},a,n,{},t),t.promise},e.prototype.get_challenge_solve_percentages=function(e){void 0===e&&(e={});var t=l.default.defer(),o=this.domain,n={},a={};return a.Accept=["application/json"],a["Content-Type"]=["application/json"],n=i(e,n),this.request("GET",o+"/statistics/challenges/solves/percentages",e,{},a,n,{},t),t.promise},e.prototype.get_challenge_property_counts=function(e){void 0===e&&(e={});var t=l.default.defer(),o=this.domain,n="/statistics/challenges/{column}",a={},s={};return s.Accept=["application/json"],s["Content-Type"]=["application/json"],n=n.replace("{column}",e.column),void 0===e.column?t.reject(new Error("Missing required parameter: column")):(a=i(e,a),this.request("GET",o+n,e,{},s,a,{},t)),t.promise},e.prototype.get_submission_property_counts=function(e){void 0===e&&(e={});var t=l.default.defer(),o=this.domain,n="/statistics/submissions/{column}",a={},s={};return s.Accept=["application/json"],s["Content-Type"]=["application/json"],n=n.replace("{column}",e.column),void 0===e.column?t.reject(new Error("Missing required parameter: column")):(a=i(e,a),this.request("GET",o+n,e,{},s,a,{},t)),t.promise},e.prototype.get_team_statistics=function(e){void 0===e&&(e={});var t=l.default.defer(),o=this.domain,n={},a={};return a.Accept=["application/json"],a["Content-Type"]=["application/json"],n=i(e,n),this.request("GET",o+"/statistics/teams",e,{},a,n,{},t),t.promise},e.prototype.get_user_statistics=function(e){void 0===e&&(e={});var t=l.default.defer(),o=this.domain,n={},a={};return a.Accept=["application/json"],a["Content-Type"]=["application/json"],n=i(e,n),this.request("GET",o+"/statistics/users",e,{},a,n,{},t),t.promise},e.prototype.get_user_property_counts=function(e){void 0===e&&(e={});var t=l.default.defer(),o=this.domain,n="/statistics/users/{column}",a={},s={};return s.Accept=["application/json"],s["Content-Type"]=["application/json"],n=n.replace("{column}",e.column),void 0===e.column?t.reject(new Error("Missing required parameter: column")):(a=i(e,a),this.request("GET",o+n,e,{},s,a,{},t)),t.promise},e.prototype.post_submissions_list=function(e){void 0===e&&(e={});var t=l.default.defer(),o=this.domain,n={},a={};return a.Accept=["application/json"],a["Content-Type"]=["application/json"],n=i(e,n),this.request("POST",o+"/submissions",e,{},a,n,{},t),t.promise},e.prototype.get_submissions_list=function(e){void 0===e&&(e={});var t=l.default.defer(),o=this.domain,n={},a={};return a.Accept=["application/json"],a["Content-Type"]=["application/json"],n=i(e,n),this.request("GET",o+"/submissions",e,{},a,n,{},t),t.promise},e.prototype.delete_submission=function(e){void 0===e&&(e={});var t=l.default.defer(),o=this.domain,n="/submissions/{submission_id}",a={},s={};return s.Accept=["application/json"],s["Content-Type"]=["application/json"],n=n.replace("{submission_id}",e.submissionId),void 0===e.submissionId?t.reject(new Error("Missing required parameter: submissionId")):(a=i(e,a),this.request("DELETE",o+n,e,{},s,a,{},t)),t.promise},e.prototype.get_submission=function(e){void 0===e&&(e={});var t=l.default.defer(),o=this.domain,n="/submissions/{submission_id}",a={},s={};return s.Accept=["application/json"],s["Content-Type"]=["application/json"],n=n.replace("{submission_id}",e.submissionId),void 0===e.submissionId?t.reject(new Error("Missing required parameter: submissionId")):(a=i(e,a),this.request("GET",o+n,e,{},s,a,{},t)),t.promise},e.prototype.post_tag_list=function(e){void 0===e&&(e={});var t=l.default.defer(),o=this.domain,n={},a={};return a.Accept=["application/json"],a["Content-Type"]=["application/json"],n=i(e,n),this.request("POST",o+"/tags",e,{},a,n,{},t),t.promise},e.prototype.get_tag_list=function(e){void 0===e&&(e={});var t=l.default.defer(),o=this.domain,n={},a={};return a.Accept=["application/json"],a["Content-Type"]=["application/json"],n=i(e,n),this.request("GET",o+"/tags",e,{},a,n,{},t),t.promise},e.prototype.patch_tag=function(e){void 0===e&&(e={});var t=l.default.defer(),o=this.domain,n="/tags/{tag_id}",a={},s={};return s.Accept=["application/json"],s["Content-Type"]=["application/json"],n=n.replace("{tag_id}",e.tagId),void 0===e.tagId?t.reject(new Error("Missing required parameter: tagId")):(a=i(e,a),this.request("PATCH",o+n,e,{},s,a,{},t)),t.promise},e.prototype.delete_tag=function(e){void 0===e&&(e={});var t=l.default.defer(),o=this.domain,n="/tags/{tag_id}",a={},s={};return s.Accept=["application/json"],s["Content-Type"]=["application/json"],n=n.replace("{tag_id}",e.tagId),void 0===e.tagId?t.reject(new Error("Missing required parameter: tagId")):(a=i(e,a),this.request("DELETE",o+n,e,{},s,a,{},t)),t.promise},e.prototype.get_tag=function(e){void 0===e&&(e={});var t=l.default.defer(),o=this.domain,n="/tags/{tag_id}",a={},s={};return s.Accept=["application/json"],s["Content-Type"]=["application/json"],n=n.replace("{tag_id}",e.tagId),void 0===e.tagId?t.reject(new Error("Missing required parameter: tagId")):(a=i(e,a),this.request("GET",o+n,e,{},s,a,{},t)),t.promise},e.prototype.post_team_list=function(e){void 0===e&&(e={});var t=l.default.defer(),o=this.domain,n={},a={};return a.Accept=["application/json"],a["Content-Type"]=["application/json"],n=i(e,n),this.request("POST",o+"/teams",e,{},a,n,{},t),t.promise},e.prototype.get_team_list=function(e){void 0===e&&(e={});var t=l.default.defer(),o=this.domain,n={},a={};return a.Accept=["application/json"],a["Content-Type"]=["application/json"],n=i(e,n),this.request("GET",o+"/teams",e,{},a,n,{},t),t.promise},e.prototype.patch_team_private=function(e){void 0===e&&(e={});var t=l.default.defer(),o=this.domain,n={},a={};return a.Accept=["application/json"],a["Content-Type"]=["application/json"],void 0!==e.teamId&&(n.team_id=e.teamId),n=i(e,n),this.request("PATCH",o+"/teams/me",e,{},a,n,{},t),t.promise},e.prototype.get_team_private=function(e){void 0===e&&(e={});var t=l.default.defer(),o=this.domain,n={},a={};return a.Accept=["application/json"],a["Content-Type"]=["application/json"],void 0!==e.teamId&&(n.team_id=e.teamId),n=i(e,n),this.request("GET",o+"/teams/me",e,{},a,n,{},t),t.promise},e.prototype.patch_team_public=function(e){void 0===e&&(e={});var t=l.default.defer(),o=this.domain,n="/teams/{team_id}",a={},s={};return s.Accept=["application/json"],s["Content-Type"]=["application/json"],n=n.replace("{team_id}",e.teamId),void 0===e.teamId?t.reject(new Error("Missing required parameter: teamId")):(a=i(e,a),this.request("PATCH",o+n,e,{},s,a,{},t)),t.promise},e.prototype.delete_team_public=function(e){void 0===e&&(e={});var t=l.default.defer(),o=this.domain,n="/teams/{team_id}",a={},s={};return s.Accept=["application/json"],s["Content-Type"]=["application/json"],n=n.replace("{team_id}",e.teamId),void 0===e.teamId?t.reject(new Error("Missing required parameter: teamId")):(a=i(e,a),this.request("DELETE",o+n,e,{},s,a,{},t)),t.promise},e.prototype.get_team_public=function(e){void 0===e&&(e={});var t=l.default.defer(),o=this.domain,n="/teams/{team_id}",a={},s={};return s.Accept=["application/json"],s["Content-Type"]=["application/json"],n=n.replace("{team_id}",e.teamId),void 0===e.teamId?t.reject(new Error("Missing required parameter: teamId")):(a=i(e,a),this.request("GET",o+n,e,{},s,a,{},t)),t.promise},e.prototype.get_team_awards=function(e){void 0===e&&(e={});var t=l.default.defer(),o=this.domain,n="/teams/{team_id}/awards",a={},s={};return s.Accept=["application/json"],s["Content-Type"]=["application/json"],n=n.replace("{team_id}",e.teamId),void 0===e.teamId?t.reject(new Error("Missing required parameter: teamId")):(a=i(e,a),this.request("GET",o+n,e,{},s,a,{},t)),t.promise},e.prototype.get_team_fails=function(e){void 0===e&&(e={});var t=l.default.defer(),o=this.domain,n="/teams/{team_id}/fails",a={},s={};return s.Accept=["application/json"],s["Content-Type"]=["application/json"],n=n.replace("{team_id}",e.teamId),void 0===e.teamId?t.reject(new Error("Missing required parameter: teamId")):(a=i(e,a),this.request("GET",o+n,e,{},s,a,{},t)),t.promise},e.prototype.get_team_solves=function(e){void 0===e&&(e={});var t=l.default.defer(),o=this.domain,n="/teams/{team_id}/solves",a={},s={};return s.Accept=["application/json"],s["Content-Type"]=["application/json"],n=n.replace("{team_id}",e.teamId),void 0===e.teamId?t.reject(new Error("Missing required parameter: teamId")):(a=i(e,a),this.request("GET",o+n,e,{},s,a,{},t)),t.promise},e.prototype.post_unlock_list=function(e){void 0===e&&(e={});var t=l.default.defer(),o=this.domain,n={},a={};return a.Accept=["application/json"],a["Content-Type"]=["application/json"],n=i(e,n),this.request("POST",o+"/unlocks",e,{},a,n,{},t),t.promise},e.prototype.get_unlock_list=function(e){void 0===e&&(e={});var t=l.default.defer(),o=this.domain,n={},a={};return a.Accept=["application/json"],a["Content-Type"]=["application/json"],n=i(e,n),this.request("GET",o+"/unlocks",e,{},a,n,{},t),t.promise},e.prototype.post_user_list=function(e){void 0===e&&(e={});var t=l.default.defer(),o=this.domain,n={},a={};return a.Accept=["application/json"],a["Content-Type"]=["application/json"],n=i(e,n),this.request("POST",o+"/users",e,{},a,n,{},t),t.promise},e.prototype.get_user_list=function(e){void 0===e&&(e={});var t=l.default.defer(),o=this.domain,n={},a={};return a.Accept=["application/json"],a["Content-Type"]=["application/json"],n=i(e,n),this.request("GET",o+"/users",e,{},a,n,{},t),t.promise},e.prototype.patch_user_private=function(e){void 0===e&&(e={});var t=l.default.defer(),o=this.domain,n={},a={};return a.Accept=["application/json"],a["Content-Type"]=["application/json"],n=i(e,n),this.request("PATCH",o+"/users/me",e,{},a,n,{},t),t.promise},e.prototype.get_user_private=function(e){void 0===e&&(e={});var t=l.default.defer(),o=this.domain,n={},a={};return a.Accept=["application/json"],a["Content-Type"]=["application/json"],n=i(e,n),this.request("GET",o+"/users/me",e,{},a,n,{},t),t.promise},e.prototype.patch_user_public=function(e){void 0===e&&(e={});var t=l.default.defer(),o=this.domain,n="/users/{user_id}",a={},s={};return s.Accept=["application/json"],s["Content-Type"]=["application/json"],n=n.replace("{user_id}",e.userId),void 0===e.userId?t.reject(new Error("Missing required parameter: userId")):(a=i(e,a),this.request("PATCH",o+n,e,{},s,a,{},t)),t.promise},e.prototype.delete_user_public=function(e){void 0===e&&(e={});var t=l.default.defer(),o=this.domain,n="/users/{user_id}",a={},s={};return s.Accept=["application/json"],s["Content-Type"]=["application/json"],n=n.replace("{user_id}",e.userId),void 0===e.userId?t.reject(new Error("Missing required parameter: userId")):(a=i(e,a),this.request("DELETE",o+n,e,{},s,a,{},t)),t.promise},e.prototype.get_user_public=function(e){void 0===e&&(e={});var t=l.default.defer(),o=this.domain,n="/users/{user_id}",a={},s={};return s.Accept=["application/json"],s["Content-Type"]=["application/json"],n=n.replace("{user_id}",e.userId),void 0===e.userId?t.reject(new Error("Missing required parameter: userId")):(a=i(e,a),this.request("GET",o+n,e,{},s,a,{},t)),t.promise},e.prototype.get_user_awards=function(e){void 0===e&&(e={});var t=l.default.defer(),o=this.domain,n="/users/{user_id}/awards",a={},s={};return s.Accept=["application/json"],s["Content-Type"]=["application/json"],n=n.replace("{user_id}",e.userId),void 0===e.userId?t.reject(new Error("Missing required parameter: userId")):(a=i(e,a),this.request("GET",o+n,e,{},s,a,{},t)),t.promise},e.prototype.get_user_fails=function(e){void 0===e&&(e={});var t=l.default.defer(),o=this.domain,n="/users/{user_id}/fails",a={},s={};return s.Accept=["application/json"],s["Content-Type"]=["application/json"],n=n.replace("{user_id}",e.userId),void 0===e.userId?t.reject(new Error("Missing required parameter: userId")):(a=i(e,a),this.request("GET",o+n,e,{},s,a,{},t)),t.promise},e.prototype.get_user_solves=function(e){void 0===e&&(e={});var t=l.default.defer(),o=this.domain,n="/users/{user_id}/solves",a={},s={};return s.Accept=["application/json"],s["Content-Type"]=["application/json"],n=n.replace("{user_id}",e.userId),void 0===e.userId?t.reject(new Error("Missing required parameter: userId")):(a=i(e,a),this.request("GET",o+n,e,{},s,a,{},t)),t.promise},e}();t.API=s},"./CTFd/themes/core/assets/js/config.js":function(e,t,o){Object.defineProperty(t,"__esModule",{value:!0}),t.default=void 0;t.default={urlRoot:"",csrfNonce:"",userMode:""}},"./CTFd/themes/core/assets/js/events.js":function(e,t,o){Object.defineProperty(t,"__esModule",{value:!0}),t.default=void 0;var s=o("./node_modules/howler/dist/howler.js"),n=o("./node_modules/event-source-polyfill/src/eventsource.js"),i=o("./CTFd/themes/core/assets/js/ezq.js"),l=o("./CTFd/themes/core/assets/js/utils.js"),d=n.NativeEventSource||n.EventSourcePolyfill;t.default=function(e){var t=new d(e+"/events"),o=new l.WindowController,n=new s.Howl({src:[e+"/themes/core/static/sounds/notification.webm",e+"/themes/core/static/sounds/notification.mp3"]});function a(e){switch(e.type){case"toast":(0,l.inc_notification_counter)();var t=50<e.content.length?e.content.substring(0,47)+"...":e.content,o=!1;(0,i.ezToast)({title:e.title,body:t,onclick:function(){(0,i.ezAlert)({title:e.title,body:e.content,button:"Got it!",success:function(){o=!0,(0,l.dec_notification_counter)()}})},onclose:function(){o||(0,l.dec_notification_counter)()}});break;case"alert":(0,l.inc_notification_counter)(),(0,i.ezAlert)({title:e.title,body:e.content,button:"Got it!",success:function(){(0,l.dec_notification_counter)()}});break;case"background":default:(0,l.inc_notification_counter)()}e.sound&&n.play()}(0,l.init_notification_counter)(),o.notification=function(e){a(e)},o.masterDidChange=function(){this.isMaster?t.addEventListener("notification",function(e){var t=JSON.parse(e.data);o.broadcast("notification",t),a(t)},!1):t&&t.close()}}},"./CTFd/themes/core/assets/js/ezq.js":function(e,t,o){Object.defineProperty(t,"__esModule",{value:!0}),t.ezAlert=p,t.ezToast=f,t.ezQuery=h,t.ezProgressBar=j,t.ezBadge=g,t.default=void 0,o("./node_modules/bootstrap/js/dist/modal.js");var n,l=(n=o("./node_modules/jquery/dist/jquery.js"))&&n.__esModule?n:{default:n};var s='<div class="modal fade" tabindex="-1" role="dialog"> <div class="modal-dialog" role="document"> <div class="modal-content"> <div class="modal-header"> <h5 class="modal-title">{0}</h5> <button type="button" class="close" data-dismiss="modal" aria-label="Close"> <span aria-hidden="true">&times;</span> </button> </div> <div class="modal-body"> </div> <div class="modal-footer"> </div> </div> </div></div>',d='<div class="toast m-3" role="alert"> <div class="toast-header"> <strong class="mr-auto">{0}</strong> <button type="button" class="ml-2 mb-1 close" data-dismiss="toast" aria-label="Close"> <span aria-hidden="true">&times;</span> </button> </div> <div class="toast-body">{1}</div></div>',i='<div class="progress"> <div class="progress-bar progress-bar-success progress-bar-striped progress-bar-animated" role="progressbar" style="width: {0}%"> </div></div>',a='<div class="alert alert-danger alert-dismissable" role="alert">\n <span class="sr-only">Error:</span>\n {0}\n <button type="button" class="close" data-dismiss="alert" aria-label="Close"><span aria-hidden="true">×</span></button>\n</div>',r='<div class="alert alert-success alert-dismissable submit-row" role="alert">\n <strong>Success!</strong>\n {0}\n <button type="button" class="close" data-dismiss="alert" aria-label="Close"><span aria-hidden="true">×</span></button>\n</div>',c='<button type="button" class="btn btn-primary" data-dismiss="modal">{0}</button>',u='<button type="button" class="btn btn-danger" data-dismiss="modal">No</button>',m='<button type="button" class="btn btn-primary" data-dismiss="modal">Yes</button>';function p(e){var t=s.format(e.title),o=(0,l.default)(t);"string"==typeof e.body?o.find(".modal-body").append("<p>".concat(e.body,"</p>")):o.find(".modal-body").append((0,l.default)(e.body));var n=(0,l.default)(c.format(e.button));return e.success&&(0,l.default)(n).click(function(){e.success()}),e.large&&o.find(".modal-dialog").addClass("modal-lg"),o.find(".modal-footer").append(n),(0,l.default)("main").append(o),o.modal("show"),(0,l.default)(o).on("hidden.bs.modal",function(){(0,l.default)(this).modal("dispose")}),o}function f(e){(0,l.default)("#ezq--notifications-toast-container").length||(0,l.default)("body").append((0,l.default)("<div/>").attr({id:"ezq--notifications-toast-container"}).css({position:"fixed",bottom:"0",right:"0","min-width":"20%"}));var t=d.format(e.title,e.body),o=(0,l.default)(t);if(e.onclose&&(0,l.default)(o).find("button[data-dismiss=toast]").click(function(){e.onclose()}),e.onclick){var n=(0,l.default)(o).find(".toast-body");n.addClass("cursor-pointer"),n.click(function(){e.onclick()})}var a=!1!==e.autohide,s=!1!==e.animation,i=e.delay||1e4;return(0,l.default)("#ezq--notifications-toast-container").prepend(o),o.toast({autohide:a,delay:i,animation:s}),o.toast("show"),o}function h(e){var t=s.format(e.title),o=(0,l.default)(t);"string"==typeof e.body?o.find(".modal-body").append("<p>".concat(e.body,"</p>")):o.find(".modal-body").append((0,l.default)(e.body));var n=(0,l.default)(m),a=(0,l.default)(u);return o.find(".modal-footer").append(a),o.find(".modal-footer").append(n),(0,l.default)("main").append(o),(0,l.default)(o).on("hidden.bs.modal",function(){(0,l.default)(this).modal("dispose")}),(0,l.default)(n).click(function(){e.success()}),o.modal("show"),o}function j(e){if(e.target){var t=(0,l.default)(e.target);return t.find(".progress-bar").css("width",e.width+"%"),t}var o=i.format(e.width),n=s.format(e.title),a=(0,l.default)(n);return a.find(".modal-body").append((0,l.default)(o)),(0,l.default)("main").append(a),a.modal("show")}function g(e){var t={success:r,error:a}[e.type].format(e.body);return(0,l.default)(t)}var _={ezAlert:p,ezToast:f,ezQuery:h,ezProgressBar:j,ezBadge:g};t.default=_},"./CTFd/themes/core/assets/js/fetch.js":function(e,t,o){Object.defineProperty(t,"__esModule",{value:!0}),t.default=void 0,o("./node_modules/whatwg-fetch/fetch.js");var n,a=(n=o("./CTFd/themes/core/assets/js/config.js"))&&n.__esModule?n:{default:n};var s=window.fetch;t.default=function(e,t){return void 0===t&&(t={method:"GET",credentials:"same-origin",headers:{}}),e=a.default.urlRoot+e,void 0===t.headers&&(t.headers={}),t.credentials="same-origin",t.headers.Accept="application/json",t.headers["Content-Type"]="application/json",t.headers["CSRF-Token"]=a.default.csrfNonce,s(e,t)}},"./CTFd/themes/core/assets/js/patch.js":function(e,t,o){var n,l=(n=o("./node_modules/q/q.js"))&&n.__esModule?n:{default:n},a=o("./CTFd/themes/core/assets/js/api.js");function s(e,t,o){return t in e?Object.defineProperty(e,t,{value:o,enumerable:!0,configurable:!0,writable:!0}):e[t]=o,e}function d(e,t){return function(t){for(var e=1;e<arguments.length;e++)if(e%2){var o=null!=arguments[e]?arguments[e]:{},n=Object.keys(o);"function"==typeof Object.getOwnPropertySymbols&&(n=n.concat(Object.getOwnPropertySymbols(o).filter(function(e){return Object.getOwnPropertyDescriptor(o,e).enumerable}))),n.forEach(function(e){s(t,e,o[e])})}else Object.defineProperties(t,Object.getOwnPropertyDescriptors(arguments[e]));return t}({},e,{},t)}a.API.prototype.requestRaw=function(e,t,o,n,a,s,i,l){var d=s&&Object.keys(s).length?function(e){var t=[];for(var o in e)e.hasOwnProperty(o)&&t.push(encodeURIComponent(o)+"="+encodeURIComponent(e[o]));return t.join("&")}(s):null,r=t+(d?"?"+d:"");n&&!Object.keys(n).length&&(n=void 0),fetch(r,{method:e,headers:a,body:n}).then(function(e){return e.json()}).then(function(e){l.resolve(e)}).catch(function(e){l.reject(e)})},a.API.prototype.patch_user_public=function(e,t){void 0===e&&(e={});var o=l.default.defer(),n=this.domain,a="/users/{user_id}",s={};return s.Accept=["application/json"],s["Content-Type"]=["application/json"],a=a.replace("{user_id}",e.userId),void 0===e.userId?o.reject(new Error("Missing required parameter: userId")):this.request("PATCH",n+a,e,t,s,{},{},o),o.promise},a.API.prototype.patch_user_private=function(e,t){void 0===e&&(e={});var o=l.default.defer(),n=this.domain,a={};return a.Accept=["application/json"],a["Content-Type"]=["application/json"],this.request("PATCH",n+"/users/me",e,t,a,{},{},o),o.promise},a.API.prototype.post_unlock_list=function(e,t){var o=l.default.defer(),n=this.domain,a={};return a.Accept=["application/json"],a["Content-Type"]=["application/json"],this.request("POST",n+"/unlocks",e,t,a,{},{},o),o.promise},a.API.prototype.post_notification_list=function(e,t){void 0===e&&(e={});var o=l.default.defer(),n=this.domain,a={};return a.Accept=["application/json"],a["Content-Type"]=["application/json"],this.request("POST",n+"/notifications",e,t,a,{},{},o),o.promise},a.API.prototype.post_files_list=function(e,t){var o=l.default.defer(),n=this.domain,a={};return a.Accept=["application/json"],a["Content-Type"]=["application/json"],this.requestRaw("POST",n+"/files",e,t,a,{},{},o),o.promise},a.API.prototype.patch_config=function(e,t){void 0===e&&(e={});var o=l.default.defer(),n=this.domain,a="/configs/{config_key}",s={};return s.Accept=["application/json"],s["Content-Type"]=["application/json"],a=a.replace("{config_key}",e.configKey),void 0===e.configKey?o.reject(new Error("Missing required parameter: configKey")):this.request("PATCH",n+a,e,t,s,{},{},o),o.promise},a.API.prototype.patch_config_list=function(e,t){void 0===e&&(e={});var o=l.default.defer(),n=this.domain,a={},s={};return s.Accept=["application/json"],s["Content-Type"]=["application/json"],a=d(e,a),this.request("PATCH",n+"/configs",e,t,s,a,{},o),o.promise},a.API.prototype.post_tag_list=function(e,t){void 0===e&&(e={});var o=l.default.defer(),n=this.domain,a={},s={};return s.Accept=["application/json"],s["Content-Type"]=["application/json"],a=d(e,a),this.request("POST",n+"/tags",e,t,s,a,{},o),o.promise},a.API.prototype.patch_team_public=function(e,t){void 0===e&&(e={});var o=l.default.defer(),n=this.domain,a="/teams/{team_id}",s={},i={};return i.Accept=["application/json"],i["Content-Type"]=["application/json"],a=a.replace("{team_id}",e.teamId),void 0===e.teamId?o.reject(new Error("Missing required parameter: teamId")):(s=d(e,s),this.request("PATCH",n+a,e,t,i,s,{},o)),o.promise},a.API.prototype.post_challenge_attempt=function(e,t){void 0===e&&(e={});var o=l.default.defer(),n=this.domain,a={},s={};return s.Accept=["application/json"],s["Content-Type"]=["application/json"],a=d(e,a),this.request("POST",n+"/challenges/attempt",e,t,s,a,{},o),o.promise},a.API.prototype.get_hint=function(e){void 0===e&&(e={});var t=l.default.defer(),o=this.domain,n="/hints/{hint_id}",a={},s={};return s.Accept=["application/json"],s["Content-Type"]=["application/json"],n=n.replace("{hint_id}",e.hintId),void 0===e.hintId?t.reject(new Error("Missing required parameter: hintId")):(delete e.hintId,a=d(e,a),this.request("GET",o+n,e,{},s,a,{},t)),t.promise}},"./CTFd/themes/core/assets/js/times.js":function(e,t,o){Object.defineProperty(t,"__esModule",{value:!0}),t.default=void 0;var n=s(o("./node_modules/moment/moment.js")),a=s(o("./node_modules/jquery/dist/jquery.js"));function s(e){return e&&e.__esModule?e:{default:e}}t.default=function(){(0,a.default)("[data-time]").each(function(e,t){t.innerText=(0,n.default)((0,a.default)(t).data("time")).local().format("MMMM Do, h:mm:ss A")})}},"./CTFd/themes/core/assets/js/utils.js":function(e,t,o){Object.defineProperty(t,"__esModule",{value:!0}),t.WindowController=a,t.colorHash=function(e){for(var t=0,o=0;o<e.length;o++)t=e.charCodeAt(o)+((t<<5)-t);for(var n="#",a=0;a<3;a++){n+=("00"+(t>>4*a&255).toString(16)).substr(-2)}return n},t.htmlEntities=function(e){return(0,i.default)("<div/>").text(e).html()},t.cumulativeSum=function(e){for(var t=e.concat(),o=0;o<e.length;o++)t[o]=e.slice(0,o+1).reduce(function(e,t){return e+t});return t},t.init_notification_counter=function(){var e=s.getItem(l);null===e?s.setItem(l,0):0<e&&(0,i.default)(".badge-notification").text(e)},t.set_notification_counter=function(e){s.setItem(l,e)},t.inc_notification_counter=function(){var e=s.getItem(l)||0;s.setItem(l,++e),(0,i.default)(".badge-notification").text(e)},t.dec_notification_counter=function(){var e=s.getItem(l)||0;0<e&&(s.setItem(l,--e),(0,i.default)(".badge-notification").text(e));0==e&&d()},t.clear_notification_counter=d,t.copyToClipboard=function(e,t){(0,i.default)(t).select(),document.execCommand("copy"),(0,i.default)(e.target).tooltip({title:"Copied!",trigger:"manual"}),(0,i.default)(e.target).tooltip("show"),setTimeout(function(){(0,i.default)(e.target).tooltip("hide")},1500)},t.makeSortableTables=function(){function s(e,t){return(0,i.default)(e).children("td").eq(t).text()}(0,i.default)("th.sort-col").append(' <i class="fas fa-sort"></i>'),(0,i.default)("th.sort-col").click(function(){var e=(0,i.default)(this).parents("table").eq(0),t=e.find("tr:gt(0)").toArray().sort(function(a){return function(e,t){var o=s(e,a),n=s(t,a);return i.default.isNumeric(o)&&i.default.isNumeric(n)?o-n:o.toString().localeCompare(n)}}((0,i.default)(this).index()));this.asc=!this.asc,this.asc||(t=t.reverse());for(var o=0;o<t.length;o++)e.append(t[o])})};var n,i=(n=o("./node_modules/jquery/dist/jquery.js"))&&n.__esModule?n:{default:n};function a(){this.id=Math.random(),this.isMaster=!1,this.others={},window.addEventListener("storage",this,!1),window.addEventListener("unload",this,!1),this.broadcast("hello");var t=this;this._checkTimeout=setTimeout(function e(){t.check(),t._checkTimeout=setTimeout(e,9e3)},500),this._pingTimeout=setTimeout(function e(){t.sendPing(),t._pingTimeout=setTimeout(e,17e3)},17e3)}i.default.fn.serializeJSON=function(o){var n={},a=(0,i.default)(this),e=a.serializeArray();return(e=(e=e.concat(a.find("input[type=checkbox]:checked").map(function(){return{name:this.name,value:!0}}).get())).concat(a.find("input[type=checkbox]:not(:checked)").map(function(){return{name:this.name,value:!1}}).get())).map(function(e){if(o)if(null!==e.value&&""!==e.value)n[e.name]=e.value;else{var t=a.find(":input[name=".concat(e.name,"]"));t.data("initial")!==t.val()&&(n[e.name]=e.value)}else n[e.name]=e.value}),n},String.prototype.format=String.prototype.f=function(){for(var e=this,t=arguments.length;t--;)e=e.replace(new RegExp("\\{"+t+"\\}","gm"),arguments[t]);return e},String.prototype.hashCode=function(){var e,t,o=0;if(0==this.length)return o;for(e=0,t=this.length;e<t;e++)o=(o<<5)-o+this.charCodeAt(e),o|=0;return o},a.prototype.destroy=function(){clearTimeout(this._pingTimeout),clearTimeout(this._checkTimeout),window.removeEventListener("storage",this,!1),window.removeEventListener("unload",this,!1),this.broadcast("bye")},a.prototype.handleEvent=function(e){if("unload"===e.type)this.destroy();else if("broadcast"===e.key)try{var t=JSON.parse(e.newValue);t.id!==this.id&&this[t.type](t)}catch(e){}},a.prototype.sendPing=function(){this.broadcast("ping")},a.prototype.hello=function(e){this.ping(e),e.id<this.id?this.check():this.sendPing()},a.prototype.ping=function(e){this.others[e.id]=+new Date},a.prototype.bye=function(e){delete this.others[e.id],this.check()},a.prototype.check=function(e){var t,o=+new Date,n=!0;for(t in this.others)this.others[t]+23e3<o?delete this.others[t]:t<this.id&&(n=!1);this.isMaster!==n&&(this.isMaster=n,this.masterDidChange())},a.prototype.masterDidChange=function(){},a.prototype.broadcast=function(e,t){var o={id:this.id,type:e};for(var n in t)o[n]=t[n];try{localStorage.setItem("broadcast",JSON.stringify(o))}catch(e){console.log(e)}};var s=window.localStorage,l="unread_notifications";function d(){s.setItem(l,0),(0,i.default)(".badge-notification").empty()}},"./node_modules/babel-loader/lib/index.js?!./node_modules/vue-loader/lib/index.js?!./CTFd/themes/admin/assets/js/components/files/MediaLibrary.vue?vue&type=script&lang=js&":function(e,t,o){Object.defineProperty(t,"__esModule",{value:!0}),t.default=void 0;var n=s(o("./CTFd/themes/core/assets/js/CTFd.js")),a=(o("./CTFd/themes/core/assets/js/ezq.js"),s(o("./CTFd/themes/core/assets/js/helpers.js")));function s(e){return e&&e.__esModule?e:{default:e}}var i={props:{editor:Object},data:function(){return{files:[],selectedFile:null}},methods:{getPageFiles:function(){var t=this;n.default.fetch("/api/v1/files?type=page",{credentials:"same-origin"}).then(function(e){return e.json()}).then(function(e){return t.files=e.data,t.files})},uploadChosenFiles:function(){var t=this,e=document.querySelector("#media-library-upload");a.default.files.upload(e,{},function(e){t.getPageFiles()})},selectFile:function(e){return this.selectedFile=e,this.selectedFile},buildSelectedFileUrl:function(){return n.default.config.urlRoot+"/files/"+this.selectedFile.location},deleteSelectedFile:function(){var t=this,e=this.selectedFile.id;confirm("Are you sure you want to delete this file?")&&n.default.fetch("/api/v1/files/"+e,{method:"DELETE"}).then(function(e){200===e.status&&e.json().then(function(e){e.success&&(t.getPageFiles(),t.selectedFile=null)})})},insertSelectedFile:function(){var e=this.$props.editor;e.hasOwnProperty("codemirror")&&(e=e.codemirror);var t=e.getDoc(),o=t.getCursor(),n=this.buildSelectedFileUrl(),a="far fa-file-image"===this.getIconClass(this.selectedFile.location),s=n.split("/").pop();link="[{0}]({1})".format(s,n),a&&(link="!"+link),t.replaceRange(link,o)},downloadSelectedFile:function(){var e=this.buildSelectedFileUrl();window.open(e,"_blank")},getIconClass:function(e){return{png:"far fa-file-image",jpg:"far fa-file-image",jpeg:"far fa-file-image",gif:"far fa-file-image",bmp:"far fa-file-image",svg:"far fa-file-image",txt:"far fa-file-alt",mov:"far fa-file-video",mp4:"far fa-file-video",wmv:"far fa-file-video",flv:"far fa-file-video",mkv:"far fa-file-video",avi:"far fa-file-video",pdf:"far fa-file-pdf",mp3:"far fa-file-sound",wav:"far fa-file-sound",aac:"far fa-file-sound",zip:"far fa-file-archive",gz:"far fa-file-archive",tar:"far fa-file-archive","7z":"far fa-file-archive",rar:"far fa-file-archive",py:"far fa-file-code",c:"far fa-file-code",cpp:"far fa-file-code",html:"far fa-file-code",js:"far fa-file-code",rb:"far fa-file-code",go:"far fa-file-code"}[e.split(".").pop()]||"far fa-file"}},created:function(){return this.getPageFiles()}};t.default=i},"./node_modules/moment/locale sync recursive ^\\.\\/.*$":function(e,t,o){var n={"./af":"./node_modules/moment/locale/af.js","./af.js":"./node_modules/moment/locale/af.js","./ar":"./node_modules/moment/locale/ar.js","./ar-dz":"./node_modules/moment/locale/ar-dz.js","./ar-dz.js":"./node_modules/moment/locale/ar-dz.js","./ar-kw":"./node_modules/moment/locale/ar-kw.js","./ar-kw.js":"./node_modules/moment/locale/ar-kw.js","./ar-ly":"./node_modules/moment/locale/ar-ly.js","./ar-ly.js":"./node_modules/moment/locale/ar-ly.js","./ar-ma":"./node_modules/moment/locale/ar-ma.js","./ar-ma.js":"./node_modules/moment/locale/ar-ma.js","./ar-sa":"./node_modules/moment/locale/ar-sa.js","./ar-sa.js":"./node_modules/moment/locale/ar-sa.js","./ar-tn":"./node_modules/moment/locale/ar-tn.js","./ar-tn.js":"./node_modules/moment/locale/ar-tn.js","./ar.js":"./node_modules/moment/locale/ar.js","./az":"./node_modules/moment/locale/az.js","./az.js":"./node_modules/moment/locale/az.js","./be":"./node_modules/moment/locale/be.js","./be.js":"./node_modules/moment/locale/be.js","./bg":"./node_modules/moment/locale/bg.js","./bg.js":"./node_modules/moment/locale/bg.js","./bm":"./node_modules/moment/locale/bm.js","./bm.js":"./node_modules/moment/locale/bm.js","./bn":"./node_modules/moment/locale/bn.js","./bn.js":"./node_modules/moment/locale/bn.js","./bo":"./node_modules/moment/locale/bo.js","./bo.js":"./node_modules/moment/locale/bo.js","./br":"./node_modules/moment/locale/br.js","./br.js":"./node_modules/moment/locale/br.js","./bs":"./node_modules/moment/locale/bs.js","./bs.js":"./node_modules/moment/locale/bs.js","./ca":"./node_modules/moment/locale/ca.js","./ca.js":"./node_modules/moment/locale/ca.js","./cs":"./node_modules/moment/locale/cs.js","./cs.js":"./node_modules/moment/locale/cs.js","./cv":"./node_modules/moment/locale/cv.js","./cv.js":"./node_modules/moment/locale/cv.js","./cy":"./node_modules/moment/locale/cy.js","./cy.js":"./node_modules/moment/locale/cy.js","./da":"./node_modules/moment/locale/da.js","./da.js":"./node_modules/moment/locale/da.js","./de":"./node_modules/moment/locale/de.js","./de-at":"./node_modules/moment/locale/de-at.js","./de-at.js":"./node_modules/moment/locale/de-at.js","./de-ch":"./node_modules/moment/locale/de-ch.js","./de-ch.js":"./node_modules/moment/locale/de-ch.js","./de.js":"./node_modules/moment/locale/de.js","./dv":"./node_modules/moment/locale/dv.js","./dv.js":"./node_modules/moment/locale/dv.js","./el":"./node_modules/moment/locale/el.js","./el.js":"./node_modules/moment/locale/el.js","./en-SG":"./node_modules/moment/locale/en-SG.js","./en-SG.js":"./node_modules/moment/locale/en-SG.js","./en-au":"./node_modules/moment/locale/en-au.js","./en-au.js":"./node_modules/moment/locale/en-au.js","./en-ca":"./node_modules/moment/locale/en-ca.js","./en-ca.js":"./node_modules/moment/locale/en-ca.js","./en-gb":"./node_modules/moment/locale/en-gb.js","./en-gb.js":"./node_modules/moment/locale/en-gb.js","./en-ie":"./node_modules/moment/locale/en-ie.js","./en-ie.js":"./node_modules/moment/locale/en-ie.js","./en-il":"./node_modules/moment/locale/en-il.js","./en-il.js":"./node_modules/moment/locale/en-il.js","./en-nz":"./node_modules/moment/locale/en-nz.js","./en-nz.js":"./node_modules/moment/locale/en-nz.js","./eo":"./node_modules/moment/locale/eo.js","./eo.js":"./node_modules/moment/locale/eo.js","./es":"./node_modules/moment/locale/es.js","./es-do":"./node_modules/moment/locale/es-do.js","./es-do.js":"./node_modules/moment/locale/es-do.js","./es-us":"./node_modules/moment/locale/es-us.js","./es-us.js":"./node_modules/moment/locale/es-us.js","./es.js":"./node_modules/moment/locale/es.js","./et":"./node_modules/moment/locale/et.js","./et.js":"./node_modules/moment/locale/et.js","./eu":"./node_modules/moment/locale/eu.js","./eu.js":"./node_modules/moment/locale/eu.js","./fa":"./node_modules/moment/locale/fa.js","./fa.js":"./node_modules/moment/locale/fa.js","./fi":"./node_modules/moment/locale/fi.js","./fi.js":"./node_modules/moment/locale/fi.js","./fo":"./node_modules/moment/locale/fo.js","./fo.js":"./node_modules/moment/locale/fo.js","./fr":"./node_modules/moment/locale/fr.js","./fr-ca":"./node_modules/moment/locale/fr-ca.js","./fr-ca.js":"./node_modules/moment/locale/fr-ca.js","./fr-ch":"./node_modules/moment/locale/fr-ch.js","./fr-ch.js":"./node_modules/moment/locale/fr-ch.js","./fr.js":"./node_modules/moment/locale/fr.js","./fy":"./node_modules/moment/locale/fy.js","./fy.js":"./node_modules/moment/locale/fy.js","./ga":"./node_modules/moment/locale/ga.js","./ga.js":"./node_modules/moment/locale/ga.js","./gd":"./node_modules/moment/locale/gd.js","./gd.js":"./node_modules/moment/locale/gd.js","./gl":"./node_modules/moment/locale/gl.js","./gl.js":"./node_modules/moment/locale/gl.js","./gom-latn":"./node_modules/moment/locale/gom-latn.js","./gom-latn.js":"./node_modules/moment/locale/gom-latn.js","./gu":"./node_modules/moment/locale/gu.js","./gu.js":"./node_modules/moment/locale/gu.js","./he":"./node_modules/moment/locale/he.js","./he.js":"./node_modules/moment/locale/he.js","./hi":"./node_modules/moment/locale/hi.js","./hi.js":"./node_modules/moment/locale/hi.js","./hr":"./node_modules/moment/locale/hr.js","./hr.js":"./node_modules/moment/locale/hr.js","./hu":"./node_modules/moment/locale/hu.js","./hu.js":"./node_modules/moment/locale/hu.js","./hy-am":"./node_modules/moment/locale/hy-am.js","./hy-am.js":"./node_modules/moment/locale/hy-am.js","./id":"./node_modules/moment/locale/id.js","./id.js":"./node_modules/moment/locale/id.js","./is":"./node_modules/moment/locale/is.js","./is.js":"./node_modules/moment/locale/is.js","./it":"./node_modules/moment/locale/it.js","./it-ch":"./node_modules/moment/locale/it-ch.js","./it-ch.js":"./node_modules/moment/locale/it-ch.js","./it.js":"./node_modules/moment/locale/it.js","./ja":"./node_modules/moment/locale/ja.js","./ja.js":"./node_modules/moment/locale/ja.js","./jv":"./node_modules/moment/locale/jv.js","./jv.js":"./node_modules/moment/locale/jv.js","./ka":"./node_modules/moment/locale/ka.js","./ka.js":"./node_modules/moment/locale/ka.js","./kk":"./node_modules/moment/locale/kk.js","./kk.js":"./node_modules/moment/locale/kk.js","./km":"./node_modules/moment/locale/km.js","./km.js":"./node_modules/moment/locale/km.js","./kn":"./node_modules/moment/locale/kn.js","./kn.js":"./node_modules/moment/locale/kn.js","./ko":"./node_modules/moment/locale/ko.js","./ko.js":"./node_modules/moment/locale/ko.js","./ku":"./node_modules/moment/locale/ku.js","./ku.js":"./node_modules/moment/locale/ku.js","./ky":"./node_modules/moment/locale/ky.js","./ky.js":"./node_modules/moment/locale/ky.js","./lb":"./node_modules/moment/locale/lb.js","./lb.js":"./node_modules/moment/locale/lb.js","./lo":"./node_modules/moment/locale/lo.js","./lo.js":"./node_modules/moment/locale/lo.js","./lt":"./node_modules/moment/locale/lt.js","./lt.js":"./node_modules/moment/locale/lt.js","./lv":"./node_modules/moment/locale/lv.js","./lv.js":"./node_modules/moment/locale/lv.js","./me":"./node_modules/moment/locale/me.js","./me.js":"./node_modules/moment/locale/me.js","./mi":"./node_modules/moment/locale/mi.js","./mi.js":"./node_modules/moment/locale/mi.js","./mk":"./node_modules/moment/locale/mk.js","./mk.js":"./node_modules/moment/locale/mk.js","./ml":"./node_modules/moment/locale/ml.js","./ml.js":"./node_modules/moment/locale/ml.js","./mn":"./node_modules/moment/locale/mn.js","./mn.js":"./node_modules/moment/locale/mn.js","./mr":"./node_modules/moment/locale/mr.js","./mr.js":"./node_modules/moment/locale/mr.js","./ms":"./node_modules/moment/locale/ms.js","./ms-my":"./node_modules/moment/locale/ms-my.js","./ms-my.js":"./node_modules/moment/locale/ms-my.js","./ms.js":"./node_modules/moment/locale/ms.js","./mt":"./node_modules/moment/locale/mt.js","./mt.js":"./node_modules/moment/locale/mt.js","./my":"./node_modules/moment/locale/my.js","./my.js":"./node_modules/moment/locale/my.js","./nb":"./node_modules/moment/locale/nb.js","./nb.js":"./node_modules/moment/locale/nb.js","./ne":"./node_modules/moment/locale/ne.js","./ne.js":"./node_modules/moment/locale/ne.js","./nl":"./node_modules/moment/locale/nl.js","./nl-be":"./node_modules/moment/locale/nl-be.js","./nl-be.js":"./node_modules/moment/locale/nl-be.js","./nl.js":"./node_modules/moment/locale/nl.js","./nn":"./node_modules/moment/locale/nn.js","./nn.js":"./node_modules/moment/locale/nn.js","./pa-in":"./node_modules/moment/locale/pa-in.js","./pa-in.js":"./node_modules/moment/locale/pa-in.js","./pl":"./node_modules/moment/locale/pl.js","./pl.js":"./node_modules/moment/locale/pl.js","./pt":"./node_modules/moment/locale/pt.js","./pt-br":"./node_modules/moment/locale/pt-br.js","./pt-br.js":"./node_modules/moment/locale/pt-br.js","./pt.js":"./node_modules/moment/locale/pt.js","./ro":"./node_modules/moment/locale/ro.js","./ro.js":"./node_modules/moment/locale/ro.js","./ru":"./node_modules/moment/locale/ru.js","./ru.js":"./node_modules/moment/locale/ru.js","./sd":"./node_modules/moment/locale/sd.js","./sd.js":"./node_modules/moment/locale/sd.js","./se":"./node_modules/moment/locale/se.js","./se.js":"./node_modules/moment/locale/se.js","./si":"./node_modules/moment/locale/si.js","./si.js":"./node_modules/moment/locale/si.js","./sk":"./node_modules/moment/locale/sk.js","./sk.js":"./node_modules/moment/locale/sk.js","./sl":"./node_modules/moment/locale/sl.js","./sl.js":"./node_modules/moment/locale/sl.js","./sq":"./node_modules/moment/locale/sq.js","./sq.js":"./node_modules/moment/locale/sq.js","./sr":"./node_modules/moment/locale/sr.js","./sr-cyrl":"./node_modules/moment/locale/sr-cyrl.js","./sr-cyrl.js":"./node_modules/moment/locale/sr-cyrl.js","./sr.js":"./node_modules/moment/locale/sr.js","./ss":"./node_modules/moment/locale/ss.js","./ss.js":"./node_modules/moment/locale/ss.js","./sv":"./node_modules/moment/locale/sv.js","./sv.js":"./node_modules/moment/locale/sv.js","./sw":"./node_modules/moment/locale/sw.js","./sw.js":"./node_modules/moment/locale/sw.js","./ta":"./node_modules/moment/locale/ta.js","./ta.js":"./node_modules/moment/locale/ta.js","./te":"./node_modules/moment/locale/te.js","./te.js":"./node_modules/moment/locale/te.js","./tet":"./node_modules/moment/locale/tet.js","./tet.js":"./node_modules/moment/locale/tet.js","./tg":"./node_modules/moment/locale/tg.js","./tg.js":"./node_modules/moment/locale/tg.js","./th":"./node_modules/moment/locale/th.js","./th.js":"./node_modules/moment/locale/th.js","./tl-ph":"./node_modules/moment/locale/tl-ph.js","./tl-ph.js":"./node_modules/moment/locale/tl-ph.js","./tlh":"./node_modules/moment/locale/tlh.js","./tlh.js":"./node_modules/moment/locale/tlh.js","./tr":"./node_modules/moment/locale/tr.js","./tr.js":"./node_modules/moment/locale/tr.js","./tzl":"./node_modules/moment/locale/tzl.js","./tzl.js":"./node_modules/moment/locale/tzl.js","./tzm":"./node_modules/moment/locale/tzm.js","./tzm-latn":"./node_modules/moment/locale/tzm-latn.js","./tzm-latn.js":"./node_modules/moment/locale/tzm-latn.js","./tzm.js":"./node_modules/moment/locale/tzm.js","./ug-cn":"./node_modules/moment/locale/ug-cn.js","./ug-cn.js":"./node_modules/moment/locale/ug-cn.js","./uk":"./node_modules/moment/locale/uk.js","./uk.js":"./node_modules/moment/locale/uk.js","./ur":"./node_modules/moment/locale/ur.js","./ur.js":"./node_modules/moment/locale/ur.js","./uz":"./node_modules/moment/locale/uz.js","./uz-latn":"./node_modules/moment/locale/uz-latn.js","./uz-latn.js":"./node_modules/moment/locale/uz-latn.js","./uz.js":"./node_modules/moment/locale/uz.js","./vi":"./node_modules/moment/locale/vi.js","./vi.js":"./node_modules/moment/locale/vi.js","./x-pseudo":"./node_modules/moment/locale/x-pseudo.js","./x-pseudo.js":"./node_modules/moment/locale/x-pseudo.js","./yo":"./node_modules/moment/locale/yo.js","./yo.js":"./node_modules/moment/locale/yo.js","./zh-cn":"./node_modules/moment/locale/zh-cn.js","./zh-cn.js":"./node_modules/moment/locale/zh-cn.js","./zh-hk":"./node_modules/moment/locale/zh-hk.js","./zh-hk.js":"./node_modules/moment/locale/zh-hk.js","./zh-tw":"./node_modules/moment/locale/zh-tw.js","./zh-tw.js":"./node_modules/moment/locale/zh-tw.js"};function a(e){var t=s(e);return o(t)}function s(e){var t=n[e];if(t+1)return t;var o=new Error("Cannot find module '"+e+"'");throw o.code="MODULE_NOT_FOUND",o}a.keys=function(){return Object.keys(n)},a.resolve=s,(e.exports=a).id="./node_modules/moment/locale sync recursive ^\\.\\/.*$"},0:function(e,t){}}); \ No newline at end of file diff --git a/CTFd/themes/core/assets/js/pages/challenges.js b/CTFd/themes/core/assets/js/pages/challenges.js index e87c6feb8..98b6dbad9 100644 --- a/CTFd/themes/core/assets/js/pages/challenges.js +++ b/CTFd/themes/core/assets/js/pages/challenges.js @@ -52,6 +52,13 @@ const displayChal = chal => { $("#challenge-window").empty(); + // Inject challenge data into the plugin + challenge.data = responses[0].data; + + // Call preRender function in plugin + challenge.preRender(); + + // Build HTML from the Jinja response in API $("#challenge-window").append(responses[0].data.view); $("#challenge-window #challenge-input").addClass("form-control"); diff --git a/CTFd/themes/core/static/js/pages/challenges.dev.js b/CTFd/themes/core/static/js/pages/challenges.dev.js index bfa2ac4b3..81b48c088 100644 --- a/CTFd/themes/core/static/js/pages/challenges.dev.js +++ b/CTFd/themes/core/static/js/pages/challenges.dev.js @@ -162,7 +162,7 @@ /***/ (function(module, exports, __webpack_require__) { ; -eval("\n\n__webpack_require__(/*! ./main */ \"./CTFd/themes/core/assets/js/pages/main.js\");\n\n__webpack_require__(/*! bootstrap/js/dist/tab */ \"./node_modules/bootstrap/js/dist/tab.js\");\n\nvar _ezq = __webpack_require__(/*! ../ezq */ \"./CTFd/themes/core/assets/js/ezq.js\");\n\nvar _utils = __webpack_require__(/*! ../utils */ \"./CTFd/themes/core/assets/js/utils.js\");\n\nvar _moment = _interopRequireDefault(__webpack_require__(/*! moment */ \"./node_modules/moment/moment.js\"));\n\nvar _jquery = _interopRequireDefault(__webpack_require__(/*! jquery */ \"./node_modules/jquery/dist/jquery.js\"));\n\nvar _CTFd = _interopRequireDefault(__webpack_require__(/*! ../CTFd */ \"./CTFd/themes/core/assets/js/CTFd.js\"));\n\nvar _config = _interopRequireDefault(__webpack_require__(/*! ../config */ \"./CTFd/themes/core/assets/js/config.js\"));\n\nfunction _interopRequireDefault(obj) { return obj && obj.__esModule ? obj : { default: obj }; }\n\nvar api_func = {\n teams: function teams(x) {\n return _CTFd.default.api.get_team_solves({\n teamId: x\n });\n },\n users: function users(x) {\n return _CTFd.default.api.get_user_solves({\n userId: x\n });\n }\n};\n\nvar md = _CTFd.default.lib.markdown();\n\n_CTFd.default._internal.challenge = {};\nvar challenges = [];\nvar solves = [];\n\nvar loadChal = function loadChal(id) {\n var chal = _jquery.default.grep(challenges, function (chal) {\n return chal.id == id;\n })[0];\n\n if (chal.type === \"hidden\") {\n (0, _ezq.ezAlert)({\n title: \"Challenge Hidden!\",\n body: \"You haven't unlocked this challenge yet!\",\n button: \"Got it!\"\n });\n return;\n }\n\n displayChal(chal);\n};\n\nvar loadChalByName = function loadChalByName(name) {\n var idx = name.lastIndexOf(\"-\");\n var pieces = [name.slice(0, idx), name.slice(idx + 1)];\n var id = pieces[1];\n\n var chal = _jquery.default.grep(challenges, function (chal) {\n return chal.id == id;\n })[0];\n\n displayChal(chal);\n};\n\nvar displayChal = function displayChal(chal) {\n return Promise.all([_CTFd.default.api.get_challenge({\n challengeId: chal.id\n }), _jquery.default.getScript(_config.default.urlRoot + chal.script), _jquery.default.get(_config.default.urlRoot + chal.template)]).then(function (responses) {\n var challenge = _CTFd.default._internal.challenge;\n (0, _jquery.default)(\"#challenge-window\").empty();\n (0, _jquery.default)(\"#challenge-window\").append(responses[0].data.view);\n (0, _jquery.default)(\"#challenge-window #challenge-input\").addClass(\"form-control\");\n (0, _jquery.default)(\"#challenge-window #challenge-submit\").addClass(\"btn btn-md btn-outline-secondary float-right\");\n var modal = (0, _jquery.default)(\"#challenge-window\").find(\".modal-dialog\");\n\n if (window.init.theme_settings && window.init.theme_settings.challenge_window_size) {\n switch (window.init.theme_settings.challenge_window_size) {\n case \"sm\":\n modal.addClass(\"modal-sm\");\n break;\n\n case \"lg\":\n modal.addClass(\"modal-lg\");\n break;\n\n case \"xl\":\n modal.addClass(\"modal-xl\");\n break;\n\n default:\n break;\n }\n }\n\n (0, _jquery.default)(\".challenge-solves\").click(function (_event) {\n getSolves((0, _jquery.default)(\"#challenge-id\").val());\n });\n (0, _jquery.default)(\".nav-tabs a\").click(function (event) {\n event.preventDefault();\n (0, _jquery.default)(this).tab(\"show\");\n }); // Handle modal toggling\n\n (0, _jquery.default)(\"#challenge-window\").on(\"hide.bs.modal\", function (_event) {\n (0, _jquery.default)(\"#challenge-input\").removeClass(\"wrong\");\n (0, _jquery.default)(\"#challenge-input\").removeClass(\"correct\");\n (0, _jquery.default)(\"#incorrect-key\").slideUp();\n (0, _jquery.default)(\"#correct-key\").slideUp();\n (0, _jquery.default)(\"#already-solved\").slideUp();\n (0, _jquery.default)(\"#too-fast\").slideUp();\n });\n (0, _jquery.default)(\".load-hint\").on(\"click\", function (_event) {\n loadHint((0, _jquery.default)(this).data(\"hint-id\"));\n });\n (0, _jquery.default)(\"#challenge-submit\").click(function (event) {\n event.preventDefault();\n (0, _jquery.default)(\"#challenge-submit\").addClass(\"disabled-button\");\n (0, _jquery.default)(\"#challenge-submit\").prop(\"disabled\", true);\n\n _CTFd.default._internal.challenge.submit().then(renderSubmissionResponse).then(loadChals).then(markSolves);\n });\n (0, _jquery.default)(\"#challenge-input\").keyup(function (event) {\n if (event.keyCode == 13) {\n (0, _jquery.default)(\"#challenge-submit\").click();\n }\n });\n challenge.postRender();\n window.location.replace(window.location.href.split(\"#\")[0] + \"#\".concat(chal.name, \"-\").concat(chal.id));\n (0, _jquery.default)(\"#challenge-window\").modal();\n });\n};\n\nfunction renderSubmissionResponse(response) {\n var result = response.data;\n var result_message = (0, _jquery.default)(\"#result-message\");\n var result_notification = (0, _jquery.default)(\"#result-notification\");\n var answer_input = (0, _jquery.default)(\"#challenge-input\");\n result_notification.removeClass();\n result_message.text(result.message);\n\n if (result.status === \"authentication_required\") {\n window.location = _CTFd.default.config.urlRoot + \"/login?next=\" + _CTFd.default.config.urlRoot + window.location.pathname + window.location.hash;\n return;\n } else if (result.status === \"incorrect\") {\n // Incorrect key\n result_notification.addClass(\"alert alert-danger alert-dismissable text-center\");\n result_notification.slideDown();\n answer_input.removeClass(\"correct\");\n answer_input.addClass(\"wrong\");\n setTimeout(function () {\n answer_input.removeClass(\"wrong\");\n }, 3000);\n } else if (result.status === \"correct\") {\n // Challenge Solved\n result_notification.addClass(\"alert alert-success alert-dismissable text-center\");\n result_notification.slideDown();\n (0, _jquery.default)(\".challenge-solves\").text(parseInt((0, _jquery.default)(\".challenge-solves\").text().split(\" \")[0]) + 1 + \" Solves\");\n answer_input.val(\"\");\n answer_input.removeClass(\"wrong\");\n answer_input.addClass(\"correct\");\n } else if (result.status === \"already_solved\") {\n // Challenge already solved\n result_notification.addClass(\"alert alert-info alert-dismissable text-center\");\n result_notification.slideDown();\n answer_input.addClass(\"correct\");\n } else if (result.status === \"paused\") {\n // CTF is paused\n result_notification.addClass(\"alert alert-warning alert-dismissable text-center\");\n result_notification.slideDown();\n } else if (result.status === \"ratelimited\") {\n // Keys per minute too high\n result_notification.addClass(\"alert alert-warning alert-dismissable text-center\");\n result_notification.slideDown();\n answer_input.addClass(\"too-fast\");\n setTimeout(function () {\n answer_input.removeClass(\"too-fast\");\n }, 3000);\n }\n\n setTimeout(function () {\n (0, _jquery.default)(\".alert\").slideUp();\n (0, _jquery.default)(\"#challenge-submit\").removeClass(\"disabled-button\");\n (0, _jquery.default)(\"#challenge-submit\").prop(\"disabled\", false);\n }, 3000);\n}\n\nfunction markSolves() {\n return api_func[_CTFd.default.config.userMode](\"me\").then(function (response) {\n var solves = response.data;\n\n for (var i = solves.length - 1; i >= 0; i--) {\n var btn = (0, _jquery.default)('button[value=\"' + solves[i].challenge_id + '\"]');\n btn.addClass(\"solved-challenge\");\n btn.prepend(\"<i class='fas fa-check corner-button-check'></i>\");\n }\n });\n}\n\nfunction loadUserSolves() {\n if (_CTFd.default.user.id == 0) {\n return Promise.resolve();\n }\n\n return api_func[_CTFd.default.config.userMode](\"me\").then(function (response) {\n var solves = response.data;\n\n for (var i = solves.length - 1; i >= 0; i--) {\n var chal_id = solves[i].challenge_id;\n solves.push(chal_id);\n }\n });\n}\n\nfunction getSolves(id) {\n return _CTFd.default.api.get_challenge_solves({\n challengeId: id\n }).then(function (response) {\n var data = response.data;\n (0, _jquery.default)(\".challenge-solves\").text(parseInt(data.length) + \" Solves\");\n var box = (0, _jquery.default)(\"#challenge-solves-names\");\n box.empty();\n\n for (var i = 0; i < data.length; i++) {\n var _id = data[i].account_id;\n var name = data[i].name;\n var date = (0, _moment.default)(data[i].date).local().fromNow();\n var account_url = data[i].account_url;\n box.append('<tr><td><a href=\"{0}\">{2}</td><td>{3}</td></tr>'.format(account_url, _id, (0, _utils.htmlEntities)(name), date));\n }\n });\n}\n\nfunction loadChals() {\n return _CTFd.default.api.get_challenge_list().then(function (response) {\n var categories = [];\n var $challenges_board = (0, _jquery.default)(\"#challenges-board\");\n challenges = response.data;\n $challenges_board.empty();\n\n for (var i = challenges.length - 1; i >= 0; i--) {\n challenges[i].solves = 0;\n\n if (_jquery.default.inArray(challenges[i].category, categories) == -1) {\n var category = challenges[i].category;\n categories.push(category);\n var categoryid = category.replace(/ /g, \"-\").hashCode();\n var categoryrow = (0, _jquery.default)(\"\" + '<div id=\"{0}-row\" class=\"pt-5\">'.format(categoryid) + '<div class=\"category-header col-md-12 mb-3\">' + \"</div>\" + '<div class=\"category-challenges col-md-12\">' + '<div class=\"challenges-row col-md-12\"></div>' + \"</div>\" + \"</div>\");\n categoryrow.find(\".category-header\").append((0, _jquery.default)(\"<h3>\" + category + \"</h3>\"));\n $challenges_board.append(categoryrow);\n }\n }\n\n for (var _i = 0; _i <= challenges.length - 1; _i++) {\n var chalinfo = challenges[_i];\n var chalid = chalinfo.name.replace(/ /g, \"-\").hashCode();\n var catid = chalinfo.category.replace(/ /g, \"-\").hashCode();\n var chalwrap = (0, _jquery.default)(\"<div id='{0}' class='col-md-3 d-inline-block'></div>\".format(chalid));\n var chalbutton = void 0;\n\n if (solves.indexOf(chalinfo.id) == -1) {\n chalbutton = (0, _jquery.default)(\"<button class='btn btn-dark challenge-button w-100 text-truncate pt-3 pb-3 mb-2' value='{0}'></button>\".format(chalinfo.id));\n } else {\n chalbutton = (0, _jquery.default)(\"<button class='btn btn-dark challenge-button solved-challenge w-100 text-truncate pt-3 pb-3 mb-2' value='{0}'><i class='fas fa-check corner-button-check'></i></button>\".format(chalinfo.id));\n }\n\n var chalheader = (0, _jquery.default)(\"<p>{0}</p>\".format(chalinfo.name));\n var chalscore = (0, _jquery.default)(\"<span>{0}</span>\".format(chalinfo.value));\n\n for (var j = 0; j < chalinfo.tags.length; j++) {\n var tag = \"tag-\" + chalinfo.tags[j].value.replace(/ /g, \"-\");\n chalwrap.addClass(tag);\n }\n\n chalbutton.append(chalheader);\n chalbutton.append(chalscore);\n chalwrap.append(chalbutton);\n (0, _jquery.default)(\"#\" + catid + \"-row\").find(\".category-challenges > .challenges-row\").append(chalwrap);\n }\n\n (0, _jquery.default)(\".challenge-button\").click(function (_event) {\n loadChal(this.value);\n getSolves(this.value);\n });\n });\n}\n\nfunction update() {\n return loadUserSolves() // Load the user's solved challenge ids\n .then(loadChals) // Load the full list of challenges\n .then(markSolves);\n}\n\n(0, _jquery.default)(function () {\n update().then(function () {\n if (window.location.hash.length > 0) {\n loadChalByName(decodeURIComponent(window.location.hash.substring(1)));\n }\n });\n (0, _jquery.default)(\"#challenge-input\").keyup(function (event) {\n if (event.keyCode == 13) {\n (0, _jquery.default)(\"#challenge-submit\").click();\n }\n });\n (0, _jquery.default)(\".nav-tabs a\").click(function (event) {\n event.preventDefault();\n (0, _jquery.default)(this).tab(\"show\");\n });\n (0, _jquery.default)(\"#challenge-window\").on(\"hidden.bs.modal\", function (_event) {\n (0, _jquery.default)(\".nav-tabs a:first\").tab(\"show\");\n history.replaceState(\"\", window.document.title, window.location.pathname);\n });\n (0, _jquery.default)(\".challenge-solves\").click(function (_event) {\n getSolves((0, _jquery.default)(\"#challenge-id\").val());\n });\n (0, _jquery.default)(\"#challenge-window\").on(\"hide.bs.modal\", function (_event) {\n (0, _jquery.default)(\"#challenge-input\").removeClass(\"wrong\");\n (0, _jquery.default)(\"#challenge-input\").removeClass(\"correct\");\n (0, _jquery.default)(\"#incorrect-key\").slideUp();\n (0, _jquery.default)(\"#correct-key\").slideUp();\n (0, _jquery.default)(\"#already-solved\").slideUp();\n (0, _jquery.default)(\"#too-fast\").slideUp();\n });\n});\nsetInterval(update, 300000); // Update every 5 minutes.\n\nvar displayHint = function displayHint(data) {\n (0, _ezq.ezAlert)({\n title: \"Hint\",\n body: md.render(data.content),\n button: \"Got it!\"\n });\n};\n\nvar displayUnlock = function displayUnlock(id) {\n (0, _ezq.ezQuery)({\n title: \"Unlock Hint?\",\n body: \"Are you sure you want to open this hint?\",\n success: function success() {\n var params = {\n target: id,\n type: \"hints\"\n };\n\n _CTFd.default.api.post_unlock_list({}, params).then(function (response) {\n if (response.success) {\n _CTFd.default.api.get_hint({\n hintId: id\n }).then(function (response) {\n displayHint(response.data);\n });\n\n return;\n }\n\n (0, _ezq.ezAlert)({\n title: \"Error\",\n body: md.render(response.errors.score),\n button: \"Got it!\"\n });\n });\n }\n });\n};\n\nvar loadHint = function loadHint(id) {\n _CTFd.default.api.get_hint({\n hintId: id\n }).then(function (response) {\n if (response.data.content) {\n displayHint(response.data);\n return;\n }\n\n displayUnlock(id);\n });\n};\n\n//# sourceURL=webpack:///./CTFd/themes/core/assets/js/pages/challenges.js?"); +eval("\n\n__webpack_require__(/*! ./main */ \"./CTFd/themes/core/assets/js/pages/main.js\");\n\n__webpack_require__(/*! bootstrap/js/dist/tab */ \"./node_modules/bootstrap/js/dist/tab.js\");\n\nvar _ezq = __webpack_require__(/*! ../ezq */ \"./CTFd/themes/core/assets/js/ezq.js\");\n\nvar _utils = __webpack_require__(/*! ../utils */ \"./CTFd/themes/core/assets/js/utils.js\");\n\nvar _moment = _interopRequireDefault(__webpack_require__(/*! moment */ \"./node_modules/moment/moment.js\"));\n\nvar _jquery = _interopRequireDefault(__webpack_require__(/*! jquery */ \"./node_modules/jquery/dist/jquery.js\"));\n\nvar _CTFd = _interopRequireDefault(__webpack_require__(/*! ../CTFd */ \"./CTFd/themes/core/assets/js/CTFd.js\"));\n\nvar _config = _interopRequireDefault(__webpack_require__(/*! ../config */ \"./CTFd/themes/core/assets/js/config.js\"));\n\nfunction _interopRequireDefault(obj) { return obj && obj.__esModule ? obj : { default: obj }; }\n\nvar api_func = {\n teams: function teams(x) {\n return _CTFd.default.api.get_team_solves({\n teamId: x\n });\n },\n users: function users(x) {\n return _CTFd.default.api.get_user_solves({\n userId: x\n });\n }\n};\n\nvar md = _CTFd.default.lib.markdown();\n\n_CTFd.default._internal.challenge = {};\nvar challenges = [];\nvar solves = [];\n\nvar loadChal = function loadChal(id) {\n var chal = _jquery.default.grep(challenges, function (chal) {\n return chal.id == id;\n })[0];\n\n if (chal.type === \"hidden\") {\n (0, _ezq.ezAlert)({\n title: \"Challenge Hidden!\",\n body: \"You haven't unlocked this challenge yet!\",\n button: \"Got it!\"\n });\n return;\n }\n\n displayChal(chal);\n};\n\nvar loadChalByName = function loadChalByName(name) {\n var idx = name.lastIndexOf(\"-\");\n var pieces = [name.slice(0, idx), name.slice(idx + 1)];\n var id = pieces[1];\n\n var chal = _jquery.default.grep(challenges, function (chal) {\n return chal.id == id;\n })[0];\n\n displayChal(chal);\n};\n\nvar displayChal = function displayChal(chal) {\n return Promise.all([_CTFd.default.api.get_challenge({\n challengeId: chal.id\n }), _jquery.default.getScript(_config.default.urlRoot + chal.script), _jquery.default.get(_config.default.urlRoot + chal.template)]).then(function (responses) {\n var challenge = _CTFd.default._internal.challenge;\n (0, _jquery.default)(\"#challenge-window\").empty(); // Inject challenge data into the plugin\n\n challenge.data = responses[0].data; // Call preRender function in plugin\n\n challenge.preRender(); // Build HTML from the Jinja response in API\n\n (0, _jquery.default)(\"#challenge-window\").append(responses[0].data.view);\n (0, _jquery.default)(\"#challenge-window #challenge-input\").addClass(\"form-control\");\n (0, _jquery.default)(\"#challenge-window #challenge-submit\").addClass(\"btn btn-md btn-outline-secondary float-right\");\n var modal = (0, _jquery.default)(\"#challenge-window\").find(\".modal-dialog\");\n\n if (window.init.theme_settings && window.init.theme_settings.challenge_window_size) {\n switch (window.init.theme_settings.challenge_window_size) {\n case \"sm\":\n modal.addClass(\"modal-sm\");\n break;\n\n case \"lg\":\n modal.addClass(\"modal-lg\");\n break;\n\n case \"xl\":\n modal.addClass(\"modal-xl\");\n break;\n\n default:\n break;\n }\n }\n\n (0, _jquery.default)(\".challenge-solves\").click(function (_event) {\n getSolves((0, _jquery.default)(\"#challenge-id\").val());\n });\n (0, _jquery.default)(\".nav-tabs a\").click(function (event) {\n event.preventDefault();\n (0, _jquery.default)(this).tab(\"show\");\n }); // Handle modal toggling\n\n (0, _jquery.default)(\"#challenge-window\").on(\"hide.bs.modal\", function (_event) {\n (0, _jquery.default)(\"#challenge-input\").removeClass(\"wrong\");\n (0, _jquery.default)(\"#challenge-input\").removeClass(\"correct\");\n (0, _jquery.default)(\"#incorrect-key\").slideUp();\n (0, _jquery.default)(\"#correct-key\").slideUp();\n (0, _jquery.default)(\"#already-solved\").slideUp();\n (0, _jquery.default)(\"#too-fast\").slideUp();\n });\n (0, _jquery.default)(\".load-hint\").on(\"click\", function (_event) {\n loadHint((0, _jquery.default)(this).data(\"hint-id\"));\n });\n (0, _jquery.default)(\"#challenge-submit\").click(function (event) {\n event.preventDefault();\n (0, _jquery.default)(\"#challenge-submit\").addClass(\"disabled-button\");\n (0, _jquery.default)(\"#challenge-submit\").prop(\"disabled\", true);\n\n _CTFd.default._internal.challenge.submit().then(renderSubmissionResponse).then(loadChals).then(markSolves);\n });\n (0, _jquery.default)(\"#challenge-input\").keyup(function (event) {\n if (event.keyCode == 13) {\n (0, _jquery.default)(\"#challenge-submit\").click();\n }\n });\n challenge.postRender();\n window.location.replace(window.location.href.split(\"#\")[0] + \"#\".concat(chal.name, \"-\").concat(chal.id));\n (0, _jquery.default)(\"#challenge-window\").modal();\n });\n};\n\nfunction renderSubmissionResponse(response) {\n var result = response.data;\n var result_message = (0, _jquery.default)(\"#result-message\");\n var result_notification = (0, _jquery.default)(\"#result-notification\");\n var answer_input = (0, _jquery.default)(\"#challenge-input\");\n result_notification.removeClass();\n result_message.text(result.message);\n\n if (result.status === \"authentication_required\") {\n window.location = _CTFd.default.config.urlRoot + \"/login?next=\" + _CTFd.default.config.urlRoot + window.location.pathname + window.location.hash;\n return;\n } else if (result.status === \"incorrect\") {\n // Incorrect key\n result_notification.addClass(\"alert alert-danger alert-dismissable text-center\");\n result_notification.slideDown();\n answer_input.removeClass(\"correct\");\n answer_input.addClass(\"wrong\");\n setTimeout(function () {\n answer_input.removeClass(\"wrong\");\n }, 3000);\n } else if (result.status === \"correct\") {\n // Challenge Solved\n result_notification.addClass(\"alert alert-success alert-dismissable text-center\");\n result_notification.slideDown();\n (0, _jquery.default)(\".challenge-solves\").text(parseInt((0, _jquery.default)(\".challenge-solves\").text().split(\" \")[0]) + 1 + \" Solves\");\n answer_input.val(\"\");\n answer_input.removeClass(\"wrong\");\n answer_input.addClass(\"correct\");\n } else if (result.status === \"already_solved\") {\n // Challenge already solved\n result_notification.addClass(\"alert alert-info alert-dismissable text-center\");\n result_notification.slideDown();\n answer_input.addClass(\"correct\");\n } else if (result.status === \"paused\") {\n // CTF is paused\n result_notification.addClass(\"alert alert-warning alert-dismissable text-center\");\n result_notification.slideDown();\n } else if (result.status === \"ratelimited\") {\n // Keys per minute too high\n result_notification.addClass(\"alert alert-warning alert-dismissable text-center\");\n result_notification.slideDown();\n answer_input.addClass(\"too-fast\");\n setTimeout(function () {\n answer_input.removeClass(\"too-fast\");\n }, 3000);\n }\n\n setTimeout(function () {\n (0, _jquery.default)(\".alert\").slideUp();\n (0, _jquery.default)(\"#challenge-submit\").removeClass(\"disabled-button\");\n (0, _jquery.default)(\"#challenge-submit\").prop(\"disabled\", false);\n }, 3000);\n}\n\nfunction markSolves() {\n return api_func[_CTFd.default.config.userMode](\"me\").then(function (response) {\n var solves = response.data;\n\n for (var i = solves.length - 1; i >= 0; i--) {\n var btn = (0, _jquery.default)('button[value=\"' + solves[i].challenge_id + '\"]');\n btn.addClass(\"solved-challenge\");\n btn.prepend(\"<i class='fas fa-check corner-button-check'></i>\");\n }\n });\n}\n\nfunction loadUserSolves() {\n if (_CTFd.default.user.id == 0) {\n return Promise.resolve();\n }\n\n return api_func[_CTFd.default.config.userMode](\"me\").then(function (response) {\n var solves = response.data;\n\n for (var i = solves.length - 1; i >= 0; i--) {\n var chal_id = solves[i].challenge_id;\n solves.push(chal_id);\n }\n });\n}\n\nfunction getSolves(id) {\n return _CTFd.default.api.get_challenge_solves({\n challengeId: id\n }).then(function (response) {\n var data = response.data;\n (0, _jquery.default)(\".challenge-solves\").text(parseInt(data.length) + \" Solves\");\n var box = (0, _jquery.default)(\"#challenge-solves-names\");\n box.empty();\n\n for (var i = 0; i < data.length; i++) {\n var _id = data[i].account_id;\n var name = data[i].name;\n var date = (0, _moment.default)(data[i].date).local().fromNow();\n var account_url = data[i].account_url;\n box.append('<tr><td><a href=\"{0}\">{2}</td><td>{3}</td></tr>'.format(account_url, _id, (0, _utils.htmlEntities)(name), date));\n }\n });\n}\n\nfunction loadChals() {\n return _CTFd.default.api.get_challenge_list().then(function (response) {\n var categories = [];\n var $challenges_board = (0, _jquery.default)(\"#challenges-board\");\n challenges = response.data;\n $challenges_board.empty();\n\n for (var i = challenges.length - 1; i >= 0; i--) {\n challenges[i].solves = 0;\n\n if (_jquery.default.inArray(challenges[i].category, categories) == -1) {\n var category = challenges[i].category;\n categories.push(category);\n var categoryid = category.replace(/ /g, \"-\").hashCode();\n var categoryrow = (0, _jquery.default)(\"\" + '<div id=\"{0}-row\" class=\"pt-5\">'.format(categoryid) + '<div class=\"category-header col-md-12 mb-3\">' + \"</div>\" + '<div class=\"category-challenges col-md-12\">' + '<div class=\"challenges-row col-md-12\"></div>' + \"</div>\" + \"</div>\");\n categoryrow.find(\".category-header\").append((0, _jquery.default)(\"<h3>\" + category + \"</h3>\"));\n $challenges_board.append(categoryrow);\n }\n }\n\n for (var _i = 0; _i <= challenges.length - 1; _i++) {\n var chalinfo = challenges[_i];\n var chalid = chalinfo.name.replace(/ /g, \"-\").hashCode();\n var catid = chalinfo.category.replace(/ /g, \"-\").hashCode();\n var chalwrap = (0, _jquery.default)(\"<div id='{0}' class='col-md-3 d-inline-block'></div>\".format(chalid));\n var chalbutton = void 0;\n\n if (solves.indexOf(chalinfo.id) == -1) {\n chalbutton = (0, _jquery.default)(\"<button class='btn btn-dark challenge-button w-100 text-truncate pt-3 pb-3 mb-2' value='{0}'></button>\".format(chalinfo.id));\n } else {\n chalbutton = (0, _jquery.default)(\"<button class='btn btn-dark challenge-button solved-challenge w-100 text-truncate pt-3 pb-3 mb-2' value='{0}'><i class='fas fa-check corner-button-check'></i></button>\".format(chalinfo.id));\n }\n\n var chalheader = (0, _jquery.default)(\"<p>{0}</p>\".format(chalinfo.name));\n var chalscore = (0, _jquery.default)(\"<span>{0}</span>\".format(chalinfo.value));\n\n for (var j = 0; j < chalinfo.tags.length; j++) {\n var tag = \"tag-\" + chalinfo.tags[j].value.replace(/ /g, \"-\");\n chalwrap.addClass(tag);\n }\n\n chalbutton.append(chalheader);\n chalbutton.append(chalscore);\n chalwrap.append(chalbutton);\n (0, _jquery.default)(\"#\" + catid + \"-row\").find(\".category-challenges > .challenges-row\").append(chalwrap);\n }\n\n (0, _jquery.default)(\".challenge-button\").click(function (_event) {\n loadChal(this.value);\n getSolves(this.value);\n });\n });\n}\n\nfunction update() {\n return loadUserSolves() // Load the user's solved challenge ids\n .then(loadChals) // Load the full list of challenges\n .then(markSolves);\n}\n\n(0, _jquery.default)(function () {\n update().then(function () {\n if (window.location.hash.length > 0) {\n loadChalByName(decodeURIComponent(window.location.hash.substring(1)));\n }\n });\n (0, _jquery.default)(\"#challenge-input\").keyup(function (event) {\n if (event.keyCode == 13) {\n (0, _jquery.default)(\"#challenge-submit\").click();\n }\n });\n (0, _jquery.default)(\".nav-tabs a\").click(function (event) {\n event.preventDefault();\n (0, _jquery.default)(this).tab(\"show\");\n });\n (0, _jquery.default)(\"#challenge-window\").on(\"hidden.bs.modal\", function (_event) {\n (0, _jquery.default)(\".nav-tabs a:first\").tab(\"show\");\n history.replaceState(\"\", window.document.title, window.location.pathname);\n });\n (0, _jquery.default)(\".challenge-solves\").click(function (_event) {\n getSolves((0, _jquery.default)(\"#challenge-id\").val());\n });\n (0, _jquery.default)(\"#challenge-window\").on(\"hide.bs.modal\", function (_event) {\n (0, _jquery.default)(\"#challenge-input\").removeClass(\"wrong\");\n (0, _jquery.default)(\"#challenge-input\").removeClass(\"correct\");\n (0, _jquery.default)(\"#incorrect-key\").slideUp();\n (0, _jquery.default)(\"#correct-key\").slideUp();\n (0, _jquery.default)(\"#already-solved\").slideUp();\n (0, _jquery.default)(\"#too-fast\").slideUp();\n });\n});\nsetInterval(update, 300000); // Update every 5 minutes.\n\nvar displayHint = function displayHint(data) {\n (0, _ezq.ezAlert)({\n title: \"Hint\",\n body: md.render(data.content),\n button: \"Got it!\"\n });\n};\n\nvar displayUnlock = function displayUnlock(id) {\n (0, _ezq.ezQuery)({\n title: \"Unlock Hint?\",\n body: \"Are you sure you want to open this hint?\",\n success: function success() {\n var params = {\n target: id,\n type: \"hints\"\n };\n\n _CTFd.default.api.post_unlock_list({}, params).then(function (response) {\n if (response.success) {\n _CTFd.default.api.get_hint({\n hintId: id\n }).then(function (response) {\n displayHint(response.data);\n });\n\n return;\n }\n\n (0, _ezq.ezAlert)({\n title: \"Error\",\n body: md.render(response.errors.score),\n button: \"Got it!\"\n });\n });\n }\n });\n};\n\nvar loadHint = function loadHint(id) {\n _CTFd.default.api.get_hint({\n hintId: id\n }).then(function (response) {\n if (response.data.content) {\n displayHint(response.data);\n return;\n }\n\n displayUnlock(id);\n });\n};\n\n//# sourceURL=webpack:///./CTFd/themes/core/assets/js/pages/challenges.js?"); /***/ }) diff --git a/CTFd/themes/core/static/js/pages/challenges.min.js b/CTFd/themes/core/static/js/pages/challenges.min.js index 6800718a5..b67b710a9 100644 --- a/CTFd/themes/core/static/js/pages/challenges.min.js +++ b/CTFd/themes/core/static/js/pages/challenges.min.js @@ -1 +1 @@ -!function(r){function e(e){for(var t,o,n=e[0],s=e[1],a=e[2],i=0,l=[];i<n.length;i++)o=n[i],c[o]&&l.push(c[o][0]),c[o]=0;for(t in s)Object.prototype.hasOwnProperty.call(s,t)&&(r[t]=s[t]);for(m&&m(e);l.length;)l.shift()();return u.push.apply(u,a||[]),d()}function d(){for(var e,t=0;t<u.length;t++){for(var o=u[t],n=!0,s=1;s<o.length;s++){var a=o[s];0!==c[a]&&(n=!1)}n&&(u.splice(t--,1),e=i(i.s=o[0]))}return e}var o={},c={3:0,4:0},u=[];function i(e){if(o[e])return o[e].exports;var t=o[e]={i:e,l:!1,exports:{}};return r[e].call(t.exports,t,t.exports,i),t.l=!0,t.exports}i.m=r,i.c=o,i.d=function(e,t,o){i.o(e,t)||Object.defineProperty(e,t,{enumerable:!0,get:o})},i.r=function(e){"undefined"!=typeof Symbol&&Symbol.toStringTag&&Object.defineProperty(e,Symbol.toStringTag,{value:"Module"}),Object.defineProperty(e,"__esModule",{value:!0})},i.t=function(t,e){if(1&e&&(t=i(t)),8&e)return t;if(4&e&&"object"==typeof t&&t&&t.__esModule)return t;var o=Object.create(null);if(i.r(o),Object.defineProperty(o,"default",{enumerable:!0,value:t}),2&e&&"string"!=typeof t)for(var n in t)i.d(o,n,function(e){return t[e]}.bind(null,n));return o},i.n=function(e){var t=e&&e.__esModule?function(){return e.default}:function(){return e};return i.d(t,"a",t),t},i.o=function(e,t){return Object.prototype.hasOwnProperty.call(e,t)},i.p="/themes/core/static/js";var t=window.webpackJsonp=window.webpackJsonp||[],n=t.push.bind(t);t.push=e,t=t.slice();for(var s=0;s<t.length;s++)e(t[s]);var m=n;u.push(["./CTFd/themes/core/assets/js/pages/challenges.js",0,1]),d()}({"./CTFd/themes/core/assets/js/CTFd.js":function(e,t,o){Object.defineProperty(t,"__esModule",{value:!0}),t.default=void 0;var n=d(o("./CTFd/themes/core/assets/js/fetch.js")),s=d(o("./CTFd/themes/core/assets/js/config.js")),a=o("./CTFd/themes/core/assets/js/api.js");o("./CTFd/themes/core/assets/js/patch.js");var i=d(o("./node_modules/markdown-it/index.js")),l=d(o("./node_modules/jquery/dist/jquery.js")),r=d(o("./CTFd/themes/core/assets/js/ezq.js"));function d(e){return e&&e.__esModule?e:{default:e}}function c(e,t,o){return t in e?Object.defineProperty(e,t,{value:o,enumerable:!0,configurable:!0,writable:!0}):e[t]=o,e}var u=new a.API("/"),m={},p={ezq:r.default},f={$:l.default,markdown:function(e){var t=function(t){for(var e=1;e<arguments.length;e++)if(e%2){var o=null!=arguments[e]?arguments[e]:{},n=Object.keys(o);"function"==typeof Object.getOwnPropertySymbols&&(n=n.concat(Object.getOwnPropertySymbols(o).filter(function(e){return Object.getOwnPropertyDescriptor(o,e).enumerable}))),n.forEach(function(e){c(t,e,o[e])})}else Object.defineProperties(t,Object.getOwnPropertyDescriptors(arguments[e]));return t}({},{html:!0,linkify:!0},{},e),o=(0,i.default)(t);return o.renderer.rules.link_open=function(e,t,o,n,s){return e[t].attrPush(["target","_blank"]),s.renderToken(e,t,o)},o}},j=!1,h={run:function(e){e(_)}};var _={init:function(e){j||(j=!0,s.default.urlRoot=e.urlRoot||s.default.urlRoot,s.default.csrfNonce=e.csrfNonce||s.default.csrfNonce,s.default.userMode=e.userMode||s.default.userMode,u.domain=s.default.urlRoot+"/api/v1",m.id=e.userId)},config:s.default,fetch:n.default,user:m,ui:p,api:u,lib:f,_internal:{},plugin:h},g=_;t.default=g},"./CTFd/themes/core/assets/js/api.js":function(e,t,o){var c=n(o("./CTFd/themes/core/assets/js/fetch.js")),l=n(o("./node_modules/q/q.js"));function n(e){return e&&e.__esModule?e:{default:e}}function s(e){return(s="function"==typeof Symbol&&"symbol"==typeof Symbol.iterator?function(e){return typeof e}:function(e){return e&&"function"==typeof Symbol&&e.constructor===Symbol&&e!==Symbol.prototype?"symbol":typeof e})(e)}var a=function(){"use strict";function e(e){var t="object"===s(e)?e.domain:e;if(this.domain=t||"",0===this.domain.length)throw new Error("Domain parameter must be specified as a string.")}function i(o,n){return o.$queryParameters&&Object.keys(o.$queryParameters).forEach(function(e){var t=o.$queryParameters[e];n[e]=t}),n}return e.prototype.request=function(e,t,o,n,s,a,i,l){var r=a&&Object.keys(a).length?function(e){var t=[];for(var o in e)e.hasOwnProperty(o)&&t.push(encodeURIComponent(o)+"="+encodeURIComponent(e[o]));return t.join("&")}(a):null,d=t+(r?"?"+r:"");n&&!Object.keys(n).length&&(n=void 0),(0,c.default)(d,{method:e,headers:s,body:JSON.stringify(n)}).then(function(e){return e.json()}).then(function(e){l.resolve(e)}).catch(function(e){l.reject(e)})},e.prototype.post_award_list=function(e){void 0===e&&(e={});var t=l.default.defer(),o=this.domain,n={},s={};return s.Accept=["application/json"],s["Content-Type"]=["application/json"],n=i(e,n),this.request("POST",o+"/awards",e,{},s,n,{},t),t.promise},e.prototype.delete_award=function(e){void 0===e&&(e={});var t=l.default.defer(),o=this.domain,n="/awards/{award_id}",s={},a={};return a.Accept=["application/json"],a["Content-Type"]=["application/json"],n=n.replace("{award_id}",e.awardId),void 0===e.awardId?t.reject(new Error("Missing required parameter: awardId")):(s=i(e,s),this.request("DELETE",o+n,e,{},a,s,{},t)),t.promise},e.prototype.get_award=function(e){void 0===e&&(e={});var t=l.default.defer(),o=this.domain,n="/awards/{award_id}",s={},a={};return a.Accept=["application/json"],a["Content-Type"]=["application/json"],n=n.replace("{award_id}",e.awardId),void 0===e.awardId?t.reject(new Error("Missing required parameter: awardId")):(s=i(e,s),this.request("GET",o+n,e,{},a,s,{},t)),t.promise},e.prototype.post_challenge_list=function(e){void 0===e&&(e={});var t=l.default.defer(),o=this.domain,n={},s={};return s.Accept=["application/json"],s["Content-Type"]=["application/json"],n=i(e,n),this.request("POST",o+"/challenges",e,{},s,n,{},t),t.promise},e.prototype.get_challenge_list=function(e){void 0===e&&(e={});var t=l.default.defer(),o=this.domain,n={},s={};return s.Accept=["application/json"],s["Content-Type"]=["application/json"],n=i(e,n),this.request("GET",o+"/challenges",e,{},s,n,{},t),t.promise},e.prototype.post_challenge_attempt=function(e){void 0===e&&(e={});var t=l.default.defer(),o=this.domain,n={},s={};return s.Accept=["application/json"],s["Content-Type"]=["application/json"],n=i(e,n),this.request("POST",o+"/challenges/attempt",e,{},s,n,{},t),t.promise},e.prototype.get_challenge_types=function(e){void 0===e&&(e={});var t=l.default.defer(),o=this.domain,n={},s={};return s.Accept=["application/json"],s["Content-Type"]=["application/json"],n=i(e,n),this.request("GET",o+"/challenges/types",e,{},s,n,{},t),t.promise},e.prototype.patch_challenge=function(e){void 0===e&&(e={});var t=l.default.defer(),o=this.domain,n="/challenges/{challenge_id}",s={},a={};return a.Accept=["application/json"],a["Content-Type"]=["application/json"],n=n.replace("{challenge_id}",e.challengeId),void 0===e.challengeId?t.reject(new Error("Missing required parameter: challengeId")):(s=i(e,s),this.request("PATCH",o+n,e,{},a,s,{},t)),t.promise},e.prototype.delete_challenge=function(e){void 0===e&&(e={});var t=l.default.defer(),o=this.domain,n="/challenges/{challenge_id}",s={},a={};return a.Accept=["application/json"],a["Content-Type"]=["application/json"],n=n.replace("{challenge_id}",e.challengeId),void 0===e.challengeId?t.reject(new Error("Missing required parameter: challengeId")):(s=i(e,s),this.request("DELETE",o+n,e,{},a,s,{},t)),t.promise},e.prototype.get_challenge=function(e){void 0===e&&(e={});var t=l.default.defer(),o=this.domain,n="/challenges/{challenge_id}",s={},a={};return a.Accept=["application/json"],a["Content-Type"]=["application/json"],n=n.replace("{challenge_id}",e.challengeId),void 0===e.challengeId?t.reject(new Error("Missing required parameter: challengeId")):(s=i(e,s),this.request("GET",o+n,e,{},a,s,{},t)),t.promise},e.prototype.get_challenge_files=function(e){void 0===e&&(e={});var t=l.default.defer(),o=this.domain,n="/challenges/{challenge_id}/files",s={},a={};return a.Accept=["application/json"],a["Content-Type"]=["application/json"],void 0!==e.id&&(s.id=e.id),n=n.replace("{challenge_id}",e.challengeId),void 0===e.challengeId?t.reject(new Error("Missing required parameter: challengeId")):(s=i(e,s),this.request("GET",o+n,e,{},a,s,{},t)),t.promise},e.prototype.get_challenge_flags=function(e){void 0===e&&(e={});var t=l.default.defer(),o=this.domain,n="/challenges/{challenge_id}/flags",s={},a={};return a.Accept=["application/json"],a["Content-Type"]=["application/json"],void 0!==e.id&&(s.id=e.id),n=n.replace("{challenge_id}",e.challengeId),void 0===e.challengeId?t.reject(new Error("Missing required parameter: challengeId")):(s=i(e,s),this.request("GET",o+n,e,{},a,s,{},t)),t.promise},e.prototype.get_challenge_hints=function(e){void 0===e&&(e={});var t=l.default.defer(),o=this.domain,n="/challenges/{challenge_id}/hints",s={},a={};return a.Accept=["application/json"],a["Content-Type"]=["application/json"],void 0!==e.id&&(s.id=e.id),n=n.replace("{challenge_id}",e.challengeId),void 0===e.challengeId?t.reject(new Error("Missing required parameter: challengeId")):(s=i(e,s),this.request("GET",o+n,e,{},a,s,{},t)),t.promise},e.prototype.get_challenge_solves=function(e){void 0===e&&(e={});var t=l.default.defer(),o=this.domain,n="/challenges/{challenge_id}/solves",s={},a={};return a.Accept=["application/json"],a["Content-Type"]=["application/json"],void 0!==e.id&&(s.id=e.id),n=n.replace("{challenge_id}",e.challengeId),void 0===e.challengeId?t.reject(new Error("Missing required parameter: challengeId")):(s=i(e,s),this.request("GET",o+n,e,{},a,s,{},t)),t.promise},e.prototype.get_challenge_tags=function(e){void 0===e&&(e={});var t=l.default.defer(),o=this.domain,n="/challenges/{challenge_id}/tags",s={},a={};return a.Accept=["application/json"],a["Content-Type"]=["application/json"],void 0!==e.id&&(s.id=e.id),n=n.replace("{challenge_id}",e.challengeId),void 0===e.challengeId?t.reject(new Error("Missing required parameter: challengeId")):(s=i(e,s),this.request("GET",o+n,e,{},a,s,{},t)),t.promise},e.prototype.post_config_list=function(e){void 0===e&&(e={});var t=l.default.defer(),o=this.domain,n={},s={};return s.Accept=["application/json"],s["Content-Type"]=["application/json"],n=i(e,n),this.request("POST",o+"/configs",e,{},s,n,{},t),t.promise},e.prototype.patch_config_list=function(e){void 0===e&&(e={});var t=l.default.defer(),o=this.domain,n={},s={};return s.Accept=["application/json"],s["Content-Type"]=["application/json"],n=i(e,n),this.request("PATCH",o+"/configs",e,{},s,n,{},t),t.promise},e.prototype.get_config_list=function(e){void 0===e&&(e={});var t=l.default.defer(),o=this.domain,n={},s={};return s.Accept=["application/json"],s["Content-Type"]=["application/json"],n=i(e,n),this.request("GET",o+"/configs",e,{},s,n,{},t),t.promise},e.prototype.patch_config=function(e){void 0===e&&(e={});var t=l.default.defer(),o=this.domain,n="/configs/{config_key}",s={},a={};return a.Accept=["application/json"],a["Content-Type"]=["application/json"],n=n.replace("{config_key}",e.configKey),void 0===e.configKey?t.reject(new Error("Missing required parameter: configKey")):(s=i(e,s),this.request("PATCH",o+n,e,{},a,s,{},t)),t.promise},e.prototype.delete_config=function(e){void 0===e&&(e={});var t=l.default.defer(),o=this.domain,n="/configs/{config_key}",s={},a={};return a.Accept=["application/json"],a["Content-Type"]=["application/json"],n=n.replace("{config_key}",e.configKey),void 0===e.configKey?t.reject(new Error("Missing required parameter: configKey")):(s=i(e,s),this.request("DELETE",o+n,e,{},a,s,{},t)),t.promise},e.prototype.get_config=function(e){void 0===e&&(e={});var t=l.default.defer(),o=this.domain,n="/configs/{config_key}",s={},a={};return a.Accept=["application/json"],a["Content-Type"]=["application/json"],n=n.replace("{config_key}",e.configKey),void 0===e.configKey?t.reject(new Error("Missing required parameter: configKey")):(s=i(e,s),this.request("GET",o+n,e,{},a,s,{},t)),t.promise},e.prototype.post_files_list=function(e){void 0===e&&(e={});var t=l.default.defer(),o=this.domain,n={},s={};return s.Accept=["application/json"],s["Content-Type"]=["application/json"],n=i(e,n),this.request("POST",o+"/files",e,{},s,n,{},t),t.promise},e.prototype.get_files_list=function(e){void 0===e&&(e={});var t=l.default.defer(),o=this.domain,n={},s={};return s.Accept=["application/json"],s["Content-Type"]=["application/json"],n=i(e,n),this.request("GET",o+"/files",e,{},s,n,{},t),t.promise},e.prototype.delete_files_detail=function(e){void 0===e&&(e={});var t=l.default.defer(),o=this.domain,n="/files/{file_id}",s={},a={};return a.Accept=["application/json"],a["Content-Type"]=["application/json"],n=n.replace("{file_id}",e.fileId),void 0===e.fileId?t.reject(new Error("Missing required parameter: fileId")):(s=i(e,s),this.request("DELETE",o+n,e,{},a,s,{},t)),t.promise},e.prototype.get_files_detail=function(e){void 0===e&&(e={});var t=l.default.defer(),o=this.domain,n="/files/{file_id}",s={},a={};return a.Accept=["application/json"],a["Content-Type"]=["application/json"],n=n.replace("{file_id}",e.fileId),void 0===e.fileId?t.reject(new Error("Missing required parameter: fileId")):(s=i(e,s),this.request("GET",o+n,e,{},a,s,{},t)),t.promise},e.prototype.post_flag_list=function(e){void 0===e&&(e={});var t=l.default.defer(),o=this.domain,n={},s={};return s.Accept=["application/json"],s["Content-Type"]=["application/json"],n=i(e,n),this.request("POST",o+"/flags",e,{},s,n,{},t),t.promise},e.prototype.get_flag_list=function(e){void 0===e&&(e={});var t=l.default.defer(),o=this.domain,n={},s={};return s.Accept=["application/json"],s["Content-Type"]=["application/json"],n=i(e,n),this.request("GET",o+"/flags",e,{},s,n,{},t),t.promise},e.prototype.get_flag_types=function(e){void 0===e&&(e={});var t=l.default.defer(),o=this.domain,n={},s={};return s.Accept=["application/json"],s["Content-Type"]=["application/json"],n=i(e,n),this.request("GET",o+"/flags/types",e,{},s,n,{},t),t.promise},e.prototype.get_flag_types_1=function(e){void 0===e&&(e={});var t=l.default.defer(),o=this.domain,n="/flags/types/{type_name}",s={},a={};return a.Accept=["application/json"],a["Content-Type"]=["application/json"],n=n.replace("{type_name}",e.typeName),void 0===e.typeName?t.reject(new Error("Missing required parameter: typeName")):(s=i(e,s),this.request("GET",o+n,e,{},a,s,{},t)),t.promise},e.prototype.patch_flag=function(e){void 0===e&&(e={});var t=l.default.defer(),o=this.domain,n="/flags/{flag_id}",s={},a={};return a.Accept=["application/json"],a["Content-Type"]=["application/json"],n=n.replace("{flag_id}",e.flagId),void 0===e.flagId?t.reject(new Error("Missing required parameter: flagId")):(s=i(e,s),this.request("PATCH",o+n,e,{},a,s,{},t)),t.promise},e.prototype.delete_flag=function(e){void 0===e&&(e={});var t=l.default.defer(),o=this.domain,n="/flags/{flag_id}",s={},a={};return a.Accept=["application/json"],a["Content-Type"]=["application/json"],n=n.replace("{flag_id}",e.flagId),void 0===e.flagId?t.reject(new Error("Missing required parameter: flagId")):(s=i(e,s),this.request("DELETE",o+n,e,{},a,s,{},t)),t.promise},e.prototype.get_flag=function(e){void 0===e&&(e={});var t=l.default.defer(),o=this.domain,n="/flags/{flag_id}",s={},a={};return a.Accept=["application/json"],a["Content-Type"]=["application/json"],n=n.replace("{flag_id}",e.flagId),void 0===e.flagId?t.reject(new Error("Missing required parameter: flagId")):(s=i(e,s),this.request("GET",o+n,e,{},a,s,{},t)),t.promise},e.prototype.post_hint_list=function(e){void 0===e&&(e={});var t=l.default.defer(),o=this.domain,n={},s={};return s.Accept=["application/json"],s["Content-Type"]=["application/json"],n=i(e,n),this.request("POST",o+"/hints",e,{},s,n,{},t),t.promise},e.prototype.get_hint_list=function(e){void 0===e&&(e={});var t=l.default.defer(),o=this.domain,n={},s={};return s.Accept=["application/json"],s["Content-Type"]=["application/json"],n=i(e,n),this.request("GET",o+"/hints",e,{},s,n,{},t),t.promise},e.prototype.patch_hint=function(e){void 0===e&&(e={});var t=l.default.defer(),o=this.domain,n="/hints/{hint_id}",s={},a={};return a.Accept=["application/json"],a["Content-Type"]=["application/json"],n=n.replace("{hint_id}",e.hintId),void 0===e.hintId?t.reject(new Error("Missing required parameter: hintId")):(s=i(e,s),this.request("PATCH",o+n,e,{},a,s,{},t)),t.promise},e.prototype.delete_hint=function(e){void 0===e&&(e={});var t=l.default.defer(),o=this.domain,n="/hints/{hint_id}",s={},a={};return a.Accept=["application/json"],a["Content-Type"]=["application/json"],n=n.replace("{hint_id}",e.hintId),void 0===e.hintId?t.reject(new Error("Missing required parameter: hintId")):(s=i(e,s),this.request("DELETE",o+n,e,{},a,s,{},t)),t.promise},e.prototype.get_hint=function(e){void 0===e&&(e={});var t=l.default.defer(),o=this.domain,n="/hints/{hint_id}",s={},a={};return a.Accept=["application/json"],a["Content-Type"]=["application/json"],n=n.replace("{hint_id}",e.hintId),void 0===e.hintId?t.reject(new Error("Missing required parameter: hintId")):(s=i(e,s),this.request("GET",o+n,e,{},a,s,{},t)),t.promise},e.prototype.post_notification_list=function(e){void 0===e&&(e={});var t=l.default.defer(),o=this.domain,n={},s={};return s.Accept=["application/json"],s["Content-Type"]=["application/json"],n=i(e,n),this.request("POST",o+"/notifications",e,{},s,n,{},t),t.promise},e.prototype.get_notification_list=function(e){void 0===e&&(e={});var t=l.default.defer(),o=this.domain,n={},s={};return s.Accept=["application/json"],s["Content-Type"]=["application/json"],n=i(e,n),this.request("GET",o+"/notifications",e,{},s,n,{},t),t.promise},e.prototype.delete_notification=function(e){void 0===e&&(e={});var t=l.default.defer(),o=this.domain,n="/notifications/{notification_id}",s={},a={};return a.Accept=["application/json"],a["Content-Type"]=["application/json"],n=n.replace("{notification_id}",e.notificationId),void 0===e.notificationId?t.reject(new Error("Missing required parameter: notificationId")):(s=i(e,s),this.request("DELETE",o+n,e,{},a,s,{},t)),t.promise},e.prototype.get_notification=function(e){void 0===e&&(e={});var t=l.default.defer(),o=this.domain,n="/notifications/{notification_id}",s={},a={};return a.Accept=["application/json"],a["Content-Type"]=["application/json"],n=n.replace("{notification_id}",e.notificationId),void 0===e.notificationId?t.reject(new Error("Missing required parameter: notificationId")):(s=i(e,s),this.request("GET",o+n,e,{},a,s,{},t)),t.promise},e.prototype.post_page_list=function(e){void 0===e&&(e={});var t=l.default.defer(),o=this.domain,n={},s={};return s.Accept=["application/json"],s["Content-Type"]=["application/json"],n=i(e,n),this.request("POST",o+"/pages",e,{},s,n,{},t),t.promise},e.prototype.get_page_list=function(e){void 0===e&&(e={});var t=l.default.defer(),o=this.domain,n={},s={};return s.Accept=["application/json"],s["Content-Type"]=["application/json"],n=i(e,n),this.request("GET",o+"/pages",e,{},s,n,{},t),t.promise},e.prototype.patch_page_detail=function(e){void 0===e&&(e={});var t=l.default.defer(),o=this.domain,n="/pages/{page_id}",s={},a={};return a.Accept=["application/json"],a["Content-Type"]=["application/json"],n=n.replace("{page_id}",e.pageId),void 0===e.pageId?t.reject(new Error("Missing required parameter: pageId")):(s=i(e,s),this.request("PATCH",o+n,e,{},a,s,{},t)),t.promise},e.prototype.delete_page_detail=function(e){void 0===e&&(e={});var t=l.default.defer(),o=this.domain,n="/pages/{page_id}",s={},a={};return a.Accept=["application/json"],a["Content-Type"]=["application/json"],n=n.replace("{page_id}",e.pageId),void 0===e.pageId?t.reject(new Error("Missing required parameter: pageId")):(s=i(e,s),this.request("DELETE",o+n,e,{},a,s,{},t)),t.promise},e.prototype.get_page_detail=function(e){void 0===e&&(e={});var t=l.default.defer(),o=this.domain,n="/pages/{page_id}",s={},a={};return a.Accept=["application/json"],a["Content-Type"]=["application/json"],n=n.replace("{page_id}",e.pageId),void 0===e.pageId?t.reject(new Error("Missing required parameter: pageId")):(s=i(e,s),this.request("GET",o+n,e,{},a,s,{},t)),t.promise},e.prototype.get_scoreboard_list=function(e){void 0===e&&(e={});var t=l.default.defer(),o=this.domain,n={},s={};return s.Accept=["application/json"],s["Content-Type"]=["application/json"],n=i(e,n),this.request("GET",o+"/scoreboard",e,{},s,n,{},t),t.promise},e.prototype.get_scoreboard_detail=function(e){void 0===e&&(e={});var t=l.default.defer(),o=this.domain,n="/scoreboard/top/{count}",s={},a={};return a.Accept=["application/json"],a["Content-Type"]=["application/json"],n=n.replace("{count}",e.count),void 0===e.count?t.reject(new Error("Missing required parameter: count")):(s=i(e,s),this.request("GET",o+n,e,{},a,s,{},t)),t.promise},e.prototype.get_challenge_solve_statistics=function(e){void 0===e&&(e={});var t=l.default.defer(),o=this.domain,n={},s={};return s.Accept=["application/json"],s["Content-Type"]=["application/json"],n=i(e,n),this.request("GET",o+"/statistics/challenges/solves",e,{},s,n,{},t),t.promise},e.prototype.get_challenge_solve_percentages=function(e){void 0===e&&(e={});var t=l.default.defer(),o=this.domain,n={},s={};return s.Accept=["application/json"],s["Content-Type"]=["application/json"],n=i(e,n),this.request("GET",o+"/statistics/challenges/solves/percentages",e,{},s,n,{},t),t.promise},e.prototype.get_challenge_property_counts=function(e){void 0===e&&(e={});var t=l.default.defer(),o=this.domain,n="/statistics/challenges/{column}",s={},a={};return a.Accept=["application/json"],a["Content-Type"]=["application/json"],n=n.replace("{column}",e.column),void 0===e.column?t.reject(new Error("Missing required parameter: column")):(s=i(e,s),this.request("GET",o+n,e,{},a,s,{},t)),t.promise},e.prototype.get_submission_property_counts=function(e){void 0===e&&(e={});var t=l.default.defer(),o=this.domain,n="/statistics/submissions/{column}",s={},a={};return a.Accept=["application/json"],a["Content-Type"]=["application/json"],n=n.replace("{column}",e.column),void 0===e.column?t.reject(new Error("Missing required parameter: column")):(s=i(e,s),this.request("GET",o+n,e,{},a,s,{},t)),t.promise},e.prototype.get_team_statistics=function(e){void 0===e&&(e={});var t=l.default.defer(),o=this.domain,n={},s={};return s.Accept=["application/json"],s["Content-Type"]=["application/json"],n=i(e,n),this.request("GET",o+"/statistics/teams",e,{},s,n,{},t),t.promise},e.prototype.get_user_statistics=function(e){void 0===e&&(e={});var t=l.default.defer(),o=this.domain,n={},s={};return s.Accept=["application/json"],s["Content-Type"]=["application/json"],n=i(e,n),this.request("GET",o+"/statistics/users",e,{},s,n,{},t),t.promise},e.prototype.get_user_property_counts=function(e){void 0===e&&(e={});var t=l.default.defer(),o=this.domain,n="/statistics/users/{column}",s={},a={};return a.Accept=["application/json"],a["Content-Type"]=["application/json"],n=n.replace("{column}",e.column),void 0===e.column?t.reject(new Error("Missing required parameter: column")):(s=i(e,s),this.request("GET",o+n,e,{},a,s,{},t)),t.promise},e.prototype.post_submissions_list=function(e){void 0===e&&(e={});var t=l.default.defer(),o=this.domain,n={},s={};return s.Accept=["application/json"],s["Content-Type"]=["application/json"],n=i(e,n),this.request("POST",o+"/submissions",e,{},s,n,{},t),t.promise},e.prototype.get_submissions_list=function(e){void 0===e&&(e={});var t=l.default.defer(),o=this.domain,n={},s={};return s.Accept=["application/json"],s["Content-Type"]=["application/json"],n=i(e,n),this.request("GET",o+"/submissions",e,{},s,n,{},t),t.promise},e.prototype.delete_submission=function(e){void 0===e&&(e={});var t=l.default.defer(),o=this.domain,n="/submissions/{submission_id}",s={},a={};return a.Accept=["application/json"],a["Content-Type"]=["application/json"],n=n.replace("{submission_id}",e.submissionId),void 0===e.submissionId?t.reject(new Error("Missing required parameter: submissionId")):(s=i(e,s),this.request("DELETE",o+n,e,{},a,s,{},t)),t.promise},e.prototype.get_submission=function(e){void 0===e&&(e={});var t=l.default.defer(),o=this.domain,n="/submissions/{submission_id}",s={},a={};return a.Accept=["application/json"],a["Content-Type"]=["application/json"],n=n.replace("{submission_id}",e.submissionId),void 0===e.submissionId?t.reject(new Error("Missing required parameter: submissionId")):(s=i(e,s),this.request("GET",o+n,e,{},a,s,{},t)),t.promise},e.prototype.post_tag_list=function(e){void 0===e&&(e={});var t=l.default.defer(),o=this.domain,n={},s={};return s.Accept=["application/json"],s["Content-Type"]=["application/json"],n=i(e,n),this.request("POST",o+"/tags",e,{},s,n,{},t),t.promise},e.prototype.get_tag_list=function(e){void 0===e&&(e={});var t=l.default.defer(),o=this.domain,n={},s={};return s.Accept=["application/json"],s["Content-Type"]=["application/json"],n=i(e,n),this.request("GET",o+"/tags",e,{},s,n,{},t),t.promise},e.prototype.patch_tag=function(e){void 0===e&&(e={});var t=l.default.defer(),o=this.domain,n="/tags/{tag_id}",s={},a={};return a.Accept=["application/json"],a["Content-Type"]=["application/json"],n=n.replace("{tag_id}",e.tagId),void 0===e.tagId?t.reject(new Error("Missing required parameter: tagId")):(s=i(e,s),this.request("PATCH",o+n,e,{},a,s,{},t)),t.promise},e.prototype.delete_tag=function(e){void 0===e&&(e={});var t=l.default.defer(),o=this.domain,n="/tags/{tag_id}",s={},a={};return a.Accept=["application/json"],a["Content-Type"]=["application/json"],n=n.replace("{tag_id}",e.tagId),void 0===e.tagId?t.reject(new Error("Missing required parameter: tagId")):(s=i(e,s),this.request("DELETE",o+n,e,{},a,s,{},t)),t.promise},e.prototype.get_tag=function(e){void 0===e&&(e={});var t=l.default.defer(),o=this.domain,n="/tags/{tag_id}",s={},a={};return a.Accept=["application/json"],a["Content-Type"]=["application/json"],n=n.replace("{tag_id}",e.tagId),void 0===e.tagId?t.reject(new Error("Missing required parameter: tagId")):(s=i(e,s),this.request("GET",o+n,e,{},a,s,{},t)),t.promise},e.prototype.post_team_list=function(e){void 0===e&&(e={});var t=l.default.defer(),o=this.domain,n={},s={};return s.Accept=["application/json"],s["Content-Type"]=["application/json"],n=i(e,n),this.request("POST",o+"/teams",e,{},s,n,{},t),t.promise},e.prototype.get_team_list=function(e){void 0===e&&(e={});var t=l.default.defer(),o=this.domain,n={},s={};return s.Accept=["application/json"],s["Content-Type"]=["application/json"],n=i(e,n),this.request("GET",o+"/teams",e,{},s,n,{},t),t.promise},e.prototype.patch_team_private=function(e){void 0===e&&(e={});var t=l.default.defer(),o=this.domain,n={},s={};return s.Accept=["application/json"],s["Content-Type"]=["application/json"],void 0!==e.teamId&&(n.team_id=e.teamId),n=i(e,n),this.request("PATCH",o+"/teams/me",e,{},s,n,{},t),t.promise},e.prototype.get_team_private=function(e){void 0===e&&(e={});var t=l.default.defer(),o=this.domain,n={},s={};return s.Accept=["application/json"],s["Content-Type"]=["application/json"],void 0!==e.teamId&&(n.team_id=e.teamId),n=i(e,n),this.request("GET",o+"/teams/me",e,{},s,n,{},t),t.promise},e.prototype.patch_team_public=function(e){void 0===e&&(e={});var t=l.default.defer(),o=this.domain,n="/teams/{team_id}",s={},a={};return a.Accept=["application/json"],a["Content-Type"]=["application/json"],n=n.replace("{team_id}",e.teamId),void 0===e.teamId?t.reject(new Error("Missing required parameter: teamId")):(s=i(e,s),this.request("PATCH",o+n,e,{},a,s,{},t)),t.promise},e.prototype.delete_team_public=function(e){void 0===e&&(e={});var t=l.default.defer(),o=this.domain,n="/teams/{team_id}",s={},a={};return a.Accept=["application/json"],a["Content-Type"]=["application/json"],n=n.replace("{team_id}",e.teamId),void 0===e.teamId?t.reject(new Error("Missing required parameter: teamId")):(s=i(e,s),this.request("DELETE",o+n,e,{},a,s,{},t)),t.promise},e.prototype.get_team_public=function(e){void 0===e&&(e={});var t=l.default.defer(),o=this.domain,n="/teams/{team_id}",s={},a={};return a.Accept=["application/json"],a["Content-Type"]=["application/json"],n=n.replace("{team_id}",e.teamId),void 0===e.teamId?t.reject(new Error("Missing required parameter: teamId")):(s=i(e,s),this.request("GET",o+n,e,{},a,s,{},t)),t.promise},e.prototype.get_team_awards=function(e){void 0===e&&(e={});var t=l.default.defer(),o=this.domain,n="/teams/{team_id}/awards",s={},a={};return a.Accept=["application/json"],a["Content-Type"]=["application/json"],n=n.replace("{team_id}",e.teamId),void 0===e.teamId?t.reject(new Error("Missing required parameter: teamId")):(s=i(e,s),this.request("GET",o+n,e,{},a,s,{},t)),t.promise},e.prototype.get_team_fails=function(e){void 0===e&&(e={});var t=l.default.defer(),o=this.domain,n="/teams/{team_id}/fails",s={},a={};return a.Accept=["application/json"],a["Content-Type"]=["application/json"],n=n.replace("{team_id}",e.teamId),void 0===e.teamId?t.reject(new Error("Missing required parameter: teamId")):(s=i(e,s),this.request("GET",o+n,e,{},a,s,{},t)),t.promise},e.prototype.get_team_solves=function(e){void 0===e&&(e={});var t=l.default.defer(),o=this.domain,n="/teams/{team_id}/solves",s={},a={};return a.Accept=["application/json"],a["Content-Type"]=["application/json"],n=n.replace("{team_id}",e.teamId),void 0===e.teamId?t.reject(new Error("Missing required parameter: teamId")):(s=i(e,s),this.request("GET",o+n,e,{},a,s,{},t)),t.promise},e.prototype.post_unlock_list=function(e){void 0===e&&(e={});var t=l.default.defer(),o=this.domain,n={},s={};return s.Accept=["application/json"],s["Content-Type"]=["application/json"],n=i(e,n),this.request("POST",o+"/unlocks",e,{},s,n,{},t),t.promise},e.prototype.get_unlock_list=function(e){void 0===e&&(e={});var t=l.default.defer(),o=this.domain,n={},s={};return s.Accept=["application/json"],s["Content-Type"]=["application/json"],n=i(e,n),this.request("GET",o+"/unlocks",e,{},s,n,{},t),t.promise},e.prototype.post_user_list=function(e){void 0===e&&(e={});var t=l.default.defer(),o=this.domain,n={},s={};return s.Accept=["application/json"],s["Content-Type"]=["application/json"],n=i(e,n),this.request("POST",o+"/users",e,{},s,n,{},t),t.promise},e.prototype.get_user_list=function(e){void 0===e&&(e={});var t=l.default.defer(),o=this.domain,n={},s={};return s.Accept=["application/json"],s["Content-Type"]=["application/json"],n=i(e,n),this.request("GET",o+"/users",e,{},s,n,{},t),t.promise},e.prototype.patch_user_private=function(e){void 0===e&&(e={});var t=l.default.defer(),o=this.domain,n={},s={};return s.Accept=["application/json"],s["Content-Type"]=["application/json"],n=i(e,n),this.request("PATCH",o+"/users/me",e,{},s,n,{},t),t.promise},e.prototype.get_user_private=function(e){void 0===e&&(e={});var t=l.default.defer(),o=this.domain,n={},s={};return s.Accept=["application/json"],s["Content-Type"]=["application/json"],n=i(e,n),this.request("GET",o+"/users/me",e,{},s,n,{},t),t.promise},e.prototype.patch_user_public=function(e){void 0===e&&(e={});var t=l.default.defer(),o=this.domain,n="/users/{user_id}",s={},a={};return a.Accept=["application/json"],a["Content-Type"]=["application/json"],n=n.replace("{user_id}",e.userId),void 0===e.userId?t.reject(new Error("Missing required parameter: userId")):(s=i(e,s),this.request("PATCH",o+n,e,{},a,s,{},t)),t.promise},e.prototype.delete_user_public=function(e){void 0===e&&(e={});var t=l.default.defer(),o=this.domain,n="/users/{user_id}",s={},a={};return a.Accept=["application/json"],a["Content-Type"]=["application/json"],n=n.replace("{user_id}",e.userId),void 0===e.userId?t.reject(new Error("Missing required parameter: userId")):(s=i(e,s),this.request("DELETE",o+n,e,{},a,s,{},t)),t.promise},e.prototype.get_user_public=function(e){void 0===e&&(e={});var t=l.default.defer(),o=this.domain,n="/users/{user_id}",s={},a={};return a.Accept=["application/json"],a["Content-Type"]=["application/json"],n=n.replace("{user_id}",e.userId),void 0===e.userId?t.reject(new Error("Missing required parameter: userId")):(s=i(e,s),this.request("GET",o+n,e,{},a,s,{},t)),t.promise},e.prototype.get_user_awards=function(e){void 0===e&&(e={});var t=l.default.defer(),o=this.domain,n="/users/{user_id}/awards",s={},a={};return a.Accept=["application/json"],a["Content-Type"]=["application/json"],n=n.replace("{user_id}",e.userId),void 0===e.userId?t.reject(new Error("Missing required parameter: userId")):(s=i(e,s),this.request("GET",o+n,e,{},a,s,{},t)),t.promise},e.prototype.get_user_fails=function(e){void 0===e&&(e={});var t=l.default.defer(),o=this.domain,n="/users/{user_id}/fails",s={},a={};return a.Accept=["application/json"],a["Content-Type"]=["application/json"],n=n.replace("{user_id}",e.userId),void 0===e.userId?t.reject(new Error("Missing required parameter: userId")):(s=i(e,s),this.request("GET",o+n,e,{},a,s,{},t)),t.promise},e.prototype.get_user_solves=function(e){void 0===e&&(e={});var t=l.default.defer(),o=this.domain,n="/users/{user_id}/solves",s={},a={};return a.Accept=["application/json"],a["Content-Type"]=["application/json"],n=n.replace("{user_id}",e.userId),void 0===e.userId?t.reject(new Error("Missing required parameter: userId")):(s=i(e,s),this.request("GET",o+n,e,{},a,s,{},t)),t.promise},e}();t.API=a},"./CTFd/themes/core/assets/js/config.js":function(e,t,o){Object.defineProperty(t,"__esModule",{value:!0}),t.default=void 0;t.default={urlRoot:"",csrfNonce:"",userMode:""}},"./CTFd/themes/core/assets/js/events.js":function(e,t,o){Object.defineProperty(t,"__esModule",{value:!0}),t.default=void 0;var a=o("./node_modules/howler/dist/howler.js"),n=o("./node_modules/event-source-polyfill/src/eventsource.js"),i=o("./CTFd/themes/core/assets/js/ezq.js"),l=o("./CTFd/themes/core/assets/js/utils.js"),r=n.NativeEventSource||n.EventSourcePolyfill;t.default=function(e){var t=new r(e+"/events"),o=new l.WindowController,n=new a.Howl({src:[e+"/themes/core/static/sounds/notification.webm",e+"/themes/core/static/sounds/notification.mp3"]});function s(e){switch(e.type){case"toast":(0,l.inc_notification_counter)();var t=50<e.content.length?e.content.substring(0,47)+"...":e.content,o=!1;(0,i.ezToast)({title:e.title,body:t,onclick:function(){(0,i.ezAlert)({title:e.title,body:e.content,button:"Got it!",success:function(){o=!0,(0,l.dec_notification_counter)()}})},onclose:function(){o||(0,l.dec_notification_counter)()}});break;case"alert":(0,l.inc_notification_counter)(),(0,i.ezAlert)({title:e.title,body:e.content,button:"Got it!",success:function(){(0,l.dec_notification_counter)()}});break;case"background":default:(0,l.inc_notification_counter)()}e.sound&&n.play()}(0,l.init_notification_counter)(),o.notification=function(e){s(e)},o.masterDidChange=function(){this.isMaster?t.addEventListener("notification",function(e){var t=JSON.parse(e.data);o.broadcast("notification",t),s(t)},!1):t&&t.close()}}},"./CTFd/themes/core/assets/js/ezq.js":function(e,t,o){Object.defineProperty(t,"__esModule",{value:!0}),t.ezAlert=p,t.ezToast=f,t.ezQuery=j,t.ezProgressBar=h,t.ezBadge=_,t.default=void 0,o("./node_modules/bootstrap/js/dist/modal.js");var n,l=(n=o("./node_modules/jquery/dist/jquery.js"))&&n.__esModule?n:{default:n};var a='<div class="modal fade" tabindex="-1" role="dialog"> <div class="modal-dialog" role="document"> <div class="modal-content"> <div class="modal-header"> <h5 class="modal-title">{0}</h5> <button type="button" class="close" data-dismiss="modal" aria-label="Close"> <span aria-hidden="true">&times;</span> </button> </div> <div class="modal-body"> </div> <div class="modal-footer"> </div> </div> </div></div>',r='<div class="toast m-3" role="alert"> <div class="toast-header"> <strong class="mr-auto">{0}</strong> <button type="button" class="ml-2 mb-1 close" data-dismiss="toast" aria-label="Close"> <span aria-hidden="true">&times;</span> </button> </div> <div class="toast-body">{1}</div></div>',i='<div class="progress"> <div class="progress-bar progress-bar-success progress-bar-striped progress-bar-animated" role="progressbar" style="width: {0}%"> </div></div>',s='<div class="alert alert-danger alert-dismissable" role="alert">\n <span class="sr-only">Error:</span>\n {0}\n <button type="button" class="close" data-dismiss="alert" aria-label="Close"><span aria-hidden="true">×</span></button>\n</div>',d='<div class="alert alert-success alert-dismissable submit-row" role="alert">\n <strong>Success!</strong>\n {0}\n <button type="button" class="close" data-dismiss="alert" aria-label="Close"><span aria-hidden="true">×</span></button>\n</div>',c='<button type="button" class="btn btn-primary" data-dismiss="modal">{0}</button>',u='<button type="button" class="btn btn-danger" data-dismiss="modal">No</button>',m='<button type="button" class="btn btn-primary" data-dismiss="modal">Yes</button>';function p(e){var t=a.format(e.title),o=(0,l.default)(t);"string"==typeof e.body?o.find(".modal-body").append("<p>".concat(e.body,"</p>")):o.find(".modal-body").append((0,l.default)(e.body));var n=(0,l.default)(c.format(e.button));return e.success&&(0,l.default)(n).click(function(){e.success()}),e.large&&o.find(".modal-dialog").addClass("modal-lg"),o.find(".modal-footer").append(n),(0,l.default)("main").append(o),o.modal("show"),(0,l.default)(o).on("hidden.bs.modal",function(){(0,l.default)(this).modal("dispose")}),o}function f(e){(0,l.default)("#ezq--notifications-toast-container").length||(0,l.default)("body").append((0,l.default)("<div/>").attr({id:"ezq--notifications-toast-container"}).css({position:"fixed",bottom:"0",right:"0","min-width":"20%"}));var t=r.format(e.title,e.body),o=(0,l.default)(t);if(e.onclose&&(0,l.default)(o).find("button[data-dismiss=toast]").click(function(){e.onclose()}),e.onclick){var n=(0,l.default)(o).find(".toast-body");n.addClass("cursor-pointer"),n.click(function(){e.onclick()})}var s=!1!==e.autohide,a=!1!==e.animation,i=e.delay||1e4;return(0,l.default)("#ezq--notifications-toast-container").prepend(o),o.toast({autohide:s,delay:i,animation:a}),o.toast("show"),o}function j(e){var t=a.format(e.title),o=(0,l.default)(t);"string"==typeof e.body?o.find(".modal-body").append("<p>".concat(e.body,"</p>")):o.find(".modal-body").append((0,l.default)(e.body));var n=(0,l.default)(m),s=(0,l.default)(u);return o.find(".modal-footer").append(s),o.find(".modal-footer").append(n),(0,l.default)("main").append(o),(0,l.default)(o).on("hidden.bs.modal",function(){(0,l.default)(this).modal("dispose")}),(0,l.default)(n).click(function(){e.success()}),o.modal("show"),o}function h(e){if(e.target){var t=(0,l.default)(e.target);return t.find(".progress-bar").css("width",e.width+"%"),t}var o=i.format(e.width),n=a.format(e.title),s=(0,l.default)(n);return s.find(".modal-body").append((0,l.default)(o)),(0,l.default)("main").append(s),s.modal("show")}function _(e){var t={success:d,error:s}[e.type].format(e.body);return(0,l.default)(t)}var g={ezAlert:p,ezToast:f,ezQuery:j,ezProgressBar:h,ezBadge:_};t.default=g},"./CTFd/themes/core/assets/js/fetch.js":function(e,t,o){Object.defineProperty(t,"__esModule",{value:!0}),t.default=void 0,o("./node_modules/whatwg-fetch/fetch.js");var n,s=(n=o("./CTFd/themes/core/assets/js/config.js"))&&n.__esModule?n:{default:n};var a=window.fetch;t.default=function(e,t){return void 0===t&&(t={method:"GET",credentials:"same-origin",headers:{}}),e=s.default.urlRoot+e,void 0===t.headers&&(t.headers={}),t.credentials="same-origin",t.headers.Accept="application/json",t.headers["Content-Type"]="application/json",t.headers["CSRF-Token"]=s.default.csrfNonce,a(e,t)}},"./CTFd/themes/core/assets/js/pages/challenges.js":function(e,t,o){o("./CTFd/themes/core/assets/js/pages/main.js"),o("./node_modules/bootstrap/js/dist/tab.js");var n=o("./CTFd/themes/core/assets/js/ezq.js"),r=o("./CTFd/themes/core/assets/js/utils.js"),d=i(o("./node_modules/moment/moment.js")),_=i(o("./node_modules/jquery/dist/jquery.js")),a=i(o("./CTFd/themes/core/assets/js/CTFd.js")),s=i(o("./CTFd/themes/core/assets/js/config.js"));function i(e){return e&&e.__esModule?e:{default:e}}var l={teams:function(e){return a.default.api.get_team_solves({teamId:e})},users:function(e){return a.default.api.get_user_solves({userId:e})}},c=a.default.lib.markdown();a.default._internal.challenge={};var g=[],v=[],y=function(t){var e=_.default.grep(g,function(e){return e.id==t})[0];"hidden"!==e.type?u(e):(0,n.ezAlert)({title:"Challenge Hidden!",body:"You haven't unlocked this challenge yet!",button:"Got it!"})},u=function(n){return Promise.all([a.default.api.get_challenge({challengeId:n.id}),_.default.getScript(s.default.urlRoot+n.script),_.default.get(s.default.urlRoot+n.template)]).then(function(e){var t=a.default._internal.challenge;(0,_.default)("#challenge-window").empty(),(0,_.default)("#challenge-window").append(e[0].data.view),(0,_.default)("#challenge-window #challenge-input").addClass("form-control"),(0,_.default)("#challenge-window #challenge-submit").addClass("btn btn-md btn-outline-secondary float-right");var o=(0,_.default)("#challenge-window").find(".modal-dialog");if(window.init.theme_settings&&window.init.theme_settings.challenge_window_size)switch(window.init.theme_settings.challenge_window_size){case"sm":o.addClass("modal-sm");break;case"lg":o.addClass("modal-lg");break;case"xl":o.addClass("modal-xl")}(0,_.default)(".challenge-solves").click(function(e){b((0,_.default)("#challenge-id").val())}),(0,_.default)(".nav-tabs a").click(function(e){e.preventDefault(),(0,_.default)(this).tab("show")}),(0,_.default)("#challenge-window").on("hide.bs.modal",function(e){(0,_.default)("#challenge-input").removeClass("wrong"),(0,_.default)("#challenge-input").removeClass("correct"),(0,_.default)("#incorrect-key").slideUp(),(0,_.default)("#correct-key").slideUp(),(0,_.default)("#already-solved").slideUp(),(0,_.default)("#too-fast").slideUp()}),(0,_.default)(".load-hint").on("click",function(e){T((0,_.default)(this).data("hint-id"))}),(0,_.default)("#challenge-submit").click(function(e){e.preventDefault(),(0,_.default)("#challenge-submit").addClass("disabled-button"),(0,_.default)("#challenge-submit").prop("disabled",!0),a.default._internal.challenge.submit().then(m).then(f).then(p)}),(0,_.default)("#challenge-input").keyup(function(e){13==e.keyCode&&(0,_.default)("#challenge-submit").click()}),t.postRender(),window.location.replace(window.location.href.split("#")[0]+"#".concat(n.name,"-").concat(n.id)),(0,_.default)("#challenge-window").modal()})};function m(e){var t=e.data,o=(0,_.default)("#result-message"),n=(0,_.default)("#result-notification"),s=(0,_.default)("#challenge-input");n.removeClass(),o.text(t.message),"authentication_required"!==t.status?("incorrect"===t.status?(n.addClass("alert alert-danger alert-dismissable text-center"),n.slideDown(),s.removeClass("correct"),s.addClass("wrong"),setTimeout(function(){s.removeClass("wrong")},3e3)):"correct"===t.status?(n.addClass("alert alert-success alert-dismissable text-center"),n.slideDown(),(0,_.default)(".challenge-solves").text(parseInt((0,_.default)(".challenge-solves").text().split(" ")[0])+1+" Solves"),s.val(""),s.removeClass("wrong"),s.addClass("correct")):"already_solved"===t.status?(n.addClass("alert alert-info alert-dismissable text-center"),n.slideDown(),s.addClass("correct")):"paused"===t.status?(n.addClass("alert alert-warning alert-dismissable text-center"),n.slideDown()):"ratelimited"===t.status&&(n.addClass("alert alert-warning alert-dismissable text-center"),n.slideDown(),s.addClass("too-fast"),setTimeout(function(){s.removeClass("too-fast")},3e3)),setTimeout(function(){(0,_.default)(".alert").slideUp(),(0,_.default)("#challenge-submit").removeClass("disabled-button"),(0,_.default)("#challenge-submit").prop("disabled",!1)},3e3)):window.location=a.default.config.urlRoot+"/login?next="+a.default.config.urlRoot+window.location.pathname+window.location.hash}function p(){return l[a.default.config.userMode]("me").then(function(e){for(var t=e.data,o=t.length-1;0<=o;o--){var n=(0,_.default)('button[value="'+t[o].challenge_id+'"]');n.addClass("solved-challenge"),n.prepend("<i class='fas fa-check corner-button-check'></i>")}})}function b(e){return a.default.api.get_challenge_solves({challengeId:e}).then(function(e){var t=e.data;(0,_.default)(".challenge-solves").text(parseInt(t.length)+" Solves");var o=(0,_.default)("#challenge-solves-names");o.empty();for(var n=0;n<t.length;n++){var s=t[n].account_id,a=t[n].name,i=(0,d.default)(t[n].date).local().fromNow(),l=t[n].account_url;o.append('<tr><td><a href="{0}">{2}</td><td>{3}</td></tr>'.format(l,s,(0,r.htmlEntities)(a),i))}})}function f(){return a.default.api.get_challenge_list().then(function(e){var t=[],o=(0,_.default)("#challenges-board");g=e.data,o.empty();for(var n=g.length-1;0<=n;n--)if(g[n].solves=0,-1==_.default.inArray(g[n].category,t)){var s=g[n].category;t.push(s);var a=s.replace(/ /g,"-").hashCode(),i=(0,_.default)('<div id="{0}-row" class="pt-5">'.format(a)+'<div class="category-header col-md-12 mb-3"></div><div class="category-challenges col-md-12"><div class="challenges-row col-md-12"></div></div></div>');i.find(".category-header").append((0,_.default)("<h3>"+s+"</h3>")),o.append(i)}for(var l=0;l<=g.length-1;l++){var r=g[l],d=r.name.replace(/ /g,"-").hashCode(),c=r.category.replace(/ /g,"-").hashCode(),u=(0,_.default)("<div id='{0}' class='col-md-3 d-inline-block'></div>".format(d)),m=void 0;m=-1==v.indexOf(r.id)?(0,_.default)("<button class='btn btn-dark challenge-button w-100 text-truncate pt-3 pb-3 mb-2' value='{0}'></button>".format(r.id)):(0,_.default)("<button class='btn btn-dark challenge-button solved-challenge w-100 text-truncate pt-3 pb-3 mb-2' value='{0}'><i class='fas fa-check corner-button-check'></i></button>".format(r.id));for(var p=(0,_.default)("<p>{0}</p>".format(r.name)),f=(0,_.default)("<span>{0}</span>".format(r.value)),j=0;j<r.tags.length;j++){var h="tag-"+r.tags[j].value.replace(/ /g,"-");u.addClass(h)}m.append(p),m.append(f),u.append(m),(0,_.default)("#"+c+"-row").find(".category-challenges > .challenges-row").append(u)}(0,_.default)(".challenge-button").click(function(e){y(this.value),b(this.value)})})}function j(){return(0==a.default.user.id?Promise.resolve():l[a.default.config.userMode]("me").then(function(e){for(var t=e.data,o=t.length-1;0<=o;o--){var n=t[o].challenge_id;t.push(n)}})).then(f).then(p)}(0,_.default)(function(){j().then(function(){0<window.location.hash.length&&function(e){var t=e.lastIndexOf("-"),o=[e.slice(0,t),e.slice(t+1)][1],n=_.default.grep(g,function(e){return e.id==o})[0];u(n)}(decodeURIComponent(window.location.hash.substring(1)))}),(0,_.default)("#challenge-input").keyup(function(e){13==e.keyCode&&(0,_.default)("#challenge-submit").click()}),(0,_.default)(".nav-tabs a").click(function(e){e.preventDefault(),(0,_.default)(this).tab("show")}),(0,_.default)("#challenge-window").on("hidden.bs.modal",function(e){(0,_.default)(".nav-tabs a:first").tab("show"),history.replaceState("",window.document.title,window.location.pathname)}),(0,_.default)(".challenge-solves").click(function(e){b((0,_.default)("#challenge-id").val())}),(0,_.default)("#challenge-window").on("hide.bs.modal",function(e){(0,_.default)("#challenge-input").removeClass("wrong"),(0,_.default)("#challenge-input").removeClass("correct"),(0,_.default)("#incorrect-key").slideUp(),(0,_.default)("#correct-key").slideUp(),(0,_.default)("#already-solved").slideUp(),(0,_.default)("#too-fast").slideUp()})}),setInterval(j,3e5);function h(e){(0,n.ezAlert)({title:"Hint",body:c.render(e.content),button:"Got it!"})}var T=function(t){a.default.api.get_hint({hintId:t}).then(function(e){e.data.content?h(e.data):function(t){(0,n.ezQuery)({title:"Unlock Hint?",body:"Are you sure you want to open this hint?",success:function(){var e={target:t,type:"hints"};a.default.api.post_unlock_list({},e).then(function(e){e.success?a.default.api.get_hint({hintId:t}).then(function(e){h(e.data)}):(0,n.ezAlert)({title:"Error",body:c.render(e.errors.score),button:"Got it!"})})}})}(t)})}},"./CTFd/themes/core/assets/js/pages/main.js":function(e,t,o){var n=p(o("./CTFd/themes/core/assets/js/CTFd.js")),s=p(o("./node_modules/jquery/dist/jquery.js")),a=p(o("./node_modules/moment/moment.js")),i=p(o("./node_modules/nunjucks/browser/nunjucks.js")),l=o("./node_modules/howler/dist/howler.js"),r=p(o("./CTFd/themes/core/assets/js/events.js")),d=p(o("./CTFd/themes/core/assets/js/config.js")),c=p(o("./CTFd/themes/core/assets/js/styles.js")),u=p(o("./CTFd/themes/core/assets/js/times.js")),m=p(o("./CTFd/themes/core/assets/js/helpers.js"));function p(e){return e&&e.__esModule?e:{default:e}}n.default.init(window.init),window.CTFd=n.default,window.helpers=m.default,window.$=s.default,window.Moment=a.default,window.nunjucks=i.default,window.Howl=l.Howl,(0,s.default)(function(){(0,c.default)(),(0,u.default)(),(0,r.default)(d.default.urlRoot)})},"./CTFd/themes/core/assets/js/patch.js":function(e,t,o){var n,l=(n=o("./node_modules/q/q.js"))&&n.__esModule?n:{default:n},s=o("./CTFd/themes/core/assets/js/api.js");function a(e,t,o){return t in e?Object.defineProperty(e,t,{value:o,enumerable:!0,configurable:!0,writable:!0}):e[t]=o,e}function r(e,t){return function(t){for(var e=1;e<arguments.length;e++)if(e%2){var o=null!=arguments[e]?arguments[e]:{},n=Object.keys(o);"function"==typeof Object.getOwnPropertySymbols&&(n=n.concat(Object.getOwnPropertySymbols(o).filter(function(e){return Object.getOwnPropertyDescriptor(o,e).enumerable}))),n.forEach(function(e){a(t,e,o[e])})}else Object.defineProperties(t,Object.getOwnPropertyDescriptors(arguments[e]));return t}({},e,{},t)}s.API.prototype.requestRaw=function(e,t,o,n,s,a,i,l){var r=a&&Object.keys(a).length?function(e){var t=[];for(var o in e)e.hasOwnProperty(o)&&t.push(encodeURIComponent(o)+"="+encodeURIComponent(e[o]));return t.join("&")}(a):null,d=t+(r?"?"+r:"");n&&!Object.keys(n).length&&(n=void 0),fetch(d,{method:e,headers:s,body:n}).then(function(e){return e.json()}).then(function(e){l.resolve(e)}).catch(function(e){l.reject(e)})},s.API.prototype.patch_user_public=function(e,t){void 0===e&&(e={});var o=l.default.defer(),n=this.domain,s="/users/{user_id}",a={};return a.Accept=["application/json"],a["Content-Type"]=["application/json"],s=s.replace("{user_id}",e.userId),void 0===e.userId?o.reject(new Error("Missing required parameter: userId")):this.request("PATCH",n+s,e,t,a,{},{},o),o.promise},s.API.prototype.patch_user_private=function(e,t){void 0===e&&(e={});var o=l.default.defer(),n=this.domain,s={};return s.Accept=["application/json"],s["Content-Type"]=["application/json"],this.request("PATCH",n+"/users/me",e,t,s,{},{},o),o.promise},s.API.prototype.post_unlock_list=function(e,t){var o=l.default.defer(),n=this.domain,s={};return s.Accept=["application/json"],s["Content-Type"]=["application/json"],this.request("POST",n+"/unlocks",e,t,s,{},{},o),o.promise},s.API.prototype.post_notification_list=function(e,t){void 0===e&&(e={});var o=l.default.defer(),n=this.domain,s={};return s.Accept=["application/json"],s["Content-Type"]=["application/json"],this.request("POST",n+"/notifications",e,t,s,{},{},o),o.promise},s.API.prototype.post_files_list=function(e,t){var o=l.default.defer(),n=this.domain,s={};return s.Accept=["application/json"],s["Content-Type"]=["application/json"],this.requestRaw("POST",n+"/files",e,t,s,{},{},o),o.promise},s.API.prototype.patch_config=function(e,t){void 0===e&&(e={});var o=l.default.defer(),n=this.domain,s="/configs/{config_key}",a={};return a.Accept=["application/json"],a["Content-Type"]=["application/json"],s=s.replace("{config_key}",e.configKey),void 0===e.configKey?o.reject(new Error("Missing required parameter: configKey")):this.request("PATCH",n+s,e,t,a,{},{},o),o.promise},s.API.prototype.patch_config_list=function(e,t){void 0===e&&(e={});var o=l.default.defer(),n=this.domain,s={},a={};return a.Accept=["application/json"],a["Content-Type"]=["application/json"],s=r(e,s),this.request("PATCH",n+"/configs",e,t,a,s,{},o),o.promise},s.API.prototype.post_tag_list=function(e,t){void 0===e&&(e={});var o=l.default.defer(),n=this.domain,s={},a={};return a.Accept=["application/json"],a["Content-Type"]=["application/json"],s=r(e,s),this.request("POST",n+"/tags",e,t,a,s,{},o),o.promise},s.API.prototype.patch_team_public=function(e,t){void 0===e&&(e={});var o=l.default.defer(),n=this.domain,s="/teams/{team_id}",a={},i={};return i.Accept=["application/json"],i["Content-Type"]=["application/json"],s=s.replace("{team_id}",e.teamId),void 0===e.teamId?o.reject(new Error("Missing required parameter: teamId")):(a=r(e,a),this.request("PATCH",n+s,e,t,i,a,{},o)),o.promise},s.API.prototype.post_challenge_attempt=function(e,t){void 0===e&&(e={});var o=l.default.defer(),n=this.domain,s={},a={};return a.Accept=["application/json"],a["Content-Type"]=["application/json"],s=r(e,s),this.request("POST",n+"/challenges/attempt",e,t,a,s,{},o),o.promise},s.API.prototype.get_hint=function(e){void 0===e&&(e={});var t=l.default.defer(),o=this.domain,n="/hints/{hint_id}",s={},a={};return a.Accept=["application/json"],a["Content-Type"]=["application/json"],n=n.replace("{hint_id}",e.hintId),void 0===e.hintId?t.reject(new Error("Missing required parameter: hintId")):(delete e.hintId,s=r(e,s),this.request("GET",o+n,e,{},a,s,{},t)),t.promise}},"./CTFd/themes/core/assets/js/styles.js":function(e,t,o){Object.defineProperty(t,"__esModule",{value:!0}),t.default=void 0,o("./node_modules/bootstrap/dist/js/bootstrap.bundle.js");var n,s=(n=o("./node_modules/jquery/dist/jquery.js"))&&n.__esModule?n:{default:n};t.default=function(){(0,s.default)(":input").each(function(){(0,s.default)(this).data("initial",(0,s.default)(this).val())}),(0,s.default)(".form-control").bind({focus:function(){(0,s.default)(this).removeClass("input-filled-invalid"),(0,s.default)(this).addClass("input-filled-valid")},blur:function(){""===(0,s.default)(this).val()&&((0,s.default)(this).removeClass("input-filled-invalid"),(0,s.default)(this).removeClass("input-filled-valid"))}}),(0,s.default)(".form-control").each(function(){(0,s.default)(this).val()&&(0,s.default)(this).addClass("input-filled-valid")}),(0,s.default)(".page-select").change(function(){var e=new URL(window.location);e.searchParams.set("page",this.value),window.location.href=e.toString()}),(0,s.default)('[data-toggle="tooltip"]').tooltip()}},"./CTFd/themes/core/assets/js/times.js":function(e,t,o){Object.defineProperty(t,"__esModule",{value:!0}),t.default=void 0;var n=a(o("./node_modules/moment/moment.js")),s=a(o("./node_modules/jquery/dist/jquery.js"));function a(e){return e&&e.__esModule?e:{default:e}}t.default=function(){(0,s.default)("[data-time]").each(function(e,t){t.innerText=(0,n.default)((0,s.default)(t).data("time")).local().format("MMMM Do, h:mm:ss A")})}},"./CTFd/themes/core/assets/js/utils.js":function(e,t,o){Object.defineProperty(t,"__esModule",{value:!0}),t.WindowController=s,t.colorHash=function(e){for(var t=0,o=0;o<e.length;o++)t=e.charCodeAt(o)+((t<<5)-t);for(var n="#",s=0;s<3;s++){n+=("00"+(t>>4*s&255).toString(16)).substr(-2)}return n},t.htmlEntities=function(e){return(0,i.default)("<div/>").text(e).html()},t.cumulativeSum=function(e){for(var t=e.concat(),o=0;o<e.length;o++)t[o]=e.slice(0,o+1).reduce(function(e,t){return e+t});return t},t.init_notification_counter=function(){var e=a.getItem(l);null===e?a.setItem(l,0):0<e&&(0,i.default)(".badge-notification").text(e)},t.set_notification_counter=function(e){a.setItem(l,e)},t.inc_notification_counter=function(){var e=a.getItem(l)||0;a.setItem(l,++e),(0,i.default)(".badge-notification").text(e)},t.dec_notification_counter=function(){var e=a.getItem(l)||0;0<e&&(a.setItem(l,--e),(0,i.default)(".badge-notification").text(e));0==e&&r()},t.clear_notification_counter=r,t.copyToClipboard=function(e,t){(0,i.default)(t).select(),document.execCommand("copy"),(0,i.default)(e.target).tooltip({title:"Copied!",trigger:"manual"}),(0,i.default)(e.target).tooltip("show"),setTimeout(function(){(0,i.default)(e.target).tooltip("hide")},1500)},t.makeSortableTables=function(){function a(e,t){return(0,i.default)(e).children("td").eq(t).text()}(0,i.default)("th.sort-col").append(' <i class="fas fa-sort"></i>'),(0,i.default)("th.sort-col").click(function(){var e=(0,i.default)(this).parents("table").eq(0),t=e.find("tr:gt(0)").toArray().sort(function(s){return function(e,t){var o=a(e,s),n=a(t,s);return i.default.isNumeric(o)&&i.default.isNumeric(n)?o-n:o.toString().localeCompare(n)}}((0,i.default)(this).index()));this.asc=!this.asc,this.asc||(t=t.reverse());for(var o=0;o<t.length;o++)e.append(t[o])})};var n,i=(n=o("./node_modules/jquery/dist/jquery.js"))&&n.__esModule?n:{default:n};function s(){this.id=Math.random(),this.isMaster=!1,this.others={},window.addEventListener("storage",this,!1),window.addEventListener("unload",this,!1),this.broadcast("hello");var t=this;this._checkTimeout=setTimeout(function e(){t.check(),t._checkTimeout=setTimeout(e,9e3)},500),this._pingTimeout=setTimeout(function e(){t.sendPing(),t._pingTimeout=setTimeout(e,17e3)},17e3)}i.default.fn.serializeJSON=function(o){var n={},s=(0,i.default)(this),e=s.serializeArray();return(e=(e=e.concat(s.find("input[type=checkbox]:checked").map(function(){return{name:this.name,value:!0}}).get())).concat(s.find("input[type=checkbox]:not(:checked)").map(function(){return{name:this.name,value:!1}}).get())).map(function(e){if(o)if(null!==e.value&&""!==e.value)n[e.name]=e.value;else{var t=s.find(":input[name=".concat(e.name,"]"));t.data("initial")!==t.val()&&(n[e.name]=e.value)}else n[e.name]=e.value}),n},String.prototype.format=String.prototype.f=function(){for(var e=this,t=arguments.length;t--;)e=e.replace(new RegExp("\\{"+t+"\\}","gm"),arguments[t]);return e},String.prototype.hashCode=function(){var e,t,o=0;if(0==this.length)return o;for(e=0,t=this.length;e<t;e++)o=(o<<5)-o+this.charCodeAt(e),o|=0;return o},s.prototype.destroy=function(){clearTimeout(this._pingTimeout),clearTimeout(this._checkTimeout),window.removeEventListener("storage",this,!1),window.removeEventListener("unload",this,!1),this.broadcast("bye")},s.prototype.handleEvent=function(e){if("unload"===e.type)this.destroy();else if("broadcast"===e.key)try{var t=JSON.parse(e.newValue);t.id!==this.id&&this[t.type](t)}catch(e){}},s.prototype.sendPing=function(){this.broadcast("ping")},s.prototype.hello=function(e){this.ping(e),e.id<this.id?this.check():this.sendPing()},s.prototype.ping=function(e){this.others[e.id]=+new Date},s.prototype.bye=function(e){delete this.others[e.id],this.check()},s.prototype.check=function(e){var t,o=+new Date,n=!0;for(t in this.others)this.others[t]+23e3<o?delete this.others[t]:t<this.id&&(n=!1);this.isMaster!==n&&(this.isMaster=n,this.masterDidChange())},s.prototype.masterDidChange=function(){},s.prototype.broadcast=function(e,t){var o={id:this.id,type:e};for(var n in t)o[n]=t[n];try{localStorage.setItem("broadcast",JSON.stringify(o))}catch(e){console.log(e)}};var a=window.localStorage,l="unread_notifications";function r(){a.setItem(l,0),(0,i.default)(".badge-notification").empty()}},"./node_modules/moment/locale sync recursive ^\\.\\/.*$":function(e,t,o){var n={"./af":"./node_modules/moment/locale/af.js","./af.js":"./node_modules/moment/locale/af.js","./ar":"./node_modules/moment/locale/ar.js","./ar-dz":"./node_modules/moment/locale/ar-dz.js","./ar-dz.js":"./node_modules/moment/locale/ar-dz.js","./ar-kw":"./node_modules/moment/locale/ar-kw.js","./ar-kw.js":"./node_modules/moment/locale/ar-kw.js","./ar-ly":"./node_modules/moment/locale/ar-ly.js","./ar-ly.js":"./node_modules/moment/locale/ar-ly.js","./ar-ma":"./node_modules/moment/locale/ar-ma.js","./ar-ma.js":"./node_modules/moment/locale/ar-ma.js","./ar-sa":"./node_modules/moment/locale/ar-sa.js","./ar-sa.js":"./node_modules/moment/locale/ar-sa.js","./ar-tn":"./node_modules/moment/locale/ar-tn.js","./ar-tn.js":"./node_modules/moment/locale/ar-tn.js","./ar.js":"./node_modules/moment/locale/ar.js","./az":"./node_modules/moment/locale/az.js","./az.js":"./node_modules/moment/locale/az.js","./be":"./node_modules/moment/locale/be.js","./be.js":"./node_modules/moment/locale/be.js","./bg":"./node_modules/moment/locale/bg.js","./bg.js":"./node_modules/moment/locale/bg.js","./bm":"./node_modules/moment/locale/bm.js","./bm.js":"./node_modules/moment/locale/bm.js","./bn":"./node_modules/moment/locale/bn.js","./bn.js":"./node_modules/moment/locale/bn.js","./bo":"./node_modules/moment/locale/bo.js","./bo.js":"./node_modules/moment/locale/bo.js","./br":"./node_modules/moment/locale/br.js","./br.js":"./node_modules/moment/locale/br.js","./bs":"./node_modules/moment/locale/bs.js","./bs.js":"./node_modules/moment/locale/bs.js","./ca":"./node_modules/moment/locale/ca.js","./ca.js":"./node_modules/moment/locale/ca.js","./cs":"./node_modules/moment/locale/cs.js","./cs.js":"./node_modules/moment/locale/cs.js","./cv":"./node_modules/moment/locale/cv.js","./cv.js":"./node_modules/moment/locale/cv.js","./cy":"./node_modules/moment/locale/cy.js","./cy.js":"./node_modules/moment/locale/cy.js","./da":"./node_modules/moment/locale/da.js","./da.js":"./node_modules/moment/locale/da.js","./de":"./node_modules/moment/locale/de.js","./de-at":"./node_modules/moment/locale/de-at.js","./de-at.js":"./node_modules/moment/locale/de-at.js","./de-ch":"./node_modules/moment/locale/de-ch.js","./de-ch.js":"./node_modules/moment/locale/de-ch.js","./de.js":"./node_modules/moment/locale/de.js","./dv":"./node_modules/moment/locale/dv.js","./dv.js":"./node_modules/moment/locale/dv.js","./el":"./node_modules/moment/locale/el.js","./el.js":"./node_modules/moment/locale/el.js","./en-SG":"./node_modules/moment/locale/en-SG.js","./en-SG.js":"./node_modules/moment/locale/en-SG.js","./en-au":"./node_modules/moment/locale/en-au.js","./en-au.js":"./node_modules/moment/locale/en-au.js","./en-ca":"./node_modules/moment/locale/en-ca.js","./en-ca.js":"./node_modules/moment/locale/en-ca.js","./en-gb":"./node_modules/moment/locale/en-gb.js","./en-gb.js":"./node_modules/moment/locale/en-gb.js","./en-ie":"./node_modules/moment/locale/en-ie.js","./en-ie.js":"./node_modules/moment/locale/en-ie.js","./en-il":"./node_modules/moment/locale/en-il.js","./en-il.js":"./node_modules/moment/locale/en-il.js","./en-nz":"./node_modules/moment/locale/en-nz.js","./en-nz.js":"./node_modules/moment/locale/en-nz.js","./eo":"./node_modules/moment/locale/eo.js","./eo.js":"./node_modules/moment/locale/eo.js","./es":"./node_modules/moment/locale/es.js","./es-do":"./node_modules/moment/locale/es-do.js","./es-do.js":"./node_modules/moment/locale/es-do.js","./es-us":"./node_modules/moment/locale/es-us.js","./es-us.js":"./node_modules/moment/locale/es-us.js","./es.js":"./node_modules/moment/locale/es.js","./et":"./node_modules/moment/locale/et.js","./et.js":"./node_modules/moment/locale/et.js","./eu":"./node_modules/moment/locale/eu.js","./eu.js":"./node_modules/moment/locale/eu.js","./fa":"./node_modules/moment/locale/fa.js","./fa.js":"./node_modules/moment/locale/fa.js","./fi":"./node_modules/moment/locale/fi.js","./fi.js":"./node_modules/moment/locale/fi.js","./fo":"./node_modules/moment/locale/fo.js","./fo.js":"./node_modules/moment/locale/fo.js","./fr":"./node_modules/moment/locale/fr.js","./fr-ca":"./node_modules/moment/locale/fr-ca.js","./fr-ca.js":"./node_modules/moment/locale/fr-ca.js","./fr-ch":"./node_modules/moment/locale/fr-ch.js","./fr-ch.js":"./node_modules/moment/locale/fr-ch.js","./fr.js":"./node_modules/moment/locale/fr.js","./fy":"./node_modules/moment/locale/fy.js","./fy.js":"./node_modules/moment/locale/fy.js","./ga":"./node_modules/moment/locale/ga.js","./ga.js":"./node_modules/moment/locale/ga.js","./gd":"./node_modules/moment/locale/gd.js","./gd.js":"./node_modules/moment/locale/gd.js","./gl":"./node_modules/moment/locale/gl.js","./gl.js":"./node_modules/moment/locale/gl.js","./gom-latn":"./node_modules/moment/locale/gom-latn.js","./gom-latn.js":"./node_modules/moment/locale/gom-latn.js","./gu":"./node_modules/moment/locale/gu.js","./gu.js":"./node_modules/moment/locale/gu.js","./he":"./node_modules/moment/locale/he.js","./he.js":"./node_modules/moment/locale/he.js","./hi":"./node_modules/moment/locale/hi.js","./hi.js":"./node_modules/moment/locale/hi.js","./hr":"./node_modules/moment/locale/hr.js","./hr.js":"./node_modules/moment/locale/hr.js","./hu":"./node_modules/moment/locale/hu.js","./hu.js":"./node_modules/moment/locale/hu.js","./hy-am":"./node_modules/moment/locale/hy-am.js","./hy-am.js":"./node_modules/moment/locale/hy-am.js","./id":"./node_modules/moment/locale/id.js","./id.js":"./node_modules/moment/locale/id.js","./is":"./node_modules/moment/locale/is.js","./is.js":"./node_modules/moment/locale/is.js","./it":"./node_modules/moment/locale/it.js","./it-ch":"./node_modules/moment/locale/it-ch.js","./it-ch.js":"./node_modules/moment/locale/it-ch.js","./it.js":"./node_modules/moment/locale/it.js","./ja":"./node_modules/moment/locale/ja.js","./ja.js":"./node_modules/moment/locale/ja.js","./jv":"./node_modules/moment/locale/jv.js","./jv.js":"./node_modules/moment/locale/jv.js","./ka":"./node_modules/moment/locale/ka.js","./ka.js":"./node_modules/moment/locale/ka.js","./kk":"./node_modules/moment/locale/kk.js","./kk.js":"./node_modules/moment/locale/kk.js","./km":"./node_modules/moment/locale/km.js","./km.js":"./node_modules/moment/locale/km.js","./kn":"./node_modules/moment/locale/kn.js","./kn.js":"./node_modules/moment/locale/kn.js","./ko":"./node_modules/moment/locale/ko.js","./ko.js":"./node_modules/moment/locale/ko.js","./ku":"./node_modules/moment/locale/ku.js","./ku.js":"./node_modules/moment/locale/ku.js","./ky":"./node_modules/moment/locale/ky.js","./ky.js":"./node_modules/moment/locale/ky.js","./lb":"./node_modules/moment/locale/lb.js","./lb.js":"./node_modules/moment/locale/lb.js","./lo":"./node_modules/moment/locale/lo.js","./lo.js":"./node_modules/moment/locale/lo.js","./lt":"./node_modules/moment/locale/lt.js","./lt.js":"./node_modules/moment/locale/lt.js","./lv":"./node_modules/moment/locale/lv.js","./lv.js":"./node_modules/moment/locale/lv.js","./me":"./node_modules/moment/locale/me.js","./me.js":"./node_modules/moment/locale/me.js","./mi":"./node_modules/moment/locale/mi.js","./mi.js":"./node_modules/moment/locale/mi.js","./mk":"./node_modules/moment/locale/mk.js","./mk.js":"./node_modules/moment/locale/mk.js","./ml":"./node_modules/moment/locale/ml.js","./ml.js":"./node_modules/moment/locale/ml.js","./mn":"./node_modules/moment/locale/mn.js","./mn.js":"./node_modules/moment/locale/mn.js","./mr":"./node_modules/moment/locale/mr.js","./mr.js":"./node_modules/moment/locale/mr.js","./ms":"./node_modules/moment/locale/ms.js","./ms-my":"./node_modules/moment/locale/ms-my.js","./ms-my.js":"./node_modules/moment/locale/ms-my.js","./ms.js":"./node_modules/moment/locale/ms.js","./mt":"./node_modules/moment/locale/mt.js","./mt.js":"./node_modules/moment/locale/mt.js","./my":"./node_modules/moment/locale/my.js","./my.js":"./node_modules/moment/locale/my.js","./nb":"./node_modules/moment/locale/nb.js","./nb.js":"./node_modules/moment/locale/nb.js","./ne":"./node_modules/moment/locale/ne.js","./ne.js":"./node_modules/moment/locale/ne.js","./nl":"./node_modules/moment/locale/nl.js","./nl-be":"./node_modules/moment/locale/nl-be.js","./nl-be.js":"./node_modules/moment/locale/nl-be.js","./nl.js":"./node_modules/moment/locale/nl.js","./nn":"./node_modules/moment/locale/nn.js","./nn.js":"./node_modules/moment/locale/nn.js","./pa-in":"./node_modules/moment/locale/pa-in.js","./pa-in.js":"./node_modules/moment/locale/pa-in.js","./pl":"./node_modules/moment/locale/pl.js","./pl.js":"./node_modules/moment/locale/pl.js","./pt":"./node_modules/moment/locale/pt.js","./pt-br":"./node_modules/moment/locale/pt-br.js","./pt-br.js":"./node_modules/moment/locale/pt-br.js","./pt.js":"./node_modules/moment/locale/pt.js","./ro":"./node_modules/moment/locale/ro.js","./ro.js":"./node_modules/moment/locale/ro.js","./ru":"./node_modules/moment/locale/ru.js","./ru.js":"./node_modules/moment/locale/ru.js","./sd":"./node_modules/moment/locale/sd.js","./sd.js":"./node_modules/moment/locale/sd.js","./se":"./node_modules/moment/locale/se.js","./se.js":"./node_modules/moment/locale/se.js","./si":"./node_modules/moment/locale/si.js","./si.js":"./node_modules/moment/locale/si.js","./sk":"./node_modules/moment/locale/sk.js","./sk.js":"./node_modules/moment/locale/sk.js","./sl":"./node_modules/moment/locale/sl.js","./sl.js":"./node_modules/moment/locale/sl.js","./sq":"./node_modules/moment/locale/sq.js","./sq.js":"./node_modules/moment/locale/sq.js","./sr":"./node_modules/moment/locale/sr.js","./sr-cyrl":"./node_modules/moment/locale/sr-cyrl.js","./sr-cyrl.js":"./node_modules/moment/locale/sr-cyrl.js","./sr.js":"./node_modules/moment/locale/sr.js","./ss":"./node_modules/moment/locale/ss.js","./ss.js":"./node_modules/moment/locale/ss.js","./sv":"./node_modules/moment/locale/sv.js","./sv.js":"./node_modules/moment/locale/sv.js","./sw":"./node_modules/moment/locale/sw.js","./sw.js":"./node_modules/moment/locale/sw.js","./ta":"./node_modules/moment/locale/ta.js","./ta.js":"./node_modules/moment/locale/ta.js","./te":"./node_modules/moment/locale/te.js","./te.js":"./node_modules/moment/locale/te.js","./tet":"./node_modules/moment/locale/tet.js","./tet.js":"./node_modules/moment/locale/tet.js","./tg":"./node_modules/moment/locale/tg.js","./tg.js":"./node_modules/moment/locale/tg.js","./th":"./node_modules/moment/locale/th.js","./th.js":"./node_modules/moment/locale/th.js","./tl-ph":"./node_modules/moment/locale/tl-ph.js","./tl-ph.js":"./node_modules/moment/locale/tl-ph.js","./tlh":"./node_modules/moment/locale/tlh.js","./tlh.js":"./node_modules/moment/locale/tlh.js","./tr":"./node_modules/moment/locale/tr.js","./tr.js":"./node_modules/moment/locale/tr.js","./tzl":"./node_modules/moment/locale/tzl.js","./tzl.js":"./node_modules/moment/locale/tzl.js","./tzm":"./node_modules/moment/locale/tzm.js","./tzm-latn":"./node_modules/moment/locale/tzm-latn.js","./tzm-latn.js":"./node_modules/moment/locale/tzm-latn.js","./tzm.js":"./node_modules/moment/locale/tzm.js","./ug-cn":"./node_modules/moment/locale/ug-cn.js","./ug-cn.js":"./node_modules/moment/locale/ug-cn.js","./uk":"./node_modules/moment/locale/uk.js","./uk.js":"./node_modules/moment/locale/uk.js","./ur":"./node_modules/moment/locale/ur.js","./ur.js":"./node_modules/moment/locale/ur.js","./uz":"./node_modules/moment/locale/uz.js","./uz-latn":"./node_modules/moment/locale/uz-latn.js","./uz-latn.js":"./node_modules/moment/locale/uz-latn.js","./uz.js":"./node_modules/moment/locale/uz.js","./vi":"./node_modules/moment/locale/vi.js","./vi.js":"./node_modules/moment/locale/vi.js","./x-pseudo":"./node_modules/moment/locale/x-pseudo.js","./x-pseudo.js":"./node_modules/moment/locale/x-pseudo.js","./yo":"./node_modules/moment/locale/yo.js","./yo.js":"./node_modules/moment/locale/yo.js","./zh-cn":"./node_modules/moment/locale/zh-cn.js","./zh-cn.js":"./node_modules/moment/locale/zh-cn.js","./zh-hk":"./node_modules/moment/locale/zh-hk.js","./zh-hk.js":"./node_modules/moment/locale/zh-hk.js","./zh-tw":"./node_modules/moment/locale/zh-tw.js","./zh-tw.js":"./node_modules/moment/locale/zh-tw.js"};function s(e){var t=a(e);return o(t)}function a(e){var t=n[e];if(t+1)return t;var o=new Error("Cannot find module '"+e+"'");throw o.code="MODULE_NOT_FOUND",o}s.keys=function(){return Object.keys(n)},s.resolve=a,(e.exports=s).id="./node_modules/moment/locale sync recursive ^\\.\\/.*$"}}); \ No newline at end of file +!function(r){function e(e){for(var t,o,n=e[0],s=e[1],a=e[2],i=0,l=[];i<n.length;i++)o=n[i],c[o]&&l.push(c[o][0]),c[o]=0;for(t in s)Object.prototype.hasOwnProperty.call(s,t)&&(r[t]=s[t]);for(m&&m(e);l.length;)l.shift()();return u.push.apply(u,a||[]),d()}function d(){for(var e,t=0;t<u.length;t++){for(var o=u[t],n=!0,s=1;s<o.length;s++){var a=o[s];0!==c[a]&&(n=!1)}n&&(u.splice(t--,1),e=i(i.s=o[0]))}return e}var o={},c={3:0,4:0},u=[];function i(e){if(o[e])return o[e].exports;var t=o[e]={i:e,l:!1,exports:{}};return r[e].call(t.exports,t,t.exports,i),t.l=!0,t.exports}i.m=r,i.c=o,i.d=function(e,t,o){i.o(e,t)||Object.defineProperty(e,t,{enumerable:!0,get:o})},i.r=function(e){"undefined"!=typeof Symbol&&Symbol.toStringTag&&Object.defineProperty(e,Symbol.toStringTag,{value:"Module"}),Object.defineProperty(e,"__esModule",{value:!0})},i.t=function(t,e){if(1&e&&(t=i(t)),8&e)return t;if(4&e&&"object"==typeof t&&t&&t.__esModule)return t;var o=Object.create(null);if(i.r(o),Object.defineProperty(o,"default",{enumerable:!0,value:t}),2&e&&"string"!=typeof t)for(var n in t)i.d(o,n,function(e){return t[e]}.bind(null,n));return o},i.n=function(e){var t=e&&e.__esModule?function(){return e.default}:function(){return e};return i.d(t,"a",t),t},i.o=function(e,t){return Object.prototype.hasOwnProperty.call(e,t)},i.p="/themes/core/static/js";var t=window.webpackJsonp=window.webpackJsonp||[],n=t.push.bind(t);t.push=e,t=t.slice();for(var s=0;s<t.length;s++)e(t[s]);var m=n;u.push(["./CTFd/themes/core/assets/js/pages/challenges.js",0,1]),d()}({"./CTFd/themes/core/assets/js/CTFd.js":function(e,t,o){Object.defineProperty(t,"__esModule",{value:!0}),t.default=void 0;var n=d(o("./CTFd/themes/core/assets/js/fetch.js")),s=d(o("./CTFd/themes/core/assets/js/config.js")),a=o("./CTFd/themes/core/assets/js/api.js");o("./CTFd/themes/core/assets/js/patch.js");var i=d(o("./node_modules/markdown-it/index.js")),l=d(o("./node_modules/jquery/dist/jquery.js")),r=d(o("./CTFd/themes/core/assets/js/ezq.js"));function d(e){return e&&e.__esModule?e:{default:e}}function c(e,t,o){return t in e?Object.defineProperty(e,t,{value:o,enumerable:!0,configurable:!0,writable:!0}):e[t]=o,e}var u=new a.API("/"),m={},p={ezq:r.default},f={$:l.default,markdown:function(e){var t=function(t){for(var e=1;e<arguments.length;e++)if(e%2){var o=null!=arguments[e]?arguments[e]:{},n=Object.keys(o);"function"==typeof Object.getOwnPropertySymbols&&(n=n.concat(Object.getOwnPropertySymbols(o).filter(function(e){return Object.getOwnPropertyDescriptor(o,e).enumerable}))),n.forEach(function(e){c(t,e,o[e])})}else Object.defineProperties(t,Object.getOwnPropertyDescriptors(arguments[e]));return t}({},{html:!0,linkify:!0},{},e),o=(0,i.default)(t);return o.renderer.rules.link_open=function(e,t,o,n,s){return e[t].attrPush(["target","_blank"]),s.renderToken(e,t,o)},o}},j=!1,h={run:function(e){e(_)}};var _={init:function(e){j||(j=!0,s.default.urlRoot=e.urlRoot||s.default.urlRoot,s.default.csrfNonce=e.csrfNonce||s.default.csrfNonce,s.default.userMode=e.userMode||s.default.userMode,u.domain=s.default.urlRoot+"/api/v1",m.id=e.userId)},config:s.default,fetch:n.default,user:m,ui:p,api:u,lib:f,_internal:{},plugin:h},g=_;t.default=g},"./CTFd/themes/core/assets/js/api.js":function(e,t,o){var c=n(o("./CTFd/themes/core/assets/js/fetch.js")),l=n(o("./node_modules/q/q.js"));function n(e){return e&&e.__esModule?e:{default:e}}function s(e){return(s="function"==typeof Symbol&&"symbol"==typeof Symbol.iterator?function(e){return typeof e}:function(e){return e&&"function"==typeof Symbol&&e.constructor===Symbol&&e!==Symbol.prototype?"symbol":typeof e})(e)}var a=function(){"use strict";function e(e){var t="object"===s(e)?e.domain:e;if(this.domain=t||"",0===this.domain.length)throw new Error("Domain parameter must be specified as a string.")}function i(o,n){return o.$queryParameters&&Object.keys(o.$queryParameters).forEach(function(e){var t=o.$queryParameters[e];n[e]=t}),n}return e.prototype.request=function(e,t,o,n,s,a,i,l){var r=a&&Object.keys(a).length?function(e){var t=[];for(var o in e)e.hasOwnProperty(o)&&t.push(encodeURIComponent(o)+"="+encodeURIComponent(e[o]));return t.join("&")}(a):null,d=t+(r?"?"+r:"");n&&!Object.keys(n).length&&(n=void 0),(0,c.default)(d,{method:e,headers:s,body:JSON.stringify(n)}).then(function(e){return e.json()}).then(function(e){l.resolve(e)}).catch(function(e){l.reject(e)})},e.prototype.post_award_list=function(e){void 0===e&&(e={});var t=l.default.defer(),o=this.domain,n={},s={};return s.Accept=["application/json"],s["Content-Type"]=["application/json"],n=i(e,n),this.request("POST",o+"/awards",e,{},s,n,{},t),t.promise},e.prototype.delete_award=function(e){void 0===e&&(e={});var t=l.default.defer(),o=this.domain,n="/awards/{award_id}",s={},a={};return a.Accept=["application/json"],a["Content-Type"]=["application/json"],n=n.replace("{award_id}",e.awardId),void 0===e.awardId?t.reject(new Error("Missing required parameter: awardId")):(s=i(e,s),this.request("DELETE",o+n,e,{},a,s,{},t)),t.promise},e.prototype.get_award=function(e){void 0===e&&(e={});var t=l.default.defer(),o=this.domain,n="/awards/{award_id}",s={},a={};return a.Accept=["application/json"],a["Content-Type"]=["application/json"],n=n.replace("{award_id}",e.awardId),void 0===e.awardId?t.reject(new Error("Missing required parameter: awardId")):(s=i(e,s),this.request("GET",o+n,e,{},a,s,{},t)),t.promise},e.prototype.post_challenge_list=function(e){void 0===e&&(e={});var t=l.default.defer(),o=this.domain,n={},s={};return s.Accept=["application/json"],s["Content-Type"]=["application/json"],n=i(e,n),this.request("POST",o+"/challenges",e,{},s,n,{},t),t.promise},e.prototype.get_challenge_list=function(e){void 0===e&&(e={});var t=l.default.defer(),o=this.domain,n={},s={};return s.Accept=["application/json"],s["Content-Type"]=["application/json"],n=i(e,n),this.request("GET",o+"/challenges",e,{},s,n,{},t),t.promise},e.prototype.post_challenge_attempt=function(e){void 0===e&&(e={});var t=l.default.defer(),o=this.domain,n={},s={};return s.Accept=["application/json"],s["Content-Type"]=["application/json"],n=i(e,n),this.request("POST",o+"/challenges/attempt",e,{},s,n,{},t),t.promise},e.prototype.get_challenge_types=function(e){void 0===e&&(e={});var t=l.default.defer(),o=this.domain,n={},s={};return s.Accept=["application/json"],s["Content-Type"]=["application/json"],n=i(e,n),this.request("GET",o+"/challenges/types",e,{},s,n,{},t),t.promise},e.prototype.patch_challenge=function(e){void 0===e&&(e={});var t=l.default.defer(),o=this.domain,n="/challenges/{challenge_id}",s={},a={};return a.Accept=["application/json"],a["Content-Type"]=["application/json"],n=n.replace("{challenge_id}",e.challengeId),void 0===e.challengeId?t.reject(new Error("Missing required parameter: challengeId")):(s=i(e,s),this.request("PATCH",o+n,e,{},a,s,{},t)),t.promise},e.prototype.delete_challenge=function(e){void 0===e&&(e={});var t=l.default.defer(),o=this.domain,n="/challenges/{challenge_id}",s={},a={};return a.Accept=["application/json"],a["Content-Type"]=["application/json"],n=n.replace("{challenge_id}",e.challengeId),void 0===e.challengeId?t.reject(new Error("Missing required parameter: challengeId")):(s=i(e,s),this.request("DELETE",o+n,e,{},a,s,{},t)),t.promise},e.prototype.get_challenge=function(e){void 0===e&&(e={});var t=l.default.defer(),o=this.domain,n="/challenges/{challenge_id}",s={},a={};return a.Accept=["application/json"],a["Content-Type"]=["application/json"],n=n.replace("{challenge_id}",e.challengeId),void 0===e.challengeId?t.reject(new Error("Missing required parameter: challengeId")):(s=i(e,s),this.request("GET",o+n,e,{},a,s,{},t)),t.promise},e.prototype.get_challenge_files=function(e){void 0===e&&(e={});var t=l.default.defer(),o=this.domain,n="/challenges/{challenge_id}/files",s={},a={};return a.Accept=["application/json"],a["Content-Type"]=["application/json"],void 0!==e.id&&(s.id=e.id),n=n.replace("{challenge_id}",e.challengeId),void 0===e.challengeId?t.reject(new Error("Missing required parameter: challengeId")):(s=i(e,s),this.request("GET",o+n,e,{},a,s,{},t)),t.promise},e.prototype.get_challenge_flags=function(e){void 0===e&&(e={});var t=l.default.defer(),o=this.domain,n="/challenges/{challenge_id}/flags",s={},a={};return a.Accept=["application/json"],a["Content-Type"]=["application/json"],void 0!==e.id&&(s.id=e.id),n=n.replace("{challenge_id}",e.challengeId),void 0===e.challengeId?t.reject(new Error("Missing required parameter: challengeId")):(s=i(e,s),this.request("GET",o+n,e,{},a,s,{},t)),t.promise},e.prototype.get_challenge_hints=function(e){void 0===e&&(e={});var t=l.default.defer(),o=this.domain,n="/challenges/{challenge_id}/hints",s={},a={};return a.Accept=["application/json"],a["Content-Type"]=["application/json"],void 0!==e.id&&(s.id=e.id),n=n.replace("{challenge_id}",e.challengeId),void 0===e.challengeId?t.reject(new Error("Missing required parameter: challengeId")):(s=i(e,s),this.request("GET",o+n,e,{},a,s,{},t)),t.promise},e.prototype.get_challenge_solves=function(e){void 0===e&&(e={});var t=l.default.defer(),o=this.domain,n="/challenges/{challenge_id}/solves",s={},a={};return a.Accept=["application/json"],a["Content-Type"]=["application/json"],void 0!==e.id&&(s.id=e.id),n=n.replace("{challenge_id}",e.challengeId),void 0===e.challengeId?t.reject(new Error("Missing required parameter: challengeId")):(s=i(e,s),this.request("GET",o+n,e,{},a,s,{},t)),t.promise},e.prototype.get_challenge_tags=function(e){void 0===e&&(e={});var t=l.default.defer(),o=this.domain,n="/challenges/{challenge_id}/tags",s={},a={};return a.Accept=["application/json"],a["Content-Type"]=["application/json"],void 0!==e.id&&(s.id=e.id),n=n.replace("{challenge_id}",e.challengeId),void 0===e.challengeId?t.reject(new Error("Missing required parameter: challengeId")):(s=i(e,s),this.request("GET",o+n,e,{},a,s,{},t)),t.promise},e.prototype.post_config_list=function(e){void 0===e&&(e={});var t=l.default.defer(),o=this.domain,n={},s={};return s.Accept=["application/json"],s["Content-Type"]=["application/json"],n=i(e,n),this.request("POST",o+"/configs",e,{},s,n,{},t),t.promise},e.prototype.patch_config_list=function(e){void 0===e&&(e={});var t=l.default.defer(),o=this.domain,n={},s={};return s.Accept=["application/json"],s["Content-Type"]=["application/json"],n=i(e,n),this.request("PATCH",o+"/configs",e,{},s,n,{},t),t.promise},e.prototype.get_config_list=function(e){void 0===e&&(e={});var t=l.default.defer(),o=this.domain,n={},s={};return s.Accept=["application/json"],s["Content-Type"]=["application/json"],n=i(e,n),this.request("GET",o+"/configs",e,{},s,n,{},t),t.promise},e.prototype.patch_config=function(e){void 0===e&&(e={});var t=l.default.defer(),o=this.domain,n="/configs/{config_key}",s={},a={};return a.Accept=["application/json"],a["Content-Type"]=["application/json"],n=n.replace("{config_key}",e.configKey),void 0===e.configKey?t.reject(new Error("Missing required parameter: configKey")):(s=i(e,s),this.request("PATCH",o+n,e,{},a,s,{},t)),t.promise},e.prototype.delete_config=function(e){void 0===e&&(e={});var t=l.default.defer(),o=this.domain,n="/configs/{config_key}",s={},a={};return a.Accept=["application/json"],a["Content-Type"]=["application/json"],n=n.replace("{config_key}",e.configKey),void 0===e.configKey?t.reject(new Error("Missing required parameter: configKey")):(s=i(e,s),this.request("DELETE",o+n,e,{},a,s,{},t)),t.promise},e.prototype.get_config=function(e){void 0===e&&(e={});var t=l.default.defer(),o=this.domain,n="/configs/{config_key}",s={},a={};return a.Accept=["application/json"],a["Content-Type"]=["application/json"],n=n.replace("{config_key}",e.configKey),void 0===e.configKey?t.reject(new Error("Missing required parameter: configKey")):(s=i(e,s),this.request("GET",o+n,e,{},a,s,{},t)),t.promise},e.prototype.post_files_list=function(e){void 0===e&&(e={});var t=l.default.defer(),o=this.domain,n={},s={};return s.Accept=["application/json"],s["Content-Type"]=["application/json"],n=i(e,n),this.request("POST",o+"/files",e,{},s,n,{},t),t.promise},e.prototype.get_files_list=function(e){void 0===e&&(e={});var t=l.default.defer(),o=this.domain,n={},s={};return s.Accept=["application/json"],s["Content-Type"]=["application/json"],n=i(e,n),this.request("GET",o+"/files",e,{},s,n,{},t),t.promise},e.prototype.delete_files_detail=function(e){void 0===e&&(e={});var t=l.default.defer(),o=this.domain,n="/files/{file_id}",s={},a={};return a.Accept=["application/json"],a["Content-Type"]=["application/json"],n=n.replace("{file_id}",e.fileId),void 0===e.fileId?t.reject(new Error("Missing required parameter: fileId")):(s=i(e,s),this.request("DELETE",o+n,e,{},a,s,{},t)),t.promise},e.prototype.get_files_detail=function(e){void 0===e&&(e={});var t=l.default.defer(),o=this.domain,n="/files/{file_id}",s={},a={};return a.Accept=["application/json"],a["Content-Type"]=["application/json"],n=n.replace("{file_id}",e.fileId),void 0===e.fileId?t.reject(new Error("Missing required parameter: fileId")):(s=i(e,s),this.request("GET",o+n,e,{},a,s,{},t)),t.promise},e.prototype.post_flag_list=function(e){void 0===e&&(e={});var t=l.default.defer(),o=this.domain,n={},s={};return s.Accept=["application/json"],s["Content-Type"]=["application/json"],n=i(e,n),this.request("POST",o+"/flags",e,{},s,n,{},t),t.promise},e.prototype.get_flag_list=function(e){void 0===e&&(e={});var t=l.default.defer(),o=this.domain,n={},s={};return s.Accept=["application/json"],s["Content-Type"]=["application/json"],n=i(e,n),this.request("GET",o+"/flags",e,{},s,n,{},t),t.promise},e.prototype.get_flag_types=function(e){void 0===e&&(e={});var t=l.default.defer(),o=this.domain,n={},s={};return s.Accept=["application/json"],s["Content-Type"]=["application/json"],n=i(e,n),this.request("GET",o+"/flags/types",e,{},s,n,{},t),t.promise},e.prototype.get_flag_types_1=function(e){void 0===e&&(e={});var t=l.default.defer(),o=this.domain,n="/flags/types/{type_name}",s={},a={};return a.Accept=["application/json"],a["Content-Type"]=["application/json"],n=n.replace("{type_name}",e.typeName),void 0===e.typeName?t.reject(new Error("Missing required parameter: typeName")):(s=i(e,s),this.request("GET",o+n,e,{},a,s,{},t)),t.promise},e.prototype.patch_flag=function(e){void 0===e&&(e={});var t=l.default.defer(),o=this.domain,n="/flags/{flag_id}",s={},a={};return a.Accept=["application/json"],a["Content-Type"]=["application/json"],n=n.replace("{flag_id}",e.flagId),void 0===e.flagId?t.reject(new Error("Missing required parameter: flagId")):(s=i(e,s),this.request("PATCH",o+n,e,{},a,s,{},t)),t.promise},e.prototype.delete_flag=function(e){void 0===e&&(e={});var t=l.default.defer(),o=this.domain,n="/flags/{flag_id}",s={},a={};return a.Accept=["application/json"],a["Content-Type"]=["application/json"],n=n.replace("{flag_id}",e.flagId),void 0===e.flagId?t.reject(new Error("Missing required parameter: flagId")):(s=i(e,s),this.request("DELETE",o+n,e,{},a,s,{},t)),t.promise},e.prototype.get_flag=function(e){void 0===e&&(e={});var t=l.default.defer(),o=this.domain,n="/flags/{flag_id}",s={},a={};return a.Accept=["application/json"],a["Content-Type"]=["application/json"],n=n.replace("{flag_id}",e.flagId),void 0===e.flagId?t.reject(new Error("Missing required parameter: flagId")):(s=i(e,s),this.request("GET",o+n,e,{},a,s,{},t)),t.promise},e.prototype.post_hint_list=function(e){void 0===e&&(e={});var t=l.default.defer(),o=this.domain,n={},s={};return s.Accept=["application/json"],s["Content-Type"]=["application/json"],n=i(e,n),this.request("POST",o+"/hints",e,{},s,n,{},t),t.promise},e.prototype.get_hint_list=function(e){void 0===e&&(e={});var t=l.default.defer(),o=this.domain,n={},s={};return s.Accept=["application/json"],s["Content-Type"]=["application/json"],n=i(e,n),this.request("GET",o+"/hints",e,{},s,n,{},t),t.promise},e.prototype.patch_hint=function(e){void 0===e&&(e={});var t=l.default.defer(),o=this.domain,n="/hints/{hint_id}",s={},a={};return a.Accept=["application/json"],a["Content-Type"]=["application/json"],n=n.replace("{hint_id}",e.hintId),void 0===e.hintId?t.reject(new Error("Missing required parameter: hintId")):(s=i(e,s),this.request("PATCH",o+n,e,{},a,s,{},t)),t.promise},e.prototype.delete_hint=function(e){void 0===e&&(e={});var t=l.default.defer(),o=this.domain,n="/hints/{hint_id}",s={},a={};return a.Accept=["application/json"],a["Content-Type"]=["application/json"],n=n.replace("{hint_id}",e.hintId),void 0===e.hintId?t.reject(new Error("Missing required parameter: hintId")):(s=i(e,s),this.request("DELETE",o+n,e,{},a,s,{},t)),t.promise},e.prototype.get_hint=function(e){void 0===e&&(e={});var t=l.default.defer(),o=this.domain,n="/hints/{hint_id}",s={},a={};return a.Accept=["application/json"],a["Content-Type"]=["application/json"],n=n.replace("{hint_id}",e.hintId),void 0===e.hintId?t.reject(new Error("Missing required parameter: hintId")):(s=i(e,s),this.request("GET",o+n,e,{},a,s,{},t)),t.promise},e.prototype.post_notification_list=function(e){void 0===e&&(e={});var t=l.default.defer(),o=this.domain,n={},s={};return s.Accept=["application/json"],s["Content-Type"]=["application/json"],n=i(e,n),this.request("POST",o+"/notifications",e,{},s,n,{},t),t.promise},e.prototype.get_notification_list=function(e){void 0===e&&(e={});var t=l.default.defer(),o=this.domain,n={},s={};return s.Accept=["application/json"],s["Content-Type"]=["application/json"],n=i(e,n),this.request("GET",o+"/notifications",e,{},s,n,{},t),t.promise},e.prototype.delete_notification=function(e){void 0===e&&(e={});var t=l.default.defer(),o=this.domain,n="/notifications/{notification_id}",s={},a={};return a.Accept=["application/json"],a["Content-Type"]=["application/json"],n=n.replace("{notification_id}",e.notificationId),void 0===e.notificationId?t.reject(new Error("Missing required parameter: notificationId")):(s=i(e,s),this.request("DELETE",o+n,e,{},a,s,{},t)),t.promise},e.prototype.get_notification=function(e){void 0===e&&(e={});var t=l.default.defer(),o=this.domain,n="/notifications/{notification_id}",s={},a={};return a.Accept=["application/json"],a["Content-Type"]=["application/json"],n=n.replace("{notification_id}",e.notificationId),void 0===e.notificationId?t.reject(new Error("Missing required parameter: notificationId")):(s=i(e,s),this.request("GET",o+n,e,{},a,s,{},t)),t.promise},e.prototype.post_page_list=function(e){void 0===e&&(e={});var t=l.default.defer(),o=this.domain,n={},s={};return s.Accept=["application/json"],s["Content-Type"]=["application/json"],n=i(e,n),this.request("POST",o+"/pages",e,{},s,n,{},t),t.promise},e.prototype.get_page_list=function(e){void 0===e&&(e={});var t=l.default.defer(),o=this.domain,n={},s={};return s.Accept=["application/json"],s["Content-Type"]=["application/json"],n=i(e,n),this.request("GET",o+"/pages",e,{},s,n,{},t),t.promise},e.prototype.patch_page_detail=function(e){void 0===e&&(e={});var t=l.default.defer(),o=this.domain,n="/pages/{page_id}",s={},a={};return a.Accept=["application/json"],a["Content-Type"]=["application/json"],n=n.replace("{page_id}",e.pageId),void 0===e.pageId?t.reject(new Error("Missing required parameter: pageId")):(s=i(e,s),this.request("PATCH",o+n,e,{},a,s,{},t)),t.promise},e.prototype.delete_page_detail=function(e){void 0===e&&(e={});var t=l.default.defer(),o=this.domain,n="/pages/{page_id}",s={},a={};return a.Accept=["application/json"],a["Content-Type"]=["application/json"],n=n.replace("{page_id}",e.pageId),void 0===e.pageId?t.reject(new Error("Missing required parameter: pageId")):(s=i(e,s),this.request("DELETE",o+n,e,{},a,s,{},t)),t.promise},e.prototype.get_page_detail=function(e){void 0===e&&(e={});var t=l.default.defer(),o=this.domain,n="/pages/{page_id}",s={},a={};return a.Accept=["application/json"],a["Content-Type"]=["application/json"],n=n.replace("{page_id}",e.pageId),void 0===e.pageId?t.reject(new Error("Missing required parameter: pageId")):(s=i(e,s),this.request("GET",o+n,e,{},a,s,{},t)),t.promise},e.prototype.get_scoreboard_list=function(e){void 0===e&&(e={});var t=l.default.defer(),o=this.domain,n={},s={};return s.Accept=["application/json"],s["Content-Type"]=["application/json"],n=i(e,n),this.request("GET",o+"/scoreboard",e,{},s,n,{},t),t.promise},e.prototype.get_scoreboard_detail=function(e){void 0===e&&(e={});var t=l.default.defer(),o=this.domain,n="/scoreboard/top/{count}",s={},a={};return a.Accept=["application/json"],a["Content-Type"]=["application/json"],n=n.replace("{count}",e.count),void 0===e.count?t.reject(new Error("Missing required parameter: count")):(s=i(e,s),this.request("GET",o+n,e,{},a,s,{},t)),t.promise},e.prototype.get_challenge_solve_statistics=function(e){void 0===e&&(e={});var t=l.default.defer(),o=this.domain,n={},s={};return s.Accept=["application/json"],s["Content-Type"]=["application/json"],n=i(e,n),this.request("GET",o+"/statistics/challenges/solves",e,{},s,n,{},t),t.promise},e.prototype.get_challenge_solve_percentages=function(e){void 0===e&&(e={});var t=l.default.defer(),o=this.domain,n={},s={};return s.Accept=["application/json"],s["Content-Type"]=["application/json"],n=i(e,n),this.request("GET",o+"/statistics/challenges/solves/percentages",e,{},s,n,{},t),t.promise},e.prototype.get_challenge_property_counts=function(e){void 0===e&&(e={});var t=l.default.defer(),o=this.domain,n="/statistics/challenges/{column}",s={},a={};return a.Accept=["application/json"],a["Content-Type"]=["application/json"],n=n.replace("{column}",e.column),void 0===e.column?t.reject(new Error("Missing required parameter: column")):(s=i(e,s),this.request("GET",o+n,e,{},a,s,{},t)),t.promise},e.prototype.get_submission_property_counts=function(e){void 0===e&&(e={});var t=l.default.defer(),o=this.domain,n="/statistics/submissions/{column}",s={},a={};return a.Accept=["application/json"],a["Content-Type"]=["application/json"],n=n.replace("{column}",e.column),void 0===e.column?t.reject(new Error("Missing required parameter: column")):(s=i(e,s),this.request("GET",o+n,e,{},a,s,{},t)),t.promise},e.prototype.get_team_statistics=function(e){void 0===e&&(e={});var t=l.default.defer(),o=this.domain,n={},s={};return s.Accept=["application/json"],s["Content-Type"]=["application/json"],n=i(e,n),this.request("GET",o+"/statistics/teams",e,{},s,n,{},t),t.promise},e.prototype.get_user_statistics=function(e){void 0===e&&(e={});var t=l.default.defer(),o=this.domain,n={},s={};return s.Accept=["application/json"],s["Content-Type"]=["application/json"],n=i(e,n),this.request("GET",o+"/statistics/users",e,{},s,n,{},t),t.promise},e.prototype.get_user_property_counts=function(e){void 0===e&&(e={});var t=l.default.defer(),o=this.domain,n="/statistics/users/{column}",s={},a={};return a.Accept=["application/json"],a["Content-Type"]=["application/json"],n=n.replace("{column}",e.column),void 0===e.column?t.reject(new Error("Missing required parameter: column")):(s=i(e,s),this.request("GET",o+n,e,{},a,s,{},t)),t.promise},e.prototype.post_submissions_list=function(e){void 0===e&&(e={});var t=l.default.defer(),o=this.domain,n={},s={};return s.Accept=["application/json"],s["Content-Type"]=["application/json"],n=i(e,n),this.request("POST",o+"/submissions",e,{},s,n,{},t),t.promise},e.prototype.get_submissions_list=function(e){void 0===e&&(e={});var t=l.default.defer(),o=this.domain,n={},s={};return s.Accept=["application/json"],s["Content-Type"]=["application/json"],n=i(e,n),this.request("GET",o+"/submissions",e,{},s,n,{},t),t.promise},e.prototype.delete_submission=function(e){void 0===e&&(e={});var t=l.default.defer(),o=this.domain,n="/submissions/{submission_id}",s={},a={};return a.Accept=["application/json"],a["Content-Type"]=["application/json"],n=n.replace("{submission_id}",e.submissionId),void 0===e.submissionId?t.reject(new Error("Missing required parameter: submissionId")):(s=i(e,s),this.request("DELETE",o+n,e,{},a,s,{},t)),t.promise},e.prototype.get_submission=function(e){void 0===e&&(e={});var t=l.default.defer(),o=this.domain,n="/submissions/{submission_id}",s={},a={};return a.Accept=["application/json"],a["Content-Type"]=["application/json"],n=n.replace("{submission_id}",e.submissionId),void 0===e.submissionId?t.reject(new Error("Missing required parameter: submissionId")):(s=i(e,s),this.request("GET",o+n,e,{},a,s,{},t)),t.promise},e.prototype.post_tag_list=function(e){void 0===e&&(e={});var t=l.default.defer(),o=this.domain,n={},s={};return s.Accept=["application/json"],s["Content-Type"]=["application/json"],n=i(e,n),this.request("POST",o+"/tags",e,{},s,n,{},t),t.promise},e.prototype.get_tag_list=function(e){void 0===e&&(e={});var t=l.default.defer(),o=this.domain,n={},s={};return s.Accept=["application/json"],s["Content-Type"]=["application/json"],n=i(e,n),this.request("GET",o+"/tags",e,{},s,n,{},t),t.promise},e.prototype.patch_tag=function(e){void 0===e&&(e={});var t=l.default.defer(),o=this.domain,n="/tags/{tag_id}",s={},a={};return a.Accept=["application/json"],a["Content-Type"]=["application/json"],n=n.replace("{tag_id}",e.tagId),void 0===e.tagId?t.reject(new Error("Missing required parameter: tagId")):(s=i(e,s),this.request("PATCH",o+n,e,{},a,s,{},t)),t.promise},e.prototype.delete_tag=function(e){void 0===e&&(e={});var t=l.default.defer(),o=this.domain,n="/tags/{tag_id}",s={},a={};return a.Accept=["application/json"],a["Content-Type"]=["application/json"],n=n.replace("{tag_id}",e.tagId),void 0===e.tagId?t.reject(new Error("Missing required parameter: tagId")):(s=i(e,s),this.request("DELETE",o+n,e,{},a,s,{},t)),t.promise},e.prototype.get_tag=function(e){void 0===e&&(e={});var t=l.default.defer(),o=this.domain,n="/tags/{tag_id}",s={},a={};return a.Accept=["application/json"],a["Content-Type"]=["application/json"],n=n.replace("{tag_id}",e.tagId),void 0===e.tagId?t.reject(new Error("Missing required parameter: tagId")):(s=i(e,s),this.request("GET",o+n,e,{},a,s,{},t)),t.promise},e.prototype.post_team_list=function(e){void 0===e&&(e={});var t=l.default.defer(),o=this.domain,n={},s={};return s.Accept=["application/json"],s["Content-Type"]=["application/json"],n=i(e,n),this.request("POST",o+"/teams",e,{},s,n,{},t),t.promise},e.prototype.get_team_list=function(e){void 0===e&&(e={});var t=l.default.defer(),o=this.domain,n={},s={};return s.Accept=["application/json"],s["Content-Type"]=["application/json"],n=i(e,n),this.request("GET",o+"/teams",e,{},s,n,{},t),t.promise},e.prototype.patch_team_private=function(e){void 0===e&&(e={});var t=l.default.defer(),o=this.domain,n={},s={};return s.Accept=["application/json"],s["Content-Type"]=["application/json"],void 0!==e.teamId&&(n.team_id=e.teamId),n=i(e,n),this.request("PATCH",o+"/teams/me",e,{},s,n,{},t),t.promise},e.prototype.get_team_private=function(e){void 0===e&&(e={});var t=l.default.defer(),o=this.domain,n={},s={};return s.Accept=["application/json"],s["Content-Type"]=["application/json"],void 0!==e.teamId&&(n.team_id=e.teamId),n=i(e,n),this.request("GET",o+"/teams/me",e,{},s,n,{},t),t.promise},e.prototype.patch_team_public=function(e){void 0===e&&(e={});var t=l.default.defer(),o=this.domain,n="/teams/{team_id}",s={},a={};return a.Accept=["application/json"],a["Content-Type"]=["application/json"],n=n.replace("{team_id}",e.teamId),void 0===e.teamId?t.reject(new Error("Missing required parameter: teamId")):(s=i(e,s),this.request("PATCH",o+n,e,{},a,s,{},t)),t.promise},e.prototype.delete_team_public=function(e){void 0===e&&(e={});var t=l.default.defer(),o=this.domain,n="/teams/{team_id}",s={},a={};return a.Accept=["application/json"],a["Content-Type"]=["application/json"],n=n.replace("{team_id}",e.teamId),void 0===e.teamId?t.reject(new Error("Missing required parameter: teamId")):(s=i(e,s),this.request("DELETE",o+n,e,{},a,s,{},t)),t.promise},e.prototype.get_team_public=function(e){void 0===e&&(e={});var t=l.default.defer(),o=this.domain,n="/teams/{team_id}",s={},a={};return a.Accept=["application/json"],a["Content-Type"]=["application/json"],n=n.replace("{team_id}",e.teamId),void 0===e.teamId?t.reject(new Error("Missing required parameter: teamId")):(s=i(e,s),this.request("GET",o+n,e,{},a,s,{},t)),t.promise},e.prototype.get_team_awards=function(e){void 0===e&&(e={});var t=l.default.defer(),o=this.domain,n="/teams/{team_id}/awards",s={},a={};return a.Accept=["application/json"],a["Content-Type"]=["application/json"],n=n.replace("{team_id}",e.teamId),void 0===e.teamId?t.reject(new Error("Missing required parameter: teamId")):(s=i(e,s),this.request("GET",o+n,e,{},a,s,{},t)),t.promise},e.prototype.get_team_fails=function(e){void 0===e&&(e={});var t=l.default.defer(),o=this.domain,n="/teams/{team_id}/fails",s={},a={};return a.Accept=["application/json"],a["Content-Type"]=["application/json"],n=n.replace("{team_id}",e.teamId),void 0===e.teamId?t.reject(new Error("Missing required parameter: teamId")):(s=i(e,s),this.request("GET",o+n,e,{},a,s,{},t)),t.promise},e.prototype.get_team_solves=function(e){void 0===e&&(e={});var t=l.default.defer(),o=this.domain,n="/teams/{team_id}/solves",s={},a={};return a.Accept=["application/json"],a["Content-Type"]=["application/json"],n=n.replace("{team_id}",e.teamId),void 0===e.teamId?t.reject(new Error("Missing required parameter: teamId")):(s=i(e,s),this.request("GET",o+n,e,{},a,s,{},t)),t.promise},e.prototype.post_unlock_list=function(e){void 0===e&&(e={});var t=l.default.defer(),o=this.domain,n={},s={};return s.Accept=["application/json"],s["Content-Type"]=["application/json"],n=i(e,n),this.request("POST",o+"/unlocks",e,{},s,n,{},t),t.promise},e.prototype.get_unlock_list=function(e){void 0===e&&(e={});var t=l.default.defer(),o=this.domain,n={},s={};return s.Accept=["application/json"],s["Content-Type"]=["application/json"],n=i(e,n),this.request("GET",o+"/unlocks",e,{},s,n,{},t),t.promise},e.prototype.post_user_list=function(e){void 0===e&&(e={});var t=l.default.defer(),o=this.domain,n={},s={};return s.Accept=["application/json"],s["Content-Type"]=["application/json"],n=i(e,n),this.request("POST",o+"/users",e,{},s,n,{},t),t.promise},e.prototype.get_user_list=function(e){void 0===e&&(e={});var t=l.default.defer(),o=this.domain,n={},s={};return s.Accept=["application/json"],s["Content-Type"]=["application/json"],n=i(e,n),this.request("GET",o+"/users",e,{},s,n,{},t),t.promise},e.prototype.patch_user_private=function(e){void 0===e&&(e={});var t=l.default.defer(),o=this.domain,n={},s={};return s.Accept=["application/json"],s["Content-Type"]=["application/json"],n=i(e,n),this.request("PATCH",o+"/users/me",e,{},s,n,{},t),t.promise},e.prototype.get_user_private=function(e){void 0===e&&(e={});var t=l.default.defer(),o=this.domain,n={},s={};return s.Accept=["application/json"],s["Content-Type"]=["application/json"],n=i(e,n),this.request("GET",o+"/users/me",e,{},s,n,{},t),t.promise},e.prototype.patch_user_public=function(e){void 0===e&&(e={});var t=l.default.defer(),o=this.domain,n="/users/{user_id}",s={},a={};return a.Accept=["application/json"],a["Content-Type"]=["application/json"],n=n.replace("{user_id}",e.userId),void 0===e.userId?t.reject(new Error("Missing required parameter: userId")):(s=i(e,s),this.request("PATCH",o+n,e,{},a,s,{},t)),t.promise},e.prototype.delete_user_public=function(e){void 0===e&&(e={});var t=l.default.defer(),o=this.domain,n="/users/{user_id}",s={},a={};return a.Accept=["application/json"],a["Content-Type"]=["application/json"],n=n.replace("{user_id}",e.userId),void 0===e.userId?t.reject(new Error("Missing required parameter: userId")):(s=i(e,s),this.request("DELETE",o+n,e,{},a,s,{},t)),t.promise},e.prototype.get_user_public=function(e){void 0===e&&(e={});var t=l.default.defer(),o=this.domain,n="/users/{user_id}",s={},a={};return a.Accept=["application/json"],a["Content-Type"]=["application/json"],n=n.replace("{user_id}",e.userId),void 0===e.userId?t.reject(new Error("Missing required parameter: userId")):(s=i(e,s),this.request("GET",o+n,e,{},a,s,{},t)),t.promise},e.prototype.get_user_awards=function(e){void 0===e&&(e={});var t=l.default.defer(),o=this.domain,n="/users/{user_id}/awards",s={},a={};return a.Accept=["application/json"],a["Content-Type"]=["application/json"],n=n.replace("{user_id}",e.userId),void 0===e.userId?t.reject(new Error("Missing required parameter: userId")):(s=i(e,s),this.request("GET",o+n,e,{},a,s,{},t)),t.promise},e.prototype.get_user_fails=function(e){void 0===e&&(e={});var t=l.default.defer(),o=this.domain,n="/users/{user_id}/fails",s={},a={};return a.Accept=["application/json"],a["Content-Type"]=["application/json"],n=n.replace("{user_id}",e.userId),void 0===e.userId?t.reject(new Error("Missing required parameter: userId")):(s=i(e,s),this.request("GET",o+n,e,{},a,s,{},t)),t.promise},e.prototype.get_user_solves=function(e){void 0===e&&(e={});var t=l.default.defer(),o=this.domain,n="/users/{user_id}/solves",s={},a={};return a.Accept=["application/json"],a["Content-Type"]=["application/json"],n=n.replace("{user_id}",e.userId),void 0===e.userId?t.reject(new Error("Missing required parameter: userId")):(s=i(e,s),this.request("GET",o+n,e,{},a,s,{},t)),t.promise},e}();t.API=a},"./CTFd/themes/core/assets/js/config.js":function(e,t,o){Object.defineProperty(t,"__esModule",{value:!0}),t.default=void 0;t.default={urlRoot:"",csrfNonce:"",userMode:""}},"./CTFd/themes/core/assets/js/events.js":function(e,t,o){Object.defineProperty(t,"__esModule",{value:!0}),t.default=void 0;var a=o("./node_modules/howler/dist/howler.js"),n=o("./node_modules/event-source-polyfill/src/eventsource.js"),i=o("./CTFd/themes/core/assets/js/ezq.js"),l=o("./CTFd/themes/core/assets/js/utils.js"),r=n.NativeEventSource||n.EventSourcePolyfill;t.default=function(e){var t=new r(e+"/events"),o=new l.WindowController,n=new a.Howl({src:[e+"/themes/core/static/sounds/notification.webm",e+"/themes/core/static/sounds/notification.mp3"]});function s(e){switch(e.type){case"toast":(0,l.inc_notification_counter)();var t=50<e.content.length?e.content.substring(0,47)+"...":e.content,o=!1;(0,i.ezToast)({title:e.title,body:t,onclick:function(){(0,i.ezAlert)({title:e.title,body:e.content,button:"Got it!",success:function(){o=!0,(0,l.dec_notification_counter)()}})},onclose:function(){o||(0,l.dec_notification_counter)()}});break;case"alert":(0,l.inc_notification_counter)(),(0,i.ezAlert)({title:e.title,body:e.content,button:"Got it!",success:function(){(0,l.dec_notification_counter)()}});break;case"background":default:(0,l.inc_notification_counter)()}e.sound&&n.play()}(0,l.init_notification_counter)(),o.notification=function(e){s(e)},o.masterDidChange=function(){this.isMaster?t.addEventListener("notification",function(e){var t=JSON.parse(e.data);o.broadcast("notification",t),s(t)},!1):t&&t.close()}}},"./CTFd/themes/core/assets/js/ezq.js":function(e,t,o){Object.defineProperty(t,"__esModule",{value:!0}),t.ezAlert=p,t.ezToast=f,t.ezQuery=j,t.ezProgressBar=h,t.ezBadge=_,t.default=void 0,o("./node_modules/bootstrap/js/dist/modal.js");var n,l=(n=o("./node_modules/jquery/dist/jquery.js"))&&n.__esModule?n:{default:n};var a='<div class="modal fade" tabindex="-1" role="dialog"> <div class="modal-dialog" role="document"> <div class="modal-content"> <div class="modal-header"> <h5 class="modal-title">{0}</h5> <button type="button" class="close" data-dismiss="modal" aria-label="Close"> <span aria-hidden="true">&times;</span> </button> </div> <div class="modal-body"> </div> <div class="modal-footer"> </div> </div> </div></div>',r='<div class="toast m-3" role="alert"> <div class="toast-header"> <strong class="mr-auto">{0}</strong> <button type="button" class="ml-2 mb-1 close" data-dismiss="toast" aria-label="Close"> <span aria-hidden="true">&times;</span> </button> </div> <div class="toast-body">{1}</div></div>',i='<div class="progress"> <div class="progress-bar progress-bar-success progress-bar-striped progress-bar-animated" role="progressbar" style="width: {0}%"> </div></div>',s='<div class="alert alert-danger alert-dismissable" role="alert">\n <span class="sr-only">Error:</span>\n {0}\n <button type="button" class="close" data-dismiss="alert" aria-label="Close"><span aria-hidden="true">×</span></button>\n</div>',d='<div class="alert alert-success alert-dismissable submit-row" role="alert">\n <strong>Success!</strong>\n {0}\n <button type="button" class="close" data-dismiss="alert" aria-label="Close"><span aria-hidden="true">×</span></button>\n</div>',c='<button type="button" class="btn btn-primary" data-dismiss="modal">{0}</button>',u='<button type="button" class="btn btn-danger" data-dismiss="modal">No</button>',m='<button type="button" class="btn btn-primary" data-dismiss="modal">Yes</button>';function p(e){var t=a.format(e.title),o=(0,l.default)(t);"string"==typeof e.body?o.find(".modal-body").append("<p>".concat(e.body,"</p>")):o.find(".modal-body").append((0,l.default)(e.body));var n=(0,l.default)(c.format(e.button));return e.success&&(0,l.default)(n).click(function(){e.success()}),e.large&&o.find(".modal-dialog").addClass("modal-lg"),o.find(".modal-footer").append(n),(0,l.default)("main").append(o),o.modal("show"),(0,l.default)(o).on("hidden.bs.modal",function(){(0,l.default)(this).modal("dispose")}),o}function f(e){(0,l.default)("#ezq--notifications-toast-container").length||(0,l.default)("body").append((0,l.default)("<div/>").attr({id:"ezq--notifications-toast-container"}).css({position:"fixed",bottom:"0",right:"0","min-width":"20%"}));var t=r.format(e.title,e.body),o=(0,l.default)(t);if(e.onclose&&(0,l.default)(o).find("button[data-dismiss=toast]").click(function(){e.onclose()}),e.onclick){var n=(0,l.default)(o).find(".toast-body");n.addClass("cursor-pointer"),n.click(function(){e.onclick()})}var s=!1!==e.autohide,a=!1!==e.animation,i=e.delay||1e4;return(0,l.default)("#ezq--notifications-toast-container").prepend(o),o.toast({autohide:s,delay:i,animation:a}),o.toast("show"),o}function j(e){var t=a.format(e.title),o=(0,l.default)(t);"string"==typeof e.body?o.find(".modal-body").append("<p>".concat(e.body,"</p>")):o.find(".modal-body").append((0,l.default)(e.body));var n=(0,l.default)(m),s=(0,l.default)(u);return o.find(".modal-footer").append(s),o.find(".modal-footer").append(n),(0,l.default)("main").append(o),(0,l.default)(o).on("hidden.bs.modal",function(){(0,l.default)(this).modal("dispose")}),(0,l.default)(n).click(function(){e.success()}),o.modal("show"),o}function h(e){if(e.target){var t=(0,l.default)(e.target);return t.find(".progress-bar").css("width",e.width+"%"),t}var o=i.format(e.width),n=a.format(e.title),s=(0,l.default)(n);return s.find(".modal-body").append((0,l.default)(o)),(0,l.default)("main").append(s),s.modal("show")}function _(e){var t={success:d,error:s}[e.type].format(e.body);return(0,l.default)(t)}var g={ezAlert:p,ezToast:f,ezQuery:j,ezProgressBar:h,ezBadge:_};t.default=g},"./CTFd/themes/core/assets/js/fetch.js":function(e,t,o){Object.defineProperty(t,"__esModule",{value:!0}),t.default=void 0,o("./node_modules/whatwg-fetch/fetch.js");var n,s=(n=o("./CTFd/themes/core/assets/js/config.js"))&&n.__esModule?n:{default:n};var a=window.fetch;t.default=function(e,t){return void 0===t&&(t={method:"GET",credentials:"same-origin",headers:{}}),e=s.default.urlRoot+e,void 0===t.headers&&(t.headers={}),t.credentials="same-origin",t.headers.Accept="application/json",t.headers["Content-Type"]="application/json",t.headers["CSRF-Token"]=s.default.csrfNonce,a(e,t)}},"./CTFd/themes/core/assets/js/pages/challenges.js":function(e,t,o){o("./CTFd/themes/core/assets/js/pages/main.js"),o("./node_modules/bootstrap/js/dist/tab.js");var n=o("./CTFd/themes/core/assets/js/ezq.js"),r=o("./CTFd/themes/core/assets/js/utils.js"),d=i(o("./node_modules/moment/moment.js")),_=i(o("./node_modules/jquery/dist/jquery.js")),a=i(o("./CTFd/themes/core/assets/js/CTFd.js")),s=i(o("./CTFd/themes/core/assets/js/config.js"));function i(e){return e&&e.__esModule?e:{default:e}}var l={teams:function(e){return a.default.api.get_team_solves({teamId:e})},users:function(e){return a.default.api.get_user_solves({userId:e})}},c=a.default.lib.markdown();a.default._internal.challenge={};var g=[],v=[],y=function(t){var e=_.default.grep(g,function(e){return e.id==t})[0];"hidden"!==e.type?u(e):(0,n.ezAlert)({title:"Challenge Hidden!",body:"You haven't unlocked this challenge yet!",button:"Got it!"})},u=function(n){return Promise.all([a.default.api.get_challenge({challengeId:n.id}),_.default.getScript(s.default.urlRoot+n.script),_.default.get(s.default.urlRoot+n.template)]).then(function(e){var t=a.default._internal.challenge;(0,_.default)("#challenge-window").empty(),t.data=e[0].data,t.preRender(),(0,_.default)("#challenge-window").append(e[0].data.view),(0,_.default)("#challenge-window #challenge-input").addClass("form-control"),(0,_.default)("#challenge-window #challenge-submit").addClass("btn btn-md btn-outline-secondary float-right");var o=(0,_.default)("#challenge-window").find(".modal-dialog");if(window.init.theme_settings&&window.init.theme_settings.challenge_window_size)switch(window.init.theme_settings.challenge_window_size){case"sm":o.addClass("modal-sm");break;case"lg":o.addClass("modal-lg");break;case"xl":o.addClass("modal-xl")}(0,_.default)(".challenge-solves").click(function(e){b((0,_.default)("#challenge-id").val())}),(0,_.default)(".nav-tabs a").click(function(e){e.preventDefault(),(0,_.default)(this).tab("show")}),(0,_.default)("#challenge-window").on("hide.bs.modal",function(e){(0,_.default)("#challenge-input").removeClass("wrong"),(0,_.default)("#challenge-input").removeClass("correct"),(0,_.default)("#incorrect-key").slideUp(),(0,_.default)("#correct-key").slideUp(),(0,_.default)("#already-solved").slideUp(),(0,_.default)("#too-fast").slideUp()}),(0,_.default)(".load-hint").on("click",function(e){T((0,_.default)(this).data("hint-id"))}),(0,_.default)("#challenge-submit").click(function(e){e.preventDefault(),(0,_.default)("#challenge-submit").addClass("disabled-button"),(0,_.default)("#challenge-submit").prop("disabled",!0),a.default._internal.challenge.submit().then(m).then(f).then(p)}),(0,_.default)("#challenge-input").keyup(function(e){13==e.keyCode&&(0,_.default)("#challenge-submit").click()}),t.postRender(),window.location.replace(window.location.href.split("#")[0]+"#".concat(n.name,"-").concat(n.id)),(0,_.default)("#challenge-window").modal()})};function m(e){var t=e.data,o=(0,_.default)("#result-message"),n=(0,_.default)("#result-notification"),s=(0,_.default)("#challenge-input");n.removeClass(),o.text(t.message),"authentication_required"!==t.status?("incorrect"===t.status?(n.addClass("alert alert-danger alert-dismissable text-center"),n.slideDown(),s.removeClass("correct"),s.addClass("wrong"),setTimeout(function(){s.removeClass("wrong")},3e3)):"correct"===t.status?(n.addClass("alert alert-success alert-dismissable text-center"),n.slideDown(),(0,_.default)(".challenge-solves").text(parseInt((0,_.default)(".challenge-solves").text().split(" ")[0])+1+" Solves"),s.val(""),s.removeClass("wrong"),s.addClass("correct")):"already_solved"===t.status?(n.addClass("alert alert-info alert-dismissable text-center"),n.slideDown(),s.addClass("correct")):"paused"===t.status?(n.addClass("alert alert-warning alert-dismissable text-center"),n.slideDown()):"ratelimited"===t.status&&(n.addClass("alert alert-warning alert-dismissable text-center"),n.slideDown(),s.addClass("too-fast"),setTimeout(function(){s.removeClass("too-fast")},3e3)),setTimeout(function(){(0,_.default)(".alert").slideUp(),(0,_.default)("#challenge-submit").removeClass("disabled-button"),(0,_.default)("#challenge-submit").prop("disabled",!1)},3e3)):window.location=a.default.config.urlRoot+"/login?next="+a.default.config.urlRoot+window.location.pathname+window.location.hash}function p(){return l[a.default.config.userMode]("me").then(function(e){for(var t=e.data,o=t.length-1;0<=o;o--){var n=(0,_.default)('button[value="'+t[o].challenge_id+'"]');n.addClass("solved-challenge"),n.prepend("<i class='fas fa-check corner-button-check'></i>")}})}function b(e){return a.default.api.get_challenge_solves({challengeId:e}).then(function(e){var t=e.data;(0,_.default)(".challenge-solves").text(parseInt(t.length)+" Solves");var o=(0,_.default)("#challenge-solves-names");o.empty();for(var n=0;n<t.length;n++){var s=t[n].account_id,a=t[n].name,i=(0,d.default)(t[n].date).local().fromNow(),l=t[n].account_url;o.append('<tr><td><a href="{0}">{2}</td><td>{3}</td></tr>'.format(l,s,(0,r.htmlEntities)(a),i))}})}function f(){return a.default.api.get_challenge_list().then(function(e){var t=[],o=(0,_.default)("#challenges-board");g=e.data,o.empty();for(var n=g.length-1;0<=n;n--)if(g[n].solves=0,-1==_.default.inArray(g[n].category,t)){var s=g[n].category;t.push(s);var a=s.replace(/ /g,"-").hashCode(),i=(0,_.default)('<div id="{0}-row" class="pt-5">'.format(a)+'<div class="category-header col-md-12 mb-3"></div><div class="category-challenges col-md-12"><div class="challenges-row col-md-12"></div></div></div>');i.find(".category-header").append((0,_.default)("<h3>"+s+"</h3>")),o.append(i)}for(var l=0;l<=g.length-1;l++){var r=g[l],d=r.name.replace(/ /g,"-").hashCode(),c=r.category.replace(/ /g,"-").hashCode(),u=(0,_.default)("<div id='{0}' class='col-md-3 d-inline-block'></div>".format(d)),m=void 0;m=-1==v.indexOf(r.id)?(0,_.default)("<button class='btn btn-dark challenge-button w-100 text-truncate pt-3 pb-3 mb-2' value='{0}'></button>".format(r.id)):(0,_.default)("<button class='btn btn-dark challenge-button solved-challenge w-100 text-truncate pt-3 pb-3 mb-2' value='{0}'><i class='fas fa-check corner-button-check'></i></button>".format(r.id));for(var p=(0,_.default)("<p>{0}</p>".format(r.name)),f=(0,_.default)("<span>{0}</span>".format(r.value)),j=0;j<r.tags.length;j++){var h="tag-"+r.tags[j].value.replace(/ /g,"-");u.addClass(h)}m.append(p),m.append(f),u.append(m),(0,_.default)("#"+c+"-row").find(".category-challenges > .challenges-row").append(u)}(0,_.default)(".challenge-button").click(function(e){y(this.value),b(this.value)})})}function j(){return(0==a.default.user.id?Promise.resolve():l[a.default.config.userMode]("me").then(function(e){for(var t=e.data,o=t.length-1;0<=o;o--){var n=t[o].challenge_id;t.push(n)}})).then(f).then(p)}(0,_.default)(function(){j().then(function(){0<window.location.hash.length&&function(e){var t=e.lastIndexOf("-"),o=[e.slice(0,t),e.slice(t+1)][1],n=_.default.grep(g,function(e){return e.id==o})[0];u(n)}(decodeURIComponent(window.location.hash.substring(1)))}),(0,_.default)("#challenge-input").keyup(function(e){13==e.keyCode&&(0,_.default)("#challenge-submit").click()}),(0,_.default)(".nav-tabs a").click(function(e){e.preventDefault(),(0,_.default)(this).tab("show")}),(0,_.default)("#challenge-window").on("hidden.bs.modal",function(e){(0,_.default)(".nav-tabs a:first").tab("show"),history.replaceState("",window.document.title,window.location.pathname)}),(0,_.default)(".challenge-solves").click(function(e){b((0,_.default)("#challenge-id").val())}),(0,_.default)("#challenge-window").on("hide.bs.modal",function(e){(0,_.default)("#challenge-input").removeClass("wrong"),(0,_.default)("#challenge-input").removeClass("correct"),(0,_.default)("#incorrect-key").slideUp(),(0,_.default)("#correct-key").slideUp(),(0,_.default)("#already-solved").slideUp(),(0,_.default)("#too-fast").slideUp()})}),setInterval(j,3e5);function h(e){(0,n.ezAlert)({title:"Hint",body:c.render(e.content),button:"Got it!"})}var T=function(t){a.default.api.get_hint({hintId:t}).then(function(e){e.data.content?h(e.data):function(t){(0,n.ezQuery)({title:"Unlock Hint?",body:"Are you sure you want to open this hint?",success:function(){var e={target:t,type:"hints"};a.default.api.post_unlock_list({},e).then(function(e){e.success?a.default.api.get_hint({hintId:t}).then(function(e){h(e.data)}):(0,n.ezAlert)({title:"Error",body:c.render(e.errors.score),button:"Got it!"})})}})}(t)})}},"./CTFd/themes/core/assets/js/pages/main.js":function(e,t,o){var n=p(o("./CTFd/themes/core/assets/js/CTFd.js")),s=p(o("./node_modules/jquery/dist/jquery.js")),a=p(o("./node_modules/moment/moment.js")),i=p(o("./node_modules/nunjucks/browser/nunjucks.js")),l=o("./node_modules/howler/dist/howler.js"),r=p(o("./CTFd/themes/core/assets/js/events.js")),d=p(o("./CTFd/themes/core/assets/js/config.js")),c=p(o("./CTFd/themes/core/assets/js/styles.js")),u=p(o("./CTFd/themes/core/assets/js/times.js")),m=p(o("./CTFd/themes/core/assets/js/helpers.js"));function p(e){return e&&e.__esModule?e:{default:e}}n.default.init(window.init),window.CTFd=n.default,window.helpers=m.default,window.$=s.default,window.Moment=a.default,window.nunjucks=i.default,window.Howl=l.Howl,(0,s.default)(function(){(0,c.default)(),(0,u.default)(),(0,r.default)(d.default.urlRoot)})},"./CTFd/themes/core/assets/js/patch.js":function(e,t,o){var n,l=(n=o("./node_modules/q/q.js"))&&n.__esModule?n:{default:n},s=o("./CTFd/themes/core/assets/js/api.js");function a(e,t,o){return t in e?Object.defineProperty(e,t,{value:o,enumerable:!0,configurable:!0,writable:!0}):e[t]=o,e}function r(e,t){return function(t){for(var e=1;e<arguments.length;e++)if(e%2){var o=null!=arguments[e]?arguments[e]:{},n=Object.keys(o);"function"==typeof Object.getOwnPropertySymbols&&(n=n.concat(Object.getOwnPropertySymbols(o).filter(function(e){return Object.getOwnPropertyDescriptor(o,e).enumerable}))),n.forEach(function(e){a(t,e,o[e])})}else Object.defineProperties(t,Object.getOwnPropertyDescriptors(arguments[e]));return t}({},e,{},t)}s.API.prototype.requestRaw=function(e,t,o,n,s,a,i,l){var r=a&&Object.keys(a).length?function(e){var t=[];for(var o in e)e.hasOwnProperty(o)&&t.push(encodeURIComponent(o)+"="+encodeURIComponent(e[o]));return t.join("&")}(a):null,d=t+(r?"?"+r:"");n&&!Object.keys(n).length&&(n=void 0),fetch(d,{method:e,headers:s,body:n}).then(function(e){return e.json()}).then(function(e){l.resolve(e)}).catch(function(e){l.reject(e)})},s.API.prototype.patch_user_public=function(e,t){void 0===e&&(e={});var o=l.default.defer(),n=this.domain,s="/users/{user_id}",a={};return a.Accept=["application/json"],a["Content-Type"]=["application/json"],s=s.replace("{user_id}",e.userId),void 0===e.userId?o.reject(new Error("Missing required parameter: userId")):this.request("PATCH",n+s,e,t,a,{},{},o),o.promise},s.API.prototype.patch_user_private=function(e,t){void 0===e&&(e={});var o=l.default.defer(),n=this.domain,s={};return s.Accept=["application/json"],s["Content-Type"]=["application/json"],this.request("PATCH",n+"/users/me",e,t,s,{},{},o),o.promise},s.API.prototype.post_unlock_list=function(e,t){var o=l.default.defer(),n=this.domain,s={};return s.Accept=["application/json"],s["Content-Type"]=["application/json"],this.request("POST",n+"/unlocks",e,t,s,{},{},o),o.promise},s.API.prototype.post_notification_list=function(e,t){void 0===e&&(e={});var o=l.default.defer(),n=this.domain,s={};return s.Accept=["application/json"],s["Content-Type"]=["application/json"],this.request("POST",n+"/notifications",e,t,s,{},{},o),o.promise},s.API.prototype.post_files_list=function(e,t){var o=l.default.defer(),n=this.domain,s={};return s.Accept=["application/json"],s["Content-Type"]=["application/json"],this.requestRaw("POST",n+"/files",e,t,s,{},{},o),o.promise},s.API.prototype.patch_config=function(e,t){void 0===e&&(e={});var o=l.default.defer(),n=this.domain,s="/configs/{config_key}",a={};return a.Accept=["application/json"],a["Content-Type"]=["application/json"],s=s.replace("{config_key}",e.configKey),void 0===e.configKey?o.reject(new Error("Missing required parameter: configKey")):this.request("PATCH",n+s,e,t,a,{},{},o),o.promise},s.API.prototype.patch_config_list=function(e,t){void 0===e&&(e={});var o=l.default.defer(),n=this.domain,s={},a={};return a.Accept=["application/json"],a["Content-Type"]=["application/json"],s=r(e,s),this.request("PATCH",n+"/configs",e,t,a,s,{},o),o.promise},s.API.prototype.post_tag_list=function(e,t){void 0===e&&(e={});var o=l.default.defer(),n=this.domain,s={},a={};return a.Accept=["application/json"],a["Content-Type"]=["application/json"],s=r(e,s),this.request("POST",n+"/tags",e,t,a,s,{},o),o.promise},s.API.prototype.patch_team_public=function(e,t){void 0===e&&(e={});var o=l.default.defer(),n=this.domain,s="/teams/{team_id}",a={},i={};return i.Accept=["application/json"],i["Content-Type"]=["application/json"],s=s.replace("{team_id}",e.teamId),void 0===e.teamId?o.reject(new Error("Missing required parameter: teamId")):(a=r(e,a),this.request("PATCH",n+s,e,t,i,a,{},o)),o.promise},s.API.prototype.post_challenge_attempt=function(e,t){void 0===e&&(e={});var o=l.default.defer(),n=this.domain,s={},a={};return a.Accept=["application/json"],a["Content-Type"]=["application/json"],s=r(e,s),this.request("POST",n+"/challenges/attempt",e,t,a,s,{},o),o.promise},s.API.prototype.get_hint=function(e){void 0===e&&(e={});var t=l.default.defer(),o=this.domain,n="/hints/{hint_id}",s={},a={};return a.Accept=["application/json"],a["Content-Type"]=["application/json"],n=n.replace("{hint_id}",e.hintId),void 0===e.hintId?t.reject(new Error("Missing required parameter: hintId")):(delete e.hintId,s=r(e,s),this.request("GET",o+n,e,{},a,s,{},t)),t.promise}},"./CTFd/themes/core/assets/js/styles.js":function(e,t,o){Object.defineProperty(t,"__esModule",{value:!0}),t.default=void 0,o("./node_modules/bootstrap/dist/js/bootstrap.bundle.js");var n,s=(n=o("./node_modules/jquery/dist/jquery.js"))&&n.__esModule?n:{default:n};t.default=function(){(0,s.default)(":input").each(function(){(0,s.default)(this).data("initial",(0,s.default)(this).val())}),(0,s.default)(".form-control").bind({focus:function(){(0,s.default)(this).removeClass("input-filled-invalid"),(0,s.default)(this).addClass("input-filled-valid")},blur:function(){""===(0,s.default)(this).val()&&((0,s.default)(this).removeClass("input-filled-invalid"),(0,s.default)(this).removeClass("input-filled-valid"))}}),(0,s.default)(".form-control").each(function(){(0,s.default)(this).val()&&(0,s.default)(this).addClass("input-filled-valid")}),(0,s.default)(".page-select").change(function(){var e=new URL(window.location);e.searchParams.set("page",this.value),window.location.href=e.toString()}),(0,s.default)('[data-toggle="tooltip"]').tooltip()}},"./CTFd/themes/core/assets/js/times.js":function(e,t,o){Object.defineProperty(t,"__esModule",{value:!0}),t.default=void 0;var n=a(o("./node_modules/moment/moment.js")),s=a(o("./node_modules/jquery/dist/jquery.js"));function a(e){return e&&e.__esModule?e:{default:e}}t.default=function(){(0,s.default)("[data-time]").each(function(e,t){t.innerText=(0,n.default)((0,s.default)(t).data("time")).local().format("MMMM Do, h:mm:ss A")})}},"./CTFd/themes/core/assets/js/utils.js":function(e,t,o){Object.defineProperty(t,"__esModule",{value:!0}),t.WindowController=s,t.colorHash=function(e){for(var t=0,o=0;o<e.length;o++)t=e.charCodeAt(o)+((t<<5)-t);for(var n="#",s=0;s<3;s++){n+=("00"+(t>>4*s&255).toString(16)).substr(-2)}return n},t.htmlEntities=function(e){return(0,i.default)("<div/>").text(e).html()},t.cumulativeSum=function(e){for(var t=e.concat(),o=0;o<e.length;o++)t[o]=e.slice(0,o+1).reduce(function(e,t){return e+t});return t},t.init_notification_counter=function(){var e=a.getItem(l);null===e?a.setItem(l,0):0<e&&(0,i.default)(".badge-notification").text(e)},t.set_notification_counter=function(e){a.setItem(l,e)},t.inc_notification_counter=function(){var e=a.getItem(l)||0;a.setItem(l,++e),(0,i.default)(".badge-notification").text(e)},t.dec_notification_counter=function(){var e=a.getItem(l)||0;0<e&&(a.setItem(l,--e),(0,i.default)(".badge-notification").text(e));0==e&&r()},t.clear_notification_counter=r,t.copyToClipboard=function(e,t){(0,i.default)(t).select(),document.execCommand("copy"),(0,i.default)(e.target).tooltip({title:"Copied!",trigger:"manual"}),(0,i.default)(e.target).tooltip("show"),setTimeout(function(){(0,i.default)(e.target).tooltip("hide")},1500)},t.makeSortableTables=function(){function a(e,t){return(0,i.default)(e).children("td").eq(t).text()}(0,i.default)("th.sort-col").append(' <i class="fas fa-sort"></i>'),(0,i.default)("th.sort-col").click(function(){var e=(0,i.default)(this).parents("table").eq(0),t=e.find("tr:gt(0)").toArray().sort(function(s){return function(e,t){var o=a(e,s),n=a(t,s);return i.default.isNumeric(o)&&i.default.isNumeric(n)?o-n:o.toString().localeCompare(n)}}((0,i.default)(this).index()));this.asc=!this.asc,this.asc||(t=t.reverse());for(var o=0;o<t.length;o++)e.append(t[o])})};var n,i=(n=o("./node_modules/jquery/dist/jquery.js"))&&n.__esModule?n:{default:n};function s(){this.id=Math.random(),this.isMaster=!1,this.others={},window.addEventListener("storage",this,!1),window.addEventListener("unload",this,!1),this.broadcast("hello");var t=this;this._checkTimeout=setTimeout(function e(){t.check(),t._checkTimeout=setTimeout(e,9e3)},500),this._pingTimeout=setTimeout(function e(){t.sendPing(),t._pingTimeout=setTimeout(e,17e3)},17e3)}i.default.fn.serializeJSON=function(o){var n={},s=(0,i.default)(this),e=s.serializeArray();return(e=(e=e.concat(s.find("input[type=checkbox]:checked").map(function(){return{name:this.name,value:!0}}).get())).concat(s.find("input[type=checkbox]:not(:checked)").map(function(){return{name:this.name,value:!1}}).get())).map(function(e){if(o)if(null!==e.value&&""!==e.value)n[e.name]=e.value;else{var t=s.find(":input[name=".concat(e.name,"]"));t.data("initial")!==t.val()&&(n[e.name]=e.value)}else n[e.name]=e.value}),n},String.prototype.format=String.prototype.f=function(){for(var e=this,t=arguments.length;t--;)e=e.replace(new RegExp("\\{"+t+"\\}","gm"),arguments[t]);return e},String.prototype.hashCode=function(){var e,t,o=0;if(0==this.length)return o;for(e=0,t=this.length;e<t;e++)o=(o<<5)-o+this.charCodeAt(e),o|=0;return o},s.prototype.destroy=function(){clearTimeout(this._pingTimeout),clearTimeout(this._checkTimeout),window.removeEventListener("storage",this,!1),window.removeEventListener("unload",this,!1),this.broadcast("bye")},s.prototype.handleEvent=function(e){if("unload"===e.type)this.destroy();else if("broadcast"===e.key)try{var t=JSON.parse(e.newValue);t.id!==this.id&&this[t.type](t)}catch(e){}},s.prototype.sendPing=function(){this.broadcast("ping")},s.prototype.hello=function(e){this.ping(e),e.id<this.id?this.check():this.sendPing()},s.prototype.ping=function(e){this.others[e.id]=+new Date},s.prototype.bye=function(e){delete this.others[e.id],this.check()},s.prototype.check=function(e){var t,o=+new Date,n=!0;for(t in this.others)this.others[t]+23e3<o?delete this.others[t]:t<this.id&&(n=!1);this.isMaster!==n&&(this.isMaster=n,this.masterDidChange())},s.prototype.masterDidChange=function(){},s.prototype.broadcast=function(e,t){var o={id:this.id,type:e};for(var n in t)o[n]=t[n];try{localStorage.setItem("broadcast",JSON.stringify(o))}catch(e){console.log(e)}};var a=window.localStorage,l="unread_notifications";function r(){a.setItem(l,0),(0,i.default)(".badge-notification").empty()}},"./node_modules/moment/locale sync recursive ^\\.\\/.*$":function(e,t,o){var n={"./af":"./node_modules/moment/locale/af.js","./af.js":"./node_modules/moment/locale/af.js","./ar":"./node_modules/moment/locale/ar.js","./ar-dz":"./node_modules/moment/locale/ar-dz.js","./ar-dz.js":"./node_modules/moment/locale/ar-dz.js","./ar-kw":"./node_modules/moment/locale/ar-kw.js","./ar-kw.js":"./node_modules/moment/locale/ar-kw.js","./ar-ly":"./node_modules/moment/locale/ar-ly.js","./ar-ly.js":"./node_modules/moment/locale/ar-ly.js","./ar-ma":"./node_modules/moment/locale/ar-ma.js","./ar-ma.js":"./node_modules/moment/locale/ar-ma.js","./ar-sa":"./node_modules/moment/locale/ar-sa.js","./ar-sa.js":"./node_modules/moment/locale/ar-sa.js","./ar-tn":"./node_modules/moment/locale/ar-tn.js","./ar-tn.js":"./node_modules/moment/locale/ar-tn.js","./ar.js":"./node_modules/moment/locale/ar.js","./az":"./node_modules/moment/locale/az.js","./az.js":"./node_modules/moment/locale/az.js","./be":"./node_modules/moment/locale/be.js","./be.js":"./node_modules/moment/locale/be.js","./bg":"./node_modules/moment/locale/bg.js","./bg.js":"./node_modules/moment/locale/bg.js","./bm":"./node_modules/moment/locale/bm.js","./bm.js":"./node_modules/moment/locale/bm.js","./bn":"./node_modules/moment/locale/bn.js","./bn.js":"./node_modules/moment/locale/bn.js","./bo":"./node_modules/moment/locale/bo.js","./bo.js":"./node_modules/moment/locale/bo.js","./br":"./node_modules/moment/locale/br.js","./br.js":"./node_modules/moment/locale/br.js","./bs":"./node_modules/moment/locale/bs.js","./bs.js":"./node_modules/moment/locale/bs.js","./ca":"./node_modules/moment/locale/ca.js","./ca.js":"./node_modules/moment/locale/ca.js","./cs":"./node_modules/moment/locale/cs.js","./cs.js":"./node_modules/moment/locale/cs.js","./cv":"./node_modules/moment/locale/cv.js","./cv.js":"./node_modules/moment/locale/cv.js","./cy":"./node_modules/moment/locale/cy.js","./cy.js":"./node_modules/moment/locale/cy.js","./da":"./node_modules/moment/locale/da.js","./da.js":"./node_modules/moment/locale/da.js","./de":"./node_modules/moment/locale/de.js","./de-at":"./node_modules/moment/locale/de-at.js","./de-at.js":"./node_modules/moment/locale/de-at.js","./de-ch":"./node_modules/moment/locale/de-ch.js","./de-ch.js":"./node_modules/moment/locale/de-ch.js","./de.js":"./node_modules/moment/locale/de.js","./dv":"./node_modules/moment/locale/dv.js","./dv.js":"./node_modules/moment/locale/dv.js","./el":"./node_modules/moment/locale/el.js","./el.js":"./node_modules/moment/locale/el.js","./en-SG":"./node_modules/moment/locale/en-SG.js","./en-SG.js":"./node_modules/moment/locale/en-SG.js","./en-au":"./node_modules/moment/locale/en-au.js","./en-au.js":"./node_modules/moment/locale/en-au.js","./en-ca":"./node_modules/moment/locale/en-ca.js","./en-ca.js":"./node_modules/moment/locale/en-ca.js","./en-gb":"./node_modules/moment/locale/en-gb.js","./en-gb.js":"./node_modules/moment/locale/en-gb.js","./en-ie":"./node_modules/moment/locale/en-ie.js","./en-ie.js":"./node_modules/moment/locale/en-ie.js","./en-il":"./node_modules/moment/locale/en-il.js","./en-il.js":"./node_modules/moment/locale/en-il.js","./en-nz":"./node_modules/moment/locale/en-nz.js","./en-nz.js":"./node_modules/moment/locale/en-nz.js","./eo":"./node_modules/moment/locale/eo.js","./eo.js":"./node_modules/moment/locale/eo.js","./es":"./node_modules/moment/locale/es.js","./es-do":"./node_modules/moment/locale/es-do.js","./es-do.js":"./node_modules/moment/locale/es-do.js","./es-us":"./node_modules/moment/locale/es-us.js","./es-us.js":"./node_modules/moment/locale/es-us.js","./es.js":"./node_modules/moment/locale/es.js","./et":"./node_modules/moment/locale/et.js","./et.js":"./node_modules/moment/locale/et.js","./eu":"./node_modules/moment/locale/eu.js","./eu.js":"./node_modules/moment/locale/eu.js","./fa":"./node_modules/moment/locale/fa.js","./fa.js":"./node_modules/moment/locale/fa.js","./fi":"./node_modules/moment/locale/fi.js","./fi.js":"./node_modules/moment/locale/fi.js","./fo":"./node_modules/moment/locale/fo.js","./fo.js":"./node_modules/moment/locale/fo.js","./fr":"./node_modules/moment/locale/fr.js","./fr-ca":"./node_modules/moment/locale/fr-ca.js","./fr-ca.js":"./node_modules/moment/locale/fr-ca.js","./fr-ch":"./node_modules/moment/locale/fr-ch.js","./fr-ch.js":"./node_modules/moment/locale/fr-ch.js","./fr.js":"./node_modules/moment/locale/fr.js","./fy":"./node_modules/moment/locale/fy.js","./fy.js":"./node_modules/moment/locale/fy.js","./ga":"./node_modules/moment/locale/ga.js","./ga.js":"./node_modules/moment/locale/ga.js","./gd":"./node_modules/moment/locale/gd.js","./gd.js":"./node_modules/moment/locale/gd.js","./gl":"./node_modules/moment/locale/gl.js","./gl.js":"./node_modules/moment/locale/gl.js","./gom-latn":"./node_modules/moment/locale/gom-latn.js","./gom-latn.js":"./node_modules/moment/locale/gom-latn.js","./gu":"./node_modules/moment/locale/gu.js","./gu.js":"./node_modules/moment/locale/gu.js","./he":"./node_modules/moment/locale/he.js","./he.js":"./node_modules/moment/locale/he.js","./hi":"./node_modules/moment/locale/hi.js","./hi.js":"./node_modules/moment/locale/hi.js","./hr":"./node_modules/moment/locale/hr.js","./hr.js":"./node_modules/moment/locale/hr.js","./hu":"./node_modules/moment/locale/hu.js","./hu.js":"./node_modules/moment/locale/hu.js","./hy-am":"./node_modules/moment/locale/hy-am.js","./hy-am.js":"./node_modules/moment/locale/hy-am.js","./id":"./node_modules/moment/locale/id.js","./id.js":"./node_modules/moment/locale/id.js","./is":"./node_modules/moment/locale/is.js","./is.js":"./node_modules/moment/locale/is.js","./it":"./node_modules/moment/locale/it.js","./it-ch":"./node_modules/moment/locale/it-ch.js","./it-ch.js":"./node_modules/moment/locale/it-ch.js","./it.js":"./node_modules/moment/locale/it.js","./ja":"./node_modules/moment/locale/ja.js","./ja.js":"./node_modules/moment/locale/ja.js","./jv":"./node_modules/moment/locale/jv.js","./jv.js":"./node_modules/moment/locale/jv.js","./ka":"./node_modules/moment/locale/ka.js","./ka.js":"./node_modules/moment/locale/ka.js","./kk":"./node_modules/moment/locale/kk.js","./kk.js":"./node_modules/moment/locale/kk.js","./km":"./node_modules/moment/locale/km.js","./km.js":"./node_modules/moment/locale/km.js","./kn":"./node_modules/moment/locale/kn.js","./kn.js":"./node_modules/moment/locale/kn.js","./ko":"./node_modules/moment/locale/ko.js","./ko.js":"./node_modules/moment/locale/ko.js","./ku":"./node_modules/moment/locale/ku.js","./ku.js":"./node_modules/moment/locale/ku.js","./ky":"./node_modules/moment/locale/ky.js","./ky.js":"./node_modules/moment/locale/ky.js","./lb":"./node_modules/moment/locale/lb.js","./lb.js":"./node_modules/moment/locale/lb.js","./lo":"./node_modules/moment/locale/lo.js","./lo.js":"./node_modules/moment/locale/lo.js","./lt":"./node_modules/moment/locale/lt.js","./lt.js":"./node_modules/moment/locale/lt.js","./lv":"./node_modules/moment/locale/lv.js","./lv.js":"./node_modules/moment/locale/lv.js","./me":"./node_modules/moment/locale/me.js","./me.js":"./node_modules/moment/locale/me.js","./mi":"./node_modules/moment/locale/mi.js","./mi.js":"./node_modules/moment/locale/mi.js","./mk":"./node_modules/moment/locale/mk.js","./mk.js":"./node_modules/moment/locale/mk.js","./ml":"./node_modules/moment/locale/ml.js","./ml.js":"./node_modules/moment/locale/ml.js","./mn":"./node_modules/moment/locale/mn.js","./mn.js":"./node_modules/moment/locale/mn.js","./mr":"./node_modules/moment/locale/mr.js","./mr.js":"./node_modules/moment/locale/mr.js","./ms":"./node_modules/moment/locale/ms.js","./ms-my":"./node_modules/moment/locale/ms-my.js","./ms-my.js":"./node_modules/moment/locale/ms-my.js","./ms.js":"./node_modules/moment/locale/ms.js","./mt":"./node_modules/moment/locale/mt.js","./mt.js":"./node_modules/moment/locale/mt.js","./my":"./node_modules/moment/locale/my.js","./my.js":"./node_modules/moment/locale/my.js","./nb":"./node_modules/moment/locale/nb.js","./nb.js":"./node_modules/moment/locale/nb.js","./ne":"./node_modules/moment/locale/ne.js","./ne.js":"./node_modules/moment/locale/ne.js","./nl":"./node_modules/moment/locale/nl.js","./nl-be":"./node_modules/moment/locale/nl-be.js","./nl-be.js":"./node_modules/moment/locale/nl-be.js","./nl.js":"./node_modules/moment/locale/nl.js","./nn":"./node_modules/moment/locale/nn.js","./nn.js":"./node_modules/moment/locale/nn.js","./pa-in":"./node_modules/moment/locale/pa-in.js","./pa-in.js":"./node_modules/moment/locale/pa-in.js","./pl":"./node_modules/moment/locale/pl.js","./pl.js":"./node_modules/moment/locale/pl.js","./pt":"./node_modules/moment/locale/pt.js","./pt-br":"./node_modules/moment/locale/pt-br.js","./pt-br.js":"./node_modules/moment/locale/pt-br.js","./pt.js":"./node_modules/moment/locale/pt.js","./ro":"./node_modules/moment/locale/ro.js","./ro.js":"./node_modules/moment/locale/ro.js","./ru":"./node_modules/moment/locale/ru.js","./ru.js":"./node_modules/moment/locale/ru.js","./sd":"./node_modules/moment/locale/sd.js","./sd.js":"./node_modules/moment/locale/sd.js","./se":"./node_modules/moment/locale/se.js","./se.js":"./node_modules/moment/locale/se.js","./si":"./node_modules/moment/locale/si.js","./si.js":"./node_modules/moment/locale/si.js","./sk":"./node_modules/moment/locale/sk.js","./sk.js":"./node_modules/moment/locale/sk.js","./sl":"./node_modules/moment/locale/sl.js","./sl.js":"./node_modules/moment/locale/sl.js","./sq":"./node_modules/moment/locale/sq.js","./sq.js":"./node_modules/moment/locale/sq.js","./sr":"./node_modules/moment/locale/sr.js","./sr-cyrl":"./node_modules/moment/locale/sr-cyrl.js","./sr-cyrl.js":"./node_modules/moment/locale/sr-cyrl.js","./sr.js":"./node_modules/moment/locale/sr.js","./ss":"./node_modules/moment/locale/ss.js","./ss.js":"./node_modules/moment/locale/ss.js","./sv":"./node_modules/moment/locale/sv.js","./sv.js":"./node_modules/moment/locale/sv.js","./sw":"./node_modules/moment/locale/sw.js","./sw.js":"./node_modules/moment/locale/sw.js","./ta":"./node_modules/moment/locale/ta.js","./ta.js":"./node_modules/moment/locale/ta.js","./te":"./node_modules/moment/locale/te.js","./te.js":"./node_modules/moment/locale/te.js","./tet":"./node_modules/moment/locale/tet.js","./tet.js":"./node_modules/moment/locale/tet.js","./tg":"./node_modules/moment/locale/tg.js","./tg.js":"./node_modules/moment/locale/tg.js","./th":"./node_modules/moment/locale/th.js","./th.js":"./node_modules/moment/locale/th.js","./tl-ph":"./node_modules/moment/locale/tl-ph.js","./tl-ph.js":"./node_modules/moment/locale/tl-ph.js","./tlh":"./node_modules/moment/locale/tlh.js","./tlh.js":"./node_modules/moment/locale/tlh.js","./tr":"./node_modules/moment/locale/tr.js","./tr.js":"./node_modules/moment/locale/tr.js","./tzl":"./node_modules/moment/locale/tzl.js","./tzl.js":"./node_modules/moment/locale/tzl.js","./tzm":"./node_modules/moment/locale/tzm.js","./tzm-latn":"./node_modules/moment/locale/tzm-latn.js","./tzm-latn.js":"./node_modules/moment/locale/tzm-latn.js","./tzm.js":"./node_modules/moment/locale/tzm.js","./ug-cn":"./node_modules/moment/locale/ug-cn.js","./ug-cn.js":"./node_modules/moment/locale/ug-cn.js","./uk":"./node_modules/moment/locale/uk.js","./uk.js":"./node_modules/moment/locale/uk.js","./ur":"./node_modules/moment/locale/ur.js","./ur.js":"./node_modules/moment/locale/ur.js","./uz":"./node_modules/moment/locale/uz.js","./uz-latn":"./node_modules/moment/locale/uz-latn.js","./uz-latn.js":"./node_modules/moment/locale/uz-latn.js","./uz.js":"./node_modules/moment/locale/uz.js","./vi":"./node_modules/moment/locale/vi.js","./vi.js":"./node_modules/moment/locale/vi.js","./x-pseudo":"./node_modules/moment/locale/x-pseudo.js","./x-pseudo.js":"./node_modules/moment/locale/x-pseudo.js","./yo":"./node_modules/moment/locale/yo.js","./yo.js":"./node_modules/moment/locale/yo.js","./zh-cn":"./node_modules/moment/locale/zh-cn.js","./zh-cn.js":"./node_modules/moment/locale/zh-cn.js","./zh-hk":"./node_modules/moment/locale/zh-hk.js","./zh-hk.js":"./node_modules/moment/locale/zh-hk.js","./zh-tw":"./node_modules/moment/locale/zh-tw.js","./zh-tw.js":"./node_modules/moment/locale/zh-tw.js"};function s(e){var t=a(e);return o(t)}function a(e){var t=n[e];if(t+1)return t;var o=new Error("Cannot find module '"+e+"'");throw o.code="MODULE_NOT_FOUND",o}s.keys=function(){return Object.keys(n)},s.resolve=a,(e.exports=s).id="./node_modules/moment/locale sync recursive ^\\.\\/.*$"}}); \ No newline at end of file diff --git a/Makefile b/Makefile index f8a9fdea4..2fe7d6450 100644 --- a/Makefile +++ b/Makefile @@ -12,7 +12,9 @@ format: prettier --write '**/*.md' test: - pytest -rf --cov=CTFd --cov-context=test --ignore=node_modules/ \ + pytest -rf --cov=CTFd --cov-context=test \ + --ignore-glob="**/node_modules/" \ + --ignore=node_modules/ \ -W ignore::sqlalchemy.exc.SADeprecationWarning \ -W ignore::sqlalchemy.exc.SAWarning \ -n auto diff --git a/development.txt b/development.txt index 0885c19e8..39d368738 100644 --- a/development.txt +++ b/development.txt @@ -14,6 +14,7 @@ pytest-xdist==1.32.0 pytest-cov==2.9.0 sphinx_rtd_theme==0.4.3 flask-debugtoolbar==0.11.0 +isort==4.3.21 flake8-isort==3.0.0 Faker==4.1.0 pipdeptree==0.13.2 diff --git a/tests/api/v1/test_challenges.py b/tests/api/v1/test_challenges.py index 31dbed8e2..fd88cd0e7 100644 --- a/tests/api/v1/test_challenges.py +++ b/tests/api/v1/test_challenges.py @@ -244,6 +244,21 @@ def test_api_challenge_get_visibility_private(): destroy_ctfd(app) +def test_api_challenge_get_with_admin_only_account_visibility(): + """Can a private user get /api/v1/challenges/<challenge_id> if account_visibility is admins_only""" + app = create_ctfd() + with app.app_context(): + gen_challenge(app.db) + register_user(app) + client = login_as_user(app) + r = client.get("/api/v1/challenges/1") + assert r.status_code == 200 + set_config("account_visibility", "admins") + r = client.get("/api/v1/challenges/1") + assert r.status_code == 200 + destroy_ctfd(app) + + def test_api_challenge_get_ctftime_private(): """Can a private user get /api/v1/challenges/<challenge_id> if ctftime is over""" app = create_ctfd()
mne-tools__mne-python-3718
ENH?: News / updates It seems like we should have a little news/updates section of one-liners on the website, including things like: 1. Release notifications 2. Upcoming MNE-Python workshops 3. Upcoming coding sprints If people agree I can put some old ones (last couple of release dates), and we can add to it as announcement-worthy things come up.
[ { "content": "#!/usr/bin/env python\n\"\"\"Parse google scholar -> rst for MNE citations.\n\nExample usage::\n\n $ cited_mne --backend selenium --clear\n\n\"\"\"\n\n# Author: Mainak Jas <[email protected]>\n# License : BSD 3-clause\n\n# Parts of this code were copied from google_scholar_parser\n# (https://github.com/carlosp420/google_scholar_parser)\n\nimport os\nimport os.path as op\nimport re\nimport time\nimport random\nimport requests\n\nimport numpy as np\nfrom joblib import Memory\nfrom BeautifulSoup import BeautifulSoup\n\nfrom mne.externals.tempita import Template\nfrom mne.commands.utils import get_optparser\n\n# cache to avoid making too many calls to Google Scholar\ncachedir = 'cachedir'\nif not os.path.exists(cachedir):\n os.mkdir(cachedir)\nmem = Memory(cachedir=cachedir, verbose=2)\n\nUA = ('Mozilla/5.0 (X11; U; FreeBSD i386; en-US; rv:1.9.2.9) '\n 'Gecko/20100913 Firefox/3.6.9')\n\n# ##### Templates for citations #####\nhtml = (u\"\"\".. _cited\n\nPublications from MNE users\n===========================\n\nPapers citing MNE as extracted from Google Scholar (on %s).\n\n\"\"\")\n\ncite_template = Template(u\"\"\"\n{{for ii, publication in enumerate(publications)}}\n{{ii + 1}}. {{publication}}.\n{{endfor}}\n\n\"\"\")\n\n\ndef parse_soup_page(soup):\n \"\"\"Parse the page using BeautifulSoup.\n\n Parameters\n ----------\n soup : instance of BeautifulSoup\n The page to be parsed.\n\n Returns\n -------\n titles : list\n The article titles.\n authors : list\n The name of the authors.\n links : list\n Hyperlinks to the articles.\n \"\"\"\n titles, authors, links = list(), list(), list()\n for div in soup.findAll('div'):\n if div.name == \"div\" and div.get('class') == \"gs_ri\":\n links.append(div.a['href'])\n div_pub = div.findAll('div')\n for d in div_pub:\n if d.name == 'div' and d.get('class') == 'gs_a':\n authors.append(d.text)\n titles.append(div.a.text)\n return titles, authors, links\n\n\ndef get_total_citations(soup):\n \"\"\"Get total citations.\"\"\"\n results = soup.find('div', attrs={'id': 'gs_ab_md'}).contents[0]\n matches = re.search(\"About\\s(\\d+)\\s\", results)\n if matches:\n hits = matches.groups()[0]\n return hits\n\n\ndef _get_soup(url, backend='selenium'):\n \"\"\"Get BeautifulSoup object from url.\n\n Parameters\n ----------\n url : str\n The url to fetch.\n backend : 'selenium' | 'requests'\n Use selenium by default because google can ask for captcha. For\n 'selenium' backend Firefox must be installed.\n\n Returns\n -------\n soup : instance of BeautifulSoup\n The soup page from the url.\n \"\"\"\n if backend == 'requests':\n req = requests.get(url, headers={'User-Agent': UA})\n html_doc = req.text\n soup = BeautifulSoup(html_doc)\n if soup.find('div', attrs={'id': 'gs_ab_md'}) is None:\n print('Falling back on to selenium backend due to captcha.')\n backend = 'selenium'\n\n if backend == 'selenium':\n from selenium import webdriver\n import selenium.webdriver.support.ui as ui\n\n driver = webdriver.Firefox()\n # give enough time to solve captcha.\n wait = ui.WebDriverWait(driver, 200)\n\n driver.get(url)\n wait.until(lambda driver: driver.find_elements_by_id('gs_ab_md'))\n\n html_doc = driver.page_source\n soup = BeautifulSoup(html_doc)\n driver.close()\n\n return soup\n\n\[email protected]\ndef get_citing_articles(cites_url, backend):\n \"\"\"Get the citing articles.\n\n Parameters\n ----------\n cites_url: str\n A citation url from Google Scholar.\n backend : 'selenium' | 'requests'\n Use selenium by default because google can ask for captcha. For\n 'selenium' backend Firefox must be installed.\n\n\n Returns\n -------\n titles : list\n The article titles.\n authors : list\n The name of the authors.\n links : list\n Hyperlinks to the articles.\n \"\"\"\n n = random.random() * 5\n time.sleep(n)\n print(\"\\nSleeping: {0} seconds\".format(n))\n\n # GS seems to allow only 20 hits per page!\n cites_url += \"&num=20\"\n soup = _get_soup(cites_url, backend=backend)\n hits = get_total_citations(soup)\n print(\"Got a total of {0} citations\".format(hits))\n\n hits = int(hits)\n index = 0\n titles, authors, links = list(), list(), list()\n while hits > 1:\n n = random.random() * 2\n time.sleep(n)\n if index > 0:\n url = cites_url + \"&start=\" + str(index)\n else:\n url = cites_url\n index += 20\n hits -= 20\n print(\"{0} more citations to process\".format(hits))\n soup = soup = _get_soup(url, backend=backend)\n title, author, link = parse_soup_page(soup)\n for this_title, this_author, this_link in zip(title, author, link):\n titles.append(this_title)\n authors.append(this_author)\n links.append(this_link)\n\n return titles, authors, links\n\nif __name__ == '__main__':\n parser = get_optparser(__file__)\n parser.add_option(\"-c\", \"--clear\", dest=\"clear\", action='store_true',\n help=\"if True, clear the cache.\", default=False)\n parser.add_option(\"-b\", \"--backend\", dest=\"backend\",\n help=\"backend for parsing (selenium | requests)\",\n default='requests')\n options, args = parser.parse_args()\n backend, clear = options.backend, options.clear\n\n if clear:\n mem.clear()\n\n random.seed()\n gen_date = time.strftime(\"%B %d, %Y\")\n html = html % gen_date\n\n url_tails = ['1521584321377182930', '12188330066413208874']\n papers = ['MEG and EEG data analysis with MNE-Python',\n 'MNE software for processing MEG and EEG data']\n\n publications = list()\n for url_tail, paper in zip(url_tails, papers):\n titles, authors, links = get_citing_articles(\n 'https://scholar.google.co.in/scholar?cites=%s'\n % url_tail, backend=backend)\n\n this_publication = list()\n for ii in range(len(titles)):\n pub = '`%s. <%s>`_. %s' % (titles[ii], links[ii], authors[ii])\n this_publication.append(pub)\n\n this_publication = [p.encode('utf8') for p in this_publication]\n publications.append(this_publication)\n\n # get a union of the citations for the two papers, sorted in\n # alphabetic order\n publications = np.union1d(publications[1], publications[0]).tolist()\n\n # sort by year of publication\n years = list()\n for pub in publications:\n m = re.search('\\d{4} -', pub)\n if m is None:\n years.append(-1)\n else:\n years.append(int(m.group(0)[:-2]))\n order = np.argsort(years)[::-1]\n publications = [publications[idx] for idx in order]\n\n # filter out publications not containing (http://, https://, ftp://)\n publications = [p for p in publications if\n any(sub in p for sub in ('http://', 'https://', 'ftp://'))]\n\n # create rst & cleanup\n this_html = cite_template.substitute(publications=publications)\n this_html = this_html.replace('&hellip;', '...')\n html += this_html\n\n # output an rst file\n with open(op.join('..', 'cited.rst'), 'w') as f:\n f.write(html.encode('utf8'))\n", "path": "doc/sphinxext/cited_mne.py" } ]
[ { "content": "#!/usr/bin/env python\n\"\"\"Parse google scholar -> rst for MNE citations.\n\nExample usage::\n\n $ cited_mne --backend selenium --clear\n\n\"\"\"\n\n# Author: Mainak Jas <[email protected]>\n# License : BSD 3-clause\n\n# Parts of this code were copied from google_scholar_parser\n# (https://github.com/carlosp420/google_scholar_parser)\n\nimport os\nimport os.path as op\nimport re\nimport time\nimport random\nimport requests\n\nimport numpy as np\nfrom joblib import Memory\nfrom BeautifulSoup import BeautifulSoup\n\nfrom mne.externals.tempita import Template\nfrom mne.commands.utils import get_optparser\n\n# cache to avoid making too many calls to Google Scholar\ncachedir = 'cachedir'\nif not os.path.exists(cachedir):\n os.mkdir(cachedir)\nmem = Memory(cachedir=cachedir, verbose=2)\n\nUA = ('Mozilla/5.0 (X11; U; FreeBSD i386; en-US; rv:1.9.2.9) '\n 'Gecko/20100913 Firefox/3.6.9')\n\n# ##### Templates for citations #####\nhtml = (u\"\"\".. _cited\n\nPublications by users\n=====================\n\nPapers citing MNE as extracted from Google Scholar (on %s).\n\n\"\"\")\n\ncite_template = Template(u\"\"\"\n{{for ii, publication in enumerate(publications)}}\n{{ii + 1}}. {{publication}}.\n{{endfor}}\n\n\"\"\")\n\n\ndef parse_soup_page(soup):\n \"\"\"Parse the page using BeautifulSoup.\n\n Parameters\n ----------\n soup : instance of BeautifulSoup\n The page to be parsed.\n\n Returns\n -------\n titles : list\n The article titles.\n authors : list\n The name of the authors.\n links : list\n Hyperlinks to the articles.\n \"\"\"\n titles, authors, links = list(), list(), list()\n for div in soup.findAll('div'):\n if div.name == \"div\" and div.get('class') == \"gs_ri\":\n links.append(div.a['href'])\n div_pub = div.findAll('div')\n for d in div_pub:\n if d.name == 'div' and d.get('class') == 'gs_a':\n authors.append(d.text)\n titles.append(div.a.text)\n return titles, authors, links\n\n\ndef get_total_citations(soup):\n \"\"\"Get total citations.\"\"\"\n results = soup.find('div', attrs={'id': 'gs_ab_md'}).contents[0]\n matches = re.search(\"About\\s(\\d+)\\s\", results)\n if matches:\n hits = matches.groups()[0]\n return hits\n\n\ndef _get_soup(url, backend='selenium'):\n \"\"\"Get BeautifulSoup object from url.\n\n Parameters\n ----------\n url : str\n The url to fetch.\n backend : 'selenium' | 'requests'\n Use selenium by default because google can ask for captcha. For\n 'selenium' backend Firefox must be installed.\n\n Returns\n -------\n soup : instance of BeautifulSoup\n The soup page from the url.\n \"\"\"\n if backend == 'requests':\n req = requests.get(url, headers={'User-Agent': UA})\n html_doc = req.text\n soup = BeautifulSoup(html_doc)\n if soup.find('div', attrs={'id': 'gs_ab_md'}) is None:\n print('Falling back on to selenium backend due to captcha.')\n backend = 'selenium'\n\n if backend == 'selenium':\n from selenium import webdriver\n import selenium.webdriver.support.ui as ui\n\n driver = webdriver.Firefox()\n # give enough time to solve captcha.\n wait = ui.WebDriverWait(driver, 200)\n\n driver.get(url)\n wait.until(lambda driver: driver.find_elements_by_id('gs_ab_md'))\n\n html_doc = driver.page_source\n soup = BeautifulSoup(html_doc)\n driver.close()\n\n return soup\n\n\[email protected]\ndef get_citing_articles(cites_url, backend):\n \"\"\"Get the citing articles.\n\n Parameters\n ----------\n cites_url: str\n A citation url from Google Scholar.\n backend : 'selenium' | 'requests'\n Use selenium by default because google can ask for captcha. For\n 'selenium' backend Firefox must be installed.\n\n\n Returns\n -------\n titles : list\n The article titles.\n authors : list\n The name of the authors.\n links : list\n Hyperlinks to the articles.\n \"\"\"\n n = random.random() * 5\n time.sleep(n)\n print(\"\\nSleeping: {0} seconds\".format(n))\n\n # GS seems to allow only 20 hits per page!\n cites_url += \"&num=20\"\n soup = _get_soup(cites_url, backend=backend)\n hits = get_total_citations(soup)\n print(\"Got a total of {0} citations\".format(hits))\n\n hits = int(hits)\n index = 0\n titles, authors, links = list(), list(), list()\n while hits > 1:\n n = random.random() * 2\n time.sleep(n)\n if index > 0:\n url = cites_url + \"&start=\" + str(index)\n else:\n url = cites_url\n index += 20\n hits -= 20\n print(\"{0} more citations to process\".format(hits))\n soup = soup = _get_soup(url, backend=backend)\n title, author, link = parse_soup_page(soup)\n for this_title, this_author, this_link in zip(title, author, link):\n titles.append(this_title)\n authors.append(this_author)\n links.append(this_link)\n\n return titles, authors, links\n\nif __name__ == '__main__':\n parser = get_optparser(__file__)\n parser.add_option(\"-c\", \"--clear\", dest=\"clear\", action='store_true',\n help=\"if True, clear the cache.\", default=False)\n parser.add_option(\"-b\", \"--backend\", dest=\"backend\",\n help=\"backend for parsing (selenium | requests)\",\n default='requests')\n options, args = parser.parse_args()\n backend, clear = options.backend, options.clear\n\n if clear:\n mem.clear()\n\n random.seed()\n gen_date = time.strftime(\"%B %d, %Y\")\n html = html % gen_date\n\n url_tails = ['1521584321377182930', '12188330066413208874']\n papers = ['MEG and EEG data analysis with MNE-Python',\n 'MNE software for processing MEG and EEG data']\n\n publications = list()\n for url_tail, paper in zip(url_tails, papers):\n titles, authors, links = get_citing_articles(\n 'https://scholar.google.co.in/scholar?cites=%s'\n % url_tail, backend=backend)\n\n this_publication = list()\n for ii in range(len(titles)):\n pub = '`%s. <%s>`_. %s' % (titles[ii], links[ii], authors[ii])\n this_publication.append(pub)\n\n this_publication = [p.encode('utf8') for p in this_publication]\n publications.append(this_publication)\n\n # get a union of the citations for the two papers, sorted in\n # alphabetic order\n publications = np.union1d(publications[1], publications[0]).tolist()\n\n # sort by year of publication\n years = list()\n for pub in publications:\n m = re.search('\\d{4} -', pub)\n if m is None:\n years.append(-1)\n else:\n years.append(int(m.group(0)[:-2]))\n order = np.argsort(years)[::-1]\n publications = [publications[idx] for idx in order]\n\n # filter out publications not containing (http://, https://, ftp://)\n publications = [p for p in publications if\n any(sub in p for sub in ('http://', 'https://', 'ftp://'))]\n\n # create rst & cleanup\n this_html = cite_template.substitute(publications=publications)\n this_html = this_html.replace('&hellip;', '...')\n html += this_html\n\n # output an rst file\n with open(op.join('..', 'cited.rst'), 'w') as f:\n f.write(html.encode('utf8'))\n", "path": "doc/sphinxext/cited_mne.py" } ]
diff --git a/doc/_templates/layout.html b/doc/_templates/layout.html index 274a91c295d..88dd326385c 100755 --- a/doc/_templates/layout.html +++ b/doc/_templates/layout.html @@ -5,7 +5,7 @@ {% block extrahead %} -<link href='http://fonts.googleapis.com/css?family=Open+Sans:400italic,700italic,400,700' rel='stylesheet' type='text/css'> +<link href='https://fonts.googleapis.com/css?family=Open+Sans:400italic,700italic,400,700' rel='stylesheet' type='text/css'> {% if use_google_analytics|tobool %} <script type="text/javascript"> @@ -24,7 +24,7 @@ {% if (use_twitter|tobool) or (use_media_buttons|tobool) %} <script type="text/javascript"> !function(d,s,id){var js,fjs=d.getElementsByTagName(s)[0];if(!d.getElementById(id)){js=d.createElement(s); - js.id=id;js.src="http://platform.twitter.com/widgets.js"; + js.id=id;js.src="https://platform.twitter.com/widgets.js"; fjs.parentNode.insertBefore(js,fjs);}}(document,"script","twitter-wjs"); </script> {% endif %} @@ -54,4 +54,4 @@ {# put the sidebar before the body #} {% block sidebar1 %}{{ sidebar() }}{% endblock %} -{% block sidebar2 %}{% endblock %} \ No newline at end of file +{% block sidebar2 %}{% endblock %} diff --git a/doc/index.rst b/doc/index.rst index 54e8454b22f..158b6330461 100644 --- a/doc/index.rst +++ b/doc/index.rst @@ -82,28 +82,13 @@ .. raw:: html - <script type="text/javascript" src="http://www.ohloh.net/p/586838/widgets/project_basic_stats.js"></script> + <script type="text/javascript" src="https://www.ohloh.net/p/586838/widgets/project_basic_stats.js"></script> .. container:: col-md-4 .. raw:: html - <h2>Documentation</h2> - - - :ref:`get_started` - - :ref:`tutorials` - - :ref:`general_examples` - - :ref:`API Reference <api_reference>` - - :ref:`manual` - - :ref:`contributing` - - :ref:`FAQ` - - :ref:`whats_new` - - :ref:`Cite MNE <cite>` - - :ref:`cited` - - .. raw:: html - - <h2>Community</h2> + <h2>More help</h2> - `MNE mailing list`_ for analysis talk - `GitHub <https://github.com/mne-tools/mne-python/issues/>`_ for @@ -112,11 +97,12 @@ .. raw:: html - <h2>Versions</h2> + <h2>News</h2> - - `Stable <http://martinos.org/mne/stable>`_ - - `Development <http://martinos.org/mne/dev>`_ + - :ref:`whats_new` + - :ref:`cited` + - :ref:`cite` .. raw:: html - <a class="twitter-timeline" href="https://twitter.com/mne_python" data-widget-id="317730454184804352">Tweets by @mne_python</a> + <a class="twitter-timeline" href="https://twitter.com/mne_python" data-widget-id="317730454184804352">Updates by @mne_python</a> diff --git a/doc/python_reference.rst b/doc/python_reference.rst index ad278c249eb..1af5dc89cc8 100644 --- a/doc/python_reference.rst +++ b/doc/python_reference.rst @@ -975,6 +975,8 @@ Simulation simulate_sparse_stc select_source_in_label +.. _api_decoding: + Decoding ======== diff --git a/doc/sphinxext/cited_mne.py b/doc/sphinxext/cited_mne.py index 7de017696a8..e50ee46e021 100644 --- a/doc/sphinxext/cited_mne.py +++ b/doc/sphinxext/cited_mne.py @@ -39,8 +39,8 @@ # ##### Templates for citations ##### html = (u""".. _cited -Publications from MNE users -=========================== +Publications by users +===================== Papers citing MNE as extracted from Google Scholar (on %s). diff --git a/doc/whats_new.rst b/doc/whats_new.rst index efbfe5ad175..e4bdd4af444 100644 --- a/doc/whats_new.rst +++ b/doc/whats_new.rst @@ -1,8 +1,8 @@ .. include:: links.inc .. _whats_new: -What's new -========== +MNE-Python Code Updates +======================= .. Note, we are now using links to highlight new functions and classes. Please be sure to follow the examples below like :func:`mne.stats.f_mway_rm`, so the whats_new page will have a link to the function/class documentation.